This is a shortened version of my web crawler code. It runs on Crawler and works well. The problem is that the crawler returns 20 strings every half a second, and they are stored in MySQL much more slowly than new ones arrive. How do I properly synchronize these two processes?
function crawl(done) {                        // renamed so it does not shadow the Crawler instance
    for (i = 0; ...) {                        // loop over the links to fetch
        crawler.queue([{
            "uri": link,
            "callback": function (error, result, $) {
                var arr = $('.someclass');    // ~20 strings per page
                done(arr);
            }
        }]);
    }
}

crawl(function (arr) {
    savetosql(s, arr);
});

function savetosql(s, arr) {
    var query = connection.query('UPDATE ...');
}
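(connection is presumably the node mysql module here; its query() call accepts a completion callback, and that callback is the only place where a write is known to have finished. A minimal sketch of that hook, with the callback signature assumed from the mysql module:)

// sketch only: assumes the node `mysql` module's connection.query(sql, callback)
connection.query('UPDATE ...', function (err, results) {
    // this fires only once MySQL has finished the write; savetosql above
    // never waits for it, so queries pile up while the crawler keeps going
    if (err) throw err;
});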
Not sure if this is what you're looking for. I've never used Crawler, but this is how you serialize things in Node.js.
function crawl_step(step, limit) {
    // no more for loop: each step queues exactly one crawl
    crawler.queue([{
        "uri": link,
        "callback": function (error, result, $) {
            var arr = $('.someclass');
            // instead of done(arr), run the query here and recurse from its callback
            connection.query('UPDATE ...', function (err, results) {
                // this runs when the SQL query is over
                if (step < limit) crawl_step(step + 1, limit);
                else {
                    // ... do something when all crawls are over ...
                }
            });
        }
    }]);
}

crawl_step(0, 100);
To make the serialization process easier to understand, let's work with a more basic API: we want to read 100 files, but only if they exist, using the async calls in a serial, one-after-another manner.
var fs = require('fs');

function step(step_nr) {
    fs.exists(step_nr + '.txt', function (exists) {
        if (!exists) return;                        // stop the chain if the file is missing
        fs.readFile(step_nr + '.txt', 'utf8', function (err, data) {
            if (err) throw err;
            console.log(data);
            // the next read starts only after this one has finished
            if (step_nr < 100) step(step_nr + 1);
        });
    });
}

step(0);
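Applied back to the original crawler/MySQL problem, the same idea can also be written as a small in-memory queue that is drained one query at a time. This is only a sketch under assumptions: connection is a node mysql connection, and enqueue/drain are hypothetical helpers, not part of Crawler or the mysql module.

// Hypothetical helpers: buffer crawler results and write them one at a time.
var pending = [];     // result sets waiting to be written
var writing = false;  // true while an UPDATE is in flight

function enqueue(arr) {
    pending.push(arr);
    drain();
}

function drain() {
    if (writing || pending.length === 0) return;
    writing = true;
    var arr = pending.shift();                // next result set, used by the elided UPDATE
    connection.query('UPDATE ...', function (err) {
        if (err) console.error(err);
        writing = false;
        drain();                              // start the next write only after this one finishes
    });
}

// in the crawler callback, call enqueue($('.someclass')) instead of savetosql(s, arr)

This way the crawler can keep queueing pages at full speed while MySQL drains the buffered results strictly one query after another.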