Node.js unexpected stack overflow with multiple GET requests

I have a function that GETs a JSON object from a remote server, or from a local on-disk cache.
In one use case, I have to call this function several thousand times with varying arguments, but when I do so, I get maximum call stack size exceeded errors. I must be making a recursive call somewhere, but I can't see where it could be, as my process.nextTick calls seem to be in the right place.
I see none of my log.error readouts in the console, which would appear if any of the recursive calls to retry the request were being made.
The console output shows a repeated occurrence of
(node) warning: Recursive process.nextTick detected. This will break in the next version of node. Please use setImmediate for recursive deferral.
then...
RangeError: Maximum call stack size exceeded
Then the program exits.
Can anyone offer any help regarding what I may be doing wrong? I'm completely stumped.
Below is the function that invokes the problematic function "tf2inv.loadInventory()"
function refreshInventories(accounts, force, callback) {
    // job executes download function, then pushes to inventories object
    var inventories = {};
    var Qinv = async.queue(function (task, invCallback) {
        tf2inv.loadInventory(
            task.force,
            task.steamid,
            function (inv, alias) {
                inventories[alias] = inv;
                process.nextTick(invCallback);
            }
        );
    }, 100);
    // when all queue jobs have finished, callback with populated inventories object
    Qinv.drain = function (err) {
        log.info('All inventories downloaded');
        callback(inventories);
    };
    // adding jobs to the queue
    for (var i = accounts.length - 1; i >= 0; i--) {
        Qinv.push({
            force: force,
            steamid: accounts[i]
        });
    }
}
Shown here is the function that either parses from the cache, or requests from the remote server.
//tf2inv
var loadInventory = function(force, sid, callback) {
    var invLoc = invFolder + sid;
    if (force) {
        if (fs.existsSync(invLoc)) {
            fs.unlinkSync(invLoc);
        }
    }
    if (fs.existsSync(invLoc)) {
        var body = fs.readFileSync(invLoc);
        try {
            var inventory = JSON.parse(body);
        } catch (e) {
            fs.unlinkSync(invLoc);
            log.error("parsing " + sid + "'s inventory");
            loadInventory(true, sid, invFolder, callback);
            return;
        }
        process.nextTick(function() { callback(inventory, sid) });
        return;
    } else {
        var urlPre = "http://api.steampowered.com/IEconItems_440/GetPlayerItems/v0001/?key=";
        var urlSidPre = "&steamid=";
        var urlInvSuf = "&inventory=yes";
        var URL = urlPre + steam_API + urlSidPre + sid + urlInvSuf;
        http.get(URL, function (res) {
            var body = '';
            res.on('data', function (data) {
                body += data;
                fs.appendFile(invLoc, data);
            });
            res.on('end', function() {
                try {
                    inventory = JSON.parse(body);
                } catch (e) {
                    if (fs.existsSync(invLoc)) {
                        fs.unlinkSync(invLoc);
                    }
                    log.error("parsing " + sid + "'s downloaded inventory");
                    loadInventory(force, sid, invFolder, callback);
                    return;
                }
                process.nextTick(function() { callback(inventory, sid) });
                return;
            });
            res.on('error', function (e, socket) {
                log.error(sid + " inventory error");
                if (fs.existsSync(invLoc)) {
                    fs.unlinkSync(invLoc);
                }
                log.debug('Retrying inventory');
                loadInventory(force, sid, invFolder, callback);
                return;
            });
            res.on('close', function () { res.emit('end'); log.error('connection closed'); });
        })
        .on('error', function(e) {
            log.error(JSON.stringify(e));
            if (fs.existsSync(invLoc)) {
                fs.unlinkSync(invLoc);
            }
            log.debug('Retrying inventory');
            loadInventory(force, sid, invFolder, callback);
            return;
        });
    }
};

It is likely to be failing to parse the body coming back from the server. It then immediately calls itself again, failing again, infinitely looping and causing a stack overflow.
I suggest you do not retry automatically on a failed parse: if it fails once, it is likely to fail again. It would be best to call back with the error and let the code that calls this function handle it, or pass it back up to the point where the user can be told that something is wrong, as sketched below.
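As a rough sketch (hypothetical, switching the callback to Node's error-first convention), the cache branch could surface the parse error instead of recursing:

try {
    var inventory = JSON.parse(body);
} catch (e) {
    fs.unlinkSync(invLoc); // discard the corrupt cache file
    return callback(e);    // report the failure instead of retrying
}
process.nextTick(function() { callback(null, inventory, sid); });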

Related

How can I add a setTimeOut() to run a function and insert data multiple times?

A few days ago I did a project and ran into some problems, which were solved in this question; let me try to summarize it.
I need to insert multiple objects into a SQL Server DB. For that, I wrote a function that loops over another function, which opens a connection, inserts, and closes the connection, then repeats over and over again.
It worked fine, until today, when it was tested on a colleague's PC against the production server, and I get this error:
Error: Requests can only be made in the LoggedIn state, not the LoggedInSendingInitialSql state
Error: Requests can only be made in the LoggedIn state, not the SentLogin7WithStandardLogin state
Here's the code we tested (the same as in my last question); it works on my PC, but not on the other:
var config = {
    ...
};

function insertOffice(index) {
    var connection = new Connection(config);
    connection.on("connect", function (err) {
        console.log("Successful connection");
    });
    connection.connect();
    let url = `https://api.openweathermap.org/data/2.5/weather?lat=${offices[index].latjson}&lon=${offices[index].lonjson}&appid=${api_key}&units=metric&lang=sp`;
    fetch(url)
        .then((response) => { return response.json(); })
        .then(function (data) {
            var myObject = {
                Id_Oficina: offices[index].IdOficina,
                ...
            };
            const request = new Request(
                "EXEC USP_BI_CSL_insert_reg_RegistroTemperaturaXidOdicina @IdOficina, ...",
                function (err) {
                    if (err) {
                        console.log("Couldn't insert data (" + index + "), " + err);
                    } else {
                        console.log("Data with ID: " + myObject.Id_Oficina + " inserted successfully (" + index + ").");
                    }
                }
            );
            request.addParameter("IdOficina", TYPES.SmallInt, myObject.Id_Oficina);
            ...
            request.on("row", function (columns) {
                columns.forEach(function (column) {
                    if (column.value === null) {
                        console.log("NULL");
                    } else {
                        console.log("Product id of inserted item is " + column.value);
                    }
                });
            });
            request.on("requestCompleted", function () {
                connection.close();
            });
            connection.execSql(request);
        });
}

function functionLooper() {
    for (let i = 0; i < offices.length; i++) {
        let response = insertOffice(i);
    }
}

functionLooper();
function functionLooper() {
for (let i = 0; i < offices.length; i++) {
let response = insertOffice(i);
}
}
functionLooper();
So I thought it would be a good idea to use setTimeout to:
Run functionLooper().
Open connection, insert and close.
Wait a few seconds.
Repeat.
So, I changed to this:
setTimeout(functionLooper, 2000);
function functionLooper() {
    for (let i = 0; i < offices.length; i++) {
        let response = insertOffice(i);
    }
}
It works, but as you can see it only waits before the first run, so I tried to write a function that runs setTimeout(functionLooper, 500); the way functionLooper() runs insertOffice(), but it didn't work either.
function TimerLooper() {
    for (let i = 0; i < offices.length; i++) {
        setTimeout(functionLooper, 500);
    }
}
function functionLooper() {
    for (let i = 0; i < offices.length; i++) {
        let response = insertOffice(i);
    }
}
TimerLooper();
This shows me this error:
Error: Validation failed for parameter 'Descripcion'. No collation was set by the server for the current connection.
file:///...:/.../.../node_modules/node-fetch/src/index.js:95
    reject(new FetchError(`request to ${request.url} failed, reason: ${error.message}`, 'system', error));
    ^ FetchError: request to https://api.openweathermap.org/data/2.5/weather?lat=XX&lon=XX&appid=XX&units=metric&lang=sp failed, reason: connect ETIMEDOUT X.X.X.X:X
So, I have some questions:
How can I use setTimeout properly? I wrote this function based on what I've seen here on SO, but I just can't get it right and I don't know what I'm doing wrong.
Why does it work on my PC but not on the other one? Do we have to change some kind of config or something?
Is using setTimeout the correct way to solve this problem? If not, what would you suggest?
Could you do something like:
// edit: not disconnect but end
connection.on("end", function () {
    functionLooper(index + 1);
});

function functionLooper(i) {
    if (i < offices.length) insertOffice(i);
}
Edit: according to the tedious docs, there is an 'end' event emitted on connection.close():
Event: 'end'
function () { }
The connection has ended. This may be as a result of the client calling close(), the server closing the connection, or a network error.
My suggestion from above
var config = {
    ...
};

function insertOffice(index) {
    var connection = new Connection(config);
    connection.on("connect", function (err) {
        console.log("Successful connection");
    });
    connection.connect();
    let url = `...`;
    fetch(url)
        .then((response) => { return response.json(); })
        .then(function (data) {
            ...
        });
    connection.on("end", function () {
        functionLooper(index + 1);
    });
}

function functionLooper(i) {
    if (i < offices.length) insertOffice(i);
}

Asynchronously Write Large Array of Objects to Redis with Node.js

I created a Node.js script that creates a large array of randomly generated test data, and I want to write it to a Redis DB. I am using the redis client library and the async library. Initially, I tried executing a redisClient.hset(...) command within the for loop that generates my test data, but after some Googling, I learned the Redis method is asynchronous while the for loop is synchronous. Even after seeing some questions on Stack Overflow, I can't get it to work the way I want.
I can write to Redis without a problem with a small array, or a larger one such as 100,000 items. However, it does not work well with an array of 5,000,000 items: I end up running out of memory because the Redis commands seem to queue up but aren't executed until after async.each(...) completes, and the Node process never exits. How do I get the Redis client to actually execute the commands as I call redisClient.hset(...)?
Here is a fragment of the code I am working with.
var redis = require('redis');
var async = require('async');

var redisClient = redis.createClient(6379, '192.168.1.150');
var testData = generateTestData();

async.each(testData, function(item, callback) {
    var someData = JSON.stringify(item.data);
    redisClient.hset('item:' + item.key, 'hashKey', someData, function(err, reply) {
        console.log("Item was persisted. Result: " + reply);
    });
    callback();
}, function(err) {
    if (err) {
        console.error(err);
    } else {
        console.log("Items have been persisted to Redis.");
    }
});
You could call eachLimit to ensure you are not executing too many redisClient.hset calls at the same time.
To avoid overflowing the call stack you could do setTimeout(callback, 0); instead of calling the callback directly.
edit:
Forget what I said about setTimeout. All you need to do is call the callback at the right place. Like so:
redisClient.hset('item:' + item.key, 'hashKey', someData, function(err, reply) {
    console.log("Item was persisted. Result: " + reply);
    callback();
});
You may still want to use eachLimit and try out which limit works best.
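A minimal sketch of the eachLimit variant (a hypothetical limit of 100; tune it for your setup):

async.eachLimit(testData, 100, function(item, callback) {
    var someData = JSON.stringify(item.data);
    redisClient.hset('item:' + item.key, 'hashKey', someData, function(err, reply) {
        callback(err); // move on only after Redis has replied
    });
}, function(err) {
    if (err) console.error(err);
    else console.log("Items have been persisted to Redis.");
});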
By the way - async.each is supposed to be used only on code that schedules the invocation of the callback in the javascript event queue (e.g. timer, network, etc) . Never use it on code that calls the callback immediately as was the case in your original code.
edit:
You can implement your own eachLimit function that takes a generator as its first argument instead of an array. Then you write a generator function to create the test data. For that to work, node needs to be run with "node --harmony code.js".
function eachLimit(generator, limit, iterator, callback) {
    var isError = false, j;
    function startNextSetOfActions() {
        var elems = [];
        for (var i = 0; i < limit; i++) {
            j = generator.next();
            if (j.done) break;
            elems.push(j.value);
        }
        var activeActions = elems.length;
        if (activeActions === 0) {
            callback(null);
        }
        elems.forEach(function(elem) {
            iterator(elem, function(err) {
                if (isError) return;
                else if (err) {
                    callback(err);
                    isError = true;
                    return;
                }
                activeActions--;
                if (activeActions === 0) startNextSetOfActions();
            });
        });
    }
    startNextSetOfActions();
}
function* testData() {
    while (...) {
        yield new Data(...);
    }
}

eachLimit(testData(), 10, function(item, callback) {
    var someData = JSON.stringify(item.data);
    redisClient.hset('item:' + item.key, 'hashKey', someData, function(err, reply) {
        if (err) callback(err);
        else {
            console.log("Item was persisted. Result: " + reply);
            callback();
        }
    });
}, function(err) {
    if (err) {
        console.error(err);
    } else {
        console.log("Items have been persisted to Redis.");
    }
});

RangeError: call stack exceed on async .eachSeries

At last, an actual stack overflow error reported on stackoverflow!
I get the following error in the code below:
var m = pathA.substr(-(pathB.length)); // var
^
RangeError: Maximum call stack size exceeded
I'm fairly sure the answer is reported here, towards the bottom:
https://github.com/caolan/async/issues/75
However, I don't understand how to fix my code. I am not calling sync functions inside async functions, as far as I know. Can anyone clarify what I have to do to fix my code?
I'm iterating over the cross-product of a result-set to concatenate the path strings where one is the substring of the other.
var i = 0;
async.eachSeries(results.rows, function (r, next2a) {
    var pathA = results.rows[i].id_path;
    var j = 0;
    async.eachSeries(results.rows, function (r2, next1a) {
        var pathB = results.rows[j].id_path;
        // check i=j
        if (!(i == j)) {
            var m = pathA.substr(-(pathB.length)); // var m = (pathA || '').substr(-((pathB) ? pathB.length : 0));
            if ((m == pathB) && (pathA.length > pathB.length)) {
                logger.log('DEBUG', (pathB + ' => ' + pathA));
                conn.query("UPDATE user_token_details SET id_l1=$1, id_l2=$2, id_l3=$3, id_l4=$4, id_l5=$5, id_path2=$9, id_path=$6 WHERE token_uuid=$7 AND user_uuid=$8",
                    [results.rows[i].id_l1, results.rows[i].id_l2, results.rows[i].id_l3, results.rows[i].id_l4, results.rows[i].id_l5, results.rows[i].id_path,
                     results.rows[j].token_uuid, user_uuid, results.rows[j].id_path],
                    function (error, result) {
                        if (error) {
                            throw error;
                        }
                        j++;
                        next1a();
                    });
            } else {
                j++;
                next1a();
            }
        } else {
            j++;
            next1a();
        }
    }, function () {
        i++;
        next2a();
    });
}, function (err) {
});
Here is the form of this spaghetti:
var A = [0, 1, 2, 3, 4...300];
async.eachSeries(A, function (a, next_a) {
    async.eachSeries(A, function (b, next_b) {
        // "Range Error: Maximum call stack size exceeded"
        doSomethingAsync(a, b, function () {
            next_b();
        });
    }, function (err) {
        next_a();
    });
}, function (err) {
    // resume
});
The problem is that async.eachSeries only behaves asynchronously if the callback inside it is called asynchronously. In your case, your last two calls to next1a are not performing a query, so they occur synchronously, and thus extend the call stack. In this case, you are likely iterating enough that you hit max stack depth. The simplest fix is to always call next1a asynchronously.
Replace each instance of
next1a();
with
setImmediate(next1a);
except the one that is already async because of the query. Note that while process.nextTick(next1a) would also work, it has the potential to block the event loop from processing any other tasks. This is because process.nextTick queues the callback as a microtask, whereas setImmediate queues the callback as a macrotask.
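A minimal standalone illustration of the difference (my sketch, not from the original answer): a recursive chain that invokes its continuation synchronously adds a stack frame on every step, while one deferred with setImmediate unwinds the stack between steps.

function iterateSync(i, done) {
    if (i === 1e6) return done();
    iterateSync(i + 1, done); // synchronous: each step adds a stack frame
}

function iterateAsync(i, done) {
    if (i === 1e6) return done();
    setImmediate(function () { // deferred: the stack unwinds between steps
        iterateAsync(i + 1, done);
    });
}

// iterateSync(0, function () {}); // RangeError: Maximum call stack size exceeded
iterateAsync(0, function () { console.log('done'); });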

Using Async waterfall in node.js

I have 2 functions that I'm running asynchronously. I'd like to write them using the waterfall model. The thing is, I don't know how.
Here is my code :
var fs = require('fs');

function updateJson(ticker, value) {
    //var stocksJson = JSON.parse(fs.readFileSync("stocktest.json"));
    fs.readFile('stocktest.json', function(error, file) {
        var stocksJson = JSON.parse(file);
        if (stocksJson[ticker] != null) {
            console.log(ticker + " price : " + stocksJson[ticker].price);
            console.log("changing the value...");
            stocksJson[ticker].price = value;
            console.log("Price after the change has been made -- " + stocksJson[ticker].price);
            console.log("printing the Json.stringify");
            console.log(JSON.stringify(stocksJson, null, 4));
            fs.writeFile('stocktest.json', JSON.stringify(stocksJson, null, 4), function(err) {
                if (!err) {
                    console.log("File successfully written");
                }
                if (err) {
                    console.error(err);
                }
            }); // end of writeFile
        } else {
            console.log(ticker + " doesn't exist on the json");
        }
    });
} // end of updateJson
Any idea how I can write this using waterfall, so I'll be able to control the flow? Please show me some examples, because I'm new to node.js.
First identify the steps and write them as asynchronous functions (taking a callback argument)
read the file
function readFile(readFileCallback) {
    fs.readFile('stocktest.json', function (error, file) {
        if (error) {
            readFileCallback(error);
        } else {
            readFileCallback(null, file);
        }
    });
}
process the file (I removed most of the console.log in the examples)
function processFile(file, processFileCallback) {
    var stocksJson = JSON.parse(file);
    if (stocksJson[ticker] != null) {
        stocksJson[ticker].price = value;
        fs.writeFile('stocktest.json', JSON.stringify(stocksJson, null, 4), function (error) {
            if (error) {
                processFileCallback(error);
            } else {
                console.log("File successfully written");
                processFileCallback(null);
            }
        });
    } else {
        console.log(ticker + " doesn't exist on the json");
        processFileCallback(null); // callback should always be called once (and only one time)
    }
}
Note that I did no specific error handling here; I'll take advantage of async.waterfall to centralize error handling in one place.
Also be careful that if you have branches (if/else/switch/...) in an asynchronous function, each path always calls the callback once (and only once).
Plug everything with async.waterfall
async.waterfall([
    readFile,
    processFile
], function (error) {
    if (error) {
        // handle readFile error or processFile error here
    }
});
Clean example
The previous code was excessively verbose to make the explanations clearer. Here is a full cleaned example:
async.waterfall([
    function readFile(readFileCallback) {
        fs.readFile('stocktest.json', readFileCallback);
    },
    function processFile(file, processFileCallback) {
        var stocksJson = JSON.parse(file);
        if (stocksJson[ticker] != null) {
            stocksJson[ticker].price = value;
            fs.writeFile('stocktest.json', JSON.stringify(stocksJson, null, 4), function (error) {
                if (!error) {
                    console.log("File successfully written");
                }
                processFileCallback(error);
            });
        } else {
            console.log(ticker + " doesn't exist on the json");
            processFileCallback(null);
        }
    }
], function (error) {
    if (error) {
        // handle readFile error or processFile error here
    }
});
I left the function names because it helps readability and helps debugging with tools like chrome debugger.
If you use underscore (on npm), you can also replace the first function with _.partial(fs.readFile, 'stocktest.json')
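That shorthand would look something like this (a sketch, assuming underscore is required as _ and reusing processFile from above):

var _ = require('underscore');

async.waterfall([
    _.partial(fs.readFile, 'stocktest.json'), // invoked as fs.readFile('stocktest.json', callback)
    processFile
], function (error) {
    if (error) {
        // handle readFile error or processFile error here
    }
});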
First and foremost, make sure you read the documentation regarding async.waterfall.
Now, there are couple key parts about the waterfall control flow:
The control flow is specified by an array of functions for invocation as the first argument, and a "complete" callback when the flow is finished as the second argument.
The array of functions are invoked in series (as opposed to parallel).
If an error (usually named err) is encountered at any operation in the flow array, it will short-circuit and immediately invoke the "complete"/"finish"/"done" callback.
Arguments from the previously executed function are applied to the next function in the control flow, in order, and an "intermediate" callback is supplied as the last argument. Note: The first function only has this "intermediate" callback, and the "complete" callback will have the arguments of the last invoked function in the control flow (with consideration to any errors) but with an err argument prepended instead of an "intermediate" callback that is appended.
The callbacks for each individual operation (I call this cbAsync in my examples) should be invoked when you're ready to move on: The first parameter will be an error, if any, and the second (third, fourth... etc.) parameter will be any data you want to pass to the subsequent operation.
The first goal is to get your code working almost verbatim alongside the introduction of async.waterfall. I decided to remove all your console.log statements and simplified your error handling. Here is the first iteration (untested code):
var fs = require('fs'),
    async = require('async');

function updateJson(ticker, value) {
    async.waterfall([ // the series operation list of `async.waterfall`
        // waterfall operation 1, invoke cbAsync when done
        function getTicker(cbAsync) {
            fs.readFile('stocktest.json', function(err, file) {
                if (err) {
                    // if there was an error, let async know and bail
                    cbAsync(err);
                    return; // bail
                }
                var stocksJson = JSON.parse(file);
                if (stocksJson[ticker] == null) {
                    // if we don't have the ticker, let "complete" know and bail
                    cbAsync(new Error('Missing ticker property in JSON.'));
                    return; // bail
                }
                stocksJson[ticker].price = value;
                // err = null (no error), jsonString = JSON.stringify(...)
                cbAsync(null, JSON.stringify(stocksJson, null, 4));
            });
        },
        function writeTicker(jsonString, cbAsync) {
            fs.writeFile('stocktest.json', jsonString, function(err) {
                cbAsync(err); // err will be null if the operation was successful
            });
        }
    ], function asyncComplete(err) { // the "complete" callback of `async.waterfall`
        if (err) { // there was an error with either `getTicker` or `writeTicker`
            console.warn('Error updating stock ticker JSON.', err);
        } else {
            console.info('Successfully completed operation.');
        }
    });
}
The second iteration divides up the operation flow a bit more. It puts it into smaller single-operation oriented chunks of code. I'm not going to comment it, it speaks for itself (again, untested):
var fs = require('fs'),
    async = require('async');

function updateJson(ticker, value, callback) { // introduced a main callback
    var stockTestFile = 'stocktest.json';
    async.waterfall([
        function getTicker(cbAsync) {
            fs.readFile(stockTestFile, function(err, file) {
                cbAsync(err, file);
            });
        },
        function parseAndPrepareStockTicker(file, cbAsync) {
            var stocksJson = JSON.parse(file);
            if (stocksJson[ticker] == null) {
                cbAsync(new Error('Missing ticker property in JSON.'));
                return;
            }
            stocksJson[ticker].price = value;
            cbAsync(null, JSON.stringify(stocksJson, null, 4));
        },
        function writeTicker(jsonString, cbAsync) {
            fs.writeFile(stockTestFile, jsonString, function(err) {
                cbAsync(err);
            });
        }
    ], function asyncComplete(err) {
        if (err) {
            console.warn('Error updating stock ticker JSON.', err);
        }
        callback(err);
    });
}
The last iteration short-hands a lot of this with the use of some bind tricks to decrease the call stack and increase readability (IMO), also untested:
var fs = require('fs'),
    async = require('async');

function updateJson(ticker, value, callback) {
    var stockTestFile = 'stocktest.json';
    async.waterfall([
        fs.readFile.bind(fs, stockTestFile),
        function parseStockTicker(file, cbAsync) {
            var stocksJson = JSON.parse(file);
            if (stocksJson[ticker] == null) {
                cbAsync(new Error('Missing ticker property in JSON.'));
                return;
            }
            cbAsync(null, stocksJson);
        },
        function prepareStockTicker(stocksJson, cbAsync) {
            stocksJson[ticker].price = value;
            cbAsync(null, JSON.stringify(stocksJson, null, 4));
        },
        fs.writeFile.bind(fs, stockTestFile)
    ], function asyncComplete(err) {
        if (err) {
            console.warn('Error updating stock ticker JSON.', err);
        }
        callback(err);
    });
}
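Usage would then look something like this (hypothetical ticker and price, assuming the stocktest.json structure from the question):

updateJson('AAPL', 150.25, function (err) {
    if (err) {
        console.error('Update failed:', err);
    } else {
        console.log('Ticker updated.');
    }
});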
Basically, Node.js (and more generally JavaScript) functions that require some time to execute (be it for I/O or CPU processing) are typically asynchronous, so the event loop (to keep it simple, a loop that continuously checks for tasks to execute) can invoke the function right below the first one without blocking to wait for a response. If you are familiar with other languages like C or Java, you can think of an asynchronous function as a function that runs on another thread (this isn't necessarily true in JavaScript, but the programmer shouldn't have to care): when the execution terminates, that thread notifies the main one (the event loop's) that the job is done and hands over the results.
As said, once the first function has finished its job it must be able to signal that it is done, and it does so by invoking the callback function you pass to it. To give an example:
var callback = function(data, err) {
    if (!err) {
        // do something with the received data
    } else {
        // something went wrong
    }
};

asyncFunction1(someparams, callback);
asyncFunction2(someotherparams);
The execution flow calls asyncFunction1, then asyncFunction2, and every statement below, until asyncFunction1 ends; then the callback passed as the last parameter to asyncFunction1 is invoked to do something with the received data, if no errors occurred.
So, to make two or more asynchronous functions execute one after another, each only when the previous one has ended, you have to call each one inside the previous one's callback:
asyncTask1(data, function(result1, err) {
    if (!err) {
        asyncTask2(data, function(result2, err2) {
            if (!err2) {
                // call maybe a third async function
            } else {
                console.log(err2);
            }
        });
    } else {
        console.log(err);
    }
});
result1 is the return value from asyncTask1 and result2 is the return value from asyncTask2. This way you can nest as many asynchronous functions as you want.
In your case if you want another function to be called after updateJson() you must call it after this line:
console.log("File successfully written");

How to do repeated requests until one succeeds without blocking in node?

I have a function that takes a parameter and a callback. It's supposed to do a request to a remote API and get some info based on the parameter. When it gets the info, it needs to send it to the callback. Now, the remote API sometimes fails to provide. I need my function to keep trying until it manages to do it and then call the callback with the correct data.
Currently, I have the below code inside the function but I think that stuff like while (!done); isn't proper node code.
var history = {};
while (true) {
    var done = false;
    var retry = true;
    var req = https.request(options, function(res) {
        var acc = "";
        res.on("data", function(msg) {
            acc += msg.toString("utf-8");
        });
        res.on("end", function() {
            done = true;
            history = JSON.parse(acc);
            if (history.success) {
                retry = false;
            }
        });
    });
    req.end();
    while (!done);
    if (!retry) break;
}
callback(history);
How do I do it the right way?
There is no need to re-invent the wheel... you can use the popular async utility library, specifically its retry method in this case.
// try calling apiMethod 3 times
async.retry(3, apiMethod, function(err, result) {
    // do something with the result
});

// try calling apiMethod 3 times, waiting 200 ms between each retry
async.retry({times: 3, interval: 200}, apiMethod, function(err, result) {
    // do something with the result
});
async GitHub page
async.retry docs
Definitely not the way to go: while (!done); spins in a hard loop and takes up all of your CPU. Worse, because it blocks the event loop, the response callbacks that would set done can never run.
Instead you could do something like this (untested, and you may want to implement a back-off of some sort):
function tryUntilSuccess(options, callback) {
    var req = https.request(options, function(res) {
        var acc = "";
        res.on("data", function(msg) {
            acc += msg.toString("utf-8");
        });
        res.on("end", function() {
            var history = JSON.parse(acc); // <== Protect this if you may not get JSON back
            if (history.success) {
                callback(null, history);
            } else {
                tryUntilSuccess(options, callback);
            }
        });
    });
    req.end();
    req.on('error', function(e) {
        // Decide what to do here
        // if error is recoverable
        //     tryUntilSuccess(options, callback);
        // else
        //     callback(e);
    });
}

// Use the standard callback pattern of err in first param, success in second
tryUntilSuccess(options, function(err, resp) {
    // Your code here...
});
I found Dmitry's answer using the async utility library very useful and the best answer.
This answer expands his example to a working version that defines the apiMethod function and passes it a parameter. I was going to add the code as a comment but a separate answer is clearer.
const async = require('async');

const apiMethod = function(uri, callback) {
    try {
        // Call your api here (or whatever thing you want to do) and assign to result.
        const result = ...
        callback(null, result);
    } catch (err) {
        callback(err);
    }
};

const uri = 'http://www.test.com/api';
async.retry(
    { times: 5, interval: 200 },
    function (callback) { return apiMethod(uri, callback); },
    function(err, result) {
        if (err) {
            throw err; // Error still thrown after retrying N times, so rethrow.
        }
    }
);
Retry documentation: https://caolan.github.io/async/docs.html#retry
Note, an alternative to calling apiMethod(uri, callback) in the task is to use async.apply:
async.retry(
    {times: 5, interval: 200},
    async.apply(apiMethod, uri),
    function(err, result) {
        if (err) {
            throw err; // Error still thrown after retrying N times, so rethrow.
        }
    }
);
I hope this provides a good copy/paste boilerplate solution for someone.
Is this what you are trying to do?
var history = {};

function sendRequest(options, callback) {
    var req = https.request(options, function (res) {
        var acc = "";
        res.on("data", function (msg) {
            acc += msg.toString("utf-8");
        });
        res.on("end", function () {
            history = JSON.parse(acc);
            if (history.success) {
                callback(history);
            } else {
                sendRequest(options, callback);
            }
        });
    });
    req.end();
}

sendRequest(options, callback);
Without using any library: retry until it succeeds, with at most 10 attempts:
let retryCount = 0;
let isDone = false;
while (!isDone && retryCount < 10) {
    try {
        retryCount++;
        const response = await notion.pages.update(newPage);
        isDone = true;
    } catch (e) {
        console.log("Error: ", e.message);
        // condition for retrying
        if (e.code === APIErrorCode.RateLimited) {
            console.log(`retrying due to rate limit, retry count: ${retryCount}`);
        } else {
            // we don't want to retry
            isDone = true;
        }
    }
}
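Note that await is only valid inside an async function (or at the top level of an ES module), so in a plain script you would wrap the loop, e.g.:

(async () => {
    // ... the retry loop above goes here ...
})();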
I've solved this problem using the retry module.
Example:
var retry = require('retry');

// configuration
var operation = retry.operation({
    retries: 2,            // try 1 time and retry 2 times if needed, total = 3
    minTimeout: 1 * 1000,  // the number of milliseconds before starting the first retry
    maxTimeout: 3 * 1000   // the maximum number of milliseconds between two retries
});

// your unreliable task
var task = function(input, callback) {
    Math.random() > 0.5
        ? callback(null, 'ok')   // success
        : callback(new Error()); // error
};

// define a function that wraps our unreliable task into a fault tolerant task
function faultTolerantTask(input, callback) {
    operation.attempt(function(currentAttempt) {
        task(input, function(err, result) {
            console.log('Current attempt: ' + currentAttempt);
            if (operation.retry(err)) { // retry if needed
                return;
            }
            callback(err ? operation.mainError() : null, result);
        });
    });
}

// test
faultTolerantTask('some input', function(err, result) {
    console.log(err, result);
});
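One caveat worth adding (my note, not part of the original answer): a retry.operation() instance tracks its attempt state internally, so if faultTolerantTask can be called more than once, it is safer to create the operation inside the function:

function faultTolerantTask(input, callback) {
    // fresh state per call, so repeated calls don't share attempt counts
    var operation = retry.operation({ retries: 2, minTimeout: 1000, maxTimeout: 3000 });
    operation.attempt(function() {
        task(input, function(err, result) {
            if (operation.retry(err)) return; // retry if needed
            callback(err ? operation.mainError() : null, result);
        });
    });
}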
You could try something along the following lines. I'm sketching the general idea; you should replace trySomething with your HTTP request.
function keepTrying(onSuccess) {
    function trySomething(onSuccess, onError) {
        if (Date.now() % 7 === 0) {
            process.nextTick(onSuccess);
        } else {
            process.nextTick(onError);
        }
    }
    trySomething(onSuccess, function () {
        console.log('Failed, retrying...');
        keepTrying(onSuccess);
    });
}

keepTrying(function () {
    console.log('Succeeded!');
});
I hope this helps.
A library called Flashheart is also a suitable alternative. It's a REST client designed to be easy to use, and it supports retries.
For example, configure Flashheart to retry 10 times, with a delay of 500ms between requests:
const client = require('flashheart').createClient({
    retries: 10,
    retryTimeout: 500
});

const url = "https://www.example.com/";
client.get(url, (err, body) => {
    if (err) {
        console.error('handle error: ', err);
        return;
    }
    console.log(body);
});
For further information, check out the docs:
https://github.com/bbc/flashheart
Disclaimer: I have contributed to this library.
const INITIAL_DELAY = 2000;
const MAX_ATTEMPTS = 10;

function repeatUntilSucceeds(request) {
    return new Promise((resolve, reject) => {
        let attempt = 0;
        let delay = INITIAL_DELAY;
        function handleErrorRec(error) {
            if (attempt < MAX_ATTEMPTS) {
                setTimeout(execRequestRec, delay);
                attempt += 1;
                delay *= 2;
            } else {
                reject(error);
            }
        }
        function execRequestRec() {
            request().then(({ data, status, statusText }) => {
                if (status === 200) {
                    resolve(data);
                } else {
                    handleErrorRec(new Error(statusText));
                }
            }).catch(handleErrorRec);
        }
        execRequestRec();
    });
}
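The helper retries with exponential backoff (the delay doubles after each failure). Usage might look like this (a sketch assuming an axios-style request function, since the code destructures data, status, and statusText from the response):

const axios = require('axios');

repeatUntilSucceeds(() => axios.get('https://www.example.com/api'))
    .then(data => console.log('got data:', data))
    .catch(err => console.error('gave up after 10 attempts:', err));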
