I'm new to the Node.js platform and I'm trying to learn as much as I can. After playing with callbacks one thing really confuses me:
So, I have this function :
function registerUser(body, res, UserModel){
    var userJSON = {
        email : body.email,
        password : body.password,
        accessToken : null
    };
    var user = null;
    var userAlreadyExists = false;
    UserModel.find({}).select('email').exec(function(err, results){
        if(err){
            console.log('Database error : ' + err);
            // send the appropriate response
        }else{
            for(var index in results){
                if(results[index].email == userJSON.email){
                    userAlreadyExists = true;
                    break;
                }
            }
            if(userAlreadyExists){
                // send the appropriate response
            }else{
                newAccessToken(UserModel, function(error, token){
                    if(error != null){
                        // handle the error
                    }else{
                        userJSON.accessToken = token;
                        user = new UserModel(userJSON);
                        user.save(function(err){
                            if(err){
                                // .. handle the error
                            }else{
                                // .. handle the registration
                            }
                        });
                    }
                });
            }
        }
    });
}
And then the function which accepts the callback:
function newAccessToken(UserModel, callback){
    UserModel.find({}).select('email accessToken').exec(function(err, results){
        if(err){
            callback(err, null);
        }else{
            // .... bunch of logic for generating the token
            callback(null, token);
        }
    });
}
I would expect the callback not to work (maybe throw an error), since both user and userJSON are not defined in its context. (Well, that's not exactly true, but since it is executed asynchronously, after a while, I would expect the callback to lose its references to those variables, which were defined locally in the registerUser function.) Instead, this example works perfectly: the callback keeps its references to those two variables defined in the registerUser function. Could somebody explain to me how the async callback and the references work, and why the example works?
Those aren't just callbacks, they are closures, and in JavaScript the scope treatment is special. Check this document:
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Closures
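For a quick illustration: a closure keeps a reference to the variables of its enclosing function even after that function has returned (setTimeout here just stands in for any async operation):
function outer() {
    var local = "still here";
    setTimeout(function () {
        console.log(local); // prints "still here" one second after outer() returned
    }, 1000);
}
outer();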
Hi, the function you 'call back to' is within the scope of the variables you are trying to access, so you are good to go for accessing them.
This is not a Node.js thing; regular JS works the same way.
The difference:
1) The callback will not be able to access a var called 'foo':
function finishfunction() {
    console.log(foo); /* ReferenceError: foo is not defined */
}

function functionwithcallback(callback) {
    callback();
}

function doStuff() {
    var foo = "bar";
    functionwithcallback(finishfunction);
}

doStuff();
2) Like yours, access to 'foo' is fine:
function functionwithcallback(callback) {
    callback();
}

function doStuff() {
    var foo = "bar";
    functionwithcallback(function() {
        console.log(foo); /* all fine */
    });
}

doStuff();
The following code is supposed to update the username in the database and then retrieve the updated username.
updateUserName and getUserName are two different REST calls.
updateName(name) {
var obj = this;
if (name === 'None') {
name = null;
}
obj.UtilityService.updateUserName(name, obj.userId)
.success(function (data) {
if (data) {
obj.getUserName(obj.userId);
console.log('Name is updated for ID:'||obj.userId);
} else {
console.log('Something Wrong');
}
});
}
getUserName(userId){
obj.UtilityService.getUserName(userId)
.then(function (result) {
console.log(result.user.userId);
}
}
I have the user name 'Nathan Drake' in the database.
When I run the update function with 'Elena Fisher', it still returns 'Nathan Drake'.
I've read some articles about making synchronous service calls, but I'm unable to figure out what is going wrong.
Please help.
You could wrap your update function in a promise:
var updatePromise = $q.when(updateName(name)); // creates a promise
When the promise settles, you can consume it with then(), which takes a success callback and an error callback:
updatePromise.then(function successCallback(response){ // runs once the promise resolves
getUserName(userId) // execute the rest of your code
},
function errorCallback(response){
console.log(error)
});
You would need to inject $q into the scope you are working with
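For example (a minimal sketch; the module and controller names are made up):
angular.module('app').controller('UserController', function ($scope, $q, UtilityService) {
    // $q is now injected and available to wrap calls in promises
});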
Your code does not quite make sense as written; I see possible mistakes: it looks like you are interchanging user name and user ID, and you are calling the obj context from inside a function where it is not declared, etc. Either we are missing code or this will fail when you try to run it.
Here is your example with some fixes and comments that show how you could do it using callbacks (no sync code; as mentioned by everyone else on this thread, you should avoid actually waiting for I/O and should use callbacks instead).
updateName(name) {
var obj = this; // good, you captured this
if (name === 'None') {
name = null;
}
obj.UtilityService.updateUserName(name, obj.userId)
.success(function (data) {
if (data) {
// ok, you successfully updated the name so why would you go back to the server and get it again? You know the value based on your update.
console.log('Name is updated for ID:' + obj.userId.toString());
// for your example though here is how you could handle it
obj.getUserName(obj, obj.userId, function(user){ // i assumed the name is stored in variable userName
console.log('Name from server = ' + user.userName); // no idea what you are returning but you can figure it out from here
// maybe you also want to capture it again??
obj.name = user.userName;
});
} else {
console.log('Something Wrong');
}
});
}
// pass in captured this as obj, the user id, and a callback
getUserName(obj, userId, callback){
obj.UtilityService.getUserName(userId)
.then(function (result) {
callback(result); // call the callback with the result. The caller can then do something with it
});
}
I have 2 functions that I'm running asynchronously. I'd like to write them using the waterfall model. The thing is, I don't know how.
Here is my code :
var fs = require('fs');
function updateJson(ticker, value) {
//var stocksJson = JSON.parse(fs.readFileSync("stocktest.json"));
fs.readFile('stocktest.json', function(error, file) {
var stocksJson = JSON.parse(file);
if (stocksJson[ticker]!=null) {
console.log(ticker+" price : " + stocksJson[ticker].price);
console.log("changing the value...")
stocksJson[ticker].price = value;
console.log("Price after the change has been made -- " + stocksJson[ticker].price);
console.log("printing the the Json.stringify")
console.log(JSON.stringify(stocksJson, null, 4));
fs.writeFile('stocktest.json', JSON.stringify(stocksJson, null, 4), function(err) {
if(!err) {
console.log("File successfully written");
}
if (err) {
console.error(err);
}
}); //end of writeFile
} else {
console.log(ticker + " doesn't exist on the json");
}
});
} // end of updateJson
Any idea how I can write it using waterfall, so I'll be able to control this? Please show me some examples, because I'm new to node.js.
First identify the steps and write them as asynchronous functions (taking a callback argument)
read the file
function readFile(readFileCallback) {
fs.readFile('stocktest.json', function (error, file) {
if (error) {
readFileCallback(error);
} else {
readFileCallback(null, file);
}
});
}
process the file (I removed most of the console.log in the examples)
function processFile(file, processFileCallback) {
var stocksJson = JSON.parse(file);
if (stocksJson[ticker] != null) {
stocksJson[ticker].price = value;
fs.writeFile('stocktest.json', JSON.stringify(stocksJson, null, 4), function (error) {
if (error) {
processFileCallback(error);
} else {
console.log("File successfully written");
processFileCallback(null);
}
});
}
else {
console.log(ticker + " doesn't exist on the json");
processFileCallback(null); //callback should always be called once (and only one time)
}
}
Note that I did no specific error handling here; I'll take advantage of async.waterfall to centralize error handling in one place.
Also be careful: if you have branches (if/else/switch/...) in an asynchronous function, make sure the callback is always called once (and only one time).
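For example, this classic mistake calls the callback twice on the error path, because the first call is not followed by a return (a hypothetical sketch):
function badBranching(done) {
    fs.stat('stocktest.json', function (error, stats) {
        if (error) {
            done(error); // missing `return` here...
        }
        done(null, stats); // ...so on error, `done` fires a second time
    });
}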
Plug everything with async.waterfall
async.waterfall([
readFile,
processFile
], function (error) {
if (error) {
//handle readFile error or processFile error here
}
});
Clean example
The previous code was excessively verbose to make the explanations clearer. Here is a full cleaned example:
async.waterfall([
function readFile(readFileCallback) {
fs.readFile('stocktest.json', readFileCallback);
},
function processFile(file, processFileCallback) {
var stocksJson = JSON.parse(file);
if (stocksJson[ticker] != null) {
stocksJson[ticker].price = value;
fs.writeFile('stocktest.json', JSON.stringify(stocksJson, null, 4), function (error) {
if (!error) {
console.log("File successfully written");
}
processFileCallback(error);
});
}
else {
console.log(ticker + " doesn't exist on the json");
processFileCallback(null);
}
}
], function (error) {
if (error) {
//handle readFile error or processFile error here
}
});
I left the function names in because they help readability and help debugging with tools like the Chrome debugger.
If you use underscore (on npm), you can also replace the first function with _.partial(fs.readFile, 'stocktest.json')
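That replacement would look like this (assuming underscore is installed and required as _):
var _ = require('underscore');

async.waterfall([
    _.partial(fs.readFile, 'stocktest.json'), // same behavior as the readFile wrapper above
    processFile
], function (error) {
    // handle readFile or processFile errors here
});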
First and foremost, make sure you read the documentation regarding async.waterfall.
Now, there are a couple of key parts to the waterfall control flow:
The control flow is specified by an array of functions for invocation as the first argument, and a "complete" callback when the flow is finished as the second argument.
The array of functions are invoked in series (as opposed to parallel).
If an error (usually named err) is encountered at any operation in the flow array, it will short-circuit and immediately invoke the "complete"/"finish"/"done" callback.
Arguments from the previously executed function are applied to the next function in the control flow, in order, and an "intermediate" callback is supplied as the last argument. Note: the first function only has this "intermediate" callback, and the "complete" callback will receive the arguments of the last invoked function in the control flow (with consideration to any errors), but with an err argument prepended instead of an "intermediate" callback appended.
The callbacks for each individual operation (I call this cbAsync in my examples) should be invoked when you're ready to move on: The first parameter will be an error, if any, and the second (third, fourth... etc.) parameter will be any data you want to pass to the subsequent operation.
The first goal is to get your code working almost verbatim alongside the introduction of async.waterfall. I decided to remove all your console.log statements and simplified your error handling. Here is the first iteration (untested code):
var fs = require('fs'),
async = require('async');
function updateJson(ticker,value) {
async.waterfall([ // the series operation list of `async.waterfall`
// waterfall operation 1, invoke cbAsync when done
function getTicker(cbAsync) {
fs.readFile('stocktest.json',function(err,file) {
if ( err ) {
// if there was an error, let async know and bail
cbAsync(err);
return; // bail
}
var stocksJson = JSON.parse(file);
if ( stocksJson[ticker] == null ) { // loose == also catches undefined (missing ticker)
// if we don't have the ticker, let "complete" know and bail
cbAsync(new Error('Missing ticker property in JSON.'));
return; // bail
}
stocksJson[ticker] = value;
// err = null (no error), jsonString = JSON.stringify(...)
cbAsync(null,JSON.stringify(stocksJson,null,4));
});
},
function writeTicker(jsonString,cbAsync) {
fs.writeFile('stocktest.json',jsonString,function(err) {
cbAsync(err); // err will be null if the operation was successful
});
}
],function asyncComplete(err) { // the "complete" callback of `async.waterfall`
if ( err ) { // there was an error with either `getTicker` or `writeTicker`
console.warn('Error updating stock ticker JSON.',err);
} else {
console.info('Successfully completed operation.');
}
});
}
The second iteration divides up the operation flow a bit more. It puts it into smaller single-operation oriented chunks of code. I'm not going to comment it, it speaks for itself (again, untested):
var fs = require('fs'),
async = require('async');
function updateJson(ticker,value,callback) { // introduced a main callback
var stockTestFile = 'stocktest.json';
async.waterfall([
function getTicker(cbAsync) {
fs.readFile(stockTestFile,function(err,file) {
cbAsync(err,file);
});
},
function parseAndPrepareStockTicker(file,cbAsync) {
var stocksJson = JSON.parse(file);
if ( stocksJson[ticker] == null ) {
cbAsync(new Error('Missing ticker property in JSON.'));
return;
}
stocksJson[ticker] = value;
cbAsync(null,JSON.stringify(stocksJson,null,4));
},
function writeTicker(jsonString,cbAsync) {
fs.writeFile(stockTestFile,jsonString,function(err) {
cbAsync(err);
});
}
],function asyncComplete(err) {
if ( err ) {
console.warn('Error updating stock ticker JSON.',err);
}
callback(err);
});
}
The last iteration short-hands a lot of this with the use of some bind tricks to decrease the call stack and increase readability (IMO), also untested:
var fs = require('fs'),
async = require('async');
function updateJson(ticker,value,callback) {
var stockTestFile = 'stocktest.json';
async.waterfall([
fs.readFile.bind(fs,stockTestFile),
function parseStockTicker(file,cbAsync) {
var stocksJson = JSON.parse(file);
if ( stocksJson[ticker] == null ) {
cbAsync(new Error('Missing ticker property in JSON.'));
return;
}
cbAsync(null,stocksJson);
},
function prepareStockTicker(stocksJson,cbAsync) {
stocksJson[ticker] = value;
cbAsync(null,JSON.stringify(stocksJson,null,4));
},
fs.writeFile.bind(fs,stockTestFile)
],function asyncComplete(err) {
if ( err ) {
console.warn('Error updating stock ticker JSON.',err);
}
callback(err);
});
}
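Calling the final version would then look like this (ticker and value are made up):
updateJson('GOOG', 550.25, function (err) {
    if (err) {
        console.warn('Stock ticker update failed.', err);
        return;
    }
    console.info('Stock ticker updated.');
});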
Basically, Node.js (and more generally JavaScript) functions that require some time to execute (be it for I/O or CPU processing) are typically asynchronous, so the event loop (to keep it simple, a loop that continuously checks for tasks to be executed) can invoke the function right below the first one without getting blocked waiting for a response. If you are familiar with other languages like C or Java, you can think of an asynchronous function as a function that runs on another thread (it's not necessarily true in JavaScript, but the programmer shouldn't care about it); when the execution terminates, this thread notifies the main one (the event loop one) that the job is done and it has the results.
As said, once the first function has finished its job it must be able to notify that its job is done, and it does so by invoking the callback function you pass to it. To make an example:
var callback = function(data, err)
{
    if(!err)
    {
        // do something with the received data
    }
    else
    {
        // something went wrong
    }
}
asyncFunction1(someparams, callback);
asyncFunction2(someotherparams);
The execution flow would call asyncFunction1, then asyncFunction2 and every function below, without waiting for asyncFunction1 to end; when asyncFunction1 does end, the callback function which was passed as its last parameter is called to do something with the data, if no errors occurred.
So, to make 2 or more asynchronous functions execute one after another only when they ended you have to call them inside their callback functions:
asyncTask1(data, function(result1, err)
{
    if(!err)
        asyncTask2(data, function(result2, err2)
        {
            if(!err2) {
                // call maybe a third async function
            } else {
                console.log(err2);
            }
        });
    else
        console.log(err);
});
result1 is the value produced by asyncTask1 and result2 is the value produced by asyncTask2. This way you can nest as many asynchronous functions as you want.
In your case if you want another function to be called after updateJson() you must call it after this line:
console.log("File successfully written");
I got a file newuser.js (node.js environment featuring a mongodb database managed via mongoose) containing the following code:
//newuser.js
//basically creates new user documents in the database and takes a GET parameter and an externally generated random code (see randomcode.js)
[...]
var randomCode = require ('randomcode');
var newTempUser = new tempUser({
    name: req.body.name,
    vericode: randomCode.randomveriCode(parameter)
});
newTempUser.save(function (err){
//some output
});
//randomcode.js
//creates a random sequence of characters (=vericode), checks if code already exists in DB and restarts function if so or returns generated code
exports.randomveriCode = function randomveriCode(parameter){
[...]
var TempUser = conn.model('TempUser', TempUserSchema);
TempUser.count({vericode: generatedcode}, function(err, counter){
if (counter=='0'){
return generatedcode;
}else{
randomveriCode(parameter);
}
});
};
The problem is that newuser.js throws an error because the variable vericode is 'undefined' (thus mongoose model validation fails). The error does not occur if I skip the database query and instantly return the generated code (which in fact has a value, as verified by several console.log instructions). It occurs to me that the db query takes too long, and an empty or null value is returned before the query is complete? I thought about introducing promises, unless you have any other suggestions or hints as to what may cause this behaviour?
Kind regards
Igor
Since querying the database is a non-blocking operation, you cannot expect the function call to return the value from the database immediately. Try passing in a callback instead:
// newuser.js
var randomCode = require('randomcode');
randomCode.randomveriCode(parameter, function(err, code) {
if (err) throw err; // TODO: handle better
var newTempUser = new tempUser({name: req.body.name, vericode: code});
newTempUser.save(function (err){
//some output
});
});
// randomcode.js
exports.randomveriCode = function randomveriCode(parameter, cb) {
var TempUser = conn.model('TempUser', TempUserSchema);
TempUser.count({vericode: generatedcode}, function(err, counter) {
if (err) return cb(err);
if (counter == '0') {
cb(null, generatedcode);
} else {
randomveriCode(parameter, cb);
}
});
};
Your randomveriCode function contains calls to an asynchronous function, and therefore your function really needs to provide a callback argument, like this:
exports.randomveriCode = function randomveriCode(parameter, callback){
[...]
var TempUser = conn.model('TempUser', TempUserSchema);
TempUser.count({vericode: generatedcode}, function(err, counter){
if(err) return callback(err);
if (counter=='0'){
return callback(null, generatedcode);
}else{
randomveriCode(parameter, callback);
}
});
};
You'd then call it like so:
var randomCode = require ('randomcode');
randomCode.randomveriCode(parameter, function(err, vericode){
if(err) throw err;
var newTempUser = new tempUser({name: req.body.name, vericode: vericode});
newTempUser.save(function(err,newUser){
//do something here
});
});
Btw - you could also use a synchronous function to create a GUID. See https://www.npmjs.org/package/node-uuid.
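For example (a minimal sketch, assuming node-uuid is installed):
var uuid = require('node-uuid');
var vericode = uuid.v4(); // random GUID, generated synchronously, no callback needed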
As a novice in JavaScript, I'm confused about the best way to differentiate between the result computed by an asynchronous function and any exception/error.
If I'm right, you cannot use try-catch in this scenario, as the called function ends before the callback, and it is the latter that may actually throw an exception.
Well.
So far I've seen some library functions expecting a callback like function(err, result), so one has to test err before using result.
I also tried myself to return either the actual result or an Error object. Here, the callback is of the form function(result), and you have to test result instanceof Error before using it.
Here is an example of this:
function myAsyncFunction ( callBack ) {
    async_library_function( "some data", function (err, result) {
        if (err) { callBack ( new Error ("my function failed") ); return; }
        callBack ( /* some calculation with result */ );
    });
} // myAsyncFunction ()
//
// calling myFunction
//
myAsyncFunction ( function (result) {
if (result instanceof Error ) { log ("some error occurred"); return; }
log ("Alright, this is the result: " + result);
});
What is the best (maybe the common) way to do this?
Thanks.
There are three main approaches that I've been using myself:
Having an "error" parameter passed to the callback.
Having an "error" callback. This is usually combined with (1).
Having some sort of global exception manager.
I'll start with the third one. The idea is to have an object that will allow dispatching errors as well as catching them globally. Something like this:
var ErrorManager = {
subscribers: [],
subscribe: function (callback) {
this.subscribers.push(callback);
},
dispatchError: function (error) {
this.subscribers.forEach(function (s) {
s.apply(window, [ error ]);
});
}
}
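Usage would look something like this:
// somewhere central, e.g. at application startup
ErrorManager.subscribe(function (error) {
    console.error('Global error: ' + error.message);
});

// somewhere deep inside the application
ErrorManager.dispatchError(new Error('Failed to load articles.'));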
This is quite specific to a given situation, because there's basically no easy way of tracking the origin of an error, and it's easy to make a mess of this. For example, if you need to hide a dialog box whose contents failed to load, you'd have to propagate this information (e.g. the dialog box id/element) to all the subscribers.
The above approach is good when you want to execute an action that doesn't alter the web application (or alters an independent part of it), e.g. displays a status bar or a message to a console.
The second approach basically makes a separation between successful call and a failure. For example:
$.ajax('/articles', {
success: function () { /* All is good - populating article list */ },
error: function () { /* An error occured */ }
});
In the above example, the success callback is never executed in case of a failure, so if you want some default behavior to always trigger, you need to either sync between the two callbacks or use a callback that is always called (for the above example, complete).
I personally prefer the first approach: having a callback where an error object is passed along with the potential result. This eliminates problems with having to "track" the origin/context of an error, as well as worrying about the clean-up procedure (default behavior). So, something like you provided:
function beginFetchArticles(onComplete) {
$.ajax('/articles', {
complete: function (xhr) {
onComplete(xhr.status != 200 ? xhr.status.toString() : null,
$.parseJSON(xhr.responseText)); /* Something a bit more secure, probably */
}
});
}
Hope this helps.
It depends vastly on your implementation. Is this a recoverable error? If it isn't, then the way you are suggesting should work just fine. If it is recoverable then you shouldn't be returning an error. You should be returning an "empty" result. Keep in mind maintainability as well. Do you want instanceof checks throughout the code? Also, I know some programmers like that JavaScript is loose with types, but you run into consistency issues when the expected object passed through can actually be unexpected. Is it a result, or an error, or even something else altogether?
That's one way to do it. Though I'd usually leave any manipulations/processing of the result to the callback function.
Another way is you can pass back both the error and result values to the callback:
callback (err, result); // if no error, err = null; if no result, result = null
Alternatively, you can ask for separate error and success callbacks:
function myAsyncFunction ( successCallBack, errorCallBack ) {
/* ... */
}
And then trigger the appropriate function depending on the received response.
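A minimal sketch of that shape, reusing the async_library_function from your example:
function myAsyncFunction ( successCallBack, errorCallBack ) {
    async_library_function( "some data", function (err, result) {
        if (err) {
            errorCallBack( new Error("my function failed") );
        } else {
            successCallBack( result );
        }
    });
}

myAsyncFunction(
    function (result) { log("Alright, this is the result: " + result); },
    function (error) { log("some error occurred"); }
);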
One approach can be like this:
function Exception(errorMessage)
{
this.ErrorMessage = errorMessage;
this.GetMessage = function()
{
return this.ErrorMessage;
}
}
function ResultModel(value, exception)
{
exception = typeof exception == "undefined" ? null : exception;
this.Value = value;
this.Exception = exception;
this.GetResult = function()
{
if(exception === null)
{
return this.Value;
}
else
{
throw this.Exception;
}
}
};
And in your usage:
function myAsyncFunction ( callBack ) {
var result;
async_library_function( "some data", function (err, result) {
if (err)
{
result = new ResultModel(null, new Exception("my function failed"));
}
else
{
result = new ResultModel(/* some calculation with result */);
}
callBack ( result );
});
}
myAsyncFunction ( function (result) {
try
{
log ("Alright, this is the result: " + result.GetResult());
}
catch(ex)
{
log ("some error occurred" + ex.GetMessage());
return;
}
});
If you want to make robust programs, you should use promises. Otherwise you have to handle two different kinds of errors, which is pretty crazy.
Consider how to read a file as JSON without crashing the server:
var fs = require("fs");
fs.readFile("myfile.json", function(err, contents) {
if( err ) {
console.error("Cannot read file");
}
else {
try {
var result = JSON.parse(contents);
console.log(result); //Or continue callback hell here
}
catch(e) {
console.error("Invalid json");
}
}
});
With promises e.g:
var Promise = require("bluebird");
var readFile = Promise.promisify(require("fs").readFile);
readFile("myfile.json").then(JSON.parse).then(function(result){
console.log(result);
}).catch(SyntaxError, function(e){
console.error("Invalid json");
}).catch(function(e){
console.error("Cannot read file");
});
Notice also how the code grows vertically like with synchronous code instead of horizontally.
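The same idea applies to your own myAsyncFunction: rather than passing an Error through the callback, return a promise (a sketch using bluebird's Promise constructor):
var Promise = require("bluebird");

function myAsyncFunction() {
    return new Promise(function (resolve, reject) {
        async_library_function("some data", function (err, result) {
            if (err) reject(new Error("my function failed"));
            else resolve(result); // or some calculation with result
        });
    });
}

myAsyncFunction().then(function (result) {
    log("Alright, this is the result: " + result);
}).catch(function (e) {
    log("some error occurred");
});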
Let's say I want to send an email and then update the database; both actions are async. This is how I would normally write it:
send_email(function(err, id){
if(err){
console.log("error");
}else{
update_database(id,function(err, id){
if(err){
console.log("error");
}else{
console.log("success");
}
});
}
});
I would like to do this instead with middleware.
var mid = {};
mid.send_email = function(){
return function(next){
send_email(function(err,id){
if(err){
console.log("error");
}else{
next(id);
}
});
}
}
mid.update_database = function(){
return function(id,next){
update_database(id,function(err,id){
if(err){
console.log("error");
}else{
next(id);
}
});
}
}
mid.success = function(){
return function(id,next){
console.log("success")
next(id);
}
}
Stacking the middleware.
middleware.use(mid.send_email());
middleware.use(mid.update_database());
middleware.use(mid.success());
There are two main questions at hand.
How can I use middleware in place of nested callbacks?
Is it possible to pass variables to next()?
What you want is to be able to handle an async control flow. A lot of JS libraries can help you achieve this. You can try the Async library with the waterfall function, since you want to be able to pass variables to the next function that will be executed:
https://github.com/caolan/async#waterfall
"Runs an array of functions in series, each passing their results to the next in the array. However, if any of the functions pass an error to the callback, the next function is not executed and the main callback is immediately called with the error."
Example :
async.waterfall([
function(callback){
callback(null, 'one', 'two');
},
function(arg1, arg2, callback){
callback(null, 'three');
},
function(arg1, callback){
// arg1 now equals 'three'
callback(null, 'done');
}
], function (err, result) {
// result now equals 'done'
});
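If you prefer to keep your middleware.use(...) style instead of an array, the runner itself is only a few lines. Here is a hypothetical minimal implementation (not a library API) that also shows how values passed to next() reach the following middleware:
var middleware = {
    stack: [],
    use: function (fn) {
        this.stack.push(fn);
    },
    run: function () {
        var stack = this.stack, index = 0;
        function next() {
            var args = Array.prototype.slice.call(arguments); // values passed to next(...)
            var fn = stack[index++];
            if (fn) fn.apply(null, args.concat(next)); // each middleware gets (...values, next)
        }
        next();
    }
};

// with your mid.* functions stacked as above:
middleware.run(); // send_email -> update_database -> success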
You are probably better off using CommonJS module.exports.
You can create a file like this:
module.exports = function (){
function sendEmail(doneCallback){
// do your stuff, then when you are done:
if(!err){
doneCallback(whatever,args,you,need);
}
}
function updateDB(success){
// do your stuff, then when you are done:
success(whatever,args,you,need);
}
return {
send: sendEmail,
update: updateDB
};
};
Then in your server.js:
var lib = require('./mylib.js')(); // note the (): the module exports a factory function
lib.send(function(result){
console.log(result);
});
This is a similar pattern, and it might give you a better idea of what I mean. It consists of the library baking a function and passing it to whoever needs to chain, like this (a more down-to-earth example, client-side this time):
ui.bistate($('#mybutton'), function(restore){
$.ajax({
url: '/api/1.0/catfood',
type: 'PUT',
data: {
catfood: {
price: 1.23,
name: 'cheap',
text: 'Catzy'
}
}
}).done(function(res){
// stuff with res
restore();
});
});
and in the library, this is how restore is provided:
var ui = function(){
function bistate(button, action) {
var originalText = button.data('text'),
    disabledText = button.data('text-disabled');
function restore(){
button.prop('disabled', false);
button.text(originalText);
}
function disable(){
button.prop('disabled', true);
button.text(disabledText);
}
button.on('click', function(){
disable();
action(restore);
});
restore();
}
return {
bistate: bistate
};
}();
This allows the consumer to control the flow, restoring the button whenever he wants, and relieves the library from having to handle complex cases where the consumer wants to do an async operation in between.
In general the point is: passing callbacks back and forth is hugely useful and not used widely enough.
I have been using Queue.js in my work for some time.
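For instance, running two reads in series and collecting the results might look like this (a minimal sketch, assuming the queue-async package that Queue.js is published as on npm):
var queue = require('queue-async'),
    fs = require('fs');

queue(1) // parallelism of 1 runs the deferred tasks in series
    .defer(fs.readFile, 'a.txt')
    .defer(fs.readFile, 'b.txt')
    .await(function (error, a, b) { // called once everything has finished
        if (error) throw error;
        console.log(a.toString(), b.toString());
    });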