I've been developing with Node.js for months, but now I'm starting a new project and I'd like to know how to structure the app.
My problem comes when it's time for unit testing. I will use nodeunit to write the unit tests.
Also, I'm using Express to define my REST routes.
I was thinking about splitting the code that accesses the database into two "separate" files (there will be more, obviously, but I'm just trying to simplify the example). First, the routes code:
var mongoose = require('mongoose')
  , itemsService = require('./../../lib/services/items-service');

// GET '/items'
exports.list = function(req, res) {
  itemsService.findAll({
    start: req.query.start,
    size: req.query.size,
    cb: function(offers) {
      res.json(offers);
    }
  });
};
And, as used there, an items service that does nothing but access the data layer. I'm doing this so that unit tests only exercise the data access layer. It'll be something like this:
var mongoose = require('mongoose')
  , Item = require('./../mongoose-models').Item;

exports.findAll = function(options) {
  var query = Item
    .find({});
  if (options.start && options.size) {
    query
      .limit(options.size)
      .skip(options.start);
  }
  query.exec(function(err, offers) {
    if (!err) {
      options.cb(offers);
    }
  });
};
This way I can check with unit tests whether it works correctly, and I can reuse this code wherever I want. The only thing I'm not sure I've done correctly is the way I pass a callback function to consume the returned value.
What do you think?
Thanks!
Yes, quite easily!
You can use a unit testing module like mocha and either node's own assert or another such as should.
As an example of a test case for your example model:
var ItemService = require('../../lib/services/items-service');
var should = require('should');
var mongoose = require('mongoose');

// We need a database connection
mongoose.connect('mongodb://localhost/project-db-test');

// Now we write specs using the mocha BDD api
describe('ItemService', function() {
  describe('#findAll( options )', function() {
    // Async test: the lone argument is the "test complete" callback
    it('"args.size" returns the correct length', function( done ) {
      var _size = Math.round(Math.random() * 420);
      ItemService.findAll({
        size : _size,
        cb : function( result ) {
          should.exist(result);
          result.length.should.equal(_size);
          // etc.
          done(); // We call the async test complete method
        }
      });
    });
    it('does something else...', function() {
    });
  });
});
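If you'd rather lean on Node's built-in assert module instead of should, the same spec could be sketched like this (assuming the same describe block and ItemService as above):

var assert = require('assert');

it('"args.size" returns the correct length (assert flavour)', function( done ) {
  var _size = Math.round(Math.random() * 420);
  ItemService.findAll({
    size : _size,
    cb : function( result ) {
      assert.ok(result);                        // result exists
      assert.strictEqual(result.length, _size); // and has the expected length
      done();
    }
  });
});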
And so on, ad nauseam.
Then, when you're done writing your tests - assuming you've $ npm install mocha'd - you'd simply run $ ./node_modules/.bin/mocha, or $ mocha if you used npm's -g flag.
It depends how thorough/detailed you want to be, really. I've always been advised to, and find it easier to: write the tests first, to get a clear specification perspective, then write the implementation against the tests, with any extra insight a freebie.
I am using Istanbul for code coverage, but I'm getting a very low coverage percentage, particularly in the model files.
Consider the following model file:
ModelA.js
const mongoose = require('mongoose');
const Schema = mongoose.Schema;
var app = require('../server');
var db = require('../db/dbConnection');
var config = require('../configs/config');

const Schema1 = new Schema({ 'configurations': [] });

exports.save = function (aa, data, callback) {
    var logMeta = {
        file: 'models/modelA',
        function: 'save',
        data: {},
        error: {}
    };
    if (!aa) {
        return callback('aa is required');
    }
    global.logs[aa].log('info', 'AA: ' + aa, logMeta);
    db.connectDatabase(aa, function(error, mongoDB){
        if (error) {
            logMeta.data['error'] = error;
            global.logs[aa].log('error', 'error', logMeta);
            return callback(error);
        }
        const ModelA = mongoDB.model('bbb', Schema1);
        ModelA.findOneAndUpdate({}, data, {upsert: true, new: true, runValidators: true}, function(error, result){
            if (error) {
                logMeta.data['error'] = error;
                global.logs[aa].log('error', 'error', logMeta);
            }
            else {
                logMeta.data = {};
                logMeta.data['result'] = JSON.parse(JSON.stringify(result));
                global.logs[aa].log('info', 'result', logMeta);
            }
            callback(error, result);
        });
    });
};
TestA.js:
var should = require('should'),
    sinon = require('sinon'),
    ModelA = require("../models/ModelA");

describe('Model test', function () {
  it('Should save Model', function (done) {
    var todoMock = sinon.mock(new ModelA({'configurations': []}));
    var todo = todoMock.object;

    todoMock
      .expects('save')
      .yields(null, 'SAVED');

    todo.save(function(err, result) {
      todoMock.verify();
      todoMock.restore();
      should.equal('SAVED', result, "Test fails due to unexpected result");
      done();
    });
  });
});
But I am getting a code coverage percentage of 20, so how can I increase it?
Also:
1. Do I have to mock db.connectDatabase? If yes, how can I achieve that?
2. Do I have to use a test DB to run all my unit tests, or do I have to assert?
3. Will code coverage work for unit tests or integration tests?
Please share your ideas. Thanks.
I have been using Istanbul to 100% code-cover most of my client/server projects, so I might have the answers you are looking for.
How does it work
Whenever you require some local file, Istanbul wraps (instruments) it all over the place to understand whether every one of its parts is reached by your code.
Not only is the required file instrumented; your running test file is too.
However, while it's easy to cover the running test file, mocked classes and their code might never be executed.
todoMock.expects('save')
According to the Sinon documentation:
Overrides todo.save with a mock function and returns it.
Istanbul instrumented the real save method, but since the mock replaces it, anything within that method's scope is never reached, so you are actually testing that the mock works, not that your real code does.
This should answer your question: "Will code coverage work for unit tests or integration tests?"
The answer is that it covers the code you actually execute, which is the only thing you're interested in from a code coverage perspective. Covering Sinon itself is nobody's goal.
No need to assert ... but
Once you've understood how Istanbul works, it follows naturally that it doesn't matter whether you assert or not; all that matters is that you reach the code for real and execute it.
Asserting is just your guard against failures, not a mechanism Istanbul itself cares about. When an assertion fails, your test does too, so it's good to know that things didn't work and there's no need to keep testing the rest of the code (early failure, faster fixes).
Whether you have to mock the db.connectDatabase
Yes, at least for the code you posted. You could assign db as a generic object mock on the global context and expect its methods to be called, but you can also simplify your life by writing something like this:
function createDB(err1, err2) {
  return {
    connectDatabase(aa, callback) {
      callback(err1, {
        model(name, value) {
          return {
            findOneAndUpdate($0, $1, $2, fn) {
              fn(err2, {any: 'object'});
            }
          };
        }
      });
    }
  };
}
global.db = createDB(null, null);
This code in your test file can be used to create a global db that behaves differently according to the errors you pass along, giving you the ability to run the same test file several times with different expectations.
How to run the same test more than once
Once your test is completed, delete require.cache[require.resolve('../test/file')] and then require('../test/file') again.
Do this as many times as you need.
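As a sketch of how that can be combined with the createDB helper above (the '../test/file' path and the error combinations are just placeholders):

// Re-run the same spec file against differently-behaving db mocks.
[
  [null, null],                          // happy path
  [new Error('connect failed'), null],   // connectDatabase fails
  [null, new Error('update failed')]     // findOneAndUpdate fails
].forEach(function (errors) {
  global.db = createDB(errors[0], errors[1]);
  delete require.cache[require.resolve('../test/file')];
  require('../test/file');
});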
When there is conditional feature detection
I usually run the test several times, deleting global constructors in case these are patched with a fallback. I also store them first so I can put them back later on.
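A hypothetical sketch of that pattern (the Promise global and module path are placeholders, not anything from your code):

// Temporarily remove a global to force the fallback branch, then restore it.
var _Promise = global.Promise;
delete global.Promise;

delete require.cache[require.resolve('../src/thing')];
require('../src/thing'); // should now take the non-Promise code path

global.Promise = _Promise; // put it back for the rest of the suite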
When the code is obvious but shouldn't be reached
If you have if (err) process.exit(1);, you rarely want to reach that part of the code. There are various comments understood by Istanbul that let you skip parts of the coverage, like /* istanbul ignore if */, /* istanbul ignore else */, or even the generic /* istanbul ignore next */.
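For instance, a minimal example of where such a comment goes:

/* istanbul ignore if */
if (err) {
  // this branch is excluded from the coverage report
  process.exit(1);
}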
Please think twice about whether it's just you being lazy or that part really can safely be skipped ... I got bitten a couple of times by a badly handled error, which is a disaster, since when it happens is exactly when you most need your code to keep running and/or give you all the info you need.
What is being covered?
Maybe you know this already, but the coverage/lcov-report/index.html file, which you can open right away in any browser, will show you all the parts that aren't covered by your tests.
I'm trying to figure out how to properly stub this scenario, but I'm a little stuck.
The scenario is: I've got a db.js file that has a list of CouchDB databases in it (each database contains tweet entries for a particular year).
Each year a new database is created and added to this list to hold the new entries for that year (so the list of databases isn't constant; it changes each year).
So my db.js file looks like this:
var nano = require('nano')(`http://${host}`);

var databaseList = {
  db1: nano.use('db2012'),
  db2: nano.use('db2013'),
  db4: nano.use('db2014'),
  db5: nano.use('db2015'),
  db6: nano.use('db2016')
};

module.exports.connection = nano;
module.exports.databaseList = databaseList;
And event.js (a simple model file), before any methods are added, looks like this:
var lastInObject = require('../../helpers/last_in_object');
var db = require('../../db');

var EventModel = function EventModel() {
  this.connection = db.connection;
  this.databaseList = db.databaseList;
  this.defaultDatabase = lastInObject(db.databaseList);
};

EventModel.prototype.findAll =
  function findAll(db, callback) {/* ... */}
My question is, how do I stub the databaseList so I can safely test each of the model methods without any brittleness from the growing databaseList object?
Ideally I'd like to be able to hijack the contents of the databaseList in my tests to mock different scenarios, but I'm unsure how to tackle it.
Here's an example test to ensure the defaultDatabase property always points to the last known event; obviously I don't want to have to update this test every year when databaseList changes, as that makes the tests very brittle.
it('should set the default database to the last known event', () => {
  var Event = require('../event');
  var newEventModel = new Event();
  expect(newEventModel.defaultDatabase.config.db)
    .to.equal('db2014');
});
Suggestions welcome! If I've gone about this the wrong way, let me know what I've done and how I could approach it instead!
Also, this is just a scenario; I do have tests for lastInObject. I'm more interested in how to mock the data in question.
In my opinion you need to stub the whole "db" module. That way you won't have any real db connection, and you can easily control the environment of your tests. You can achieve this by using the mockery module.
That way you can stub the object that require('../../db') returns, which allows you to set whatever values you like on that object's properties.
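As a rough sketch of what that could look like with mockery (the stubbed databaseList values and the chai-style expect are assumptions based on your example test, not a drop-in solution):

var mockery = require('mockery');
var expect = require('chai').expect;

describe('EventModel', function () {
  // a fake db module shaped like the real one
  var dbStub = {
    connection: {},
    databaseList: {
      db1: { config: { db: 'db2013' } },
      db2: { config: { db: 'db2014' } }
    }
  };

  before(function () {
    mockery.enable({ useCleanCache: true, warnOnUnregistered: false });
    // event.js requires '../../db', so that's the id we register the stub under
    mockery.registerMock('../../db', dbStub);
  });

  after(function () {
    mockery.deregisterAll();
    mockery.disable();
  });

  it('sets the default database to the last entry in databaseList', function () {
    var Event = require('../event');
    var newEventModel = new Event();
    expect(newEventModel.defaultDatabase.config.db).to.equal('db2014');
  });
});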
Building upon Scotty's comment in the accepted answer a bit... here's some code I've got which seems to do the job. No guarantees that this is a good implementation, but it does successfully stub out the insert and get methods. Hopefully it helps someone. :)
// /src/common/database.js
const database = require('nano')('http://127.0.0.1/database');

module.exports = {
  async write(document, documentId) {
    return await new Promise(resolve => database.insert(document, documentId, resolve));
  },
  async read(documentId) {
    return await new Promise(resolve => database.get(documentId, resolve));
  }
};
// /test/setup.js
const proxyquire = require('proxyquire');

proxyquire('../src/common/database.js', {
  nano() {
    return {
      insert(document, documentId, callback) { callback(); },
      get(documentId, callback) { callback(); }
    };
  }
});
I'm pretty new to Node.js and it seems fairly easy to use, but when it comes to getting a value from the command line and returning it to be used in another package or .js file, it's harder than I expected.
Long story short, I've used an npm package (akamai-ccu-purge) to enter a file to purge on the Akamai network successfully.
I want to make it more dynamic, though, by prompting the user for the file they want purged and then using that in the akamai package.
After a few tries using var stdin = process.openStdin(); I found another npm package called prompt that seemed easier to use. Both ways seem to have the same problem though.
Node doesn't seem to want to stop for the input. It makes the purge without waiting for input, even though I've called that module first. It actually gets to the point where I should enter the file, but it doesn't wait.
I am definitely missing something in my understanding or usage here. What am I doing wrong?
My code so far is:
var purgeUrl = require('./getUrl2');
var PurgerFactory = require('../../node_modules/akamai-ccu-purge/index'); // this is the directory where the index.js of the node module was installed

// area where I placed the authentication tokens
var config = {
  clientToken: //my tokens and secrets akamai requires
};

// area where urls are placed. More than one can be listed with comma separated values
var objects = [
  purgeUrl // I am trying to pull this from the getUrl2 module
];

// Go for it!
var Purger = PurgerFactory.create(config);
Purger.purgeObjects(objects, function(err, res) {
  console.log('------------------------');
  console.log('Purge Result:', res.body);
  console.log('------------------------');
  Purger.checkPurgeStatus(res.body.progressUri, function(err, res) {
    console.log('Purge Status', res.body);
    console.log('------------------------');
    Purger.checkQueueLength(function(err, res) {
      console.log('Queue Length', res.body);
      console.log('------------------------');
    });
  });
});
The getUrl2 module looks like this:
var prompt = require('../../node_modules/prompt');

//
// Start the prompt
//
prompt.start();

//
// Get property from the user
//
prompt.get(['newUrl'], function (err, result) {
  //
  // Log the results.
  //
  console.log('Command-line input received:');
  console.log('  http://example.com/custom/' + result.newUrl);
  var purgeUrl = 'http://example.com/custom/' + result.newUrl;
  console.log(purgeUrl);
  module.exports = purgeUrl;
});
Thanks again for the help!
I would probably just allow getURL2 to expose a method that will be invoked in the main module. For example:
// getURL2
var prompt = require('../../node_modules/prompt');

module.exports = {
  start: function(callback) {
    prompt.start();
    prompt.get(['newUrl'], function (err, result) {
      // the callback is defined in your main module
      return callback('http://example.com/custom/' + result.newUrl);
    });
  }
};
Then in your main module:
require('./getUrl2').start(function(purgeURL) {
  // do stuff with the purgeURL defined in the other module
});
The implementation may differ, but conceptually you need to make the work in your main module happen only after the second module has received the user's input. Callbacks are a common way to do this (as are Promises). However, as prompt doesn't necessarily expose a method that would necessitate a Promise, you can do it with plain old callbacks.
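If you ever prefer the Promise flavour, a sketch built on the same prompt.start/prompt.get calls (the getUrl2 name and URL prefix are taken from your code) might look like:

// getUrl2 (Promise version)
var prompt = require('prompt');

module.exports = {
  start: function () {
    return new Promise(function (resolve, reject) {
      prompt.start();
      prompt.get(['newUrl'], function (err, result) {
        if (err) return reject(err);
        resolve('http://example.com/custom/' + result.newUrl);
      });
    });
  }
};

// main module:
// require('./getUrl2').start().then(function (purgeUrl) { /* run the purge here */ });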
You might also want to search around for articles on writing command line tools (sometimes referenced as CLIs) or command line apps with Node. I found the following article to be helpful when trying to figure this out myself:
http://javascriptplayground.com/blog/2015/03/node-command-line-tool/
Also, the command-line-args module worked well for me (though there are a number of other modules out there to choose from):
https://www.npmjs.com/package/command-line-args
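For what it's worth, a minimal command-line-args sketch (the option name here is made up for the example):

// usage: node purge.js --url http://example.com/custom/page.html
const commandLineArgs = require('command-line-args');

const options = commandLineArgs([
  { name: 'url', alias: 'u', type: String }
]);

console.log(options.url); // the value passed after --url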
Good luck!
I am new to JavaScript and Node.js and am having problems testing some code I wrote recently. I am trying to test code written in a file called "compareCrowe.js" from "testCrowe.js" using Node.js.
Here are the contents of testCrowe.js:
var compareCrowe = required['./compareCrowe'];
console.log('begin test');
var connection = {Type:1, Label:"label", linkTo:null};
var table1 = {name:"table1", body:"description1", out:[connection]};
var table2 = {name:"table2", body:"description2", out:null};
connection.linkTo = table2;
var crowe = [table1, table2];
var result = compareCrowe.compareCrowesFoot(crowe, crowe);
console.log(result.feedback);
where the function "compareCrowesFoot" is defined in compareCrowe.js. From the console on an Ubuntu virtual machine I ran:
node compareCrowe.js testCrowe.js
however, nothing was printed. There were no errors or warnings or explanation of any kind. It didn't even print the "begin test" line I placed at the top of testCrowe.js. If I run the command:
node testCrowe.js
it complains that compareCrowesFoot is undefined. How can I test the contents of compareCrowe.js?
Welcome to the party of JS.
I'm not sure where you're learning from, but a few of the resources that have helped me and many others are superherojs.com, nodeschool.io, the MDN developer docs, the Node.js API docs, and YouTube (seriously).
The basic idea of Node.js is that it operates with modules (chunks of reusable code), which is what npm is made up of. These can then be included in other modules and used anywhere else in your application.
So for a basic example, say you had compareCrowe.js; to make it includable/reusable in another file, you could write something like:
var compareCrowesFoot = function(crowe1, crowe2) { /* compare crowes feet and return something here */ };

// export an object with a property of whatever you want to access it as, and the value as your function name
// e.g. - you could export { compare: compareCrowesFoot };
module.exports = { compareCrowesFoot: compareCrowesFoot };
Then in testCrowe.js you could require compareCrowe like this:
var compareCrowe = require("./compareCrowe");
/* your code here... */
var result = compareCrowe.compareCrowesFoot(crowe1, crowe2);
// if your returned object was { compare: compareCrowesFoot };
// this would be compareCrowe.compare(crowe1, crowe1);
And to run your tests, you could then run node testCrowe.js from the command line.
In your case it seems like you've got your syntax a little messed up. It should be more like:
var compareCrowe = require('./compareCrowe.js');
That would make any methods you've exported in compareCrowe.js, such as your compareCrowe.compareCrowesFoot function, available to testCrowe.js.
Then, in your terminal, you would run the following:
node testCrowe.js
And that should do the trick provided you don't have any further errors in your code.
I have just started experimenting with building a website using Node.js, and I am encountering an issue in organizing the models of my project.
All the real-world examples I have found on the Internet use Mongoose. This library allows you to define your models in a static way, so you can write this:
// models/foo.js
module.exports = require('mongoose').model('Foo', ...);

// app.js
mongoose.connect(...);

// some_controller_1.js
var Foo = require('./models/foo');
Foo.find(...);

// some_controller_2.js
var Foo = require('./models/foo');
Foo.find(...);
But since I don't want to use MongoDB, I need another ORM, and all the other ORMs I have found don't allow this: you first need to create an instance, and only then can you register your models. Also, they don't seem to allow access to the list of registered models.
So I tried doing this:
// models/user.js
var registrations = [];

module.exports = function(sequelize) {
  var result = null;
  registrations.forEach(function(elem) {
    if (elem.db == sequelize)
      result = elem.value;
  });
  if (result) return result;

  // data definition
  var user = sequelize.define("User", ...);

  registrations.push({ db: sequelize, value: user });
  return user;
};
Which I can use like this:
// some_controller_1.js
var Foo = require('./models/foo')(app.get('database'));
Foo.find(...); // using Foo
But these small headers and footers that I have to write in every single model file are a bit annoying and directly violate the "don't repeat yourself" principle. Also, while not a huge issue, this is kind of a memory leak, since the "sequelize" object will never be freed.
Is there a better way to do this, which I didn't think about?
You can find an article about handling models with sequelize here: http://sequelizejs.com/articles/express#the-application
You basically just create a models/index.js as described here: http://sequelizejs.com/articles/express#block-3-line-0
Afterwards you just put your model definitions within files in the models folder as pointed out here: http://sequelizejs.com/articles/express#block-4-line-0
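In case the links go stale, here is a condensed sketch of that models/index.js pattern (the connection values are placeholders, and sequelize.import is the older API the article relies on):

// models/index.js
var fs = require('fs');
var path = require('path');
var Sequelize = require('sequelize');

var sequelize = new Sequelize('database', 'username', 'password');
var db = {};

// load every model definition file in this folder
fs.readdirSync(__dirname)
  .filter(function (file) {
    return file.indexOf('.') !== 0 && file !== 'index.js';
  })
  .forEach(function (file) {
    var model = sequelize.import(path.join(__dirname, file));
    db[model.name] = model;
  });

db.sequelize = sequelize;
db.Sequelize = Sequelize;
module.exports = db;

// models/user.js then only contains the definition:
// module.exports = function (sequelize, DataTypes) {
//   return sequelize.define('User', { name: DataTypes.STRING });
// };

// and controllers just do:
// var db = require('./models');
// db.User.find(...);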