Jasmine spyOn mongoose save - javascript

I'd like to mock the save() function of a Mongoose model. The function I want to test looks like this in a file called user.js:
var User = require('./User.js');

module.exports = {
  post: function (req, res) {
    var user = new User({
      password: req.body.password,
      email: req.body.email
    });
    user.save(function (err) {
      if (err) {
        // ...
      } else {
        // ...
      }
    });
  }
};
I tried to write a test that looks like this in another file called user_spec.js:
var Handler = require('./user.js');

it('works properly', function () {
  spyOn(User, 'save').andReturn(null);
  Handler.post(req, res);
});
but that gives me the error:
save() method does not exist
I've done some more digging and it looks like the User model itself does not have the save() method, an instance does. This would mean I have to mock the constructor of User, but I'm having a lot of trouble with this. Other posts refer to a statement like:
spyOn(window, 'User')
to fix this, but in Node.js, global (the window equivalent here) does not have User, since I import it as a variable. Is it possible to mock the constructor to give me something with a mocked save()? I've also taken a look at an npm module called rewire, but I was hoping I could do this without mocking and replacing the entire user module in my handler.

This does not solve the issue of mocking a local variable, but it will solve the issue of unit testing the creation of new documents.
When creating a new document, it is better to use Model.create(). This can be mocked effectively, and it is simply less code. The right way to handle this and test it would be:
var User = require('./User.js');

module.exports = {
  post: function (req, res) {
    User.create({
      password: req.body.password,
      email: req.body.email
    }, function (err) {
      if (err) {
        // ...
      } else {
        // ...
      }
    });
  }
};
Corresponding test:
var Handler = require('./user.js');

it('works properly', function () {
  spyOn(User, 'create').andReturn(null);
  Handler.post(req, res);
});
Hopefully this workaround will help other people getting frustrated with jasmine and mongoose unit testing.
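For completeness, here is a rough sketch of the same spec using the newer Jasmine 2+ syntax (and.callFake instead of andReturn), so the callback branch actually runs; the require paths and the req/res stubs are assumptions about how user.js is wired up:

var Handler = require('./user.js');
var User = require('./User.js');

describe('Handler.post', function () {
  it('creates the user without touching the database', function () {
    // Fake User.create so the success branch of the callback runs.
    spyOn(User, 'create').and.callFake(function (doc, callback) {
      callback(null);
    });
    var req = { body: { email: 'a@b.c', password: 'secret' } };
    var res = jasmine.createSpyObj('res', ['json']);

    Handler.post(req, res);

    expect(User.create).toHaveBeenCalled();
  });
});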

You can only swap a function with a spy after the object is created. Hence this will work:
var user = new User(…);
spyOn(user, 'save').…;
doSomething();
where this will not:
spyOn(User, 'save').…
doSomething()
Of course you could change the function inside mongoose that creates the save function on the object… but you probably don't want to go there.
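To illustrate the rule with the model from the question (a minimal sketch, using Jasmine 2+ syntax):

var user = new User({ email: 'a@b.c', password: 'secret' });

// Works: the spy replaces save on this particular instance.
spyOn(user, 'save').and.callFake(function (callback) {
  callback(null); // pretend the save succeeded
});
user.save(function (err) { /* err is null here */ });

// Throws "save() method does not exist": the model itself has no save method.
// spyOn(User, 'save');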

In a sane world, you would be able to do this.
spyOn(Model.prototype, 'save')
However, Mongoose overloads all of its Model functions to work with both Node.js callbacks and Promises. To do this, it manipulates the prototype in a way that is a little hard to predict without reading the actual Model code (https://github.com/Automattic/mongoose/blob/master/lib/model.js).
Here's an example that actually worked for me.
spyOn(Model.prototype, '$__save').and.callFake(function (options, callback) {
  callback();
});
For the record, I am using Mongoose with Promises in the application code.
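Put together with the User model and handler from the original question, the spec might look roughly like this (a sketch; whether $__save is the right internal method to fake depends on your Mongoose version, so treat it as fragile):

describe('Handler.post', function () {
  it('saves the new user', function () {
    spyOn(User.prototype, '$__save').and.callFake(function (options, callback) {
      callback(); // simulate a successful save
    });

    var req = { body: { email: 'a@b.c', password: 'secret' } };
    var res = jasmine.createSpyObj('res', ['json']);
    Handler.post(req, res);

    expect(User.prototype.$__save).toHaveBeenCalled();
  });
});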


How to increase the code coverage using istanbul in node.js

I am using Istanbul for code coverage, but I'm getting a very low coverage percentage, particularly in my model files.
Consider the following model file:
ModelA.js
const mongoose = require('mongoose');
const Schema = mongoose.Schema;
var app = require('../server');
var db = require('../db/dbConnection');
var config = require('../configs/config');

const Schema1 = new Schema({ 'configurations': [] });

exports.save = function (aa, data, callback) {
  var logMeta = {
    file: 'models/modelA',
    function: 'save',
    data: {},
    error: {}
  };
  if (!aa) {
    return callback('aa is required');
  }
  global.logs[aa].log('info', 'AA: ' + aa, logMeta);
  db.connectDatabase(aa, function (error, mongoDB) {
    if (error) {
      logMeta.data['error'] = error;
      global.logs[aa].log('error', 'error', logMeta);
      return callback(error);
    }
    const ModelA = mongoDB.model('bbb', Schema1);
    ModelA.findOneAndUpdate({}, data, { upsert: true, new: true, runValidators: true }, function (error, result) {
      if (error) {
        logMeta.data['error'] = error;
        global.logs[aa].log('error', 'error', logMeta);
      } else {
        logMeta.data = {};
        logMeta.data['result'] = JSON.parse(JSON.stringify(result));
        global.logs[aa].log('info', 'result', logMeta);
      }
      callback(error, result);
    });
  });
};
TestA.js:
var should = require('should'),
    sinon = require('sinon'),
    ModelA = require('../models/ModelA');

describe('Model test', function () {
  it('Should save Model', function (done) {
    var todoMock = sinon.mock(new ModelA({ 'configurations': [] }));
    var todo = todoMock.object;

    todoMock
      .expects('save')
      .yields(null, 'SAVED');

    todo.save(function (err, result) {
      todoMock.verify();
      todoMock.restore();
      should.equal('SAVED', result, 'Test fails due to unexpected result');
      done();
    });
  });
});
But I am getting a code coverage percentage of 20, so how can I increase it?
Also:
1. Do I have to mock db.connectDatabase? If yes, how can I achieve that?
2. Do I have to use a test DB to run all my unit tests, or do I have to assert?
3. Will code coverage work for unit tests or integration tests?
Please share your ideas. Thanks
I have been using Istanbul to 100% code cover most of my client/server projects so I might have the answers you are looking for.
How does it work
Whenever you require a local file, it gets instrumented all over the place so Istanbul can tell whether every part of it is reached by your code.
Not only is the required file instrumented; your running test is too.
However, while it's easy to code cover the running test file, mocked classes and their code might never be executed.
todoMock.expects('save')
According to the Sinon documentation:
Overrides todo.save with a mock function and returns it.
If Istanbul has instrumented the real save method, anything within that scope will never be reached, so you are actually testing that the mock works, not that your real code does.
This should answer your question: will code coverage work for unit tests or integration tests?
The answer is that it covers your code, which is the only thing you're interested in from a code-coverage perspective. Covering Sinon JS is nobody's goal.
No need to assert ... but
Once you've understood how Istanbul works, it follows naturally that it doesn't matter whether you assert or not; all that matters is that you actually reach the code and execute it.
Asserting is just your guard against failures, not a mechanism that is interesting per se for Istanbul. When your assertion fails, your test does too, so it's good to know that things didn't work and there's no need to keep testing the rest of the code (early failure, faster fixes).
Whether you have to mock the db.connectDatabase
Yes, at least for the code you posted. You can assign db as a generic object mock to the global context and expect methods to be called, but you can also simplify your life by writing this:
function createDB(err1, err2) {
  return {
    connectDatabase(aa, callback) {
      callback(err1, {
        model(name, value) {
          return {
            findOneAndUpdate($0, $1, $2, fn) {
              fn(err2, { any: 'object' });
            }
          };
        }
      });
    }
  };
}

global.db = createDB(null, null);
This code in your test file can be used to create a global db that behaves differently according to the errors you pass along, giving you the ability to run the same test file several times with different expectations.
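For instance (illustrative), you can build a different global db per scenario and re-run the file, as described in the next section:

global.db = createDB(new Error('connect failed'), null); // connectDatabase yields an error
// ...run the test file: the connectDatabase error branch gets covered

global.db = createDB(null, new Error('update failed')); // findOneAndUpdate yields an error
// ...run the test file again: the findOneAndUpdate error branch gets covered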
How to run the same test more than once
Once your test is completed, delete require.cache[require.resolve('../test/file')] and then require('../test/file') again.
Do this as many times as you need.
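A tiny helper makes that concrete (a sketch; the spec path is hypothetical):

function runSpec(path) {
  delete require.cache[require.resolve(path)]; // forget the previous run
  require(path);                               // load and execute the spec again
}

runSpec('../test/file');
runSpec('../test/file'); // second pass, e.g. after swapping global.db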
When there is conditional feature detection
I usually run the test several times, deleting global constructors in case these are patched with a fallback. I also store them so I can put them back later on.
When the code is obvious but shouldn't be reached
In case you have if (err) process.exit(1); you rarely want to reach that part of the code. There are various comments understood by Istanbul that would help you skip parts of the test like /* istanbul ignore if */ or ignore else, or even the generic ignore next.
Please think twice about whether it's just you being lazy or whether that part really can safely be skipped ... I got bitten a couple of times by a badly handled error, which is a disaster, because when it happens is exactly when you most need your code to keep running and/or giving you all the info you need.
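For example (illustrative):

/* istanbul ignore if */
if (err) {
  process.exit(1); // deliberately unreachable in a test run
}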
What is being covered?
Maybe you know this already, but the coverage/lcov-report/index.html file, which you can open right away in any browser, will show you all the parts that aren't covered by your tests.

Stubbing variables in a constructor?

I'm trying to figure out how to properly stub this scenario, but I'm a little stuck.
The scenario is, I've got a db.js file that has a list of CouchDB databases in it (each database contains tweet entries for a particular year).
Each year a new database is created and added to this list to hold the new entries for that year (so the list of databases isn't constant, it changes each year).
So my db.js file looks like this:
var nano = require('nano')(`http://${host}`);

var databaseList = {
  db1: nano.use('db2012'),
  db2: nano.use('db2013'),
  db4: nano.use('db2014'),
  db5: nano.use('db2015'),
  db6: nano.use('db2016')
};

module.exports.connection = nano;
module.exports.databaseList = databaseList;
And event.js (a simple model file), before methods are added, looks like this:
var lastInObject = require('../../helpers/last_in_object');
var db = require('../../db');

var EventModel = function EventModel() {
  this.connection = db.connection;
  this.databaseList = db.databaseList;
  this.defaultDatabase = lastInObject(db.databaseList);
};

EventModel.prototype.findAll =
  function findAll(db, callback) { /* ... */ };
My question is, how do I stub the databaseList so I can safely test each of the model methods without any brittleness from the growing databaseList object?
Ideally I'd like to be able to hijack the contents of the databaseList in my tests, to mock different scenarios, but I'm unsure how to tackle it.
Here's an example test, to ensure the defaultDatabase property is always pointing to the last known event, but obviously I don't want to have to update this test every year when databaseList changes, as that makes the tests very brittle.
it('should set the default database to the last known event', () => {
  var Event = require('../event');
  var newEventModel = new Event();

  expect(newEventModel.defaultDatabase.config.db)
    .to.equal('db2014');
});
Suggestions welcome! If I've gone about this wrong, let me know what I've done and how I can approach it!
Also, this is just a scenario; I do have tests for lastInObject. I'm more interested in how to mock the data in question.
In my opinion you need to stub the whole "db" module. That way you won't have any real db connection and you can easily control the environment of your tests. You can achieve this by using the mockery module.
That way you can stub the object that require('../../db') returns. This will allow you to set whatever value you like in the properties of that object.
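A minimal sketch of that approach with mockery (the fake databaseList entries are made up, and the before/after hooks assume mocha, as in your example test):

var mockery = require('mockery');

before(function () {
  mockery.enable({ warnOnUnregistered: false, useCleanCache: true });
  // The key must match the path used inside event.js.
  mockery.registerMock('../../db', {
    connection: {},
    databaseList: {
      db1: { config: { db: 'db2013' } },
      db2: { config: { db: 'db2014' } } // lastInObject should pick this one
    }
  });
});

after(function () {
  mockery.deregisterAll();
  mockery.disable();
});

it('should set the default database to the last known event', () => {
  var Event = require('../event'); // picks up the mocked ../../db
  expect(new Event().defaultDatabase.config.db).to.equal('db2014');
});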
Building upon Scotty's comment in the accepted answer a bit... here's some code I've got which seems to do the job. No guarantees that this is a good implementation, but it does successfully stub out the insert and get methods. Hopefully it helps someone. :)
// /src/common/database.js
const database = require('nano')('http://127.0.0.1/database');

module.exports = {
  async write(document, documentId) {
    return await new Promise(resolve => database.insert(document, documentId, resolve));
  },
  async read(documentId) {
    return await new Promise(resolve => database.get(documentId, resolve));
  }
};

// /test/setup.js
const proxyquire = require('proxyquire');

proxyquire('../src/common/database.js', {
  nano() {
    return {
      insert(document, documentId, callback) { callback(); },
      get(documentId, callback) { callback(); }
    };
  }
});

Meteor Method not defined in Template from schema

I have a Meteor method defined in ClassModel.js, which is located within /server. In the client js file, I set up a template event that tries to call this function, but the function keeps throwing a ReferenceError and is undefined. Any idea why?
Code:
client/client.js
Template.class_disc.events({
  'click .pick_class': function (event) {
    event.preventDefault();
    var id = $(event.currentTarget).parent('div')[0].id;
    var explo = id.split("\"");
    id = explo[0];
    Meteor.call(findClassByID, id, function (err, res) {
      console.log(res.content);
    });
  }
});
/server/classModel.js
Meteor.methods({
  findClassByID: function (id) {
    console.log('in findclassbyid');
    return Classes.find({ _id: id }).fetch();
  }
});
Could the problem be that I have multiple Meteor.methods({}) declarations across different server files? Help would be much appreciated.
I mostly just need to look at the Classes collection and verify that the IDs I'm pulling match some in the database, for sanity. Might there also be a way to query/publish the whole classes database to the client so I can query it within the console?
lol wow, this turned out to be a syntax error.
When calling a Meteor method with Meteor.call, the method name needs to be in quotations!
Meteor.call("methodName", args, callback);

I guess I don't "get" async programming

I've been using node.js for about 6 months now, off and on. But, I guess I still don't completely understand designing a program around asynchronous calls.
Take, for example, my most recent program, that needs to read a config, use that config to connect to the database, and then connect to every address in the database asynchronously.
I'm using the modules fnoc and node-mysql, but this is just pseudocode.
// first, get the config
fnoc(function (err, confs) {
  // code stuff in here
  // now check that there's a database config
  if (confs.hasOwnProperty("database")) {
    // set up db connection
    var mysql_conn = mysql.createConnection({
      host: confs.database.host,
      user: confs.database.user,
      password: confs.database.password,
      database: confs.database.database
    });
    // do some querying here.
    mysql_conn.query(query, function (err, records, fields) {
      records.forEach(function (host) {
        // four levels in, and just now starting the device connections
      });
    });
  }
});
Every time I write something like this with callback inside of callback inside of callback, I feel like I'm doing something wrong. I know of promises and the async node library, but it seems like if those are the solutions, they should be default functionality. Am I doing something wrong, or is it just not clicking for me?
EDIT: Some suggestions include using functions for the callbacks, but that seems worse somehow (unless I'm doing it wrong, which is entirely possible). You end up calling one function inside of another, and it seems especially spaghetti-ish.
The example above, with functions:
function make_connection(hosts) {
  hosts.forEach(function (host) {
    // here's where the fun starts
  });
}

function query_db(dbinfo) {
  var mysql_conn = mysql.createConnection({
    host: dbinfo.host,
    user: dbinfo.user,
    password: dbinfo.password,
    database: dbinfo.database
  });
  // do some querying here.
  mysql_conn.query(query, function (err, records, fields) {
    make_connection(records);
  });
}

// first, get the config
fnoc(function (err, confs) {
  // code stuff in here
  // now check that there's a database config
  if (confs.hasOwnProperty("database")) {
    // set up db connection, query, and connect to the devices
    query_db(confs.database);
  }
});
The aim of asynchronous functions and callbacks is to avoid any conflicts (which can happen more than you think!) between objects.
I'd like to point you to this async enthusiast: http://www.sebastianseilund.com/nodejs-async-in-practice
Yes, the callbacks do take some getting used to, but it's worth it!
In short: instead of
foo(function () {
  // ... stuff #1 ...
  bar(function () {
    // ... stuff #2 ...
    baz();
  });
});
do
foo(handleFoo);

function handleFoo() {
  // ... stuff #1 ...
  bar(handleBar);
}

function handleBar() {
  // ... stuff #2 ...
  baz();
}
Of course, it can (and maybe should) be more granular, but that depends on the actual code. This is just a pattern to avoid nesting functions. You can also encapsulate these methods more.
This is the "vanilla" approach. There are also libraries that allow you managing this in nice ways.
If you feel really tired of endless callbacks, try Q or co. You can write node.js code like this:
co(function* () {
  var a = get('http://google.com');
  var b = get('http://yahoo.com');
  var c = get('http://cloudup.com');
  var res = yield [a, b, c];
  console.log(res);
})();
It's almost a different way of writing, so it can be difficult to grasp but the results are quite good.
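The question also mentions plain promises; the equivalent with Promise.all looks like this (a sketch, assuming get returns a promise):

Promise.all([
  get('http://google.com'),
  get('http://yahoo.com'),
  get('http://cloudup.com')
]).then(function (res) {
  console.log(res); // array of the three results, same as the co example
});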

Structure for unit testing on node.js with mongoose

I've been developing with node.js for months but now I'm starting a new project and I'd like to know how to structure the app.
My problem comes when talking about unit testing. I will use nodeunit to write unit tests.
Also I'm using express to define my REST routes.
I was thinking about writing the code that accesses the database in two "separate" files (there will be more, obviously, but I'm just trying to simplify the code). First, there is the routes code.
var mongoose = require('mongoose')
  , itemsService = require('./../../lib/services/items-service');

// GET '/items'
exports.list = function (req, res) {
  itemsService.findAll({
    start: req.query.start,
    size: req.query.size,
    cb: function (offers) {
      res.json(offers);
    }
  });
};
And, as used there, an items service that only accesses the data layer. I'm doing this so that unit tests cover only the data access layer. It'll be something like this:
var mongoose = require('mongoose')
  , Item = require('./../mongoose-models').Item;

exports.findAll = function (options) {
  var query = Item
    .find({});
  if (options.start && options.size) {
    query
      .limit(options.size)
      .skip(options.start);
  }
  query.exec(function (err, offers) {
    if (!err) {
      options.cb(offers);
    }
  });
};
This way I can check with unit tests that it works correctly, and I can use this code everywhere I want. The only thing I'm not sure I've done correctly is the way I pass a callback function to consume the returned value.
What do you think?
Thanks!
Yes, quite easily!
You can use a unit testing module like mocha and either Node's own assert or another assertion library such as should.
As an example of a test case for your example model:
var ItemService = require('../../lib/services/items-service');
var should = require('should');
var mongoose = require('mongoose');

// We need a database connection
mongoose.connect('mongodb://localhost/project-db-test');

// Now we write specs using the mocha BDD api
describe('ItemService', function () {
  describe('#findAll( options )', function () {
    // Async test: the lone argument is the "test complete" callback
    it('"args.size" returns the correct length', function (done) {
      var _size = Math.round(Math.random() * 420);
      ItemService.findAll({
        size: _size,
        cb: function (result) {
          should.exist(result);
          result.length.should.equal(_size);
          // etc.
          done(); // We call the async test complete method
        }
      });
    });

    it('does something else...', function () {
    });
  });
});
And so on, ad nauseam.
Then, when you're done writing your tests - assuming you've $ npm install mocha'd - you'd simply run $ ./node_modules/.bin/mocha, or $ mocha if you used npm's -g flag.
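A common convenience (not shown in the original answer) is to point npm's test script at mocha, so $ npm test finds the locally installed binary for you:

// package.json (relevant part only)
{
  "scripts": {
    "test": "mocha"
  }
}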
Depends how thorough/detailed you want to be, really. I've always been advised to, and find it easier to, write the tests first to get a clear specification perspective, then write the implementation against the tests, with any extra insight a freebie.
