Testing requireJS methods async with Jasmine - javascript

I am trying to test a function that requires a module using jasmine and requirejs.
Here is a dummy code:
define("testModule", function() {
return 123;
});
var test = function() {
require(['testModule'], function(testModule) {
return testModule + 1;
});
}
describe("Async requirejs test", function() {
it("should works", function() {
expect(test()).toBe(124);
});
});
It fails, because it is an async method. How can I perform a test with it?
Note: I don't want to change my code, just my tests describe function.

For testing of an asynchronous stuff check runs(), waits() and waitsFor():
https://github.com/pivotal/jasmine/wiki/Asynchronous-specs
Though this way looks a bit ugly as for me, therefore you could also consider following options.
1. I'd recommend you to try jasmine.async that allows you to write asynchronous test cases in this way:
// Run an async expectation
async.it("did stuff", function(done) {
// Simulate async code:
setTimeout(function() {
expect(1).toBe(1);
// All async stuff done, and spec asserted
done();
});
});
2. Also you can run your tests inside require's callback:
// Run the specs inside require's callback so the modules are resolved
// before any expectations execute.
require([
  'testModule',
  'anotherTestModule'
], function(testModule, anotherTestModule) {
  describe('My Great Modules', function() {
    describe('testModule', function() {
      it('should be defined', function() {
        expect(testModule).toBeDefined();
      });
    });
    describe('anotherTestModule', function() {
      it('should be defined', function() {
        // Fixed typo: was "anoterTestModule", which would throw a
        // ReferenceError instead of exercising the expectation.
        expect(anotherTestModule).toBeDefined();
      });
    });
  });
});
3. Another point is I guess that this code is not working as you're expecting:
var test = function() {
require(['testModule'], function(testModule) {
return testModule + 1;
});
};
Because if you call test(), it won't return you testModule + 1.

Related

Jasmine - how to test functions that should only be called once (executed via a closure variable)

I'm using a closure to ensure that something is only called once:
var pageDOM = (function() {
var mounted = false
return {
initializePage: function() {
if (mounted == false) {
pageDOM.addBoxes();
mount = true
}
pageDOM.otherInitProcedures();
},
otherFunction: function() {
}
}
})();
I'm not sure what's the right way of thinking about unit testing pageDOM.initializePage. Jasmine specs are run in random order, and I think it's important to keep this for testing integrity (i.e., I would NOT want to impose order). This is my spec code:
describe("pageDOM", function() {
describe("initializePage", function() {
beforeEach(function() {
spyOn(pageDOM, "addBoxes")
spyOn(pageDOM, "otherInitProcedures")
})
describe("calling initializePage first time", function() {
beforeEach(function() {
pageDOM.initializePage();
})
it("should call both functions", function() {
expect(pageDOM.otherInitProcedures).toHaveBeenCalled()
expect(pageDOM.addBoxes).toHaveBeenCalled()
})
describe("calling initializePage again", function() {
beforeEach(function() {
pageDOM.initializePage();
})
it("should only call otherInitProcedures", function() {
expect(pageDOM.otherInitProcedures).toHaveBeenCalled()
expect(pageDOM.addBoxes).not.toHaveBeenCalled()
})
})
})
})
})
The problem is that if the specs don't run in order, then both will fail. What's a way to test this, or should I even try to test this?
I would assign the spies to variables and reset the spies in an afterEach hook.
Something like this (follow the !! in the comments):
describe("pageDOM", function() {
describe("initializePage", function() {
// !! initialize these variables
let addBoxesSpy;
let otherInitProceduresSpy;
beforeEach(function() {
// !! assign the variables
addBoxesSpy = spyOn(pageDOM, "addBoxes")
otherInitProceduresSpy = spyOn(pageDOM, "otherInitProcedures")
})
describe("calling initializePage first time", function() {
beforeEach(function() {
pageDOM.initializePage();
})
it("should call both functions", function() {
expect(pageDOM.otherInitProcedures).toHaveBeenCalled()
expect(pageDOM.addBoxes).toHaveBeenCalled()
})
describe("calling initializePage again", function() {
beforeEach(function() {
pageDOM.initializePage();
})
it("should only call otherInitProcedures", function() {
expect(pageDOM.otherInitProcedures).toHaveBeenCalled()
expect(pageDOM.addBoxes).not.toHaveBeenCalled()
})
})
})
// !! Reset the spies in an afterEach
afterEach(() => {
addBoxesSpy.calls.reset();
otherInitProceduresSpy.calls.reset();
});
})
})
After resetting the calls to what you're spying on, order should not matter anymore.
Your "pageDOM" object is stateful, so using describe twice and triggering the call to the "initializePage" method in a beforeEach hook each time doesn't make sense. Instead you can do it like this -
describe("pageDOM:initializePage", function() {
describe("calling initializePage first time", function() {
beforeEach(function() {
spyOn(pageDOM, "addBoxes");
spyOn(pageDOM, "otherInitProcedures");
})
it("should call both functions", function() {
pageDOM.initializePage();
expect(pageDOM.otherInitProcedures).toHaveBeenCalled()
expect(pageDOM.addBoxes).toHaveBeenCalled()
})
it("should only call otherInitProcedures", function() {
pageDOM.initializePage();
expect(pageDOM.otherInitProcedures).toHaveBeenCalled()
expect(pageDOM.addBoxes).not.toHaveBeenCalled()
})
})
})
Jasmine executes it blocks within a describe sequentially and you can get the desired checks as well. Working stackblitz link for you(ignore other testcases)
To make the code more testable, I would not initiate the function immediately.
// Factory instead of an IIFE, so tests can build fresh, isolated instances.
var pageDOMConstructor = function() { // fixed typo: was "vfunction" (syntax error)
  // Tracks whether addBoxes has already run for this instance.
  var mounted = false;
  return {
    initializePage: function() {
      if (mounted == false) {
        pageDOM.addBoxes();
        // Fixed bug: was "mount = true", which created an implicit global
        // and left "mounted" false, so the once-only guard never engaged.
        mounted = true;
      }
      pageDOM.otherInitProcedures();
    },
    otherFunction: function() {
    }
  };
};
var pageDOM = pageDOMConstructor();
then you can test pageDOMConstructor easily.
To check how often something has been called you can use toHaveBeenCalledTimes
this is not quite complete and might need some small changes, its just to give you an idea of how to solve this:
describe("pageDOMConstructor", function () {
  describe("initializePage", function () {
    // A fresh instance per spec keeps the internal "mounted" flag isolated,
    // so spec execution order no longer matters.
    let pageDom;
    beforeEach(function () {
      pageDom = pageDOMConstructor();
      // Fixed: spy on the fresh "pageDom" instance — the original spied on
      // (and called) the global "pageDOM", leaving the local unused.
      // NOTE(review): assumes the constructor's methods invoke the returned
      // object's own addBoxes/otherInitProcedures — confirm against the factory.
      spyOn(pageDom, "addBoxes");
      spyOn(pageDom, "otherInitProcedures");
    });
    it("should call both functions when calling initializePage first time", function () {
      pageDom.initializePage();
      expect(pageDom.otherInitProcedures).toHaveBeenCalledTimes(1);
      expect(pageDom.addBoxes).toHaveBeenCalledTimes(1);
    });
    it("should only call otherInitProcedures when calling initializePage again", function () {
      pageDom.initializePage();
      pageDom.initializePage();
      // Fixed: after two calls otherInitProcedures has run twice; the
      // original expectation of toHaveBeenCalledTimes(1) could never pass.
      expect(pageDom.otherInitProcedures).toHaveBeenCalledTimes(2);
      expect(pageDom.addBoxes).toHaveBeenCalledTimes(1);
    });
  });
});
Each it should be treated like a separate test, and they should be completely self reliant - with some exceptions like beforeEach
When code is hard to test, it's often a sign that you could benefit from refactoring it a bit. I have found that testable code equals usable and flexible code in production.

Testing a function that is called from an jQuery AJAX callback with Jasmine

I have a function that makes an AJAX call to a service. I'm attempting to expect that the displayError function is called on a failure.
I have my function ajaxCall that accepts a url. Upon success I pass the result to displaySuccess and when there's an error I pass the details to displayError.
function ajaxCall(url) {
$.ajax({
method: "GET",
url: url,
data: "json",
beforeSend: function (xhr) {
//Do Stuff
},
error: function(xhr, textStatus, errorThrown) { displayError(xhr, textStatus, errorThrow, url)},
success: function (results) { displaySuccess(result) }
});
}
function displayError(xhr, textStatus, errorThrow, url) {
//Do Stuff//
}
function displaySuccess(results) {
//Do Stuff//
}
In Jasmine I have it successfully verifying the URL. My problem is in testing to insure that the displayError and displaySuccess functions are called.
I have the following for this specific issue so far.
describe('The ajaxCall component', function() {
it('should call the error function when the ajax call fails', function () {
var obj = {};
spyOn(obj, 'displayError');
spyOn($, "ajax").and.callFake(function (options) {
options.error();
});
ajaxCall('/myResource/get');
expect(obj.method).toHaveBeenCalled();
});
}
I'm a little new to unit testing and I've searched trying to find suggestions that would help but they make the unit test fail. Where am I going wrong with this?
This all boils down to how you spy on your objects and writing code that is more testable. Let's work through a few strategies.
Strategy 1
Given your current code is not within an object, you could test that these functions are called by simply testing their implementation directly.
Instead of testing that the functions were called, you would test their implementation directly.
Example
describe("strategy 1", function () {
var ajaxSpy;
beforeEach(function () {
ajaxSpy = spyOn($, 'ajax');
ajaxCall();
});
describe("error callback", function () {
beforeEach(function() {
spyOn(window, 'alert');
var settings = ajaxSpy.calls.mostRecent().args[0];
settings.error();
});
describe("when there is an error", function() {
it("should alert an error message", function() {
expect(window.alert).toHaveBeenCalledWith('Error');
});
});
});
});
Strategy 2
While the above works, it can be cumbersome to write tests. Ideally, you want to test the invocation and implementation separately.
To do so, we can spy on these functions. Since these are in the global namespace, you can spy on them through the window object.
Example
describe("strategy 2", function () {
var ajaxSpy;
beforeEach(function () {
ajaxSpy = spyOn($, 'ajax');
ajaxCall();
});
describe("error callback", function () {
beforeEach(function() {
spyOn(window, 'displayError');
var settings = ajaxSpy.calls.mostRecent().args[0];
settings.error();
});
describe("when there is an error", function() {
it("should alert an error message", function() {
expect(window.displayError).toHaveBeenCalled();
});
});
});
});
Strategy 3 (Recommended)
The final strategy, and what I recommend, has a similar setup to the second strategy, except we encapsulate our implementation into a custom object.
Doing so makes the code more testable by wrapping functionality in objects and avoids the global namespace (i.e. window).
Example
describe("solution 3", function() {
var ajaxSpy;
beforeEach(function() {
ajaxSpy = spyOn($, 'ajax');
ajaxService.ajaxCall();
});
describe("error callback", function() {
beforeEach(function() {
spyOn(ajaxService, 'displayError');
var settings = ajaxSpy.calls.mostRecent().args[0];
settings.error();
});
it("should alert an error message", function() {
expect(ajaxService.displayError).toHaveBeenCalled();
});
});
});

How to run Mocha tests in a defined order

Background
I am working on a program in Node.js and writing my test suites in Mocha with Chai and SinonJS. I have core graphics module which controls access to a node-webgl context.
Due to how node-webgl works, I only wish to initialize a context once for the entire test run. I have some tests I wish to run prior to the initialization of the core module, like so:
describe('module:core', function () {
describe('pre-init', function () {
describe('.isInitialized', function () {
it('should return false if the module is not initialized', function () {
expect(core.isInitialized()).to.be.false;
});
});
describe('.getContext', function () {
it('should error if no context is available', function () {
expect(function () {
core.getContext();
}).to.throw(/no context/i);
});
});
});
describe('.init', function () {
it('should error on an invalid canvas', function () {
expect(function () {
core.init(null);
}).to.throw(/undefined or not an object/i);
expect(function () {
core.init({});
}).to.throw(/missing getcontext/i);
});
it('should error if the native context could not be created', function () {
var stub = sinon.stub(global._canvas, 'getContext').returns(null);
expect(function () {
core.init(global._canvas);
}).to.throw(/returned null/i);
stub.restore();
});
it('should initialize the core module', function () {
expect(function () {
core.init(global._canvas);
}).not.to.throw();
});
});
describe('post-init', function () {
describe('.isInitialized', function () {
it('should return true if the module is initialized', function () {
expect(core.isInitialized()).to.be.true;
});
});
describe('.getContext', function () {
it('should return the current WebGL context', function () {
var gl = null;
expect(function () {
gl = core.getContext();
}).not.to.throw();
// TODO Figure out if it's actually a WebGL context.
expect(gl).to.exist;
});
});
});
});
Then I can run the remaining tests.
Problem
When I run this through Mocha, everything is fine since the core test suite is the first thing to be run. My concern is that if any test suites get run before the core test suite, then those test suites will fail as the core is not initialized yet.
What is the best way to ensure the core test suite is always run before any other test suites?
In the end I refactored my code to permit the core module to be torn down without affecting node-webgl and using a before block to initialize it, like so:
// Run this before all tests to ensure node-webgl is initialized
before(function () {
if (!global._document) {
global._document = WebGL.document();
global._document.setTitle('Machination Graphics Test Suite');
}
if (!global._canvas) {
global._canvas = global._document.createElement('canvas', 640, 480);
}
});
describe('test suite goes here', function () {
// Ensure core is ready for us before testing (except when testing core)
before(function () {
if (!core.isInitialized()) {
core.init(global._canvas);
}
});
// Tear down core after all tests are run
after(function () {
core.deinit();
});
...
});
Use before() as described in their documentation.
describe('hooks', function() {
before(function() {
// runs before all tests in this block
});
......
});
the function in before will run first and everything else in the describe after it.
hope this helps.

load data from module before test executes

(I asked this question recently and accepted an answer but it's still not what I need.) I really need to create dynamic tests from data loaded from a module. Each item from the array will have it's own describe statement with certain protractor actions. My previous post has an answer that says to use an it statement, but I can't do that because there's too much going on.
My main problem is that the data doesn't get loaded in time for the describe. I had another suggestion to use VCR.js or something similar but I don't think those will work because I'm using a module. Is there a way I can save the data to a separate file and load it in? Would that be a good way to go?
var data = require('get-data'); //custom module here
describe('Test', function() {
var itemsArr;
beforeAll(function(done) {
data.get(function(err, result) {
itemsArr = result; //load data from module
done();
});
})
//error: Cannot read property 'forEach' of undefined
describe('check each item', function() {
itemsArr.forEach(function(item) {
checkItem(item);
});
});
function checkItem (item) {
var itemName = item.name;
describe(itemName, function() {
console.log('describe');
it('should work', function() {
console.log('it');
expect(true).toBeTruthy();
});
});
}
});
UPDATE:
I used Eugene's answer and came up with this. I can't test each individual study how I want because the it statement doesn't fire. Is this problem even solvable??
describe('check each item', function () {
it('should load data', function (done) {
browser.wait(itemsPromise, 5000);
itemsPromise.then(function(itemsArr) {
expect(itemsArr).toBeTruthy();
studyArr = itemsArr.filter(function (item) {
return item.enabled && _.contains(item.tags, 'study');
});
studyCount = studyArr.length;
expect(studies.count()).toEqual(studyCount);
checkItems(studyArr);
done();
});
});
function checkItems (itemsArr) {
itemsArr.forEach(function (item) {
describe(item.id, function () {
console.log('checkItems', item.id);
// doesn't work
it('should work', function (done) {
expect(false).toBeTruthy();
done();
});
});
});
}
});
You're trying to do something that Jasmine does not allow: generating tests after the test suite has started. See this comment on an issue of Jasmine:
Jasmine doesn't support adding specs once the suite has started running. Usually, when I've needed to do this, I've been able to know the list of options ahead of time and just loop through them to make the it calls. [...]
("adding specs" === "adding tests")
The point is that you can generate tests dynamically but only before the test suite has started executing tests. One corollary of this is that the test generation cannot be asynchronous.
Your second attempt does not work because it is trying to add tests to a suite that is already running.
Your first attempt is closer to what you need but it does not work either because describe calls its callback immediately, so beforeAll has not run by the time your describe tries to generate the tests.
Solutions
It all boils down to computing the value of itemsArr before the test suite start executing tests.
You could create a .getSync method that would return results synchronously. Your code would then be something like:
var data = require('get-data'); //custom module here
var itemsArr = data.getSync();
describe('Test', function() {
describe('check each item', function() {
itemsArr.forEach(function(item) {
checkItem(item);
});
});
[...]
If writing .getSync function is not possible, you could have an external process be responsible for producing a JSON output that you could then deserialize into itemsArr. You'd execute this external process with one of the ...Sync functions of child_process.
Here's an example of how the 2nd option could work. I've created a get-data.js file with the following code which uses setTimeout to simulate an asynchronous operation:
var Promise = require("bluebird"); // Bluebird is a promise library.
var get = exports.get = function () {
return new Promise(function (resolve, reject) {
var itemsArr = [
{
name: "one",
param: "2"
},
{
name: "two",
param: "2"
}
];
setTimeout(function () {
resolve(itemsArr);
}, 1000);
});
};
// This is what we run when were are running this module as a "script" instead
// of a "module".
function main() {
get().then(function (itemsArr) {
console.log(JSON.stringify(itemsArr));
});
};
// Check whether we are a script or a module...
if (require.main === module) {
main();
}
Then, inside the spec file:
var child_process = require('child_process');
// Use the currently running Node binary rather than a hard-coded path
// ("/usr/bin/node"), which breaks under nvm, Homebrew, or Windows installs.
var itemsArr = JSON.parse(child_process.execFileSync(
  process.execPath, ["get-data.js"]));
describe('Test', function() {
  // itemsArr is computed synchronously above, so the tests can be
  // generated before the suite starts executing.
  itemsArr.forEach(function(item) {
    checkItem(item);
  });
  // Generates one describe/it pair per data item.
  function checkItem(item) {
    var itemName = item.name;
    describe(itemName, function() {
      console.log('describe');
      it('should work', function() {
        console.log('it');
        expect(true).toBeTruthy();
      });
    });
  }
});
I've tested the code above using jasmine-node. And the following file structure:
.
├── data.js
├── get-data.js
└── test
└── foo.spec.js
./node_modules has bluebird and jasmine-node in it. This is what I get:
$ ./node_modules/.bin/jasmine-node --verbose test
describe
describe
it
it
Test - 5 ms
one - 4 ms
should work - 4 ms
two - 1 ms
should work - 1 ms
Finished in 0.007 seconds
2 tests, 2 assertions, 0 failures, 0 skipped
Try to use a promise, something like:
var deferred = protractor.promise.defer();
var itemsPromise = deferred.promise;
beforeAll(function() {
data.get(function(err, result) {
deferred.fulfill(result);
});
})
And then:
describe('check each item', function() {
itemsPromise.then(function(itemsArr) {
itemsArr.forEach(function(item) {
checkItem(item);
});
});
});
Another solution I can think of is to use browser.wait to wait until itemsArr becomes not empty.
Is your get-data module doing some browser things with protractor? If so, you will need to set/get itemsArr within the context of the controlFlow. Otherwise it will read all the code in the get-data module, but defer its execution and not wait for it to finish before moving right along to those expect statements.
var data = require('get-data'); //custom module here
var itemsArr;
describe('Test', function() {
beforeAll(function() {
// hook into the controlFlow and set the value of the variable
browser.controlFlow().execute(function() {
data.get(function(err, result) {
itemsArr = result; //load data from module
});
});
});
//error: Cannot read property 'forEach' of undefined
describe('check each item', function() {
// hook into the controlFlow and get the value of the variable (at that point in time)
browser.controlFlow().execute(function() {
itemsArr.forEach(function(item) {
checkItem(item);
});
});
});
function checkItem (item) {
var itemName = item.name;
describe(itemName, function() {
console.log('describe');
it('should work', function() {
console.log('it');
expect(true).toBeTruthy();
});
});
}
});

Karma testing a lot of files similar in structure automatically

So I have a folder full of scripts that all resemble a structure like this
// Adapter-100.js
angular.module('myModule', ['myParentFactory', function(myParentFactory) {
return angular.extend(myParentFactory, {
"someFunctionA" : function() {},
"someFunctionB" : function() {},
"someFunctionC" : function() {}
});
}]);
And my test just checks that they have these three methods, trouble is there is about 100 of these files (they're adapters for communicating with a server)
Here is a representation of my tests file
// api-adapter-tests.js
describe('Unit: EndPointMethods', function() {
var values, factory, adapter;
// Boot the module
beforeEach(function() {
module('myModule');
inject(function ($injector) {
values = $injector.get('AppConsts');
factory = $injector.get('EndPointConnection');
adapter = $injector.get('TestAdapter'); // This needs to change to match what adapter is being tested
});
});
// Run some tests
describe('AppConsts', function() {
it('Should have an api_host key', function() {
expect(values).toBeDefined();
expect(values.api_host).toBeDefined();
expect(typeof values.api_host).toBe('string');
});
});
// Is this able to be generated to test each adapter independently?
describe('TestEndPointMethod has minimum functional definitions', function() {
it('should have 3 defined functions', function() {
expect(factory.consumeResponse).toBeDefined();
expect(factory.getEndPoint).toBeDefined();
expect(factory.mutator).toBeDefined();
});
});
});
I don't want to have to write a separate describes/it block for each adapter but rather have Karma loop over all of these and create the tests on the fly (the tests are very unlikely to ever change)
I've Googled around for a solution to this but can't seem to find anything that does this kind of thing for me.
You can wrap your suites in a closure and pass in the Adapter you want to test: mocha will take care of running it in the right way - and so will Karma.
function runSuiteFor(newAdapter){
return function(){
// api-adapter-tests.js
describe('Unit: EndPointMethods', function() {
var values, factory, adapter;
// Boot the module
beforeEach(function() {
module('myModule');
inject(function ($injector) {
values = $injector.get('AppConsts');
factory = $injector.get('EndPointConnection');
adapter = $injector.get(newAdapter); // set the Adapter here
});
});
// Run some tests
describe('AppConsts', function() {
it('Should have an api_host key', function() {
expect(values).toBeDefined();
expect(values.api_host).toBeDefined();
expect(typeof values.api_host).toBe('string');
});
});
// Is this able to be generated to test each adapter independently?
describe('TestEndPointMethod has minimum functional definitions', function() {
it('should have 3 defined functions', function() {
expect(factory.consumeResponse).toBeDefined();
expect(factory.getEndPoint).toBeDefined();
expect(factory.mutator).toBeDefined();
});
});
});
}
}
var adapters = ['MyTestAdapter1', 'MyTestAdapter2', etc...];
for( var i=0; i<adapters.length; i++){
runSuiteFor(adapters[i])();
}
Note: IE8 sometimes has issues with this approach, so if you're on Angular 1.2, bear this in mind.

Categories

Resources