beforeEach and afterEach for every test case using globals.js - javascript

So, I have a globals.js file where I have defined beforeEach and afterEach, but from what I understand from this link Nightwatch Globals, the beforeEach and afterEach hooks are called once before and after a test suite (a single JS file). In my framework, however, I have multiple test cases in a single JS file (or test suite), and I want beforeEach and afterEach to run before and after every test case. Is there any way to achieve that? Below is my globals.js file:
module.exports = {
  asyncHookTimeout: 40000,

  beforeEach: function (browser, done) {
    // browser.maximizeWindow();
    // browser.deleteCookies();
    browser.perform(function () {
      console.log('Inside BeforeEach');
      done();
    });
  },

  afterEach: function (browser, done) {
    browser.end(function () {
      console.log('Inside After Each');
      done();
    });
  },
};

Of course there is! Just make use of the well-known Nightwatch test hooks.
If you want to target your test suites, then, as you pointed out, you use the global test hooks.
If you want to target your test cases, then you use the test-level hooks inside the test file.
Example (your test file should look like this):
module.exports = {
  before(browser) {
    // > this will get run only ONCE, before all the tests <
  },
  beforeEach(browser) {
    // > this will get run before every test case <
  },
  tags: ['your', 'tags', 'go', 'here'],
  'Test Case No.1': (browser) => {
    // > this test does something here <
  },
  'Test Case No.2': (browser) => {
    // > this test does something else here <
  },
  'Test Case No.3': (browser) => {
    // > this test does something else here <
  },
  afterEach(browser) {
    // > this will get run after every test case <
  },
  after(browser) {
    // > this will get run ONCE, after all tests have run <
  }
};
Lastly, quoting the docs:
The before and after will run before and after the execution of the test suite respectively (in our case, the test file), while beforeEach and afterEach are run before and after each test case (test step).
LE: What #AlapanDas wants is to custom-tailor the way the Nightwatch test runner handles test-level hooks. This is of course doable, but dirty. You would have to rewrite the hooking logic in the following files:
Nightwatch#v0.9.x:
testcase.js (path: /nightwatch/lib/runner/testcase.js);
testsuite.js (path: /nightwatch/lib/runner/testsuite.js);
Nightwatch#v1.0.x:
every {hookName}.js file from the /hooks folder (path: /nightwatch/lib/testsuite/hooks/*.js);
Still, a compromise can be made here! Just try to find the common, recurring steps/instructions in your before, after, etc. hooks and extract that logic into a /custom_commands file. That would condense your test files, as well as decouple the logic from your hooks. In the long run, this will also give you a single point of change when maintaining the hooks.
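For illustration, a minimal sketch of such a custom command, assuming a hypothetical custom_commands/resetSession.js and a configured custom_commands_path in nightwatch.json:
// custom_commands/resetSession.js (hypothetical name)
// Extracts the recurring setup steps so every hook can just call browser.resetSession().
exports.command = function () {
  const browser = this;
  browser.maximizeWindow();
  browser.deleteCookies();
  browser.perform(function () {
    console.log('Session reset');
  });
  return browser; // allow further chaining
};
A test-level hook can then simply call browser.resetSession() instead of repeating those steps in every file.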

Related

How to remove all cy.route from beforeEach in Cypress

I'm creating a lot of tests for an application at my firm. Before each test I need to create a state to work on, and it's always the same, so I created some routes in my own method and then, in the support/index.js file, I created a beforeEach that looks like this:
beforeEach(() => {
  cy.server();
  cy.mockSearches(SHORTEN_SEARCHES); // this only creates mocks
  cy.loginAdmin();
});
And in 99% of tests it works fine, but there is one test that needs to work on real data. What should I do? Is there a way to ignore the global beforeEach? I guess I could move this part of the code into each test's own beforeEach, but that's code repetition. Or maybe I should override the cy.route calls with empty responses?
You can add a condition to your beforeEach() to exit before your setup:
beforeEach(() => {
  if (shouldUseRealData) return;
  cy.server();
  cy.mockSearches(SHORTEN_SEARCHES); // this only creates mocks
  cy.loginAdmin();
});
As stated in the docs about environment variables, you can set an environment variable in different ways. One way would be to set it in your command line when calling cypress run:
cypress run --env use_mock=true
And then you would use it with Cypress.env('use_mock').
beforeEach(() => {
  if (Cypress.env('use_mock')) {
    cy.server();
    cy.mockSearches(SHORTEN_SEARCHES); // this only creates mocks
    cy.loginAdmin();
  }
});
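As a sketch of the other direction (assuming use_mock defaults to true and that spec files are evaluated before their hooks run), the one real-data spec could flip the flag just for itself:
// real-data.spec.js (hypothetical spec): opt out of the mocked setup for this file only
Cypress.env('use_mock', false);

describe('search against real data', () => {
  it('uses real backend responses', () => {
    // the global beforeEach skips cy.server()/mocks because use_mock is false here
    cy.visit('/search');
    // ...assertions against real data
  });
});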

How to increase the code coverage using istanbul in node.js

I am using Istanbul for code coverage, but I am getting a very low coverage percentage, particularly in the model files.
Consider the following model file:
ModelA.js
const mongoose = require('mongoose');
const Schema = mongoose.Schema;
var app = require('../server');
var db = require('../db/dbConnection');
var config = require('../configs/config');

const Schema1 = new Schema({ 'configurations': [] });

exports.save = function (aa, data, callback) {
  var logMeta = {
    file: 'models/modelA',
    function: 'save',
    data: {},
    error: {}
  };
  if (!aa) {
    return callback('aa is required');
  }
  global.logs[aa].log('info', 'AA: ' + aa, logMeta);
  db.connectDatabase(aa, function (error, mongoDB) {
    if (error) {
      logMeta.data['error'] = error;
      global.logs[aa].log('error', 'error', logMeta);
      return callback(error);
    }
    const ModelA = mongoDB.model('bbb', Schema1);
    ModelA.findOneAndUpdate({}, data, { upsert: true, new: true, runValidators: true }, function (error, result) {
      if (error) {
        logMeta.data['error'] = error;
        global.logs[aa].log('error', 'error', logMeta);
      } else {
        logMeta.data = {};
        logMeta.data['result'] = JSON.parse(JSON.stringify(result));
        global.logs[aa].log('info', 'result', logMeta);
      }
      callback(error, result);
    });
  });
};
TestA.js:
var should = require('should'),
    sinon = require('sinon'),
    ModelA = require('../models/ModelA');

describe('Model test', function () {
  it('Should save Model', function (done) {
    var todoMock = sinon.mock(new ModelA({ 'configurations': [] }));
    var todo = todoMock.object;

    todoMock
      .expects('save')
      .yields(null, 'SAVED');

    todo.save(function (err, result) {
      todoMock.verify();
      todoMock.restore();
      should.equal('SAVED', result, 'Test fails due to unexpected result');
      done();
    });
  });
});
But I am getting a code coverage percentage of 20. So how can I increase the percentage?
Also:
1. Do I have to mock db.connectDatabase? If yes, how can I achieve that?
2. Do I have to use a test DB to run all my unit tests, or do I have to assert?
3. Will code coverage work for unit tests or integration tests?
Please share your ideas. Thanks.
I have been using Istanbul to 100% code cover most of my client/server projects, so I might have the answers you are looking for.
How does it work
Whenever you require some local file, it gets wrapped all over the place so Istanbul can tell whether every one of its parts is reached by your code.
Not only is the required file tainted, your running test is too.
However, while it's easy to code cover the running test file, mocked classes and their code might never be executed.
todoMock.expects('save')
According to the Sinon documentation:
Overrides todo.save with a mock function and returns it.
If Istanbul tainted the real save method, anything within that scope won't ever be reached, so you are actually testing that the mock works, not that your real code does.
This should answer your question: will code coverage work for unit tests or integration tests?
The answer is that it covers the code, which is the only thing you're interested in from a code coverage perspective. Covering Sinon JS is nobody's goal.
No need to assert ... but
Once you've understood how Istanbul works, it follows naturally that it doesn't matter whether you assert or not; all that matters is that you reach the code for real and execute it.
Asserting is just your guard against failures, not a mechanism that is interesting per se to Istanbul. When your assertion fails, your test does too, so it's good for you to know that things didn't work, and there's no need to keep testing the rest of the code (early failure, faster fixes).
Whether you have to mock the db.connectDatabase
Yes, at least for the code you posted. You can assign db as a generic object mock to the global context and expect methods to be called, but you can also simplify your life by writing this:
function createDB(err1, err2) {
  return {
    connectDatabase(aa, callback) {
      callback(err1, {
        model(name, value) {
          return {
            findOneAndUpdate($0, $1, $3, fn) {
              fn(err2, { any: 'object' });
            }
          };
        }
      });
    }
  };
}

global.db = createDB(null, null);
This code in your test file can be used to create a global db that behaves differently according to the errors you pass along, giving you the ability to run the same test file several times with different expectations.
How to run the same test more than once
Once your test is completed, delete require.cache[require.resolve('../test/file')] and then require('../test/file') again.
Do this as many times as you need.
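Put together, the delete-and-re-require loop might look roughly like this (reusing createDB and the '../test/file' path from above, and assuming the model picks up the mocked db from the global context as described):
// run the same test file once per db behaviour
[
  createDB(null, null),                         // happy path
  createDB(new Error('connect failed'), null),  // connectDatabase errors
  createDB(null, new Error('update failed'))    // findOneAndUpdate errors
].forEach(function (dbMock) {
  global.db = dbMock;
  delete require.cache[require.resolve('../test/file')];
  require('../test/file');
});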
When there is conditional feature detection
I usually run the test several times, deleting global constructors in case these are patched with a fallback. I also usually store them so I can put them back later on.
When the code is obvious but shouldn't be reached
If you have if (err) process.exit(1); you rarely want to reach that part of the code. There are various comments understood by Istanbul that will help you skip parts of the coverage, like /* istanbul ignore if */ or /* istanbul ignore else */, or even the generic /* istanbul ignore next */.
Please think twice about whether it's just you being lazy or whether that part can really, safely, be skipped ... I got bitten a couple of times by a badly handled error, which is a disaster, since when it happens is exactly when you most need your code to keep running and/or to give you all the info you need.
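For example, such an ignore comment sits right above the branch it excludes:
/* istanbul ignore if */
if (err) {
  // deliberately excluded from coverage: a hard exit is not worth simulating here
  process.exit(1);
}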
What is being covered?
Maybe you know this already, but the coverage/lcov-report/index.html file, which you can open right away in any browser, will show you all the parts that aren't covered by your tests.
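That report is produced when you run your tests through Istanbul; assuming a Mocha setup like yours, a typical invocation would be something along the lines of:
istanbul cover node_modules/mocha/bin/_mocha -- test/
after which the coverage/lcov-report folder appears in your project root.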

How to clean up after failed Intern functional tests?

I have some functional tests that run using Intern (3), and the last step in each test is to do some cleanup, including clearing localStorage on the remote browser and switching back to the original window (by storing the window handle right away and switching back to it before the test ends, so that subsequent tests don't fail because they're trying to run on a closed window if the previous test ended on a different one). However, if some Chai assertions fail in a .then(), the cleanup code at the end gets skipped. Is there a better way to do cleanup for functional tests that will still get run even when some assertions fail?
this.leadfoot.remote
  .get(require.toUrl(url))
  .execute(function () {
    return document.querySelector('#welcome').textContent;
  })
  .then(welcome => {
    assert.equal(welcome, 'hello world', 'properly greeted');
  })
  .execute(function () {
    localStorage.clear();
  });
If the assertion fails, it'll never clear localStorage at the end, and if the next test that runs expects localStorage to be empty, it will fail too. Is there a better way to clean up after a functional test?
Use an afterEach method.
afterEach() {
  return this.remote
    .execute(function () {
      localStorage.clear();
    });
},

'my test'() {
  return this.remote
    .get(...)
    .execute(...)
    .then(welcome => {
      assert.equal(welcome, ...);
    });
}
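For context, in Intern 3's object interface the hook sits alongside the tests in the suite registration, roughly like this (suite name and page URL are placeholders):
define(function (require) {
  var registerSuite = require('intern!object');
  var assert = require('intern/chai!assert');

  registerSuite({
    name: 'welcome page',

    // runs after every test, whether it passed or failed
    afterEach: function () {
      return this.remote.execute(function () {
        localStorage.clear();
      });
    },

    'my test': function () {
      return this.remote
        .get(require.toUrl('index.html'))
        .execute(function () {
          return document.querySelector('#welcome').textContent;
        })
        .then(function (welcome) {
          assert.equal(welcome, 'hello world', 'properly greeted');
        });
    }
  });
});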
In our project we do it like this:
afterEach: function () {
  var command = this.remote;
  if (intern.variables.currentCase && !intern.variables.currentCase.hasPassed) {
    command = command.execute(function () {
      localStorage.clear();
    });
  }
  return command;
},
and localStorage.clear() will be executed only if the test has failed.

Is there a way to get current Mocha instance and edit options at runtime?

Let's say you have a simple mocha test:
describe("Suite", function(){
it("test",function(doneCallback){
// here be tests
});
});
In this test I can change the timeout by adding this.timeout(VALUE); anywhere within the describe function.
However, besides the timeout value, there are plenty of other Mocha options that can be declared exclusively from the command line or from a mocha.opts file that lives in the test folder (./test/mocha.opts).
What I want is to change some of these options at run time (for example, the reporter), and not on the command line / in the mocha.opts file.
From my research into what's possible, I found that there is an article explaining how you can use Mocha programmatically, which would allow changing these options at run time, but you need to create the Mocha instance yourself, whereas in an ordinary test one doesn't have direct access to the Mocha instance.
So, is there a way to get the Mocha instance from an existing test and change some of these options, like the reporter, at run time during a test?
I would like an option that doesn't require modifying the source code of Mocha in any way (I suppose I could tamper with Mocha to implement a way to get the instance directly from the Mocha constructor).
The best way to achieve that is to use Mocha as per the wiki link that you have already referenced, that is, using Mocha programmatically.
So, regarding your inquiry about changing the reporter parameter, here is a brief example that would do what you want, running the tests in an already existing file named test-file-a.js that contains your tests:
var Mocha = require('mocha'),
    mocha = new Mocha(),
    path = require('path');

mocha.addFile(path.join(__dirname, 'test-file-a.js'));

mocha
  .reporter('list')
  .run();
Besides that, there are plenty of other options you can use, and there are also listeners for events, like test, in case you want to do something during a test, for example:
mocha
  .reporter('list')
  .ui('tdd')
  .bail()
  .timeout(10000)
  .run()
  .on('test', function (test) {
    if (test.title === 'some title that you want here') {
      // do something
    }
  });
Please note that you can define the options for each Mocha instance that will run a test suite, but not during the runtime of a test suite. So, for example, if you start your tests for test-file-a.js with the option reporter('list') as above, you cannot change it to something else while the tests are running, as you can for example with the timeout option via this.timeout().
So you would have to instantiate a new Mocha instance, as in the examples above, with different options each time.
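As a rough sketch of that idea (assuming the same test-file-a.js and that it is safe to re-require), running the same file twice with different reporters could look like this:
var Mocha = require('mocha');
var path = require('path');

function runWithReporter(name, done) {
  var mocha = new Mocha();
  mocha.addFile(path.join(__dirname, 'test-file-a.js'));
  mocha.reporter(name).run(done);
}

runWithReporter('list', function () {
  // the file must be evicted from the require cache before it can run again
  delete require.cache[require.resolve(path.join(__dirname, 'test-file-a.js'))];
  runWithReporter('spec', function () {});
});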
No, you cannot, without changing the code.
In short, the mocha instance is created in a scope you cannot access from tests. Without going into details, the objects provided in your scope cannot change the options you want. (You cannot do this: link)
But there is a way to define your own reporter and customize the output for each test:
Create a file called MyCustomReporter.js:
'use strict';

module.exports = MyCustomReporter;

function MyCustomReporter(runner) {
  runner.on('start', function () {
    var reporter = this.suite.suites['0'].reporter;
    process.stdout.write('\n');
  });

  runner.on('pending', function () {
    process.stdout.write('\n ');
  });

  runner.on('pass', function (test) {
    var reporter = this.suite.useReporter;
    if (reporter == 'do this') {
    }
    else if (reporter == 'do that') {
    }
    process.stdout.write('\n ');
    process.stdout.write('passed');
  });

  runner.on('fail', function () {
    var reporter = this.suite.useReporter;
    process.stdout.write('\n ');
    process.stdout.write('failed ');
  });

  runner.on('end', function () {
    console.log();
  });
}
When you run mocha, pass the path of MyCustomReporter.js as the reporter parameter (without .js), e.g.:
mocha --reporter "/home/user/path/to/MyCustomReporter"
The default mocha script actually tries to require a reporter file if it is not found among the default ones (under lib/reporters); github link
Finally, in your tests, you can pass some parameters to customize the output of your reporter:
var assert = require('assert');

describe('Array', function () {
  describe('#indexOf()', function () {
    this.parent.reporter = 'do this';

    it('should return -1 when the value is not present', function () {
      this.runnable().parent.useReporter = 'do this';
      assert.equal([1, 2, 3].indexOf(4), -1);
    });
  });
});

Mocha test order file-wise and webdriverjs instance persistence

I'm testing my web-app with Mocha and WebDriver. I'm struggling with the best practices regarding Mocha test-order and persistent state of the driver.
I want to separate tests into different files, e.g.
test\
  index.js
  selenium\
    login.js
    search.js
So, execution-wise, login.js has to run first because it logs in to the app and gets authenticated. Only after that is search.js possible. But how? In login.js I now have this:
webdriverjs = require('webdriverjs')

describe 'UI/Selenium', ->
  client = {}

  before ->
    client = webdriverjs.remote
      desiredCapabilities:
        browserName: 'chrome'
    client.init()
    client.windowHandleSize({width: 1920, height: 1080})

  it 'should let us login', (done) ->
    client.url('http://127.0.0.1:1337/login')
      .setValue('#username', 'username')
      .setValue('#password', 'password')
      .buttonClick('button[type="submit"]')
      .waitFor '#search_results_user', 5000, (err) -> throw err if err
      .call done
How can I persist the state of client to other tests without having to re-init it every time? And how do I define the execution order of files with Mocha?
How can I persist the state of client to other tests without having to re-init it every time?
You set up whatever you want to share among tests in a before hook (and tear it down in an after hook). This would mean moving the login code from your test into your before hook. Supposing you are testing the "foo" view, you could do:
describe("foo view", function () {
before(function () { /* create selenium driver */ });
describe("when used by a logged in user", function () {
before(function () { /* log in */ });
it(...
it(...
after(function () { /* log out */ });
});
describe("when used by a logged out user", function () {
it(...
it(...
});
after(function () { /* shut down the driver */ });
});
And how do I define the execution order of files with Mocha?
Mocha tests should not depend on one another and consequently should not depend on the order in which they are executed.
If you are in a situation where you must break this cardinal rule, you could just invoke Mocha from the command line with the list of test files, in the order you want. Or you could launch Mocha programmatically and use addFile to add the files in order.
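For illustration, with the layout from the question, the command-line approach is just:
mocha test/selenium/login.js test/selenium/search.js
and the programmatic approach is a sketch using the same addFile API shown earlier:
var Mocha = require('mocha');

var mocha = new Mocha();
// files are run in the order they are added
mocha.addFile('test/selenium/login.js');
mocha.addFile('test/selenium/search.js');

mocha.run(function (failures) {
  process.exitCode = failures ? 1 : 0;
});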
