Is there a way to quit the test suite and stop executing further test cases if a test case fails in Protractor?
In the case of the Jasmine testing framework, you are not the first to ask about this.
There are relevant open discussions/issues about exiting after the first failure and a --fail-fast option:
Bail on first failure
--fail-fast option?
Please add --fail-fast support
Long story short, this is an open issue, and some day Jasmine may have the functionality built in. For now, use the third-party jasmine-bail-fast module.
Aside from that, there is a handy realtimeFailure Jasmine setting. If you set it to true, it will not fail the whole test run, but it will show errors in real time, immediately after they happen, which may cover your use case. Set it in jasmineNodeOpts:
exports.config = {
    seleniumAddress: 'http://127.0.0.1:4444/wd/hub',
    ...
    jasmineNodeOpts: {
        realtimeFailure: true
    }
}
Here is my solution for skipping the remaining tests after the first failure with Jasmine 2 and Protractor.
Hope it helps.
exports.config = {
    onPrepare: function () {
        // skip tests after first fail
        var specs = [];
        var orgSpecFilter = jasmine.getEnv().specFilter;
        jasmine.getEnv().specFilter = function (spec) {
            specs.push(spec);
            return orgSpecFilter(spec);
        };
        jasmine.getEnv().addReporter(new function () {
            this.specDone = function (result) {
                if (result.failedExpectations.length > 0) {
                    specs.forEach(function (spec) {
                        spec.disable();
                    });
                }
            };
        });
    }
};
jasmine-bail-fast didn't work in my case; I'm not sure if it was because of conflicts with my other reporter plugins.
In case anyone is having the same problem, you can try protractor-fail-fast:
import failFast from 'protractor-fail-fast';

exports.config = {
    // if the import statement doesn't work, use this instead of import for older versions of node
    // plugins: [{
    //     package: 'protractor-fail-fast'
    // }],
    onPrepare: function() {
        jasmine.getEnv().addReporter(failFast.init());
    },
    afterLaunch: function() {
        failFast.clean();
    }
}
Worked perfectly well for me.
EDIT: added the import statement in the code snippet to reflect the readme of protractor-fail-fast.
You don't need all those third-party plugins. Use the native process.exit().
Code example:
it("test", function()
{
...
if(isExit)
{
browser.driver.close().then(function()
{
process.exit(1);
});
}
});
profit.
Related
I am using Protractor to test a non-Angular app, and when I use browser.forkNewDriverInstance() it no longer seems to work correctly, since I get this error when executing:
Failed: Error while waiting for Protractor to sync with the page: "window.angular is undefined. This could be either because this is a non-angular page or because your test involves client-side navigation, which can interfere with Protractor's bootstrapping. See http://git.io/v4gXM for details"
Here is the code:
conf.js
exports.config = {
    framework: 'jasmine',
    specs: ['test.js', 'chat_featuresx.js'],
    multiCapabilities: [{
        browserName: 'chrome'
    }],
    directConnect: 'true'
}
test.js
describe('First interaction customer-agent', () => {
    beforeEach(function() {
        global.agent = browser;
        global.customer = browser.forkNewDriverInstance();
        agent.ignoreSynchronization = true;
        customer.ignoreSynchronization = true;
        agent.get('http://engager-stage.brandembassy.com/');
        customer.get('https://vps-web-utils.awsbrandembassy.com/livechat-window-gherkin/');
        agent.driver.manage().window().maximize();
        customer.driver.manage().window().maximize();
    });

    it('should be seen offline when agent is offline and viceversa', () => {
        // check that default status is minimized
        browser.sleep(2000);
        expect(customer.isElementPresent(by.css('.be-chat.be-chat--minimize'))).toBe(true);
    });
});
After checking the code, a few things I noticed:
1. If you declare a variable as global, I think you need to reference it through global when using it, e.g. global.agent.ignoreSynchronization.
2. isElementPresent seems to be meant for Angular pages. In your case, I replaced it with isPresent.
Please see the sample code below:
describe('First interaction customer-agent', () => {
    var agent = browser;
    var customer = browser.forkNewDriverInstance();

    beforeEach(function() {
        agent.ignoreSynchronization = true;
        customer.ignoreSynchronization = true;
        agent.get('http://engager-stage.brandembassy.com/');
        customer.get('https://vps-web-utils.awsbrandembassy.com/livechat-window-gherkin/');
        agent.driver.manage().window().maximize();
        customer.driver.manage().window().maximize();
    });

    it('should be seen offline when agent is offline and viceversa', () => {
        // check that default status is minimized
        browser.sleep(2000);
        var elm = customer.element(by.css('[class="be-chat-wrap be-chat-wrap--minimize"]'));
        expect(elm.isPresent()).toBe(true);
    });
});
Note: I did not use global, since we can just declare the variables outside the it function.
Let's say we are using setInterval inside of a hapi plugin, like so:
// index.js
internals = {
    storage: {},
    problemFunc: (storage) => {
        setInterval(() => {
            Object.values(storage).forEach((problem) => {
                problem.foo = 'bar';
            });
        }, 500);
    }
};

module.exports.register = (server, options, next) => {
    server.on('start', () => { // called when the server starts
        internals.storage.problem1 = {};
        internals.storage.problem2 = {};
        internals.storage.problem3 = {};
        internals.problemFunc(internals.storage);
    });

    next();
};
In our tests for this server, we may start up and stop the server many times to test different aspects of it. Sometimes, we will get an error like cannot set property 'foo' of undefined. This is because the server gets shut down right before that async code runs, and the entries in internals.storage get removed right along with the server stop.
This makes total sense, and I don't have a problem with that. I'd really like to know what would be some good ways to make sure my tests work 100% of the time rather than 90% of the time.
We could do:
problemFunc: (storage) => {
    setInterval(() => {
        Object.values(storage).forEach((problem) => {
            if (problem !== undefined) { // check if deleted
                problem.foo = 'bar';
            }
        });
    }, 500);
}
or:
problemFunc: (storage = {}) => { // default assignment
    setInterval(() => {
        Object.values(storage).forEach((problem) => {
            problem.foo = 'bar';
        });
    }, 500);
}
But I would rather not add conditionals to my code just so that my tests pass. Also, this can cause issues with keeping 100% code coverage, because sometimes that conditional will get run and sometimes it won't. What would be a better way to go about this?
It's absolutely normal to have slight differences in set-up and configuration when running code in a test environment.
A simple approach is to let the application know the current environment, so it can obtain the appropriate configuration and correctly set up the service. Common environments are testing, development, staging and production.
Simple example, using an environment variable:
// env.js
module.exports.getName = function() {
    return process.env['ENV'] || 'development';
};

// main.js
if (env.getName() !== 'testing') {
    scheduleBackgroundTasks();
}
Then run your tests passing the ENV variable, or tell your test runner to do it:
ENV=testing npm test
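Applied to the plugin from the question, a minimal sketch could look like the following (assuming the env.js helper above sits next to the plugin; the path and names are illustrative):

// index.js
const env = require('./env');

module.exports.register = (server, options, next) => {
    server.on('start', () => {
        internals.storage.problem1 = {};
        internals.storage.problem2 = {};
        internals.storage.problem3 = {};

        // Only schedule the background interval outside the test environment
        if (env.getName() !== 'testing') {
            internals.problemFunc(internals.storage);
        }
    });

    next();
};

This keeps the interval callback itself free of test-only conditionals; the decision is made once, at start-up, based on the environment.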
I am writing test cases for a Node.js API, but wherever console.log() appears in the routes or services of the Node.js files, it gets printed to the CLI. Is there a way to mock these calls so that they won't get printed to the CLI?
I have explored a couple of libraries like Sinon and its stubs for mocking, but couldn't grasp how they work.
You can override the function entirely: console.log = function () {}.
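If you already have Sinon around (which you mentioned), a minimal sketch that stubs console.log per test and restores it afterwards, assuming a Mocha-style describe/it setup:

const sinon = require('sinon');

describe('my route', function () {
    beforeEach(function () {
        // Replace console.log with a no-op stub so nothing reaches the CLI
        sinon.stub(console, 'log');
    });

    afterEach(function () {
        // Put the original console.log back
        console.log.restore();
    });

    it('does not print anything', function () {
        // code under test that calls console.log() goes here
    });
});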
You should not try to mock console.log itself; a better approach is for your node modules to take a logging object. This allows you to provide an alternative (i.e. a mock) during testing. For example:
<my_logger.js>
module.exports = {
    err: function(message) {
        console.log(message);
    }
}
<my_module.js>
var DefaultLogger = require('./my_logger.js');

module.exports = function(logger) {
    this.log = logger || DefaultLogger;
    // Other setup goes here
};

module.exports.prototype.myMethod = function() {
    this.log.err('Error message.');
};
<my_module_test.js>
var MyModule = require('./my_module.js');

describe('Test Example', function() {
    var log_mock = { err: function(msg) {} };

    it('Should not output anything.', function() {
        var obj = new MyModule(log_mock);
        obj.myMethod();
    });
});
I've simplified the code here, as the actual test isn't the point of the example, merely the insertion of alternative logging.
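For completeness, a short hypothetical sketch of how production code would use the module without passing a logger, falling back to the console-backed default:

var MyModule = require('./my_module.js');

var obj = new MyModule();   // no logger passed, so DefaultLogger is used
obj.myMethod();             // prints 'Error message.' via console.log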
If you have a large codebase with lots of console.log calls, it is better to simply update the code as you add tests for each method. Making your logging pluggable in this way makes your code easier and more receptive to testing. Also, there are many logging frameworks available for node. console.log is fine during development when you just want to dump out something to see what's going on. But, if possible, try to avoid using it as your logging solution.
I could not find a solution which only hides the console.log calls in the module under test and mocks none of the calls of the testing framework (Mocha/Chai in my case).
I came up with using a copy of console in the app code:
/* console.js */
module.exports = console;

/* app.js */
const console = require('./console');
console.log("I'm hidden in the tests");

/* app.spec.js */
const mockery = require('mockery');
var app;

before(() => {
    // Enable mockery and register a mock for the console module
    mockery.enable({ warnOnUnregistered: false });
    var consoleMock = {
        log: () => {}
    };
    mockery.registerMock('./console', consoleMock);
    // Require the module under test after mocking
    app = require('./app');
});

after(() => {
    mockery.deregisterAll();
    mockery.disable();
});

it('works', () => {});
You could do something along the lines of adding these before/after blocks to your tests. The issue is that Mocha actually uses console.log to print the pretty messages about the results of the tests, so you would lose those:
describe('Test Name', function() {
    var originalLog;

    beforeEach(function() {
        originalLog = console.log;
        console.log = function () {};
    });

    // test code here

    afterEach(function() {
        console.log = originalLog;
    });
});
The problem is that your output would just look like:
Test Name
X passing (Yms)
without any intermediate text.
My goal is to get some RequireJS code working in Node via Babel. I've found that if I add the following to the top of every file while running in Node.js, things seem to work out: if (typeof define !== "function") { var define = require("amdefine")(module); }
Here is some code I wrote, which I thought would work or nearly work:
function injectDefine(babel) {
    var header = 'if (typeof define !== "function") { var define = require("amdefine")(module); }';
    return new babel.Plugin('amdefine', {
        visitor: {
            Program: {
                enter: function(path, file) {
                    path.unshiftContainer(
                        'body',
                        babel.types.expressionStatement(
                            babel.types.stringLiteral(header)
                        )
                    );
                },
            },
        },
    });
}

require('babel-core/register')({
    stage: 0,
    plugins: [{transformer: injectDefine}],
});

require('../components/button');
The components/button file is just me trying to test that some file can load.
Other notes: I'm using babel 5, and I can't upgrade right now. I also can't use a .babelrc very easily right now.
Tip 1: the environment variable BABEL_DISABLE_CACHE=1 is needed if you are doing heavy testing of plugins. If you have a script that you run like npm run unit, you may instead want to run BABEL_DISABLE_CACHE=1 npm run unit while testing your plugin.
Tip 2: babel.parse will give you a full program from some source. The easiest thing you could do is babel.parse(header).program.body[0].
The following ended up working:
function injectDefine(babel) {
    var header = 'if (typeof define !== "function") { var define = require("amdefine")(module); }';
    return new babel.Plugin('amdefine', {
        visitor: {
            Program: {
                enter: function(node, parent) {
                    node.body.unshift(
                        babel.parse(header).program.body[0]
                    );
                },
            },
        },
    });
}

require('babel-core/register')({
    cache: false,
    stage: 0,
    plugins: [injectDefine],
});
At this stage, a cleaner solution can be to use @babel/traverse and @babel/types.
Let's suppose you want to append a comment to the top of every file; you could use some code like the following:
// Import the required modules
import * as t from "@babel/types";
import traverse from "@babel/traverse";

// Get your ast (for this, you can use @babel/parser)

// Traverse your ast
traverse(ast, {
    // When the current node is the Program node (so the main node)
    Program(path) {
        // Insert at the beginning a string "Hello World" --> not valid JS code
        path.unshiftContainer('body', t.stringLiteral("Hello World"));
    }
});
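For completeness, a rough end-to-end sketch, assuming @babel/parser and @babel/generator are installed alongside the packages above; the input source is just an example, and the literal is wrapped in an ExpressionStatement so the generated output stays valid JS:

import { parse } from "@babel/parser";
import generate from "@babel/generator";
import traverse from "@babel/traverse";
import * as t from "@babel/types";

const source = "const answer = 42;";

// Build the AST from the source code
const ast = parse(source);

// Prepend a "Hello World" expression statement to the program body
traverse(ast, {
    Program(path) {
        path.unshiftContainer('body', t.expressionStatement(t.stringLiteral("Hello World")));
    }
});

// Turn the modified AST back into code
const { code } = generate(ast);
console.log(code); // "Hello World"; followed by the original statement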
I need a way to take a screenshot during a test which uses QUnit and Karma to run inside PhantomJS 2.0.1.
I've found this command:
window.top.callPhantom('render');
That doesn't throw any error but doesn't seem to work, or at least, I don't know where to look for the taken screenshot.
Any clue?
Found a way!
Solution
I had to edit my custom PhantomJS launcher, adding an option:
PhantomJSCustom: {
    base: 'PhantomJS',
    options: {
        onCallback: function(data) {
            if (data.type === "render") {
                // this function will not have the scope of karma.conf.js so we must define any global variable inside it
                if (window.renderId === undefined) { window.renderId = 0; }
                page.render(data.fname || ("screenshot_" + (window.renderId++) + ".png"));
            }
        }
    }
}
As you can see, we are defining the onCallback option; it will be injected into the script launched by PhantomJS.
The script, then, will contain:
page.onCallback = <our function>
Now, we are able to use callPhantom to ask PhantomJS to run the content of our onCallback function and use all the native PhantomJS methods.
Usage
Now, you can use the following function in your tests to take a screenshot that will be saved in the root directory of your application:
window.top.callPhantom({type: 'render'});
Additionally, if you define fname, you can set a custom path and file name for your screenshot:
window.top.callPhantom({type: 'render', fname: '/tmp/myscreen.png'});
Packing it all together for ease of use
I've created a handy function to use inside my tests. The onCallback function is reduced to the minimum necessary; this way, all the logic is managed inside my test environment:
karma.conf.js
PhantomJSCustom: {
    base: 'PhantomJS',
    options: {
        onCallback: function(data) {
            if (data.type === 'render' && data.fname !== undefined) {
                page.render(data.fname);
            }
        }
    }
}
helper
// With this function you can take screenshots in PhantomJS!
// By default, screenshots will be saved in the .tmp/screenshots/ folder with a progressive name (n.png)
var renderId = 0;

function takeScreenshot(file) {
    // check if we are in PhantomJS
    if (window.top.callPhantom === undefined) return;

    var options = {type: 'render'};
    // if the file argument is defined, we'll save the file at that path, e.g. fname: '/tmp/myscreen.png';
    // otherwise we'll save it in the default directory with a progressive name
    options.fname = file || '.tmp/screenshots/' + (renderId++) + '.png';

    // this calls the onCallback function of PhantomJS; the type 'render' will trigger the screenshot script
    window.top.callPhantom(options);
}
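A hypothetical usage inside a QUnit test (the test name and assertion are just placeholders):

QUnit.test('renders the chat widget', function (assert) {
    // ... arrange and act ...

    takeScreenshot();                        // saved as .tmp/screenshots/0.png
    takeScreenshot('/tmp/chat-widget.png');  // custom path and file name

    assert.ok(true, 'screenshot taken');
});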
Credits
I got this script from this answer, adapted it, and found out by myself where to put it to make it work with Karma.
My Karma entry for a customized PhantomJS launcher that takes snapshots looked like this:
module.exports = function (config) {
    config.set({
        ..
        browsers: ['PhantomJSCustom'],
        customLaunchers: {
            'PhantomJSCustom': {
                base: 'PhantomJS',
                options: {
                    onCallback: function(data) {
                        if (data.type === "render") {
                            // this function will not have the scope of karma.conf.js so we must define any global variable inside it
                            if (window.renderId === undefined) { window.renderId = 0; }
                            page.render(data.fname || ("screenshot_" + (window.renderId++) + ".png"));
                        }
                    }
                }
            }
        },
        phantomjsLauncher: {
            // Have phantomjs exit if a ResourceError is encountered
            // (useful if karma exits without killing phantom)
            exitOnResourceError: true
        },
        ..
    });
};