How do I read in a page from localhost into a headless Jasmine spec so test cases can work on the DOM elements?
My Gulp task is successfully running Jasmine specs for unit testing, and now I need to build integration tests to verify full web pages served from localhost. I'm using the gulp-jasmine-browser plugin to run PhantomJS.
Example:
gulpfile.js
var gulp = require('gulp');
var jasmineBrowser = require('gulp-jasmine-browser');
function specRunner() {
    gulp.src(['node_modules/jquery/dist/jquery.js', 'src/js/*.js', 'spec/*.js'])
        .pipe(jasmineBrowser.specRunner({ console: true }))
        .pipe(jasmineBrowser.headless());
}
gulp.task('spec', specRunner);
spec/cart-spec.js
describe('Cart component', function() {
    it('displays on the gateway page', function() {
        var page = loadWebPage('http://localhost/'); //DOES NOT WORK
        var cart = page.find('#cart');
        expect(cart.length).toBe(1);
    });
});
There is no loadWebPage() function. It's just to illustrate the functionality I believe is needed.
End-to-end testing frameworks such as Selenium, WebdriverIO, Nightwatch.js, Protractor and so on are more suitable in such a case.
The gulp-jasmine-browser plugin is still aimed at unit testing in a browser environment; it does not let you navigate between pages.
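For example, with something like WebdriverIO the spec from the question could be sketched roughly as follows (an illustration only, assuming a configured WebdriverIO runner with its browser, $ and expect globals, and a server already running on localhost):
describe('Cart component', () => {
    it('displays on the gateway page', async () => {
        await browser.url('http://localhost/');    // real navigation in a real browser
        const cart = await $('#cart');              // WebdriverIO element lookup
        expect(await cart.isExisting()).toBe(true);
    });
});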
I put together the following code that appears to work. Please feel free to check out my repo and confirm in your own environment.
package.json
{
    "name": "40646680",
    "version": "1.0.0",
    "main": "index.js",
    "scripts": {
        "test": "gulp jasmine"
    },
    "devDependencies": {
        "gulp": "^3.9.1",
        "gulp-jasmine-browser": "^1.7.1",
        "jasmine": "^2.5.2",
        "phantomjs": "^2.1.7"
    }
}
gulpfile.js
(() => {
    "use strict";
    var gulp = require("gulp"),
        jasmineBrowser = require("gulp-jasmine-browser");
    gulp.task("jasmine", () => {
        return gulp.src("test/*.js")
            .pipe(jasmineBrowser.specRunner({
                console: true
            }))
            .pipe(jasmineBrowser.headless());
    });
})();
test/sampleJasmine.js
describe("A suite", function() {
it("contains spec with an expectation", function() {
expect(true).toBe(true);
});
it("contains failing spec with an expectation", function() {
expect(true).toBe(false);
});
});
Execution
Bob Chatman#CHATBAG42 F:\Development\StackOverflow\40646680
> npm test
> 40646680#1.0.0 test F:\Development\StackOverflow\40646680
> gulp jasmine
[21:56:44] Using gulpfile F:\Development\StackOverflow\40646680\gulpfile.js
[21:56:44] Starting 'jasmine'...
[21:56:44] Jasmine server listening on port 8000
.F
Failures:
1) A suite contains failing spec with an expectation
1.1) Expected true to be false.
2 specs, 1 failure
Finished in 0 seconds
[21:56:49] 'jasmine' errored after 4.26 s
[21:56:49] Error in plugin 'gulp-jasmine-browser'
Message:
1 failure
npm ERR! Test failed. See above for more details.
Dependencies
node 7.2
npm 3.9.3
jasmine 2.5.2
phantomjs 2.1.7
gulp 3.9.1
jsdom to the rescue!
It turns out it's pretty easy to load a web page into a headless Jasmine spec... but you need to swap out PhantomJS for jsdom.
Strategy:
Use Jasmine's beforeAll() to call a function that will run JSDOM.fromURL() to request the web page.
Once the web page has been loaded into the DOM, expose window and jQuery for use in your test cases.
Finally, call done() to indicate the tests are now ready to run.
Make sure to close the window after the tests have run.
spec.js
const url = 'http://dnajs.org/';
const { JSDOM } = require('jsdom');
let window, $;
function loadWebPage(done) {
    function handleWebPage(dom) {
        function waitForScripts() {
            window = dom.window;
            $ = dom.window.jQuery;
            done();
        }
        dom.window.onload = waitForScripts;
    }
    const options = { resources: 'usable', runScripts: 'dangerously' };
    JSDOM.fromURL(url, options).then(handleWebPage);
}

function closeWebPage() { window.close(); }

describe('The web page', () => {
    beforeAll(loadWebPage);
    afterAll(closeWebPage);
    it('has the correct URL', () => {
        expect(window.location.href).toBe(url);
    });
    it('has exactly one header, main, and footer', () => {
        const actual = {
            header: $('body >header').length,
            main: $('body >main').length,
            footer: $('body >footer').length
        };
        const expected = { header: 1, main: 1, footer: 1 };
        expect(actual).toEqual(expected);
    });
});
Test output
Note: the screenshot above is from a similar Mocha spec, since Mocha has a nice default reporter.
Project
It's on GitHub if you want try it out yourself:
https://github.com/dnajs/load-web-page-jsdom-jasmine
EDITED: Updated for jsdom 11
Related
I am trying to transfer an old node-express project over to be able to use es6. I have seen many posts about using gulp with es6. Most of them discuss using a syntax like this:
const gulp = require("gulp");
const babel = require("gulp-babel");
gulp.src('./index.js')
    .pipe(
        babel({
            presets: [
                ["@babel/env", { modules: false }],
            ],
        })
    )
However, my existing project's gulpfile doesn't use gulp.src at all. Instead, it uses gulp-develop-server. The gulpfile looks like this:
const gulp = require("gulp");
const devServer = require("gulp-develop-server");
const spawn = require("child_process").spawn;
const fs = require("fs");
const basedir = ".";
function serverRestart(done) {
// perform some cleanup code here
devServer.restart();
done();
}
function serverStart() {
devServer.listen({
path: basedir + "/index.js",
});
}
function serverWatch() {
serverStart();
gulp.watch(
[
basedir + "/paths/**/*",
// more directories to watch
],
serverRestart
);
}
function reload(done) {
serverWatch();
done();
}
function defaultTask() {
let p;
gulp.watch(["gulpfile.js"], killProcess);
spawnChild();
function killProcess(e) {
if (p && !p.killed) {
devServer.kill();
p.kill("SIGINT");
spawnChild();
}
}
function spawnChild() {
p = spawn("gulp", ["reload"], { stdio: "inherit" });
}
}
process.stdin.resume();
process.on("exit", handleExit.bind(null, { cleanup: true }));
process.on("SIGINT", handleExit.bind(null, { exit: true }));
process.on("uncaughtException", handleExit.bind(null, { exit: true }));
function handleExit(options, err) {
// perform some cleanup code here
if (options.cleanup) {
devServer.kill();
}
if (err) {
console.log(err.stack);
}
if (options.exit) {
process.exit();
}
}
gulp.task("serverRestart", serverRestart);
gulp.task("serverStart", serverStart);
gulp.task("serverWatch", serverWatch);
gulp.task("reload", reload);
gulp.task("default", defaultTask);
The existing flow is important because it executes needed setup and cleanup code every time I hit save, which runs serverRestart. I've been trying a few different methods based on the other questions, which recommended using gulp.src().pipe(), but I haven't had much luck integrating that with the existing pattern that uses gulp-develop-server. I am trying not to have to rewrite the whole gulpfile. Is there a simple way to integrate Babel with my existing gulpfile so that I can use es6 in my source code?
There's an example with CoffeeScript in the gulp-develop-server documentation.
Using that as a model, try this:
// gulp-babel needs to be required alongside the existing requires:
const babel = require("gulp-babel");

function serverStart() {
    devServer.listen({
        path: "./dist/index.js",
    });
}

function serverWatch() {
    serverStart();
    gulp.watch(
        [
            basedir + "/paths/**/*",
        ],
        serverRestart
    );
}

function serverRestart() {
    gulp.src('./index.js')
        .pipe(
            babel({
                presets: [
                    ["@babel/env", { modules: false }],
                ],
            })
        )
        .pipe( gulp.dest( './dist' ) )
        .pipe( devServer() );
}
Other suggestions
That being said, your existing gulpfile doesn't really use Gulp: everything is defined as a plain function, and it doesn't leverage any of Gulp's useful features, like managing task dependencies. That's because, pre-es6, this was a very simple project. The Gulp tasks in that file are an over-elaborate way to watch files and run a server; the same could be done with less code using nodemon.
With the introduction of React and more complicated build processes, Gulp seems to have fallen out of favor with the community (and in my personal experience, Gulp was a time sinkhole anyhow).
If the main change you want to make is to use import, you can simply use a more recent Node version. You'll surely run into the error SyntaxError: Cannot use import statement outside a module. Simply rename the file to .mjs and it will work. This provides a way to incrementally migrate files to import syntax. Other features should automatically work (and are all backwards-compatible, anyhow). Once your project is mostly, or all, compliant, you can add "type": "module" to your package.json file, then rename all of your require-style js files to .cjs, and rename all of your .mjs files to .js, or leave them as .mjs. Read more about the rules of mixing CommonJS and Module imports in the Node.js blog post (note that some things may have changed since that article was written).
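As a rough illustration of that incremental path (the file name and route below are made up for this example):
// routes.mjs, renamed from routes.js so Node parses it as an ES module
import express from 'express';

const router = express.Router();
router.get('/health', (req, res) => res.send('ok'));

export default router;

// CommonJS files can still load it, but only via dynamic import(),
// e.g. inside an async function: const { default: router } = await import('./routes.mjs');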
I am using the Detox test tool, and I am having difficulties.
I only installed Detox and ran the basic code for the iOS test, and I get the following error.
Please help me.
Just iOS
Error Log
$ detox test --configuration ios.sim.debug --debug-synchronization --take-screenshots all --record-videos nonex --record-logs all
node_modules/.bin/jest e2e --config=e2e/config.json --maxWorkers=1 --testNamePattern='^((?!:android:).)*$'
FAIL e2e/firstTest.spec.js
● Test suite failed to run
ReferenceError: before is not defined
3 | const adapter = require('detox/runners/mocha/adapter');
4 |
> 5 | before(async () => {
| ^
6 | await detox.init(config);
7 | });
8 |
at Object.<anonymous> (init.js:5:1)
package.json
"script":{
"e2e:ios": "detox test --configuration ios.sim.debug --debug-synchronization --take-screenshots all --record-videos nonex --record-logs all",
"e2e:android": "detox test --configuration android.emu.debug --loglevel verbose --take-screenshots all --record-videos none --record-logs all"
},
dependencies": {
"detox": "^8.0.0",
"jest": "^23.1.0",
"mocha": "^5.2.0",
},
"detox": {
"configurations": {
"ios.sim.debug": {
"binaryPath": "ios/build/Build/Products/Debug-iphonesimulator/{app_name[enter image description here][1]}.app",
"build": "xcodebuild -workspace ios/{workspace_Name}.xcworkspace -scheme {scheme_name} Dev -configuration Debug -sdk iphonesimulator -derivedDataPath ios/build",
"type": "ios.simulator",
"name": "iPhone 7"
},
"android.emu.debug": {
"binaryPath": "android/app/build/outputs/apk/dev/debug/{apk_name}.apk",
"build": "react-native run-android --variant=devDebug --appId com.noahclient.dev",
"type": "android.emulator",
"name": "Nexus_5X_API_26"
}
},
"test-runner": "jest"
}
}
It looks like you are trying to run a mocha test on the jest runner: your init.js is set up for mocha, but the test runner that you are using is jest. This is confirmed by the error message node_modules/.bin/jest e2e... that you are getting.
You should pick one, either jest or mocha, and use it rather than trying to use both.
Jest
If you are using jest your init.js should look like this:
const detox = require('detox');
const config = require('../package.json').detox;
const adapter = require('detox/runners/jest/adapter');
jest.setTimeout(120000);
jasmine.getEnv().addReporter(adapter);
beforeAll(async () => {
    await detox.init(config);
});

beforeEach(async () => {
    await adapter.beforeEach();
});

afterAll(async () => {
    await adapter.afterAll();
    await detox.cleanup();
});
and you should add "test-runner": "jest" to the detox object in your package.json.
You should also have a config.json file in the same location as the init.js containing:
{
    "setupFilesAfterEnv": ["./init.js"]
}
Mocha
If you are using mocha then your init.js should look like this:
const detox = require('detox');
const config = require('../package.json').detox;
const adapter = require('detox/runners/mocha/adapter');
before(async () => {
    await detox.init(config);
});

beforeEach(async function () {
    await adapter.beforeEach(this);
});

afterEach(async function () {
    await adapter.afterEach(this);
});

after(async () => {
    await detox.cleanup();
});
and you should remove the "test-runner": "jest" from the detox object in your package.json as it is not required.
Instead of a config.json file you should have a mocha.opts file beside your init.js and it should have something similar to:
--recursive
--timeout 120000
--bail
Next steps
Choose the test runner that you want to use: either jest or mocha.
Make sure you have the correct init.js file for the test runner.
If using jest, have a config.json file and add the test-runner to the detox object in the package.json.
If using mocha, have a mocha.opts file. No need to specify a test-runner in the detox object in the package.json.
You can see the setup instructions here: https://github.com/wix/detox/blob/master/docs/Introduction.GettingStarted.md#step-3-create-your-first-test
If you are still having issues let me know.
I have installed Jasmine with npm install -g jasmine, then initialized it with jasmine init. It created spec/support, and I have placed my test.spec.js in spec/support/test.spec.js.
test.spec.js
var a = 100;
var b = 200;
describe("A suite", function() {
it("contains spec with an expectation", function() {
expect(true).toBe(true);
});
});
My jasmine.json looks like this:
{
    "spec_dir": "spec",
    "spec_files": [
        "**/*[sS]pec.js"
    ],
    "helpers": [
        "helpers/**/*.js"
    ],
    "stopSpecOnExpectationFailure": false,
    "random": false
}
Now I want to test test.spec.js by running
$ jasmine
but it always gives No specs found.
Please help me. Thanks in advance!
I wonder if there is a better way to disable console errors inside a specific Jest test (i.e. restore the original console before/after each test).
Here is my current approach:
describe("Some description", () => {
let consoleSpy;
beforeEach(() => {
if (typeof consoleSpy === "function") {
consoleSpy.mockRestore();
}
});
test("Some test that should not output errors to jest console", () => {
expect.assertions(2);
consoleSpy = jest.spyOn(console, "error").mockImplementation();
// some function that uses console error
expect(someFunction).toBe("X");
expect(consoleSpy).toHaveBeenCalled();
});
test("Test that has console available", () => {
// shows up during jest watch test, just as intended
console.error("test");
});
});
Is there a cleaner way of accomplishing the same thing? I would like to avoid spyOn, but mockRestore only seems to work with it.
For a particular spec file, Andreas's answer is good enough. The setup below will suppress console.log statements for all test suites:
jest --silent
or, to customize warn, info and debug, you can use the setup below.
tests/setup.js or jest-preload.js configured in setupFilesAfterEnv
global.console = {
    ...console,
    // uncomment to ignore a specific log level
    log: jest.fn(),
    debug: jest.fn(),
    info: jest.fn(),
    // warn: jest.fn(),
    // error: jest.fn(),
};
jest.config.js
module.exports = {
    verbose: true,
    setupFilesAfterEnv: ["<rootDir>/__tests__/setup.js"],
};
If you want to do it just for a specific test:
beforeEach(() => {
    jest.spyOn(console, 'warn').mockImplementation(() => {});
});
As every test file runs in its own thread, there is no need to restore it if you want to disable it for all tests in one file. For the same reason you can also just write:
console.log = jest.fn()
expect(console.log).toHaveBeenCalled();
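Put together, a minimal per-file sketch of that idea (the test body here is only illustrative):
console.log = jest.fn();   // silences console.log for every test in this file

test('console.log is silenced and still observable', () => {
    console.log('hidden message');   // produces no output
    expect(console.log).toHaveBeenCalledWith('hidden message');
});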
I found that the answer above re: suppressing console.log across all test suites threw errors when any other console methods (e.g. warn, error) were called since it was replacing the entire global console object.
This somewhat similar approach worked for me with Jest 22+:
package.json
"jest": {
"setupFiles": [...],
"setupTestFrameworkScriptFile": "<rootDir>/jest/setup.js",
...
}
jest/setup.js
jest.spyOn(global.console, 'log').mockImplementation(() => jest.fn());
Using this method, only console.log is mocked and other console methods are unaffected.
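For example, in any test file picked up after that setup runs (a hypothetical spec, just to show the difference):
test('only console.log is silenced', () => {
    console.log('hidden');           // mocked by the setup file, no output
    console.warn('still visible');   // unaffected, prints as usual
    expect(console.log).toHaveBeenCalledWith('hidden');
});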
To me, a clearer/cleaner way (the reader needs little knowledge of the Jest API to understand what is happening) is to just manually do what mockRestore does:
// at start of test you want to suppress
const consoleLog = console.log;
console.log = jest.fn();
// at end of test
console.log = consoleLog;
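Or, if you want the same idea applied to every test in the file (this is just the snippet above hoisted into hooks):
let originalLog;

beforeEach(() => {
    originalLog = console.log;
    console.log = jest.fn();
});

afterEach(() => {
    console.log = originalLog;
});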
beforeAll(() => {
    jest.spyOn(console, 'log').mockImplementation(() => {});
    jest.spyOn(console, 'error').mockImplementation(() => {});
    jest.spyOn(console, 'warn').mockImplementation(() => {});
    jest.spyOn(console, 'info').mockImplementation(() => {});
    jest.spyOn(console, 'debug').mockImplementation(() => {});
});
Here are all the lines you may want to use. You can put them right in the test:
jest.spyOn(console, 'warn').mockImplementation(() => {});
console.warn("You won't see me!")
expect(console.warn).toHaveBeenCalled();
console.warn.mockRestore();
Weirdly, the answers above (except Raja's great answer) seem to successfully create the mock but don't suppress the logging to the console; I wanted to share the weird way the others fail, and how to clear the mock, so no one else wastes the time I did.
Both
const consoleSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
and
global.console = {
    warn: jest.fn().mockImplementation(() => {})
}
successfully install the mock (I can use expect(console.warn).toBeCalledTimes(1) and it passes) but it still outputs the warning even though the mock implementation seemingly should be replacing the default (this is in a jsdom environment).
Eventually I found a hack to fix the problem: put the following in the file loaded with setupFiles in your config (note that I found global.$ sometimes didn't work for me when putting jQuery into the global context, so I just set all my globals this way in my setup).
const consoleWarn = jest.spyOn(console, 'warn').mockImplementation(() => {});
const consoleLog = jest.spyOn(console, 'log').mockImplementation(() => {});
const consoleDebug = jest.spyOn(console, 'debug').mockImplementation(() => {});
const consoleError = jest.spyOn(console, 'error').mockImplementation(() => {});
Object.defineProperty(global, 'console', { value: {
    warn: consoleWarn,
    log: consoleLog,
    debug: consoleDebug,
    error: consoleError
}});
It feels ugly, and I then have to put code like the following in each test file, since beforeEach isn't defined in the files referenced by setupFiles (maybe you could put both in setupFilesAfterEnv, but I haven't tried).
beforeEach(() => {
    console.warn.mockClear();
});
Since jest.spyOn doesn't work for this (it may have in the past), I resorted to jest.fn with a manual mock restoration, as pointed out in the Jest docs. This way, you should not miss any logs that are not explicitly ignored in a specific test.
const consoleError = console.error
beforeEach(() => {
    console.error = consoleError
})

test('with error', () => {
    console.error = jest.fn()
    console.error('error') // can't see me
})

test('with error and log', () => {
    console.error('error') // now you can
})
If you are using the command npm test to run tests, then change the test script in package.json like below:
{
    ....
    "name": "....",
    "version": "0.0.1",
    "private": true,
    "scripts": {
        "android": "react-native run-android",
        "ios": "react-native run-ios",
        "start": "react-native start",
        "test": "jest --silent", // add --silent to jest in script like this
        "lint": "eslint ."
    },
    ...
}
Or you can directly run the command npx jest --silent to get rid of all logs and errors when testing.
Kudos to @Raja's top answer. Here is what I am using (I would comment, but can't share a multi-line code block in a comment).
With jest v26, I'm getting this error:
We detected setupFilesAfterEnv in your package.json.
Remove it from Jest configuration, and put the initialization code in src/setupTests.js:
This file will be loaded automatically.
Therefore, I had to remove the setupFilesAfterEnv from my jest config, and add this to src/setupTests.js
// https://stackoverflow.com/questions/44467657/jest-better-way-to-disable-console-inside-unit-tests
const nativeConsoleError = global.console.error
global.console.error = (...args) => {
    if (args.join('').includes('Could not parse CSS stylesheet')) {
        return
    }
    return nativeConsoleError(...args)
}
Another approach is to use process.env.NODE_ENV. This way one can selectively choose what to show (or not) while running tests:
if (process.env.NODE_ENV === 'development') {
    console.log('Show output only while in "development" mode');
} else if (process.env.NODE_ENV === 'test') {
    console.log('Show output only while in "test" mode');
}
or
const logDev = msg => {
    if (process.env.NODE_ENV === 'development') {
        console.log(msg);
    }
}

logDev('Show output only while in "development" mode');
This will require the following configuration to be placed in package.json:
"jest": {
"globals": {
"NODE_ENV": "test"
}
}
Note that this approach is not a direct solution to the original question, but gives the expected result as long as one has the possibility to wrap the console.log with the mentioned condition.
I'm having problems testing a flatiron cli app with Mocha.
The command-line command I'd like to test creates a directory and logs success with app.log.info.
This is the code to be tested (./lib/commands/create.js):
var flatiron = require('flatiron'),
    app = flatiron.app,
    fs = require('fs'),
    path = require('path');

module.exports = function create(name, callback) {
    "use strict";
    fs.mkdir('./' + name);
    app.log.info('Directory created!');
}
This is the test (./test/create.js):
var create = require('../lib/commands/create');

describe('Flatiron command', function () {
    "use strict";
    describe('#create()', function () {
        it('should create a directory ', function () {
            create('someDirectory');
            // check if the directory was created,
            // then remove the directory
        });
    });
});
mocha test/log -R spec gives me
Flatiron command
#log()
1) should log something
✖ 1 of 1 tests failed:
1) Flatiron command #create() should create a directory :
TypeError: Cannot call method 'info' of undefined
Why is app.log not available to Mocha?
Is this because of how the function log is exported?
Or does this have something to do with how flatiron sets up the application? I tried requiring flatiron.app and starting it from the test like this:
var create = require('../lib/commands/create'),
    flatiron = require('flatiron'),
    app = flatiron.app;

describe('Flatiron command', function () {
    "use strict";
    describe('#create()', function () {
        it('should create a directory ', function () {
            app.start();
            create('someDirectory');
        });
    });
});
- but with no success, just a different error:
Flatiron command
#create()
1) should create a directory
✖ 1 of 1 tests failed:
1) Flatiron command #create() should create a directory :
TypeError: Object [object Object] has no method 'start'
Or is this a case where you would use spies/stubs/mocks with something like sinon.js to simulate the behavior of app.log somehow? Because I'm not really interested in whether the logging works, but in whether the directory is created.
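Something like this rough sketch is what I have in mind (assuming sinon is installed; app.log is simply replaced before the call, which I haven't verified flatiron allows):
var sinon = require('sinon'),
    create = require('../lib/commands/create'),
    flatiron = require('flatiron'),
    app = flatiron.app;

describe('Flatiron command', function () {
    "use strict";
    describe('#create()', function () {
        it('should create a directory ', function () {
            app.log = { info: sinon.stub() };   // stub the logger so create() can call it
            create('someDirectory');
            // assert on the filesystem here; the logging itself is irrelevant
        });
    });
});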
Ok, I've got it.
Using app.start() was not quite it - but it works with app.init().
In flatiron, app.init() is normally called from within the main file by plugging flatiron.plugins.cli into app.use() like this:
var flatiron = require('flatiron'),
    path = require('path'),
    app = flatiron.app;

app.config.file({ file: path.join(__dirname, '..', 'config', 'config.json') });

app.use(flatiron.plugins.cli, {
    dir: path.join(__dirname, '..', 'lib', 'commands'),
    usage: 'Empty Flatiron Application, please fill out commands'
});

app.start();
Calling app.init() sets up logging with winston, the flatiron logging plugin.
But you can call app.init() from within the test without calling app.start() after it.
So this works:
var create = require('../lib/commands/create'),
    flatiron = require('flatiron'),
    app = flatiron.app;

describe('Flatiron command', function () {
    "use strict";
    describe('#create()', function () {
        it('should create a directory ', function () {
            app.init();
            create('someDirectory');
        });
    });
});
Mocha even takes care of the logging:
Flatiron command
#create()
◦ should create a directory : info: Directory created!
✓ should create a directory (48ms)
✔ 1 tests complete (50ms)
If you want to stop the logging, you can use app.log.loggers.default.remove(winston.transports.Console) after you've called app.init(). You have to require winston to do this.
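Applied to the spec above, that would look roughly like this (an untested sketch; winston must be resolvable from the test):
var create = require('../lib/commands/create'),
    flatiron = require('flatiron'),
    winston = require('winston'),
    app = flatiron.app;

describe('Flatiron command', function () {
    "use strict";
    describe('#create()', function () {
        it('should create a directory ', function () {
            app.init();
            app.log.loggers.default.remove(winston.transports.Console);   // stop console logging
            create('someDirectory');
        });
    });
});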