I needed to execute shell commands and keep the process alive to keep the changes made in the env variables, so I made a function that looks like this:
import { spawn } from 'child_process';
function exec(command) {
const bash = spawn('/bin/bash', {
detached: true
});
bash.stdout.setEncoding('utf-8');
bash.stderr.setEncoding('utf-8');
return new Promise(resolve => {
const result = {
stdout: '',
stderr: '',
code: 0
};
bash.stdout.on('data', data => {
// Removes the last line break (i.e \n) from the data.
result.stdout += data.substring(0, data.length - 1);
// Gets the code by getting the number before the `:` on the last line
result.code = Number(result.stdout.split('\n').pop().split(':')[0]);
resolve(result);
});
bash.stderr.on('data', err => {
result.stderr += err;
resolve(result);
});
// Writes the passed command and another to check the code of the command
bash.stdin.write(command + `; echo "$?:${ command }"\n`);
});
}
The test is simple:
it('should run a command that outputs an error', async () => {
// Notice the typo in `echoo`
const res = await exec('echoo test');
expect(res.code).toBe(127);
});
When running this test, it sometimes fails with res.code being 0 instead of 127.
When testing without jest, it works flawlessly 100% of the time.
Related
My test is failing whenever i used middy, but when i remove it, my test succeed. the error that im encountering is "TypeError: callback is not a function at terminate (C:\cico\node_modules\middy\src\middy.js:152:16)"
I'm stuck and I need help.
command: yarn run jest
Heres my code:
test files:
describe('Partners', () => {
describe('GET /partners', () => {
test('should return partner list.', async () => {
const result = await getPartner();
const body = JSON.parse(result.body);
expect(result.statusCode).toBe(httpStatus.OK);
expect(body.data.length).toBe(1);
});
});
});
handler:
exports.getPartner = middy(async (event) => {
// logic from db calling data
});
middy:
module.exports = (fn) => {
return middy(fn).use(jsonBodyParser()).use(httpEventNormalizer());
};
solved: use middy/core instead of middy itself.
yarn add / npm i #middy/core instead of npm/yarn middy
for more info -> https://middy.js.org/docs/category/intro-to-middy
I'm trying to run 3 bash scripts depending on what the JSON list sent from the client specifies, then return their outputs to a JSON dict which will be sent to the client again.
This is the code of the three scripts:
marc#linux:~ $ cat 1.sh
sleep 1
echo -n "a"
marc#linux:~ $ cat 2.sh
sleep 1
echo -n "b"
marc#linux:~ $ cat 3.sh
sleep 1
echo -n "c"
If I executed them synchronously, they might stop the event loop for 3 seconds (undesirable):
const express = require("express");
const app = express();
app.use(express.json())
const cp = require("child_process");
app.get("/", (request, response) => {
console.log(request.body);
var response_data = {};
if (request.body.includes("script1")) {
response_data.value1 = cp.execFileSync("./1.sh").toString();
}
if (request.body.includes("script2")) {
response_data.value2 = cp.execFileSync("./2.sh").toString();
}
if (request.body.includes("script3")) {
response_data.value3 = cp.execFileSync("./3.sh").toString();
}
response.json(response_data);
response.status(200)
response.send()
})
app.listen(8080, () => {
console.log("ready");
})
And if I executed them asynchronously, they would return after the response is sent, where the response would be just {}
My intended flow chart is that if I send ["script1", "script3"], it should return {"value1": "a", "value3": "c"} when the 1.sh and 3.sh are done executing, and without blocking the event loop.
Example
How do I implement callbacks/promises in such scenario?
Use Promise.all() for that:
const run = (script) => new Promise((resolve, reject) => {
// spawns cp for script
// without any args and options
cp.execFile(`./${script.slice(-1)}.sh`, null, null, (res, err) => {
if (err) reject(err);
// returns res through callback
resolve(res);
});
});
const runScripts = (req) => new Promise((resolve, reject) => {
const
result = {},
promises = [];
// creates promise for each script in request
for (script of req) {
// pushes it to the required array of promises for Promise.all
promises.push(new Promise((resolve, reject) => {
// runs script
run(script).then((res) => {
// gets scripts result
// and writes it
result[`value${script.slice(-1)}`] = res;
resolve();
});
}));
};
// uses Promise.all to wait until all scripts are done
Promise.all(promises).then(() => {
// when each script is done
// finally returns final result
resolve(result);
});
});
// put req as your request.body
runScripts(req).then((res) => {
// sends final result as response
response.status(200).json(result);
}).catch((err) => {
// if something goes wrong
response.status(500).json('Something broke!');
});
I couldn't check this code because because I have Windows.
If I try to execute scripts with child_process, it's tells me that I'm trying to execute UNKNOWN, even if I used sync function in test without anything, just console.log(cp.execFileSync("./1.sh").toString());. But, it worked for you.
So try it and tell me if it works or not.
P.S. Edited for error handling.
This might be a case of 'you're using the wrong tools for the job' but I'm going to shoot my question anyways, because this is what I have to work with for now.
So, here goes:
I have to make relatively small applications that periodically run as functions in an Azure environment. These applications perform tasks like fetching data from an API and storing that data on a SFTP server. When I create these applications I use a TDD approach with Jest.
I'd like to react to any problems proactively and solve them before the function runs are scheduled. If I run Jest locally I would notice any of these problems but I'd like to automate this proces. Therefor I'd like to know if it's possible to run these tests from an Azure function and have Azure Warnings notify me when one these runs fail.
What have I tried?
Created new function folder "Jest_Function"
Added an always failing test in a separate file.
/main_functions_folder
/jest_function
- index.js
- function.json
- failingTest.test.js
added the following code to index.js:
const { exec } = require('child_process');
function checkTests() {
return new Promise((resolve, reject) => {
exec('npm run test failingTest.test.js', (error) => {
if (error) reject(error);
else resolve();
});
});
}
module.exports = async function (context) {
try {
await checkTests();
} catch (err) {
context.log('tests failed!');
throw err;
}
};
Transforming the function and running it in the terminal results in expected behaviour:
const { exec } = require('child_process');
function checkTests() {
return new Promise((resolve, reject) => {
exec('npm run test failingTest.test.js', (error) => {
if (error) reject(error);
else resolve();
});
});
}
async function myTest() {
try {
await checkTests();
} catch (err) {
console.log('tests failed!');
throw err;
}
}
myTest();
tests failed!
node:child_process:399
ex = new Error('Command failed: ' + cmd + '\n' + stderr);
^
Error: Command failed: npm run test failingTest.test.js
FAIL jest_function/failingTest.test.js
✕ short test (3 ms)
● short test
expect(received).toBe(expected) // Object.is equality
Expected: 1
Received: 0
1 | test('short test', () => {
> 2 | expect(0).toBe(1);
| ^
3 | });
4 |
at Object.<anonymous> (jest_function/failingTest.test.js:2:13)
Test Suites: 1 failed, 1 total
Tests: 1 failed, 1 total
Snapshots: 0 total
Time: 0.227 s, estimated 1 s
Ran all test suites matching /failingTest.test.js/i.
at ChildProcess.exithandler (node:child_process:399:12)
at ChildProcess.emit (node:events:520:28)
at maybeClose (node:internal/child_process:1092:16)
at Process.ChildProcess._handle.onexit (node:internal/child_process:302:5) {
killed: false,
code: 1,
signal: null,
cmd: 'npm run test failingTest.test.js'
}
Azure
I deployed the function in Azure and manualy ran it. This resulted in a failing function as I expected, but for the wrong reason. It displayed the following error message:
Result: Failure Exception: Error: Command failed: npm run test failingTest.test.js sh: 1: jest: Permission denied
I'm not really sure where to go from here, any help or advice will be appreciated!
Not sure if you can use jest directly from within Functions but I know you can run pupeteer headless in Azure Functions:
https://anthonychu.ca/post/azure-functions-headless-chromium-puppeteer-playwright/
and there's also jest-pupeteer package but not sure if there is a specific limitation on jest in Functions if all of the deps are installed as runtime dependencies.
I was able to make this work using npx instead of npm:
const { exec } = require('child_process');
function checkTests() {
return new Promise((resolve, reject) => {
exec('npx jest jest_function/failingTest.test.js', (error) => {
if (error) reject(error);
else resolve();
});
});
}
module.exports = async function (context) {
try {
await checkTests();
} catch (err) {
context.log('tests failed!');
throw err;
}
};
Looking at the logs I'm not really sure what '330' exactly is, but is assume it is installing jest?
2022-04-19T09:54:06Z [Information] Error: Command failed: npx jest
npx: installed 330 in 32.481s
Anyways I'm glad I got this working now :).
I have a suite of tests written with Playwright. I am trying to execute these tests from a separate suite of tests. In this separate suite of tests, I need to examine the results of the original results. This leads me to the following directory structure:
/
/checks
checks1.spec.js
/tests
tests1.spec.js
tests2.spec.js
playwright.config.js
My files look like this:
playwright.config.js
// #ts-check
const { devices } = require('#playwright/test');
/**
* #see https://playwright.dev/docs/test-configuration
* #type {import('#playwright/test').PlaywrightTestConfig}
*/
const config = {
testDir: '.',
timeout: 30 * 1000,
expect: {
timeout: 5000
},
forbidOnly: !!process.env.CI,
retries: process.env.CI ? 2 : 0,
workers: process.env.CI ? 1 : undefined,
reporter: [
['html', { outputFolder: 'reports' } ]
],
use: {
actionTimeout: 0,
trace: 'on-first-retry',
},
/* Configure projects for major browsers */
projects: [
{
name: 'chromium',
use: {
...devices['Desktop Chrome'],
},
}
]
};
module.exports = config;
tests1.spec.js
const { test, expect } = require('#playwright/test');
test.describe('Field Tests', () => {
test('Should be required', async({ page }) => {
await page.goto('http://localhost:8080');
await expect(true).toBe(true);
});
});
checks1.spec.js
const { exec } = require('child_process');
const { test, expect } = require('#playwright/test');
const command = 'npx playwright test --reporter=json ./tests/tests1.spec.js';
test.describe('Field', () => {
test(`Require state`, async () => {
const { stdout, stderr } = await exec(command);
// Execute the test and get the results
const buffer = child_process.execSync(command);
const json = JSON.parse(buffer.toString());
const results = json.suites[0].suites[0].specs;
const status = results[0].tests[0].results[0].status;
expect(status).toBe('passed');
});
});
When I run npx playwright test --reporter=json ./tests/test1.spec.js, I receive the JSON output as I would expect. However, when I run npx playwright test checks, I receive a Socket instead of the JSON. How do I run the command and wait for the JSON to be returned? Thank you.
It is not clear why your are executing tour command twice (once with await keyword and once with execSync function). However it can be resolved using events:
test.describe('Field', () => {
test(`Require state`, async () => {
const process = await exec(command);
process.stdout.on('data', data => {
// do something with the data here
})
process.on('exit', () => {
// final checks (e.g. - expect) go here
})
});
});
If you want you can build a helper that will return the execution result as promise, thus support the await + async keywords
async cmd(command) {
return new Promise((resolve, reject) => {
const process = await exec(command);
let result = data
process.stdout.on('data', data => {
result = data // any other format manipulations should be added here
})
process.stderr.on('data', reject)
process.on('exit', () => resolve(result))
})
}
Then, somewhere in your code just use:
const result = await cmd('your command goes here')
As per the question, I think you want to execute the playwright for the tests folder first and then wait for the JSON to generate. And, after the JSON is generated, you would like to execute playwright for checks folder.
For this, you can simply split your command into two parts as follow:
npx playwright tests --reporter=json
npx playwright checks
And then execute them as one command using && operator as follow:
npx playwright tests --reporter=json && npx playwright checks.
This way, the tests are execute first, the JSON would be generated at the end of the test. And after JSON is generated, the checks would be executed.
Explaination:
In you command, you were executing npx playwright test checks, since playwrite generates the report at the end of execution (to consolidate all the results), the JSON will be generated after check folder execution is done.
Hence we simply split that command into 2. First we will generate JSON report for tests and then we will execute checks.
I am making an application which reads a file and extracts the data line by line. Each line is then stored as an array element and that array is required as the final output.
const fs = require('fs');
const readline = require('readline');
var output_array=[];
const readInterface = readline.createInterface({
input: fs.createReadStream(inp_file),
console: false
});
readInterface.on('line', function(line) {
//code for pushing lines into output_array after doing conditional formatting
}
callback (null, output_array);
With this I am getting an empty array.
Though if I use 'setTimeout' then it's working fine
setTimeout(() => {
callback (null, output_array);
}, 2000);
How can I execute this synchronously without having to use 'setTimeout'?
You cannot execute asynchronous functions synchronously. But readline supports the close event, which is triggered when the inputfile is completely read, so you can call the callback there.
readInterface.on('close', () => {
callback(null, output_array);
});
Of course you could output the content on the close event, but you could also use a promise like this:
function readFile(fileName) {
return new Promise((resolve, reject) => {
const readInterface = readline.createInterface({
input: fs.createReadStream(fileName).on('error', error => {
// Failed to create stream so reject.
reject(error);
}),
});
const output = [];
readInterface.on('error', error => {
// Some error occurred on stream so reject.
reject(error);
});
readInterface.on('line', line => {
output.push(line);
});
readInterface.on('close', () => {
// Resolve the promise with the output.
resolve(output);
});
});
}
readFile(inp_file)
.then(response => {
callback(null, response);
})
.catch(error => {
// Take care of any errors.
});
I removed the console setting when calling createInterface() since it does not seem to be a valid option.
Make your callback from readline's close event handler. That way your callback will happen after all lines are read.
readInterface.on('close', function(line) {
callback (null, output_array);
}