I am trying to get the lines a ('never-ending') Python script writes to stdout, but currently my code only logs something to the console when the Python process exits. Is there a way I can get the 'live' output of the Python script line by line?
spawn_child.js:
let execFile = require("child_process").execFile;
var child = execFile("python3", ["PATH_TO_FILE"]);
child.stdout.on("data", data=>{
console.log(data.toString());
});
child.stderr.on("data", data=>{
console.log(data.toString());
});
child.on("exit", code=>{
console.log("Child exited with code "+code);
});
The python file:
from time import sleep
while True:
    sleep(3)
    print("test")
Edit: It works when using a Node.js script instead of a Python script.
Change the Python script to:
import time
import sys

while True:
    time.sleep(1)
    print("test")
    sys.stdout.flush()
and increase the buffer size of the child process:
const child = execFile("python", ["./runner.py"], {
    detached: true,
    maxBuffer: 10 * 1024 * 1024 * 1024
});
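The root cause, by the way, is that Python block-buffers stdout when it isn't attached to a terminal. If you'd rather not touch the Python code, a minimal sketch of an alternative (assuming the same ./runner.py) is to start the interpreter unbuffered with the -u flag:
const { execFile } = require("child_process");

// -u disables Python's output buffering, so each print() arrives immediately
const child = execFile("python3", ["-u", "./runner.py"]);

child.stdout.on("data", data => {
    console.log(data.toString());
});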
Or you can do it without flushing stdout by using python-shell:
const { PythonShell } = require('python-shell');
let pyshell = new PythonShell('runner.py');
pyshell.on('message', function (message) {
console.log(message);
});
pyshell.end(function (err, code, signal) {
if (err) throw err;
console.log('The exit code was: ' + code);
console.log('The exit signal was: ' + signal);
console.log('finished');
});
Use spawn instead of execFile, and don't forget the shell and stdio options.
const spawn = require("child_process").spawn;
// With stdio: 'inherit' the child's stdout/stderr go straight to this process's
// terminal, so no 'data' handlers are needed (child.stdout is null in this mode).
const child = spawn("python3", ["file.py"], {shell: true, stdio: 'inherit'});
child.on('close', function(code) {
    console.log('Child process exited with exit code ' + code);
});
You can also add the cwd option.
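If you need to capture the output in your own code rather than inherit the parent's terminal, a minimal sketch with the default piped stdio looks like this (combine it with the stdout flushing described above, or Python's -u flag, so the output stays live):
const spawn = require("child_process").spawn;

// Default stdio is 'pipe', so child.stdout and child.stderr are readable streams
const child = spawn("python3", ["file.py"]);

child.stdout.on("data", data => {
    console.log(data.toString());
});
child.on("close", code => {
    console.log("Child process exited with exit code " + code);
});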
I was trying to implement something similar inside a Next.js application and wanted live output from my Python script. Using python-shell, I had the same issue that it only gave me output when the process exited, so I ended up using node-pty instead, which worked as expected:
import { spawn } from "node-pty"
const pyProcess = spawn("python", ["path/to/python/script"], {
name: 'xterm-color',
cols: 80,
rows: 30,
cwd: process.cwd(),
});
pyProcess.on('data', function (data: { toString: () => any; }) {
console.log(data.toString());
});
pyProcess.on('exit', (code: any) => {
console.log(`child process exited with code ${code}`);
});
I'm trying to create a simple script to run a few Node files. These files are just APIs for a quote database, authentication, and a Twitch bot.
The quote and authentication APIs work just fine; however, the Twitch bot does not. It says it's connected to the server, but when it goes to execute any command it gets an unhandled promise rejection or some such error.
However, when I run the Twitch bot separately in its own terminal session, and not from the script, it works just fine with no thrown errors.
Why will it work from a separate session that I create but not from a child_process?
Note: I have had the child_process start up a new shell and it's still the same issue.
The process script:
const exec = require('child_process').exec;
const spawn = require('child_process').spawn
// I think the twitch bot process has an issue with how much
// data is being passed to stdout or stderr and I need
// to specify how much data is allowed
twitchBot = exec('node ./twitchbot-api/index.js',
function(error, stdout, stderr){
console.log('stdout: ' + stdout);
console.log('stderr: ' + stderr);
if(error !== null){
console.log('exec error: ' + error);
}
})
twitchBot.stdout.on('data', (data) => {
console.log(`twitchBot stdout:\n${data}`);
});
// Whenever I receive an error from twitchBot it also displays in the parent process
twitchBot.stderr.on('data', (data) => {
console.error(`twitchBot stderr:\n${data}`);
});
// This throws an error the first time I try a command, saying not connected to server; I think I need to do this as exec
// let twitchBot = spawn('node ./twitchbot-api/index.js',{
// stdio: 'inherit',
// shell: true,
// detached: true,
// })
// //twitchBot.unref();
// twitchBot.on('error', (error)=>{
// console.log(`the error ${error}`)
// })
//starts up a child exec process for my quotes database
let quoteDataBase = exec('node ./quotes-api/index.js',
function(error, stdout, stderr){
console.log('stdout: ' + stdout);
console.log('stderr: ' + stderr);
if(error !== null){
console.log('exec error: ' + error);
}
})
// Whatever the child exec process quoteDataBase pushes to standard out (console) displays on the parent process,
// aka the terminal I run apiStart from
quoteDataBase.stdout.on('data', (data) => {
console.log(`quoteDataBase stdout:\n${data}`);
});
// Whenever I receive an error from quoteDataBase it also displays in the parent process
quoteDataBase.stderr.on('data', (data) => {
console.error(`quoteDataBase stderr:\n${data}`);
});
//starts up a child process for my user Authentication
let AuthenDataBase = exec('node ./authen-api/index.js',
function(error, stdout, stderr){
console.log('stdout: ' + stdout);
console.log('stderr: ' + stderr);
if(error !== null){
console.log('exec error: ' + error);
}
})
And then the twitch bot code:
const tmi = require('tmi.js')
const haikudos = require('haikudos')
require('dotenv').config()
require('es6-promise').polyfill();
require('isomorphic-fetch');
// Valid commands start with:
let commandPrefix = '!'
// Define configuration options:
let opts = {
identity: {
username: process.env.user,
password: process.env.pass
},
channels: [
"dshrops1"
]
}
// These are the commands the bot knows (defined below):
let knownCommands = { echo, haiku, quote }
// Function called when the "echo" command is issued:
function echo (target, context, params) {
// If there's something to echo:
if (params.length) {
// Join the params into a string:
const msg = params.join(' ')
// Send it back to the correct place:
sendMessage(target, context, msg)
} else { // Nothing to echo
console.log(`* Nothing to echo`)
}
}
// Function called when the "haiku" command is issued:
function haiku (target, context) {
// Generate a new haiku:
haikudos((newHaiku) => {
// Split it line-by-line:
newHaiku.split('\n').forEach((h) => {
// Send each line separately:
sendMessage(target, context, h)
})
})
}
async function quote (target, context){
// Can't deploy this on AWS yet until I deploy my database API as well.
let quote = await fetch('http://localhost:3006/random').then(resp =>resp.text())
sendMessage(target,context,quote)
}
// Helper function to send the correct type of message:
function sendMessage (target, context, message) {
if (context['message-type'] === 'whisper') {
client.whisper(target, message)
} else {
client.say(target, message)
}
}
// Create a client with our options:
let client = new tmi.client(opts)
// Register our event handlers (defined below):
client.on('message', onMessageHandler)
client.on('connected', onConnectedHandler)
client.on('disconnected', onDisconnectedHandler)
// Connect to Twitch:
client.connect()
// Called every time a message comes in:
function onMessageHandler (target, context, msg, self) {
if (self) { return } // Ignore messages from the bot
// This isn't a command since it has no prefix:
if (msg.substr(0, 1) !== commandPrefix) {
console.log(`[${target} (${context['message-type']})] ${context.username}: ${msg}`)
return
}
// Split the message into individual words:
const parse = msg.slice(1).split(' ')
// The command name is the first (0th) one:
const commandName = parse[0]
// The rest (if any) are the parameters:
const params = parse.splice(1)
// If the command is known, let's execute it:
if (commandName in knownCommands) {
// Retrieve the function by its name:
const command = knownCommands[commandName]
// Then call the command with parameters:
command(target, context, params)
console.log(`* Executed ${commandName} command for ${context.username}`)
} else {
console.log(`* Unknown command ${commandName} from ${context.username}`)
}
}
// Called every time the bot connects to Twitch chat:
function onConnectedHandler (addr, port) {
console.log(`* Connected to ${addr}:${port}`)
}
// Called every time the bot disconnects from Twitch:
function onDisconnectedHandler (reason) {
console.log(`Womp womp, disconnected: ${reason}`)
process.exit(1)
}
Sorry about any formatting issues.
Despite whatever issues there might be with the Twitch bot, it works fine when I run it from a separate terminal. I am curious why it does not work from the script and what I can do about it.
Note: I figured it might have to do with the Twitch API being an IRC connection.
I am in the process of porting a CLI library from Ruby over to Node.js. In my code I execute several third party binaries when necessary. I am not sure how best to accomplish this in Node.
Here's an example in Ruby where I call PrinceXML to convert a file to a PDF:
cmd = system("prince -v builds/pdf/book.html -o builds/pdf/book.pdf")
What is the equivalent code in Node?
For even newer versions of Node.js (v8.1.4), the events and calls are similar or identical to older versions, but it's encouraged to use the standard newer language features. Examples:
For buffered, non-stream formatted output (you get it all at once), use child_process.exec:
const { exec } = require('child_process');
exec('cat *.js bad_file | wc -l', (err, stdout, stderr) => {
if (err) {
// node couldn't execute the command
return;
}
// the *entire* stdout and stderr (buffered)
console.log(`stdout: ${stdout}`);
console.log(`stderr: ${stderr}`);
});
You can also use it with Promises:
const util = require('util');
const exec = util.promisify(require('child_process').exec);
async function ls() {
const { stdout, stderr } = await exec('ls');
console.log('stdout:', stdout);
console.log('stderr:', stderr);
}
ls();
If you wish to receive the data gradually in chunks (output as a stream), use child_process.spawn:
const { spawn } = require('child_process');
const child = spawn('ls', ['-lh', '/usr']);
// use child.stdout.setEncoding('utf8'); if you want text chunks
child.stdout.on('data', (chunk) => {
// data from standard output is here as buffers
});
// since these are streams, you can pipe them elsewhere
child.stderr.pipe(dest);
child.on('close', (code) => {
console.log(`child process exited with code ${code}`);
});
Both of these functions have a synchronous counterpart. An example for child_process.execSync:
const { execSync } = require('child_process');
// stderr is sent to stderr of parent process
// you can set options.stdio if you want it to go elsewhere
let stdout = execSync('ls');
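For example (a minimal sketch, not from the original answer), you can pass an stdio array to redirect or discard individual streams while still capturing stdout:
const { execSync } = require('child_process');
// inherit stdin, capture stdout (required to get a return value), discard stderr
let stdout = execSync('ls', { stdio: ['inherit', 'pipe', 'ignore'] });
console.log(stdout.toString());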
As well as child_process.spawnSync:
const { spawnSync} = require('child_process');
const child = spawnSync('ls', ['-lh', '/usr']);
console.log('error', child.error);
console.log('stdout ', child.stdout);
console.log('stderr ', child.stderr);
Note: The following code is still functional, but is primarily targeted at users of ES5 and before.
The module for spawning child processes with Node.js is well documented in the documentation (v5.0.0). To execute a command and fetch its complete output as a buffer, use child_process.exec:
var exec = require('child_process').exec;
var cmd = 'prince -v builds/pdf/book.html -o builds/pdf/book.pdf';
exec(cmd, function(error, stdout, stderr) {
// command output is in stdout
});
If you need to handle process I/O with streams, such as when you are expecting large amounts of output, use child_process.spawn:
var spawn = require('child_process').spawn;
var child = spawn('prince', [
'-v', 'builds/pdf/book.html',
'-o', 'builds/pdf/book.pdf'
]);
child.stdout.on('data', function(chunk) {
// output will be here in chunks
});
// or if you want to send output elsewhere
child.stdout.pipe(dest);
If you are executing a file rather than a command, you might want to use child_process.execFile, whose parameters are almost identical to spawn's, but which has a fourth callback parameter like exec for retrieving output buffers. That might look a bit like this:
var execFile = require('child_process').execFile;
execFile(file, args, options, function(error, stdout, stderr) {
// command output is in stdout
});
As of v0.11.12, Node now supports synchronous spawn and exec. All of the methods described above are asynchronous, and have a synchronous counterpart. Documentation for them can be found here. While they are useful for scripting, do note that unlike the methods used to spawn child processes asynchronously, the synchronous methods do not return an instance of ChildProcess.
Node JS v15.8.0, LTS v14.15.4, and v12.20.1 --- Feb 2021
Async method (Unix):
'use strict';
const { spawn } = require( 'child_process' );
const ls = spawn( 'ls', [ '-lh', '/usr' ] );
ls.stdout.on( 'data', ( data ) => {
console.log( `stdout: ${ data }` );
} );
ls.stderr.on( 'data', ( data ) => {
console.log( `stderr: ${ data }` );
} );
ls.on( 'close', ( code ) => {
console.log( `child process exited with code ${ code }` );
} );
Async method (Windows):
'use strict';
const { spawn } = require( 'child_process' );
// NOTE: Windows users, this command appears to differ for a few users.
// You can think of this as using Node to execute things in your Command Prompt.
// If `cmd` works there, it should work here.
// If you have an issue, try `dir`:
// const dir = spawn( 'dir', [ '.' ] );
const dir = spawn( 'cmd', [ '/c', 'dir' ] );
dir.stdout.on( 'data', ( data ) => console.log( `stdout: ${ data }` ) );
dir.stderr.on( 'data', ( data ) => console.log( `stderr: ${ data }` ) );
dir.on( 'close', ( code ) => console.log( `child process exited with code ${code}` ) );
Sync:
'use strict';
const { spawnSync } = require( 'child_process' );
const ls = spawnSync( 'ls', [ '-lh', '/usr' ] );
console.log( `stderr: ${ ls.stderr.toString() }` );
console.log( `stdout: ${ ls.stdout.toString() }` );
From Node.js v15.8.0 Documentation
The same goes for Node.js v14.15.4 Documentation and Node.js v12.20.1 Documentation
You are looking for child_process.exec.
Here is an example:
const exec = require('child_process').exec;
const child = exec('cat *.js bad_file | wc -l',
(error, stdout, stderr) => {
console.log(`stdout: ${stdout}`);
console.log(`stderr: ${stderr}`);
if (error !== null) {
console.log(`exec error: ${error}`);
}
});
Since version 4, the closest alternative is the child_process.execSync method:
const {execSync} = require('child_process');
let output = execSync('prince -v builds/pdf/book.html -o builds/pdf/book.pdf');
⚠️ Note that the execSync call blocks the event loop.
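Since execSync throws when the command exits with a non-zero code, and returns a Buffer unless an encoding is given, a small sketch for handling both (using the same prince command) might look like:
const { execSync } = require('child_process');

try {
    const output = execSync('prince -v builds/pdf/book.html -o builds/pdf/book.pdf',
        { encoding: 'utf8' }); // get a string back instead of a Buffer
    console.log(output);
} catch (err) {
    // err.status is the exit code; err.stderr holds the captured error output
    console.error('exit code:', err.status);
    console.error(err.stderr);
}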
Now you can use shelljs (from node v4) as follows:
var shell = require('shelljs');
shell.echo('hello world');
shell.exec('node --version');
Install with
npm install shelljs
See https://github.com/shelljs/shelljs
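If you also want the command's output in a variable, shell.exec returns an object carrying the exit code and captured streams; a minimal sketch (silent: true just stops shelljs from echoing the output):
var shell = require('shelljs');

var result = shell.exec('node --version', { silent: true });
console.log('exit code: ' + result.code);
console.log('output: ' + result.stdout.trim());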
const exec = require("child_process").exec
exec("ls", (error, stdout, stderr) => {
//do whatever here
})
If you want something that closely resembles the top answer but is also synchronous then this will work.
var execSync = require('child_process').execSync;
var cmd = "echo 'hello world'";
var options = {
encoding: 'utf8'
};
console.log(execSync(cmd, options));
I just wrote a CLI helper to deal with Unix/Windows easily.
JavaScript:
define(["require", "exports"], function (require, exports) {
/**
* Helper to use the Command Line Interface (CLI) easily with both Windows and Unix environments.
* Requires underscore or lodash as global through "_".
*/
var Cli = (function () {
function Cli() {}
/**
* Execute a CLI command.
* Manages Windows and Unix environments and tries to execute the command on both if one fails.
* Order: Windows -> Unix.
*
* @param command Command to execute. ('grunt')
* @param args Args of the command. ('watch')
* @param callback Success.
* @param callbackErrorWindows Failure on Windows env.
* @param callbackErrorUnix Failure on Unix env.
*/
Cli.execute = function (command, args, callback, callbackErrorWindows, callbackErrorUnix) {
if (typeof args === "undefined") {
args = [];
}
Cli.windows(command, args, callback, function () {
callbackErrorWindows();
try {
Cli.unix(command, args, callback, callbackErrorUnix);
} catch (e) {
console.log('------------- Failed to perform the command: "' + command + '" on all environments. -------------');
}
});
};
/**
* Execute a command on Windows environment.
*
* @param command Command to execute. ('grunt')
* @param args Args of the command. ('watch')
* @param callback Success callback.
* @param callbackError Failure callback.
*/
Cli.windows = function (command, args, callback, callbackError) {
if (typeof args === "undefined") {
args = [];
}
try {
Cli._execute(process.env.comspec, _.union(['/c', command], args));
callback(command, args, 'Windows');
} catch (e) {
callbackError(command, args, 'Windows');
}
};
/**
* Execute a command on Unix environment.
*
* @param command Command to execute. ('grunt')
* @param args Args of the command. ('watch')
* @param callback Success callback.
* @param callbackError Failure callback.
*/
Cli.unix = function (command, args, callback, callbackError) {
if (typeof args === "undefined") {
args = [];
}
try {
Cli._execute(command, args);
callback(command, args, 'Unix');
} catch (e) {
callbackError(command, args, 'Unix');
}
};
/**
* Execute a command no matter what the environment is.
*
* @param command Command to execute. ('grunt')
* @param args Args of the command. ('watch')
* @private
*/
Cli._execute = function (command, args) {
var spawn = require('child_process').spawn;
var childProcess = spawn(command, args);
childProcess.stdout.on("data", function (data) {
console.log(data.toString());
});
childProcess.stderr.on("data", function (data) {
console.error(data.toString());
});
};
return Cli;
})();
exports.Cli = Cli;
});
Typescript original source file:
/**
* Helper to use the Command Line Interface (CLI) easily with both Windows and Unix environments.
* Requires underscore or lodash as global through "_".
*/
export class Cli {
/**
* Execute a CLI command.
* Manages Windows and Unix environments and tries to execute the command on both if one fails.
* Order: Windows -> Unix.
*
* @param command Command to execute. ('grunt')
* @param args Args of the command. ('watch')
* @param callback Success.
* @param callbackErrorWindows Failure on Windows env.
* @param callbackErrorUnix Failure on Unix env.
*/
public static execute(command: string, args: string[] = [], callback?: any, callbackErrorWindows?: any, callbackErrorUnix?: any) {
Cli.windows(command, args, callback, function () {
callbackErrorWindows();
try {
Cli.unix(command, args, callback, callbackErrorUnix);
} catch (e) {
console.log('------------- Failed to perform the command: "' + command + '" on all environments. -------------');
}
});
}
/**
* Execute a command on Windows environment.
*
* @param command Command to execute. ('grunt')
* @param args Args of the command. ('watch')
* @param callback Success callback.
* @param callbackError Failure callback.
*/
public static windows(command: string, args: string[] = [], callback?: any, callbackError?: any) {
try {
Cli._execute(process.env.comspec, _.union(['/c', command], args));
callback(command, args, 'Windows');
} catch (e) {
callbackError(command, args, 'Windows');
}
}
/**
* Execute a command on Unix environment.
*
* @param command Command to execute. ('grunt')
* @param args Args of the command. ('watch')
* @param callback Success callback.
* @param callbackError Failure callback.
*/
public static unix(command: string, args: string[] = [], callback?: any, callbackError?: any) {
try {
Cli._execute(command, args);
callback(command, args, 'Unix');
} catch (e) {
callbackError(command, args, 'Unix');
}
}
/**
* Execute a command no matter what the environment is.
*
* @param command Command to execute. ('grunt')
* @param args Args of the command. ('watch')
* @private
*/
private static _execute(command, args) {
var spawn = require('child_process').spawn;
var childProcess = spawn(command, args);
childProcess.stdout.on("data", function (data) {
console.log(data.toString());
});
childProcess.stderr.on("data", function (data) {
console.error(data.toString());
});
}
}
Example of use:
Cli.execute(Grunt._command, args, function (command, args, env) {
console.log('Grunt has been automatically executed. (' + env + ')');
}, function (command, args, env) {
console.error('------------- Windows "' + command + '" command failed, trying Unix... ---------------');
}, function (command, args, env) {
console.error('------------- Unix "' + command + '" command failed too. ---------------');
});
Use this lightweight npm package: system-commands
Look at it here.
Import it like this:
const system = require('system-commands')
Run commands like this:
system('ls').then(output => {
console.log(output)
}).catch(error => {
console.error(error)
})
If you don't mind a dependency and want to use promises, child-process-promise works:
installation
npm install child-process-promise --save
exec Usage
var exec = require('child-process-promise').exec;
exec('echo hello')
.then(function (result) {
var stdout = result.stdout;
var stderr = result.stderr;
console.log('stdout: ', stdout);
console.log('stderr: ', stderr);
})
.catch(function (err) {
console.error('ERROR: ', err);
});
spawn usage
var spawn = require('child-process-promise').spawn;
var promise = spawn('echo', ['hello']);
var childProcess = promise.childProcess;
console.log('[spawn] childProcess.pid: ', childProcess.pid);
childProcess.stdout.on('data', function (data) {
console.log('[spawn] stdout: ', data.toString());
});
childProcess.stderr.on('data', function (data) {
console.log('[spawn] stderr: ', data.toString());
});
promise.then(function () {
console.log('[spawn] done!');
})
.catch(function (err) {
console.error('[spawn] ERROR: ', err);
});
ECMAScript Modules import...from syntax
import {exec} from 'child-process-promise';
let result = await exec('echo hi');
console.log(result.stdout);
@hexacyanide's answer is almost a complete one.
On Windows, the prince command could be prince.exe, prince.cmd, prince.bat, or just prince (I'm not aware of how gems are bundled, but npm bins come with an sh script and a batch script: npm and npm.cmd).
If you want to write a portable script that would run on Unix and Windows, you have to spawn the right executable.
Here is a simple yet portable spawn function:
var child_process = require('child_process');

function spawn(cmd, args, opt) {
    // process.platform is 'win32' on Windows; anchor the regex so 'darwin' does not match
    var isWindows = /^win/.test(process.platform);
    if (isWindows) {
        if (!args) args = [];
        args.unshift(cmd);
        args.unshift('/c');
        cmd = process.env.comspec;
    }
    return child_process.spawn(cmd, args, opt);
}
var cmd = spawn("prince", ["-v", "builds/pdf/book.html", "-o", "builds/pdf/book.pdf"])
// Use these props to get execution results:
// cmd.stdin;
// cmd.stdout;
// cmd.stderr;
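To actually read those results you can attach the usual stream listeners (a minimal sketch using the cmd returned by the wrapper above):
cmd.stdout.on('data', function (data) {
    console.log(data.toString());
});
cmd.stderr.on('data', function (data) {
    console.error(data.toString());
});
cmd.on('close', function (code) {
    console.log('prince exited with code ' + code);
});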
I'm trying to deploy from GitHub. I want to execute more than one command, in the order of the array. The code I'm using now is included below.
async.series([
...
// Deploy from GitHub
function (callback) {
// Console shizzle:
console.log('');
console.log('Deploying...'.red.bold);
console.log();
console.log();
var deployFunctions = [
{
command: 'cd ' + envOptions.folder + ' && pwd',
log: false
},
{
command: 'pwd'
},
{
command: 'su ' + envOptions.user,
log: false
},
{
command: 'git pull'
},
{
command: 'chmod 0777 * -R',
log: false
}
];
async.eachSeries(deployFunctions, function (item, callback) {
deployment.ssh2.exec(item.command, function (err, stream) {
deployment.logExec(item);
stream.on('data', function (data, extended) {
console.log(data.toString().trim());
console.log();
});
function done() {
callback(err);
}
stream.on('exit', done);
stream.on('end', done);
});
}, function () {
callback();
});
},
...);
But, after I cd'ed to the right directory, it forgets where it was and starts all over again.
$ cd /some/folder && pwd
/some/folder
$ pwd
/root
@robertklep is correct about why your cd doesn't persist. Each command invokes a distinct shell instance which starts in its initial state. You could prefix each command with cd /home/jansenstok/domains/alcoholtesterwinkel.com/public_html/ && as a quick fix, but really you are setting yourself up for pain. What you want is a shell script with all the power of multiple lines as opposed to a list of individual disconnected commands.
Look at using ssh2's sftp function to transfer a complete shell script to the remote machine as step 1, execute it via exec (/bin/bash /tmp/your_deploy_script.sh) as step 2, and then delete the script as step 3.
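A rough sketch of that three-step flow with the ssh2 client (the local script path, remote path, and connection details here are placeholders, not from the original question):
const fs = require('fs');
const { Client } = require('ssh2');

const conn = new Client();
conn.on('ready', function () {
    // Step 1: upload the deploy script
    conn.sftp(function (err, sftp) {
        if (err) throw err;
        sftp.fastPut('./deploy.sh', '/tmp/your_deploy_script.sh', function (err) {
            if (err) throw err;
            // Step 2: run it in a single shell so cd, su, etc. persist across lines,
            // then step 3: remove it once it has finished
            conn.exec('/bin/bash /tmp/your_deploy_script.sh && rm /tmp/your_deploy_script.sh', function (err, stream) {
                if (err) throw err;
                stream.on('data', function (data) { console.log(data.toString()); });
                stream.on('close', function () { conn.end(); });
            });
        });
    });
}).connect({ host: 'example.com', username: 'root', privateKey: fs.readFileSync('/path/to/key') });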
I know this is a super old question, but I ran into this problem while trying to manage an ACE through my Node server. The answer didn't work for me, but several searches later led me to a wrapper that worked really well for me. Just wanted to share here because this was the top link in my Google search. It's called ssh2shell and can be found here: https://www.npmjs.com/package/ssh2shell
It's very simple to use, just pass an array of commands and they run one by one waiting for each command to complete before moving on to the next.
A practical example:
const { Client } = require('ssh2');
const client = new Client();
const cmds = [
'ls -lah \n',
'cd /mnt \n',
'pwd \n',
'ls -lah \n',
'exit \n',
];
client.on('ready', () => {
console.log('Client :: ready');
client.shell((err, stream) => {
stream.on('close', (code) => {
console.log('stream :: close\n', { code });
}).on('data', (myData) => {
console.log('stream :: data\n', myData.toString());
}).on('exit', (code) => {
console.log('stream :: exit\n', { code });
client.end();
}).on('error', (e) => {
console.log('stream :: error\n', { e });
rej(e); // rej comes from the surrounding Promise wrapper (not shown in this snippet)
});
for (let i = 0; i < cmds.length; i += 1) {
const cmd = cmds[i];
stream.write(`${cmd}`);
}
});
}).connect({
host: '127.0.0.1',
port: 22,
username: 'root',
password: 'root',
});
All the examples in the docs use stream.end(), which caused the creation of a new session instead of using the current one.
You shouldn't use "shell" in your program, because the "shell" command invokes a new terminal session on the system to do your job. You need to use the "exec" command instead. By default, "exec" emits "exit" after the command you gave has finished executing.