var cp = require("child_process");
var proc = cp.spawn("cmd");
proc.stdout.on("data", data => console.log("Data:", data.toString()));
proc.stdin.write("help\n");
proc.stdin.write("help\n");
In the above code snippet, how would you detect when the stream has finished writing for a specific command (i.e. when, if these commands were executed in the terminal, it would show a blinking cursor that you could enter text into)?
I have tried listening to the end event, but it seems only to be fired when the process finishes.
There is no definitive way to know, unless the program you're executing has a well-defined output format (e.g. newline-delimited JSON, XML, etc.). Either way, you will have to perform some kind of parsing (and possible buffering) of the program output.
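For illustration, here is a minimal sketch of that buffer-and-parse approach, assuming (purely as an example) a child program that emits one JSON object per line; the command name and output format are assumptions, not part of the question:
var cp = require("child_process");
// "somecommand" is a placeholder for a program that emits newline-delimited JSON
var proc = cp.spawn("somecommand");

var buffered = "";
proc.stdout.on("data", function (chunk) {
    buffered += chunk.toString();
    var lines = buffered.split("\n");
    buffered = lines.pop();               // keep the trailing partial line for the next chunk
    lines.forEach(function (line) {
        if (line.trim() === "") { return; }
        var message = JSON.parse(line);   // one complete JSON response per line
        console.log("Parsed response:", message);
    });
});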
This can be solved by writing a sentinel string to stdin that will be echoed back to you once the original command has finished executing. Pick a string that you know has no unwanted side effects and is unlikely to appear in the command's own output. For the given example:
var cp = require("child_process");
var proc = cp.spawn("cmd");
proc.stdout.on("data", data => {
    var str = data.toString();
    console.log(str);
    if (str.search("string to be detected") !== -1) {
        console.log("Command finished!");
    }
});
proc.stdin.write("help\n");
proc.stdin.write("string to be detected\n");
Alternatively, you could wait for some feature of the response that indicates the end of the command output, such as a newline or a new prompt, as suggested by @mesdex and @user866762.
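As a rough sketch of that prompt-based idea for the cmd example, you could watch stdout for something shaped like the usual C:\...> prompt; the regex below is only a guess at the prompt format and may need tuning:
var cp = require("child_process");
var proc = cp.spawn("cmd");

var promptPattern = /[A-Za-z]:\\[^>\r\n]*>\s*$/;  // assumed shape of the cmd prompt, e.g. "C:\Users\me>"
var buffer = "";
proc.stdout.on("data", function (chunk) {
    buffer += chunk.toString();
    if (promptPattern.test(buffer)) {
        console.log("Command finished, prompt is back");
        buffer = "";
    }
});

proc.stdin.write("help\n");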
Related
How can I print data without a trailing newline in PhantomJS? In Node.js, I use process.stdout to write data on the same line:
process.stdout.write("Anything...")
But I can't find its equivalent in PhantomJS. I've tried the PhantomJS child_process module, but it gives me an undefined error:
var process = require("child_subprocess");
process.stdout.write(" Anything ")
TypeError: undefined is not an object (evaluating 'process.stdout.write')
I think the problem is here:
var process = require("child_subprocess");
I am not sure, but I do not think that is a valid module.
Instead of:
var process = require("child_subprocess");
process.stdout.write(" Anything ")
try:
var process = require("child_process")
process.stdout.write(" Anything ")
Just found it: the system module is the native PhantomJS module for handling stdin and stdout. This did the task for me:
var system = require("system");
system.stdout.write("Anything...");
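As a side note, the same module also exposes stdin, so reading input and writing without a trailing newline can be combined in one small PhantomJS sketch (based on the documented system API; the prompt text is just an example):
// PhantomJS: the system module exposes stdout, stderr and stdin
var system = require("system");

system.stdout.write("Enter something: ");          // written without a trailing newline
var line = system.stdin.readLine();                // blocks until a full line has been read
system.stdout.write("You typed: " + line + "\n");
phantom.exit();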
I am trying to take the value of an input, use Ajax to submit these variables to a PHP function, call PhantomJS from that PHP function WITH these arguments passed from Ajax, and return the result back to the HTML page. I am passing the variables to the PHP file perfectly fine; the problem arises when calling PhantomJS with my script followed by the three arguments.
This is the code on my PHP page that calls PhantomJS:
echo json_encode(array("abc" => shell_exec('/Applications/XAMPP/htdocs/scripts/phantom/bin/phantomjs /Applications/XAMPP/htdocs/scripts/phantom/examples/test.js 2>&1',$website)));
This is the PhantomJS script referenced in the shell command:
var args = require('system').args;
args.forEach(function(arg, i) {
    console.log(i + '::' + arg);
});
var page = require('webpage').create();
var address = args[1];
page.open(address, function () {
    console.log("Done");
});
As you can see, it should be a relatively simple process, except nothing at all is being echoed. Permissions for each file are more than adequate, and I am sure these files are executing, because if I change the shell command to run hello.js, everything echoes and logs perfectly.
ALSO NOTE: This script is executing on my web server, so I am not 100% certain there IS a system variable.
Any ideas?
First issue: shell_exec() takes a single argument (see the documentation). However, your example is passing the shell argument ($website) as a second argument to shell_exec().
Corrected Example:
$shellReturn = shell_exec("/Applications/XAMPP/htdocs/scripts/phantom/bin/phantomjs /Applications/XAMPP/htdocs/scripts/phantom/examples/test.js " . $website);
echo json_encode(array("abc" => $shellReturn));
For simplicity I excluded the redirect of the error pipe. In addition, I would suggest you pass the arguments as JSON wrapped in base64 encoding. This prevents spaces in the URL from being split into multiple arguments. Once PhantomJS receives the system args, use atob() to decode the JSON and iterate over the resulting object rather than over the raw string arguments.
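On the PhantomJS side, that could look roughly like the sketch below; the argument position and the website key are assumptions, and the PHP side would build the value with something like base64_encode(json_encode(...)):
// test.js (PhantomJS side) -- decode a single base64-encoded JSON argument
var system = require("system");

// args[0] is the script name; args[1] is assumed to hold the base64 payload
var payload = JSON.parse(atob(system.args[1]));

var page = require("webpage").create();
page.open(payload.website, function (status) {    // "website" is an assumed key in the JSON
    console.log("Done: " + status);
    phantom.exit();
});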
I would also point you towards this project: https://github.com/merlinthemagic/MTS. Under the hood is an instance of PhantomJS; the project just wraps its functionality.
$myUrl = "http://www.example.com"; //replace with content of your $website variable
$windowObj = \MTS\Factories::getDevices()->getLocalHost()->getBrowser('phantomjs')->getNewWindow($myUrl);
//if you want the DOM or maybe a screenshot at any point, run:
$dom = $windowObj->getDom();
$imageData = $windowObj->screenshot();
I wrote a Perl script that handles some data automatically. However, I face a problem when I try to call the script from my Thunderbird extension, which is naturally written in JavaScript.
var file = Components.classes["@mozilla.org/file/local;1"]
.createInstance(Components.interfaces.nsILocalFile);
file.initWithPath("/usr/bin/perl");
// create an nsIProcess
var process = Components.classes["@mozilla.org/process/util;1"]
.createInstance(Components.interfaces.nsIProcess);
process.init(file);
// Run the process.
// If first param is true, calling thread will be blocked until
// called process terminates.
// Params are used to pass command-line arguments
// to the process
var args = ["package/myperlscript.pl", "some arguments"];
process.run(true, args, args.length);
I guess I have the Perl script placed at the wrong location. I tried various ones, but I could not get it to work. If that is my main mistake, what is the base path that the JavaScript code resolves relative paths against?
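One way to sidestep the base-path question would be to build an absolute path yourself, for example from the profile directory via FileUtils, and hand that to nsIProcess. A rough sketch only, where the extension id and folder layout are placeholders:
Components.utils.import("resource://gre/modules/FileUtils.jsm");

// "ProfD" is the profile directory; the extension id and sub-folders are placeholders
var script = FileUtils.getFile("ProfD",
    ["extensions", "myextension@example.com", "package", "myperlscript.pl"]);

var perl = Components.classes["@mozilla.org/file/local;1"]
                     .createInstance(Components.interfaces.nsILocalFile);
perl.initWithPath("/usr/bin/perl");

var process = Components.classes["@mozilla.org/process/util;1"]
                        .createInstance(Components.interfaces.nsIProcess);
process.init(perl);

// pass the absolute script path instead of a relative one
var args = [script.path, "some arguments"];
process.run(true, args, args.length);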
I have a PHP script which acts as a DNode client. Then I have a Node.js DNode server which evaluates the code it receives from the PHP client and returns the DOM as HTML. However, Node.js acts strangely to me (being a Node.js newbie): it doesn't return anything, even though the returned string is not empty. My code is below:
PHP client code using DNode-PHP library:
<?php
require(__DIR__.'/../../vendor/autoload.php');
$loop = new React\EventLoop\StreamSelectLoop();
$dnode = new DNode\DNode($loop);
$dnode->connect(7070, function($remote, $connection) {
    $js = 'var a = document.createElement("A");';
    $js.= 'document.getElementsByTagName("body")[0].appendChild(a);'
    $remote->zing($js, function($n) use ($connection) {
        print_r($n);
        $connection->end();
    });
});
$loop->run();
?>
Node.js server code:
var dnode = require('dnode');
var jsdom = require("jsdom");
var server = dnode({
    zing: function (n, cb) {
        var document = jsdom.jsdom('<!DOCTYPE html>');
        var window = jsdom.parentWindow;
        eval(n);
        var html = jsdom.serializeDocument(document);
        // console.log(html);
        cb(html);
    }
});
server.listen(7070);
Console.log() clearly outputs <!DOCTYPE html><html><head></head><body><a></a></body></html>, which is the expected result, but it never gets to the PHP client. What is strange is that if I change the line cb(html); to cb('test');, PHP outputs "test". So the problem must be somewhere on the Node.js side, but I have no idea where to look.
Thanks in advance for any hints.
How are you viewing the response? Through a web browser? If so, then you're depending on whatever you're evaluating in eval(n) to change the DOM of the document... If nothing changes, then you won't end up seeing anything because you'll have an empty DOM other than the html/head/body tags. It would be worth your time confirming that you're getting an empty response back and it's not just an empty DOM.
That being said, the eval function has no knowledge of the document/window you declare above. As it is, it is just executing in the context of Node itself, not on the page you are attempting to create. To fix this, try using:
window.eval(n)
If you take a look at the example "Creating a browser-like window object" on the GitHub page for jsdom (https://github.com/tmpvar/jsdom), it will give you a better idea of how exactly to use this package.
What you have above should look something like this:
var document = jsdom.jsdom("<!DOCTYPE html>");
var window = document.parentWindow;
window.eval(n);
var html = jsdom.serializeDocument(document);
cb(html);
Now you'll be executing the Javascript on the DOM you were previously creating :-)
Your problem is not in Node. When I use the server code you show in your question and try with this client code, I get the expected result:
var dnode = require("dnode");
var d = dnode();
d.on('remote', function (remote) {
    var js = 'var a = document.createElement("A");' +
        'document.getElementsByTagName("body")[0].appendChild(a);';
    remote.zing(js, function (s) {
        console.log(s);
    });
});
d.connect('localhost', '7070');
I don't do PHP so I don't know what the problem might be on that side.
In nodejs, the only way to execute external commands is via sys.exec(cmd). I'd like to call an external command and give it data via stdin. In nodejs there does not yet appear to be a way to open a command and then push data to it (only to exec and receive its standard and error outputs), so it appears the only way I've got to do this right now is via a single-string command such as:
var dangerStr = "bad stuff here";
sys.exec("echo '" + dangerStr + "' | somecommand");
Most answers to questions like this have focused either on regexes, which don't work for me in nodejs (which uses Google's V8 JavaScript engine), or on native features from other languages like Python.
I'd like to escape dangerStr so that it's safe to compose an exec string like the one above. If it helps, dangerStr will contain JSON data.
This is what I use:
var escapeShell = function(cmd) {
    return '"' + cmd.replace(/(["'$`\\])/g, '\\$1') + '"';
};
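Applied to the question's example (keeping its old sys.exec style, with somecommand as a placeholder), usage would look like:
var dangerStr = "bad stuff here";
// the untrusted value is escaped and quoted before being spliced into the shell string
sys.exec("echo " + escapeShell(dangerStr) + " | somecommand");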
You should never rely on escaping unknown input going to a shell parameter - there will almost always be some edge-case that you haven't thought of that allows the user to execute arbitrary code on your server.
Node has support for calling a command and passing each argument separately, with no escaping required. This is the safest way to do it:
const { spawn } = require('child_process');
// Note that the arguments are in an array, not using string interpolation
const ls = spawn('ls', ['-lh', '/usr']);
ls.stdout.on('data', (data) => {
    console.log(`stdout: ${data}`);
});

ls.stderr.on('data', (data) => {
    console.log(`stderr: ${data}`);
});

ls.on('close', (code) => {
    console.log(`child process exited with code ${code}`);
});
The documentation is here
If you need a simple (yet correct) solution, you can use this:
function escapeShellArg (arg) {
    return `'${arg.replace(/'/g, `'\\''`)}'`;
}
So your string will be simply escaped with single quotes as Chris Johnsen mentioned.
echo 'John'\''s phone';
It works in bash because of strong quoting; it seems to work in fish as well, but it does not work in zsh or sh.
If you have bash, you can run your script from sh or zsh with 'bash -c \'' + escape('all-the-rest-escaped') + '\''.
But actually... Node.js will take care of this for you: spawn passes the arguments to the program directly, without going through a shell, so no escaping is needed:
var child = require('child_process')
.spawn('echo', ['`echo 1`;"echo $SSH_TTY;\'\\0{0..5}']);
child.stdout.on('data', function (data) {
    console.log('stdout: ' + data);
});
child.stderr.on('data', function (data) {
    console.log('stderr: ' + data);
});
this block of code is equivalent to running:
echo '`echo 1`;"echo $SSH_TTY;'\''\0{0..5}'
and will output:
stdout: `echo 1`;"echo $SSH_TTY;'\0{0..5}
or some error.
Take a look at http://nodejs.org/api/child_process.html#child_process_child_process_spawn_command_args_options
By the way, a simple solution for running a bunch of commands is:
require('child_process')
    .spawn('sh', ['-c', [
        'cd all/your/commands',
        'ls here',
        'echo "and even" > more'
    ].join('; ')]);
Have a nice day!
I second the opinion of Will: whenever possible, you should avoid escaping by hand and prefer spawn.
However, in cases where escaping is unavoidable, for example if you need to use exec or you are executing a command through ssh, you can use base64 so that only safe characters are passed to bash, and decode the real value inside the command.
const dangerStr = 'bad stuff here'
// base64 has safe characters [A-Za-z=0-9+/]
const dangerBase64 = btoa(dangerStr)
sys.exec(`echo "$(echo ${dangerBase64} | base64 -d)" | somecommand`)
The explanation is the following:
dangerBase64 is unknown, but it contains no characters that are unsafe in bash, so echo ${dangerBase64} will output exactly what we want.
Finally, the double quotes around $(echo ${dangerBase64} | base64 -d) protect the decoded value inside bash, so the command receives the same value the user intended, safely.
If you also need to deal with special characters (line breaks etc.), you can do it this way:
str = JSON.stringify(str)
.replace(/^"|"$/g,'') //remove JSON-string double quotes
.replace(/'/g, '\'"\'"\'') //escape single quotes the ugly bash way
This assumes you use Bash's strong quoting via single quotes and that the receiver can understand JSON's C-like escaping.
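Put together, splicing the escaped value into a single-quoted argument could look roughly like this sketch (somecommand is a placeholder, and exec is used here only to illustrate the escaping):
const { exec } = require('child_process');

let str = 'line one\nline "two" and it\'s tricky';
const escaped = JSON.stringify(str)
    .replace(/^"|"$/g, '')        // remove JSON-string double quotes
    .replace(/'/g, '\'"\'"\'');   // escape single quotes the ugly bash way

// single quotes stop bash from interpreting anything inside the argument
exec("somecommand '" + escaped + "'", (err, stdout, stderr) => {
    if (err) { throw err; }
    console.log(stdout);
});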
If you are building your own software, you can encode the arguments in base64 or hex format and then decode them inside the program.
For my Node.js applications I use:
var base64_encode = exports.base64_encode = function(non_base64_string){
    return Buffer.from(non_base64_string).toString('base64');
}

var base64_decode = exports.base64_decode = function(base64_string){
    return Buffer.from(base64_string, 'base64').toString('ascii');
}
So when I run a base64-encoded command like this:
webman grep --search "aW5jbHVkZV9vbmNlICRfU0VSVkVSWyJET0NVTUVOVF9ST09UIl0uIi9zZXR0aW5ncy5waHAiOw==" --replacement "JGRvY3VtZW50X3Jvb3QgPSBfX0RJUl9fO3doaWxlKHRydWUpe2lmIChmaWxlX2V4aXN0cygkZG9jdW1lbnRfcm9vdC4iL3NldHRpbmdzLmpzb24iKSl7YnJlYWs7fWVsc2V7JGRvY3VtZW50X3Jvb3Q9ZGlybmFtZSgkZG9jdW1lbnRfcm9vdCk7fX08bmV3bGluZT5pbmNsdWRlX29uY2UgJGRvY3VtZW50X3Jvb3QuIi9zZXR0aW5ncy5waHAiOw=="
I can get the search and replacement arguments without stress using base64_decode.
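A minimal sketch of the decoding side, assuming the values arrive on process.argv as --flag <base64> pairs (the flag handling here is deliberately simplified):
// decode_args.js -- minimal sketch of decoding base64-encoded CLI arguments
function base64_decode(base64_string) {
    return Buffer.from(base64_string, 'base64').toString('utf8'); // 'utf8' chosen here; adjust if needed
}

// e.g. node decode_args.js --search <base64> --replacement <base64>
var argv = process.argv.slice(2);
var search = base64_decode(argv[argv.indexOf('--search') + 1]);
var replacement = base64_decode(argv[argv.indexOf('--replacement') + 1]);

console.log('search:', search);
console.log('replacement:', replacement);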
There is a way to write to an external command: process.createChildProcess (documentation) returns an object with a write method. createChildProcess isn't as convenient though, because it doesn't buffer stdout and stderr, so you will need event handlers to read the output in chunks.
var stdout = "", stderr = "";
var child = process.createChildProcess("someCommand");
child.addListener("output", function (data) {
    if (data !== null) {
        stdout += data;
    }
});
child.addListener("error", function (data) {
    if (data !== null) {
        stderr += data;
    }
});
child.addListener("exit", function (code) {
    if (code === 0) {
        sys.puts(stdout);
    }
    else {
        // error
    }
});
child.write("This goes to someCommand's stdin.");