I am experiencing the following issue when starting my Redux app:
./node_modules/draftjs-md-converter/dist/index.js
Syntax error: /Users/vlasenkona/Desktop/gris-seqr2/ui/node_modules/draftjs-md-converter/dist/index.js: Identifier '_toConsumableArray' has already been declared (196:9)
194 | var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
195 |
> 196 | function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }
| ^
197 |
198 | var parse = require('@textlint/markdown-to-ast').parse;
199 |
at parser.next (<anonymous>)
The startup script is the following:
'use strict';
// Do this as the first thing so that any code reading it knows the right env.
process.env.BABEL_ENV = 'development';
process.env.NODE_ENV = 'development';
// Makes the script crash on unhandled rejections instead of silently
// ignoring them. In the future, promise rejections that are not handled will
// terminate the Node.js process with a non-zero exit code.
process.on('unhandledRejection', err => {
throw err;
});
// Ensure environment variables are read.
require('../config/env');
const fs = require('fs');
const chalk = require('chalk');
const webpack = require('webpack');
const WebpackDevServer = require('webpack-dev-server');
const clearConsole = require('react-dev-utils/clearConsole');
const {
choosePort,
createCompiler,
prepareProxy,
prepareUrls,
} = require('react-dev-utils/WebpackDevServerUtils');
const openBrowser = require('react-dev-utils/openBrowser');
const paths = require('../config/paths');
const config = require('../config/webpack.config.dev');
const createDevServerConfig = require('../config/webpackDevServer.config');
const useYarn = fs.existsSync(paths.yarnLockFile);
const isInteractive = process.stdout.isTTY;
// Tools like Cloud9 rely on this.
const DEFAULT_PORT = parseInt(process.env.PORT, 10) || 3000;
const HOST = process.env.HOST || '0.0.0.0';
// We attempt to use the default port but if it is busy, we offer the user to
// run on a different port. `detect()` Promise resolves to the next free port.
choosePort(HOST, DEFAULT_PORT)
.then(port => {
if (port == null) {
// We have not found a port.
return;
}
const protocol = process.env.HTTPS === 'true' ? 'https' : 'http';
const appName = require(paths.appPackageJson).name;
const urls = prepareUrls(protocol, HOST, port);
// Create a webpack compiler that is configured with custom messages.
const compiler = createCompiler(webpack, config, appName, urls, useYarn);
// Load proxy config
const proxySetting = require(paths.appPackageJson).proxy;
const proxyConfig = prepareProxy(proxySetting, paths.appPublic);
// Serve webpack assets generated by the compiler over a web server.
const serverConfig = createDevServerConfig(
proxyConfig,
urls.lanUrlForConfig
);
const devServer = new WebpackDevServer(compiler, serverConfig);
// Launch WebpackDevServer.
devServer.listen(port, HOST, err => {
if (err) {
return console.log(err);
}
if (isInteractive) {
clearConsole();
}
console.log(chalk.cyan('Starting the development server...\n'));
openBrowser(urls.localUrlForBrowser);
});
['SIGINT', 'SIGTERM'].forEach(function(sig) {
process.on(sig, function() {
devServer.close();
process.exit();
});
});
})
.catch(err => {
if (err && err.message) {
console.log(err.message);
}
process.exit(1);
});
The app was starting up just fine until I switched to Babel 7; the migration process (and relevant config files) can be found in my other thread:
Switching to babel 7 causes jest to show 'unexpected token'
I tried updating draftjs-md-converter to the latest version, but it did not fix the issue. I found in this thread:
Identifier already declared - Identifier 'userScore' has already been declared
that such an error may happen because the app is somehow launched twice, though I am not sure why it would have started doing that now.
The solution can be found here:
https://github.com/kadikraman/draftjs-md-converter/pull/56
It happens because both Babel 7 and draftjs-md-converter define the _toConsumableArray helper. A pull request was opened on the project's GitHub page that builds the bundle with Parcel instead, and the workaround is the following:
git clone https://github.com/kadikraman/draftjs-md-converter.git
cd draftjs-md-converter
git checkout origin/chore/use-parcel-for-bundling
npm i
npm run compile
After that, copying the files from the dist folder into node_modules/draftjs-md-converter/dist/ solved the issue.
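As a general precaution (an assumption about the webpack setup, not part of the fix above), making sure babel-loader skips node_modules prevents Babel 7 from injecting a second copy of its helpers into packages that already ship compiled bundles:
// Sketch of a babel-loader rule, e.g. in config/webpack.config.dev.js (module.rules):
{
  test: /\.(js|jsx)$/,
  exclude: /node_modules/, // don't re-transpile pre-built dist files
  loader: require.resolve('babel-loader'),
},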
Related
I have a problem: my command handler only recognizes the top folder inside my Commands directory. It's supposed to show all of the available folders in the Commands directory, but it only shows the 'test' category, which is the top one. Any help would be really appreciated.
Folder/Directory construction: (screenshot omitted)
console.log output: (screenshot omitted)
Command Handler Code:
const {readdirSync} = require('fs');
const ascii = require('ascii-table');
let table = new ascii("Commands");
table.setHeading('Category', 'Command', ' Load status');
var logged = false;
const path = require('node:path')
module.exports = (client) => {
readdirSync('./Commands/').forEach(dir => {
var commands = readdirSync(`./Commands/${dir}/`).filter(file => file.endsWith('.js'));
for(let file of commands){
let pull = require(`../Commands/${dir}/${file}`);
if(pull.name){
client.commands.set(pull.name, pull);
table.addRow(dir,file,'✔️ -> Command Loaded')
} else {
table.addRow(dir,file,'❌ -> Command Error')
continue;
}
if(pull.aliases && Array.isArray(pull.aliases)) pull.aliases.forEach(alias => client.aliases.set(alias, pull.name))
}
if(!logged) {
console.log(table.toString())
console.log(`[Command] Command Handler is Ready! | Total Commands: ${commands.length}`)
logged = true
}
});
}
I believe you are overwriting the commands variable after each folder has been looped through. Try this:
const {readdirSync} = require('fs');
const ascii = require('ascii-table');
let table = new ascii("Commands");
table.setHeading('Category', 'Command', ' Load status');
var logged = false;
const path = require('node:path')
module.exports = (client) => {
    // accumulate across every category folder instead of resetting per folder
    const commands = [];
    readdirSync('./Commands/').forEach(dir => {
        const files = readdirSync(`./Commands/${dir}/`).filter(file => file.endsWith('.js'));
        commands.push(...files); // spread, so we store file names rather than nested arrays
        for(let file of files){
            let pull = require(`../Commands/${dir}/${file}`);
            if(pull.name){
                client.commands.set(pull.name, pull);
                table.addRow(dir,file,'✔️ -> Command Loaded')
            } else {
                table.addRow(dir,file,'❌ -> Command Error')
                continue;
            }
            if(pull.aliases && Array.isArray(pull.aliases)) pull.aliases.forEach(alias => client.aliases.set(alias, pull.name))
        }
    });
    // log once, after every folder has been processed
    if(!logged) {
        console.log(table.toString())
        console.log(`[Command] Command Handler is Ready! | Total Commands: ${commands.length}`)
        logged = true
    }
}
If this doesn't help, then it might still be the issue I referred to above, but the edits I made might not be compatible with your code.
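If it still only picks up the first folder, it is worth verifying what readdirSync actually returns for the Commands directory. A minimal check (a sketch; it assumes the handler file sits one level below the project root, as the ../Commands require path suggests):
const { readdirSync } = require('fs');
const path = require('node:path');
// Resolve relative to this file instead of the process's working directory,
// since a cwd other than the project root makes './Commands/' point elsewhere
const commandsRoot = path.join(__dirname, '..', 'Commands');
const categories = readdirSync(commandsRoot, { withFileTypes: true })
    .filter(entry => entry.isDirectory())
    .map(entry => entry.name);
console.log(categories); // should list every category folder, not just 'test'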
I've got a script that synchronously installs non-built-in modules at startup that looks like this
const cp = require('child_process')
function requireOrInstall (module) {
try {
require.resolve(module)
} catch (e) {
console.log(`Could not resolve "${module}"\nInstalling`)
cp.execSync(`npm install ${module}`)
console.log(`"${module}" has been installed`)
}
console.log(`Requiring "${module}"`)
try {
return require(module)
} catch (e) {
console.log(require.cache)
console.log(e)
}
}
const http = require('http')
const path = require('path')
const fs = require('fs')
const ffp = requireOrInstall('find-free-port')
const express = requireOrInstall('express')
const socket = requireOrInstall('socket.io')
// List goes on...
When I uninstall modules, they get installed successfully when I start the server again, which is what I want. However, the script starts throwing Cannot find module errors when I uninstall the first or first two modules of the list that use the function requireOrInstall. That's right, the errors only occur when the script has to install either the first or the first two modules, not when only the second module needs installing.
In this example, the error will be thrown when I uninstall find-free-port, unless I move its require at least one spot down ¯\_(• _ •)_/¯
I've also tried adding a delay directly after the synchronous install to give it a little more breathing time with the following two lines:
var until = new Date().getTime() + 1000
while (new Date().getTime() < until) {}
The pause was there. It didn't fix anything.
@velocityzen came up with the idea to check the cache, which I've now added to the script. It doesn't show anything out of the ordinary.
@vaughan's comment on another question noted that this exact error occurs when requiring a module twice. I've changed the script to use require.resolve(), but the error still remains.
Does anybody know what could be causing this?
Edit
Since the question has been answered, I'm posting the one-liner (139 characters!). It doesn't globally define child_process, has no final try-catch, and doesn't log anything to the console:
const req=async m=>{let r=require;try{r.resolve(m)}catch(e){r('child_process').execSync('npm i '+m);await setImmediate(()=>{})}return r(m)}
The name of the function is req() and it can be used like in @alex-rokabilis' answer.
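For instance (a minimal usage sketch; the module name and port are just examples):
const main = async () => {
  const express = await req('express'); // installs on first run, requires either way
  const app = express();
  app.listen(3000);
};
main();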
It seems that a require operation right after an npm install needs a certain delay.
The problem is also worse on Windows: there it will always fail if the module needs to be npm installed.
It's as if, at a specific point in time, a snapshot is taken of which modules can be required and which cannot. That is probably why require.cache was mentioned in the comments. Nevertheless, I suggest you check the two following solutions.
1) Use a delay
const cp = require("child_process");
const requireOrInstall = async module => {
try {
require.resolve(module);
} catch (e) {
console.log(`Could not resolve "${module}"\nInstalling`);
cp.execSync(`npm install ${module}`);
// Use one of the two awaits below
// The first one waits 1000 milliseconds
// The other waits until the next event cycle
// Both work
await new Promise(resolve => setTimeout(() => resolve(), 1000));
await new Promise(resolve => setImmediate(() => resolve()));
console.log(`"${module}" has been installed`);
}
console.log(`Requiring "${module}"`);
try {
return require(module);
} catch (e) {
console.log(require.cache);
console.log(e);
}
}
const main = async() => {
const http = require("http");
const path = require("path");
const fs = require("fs");
const ffp = await requireOrInstall("find-free-port");
const express = await requireOrInstall("express");
const socket = await requireOrInstall("socket.io");
}
main();
await always needs a promise to work with, but it's not needed to explicitly create one as await will wrap whatever it is waiting for in a promise if it isn't handed one.
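For instance (a trivial illustration, not from the original answer):
const main = async () => {
  const a = await 42;                  // await wraps the plain value in a promise
  const b = await Promise.resolve(42); // already a promise
  console.log(a === b);                // true
};
main();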
2) Use a cluster
const cp = require("child_process");
function requireOrInstall(module) {
try {
require.resolve(module);
} catch (e) {
console.log(`Could not resolve "${module}"\nInstalling`);
cp.execSync(`npm install ${module}`);
console.log(`"${module}" has been installed`);
}
console.log(`Requiring "${module}"`);
try {
return require(module);
} catch (e) {
console.log(require.cache);
console.log(e);
process.exit(1007);
}
}
const cluster = require("cluster");
if (cluster.isMaster) {
cluster.fork();
cluster.on("exit", (worker, code, signal) => {
if (code === 1007) {
cluster.fork();
}
});
} else if (cluster.isWorker) {
// The real work here for the worker
const http = require("http");
const path = require("path");
const fs = require("fs");
const ffp = requireOrInstall("find-free-port");
const express = requireOrInstall("express");
const socket = requireOrInstall("socket.io");
process.exit(0);
}
The idea here is to re-run the process in case of a missing module. This way we fully reproduce a manual npm install, so as you can guess, it works! It also feels more synchronous than the first option, but is a bit more complex.
I think your best option is either:
(ugly) to install the package globally instead of locally
(best solution?) to define YOUR own 'package repository' location, both when installing AND when requiring
First, you may consider using the npm-programmatic package.
Then, you may define your repository path with something like:
const PATH='/tmp/myNodeModuleRepository';
Then, replace your installation instruction with something like:
const npm = require('npm-programmatic');
npm.install(`${module}`, {
cwd: PATH,
save:true
}
Finally, replace your fallback require instruction with something like:
return require(require.resolve(module, { paths: [ PATH ] }));
If it is still not working, you may update the require.cache variable. For instance, to invalidate a module, you can do something like:
delete require.cache[process.cwd() + '/node_modules/bluebird/js/release/bluebird.js'];
You may need to update it manually, to add information about your new module, before loading it.
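For example (a hedged sketch; require.resolve() returns the absolute filename that require.cache uses as its key):
// Invalidate a cached module so the next require() re-reads it from disk
const modPath = require.resolve('bluebird');
delete require.cache[modPath];
const freshBluebird = require('bluebird');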
cp.exec is an async call, so try checking whether the module is installed in its callback function. I have tried it; the installation is clean now:
const cp = require('child_process')
function requireOrInstall (module) {
  try {
    require.resolve(module)
  } catch (e) {
    console.log(`Could not resolve "${module}"\nInstalling`)
    cp.exec(`npm install ${module}`, () => {
      console.log(`"${module}" has been installed`)
      try {
        // note: this runs inside the async callback, so the value is not
        // returned to the caller of requireOrInstall
        return require(module)
      } catch (e) {
        console.log(require.cache)
        console.log(e)
      }
    })
  }
  console.log(`Requiring "${module}"`)
}
const http = require('http')
const path = require('path')
const fs = require('fs')
const ffp = requireOrInstall('find-free-port')
const express = requireOrInstall('express')
const socket = requireOrInstall('socket.io')
When node_modules is not available yet: (screenshot omitted)
When node_modules is already available: (screenshot omitted)
I have a number of UI tests written in JavaScript (using Selenium and Cucumber). Currently I am running the tests as follows:
yarn run loginTest
Is there a command for debugging the test?
Update:
Below is a snapshot of my package.json that includes the loginTest script reference:
"scripts": {
"loginTest": "node ./scripts/loginTest.js"
....
},
The loginTest.js file is shown below:
const chalk = require('chalk');
const path = require('path');
const fs = require('fs-extra');
const cp = require('child_process');
const srcPath = './projects/';
const projects = fs.readdirSync(srcPath)
.filter(file => fs.lstatSync(path.join(srcPath, file)).isDirectory())
.filter(file => file !== 'commons');
process.env.HOST = process.argv.slice(2)[0] || 'localhost';
console.log(chalk.magenta('Carrying out integration tests against: ') + process.env.HOST);
projects.forEach((project) => {
if (fs.existsSync(`./projects/${project}/login-tests/features/`)) {
console.log(chalk.yellow('Starting integration tests for: ') + project);
cp.fork(__dirname + '/runtime/pickle.js',
[
'-f', `./projects/${project}/login-tests/features/`,
'-s', `./projects/${project}/login-tests/step-definitions/`,
'-p', `./projects/${project}/login-tests/page-objects/`,
'-r', `./build/reports/${project}`
]);
}
});
The pickle.js file that was referenced above is shown here:
#!/usr/bin/env node
'use strict';
const fs = require('fs-plus');
const path = require('path');
const program = require('commander');
const cucumber = require('cucumber');
function collectPaths(value, paths) {
paths.push(value);
return paths;
}
function coerceInt(value, defaultValue) {
const int = parseInt(value);
if (typeof int === 'number') return int;
return defaultValue;
}
let config = {
steps: './step-definitions',
pageObjects: './page-objects',
sharedObjects: './shared-objects',
reports: './reports',
browser: 'chrome',
timeout: 15000
};
const configFileName = path.resolve(process.cwd(), 'selenium-cucumber-js.json');
if (fs.isFileSync(configFileName)) {
config = Object.assign(config, require(configFileName));
}
program
.option('-s, --steps <path>', 'path to step definitions. defaults to ' + config.steps, config.steps)
.option('-p, --pageObjects <path>', 'path to page objects. defaults to ' + config.pageObjects, config.pageObjects)
.option('-o, --sharedObjects [paths]', 'path to shared objects (repeatable). defaults to ' + config.sharedObjects, collectPaths, [config.sharedObjects])
.option('-b, --browser <path>', 'name of browser to use. defaults to ' + config.browser, config.browser)
.option('-r, --reports <path>', 'output path to save reports. defaults to ' + config.reports, config.reports)
.option('-d, --disableLaunchReport [optional]', 'Disables the auto opening the browser with test report')
.option('-j, --junit <path>', 'output path to save junit-report.xml defaults to ' + config.reports)
.option('-t, --tags <tagName>', 'name of tag to run', collectPaths, [])
.option('-f, --featureFiles <paths>', 'comma-separated list of feature files to run')
.option('-x, --timeOut <n>', 'steps definition timeout in milliseconds. defaults to ' + config.timeout, coerceInt, config.timeout)
.option('-n, --noScreenshot [optional]', 'disable auto capturing of screenshots when an error is encountered')
.parse(process.argv);
program.on('--help', function () {
console.log(' For more details please visit https://github.com/john-doherty/selenium-cucumber-js#readme\n');
});
// store browserName globally (used within world.js to build driver)
global.browserName = program.browser;
// store Eyes Api globally (used within world.js to set Eyes)
global.eyesKey = config.eye_key;
// used within world.js to import page objects
global.pageObjectPath = path.resolve(program.pageObjects);
// used within world.js to output reports
global.reportsPath = path.resolve(program.reports);
if (!fs.existsSync(program.reports)) {
fs.makeTreeSync(program.reports);
}
// used within world.js to decide if reports should be generated
global.disableLaunchReport = (program.disableLaunchReport);
// used with world.js to determine if a screenshot should be captured on error
global.noScreenshot = (program.noScreenshot);
// used within world.js to output junit reports
global.junitPath = path.resolve(program.junit || program.reports);
// set the default timeout to 10 seconds if not already globally defined or passed via the command line
global.DEFAULT_TIMEOUT = global.DEFAULT_TIMEOUT || program.timeOut || 10 * 1000;
// used within world.js to import shared objects into the shared namespace
global.sharedObjectPaths = program.sharedObjects.map(function (item) {
return path.resolve(item);
});
// rewrite command line switches for cucumber
process.argv.splice(2, 100);
// allow specific feature files to be executed
if (program.featureFiles) {
var splitFeatureFiles = program.featureFiles.split(',');
splitFeatureFiles.forEach(function (feature) {
process.argv.push(feature);
});
}
// add switch to tell cucumber to produce json report files
process.argv.push('-f');
process.argv.push('pretty');
process.argv.push('-f');
process.argv.push('json:' + path.resolve(__dirname, global.reportsPath, 'cucumber-report.json'));
// add cucumber world as first required script (this sets up the globals)
process.argv.push('-r');
process.argv.push(path.resolve(__dirname, 'world.js'));
// add path to import step definitions
process.argv.push('-r');
process.argv.push(path.resolve(program.steps));
// add tag
if (program.tags) {
program.tags.forEach(function (tag) {
process.argv.push('-t');
process.argv.push(tag);
});
}
// add strict option (fail if there are any undefined or pending steps)
process.argv.push('-S');
//
// execute cucumber
//
var cucumberCli = cucumber.Cli(process.argv);
global.cucumber = cucumber;
cucumberCli.run(function (succeeded) {
var code = succeeded ? 0 : 1;
function exitNow() {
process.exit(code);
}
if (process.stdout.write('')) {
exitNow();
} else {
// write() returned false, kernel buffer is not empty yet...
process.stdout.on('drain', exitNow);
}
});
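There is no debug command wired up in the scripts above, but a common approach (an assumption, not something defined in this project) is to run the runner under Node's inspector. Because loginTest.js spawns pickle.js with cp.fork, the forked child also needs its own inspector port via execArgv:
// package.json (hypothetical extra script):
//   "loginTest:debug": "node --inspect-brk ./scripts/loginTest.js"
// In loginTest.js, give each forked child a separate inspector port;
// `args` stands for the existing ['-f', ...] argument array shown above:
cp.fork(__dirname + '/runtime/pickle.js', args, {
    execArgv: ['--inspect-brk=9230'] // the parent defaults to port 9229
});
You can then attach Chrome DevTools via chrome://inspect (or an editor debugger) and step through the test code.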
I am having issues generating files with Node. The files show up out of sequence and appear after the loop that created them has finished, throwing my automated git commits out of whack. Can someone help me please? GitHub repo: https://github.com/wendellmva/cli3
// requires used below but missing from the posted snippet
const { resolve } = require('path');
const fs = require('fs');
const trash = require('trash');
const shelljs = require('shelljs');
class Generator {
constructor(){
this.root = resolve(__dirname, 'generated');
}
clean(done){
if(fs.existsSync(this.root)){
trash(this.root).then(done);
} else {
done();
}
}
execute(){
this.init()
for(let i=0; i<10; ++i) {
const file = resolve(this.root, `file${i}.txt`);
fs.writeFile(file, 'Hello world', (err)=> {
if(err) console.error(err);
console.info('CREATE ', file);
if(fs.existsSync(file)) this.add(file);
if(i==9) this.commit();
});
}
}
init(){
shelljs.exec(`mkdir ${this.root} && cd ${this.root} && git init`);
}
add(file){
shelljs.exec(`cd ${this.root} && git add ${file}`);
}
commit(){
shelljs.exec(`cd ${this.root} && git commit -m "chore: initial commit"`);
}
}
const generator = new Generator();
generator.clean(()=>{
generator.execute();
});
problem 1: commit starts before execute has finished, resulting in an empty commit
problem 2: when generated files exist, execute starts before clean has finished, resulting in 'file already exists' errors
===> SOLVED
problem 3: files are created out of sequence
Initialized empty Git repository in D:/#cardstrip/cli3/generated/.git/
CREATE D:\#cardstrip\cli3\generated\file2.txt
CREATE D:\#cardstrip\cli3\generated\file3.txt
CREATE D:\#cardstrip\cli3\generated\file0.txt
CREATE D:\#cardstrip\cli3\generated\file1.txt
CREATE D:\#cardstrip\cli3\generated\file4.txt
CREATE D:\#cardstrip\cli3\generated\file5.txt
CREATE D:\#cardstrip\cli3\generated\file7.txt
CREATE D:\#cardstrip\cli3\generated\file9.txt
[master (root-commit) 3442284] chore: initial commit
8 files changed, 8 insertions(+)
create mode 100644 file0.txt
create mode 100644 file1.txt
create mode 100644 file2.txt
create mode 100644 file3.txt
create mode 100644 file4.txt
create mode 100644 file5.txt
create mode 100644 file7.txt
create mode 100644 file9.txt
CREATE D:\#cardstrip\cli3\generated\file6.txt
CREATE D:\#cardstrip\cli3\generated\file8.txt
Both fs.writeFile and child_process.exec are asynchronous, and you're not waiting for the operations to conclude; that's why your code is not doing what you expect it to do.
We can convert writeFile and exec to a Promise-based API with util.promisify and use async/await to perform the operations in the right order.
const { promisify } = require('util');
const { resolve } = require('path');
const childProcess = require('child_process');
const fs = require('fs');
const writeFile = promisify(fs.writeFile);
const exec = promisify(childProcess.exec);
class Generator {
constructor(){
this.root = resolve(__dirname, 'generated');
}
async execute() {
await this.init();
for(let i=0; i < 10; ++i) {
const file = resolve(this.root, `file${i}.txt`);
await writeFile(file, 'Hello world');
// No need to check if it exists, if it wasn't created writeFile rejects
console.info('CREATE ', file);
await this.add(file);
}
return this.commit();
}
init(){
return exec(`mkdir ${this.root} && cd ${this.root} && git init`);
}
add(file){
return exec(`cd ${this.root} && git add ${file}`);
}
commit(){
return exec(`cd ${this.root} && git commit -m "chore: initial commit"`);
}
}
And now when doing:
new Generator()
  .execute()
  .then(() => console.log('Committed!'))
  .catch(err => console.error(err));
You will get your files created and added in order, and then the changes will be committed.
You are using the async version of fs.writeFile in a loop, which means you do not know the order in which the data will be written to disk, and the loop will not wait for each call of that async function before continuing.
You have to find a way to know when the last file has been written. One way to do that is to use the synchronous version of fs.writeFile, which is fs.writeFileSync, this way:
execute(){
this.init();
for(let i=0; i<10; ++i) {
const file = resolve(this.root, `file${i}.txt`);
try {
fs.writeFileSync(file, 'Hello world');
console.info('CREATE ', file);
if(fs.existsSync(file)) this.add(file);
} catch (err) {
console.error(err);
}
}
this.commit();
}
OK, I am just starting to learn Node.js, and I am having a little difficulty getting a good grasp on its async nature and on when/how to use callbacks to pass data along the way I need.
The concept of what I am trying to build is this. I have a small Node.js app that uses the FS package - defined as var fs = require("fs"). The app is responding to localhost:4000 right now. When I hit that URL, the app uses fs.readdir() to get all of the virtual host files in the directory that I pass to readdir().
Next, the app loops through those files and parses each one line by line and word by word (quick and dirty for now). I am using fs.readFile() to read the file and then literally doing lines = data.toString().split("\n") and then var words = lines[l].split(" ") to get to the data in the file I need. Each virtual host file looks something like this:
<VirtualHost *:80>
ServerName some-site.local
DocumentRoot "/path/to/the/docroot"
<Directory "/path/to/the/docroot">
Options Indexes FollowSymLinks Includes ExecCGI
AllowOverride All
Require all granted
</Directory>
ErrorLog "logs/some-site.local-error_log"
</VirtualHost>
Here is my main.js file (routes file):
var express = require("express"),
router = express.Router(),
async = require("async"),
directoryReader = require(__dirname + "/../lib/directoryReader"),
fileReader = require(__dirname + "/../lib/fileReader");
router.get("/", function(req, res) {
var response = [];
directoryReader.read(function(files) {
async.each(files, function(file, callback) {
fileReader.read(file, function(data) {
if (data.length > 0) {
response.push(data);
}
callback();
});
}, function(err){
if (err) throw err;
res.json(response);
});
});
});
module.exports = router;
My directoryReader.js file:
var fs = require("fs"),
fileReader = require(__dirname + "/../lib/fileReader");
var directoryReader = {
read: function(callback) {
var self = this;
fs.readdir("/etc/apache2/sites-available", function (err, files) {
if (err) throw err;
var output = [];
for(f in files) {
output.push(files[f]);
}
callback(output);
});
}
};
module.exports = directoryReader;
And my fileReader.js file:
var fs = require("fs");
var fileReader = {
read: function(file, callback) {
fs.readFile("/etc/apache2/sites-available/" + file, { encoding: "utf8" }, function (err, data) {
if (err) throw err;
var vHostStats = ["servername", "documentroot", "errorlog"],
lines = data.toString().split("\n"),
output = [];
for(l in lines) {
var words = lines[l].split(" ");
for(w in words) {
if (words[w].toLowerCase() == "<virtualhost") {
var site = {
"servername" : "",
"documentroot" : "",
"errorlog" : "",
"gitbranch" : ""
}
w++;
}
if (vHostStats.indexOf(words[w].toLowerCase()) !== -1) {
var key = words[w].toLowerCase();
w++;
site[key] = words[w];
}
if (words[w].toLowerCase() == "</virtualhost>" && site.documentroot != "") {
w++;
output.push(site);
var cmd = "cd " + site["documentroot"] + " && git rev-parse --abbrev-ref HEAD";
var branch = ...; // get Git branch based on the above command
site["gitbranch"] = branch;
}
}
}
callback(output);
});
}
};
module.exports = fileReader;
All of this code will spit out JSON. This all works fine, except for one part. The line in the fileReader.js file:
var branch = ...; // get Git branch based on the above command
I am trying to get this code to run a shell command and fetch the Git branch based on the document root directory. I then want to take the returned branch and add the value to the gitbranch property of the current site object during the loop. Hope this makes sense. I know there are probably questions on SO that cover something similar, and I have looked at many of them. I fear I am just not educated enough in Node.js yet to apply the answers from those SO questions to my particular use case.
Please let me know if there's anything I can add that would help anyone answer this question. I should note that this app is for personal use only, so the solution really just has to work, not be super elegant.
UPDATE: (5/1/2015)
Probably not the best solution, but I got what I wanted by using the new execSync added in v0.12.x:
var sh = require("child_process").execSync; // alias execSync as sh
if (words[w].toLowerCase() == "</virtualhost>" && site.documentroot != "") {
var cmd = "cd " + site["documentroot"] + " && git rev-parse --abbrev-ref HEAD";
var branch = sh(cmd, { encoding: "utf8" });
site["gitbranch"] = branch.toString().trim();
w++;
output.push(site);
}
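For completeness, a non-blocking alternative (a hedged sketch, not part of the original update) that fits the callback style used elsewhere in fileReader.js:
var cp = require("child_process");
// Run `git rev-parse` in the site's document root and hand the branch to a callback
function getGitBranch(documentRoot, done) {
    cp.exec("git rev-parse --abbrev-ref HEAD", { cwd: documentRoot }, function (err, stdout) {
        done(err, err ? null : stdout.toString().trim());
    });
}
With this version, site["gitbranch"] would have to be set and the site pushed to output inside the callback, which is exactly the kind of ordering that async.each in main.js is already managing.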