How to exit properly from node.js - javascript

The following code reads and imports many CSV files from disk into MongoDB, but Node.js won't exit after importing all the files unless it goes through the resizePhoto() function (which contains a process.exit after resizing the images).
How can I have it close properly after importing all files, without interrupting? If I add a process.exit in the .on('end') handler, it exits after importing the first file.
var importData = function(fileName) {
    // Get file from disk.
    var filePath = path.join(folder, fileName);
    // Read and import the CSV file.
    csv.fromPath(filePath, {
        objectMode: true,
        headers: keys
    })
    .on('data', function (data) {
        var Obj = new models[fileName](data);
        models[fileName].find({}).remove().exec();
        Obj.save(function (err, importedObj) {
            if (err) {
                console.log(fileName, err);
            } else if (fileName === 'PHOTOS') {
                resizePhoto(importedObj);
            }
        });
    })
    .on('end', function() {
        console.log(fileName + ': Imported.');
    });
};

module.exports = importData;

Use the async module's parallel method (https://github.com/caolan/async#parallel). It runs your tasks (the imports) in parallel and calls the final handler (the exit) after all tasks have ended.
In your case:
1) Somewhere in project
var csvImp = require('./importData.js');
async.parallel([
    function(done){ csvImp(name1, done); },
    function(done){ csvImp(name2, done); },
    ...
    function(done){ csvImp(nameN, done); }
],
function(err, results){
    process.exit();
});
2) in importData.js
var importData = function(fileName, done) {
    ...
    .on('end', function() {
        console.log(fileName + ': Imported.');
        done();
    });
So next you need to prepare the list of tasks, something like:
names.forEach(function(n){
    tasks.push(function(done){ csvImp(n, done); });
});
And call async.parallel with tasks.
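Putting it together, a minimal sketch (the file names here are placeholders; it assumes importData.js accepts the extra done callback as shown above):

var async = require('async');
var csvImp = require('./importData.js');

// Hypothetical list of CSV/model names to import
var names = ['PHOTOS', 'USERS', 'PRODUCTS'];

var tasks = names.map(function (n) {
    return function (done) { csvImp(n, done); };
});

async.parallel(tasks, function (err, results) {
    if (err) console.error(err);
    process.exit(err ? 1 : 0);
});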
Good luck)

Related

Node Child process is running continuously without closing after getting the data

I am working on a LoopBack project where I need to run a background job by spawning a Node child process. Since I am a beginner, the documentation is a little confusing: https://nodejs.org/api/child_process.html#child_process_child_process_spawn_command_args_options
parent.js
const path = require("path");
const spawn = require('child_process').spawn;

let child = spawn('node', [__dirname + '../../worker/worker.js']);

child.stdout.on('data', function (data) {
    console.log('data available ' + data);
});

child.stderr.on('data', function (data) {
    console.log('There was an error: ' + data);
});
worker.js
function jobListener() {
    let query = { jobname: "countProvider" };
    let result = Job.findOne({ where: query }, function(err, instance) {
        if (err) {
            console.log('Error', err);
        }
        // console.log(instance);
    });
    while (!result) {
        console.log(result);
        // await sleep(5000);
        console.log('checking for job in mongodb with a delay of 5 seconds');
    }
    if (result) {
        fetchProcedureRequest();
    }
}
jobListener();

function fetchProcedureRequest() {
    console.log('Fetching pREquest');
}
What I am trying to achieve: I am getting the data in the child process, but how can I close/exit the child process when the data is available in the parent? Somebody please help; any suggestion will be really appreciated.
Maybe you can use this, found here: How to kill childprocess in nodejs?
var proc = require('child_process').spawn('mongod');
proc.kill('SIGINT');
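Applied to the parent.js above, a minimal sketch (assuming the worker prints to stdout once the job is found; the worker path is an assumption based on the original snippet) is to kill the child inside the 'data' handler:

const path = require('path');
const spawn = require('child_process').spawn;

// Adjust the path to match your project layout
const child = spawn('node', [path.join(__dirname, '..', '..', 'worker', 'worker.js')]);

child.stdout.on('data', function (data) {
    console.log('data available ' + data);
    child.kill('SIGINT'); // stop the worker once the parent has the data
});

child.on('exit', function (code, signal) {
    console.log('worker exited', code, signal);
});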

How to return zip file from node js api and handle on client side?

I have an API that creates a zip file using the archiver module, and I would like to pass the zip back as a response and download it on the client side.
This is what my API that creates the zip looks like:
reports.get('/xxx/:fileName', async (req, res) => {
    var s3 = new AWS.S3();
    var archiver = require('archiver');
    var filenames = "xxx";
    var str_array = filenames.split(',');

    for (var i = 0; i < str_array.length; i++) {
        var filename = str_array[i].trim();
        localFileName = './temp/' + filename.substring(filename.indexOf("/") + 1);
        file = fs.createWriteStream(localFileName, { flags: 'a', encoding: 'utf-8', mode: 0666 });
        file.on('error', function(e) { console.error(e); });

        s3.getObject({
            Bucket: config.xxx,
            Key: filename
        })
        .on('error', function (err) {
            console.log(err);
        })
        .on('httpData', function (chunk) {
            file.on('open', function() {
                file.write(chunk);
            });
        })
        .on('httpDone', function () {
            file.end();
        })
        .send();
    }

    res.end("Files have been downloaded successfully");

    // create a file to stream archive data to.
    var output = fs.createWriteStream('example.zip');
    var archive = archiver('zip', {
        zlib: { level: 9 } // Sets the compression level.
    });

    // listen for all archive data to be written
    // 'close' event is fired only when a file descriptor is involved
    output.on('close', function() {
        console.log(archive.pointer() + ' total bytes');
        console.log('archiver has been finalized and the output file descriptor has closed.');
    });

    // This event is fired when the data source is drained no matter what was the data source.
    // It is not part of this library but rather from the NodeJS Stream API.
    // @see: https://nodejs.org/api/stream.html#stream_event_end
    output.on('end', function() {
        console.log('Data has been drained');
    });

    // good practice to catch warnings (ie stat failures and other non-blocking errors)
    archive.on('warning', function(err) {
        if (err.code === 'ENOENT') {
            // log warning
        } else {
            // throw error
            throw err;
        }
    });

    // good practice to catch this error explicitly
    archive.on('error', function(err) {
        throw err;
    });

    // pipe archive data to the file
    archive.pipe(output);

    // append files from a sub-directory, putting its contents at the root of archive
    archive.directory('./temp', false);

    // finalize the archive (ie we are done appending files but streams have to finish yet)
    // 'close', 'end' or 'finish' may be fired right after calling this method so register to them beforehand
    archive.finalize();
});
Also for reference here is another one of my APIs to show how I am accustomed to sending data back to the client:
reports.get('/xxx/:fileName', async (req, res) => {
    var s3 = new AWS.S3();
    var params = {
        Bucket: config.reportBucket,
        Key: req.params.fileName,
        Expires: 60 * 5
    };
    try {
        s3.getSignedUrl('getObject', params, function (err, url) {
            if (err) throw err;
            res.json(url);
        });
    } catch (err) {
        res.status(500).send(err.toString());
    }
});
How can I send the zip back as a response and download it on the client side to disk?
Since archive is streaming, I would assume it can be pipe(lined) directly to the response (res):
// Node.js v10+, if res is a proper stream
const { pipeline } = require('stream')
pipeline(archive, res, (err) => { if (err) console.error(err) })
// Alternatively (search for caveats of pipe vs. pipeline)
archive.pipe(res)
You should probably set some HTTP headers on res to tell the browser the MIME type and possibly a filename:
res.set({
    'Content-Type': 'application/zip',
    'Content-Disposition': 'attachment; filename="zip"'
})
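A minimal route sketch putting the two together (assuming Express and that ./temp already holds the downloaded files; the route path and zip name are placeholders), which avoids writing example.zip to disk at all:

const archiver = require('archiver');

reports.get('/download-zip', (req, res) => {
    res.set({
        'Content-Type': 'application/zip',
        'Content-Disposition': 'attachment; filename="reports.zip"'
    });

    const archive = archiver('zip', { zlib: { level: 9 } });
    archive.on('error', (err) => {
        console.error(err);
        res.destroy(err); // abort the response if archiving fails
    });

    archive.pipe(res);              // stream the zip straight to the response
    archive.directory('./temp', false);
    archive.finalize();
});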
Okay, so once you have written your file example.zip, you can easily follow the example mentioned in another answer and do:
var stat = fileSystem.statSync('example.zip');
res.writeHead(200, {
    'Content-Type': 'application/zip',
    'Content-Length': stat.size
});
var readStream = fileSystem.createReadStream('example.zip');
// We replaced all the event handlers with a simple call to readStream.pipe()
readStream.pipe(res);
This should work perfectly. Credits to OP

Nodejs fs module: How to readFile and create json object?

I am reading the data below from a file, and now I want to create a JSON object from it. How can I do that using the Node.js fs module?
app.js
var path = "./ditLogs/" + file;
fs.readFile(path, function(err, data) {
    console.log('reading file data', data.toString());
});
Here is the data in the file that I need to create JSON for:
file.txt
{"event":"test data"}
{"event":"test data"}
{"event":"test data"}
You can use this sample function:
function(strFromFile) {
    try {
        return JSON.parse(strFromFile);
    } catch (e) {
        return {};
    }
};
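Note that file.txt holds one JSON object per line, so calling JSON.parse on the whole file contents will throw and the function above will return {}. A small sketch that splits on newlines first (reusing the path from app.js above):

var fs = require('fs');

var path = "./ditLogs/" + file;
fs.readFile(path, 'utf8', function (err, data) {
    if (err) return console.error(err);
    var events = data
        .split('\n')
        .filter(function (line) { return line.trim() !== ''; })  // skip blank lines
        .map(function (line) { return JSON.parse(line); });
    console.log(events); // [ { event: 'test data' }, ... ]
});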
FS Module
While developing in NodeJS, the need to access a computer's file system is sometimes necessary. The FS module is a utility that assists with this process.
FS includes functions that allow different activities to be performed on the file system via a wrapper around the API.
It should be included/required/imported into any JS that needs to interact with the file system API.
var fs = require("fs");
These are the different methods you can use on this API which are all asynchronous:
fs.readFile
// Asynchronous read
fs.readFile('input.txt', function (err, data) {
    if (err) {
        return console.error(err);
    }
    console.log("Asynchronous read: " + data.toString());
});
fs.writeFile
fs.writeFile('input.txt', 'Simply Easy Learning!', function(err) {
    if (err) {
        return console.error(err);
    }
    console.log("Data written successfully!");
    console.log("Let's read newly written data");
    fs.readFile('input.txt', function (err, data) {
        if (err) {
            return console.error(err);
        }
        console.log("Asynchronous read: " + data.toString());
    });
});
fs.open
// Asynchronous - Opening File
console.log("Going to open file!");
fs.open('input.txt', 'r+', function(err, fd) {
    if (err) {
        return console.error(err);
    }
    console.log("File opened successfully!");
});
fs.stat (provides information about the file) e.g.
fs.stat('input.txt', function (err, stats) {
    if (err) {
        return console.error(err);
    }
    console.log(stats);
    console.log("Got file info successfully!");
    // Check file type
    console.log("isFile ? " + stats.isFile());
    console.log("isDirectory ? " + stats.isDirectory());
});
fs.read (lower-level than readFile; it works with a file descriptor and a buffer, and should not be the first choice for simply reading a whole file)
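A short sketch of fs.read, assuming input.txt from the examples above (it needs a file descriptor from fs.open and an explicit buffer):

fs.open('input.txt', 'r', function (err, fd) {
    if (err) {
        return console.error(err);
    }
    var buffer = Buffer.alloc(1024);
    // Read up to 1024 bytes from the start of the file into the buffer
    fs.read(fd, buffer, 0, buffer.length, 0, function (err, bytesRead, buf) {
        if (err) {
            return console.error(err);
        }
        console.log(buf.slice(0, bytesRead).toString());
        fs.close(fd, function () {});
    });
});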
fs.close
// Close the opened file.
fs.close(fd, function(err) {
    if (err) {
        console.log(err);
    }
    console.log("File closed successfully.");
});
fs.ftruncate (truncate an opened file)
fs.unlink (delete a file)
fs.unlink('input.txt', function(err) {
    if (err) {
        return console.error(err);
    }
    console.log("File deleted successfully!");
});
fs.mkdir (make new directory)
fs.mkdir('/tmp/test', function(err) {
    if (err) {
        return console.error(err);
    }
    console.log("Directory created successfully!");
});
fs.readdir (reads a directory)
fs.readdir("/tmp/", function(err, files) {
    if (err) {
        return console.error(err);
    }
    files.forEach(function (file) {
        console.log(file);
    });
});
fs.rmdir (remove directory)
fs.rmdir("/tmp/test", function(err) {
    if (err) {
        return console.error(err);
    }
    console.log("Going to read directory /tmp");
    fs.readdir("/tmp/", function(err, files) {
        if (err) {
            return console.error(err);
        }
        files.forEach(function (file) {
            console.log(file);
        });
    });
});
Synchronous functions:
readFileSync
// Synchronous read
var data = fs.readFileSync('input.txt');
console.log("Synchronous read: " + data.toString());
writeFileSync
// Synchronous write (returns undefined)
fs.writeFileSync('input.txt', 'asdasdasd');
Simply read using the line-by-line package: less headache, more control over the reading process (it can pause and resume reading, close the file descriptor on demand, skip N lines), with less code.
1) install:
npm i --save line-by-line
npm i --save lodash
2) implement
var lineByLine = require('line-by-line'),
    _ = require('lodash'),
    path = require('path');

var lines = [];
var filePath = path.join(__dirname, "ditLogs", file);
var fileReader = new lineByLine(filePath);

fileReader.on('line', function(line) {
    line = JSON.parse(line);
    if (_.isPlainObject(line) && !_.isEmpty(line)) {
        lines.push(line);
    }
    // optional
    doSomethingWithLine(line);
});

function doSomethingWithLine(line) {
    // for example you can save to db, send it somewhere using the request library, or just show it in the console
}

fileReader.on('error', function(error) {
    console.error(error);
    process.exit(-1);
});

fileReader.on('end', function() {
    doSomethingAfterParsingAllLines(lines);
});

function doSomethingAfterParsingAllLines(records) {
    // do something with data
}
'use strict';
const fs = require('fs');
let rawdata = fs.readFileSync('student.json');
let student = JSON.parse(rawdata);
console.log(student);

node.js code to append data to a file

How can I append data to a file using node.js?
I already have a file named myfile.json with data. I want to check if the file exists and then append some data to that file.
I'm using the following code:
var writeTempFile = function (reportPath, data, callback) {
    fs.writeFile(reportPath, data, function (err) {
        //if (err) //say(err);
        callback(err);
    });
}

writeTempFile(reportDir + '_' + query.jobid + ".json", data, function (err) {
    context.sendResponse(data, 200, {
        'Content-Type': 'text/html'
    });
});
You can use jsonfile
var jf = require('jsonfile');
var yourdata;
var file = '/tmp/data.json';

jf.readFile(file, function(err, obj) {
    if (!err) {
        var finalData = merge(obj, yourdata);
        jf.writeFile(file, finalData, function(err) {
            console.log(err);
        });
    }
});
You need to implement your merging logic in merge(object1, object2)
https://npmjs.org/package/jsonfile
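For example, a minimal merge could be a shallow Object.assign (an assumption; use whatever merging rules your data actually needs):

function merge(object1, object2) {
    // Shallow merge: keys in object2 overwrite keys in object1
    return Object.assign({}, object1, object2);
}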
Check out the following code.
function addToFile(reportPath, data, callback) {
    fs.appendFile(reportPath, data, function (err) {
        callback(err);
    });
}
Node offers the fs module to work with the file system.
To use this module do
var fs = require('fs')
To append some data to a file you can do:
fs.appendFile('message.txt', 'data to append', function (err) {
    if (err) throw err;
    console.log('The "data to append" was appended to file!');
});
Node offers you both synchronous and asynchronous methods to append data to a file. For more information please refer to the documentation.
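For instance, the synchronous variant looks like this (it throws on error instead of taking a callback):

try {
    fs.appendFileSync('message.txt', 'data to append');
    console.log('The "data to append" was appended to file!');
} catch (err) {
    console.error(err);
}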

Replace a string in a file with nodejs

I use the md5 grunt task to generate MD5 filenames. Now I want to rename the sources in the HTML file with the new filename in the callback of the task. I wonder what's the easiest way to do this.
You could use simple regex:
var result = fileAsString.replace(/string to be replaced/g, 'replacement');
So...
var fs = require('fs');

fs.readFile(someFile, 'utf8', function (err, data) {
    if (err) {
        return console.log(err);
    }
    var result = data.replace(/string to be replaced/g, 'replacement');

    fs.writeFile(someFile, result, 'utf8', function (err) {
        if (err) return console.log(err);
    });
});
Since replace wasn't working for me, I've created a simple npm package, replace-in-file, to quickly replace text in one or more files. It's partially based on @asgoth's answer.
Edit (3 October 2016): The package now supports promises and globs, and the usage instructions have been updated to reflect this.
Edit (16 March 2018): The package has amassed over 100k monthly downloads now and has been extended with additional features as well as a CLI tool.
Install:
npm install replace-in-file
Require module
const replace = require('replace-in-file');
Specify replacement options
const options = {
    //Single file
    files: 'path/to/file',

    //Multiple files
    files: [
        'path/to/file',
        'path/to/other/file',
    ],

    //Glob(s)
    files: [
        'path/to/files/*.html',
        'another/**/*.path',
    ],

    //Replacement to make (string or regex)
    from: /Find me/g,
    to: 'Replacement',
};
Asynchronous replacement with promises:
replace(options)
    .then(changedFiles => {
        console.log('Modified files:', changedFiles.join(', '));
    })
    .catch(error => {
        console.error('Error occurred:', error);
    });
Asynchronous replacement with callback:
replace(options, (error, changedFiles) => {
    if (error) {
        return console.error('Error occurred:', error);
    }
    console.log('Modified files:', changedFiles.join(', '));
});
Synchronous replacement:
try {
    let changedFiles = replace.sync(options);
    console.log('Modified files:', changedFiles.join(', '));
}
catch (error) {
    console.error('Error occurred:', error);
}
Perhaps the "replace" module (www.npmjs.org/package/replace) also would work for you. It would not require you to read and then write the file.
Adapted from the documentation:
// install:
npm install replace

// require:
var replace = require("replace");

// use:
replace({
    regex: "string to be replaced",
    replacement: "replacement string",
    paths: ['path/to/your/file'],
    recursive: true,
    silent: true,
});
You can also use the 'sed' function that's part of ShellJS ...
$ npm install [-g] shelljs
require('shelljs/global');
sed('-i', 'search_pattern', 'replace_pattern', file);
Full documentation ...
ShellJS - sed()
ShellJS
If someone wants to use the promise-based 'fs' module for the task:
const fs = require('fs').promises;
// Below statements must be wrapped inside the 'async' function:
const data = await fs.readFile(someFile, 'utf8');
const result = data.replace(/string to be replaced/g, 'replacement');
await fs.writeFile(someFile, result,'utf8');
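For example, wrapped in an async function (the file path here is a placeholder):

const fs = require('fs').promises;

async function replaceInFile(someFile) {
    const data = await fs.readFile(someFile, 'utf8');
    const result = data.replace(/string to be replaced/g, 'replacement');
    await fs.writeFile(someFile, result, 'utf8');
}

replaceInFile('./index.html').catch(console.error);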
You could process the file while it is being read by using streams. It's just like using buffers but with a more convenient API.
var fs = require('fs');

function searchReplaceFile(regexpFind, replace, cssFileName) {
    var file = fs.createReadStream(cssFileName, 'utf8');
    var newCss = '';

    file.on('data', function (chunk) {
        newCss += chunk.toString().replace(regexpFind, replace);
    });

    file.on('end', function () {
        fs.writeFile(cssFileName, newCss, function(err) {
            if (err) {
                return console.log(err);
            } else {
                console.log('Updated!');
            }
        });
    });
}

searchReplaceFile(/foo/g, 'bar', 'file.txt');
On Linux or Mac, keep it simple and just use sed with the shell. No external libraries required. The following code works on Linux.
const shell = require('child_process').execSync
shell(`sed -i "s!oldString!newString!g" ./yourFile.js`)
The sed syntax is a little different on Mac. I can't test it right now, but I believe you just need to add an empty string after the "-i":
const shell = require('child_process').execSync
shell(`sed -i "" "s!oldString!newString!g" ./yourFile.js`)
The "g" after the final "!" makes sed replace all instances on a line. Remove it, and only the first occurrence per line will be replaced.
Expanding on @Sanbor's answer, the most efficient way to do this is to read the original file as a stream, then stream each chunk into a new file, and finally replace the original file with the new file.
async function findAndReplaceFile(regexFindPattern, replaceValue, originalFile) {
    const updatedFile = `${originalFile}.updated`;

    return new Promise((resolve, reject) => {
        const readStream = fs.createReadStream(originalFile, { encoding: 'utf8', autoClose: true });
        const writeStream = fs.createWriteStream(updatedFile, { encoding: 'utf8', autoClose: true });

        // For each chunk, do the find & replace, and write it to the new file stream
        readStream.on('data', (chunk) => {
            chunk = chunk.toString().replace(regexFindPattern, replaceValue);
            writeStream.write(chunk);
        });

        // Once we've finished reading the original file...
        readStream.on('end', () => {
            writeStream.end(); // emits 'finish' event, executes below statement
        });

        // Replace the original file with the updated file
        writeStream.on('finish', async () => {
            try {
                await _renameFile(originalFile, updatedFile);
                resolve();
            } catch (error) {
                reject(`Error: Error renaming ${originalFile} to ${updatedFile} => ${error.message}`);
            }
        });

        readStream.on('error', (error) => reject(`Error: Error reading ${originalFile} => ${error.message}`));
        writeStream.on('error', (error) => reject(`Error: Error writing to ${updatedFile} => ${error.message}`));
    });
}

async function _renameFile(oldPath, newPath) {
    return new Promise((resolve, reject) => {
        fs.rename(oldPath, newPath, (error) => {
            if (error) {
                reject(error);
            } else {
                resolve();
            }
        });
    });
}

// Testing it...
(async () => {
    try {
        await findAndReplaceFile(/"some regex"/g, "someReplaceValue", "someFilePath");
    } catch (error) {
        console.log(error);
    }
})()
I ran into issues when replacing a small placeholder with a large string of code.
I was doing:
var replaced = original.replace('PLACEHOLDER', largeStringVar);
I figured out the problem was JavaScript's special replacement patterns, described here. Since the code I was using as the replacing string had some $ in it, it was messing up the output.
My solution was to use the function replacement option, which DOES NOT do any special replacement:
var replaced = original.replace('PLACEHOLDER', function() {
    return largeStringVar;
});
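Alternatively, you can escape the dollar signs in the replacement string yourself, since $$ inserts a single literal $ in a string replacement pattern:

var replaced = original.replace('PLACEHOLDER', largeStringVar.replace(/\$/g, '$$$$'));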
ES2017/8 for Node 7.6+ with a temporary write file for atomic replacement.
const Promise = require('bluebird')
const fs = Promise.promisifyAll(require('fs'))

async function replaceRegexInFile(file, search, replace) {
    let contents = await fs.readFileAsync(file, 'utf8')
    let replaced_contents = contents.replace(search, replace)
    let tmpfile = `${file}.jstmpreplace`
    await fs.writeFileAsync(tmpfile, replaced_contents, 'utf8')
    await fs.renameAsync(tmpfile, file)
    return true
}
Note, only for smallish files as they will be read into memory.
This may help someone. This is a little different than just a global replace.
From the terminal we run:
node replace.js
replace.js:
function processFile(inputFile, repString = "../") {
    var fs = require('fs'),
        readline = require('readline'),
        instream = fs.createReadStream(inputFile),
        outstream = new (require('stream'))(),
        rl = readline.createInterface(instream, outstream);
    var formatted = '';

    const regex = /<xsl:include href="([^"]*)" \/>$/gm;

    rl.on('line', function (line) {
        let url = '';
        let m;
        while ((m = regex.exec(line)) !== null) {
            // This is necessary to avoid infinite loops with zero-width matches
            if (m.index === regex.lastIndex) {
                regex.lastIndex++;
            }
            url = m[1];
        }

        let re = new RegExp('^.* <xsl:include href="(.*?)" \/>.*$', 'gm');
        formatted += line.replace(re, `\t<xsl:include href="${repString}${url}" />`);
        formatted += "\n";
    });

    rl.on('close', function (line) {
        fs.writeFile(inputFile, formatted, 'utf8', function (err) {
            if (err) return console.log(err);
        });
    });
}

// path is relative to where you're running the command from
processFile('build/some.xslt');
This is what it does: we have several files that contain xsl:include elements, but in development we need the path to move down a level. From this
<xsl:include href="common/some.xslt" />
to this
<xsl:include href="../common/some.xslt" />
So we end up running two regex patterns, one to get the href and the other to write it. There is probably a better way to do this, but it works for now.
Thanks
Normally, I use tiny-replace-files to replace text in one or more files. This package is smaller and lighter...
https://github.com/Rabbitzzc/tiny-replace-files
import { replaceStringInFilesSync } from 'tiny-replace-files'

const options = {
    files: 'src/targets/index.js',
    from: 'test-plugin',
    to: 'self-name',
}

// await
const result = replaceStringInFilesSync(options)
console.info(result)
I would use a duplex stream instead, as documented here: nodejs doc duplex streams.
A Transform stream is a Duplex stream where the output is computed in
some way from the input.
<p>Please click in the following {{link}} to verify the account</p>
function renderHTML(templatePath: string, object) {
    const template = fileSystem.readFileSync(path.join(Application.staticDirectory, templatePath + '.html'), 'utf8');
    return template.match(/\{{(.*?)\}}/ig).reduce((acc, binding) => {
        const property = binding.substring(2, binding.length - 2);
        return `${acc}${template.replace(/\{{(.*?)\}}/, object[property])}`;
    }, '');
}

renderHTML(templateName, { link: 'SomeLink' })
For sure you can improve the template-reading function to read as a stream and compose the bytes line by line to make it more efficient; a sketch of that idea follows below.
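A minimal sketch of the streaming idea with a Transform stream (template.html, rendered.html and the {{link}} placeholder are assumptions taken from the snippet above; a production version would also need to handle a placeholder split across chunk boundaries):

const fs = require('fs');
const { Transform } = require('stream');

// Replace {{link}} in each chunk as it flows through the pipeline
const replaceLink = new Transform({
    transform(chunk, encoding, callback) {
        callback(null, chunk.toString().replace(/\{\{link\}\}/g, 'SomeLink'));
    }
});

fs.createReadStream('template.html', { encoding: 'utf8' })
    .pipe(replaceLink)
    .pipe(fs.createWriteStream('rendered.html'));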
