Replace a string in a file with nodejs - javascript
I use the md5 grunt task to generate MD5 filenames. Now I want to rename the sources in the HTML file with the new filename in the callback of the task. I wonder what's the easiest way to do this.
You could use a simple regex:
var result = fileAsString.replace(/string to be replaced/g, 'replacement');
So...
var fs = require('fs')

fs.readFile(someFile, 'utf8', function (err, data) {
  if (err) {
    return console.log(err);
  }
  var result = data.replace(/string to be replaced/g, 'replacement');

  fs.writeFile(someFile, result, 'utf8', function (err) {
    if (err) return console.log(err);
  });
});
Since replace wasn't working for me, I've created a simple npm package replace-in-file to quickly replace text in one or more files. It's partially based on @asgoth's answer.
Edit (3 October 2016): The package now supports promises and globs, and the usage instructions have been updated to reflect this.
Edit (16 March 2018): The package has amassed over 100k monthly downloads now and has been extended with additional features as well as a CLI tool.
Install:
npm install replace-in-file
Require module
const replace = require('replace-in-file');
Specify replacement options
const options = {

  //Single file
  files: 'path/to/file',

  //Multiple files
  files: [
    'path/to/file',
    'path/to/other/file',
  ],

  //Glob(s)
  files: [
    'path/to/files/*.html',
    'another/**/*.path',
  ],

  //Replacement to make (string or regex)
  from: /Find me/g,
  to: 'Replacement',
};
Asynchronous replacement with promises:
replace(options)
  .then(changedFiles => {
    console.log('Modified files:', changedFiles.join(', '));
  })
  .catch(error => {
    console.error('Error occurred:', error);
  });
Asynchronous replacement with callback:
replace(options, (error, changedFiles) => {
  if (error) {
    return console.error('Error occurred:', error);
  }
  console.log('Modified files:', changedFiles.join(', '));
});
Synchronous replacement:
try {
  let changedFiles = replace.sync(options);
  console.log('Modified files:', changedFiles.join(', '));
}
catch (error) {
  console.error('Error occurred:', error);
}
Perhaps the "replace" module (www.npmjs.org/package/replace) also would work for you. It would not require you to read and then write the file.
Adapted from the documentation:
// install:
npm install replace

// require:
var replace = require("replace");

// use:
replace({
  regex: "string to be replaced",
  replacement: "replacement string",
  paths: ['path/to/your/file'],
  recursive: true,
  silent: true,
});
You can also use the 'sed' function that's part of ShellJS ...
$ npm install [-g] shelljs
require('shelljs/global');
sed('-i', 'search_pattern', 'replace_pattern', file);
Full documentation ...
ShellJS - sed()
ShellJS
If someone wants to use the promise-based 'fs' module for the task:

const fs = require('fs').promises;

// The statements below must be wrapped inside an 'async' function:
const data = await fs.readFile(someFile, 'utf8');
const result = data.replace(/string to be replaced/g, 'replacement');
await fs.writeFile(someFile, result, 'utf8');
You could process the file while it is being read by using streams. It's just like using buffers, but with a more convenient API.
var fs = require('fs');

function searchReplaceFile(regexpFind, replace, cssFileName) {
  var file = fs.createReadStream(cssFileName, 'utf8');
  var newCss = '';

  file.on('data', function (chunk) {
    // Note: a match that straddles two chunks will not be replaced
    newCss += chunk.toString().replace(regexpFind, replace);
  });

  file.on('end', function () {
    fs.writeFile(cssFileName, newCss, function(err) {
      if (err) {
        return console.log(err);
      } else {
        console.log('Updated!');
      }
    });
  });
}

searchReplaceFile(/foo/g, 'bar', 'file.txt');
On Linux or Mac, keep it simple and just use sed with the shell. No external libraries are required. The following code works on Linux.
const shell = require('child_process').execSync
shell(`sed -i "s!oldString!newString!g" ./yourFile.js`)
The sed syntax is a little different on Mac. I can't test it right now, but I believe you just need to add an empty string after the "-i":
const shell = require('child_process').execSync
shell(`sed -i "" "s!oldString!newString!g" ./yourFile.js`)
The "g" after the final "!" makes sed replace all instances on a line. Remove it, and only the first occurrence per line will be replaced.
Expanding on @Sanbor's answer, the most efficient way to do this is to read the original file as a stream, stream each transformed chunk into a new file, and finally replace the original file with the new file.
const fs = require('fs');

async function findAndReplaceFile(regexFindPattern, replaceValue, originalFile) {
  const updatedFile = `${originalFile}.updated`;

  return new Promise((resolve, reject) => {

    const readStream = fs.createReadStream(originalFile, { encoding: 'utf8', autoClose: true });
    const writeStream = fs.createWriteStream(updatedFile, { encoding: 'utf8', autoClose: true });

    // For each chunk, do the find & replace, and write it to the new file stream
    readStream.on('data', (chunk) => {
      chunk = chunk.toString().replace(regexFindPattern, replaceValue);
      writeStream.write(chunk);
    });

    // Once we've finished reading the original file...
    readStream.on('end', () => {
      writeStream.end(); // emits 'finish' event, executes below statement
    });

    // Replace the original file with the updated file
    writeStream.on('finish', async () => {
      try {
        await _renameFile(originalFile, updatedFile);
        resolve();
      } catch (error) {
        reject(`Error: Error renaming ${originalFile} to ${updatedFile} => ${error.message}`);
      }
    });

    readStream.on('error', (error) => reject(`Error: Error reading ${originalFile} => ${error.message}`));
    writeStream.on('error', (error) => reject(`Error: Error writing to ${updatedFile} => ${error.message}`));
  });
}

async function _renameFile(oldPath, newPath) {
  return new Promise((resolve, reject) => {
    fs.rename(oldPath, newPath, (error) => {
      if (error) {
        reject(error);
      } else {
        resolve();
      }
    });
  });
}

// Testing it...
(async () => {
  try {
    await findAndReplaceFile(/"some regex"/g, "someReplaceValue", "someFilePath");
  } catch (error) {
    console.log(error);
  }
})()
I ran into issues when replacing a small placeholder with a large string of code.
I was doing:
var replaced = original.replace('PLACEHOLDER', largeStringVar);
I figured out the problem was JavaScript's special replacement patterns, described here. Since the code I was using as the replacing string had some $ in it, it was messing up the output.
My solution was to use the function replacement option, which DOES NOT do any special replacement:
var replaced = original.replace('PLACEHOLDER', function() {
  return largeStringVar;
});
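For reference (my addition, not part of the original answer), the special patterns can also be neutralised by escaping the dollar signs up front, since $$ in a replacement string inserts a single literal $:

var replaced = original.replace('PLACEHOLDER', largeStringVar.replace(/\$/g, '$$$$'));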
ES2017/8 for Node 7.6+ with a temporary write file for atomic replacement.
const Promise = require('bluebird')
const fs = Promise.promisifyAll(require('fs'))

async function replaceRegexInFile(file, search, replace){
  let contents = await fs.readFileAsync(file, 'utf8')
  let replaced_contents = contents.replace(search, replace)
  let tmpfile = `${file}.jstmpreplace`
  await fs.writeFileAsync(tmpfile, replaced_contents, 'utf8')
  await fs.renameAsync(tmpfile, file)
  return true
}
Note: this is only for smallish files, as they will be read into memory.
This may help someone:
This is a little different than just a global replace.
From the terminal, we run
node replace.js
replace.js:
function processFile(inputFile, repString = "../") {
  var fs = require('fs'),
    readline = require('readline'),
    instream = fs.createReadStream(inputFile),
    outstream = new (require('stream'))(),
    rl = readline.createInterface(instream, outstream);
  var formatted = '';

  const regex = /<xsl:include href="([^"]*)" \/>$/gm;

  rl.on('line', function (line) {
    let url = '';
    let m;
    while ((m = regex.exec(line)) !== null) {
      // This is necessary to avoid infinite loops with zero-width matches
      if (m.index === regex.lastIndex) {
        regex.lastIndex++;
      }
      url = m[1];
    }

    let re = new RegExp('^.* <xsl:include href="(.*?)" />.*$', 'gm');
    formatted += line.replace(re, `\t<xsl:include href="${repString}${url}" />`);
    formatted += "\n";
  });

  rl.on('close', function (line) {
    fs.writeFile(inputFile, formatted, 'utf8', function (err) {
      if (err) return console.log(err);
    });
  });
}

// path is relative to where you're running the command from
processFile('build/some.xslt');
This is what it does.
We have several files that contain xsl:include references.
However, in development we need the path to go up a level.
From this
<xsl:include href="common/some.xslt" />
to this
<xsl:include href="../common/some.xslt" />
So we end up running two regex patterns: one to get the href and the other to write it. There is probably a better way to do this (a single-pass sketch follows below), but it works for now.
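As one possible better way (my addition, reusing the same processFile variables), the exec loop and the second regex could be collapsed into a single replace with a capture-group backreference:

rl.on('line', function (line) {
  // Hypothetical single-pass version: $1 back-references the captured href value
  formatted += line.replace(/^.*<xsl:include href="([^"]*)" \/>.*$/, `\t<xsl:include href="${repString}$1" />`) + "\n";
});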
Thanks
Normally, I use tiny-replace-files to replace text in one or more files. This package is smaller and lighter...
https://github.com/Rabbitzzc/tiny-replace-files
import { replaceStringInFilesSync } from 'tiny-replace-files'

const options = {
  files: 'src/targets/index.js',
  from: 'test-plugin',
  to: 'self-name',
}

// synchronous variant, so no await is needed
const result = replaceStringInFilesSync(options)
console.info(result)
I would use a duplex stream instead, as documented in the Node.js docs on duplex streams:
A Transform stream is a Duplex stream where the output is computed in
some way from the input.
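Before the original answer's template example below, here is a minimal Transform-stream sketch of that idea (my addition; 'in.html' and 'out.html' are placeholder paths, and a match that spans two chunks would be missed):

const fs = require('fs');
const { Transform } = require('stream');

// Build a Transform stream that applies a global regex replacement to each chunk
function replaceTransform(search, replacement) {
  return new Transform({
    transform(chunk, encoding, callback) {
      callback(null, chunk.toString().replace(search, replacement));
    }
  });
}

fs.createReadStream('in.html', 'utf8')
  .pipe(replaceTransform(/string to be replaced/g, 'replacement'))
  .pipe(fs.createWriteStream('out.html'));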
<p>Please click in the following {{link}} to verify the account</p>
function renderHTML(templatePath: string, object) {
  const template = fileSystem.readFileSync(path.join(Application.staticDirectory, templatePath + '.html'), 'utf8');
  return template.match(/\{{(.*?)\}}/ig).reduce((acc, binding) => {
    const property = binding.substring(2, binding.length - 2);
    return `${acc}${template.replace(/\{{(.*?)\}}/, object[property])}`;
  }, '');
}

renderHTML(templateName, { link: 'SomeLink' })
For sure, you can improve the template-reading function to read as a stream and compose the bytes line by line to make it more efficient.
Related
Where is memory leak in my JavaScript (NodeJS) code?
I have a simple script to handle a CSV file of about 10GB. The idea is pretty simple. Open the file as a stream. Parse CSV objects from it. Modify the objects. Make an output stream to a new file. I made the following code, but it causes a memory leak. I have tried a lot of different things, but nothing helps. The memory leak disappears if I remove the transformer from the pipes. Maybe it causes the memory leak. I run the code under NodeJS. Can you help me find where I am wrong?

'use strict';

import fs from 'node:fs';
import {parse, transform, stringify} from 'csv';
import lineByLine from 'n-readlines';

// big input file
const inputFile = './input-data.csv';

// read headers first
const linesReader = new lineByLine(inputFile);
const firstLine = linesReader.next();
linesReader.close();
const headers = firstLine.toString()
  .split(',')
  .map(header => {
    return header
      .replace(/^"/, '')
      .replace(/"$/, '')
      .replace(/\s+/g, '_')
      .replace('(', '_')
      .replace(')', '_')
      .replace('.', '_')
      .replace(/_+$/, '');
  });

// file stream
const fileStream1 = fs.createReadStream(inputFile);
// parser stream
const parserStream1 = parse({delimiter: ',', cast: true, columns: headers, from_line: 1});
// transformer
const transformer = transform(function(record) {
  return Object.assign({}, record, {
    SomeField: 'BlaBlaBla',
  });
});
// stringifier stream
const stringifier = stringify({delimiter: ','});

console.log('Loading data...');

// chain of pipes
fileStream1.on('error', err => { console.log(err); })
  .pipe(parserStream1).on('error', err => { console.log(err); })
  .pipe(transformer).on('error', err => { console.log(err); })
  .pipe(stringifier).on('error', err => { console.log(err); })
  .pipe(fs.createWriteStream('./_data/new-data.csv')).on('error', err => { console.log(err); })
  .on('finish', () => { console.log('Loading data finished!'); });
Watch for changes in a text file, and handling when this file doesn't exist
I've been making a project recently and I basically need to check for new text in a text file. My code was this:

const fs = require('fs');

fs.watch('./file.txt', (event, filename) => {
  fs.readFile('./file.txt', (err, data) => {
    if (err) throw err;
    data = JSON.parse(data);
    console.log(data);
  });
});

It worked great. However, sometimes I must delete this file for whatever reason, and then my code crashes too! Any idea on how to handle this? Thank you for your answers.
Node's built-in module fs doesn't support file deletion detection very well. There is a workaround using a package called nsfw, which is a wrapper around a native library that provides much better support for deletion detection. The API is a bit odd but it is a solid package nonetheless. Here is an example of what you're attempting to do using nsfw.

const nsfw = require("nsfw");
const path = require("path");
const fs = require("fs");

const file = path.join(__dirname, "file.txt");
let watcher;

nsfw(
  file,
  ([event, ...restEvents]) => {
    switch (event.action) {
      case nsfw.actions.DELETED: {
        watcher.stop();
        return; // or handle this however you need to..
      }
      default: {
        fs.readFile(file, (err, data) => {
          if (err) throw err;
          try {
            data = JSON.parse(data);
            console.log(data);
          } catch (error) {
            console.error(error)
          }
        });
      }
    }
  }
)
  .then((w) => {
    watcher = w;
    watcher.start()
  });
npm package csvtojson CSV Parse Error: Error: unclosed_quote
Node version: v10.19.0
Npm version: 6.13.4
Npm package: csvtojson (Package Link)

csvtojson({
  "delimiter": ";",
  "fork": true
})
  .fromStream(fileReadStream)
  .subscribe((dataObj) => {
    console.log(dataObj);
  }, (err) => {
    console.error(err);
  }, (success) => {
    console.log(success);
  });

While trying to handle a large CSV file (about 1.3 million records), I face the error "CSV Parse Error: Error: unclosed_quote." after certain records (e.g. after 400+ records) have been processed successfully. From the CSV file I don't see any problems with data formatting there; however, the parser might be raising this error because of a "\n" character being found inside the column/field value. Is there a solution already available with this package? Or is there a workaround to handle this error? Or is there a way to skip CSV rows having any sort of error, not only this one, to let the entire CSV-to-JSON parsing work without the processing getting stuck? Any help will be much appreciated.
I've played about with this, and it's possible to hook into this using a CSV File Line Hook, csv-file-line-hook; you can check for invalid lines and either repair or simply invalidate them. The example below will simply skip the invalid lines (missing end quotes).

example.js

const fs = require("fs");
let fileReadStream = fs.createReadStream("test.csv");
let invalidLineCount = 0;
const csvtojson = require("csvtojson");

csvtojson({ "delimiter": ";", "fork": true })
  .preFileLine((fileLineString, lineIdx)=> {
    let invalidLinePattern = /^['"].*[^"'];/;
    if (invalidLinePattern.test(fileLineString)) {
      console.log(`Line #${lineIdx + 1} is invalid, skipping:`, fileLineString);
      fileLineString = "";
      invalidLineCount++;
    }
    return fileLineString
  })
  .fromStream(fileReadStream)
  .subscribe((dataObj) => {
    console.log(dataObj);
  },
  (err) => {
    console.error("Error:", err);
  },
  (success) => {
    console.log("Skipped lines:", invalidLineCount);
    console.log("Success");
  });

test.csv

Name;Age;Profession
Bob;34;"Sales,Marketing"
Sarah;31;"Software Engineer"
James;45;Driver
"Billy, ;35;Manager
"Timothy;23;"QA
This regex works better:

/^(?:[^"\\]|\\.|"(?:\\.|[^"\\])*")*$/g

Here is a more complex working script for big files, reading each line:

import csv from 'csvtojson'
import fs from 'fs-extra'
import lineReader from 'line-reader'
import { __dirname } from '../../../utils.js'

// Note: log and BaseError are assumed to come from the author's own utilities
const CSV2JSON = async(dumb, editDumb, headers, {
  options = {
    trim: true,
    delimiter: '|',
    quote: '"',
    escape: '"',
    fork: true,
    headers: headers
  }
} = {}) => {
  try {
    log(`\n\nStarting CSV2JSON - Current directory: ${__dirname()} - Please wait..`)
    await new Promise((resolve, reject) => {
      let firstLine, counter = 0
      lineReader.eachLine(dumb, async(line, last) => {
        counter++
        // log(`line before convert: ${line}`)
        let json = (
          await csv(options).fromString(headers + '\n\r' + line)
            .preFileLine((fileLineString, lineIdx) => {
              // if it is not the first line
              // eslint-disable-next-line max-len
              if (counter !== 1 && !fileLineString.match(/^(?:[^"\\]|\\.|"(?:\\.|[^"\\])*")*$/g)) {
                // eslint-disable-next-line max-len
                console.log(`Line #${lineIdx + 1} is invalid. It has unescaped quotes. We will skip this line.. Invalid Line: ${fileLineString}`)
                fileLineString = ''
              }
              return fileLineString
            })
            .on('error', e => {
              e = `Error while converting CSV to JSON.
              Line before convert: ${line}
              Error: ${e}`
              throw new BaseError(e)
            })
        )[0]
        // log(`line after convert: ${json}`)
        if (json) {
          json = JSON.stringify(json).replace(/\\"/g, '')
          if (json.match(/^(?:[^"\\]|\\.|"(?:\\.|[^"\\])*")*$/g)) {
            await fs.appendFile(editDumb, json)
          }
        }
        if (last) {
          resolve()
        }
      })
    })
  } catch (e) {
    throw new BaseError(`Error while converting CSV to JSON - Error: ${e}`)
  }
}

export { CSV2JSON }
How to check if each file in a path is accessible for reading or not
I am trying to read the contents of a specific path. For that purpose, I used the following code:

code1:

const contentsOfPersonalFolder = fs.readdirSync(rootPathToPersonal);

but I know in advance that I do not have access permission to read some of the contents that will be returned from the previous line of code. To check whether or not I have access permission to read some files, I would use the following code:

code2:

try {
  fs.accessSync(path, fs.constants.R_OK);
  logger.info('The directory: ', path, 'can be read');
} catch (err) {
  logger.error('The directory: ', path, 'can not be read due to inaccessibility');
}

The problem now is that the code in code1 returns an array of all available files in the specified path, and if one of these files is not accessible due to read protection, it will throw and the program will crash. What I want to achieve is to iterate through all the available files in the specified path in code1 and then check each item using the code in code2; if the file is accessible for reading I would like to do some logic, and if it is not accessible for reading I would do something else. Please let me know how to achieve that.
You could use fs.access to check the user's permissions:
https://nodejs.org/api/fs.html#fs_fs_access_path_mode_callback

const testFolder = './tests/';
const fs = require('fs');

fs.readdir(testFolder, (err, files) => {
  files.forEach(file => {
    console.log(file);
    fs.access(file, fs.constants.R_OK, (err) => {
      if (err) {
        console.error("file is not readable");
        return;
      }
      // do your reading operations
    });
  });
})
const fs = require('fs');

const isAvailableToRead = file => {
  try {
    fs.accessSync(file, fs.constants.R_OK);
    return true;
  } catch (err) {
    return false;
  }
}

const readDirectory = path => {
  const files = fs.readdirSync(path);
  files.forEach(file => {
    if (isAvailableToRead(file)) {
      console.log(`Do some logic ${file}`);
    }
  });
}

readDirectory(__dirname);
How to zip a directory with node.js [duplicate]
I need to zip an entire directory using Node.js. I'm currently using node-zip and each time the process runs it generates an invalid ZIP file (as you can see from this Github issue). Is there another, better, Node.js option that will allow me to ZIP up a directory?

EDIT: I ended up using archiver

writeZip = function(dir, name) {
  var zip = new JSZip(),
    code = zip.folder(dir),
    output = zip.generate(),
    filename = ['jsd-', name, '.zip'].join('');
  fs.writeFileSync(baseDir + filename, output);
  console.log('creating ' + filename);
};

sample value for parameters:
dir = /tmp/jsd-<randomstring>/
name = <randomstring>

UPDATE: For those asking about the implementation I used, here's a link to my downloader:
I ended up using archiver lib. Works great.

Example

var file_system = require('fs');
var archiver = require('archiver');

var output = file_system.createWriteStream('target.zip');
var archive = archiver('zip');

output.on('close', function () {
  console.log(archive.pointer() + ' total bytes');
  console.log('archiver has been finalized and the output file descriptor has closed.');
});

archive.on('error', function(err){
  throw err;
});

archive.pipe(output);

// append files from a sub-directory, putting its contents at the root of archive
archive.directory(source_dir, false);

// append files from a sub-directory and naming it `new-subdir` within the archive
archive.directory('subdir/', 'new-subdir');

archive.finalize();
I'm not going to show something new, just wanted to summarise the solutions above for those who like Promises as much as I do 😉.

const fs = require('fs');
const archiver = require('archiver');

/**
 * @param {String} sourceDir: /some/folder/to/compress
 * @param {String} outPath: /path/to/created.zip
 * @returns {Promise}
 */
function zipDirectory(sourceDir, outPath) {
  const archive = archiver('zip', { zlib: { level: 9 }});
  const stream = fs.createWriteStream(outPath);

  return new Promise((resolve, reject) => {
    archive
      .directory(sourceDir, false)
      .on('error', err => reject(err))
      .pipe(stream)
    ;

    stream.on('close', () => resolve());
    archive.finalize();
  });
}

Hope it will help someone 🤞
Use Node's native child_process api to accomplish this. No need for third party libs. Two lines of code.

const child_process = require("child_process");
child_process.execSync(`zip -r <DESIRED_NAME_OF_ZIP_FILE_HERE> *`, {
  cwd: <PATH_TO_FOLDER_YOU_WANT_ZIPPED_HERE>
});

The example above showcases the synchronous API. You can also use child_process.exec(path, options, callback) if you want async behavior. There are a lot more options you can specify other than cwd to further fine-tune your request.

If you don't have the ZIP utility: this question specifically asks about the zip utility for archiving/compression purposes, so this example assumes you have the zip utility installed on your system. For completeness' sake, some operating systems may not have the utility installed by default. In that case you have at least three options:

1. Work with the archiving/compression utility that is native to your platform. Replace the shell command in the above Node.js code with one from your system. For example, Linux distros usually come with tar/gzip utilities: tar -czf <DESIRED_NAME_OF_ZIP_FILE_HERE> <PATH_TO_FOLDER_YOU_WANT_ZIPPED_HERE>. This is a nice option as you don't need to install anything new onto your operating system or manage another dependency (kind of the whole point of this answer).

2. Obtain the zip binary for your OS/distribution. For example on Ubuntu: apt install zip. The ZIP utility has been tried and tested for decades, it's fairly ubiquitous and it's a safe choice. Do a quick google search or go to the creator, Info-ZIP's, website for downloadable binaries.

3. Use a third party library/module (of which there are plenty on NPM). I don't prefer this option. However, if you don't really care to understand the native methods and introducing a new dependency is a non-issue, this is also a valid option.
This is another library which zips the folder in one line: zip-local

var zipper = require('zip-local');

zipper.sync.zip("./hello/world/").compress().save("pack.zip");
archive.bulk is now deprecated; the new method to be used for this is glob:

// assumes fs and an archiver instance (archive = archiver('zip')) as in the answer above
var fileName = 'zipOutput.zip'
var fileOutput = fs.createWriteStream(fileName);

fileOutput.on('close', function () {
  console.log(archive.pointer() + ' total bytes');
  console.log('archiver has been finalized and the output file descriptor has closed.');
});

archive.pipe(fileOutput);

archive.glob("../dist/**/*"); //some glob pattern here
archive.glob("../dist/.htaccess"); //another glob pattern
// add as many as you like

archive.on('error', function(err){
  throw err;
});
archive.finalize();
To include all files and directories:

archive.bulk([
  { expand: true, cwd: "temp/freewheel-bvi-120", src: ["**/*"], dot: true }
]);

It uses node-glob (https://github.com/isaacs/node-glob) underneath, so any matching expression compatible with that will work.
To pipe the result to the response object (scenarios where there is a need to download the zip rather than store it locally):

archive.pipe(res);

Sam's hints for accessing the content of the directory worked for me:

src: ["**/*"]
I have found this small library that encapsulates what you need.

npm install zip-a-folder

const zipAFolder = require('zip-a-folder');
await zipAFolder.zip('/path/to/the/folder', '/path/to/archive.zip');

https://www.npmjs.com/package/zip-a-folder
Adm-zip has problems just compressing an existing archive (https://github.com/cthackers/adm-zip/issues/64), as well as corruption when compressing binary files.
I've also run into compression corruption issues with node-zip (https://github.com/daraosn/node-zip/issues/4).
node-archiver is the only one that seems to work well for compressing, but it doesn't have any uncompress functionality.
Since archiver has not been compatible with newer versions of webpack for a long time, I recommend using zip-lib.

var zl = require("zip-lib");

zl.archiveFolder("path/to/folder", "path/to/target.zip").then(function () {
  console.log("done");
}, function (err) {
  console.log(err);
});
As of today, I'm using AdmZip and it works great:

import AdmZip = require('adm-zip');

export async function archiveFile() {
  try {
    const zip = new AdmZip();
    const outputDir = "/output_file_dir.zip";
    zip.addLocalFolder("./yourFolder")
    zip.writeZip(outputDir);
  } catch (e) {
    console.log(`Something went wrong ${e}`);
  }
}
An "import ... from" version, based on https://stackoverflow.com/a/51518100

To zip a single directory:

import archiver from 'archiver';
import fs from 'fs';

export default zipDirectory;

/**
 * From: https://stackoverflow.com/a/51518100
 * @param {String} sourceDir: /some/folder/to/compress
 * @param {String} outPath: /path/to/created.zip
 * @returns {Promise}
 */
function zipDirectory(sourceDir, outPath) {
  const archive = archiver('zip', { zlib: { level: 9 }});
  const stream = fs.createWriteStream(outPath);

  return new Promise((resolve, reject) => {
    archive
      .directory(sourceDir, false)
      .on('error', err => reject(err))
      .pipe(stream)
    ;

    stream.on('close', () => resolve());
    archive.finalize();
  });
}

To zip multiple directories:

import archiver from 'archiver';
import fs from 'fs';

export default zipDirectories;

/**
 * Adapted from: https://stackoverflow.com/a/51518100
 * @param {String} sourceDirs: folders to compress
 * @param {String} outPath: /path/to/created.zip
 * @returns {Promise}
 */
function zipDirectories(sourceDirs, outPath) {
  const archive = archiver('zip', { zlib: { level: 9 }});
  const stream = fs.createWriteStream(outPath);

  return new Promise((resolve, reject) => {
    var result = archive;
    sourceDirs.forEach(sourceDir => {
      result = result.directory(sourceDir, false);
    });

    result
      .on('error', err => reject(err))
      .pipe(stream)
    ;

    stream.on('close', () => resolve());
    archive.finalize();
  });
}
You can try it in a simple way:

Install zip-dir:

npm install zip-dir

and use it:

var zipdir = require('zip-dir');

let foldername = src_path.split('/').pop()
zipdir(<<src_path>>, { saveTo: 'demo.zip' }, function (err, buffer) {

});
I ended up wrapping archiver to emulate JSZip, as refactoring through my project would take too much effort. I understand Archiver might not be the best choice, but here you go.

// USAGE:
const zip = JSZipStream.to(myFileLocation)
  .onDone(() => {})
  .onError(() => {});

zip.file('something.txt', 'My content');
zip.folder('myfolder').file('something-inFolder.txt', 'My content');
zip.finalize();

// NodeJS file content:
var fs = require('fs');
var path = require('path');
var archiver = require('archiver');

function zipper(archive, settings) {
  return {
    output: null,
    streamToFile(dir) {
      const output = fs.createWriteStream(dir);
      this.output = output;
      archive.pipe(output);
      return this;
    },
    file(location, content) {
      if (settings.location) {
        location = path.join(settings.location, location);
      }
      archive.append(content, { name: location });
      return this;
    },
    folder(location) {
      if (settings.location) {
        location = path.join(settings.location, location);
      }
      return zipper(archive, { location: location });
    },
    finalize() {
      archive.finalize();
      return this;
    },
    onDone(method) {
      this.output.on('close', method);
      return this;
    },
    onError(method) {
      this.output.on('error', method);
      return this;
    }
  };
}

exports.JSZipStream = {
  to(destination) {
    console.log('stream to', destination)
    const archive = archiver('zip', {
      zlib: { level: 9 } // Sets the compression level.
    });
    return zipper(archive, {}).streamToFile(destination);
  }
};