JavaScript, Node.js: search for a specific word string in files

I'm trying to make an app that searches for all files
containing a specified string under the current directory and its subdirectories.
As I understand it, that means I need to create a read stream, loop over it, load the read data into an array, and if the word is found print __filename and __dirname, otherwise print a not-found message.
Unfortunately, I could not make it work...
Any clue?
var path = require('path'),
fs=require('fs');
function fromDir(startPath,filter,ext){
if (!fs.existsSync(startPath)){
console.log("no dir ",startPath);
return;
};
var files=fs.readdirSync(startPath);
let found = files.find((file) => {
let thisFilename = path.join(startPath, file);
let stat = fs.lstatSync(thisFilename);
var readStream = fs.createReadStream(fs);
var readline = require('readline');
if (stat.isDirectory()) {
fromDir(thisFilename, filename,readline, ext);
} else {
if (path.extname(createReadStream) === ext && path.basename(thisFilename, ext) === filename) {
return true;
}
}
});
console.log('-- your word has found on : ',filename,__dirname);
}
if (!found) {
console.log("Sorry, we didn't find your term");
}
}
fromDir('./', process.argv[3], process.argv[2]);

Because not everything was included in the question, I made an assumption:
We are looking for full words (if that's not the case, replace the regex with a simple indexOf()).
Now, I've split the code into two functions - to make it both more readable and easier to recursively find the files.
Synchronous version:
const path = require('path');
const fs = require('fs');
function searchFilesInDirectory(dir, filter, ext) {
if (!fs.existsSync(dir)) {
console.log(`Specified directory: ${dir} does not exist`);
return;
}
const files = getFilesInDirectory(dir, ext);
files.forEach(file => {
const fileContent = fs.readFileSync(file);
// We want full words, so we use full word boundary in regex.
const regex = new RegExp('\\b' + filter + '\\b');
if (regex.test(fileContent)) {
console.log(`Your word was found in file: ${file}`);
}
});
}
// Using recursion, we find every file with the desired extension, even if it's deeply nested in subfolders.
function getFilesInDirectory(dir, ext) {
if (!fs.existsSync(dir)) {
console.log(`Specified directory: ${dir} does not exist`);
return;
}
let files = [];
fs.readdirSync(dir).forEach(file => {
const filePath = path.join(dir, file);
const stat = fs.lstatSync(filePath);
// If we hit a directory, apply our function to that dir. If we hit a file, add it to the array of files.
if (stat.isDirectory()) {
const nestedFiles = getFilesInDirectory(filePath, ext);
files = files.concat(nestedFiles);
} else {
if (path.extname(file) === ext) {
files.push(filePath);
}
}
});
return files;
}
Async version - because async is cool:
const path = require('path');
const fs = require('fs');
const util = require('util');
const fsReaddir = util.promisify(fs.readdir);
const fsReadFile = util.promisify(fs.readFile);
const fsLstat = util.promisify(fs.lstat);
async function searchFilesInDirectoryAsync(dir, filter, ext) {
const found = await getFilesInDirectoryAsync(dir, ext);
for (const file of found) {
const fileContent = await fsReadFile(file);
// We want full words, so we use full word boundary in regex.
const regex = new RegExp('\\b' + filter + '\\b');
if (regex.test(fileContent)) {
console.log(`Your word was found in file: ${file}`);
}
};
}
// Using recursion, we find every file with the desired extension, even if it's deeply nested in subfolders.
async function getFilesInDirectoryAsync(dir, ext) {
let files = [];
const filesFromDirectory = await fsReaddir(dir).catch(err => {
throw new Error(err.message);
});
for (let file of filesFromDirectory) {
const filePath = path.join(dir, file);
const stat = await fsLstat(filePath);
// If we hit a directory, apply our function to that dir. If we hit a file, add it to the array of files.
if (stat.isDirectory()) {
const nestedFiles = await getFilesInDirectoryAsync(filePath, ext);
files = files.concat(nestedFiles);
} else {
if (path.extname(file) === ext) {
files.push(filePath);
}
}
};
return files;
}
If you have not worked with or don't yet understand async/await, it is a great step to learn it as soon as possible. Trust me, you will love not seeing those ugly callbacks again!
UPDATE:
As you pointed out in the comments, you want the function to execute after running the node process on the file. You also want to pass the function parameters as node's arguments.
To do that, at the end of your file, you need to add:
searchFilesInDirectory(process.argv[2], process.argv[3], process.argv[4]);
This extracts our arguments and passes them to the function.
With that, you can call our process like so (example arguments):
node yourscriptname.js ./ james .txt
Personally, if I were to write this, I would leverage the beauty of asynchronous code, and Node.js's async / await.
As a very side note:
You can easily improve the readability of your code if you add proper formatting to it. Don't get me wrong, it's not terrible - but it can be improved:
Use spaces OR newlines after commas.
Use spaces around equality operators and arithmetic operators.
As long as you are consistent with formatting, everything looks much better.

Related

How to loop through the folder with files?

How can I loop through a folder that has subfolders and retrieve all files with the extension '.element.ts'?
const fs = require('fs');
const filesDir = fs.readdirSync('packages/web-components/src');
// the json result that will be generated
let content = [];
files.forEach(file => {
if (fileName === '???')
content.push(file);
});
const fs=require('fs');
function getAllFiles (dir, allFilesList = []){
const files = fs.readdirSync(dir);
files.map(file => {
const name = dir + '/' + file;
if (fs.statSync(name).isDirectory()) { // check if subdirectory is present
getAllFiles(name, allFilesList); // do recursive execution for subdirectory
} else {
allFilesList.push(name); // push filename into the array
}
})
return allFilesList;
}
const allFiles = getAllFiles('./testfolder');
const fileEndsWith = allFiles.filter(file => file.endsWith('.element.ts'))
console.log(fileEndsWith);
Easy way: use glob package.
const glob = require("glob");
const pattern = "packages/web-components/src/*.element.ts"
const elementTsFilenames = glob.sync(pattern);
Manual way:
const dir = "packages/web-components/src";
const extension = ".element.ts";
const elementTsFilenames = fs.readdirSync(dir).filter(fn => fn.endsWith(extension));
This is easy enough that the manual way is as easy or easier; if you have more complex requirements (e.g. recursively searching subdirectories), a library approach is nice.
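For example (a small sketch, assuming the same glob package as above), a ** globstar pattern covers the recursive case:
const glob = require("glob");

// ** matches nested subdirectories, so deeply nested .element.ts files are found too
const recursivePattern = "packages/web-components/src/**/*.element.ts";
const allElementTsFilenames = glob.sync(recursivePattern);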
I'll show you how to recursively get all the files in a directory (even those located in a subdirectory).
To do this, we need to create a recursive function that can call itself when dealing with sub-directories. We also need the function to go through each of the sub-directories and add any new files it encounters, and to check whether the file name ends with the specified extension. When the function is finished, it should return an array with all the files it encountered.
Here's what the recursive function looks like:
const fs = require("fs")
const path = require("path")
const getAllFiles = function(dirPath, extension, arrayOfFiles) {
  const files = fs.readdirSync(dirPath);
  arrayOfFiles = arrayOfFiles || [];

  files.forEach(function(file) {
    if (fs.statSync(dirPath + "/" + file).isDirectory()) {
      // Pass the extension along so nested directories are filtered the same way
      arrayOfFiles = getAllFiles(dirPath + "/" + file, extension, arrayOfFiles);
    } else if (file.endsWith(extension)) {
      arrayOfFiles.push(path.join(__dirname, dirPath, "/", file));
    }
  });

  return arrayOfFiles;
}
First, we require() the Node.js path module. Since this is
included with Node.js, you don't need to install anything for it to
work. This module will help us easily create full file paths for our
files.
The getAllFiles variable holds the recursive function that will go
through each subdirectory and return an array of filenames. It takes a
directory path, the desired file extension, and an optional arrayOfFiles as arguments.
Inside the getAllFiles function, we first use the readdirSync()
function to get all of the files and directories inside the given
dirPath supplied to the function.
Then, we create an arrayOfFiles that will hold all the filenames
that will be returned when the function is done running.
Next, we loop over each item (file or directory) found by the
readdirSync() function. If the item is a directory, we have the
function recursively call itself to get all of the files and
sub-directories inside the given directory.
And if the item is a file whose name ends with the specified
extension, we simply append the file path to the arrayOfFiles array.
When the forEach loop has finished, we return the arrayOfFiles
array.
Here is how you use the function in your code:
const result = getAllFiles("packages/web-components/src", ".element.ts");
I don't think you need an npm package for this: It's not too hard to walk the file system using an async iterator and filter the results based on something like a regular expression.
Another bonus of an async technique is that it doesn't block your thread while it iterates the files (other work can be done in between each result while it's searching), especially if you have a lot of sub-directories/files to search through.
If you want to reduce your project's dependencies, you can do something like this:
example.mjs:
import * as path from 'node:path';
import {readdir} from 'node:fs/promises';
/** Search all subdirectories, yielding matching file entries */
export async function* findFiles (dir, regexpFilter) {
for (const entry of await readdir(dir, {withFileTypes: true})) {
const fPath = path.resolve(dir, entry.name);
if (entry.isDirectory()) {
yield* findFiles(fPath, regexpFilter);
continue;
}
if (regexpFilter && !regexpFilter.test(entry.name)) continue;
yield Object.assign(entry, {path: fPath});
}
}
async function main () {
const dir = 'packages/web-components/src';
// Regular expression which means: ends with '.element.ts'
const filter = /\.element\.ts$/;
for await (const entry of findFiles(dir, filter)) {
// ^^^^^^
// If you don't include a filter argument, then all files will be iterated
console.log(entry.name); // just the file name
console.log(entry.path); // the full file path
}
}
main();

How could I duplicate/copy file in an automatized way with JavaScript?

I have a gif file that is stored in a directory called assets on my computer. I would like to create X amount of duplicates; they should be stored in the same directory and each of them should have a different file name.
Example:
In the assets directory there is a gif file called 0.gif. I would like to duplicate this gif file 10 times, and the duplicates should be called 1.gif, 2.gif, 3.gif and so on.
The simplest option is to use fs and its available copyFile function.
const fs = require("fs");
const path = require("path");
let copyMultiple = (src, count) => {
  let initCount = 0;
  while (initCount < count) {
    initCount++; // you can put this at the bottom too, according to your needs
    // e.g. 1.gif, 2.gif, 3.gif ... to match the naming asked for in the question
    const newFileName = `${initCount}${path.extname(src)}`;
    console.log(newFileName, "is the new file name");
    fs.copyFile(src, newFileName, (error) => {
      // if an error comes up
      if (error) {
        console.log(error);
      }
    });
  }
};
copyMultiple("0.gif", 3);
Another elegant way of doing this is
const util = require("util");
const fs = require("fs");
const path = require("path");
const copyFilePromise = util.promisify(fs.copyFile);
function copyFiles(srcFile, destDir, destFileNames) {
return Promise.all(
destFileNames.map((file) => {
return copyFilePromise(srcFile, path.join(destDir, file));
})
);
}
const myDestinationFileNames = ["second.gif", "third.gif"];
const sourceFileName = "1.gif";
copyFiles(sourceFileName, "", myDestinationFileNames)
.then(() => {
console.log("Copying is Done");
})
.catch((err) => {
console.log("Got and Error", error);
});
Using this will also give you the upper hand of knowing when the copying is done.
You can read docs here
const fs = require("fs")
const filename = "index.js".split(".") //filename like 0.gif to gif
const times = 10 // number of times to duplicate
for(var int = 1; int < times; int++){
const newFilename = `${(parseInt(filename[0]) + init)}.${filename[1]}` //new filename like 0.gif to 1.gif
fs.copyFileSync(filename, newfilename)
}
Use writeFile and readFile from the fs module and a simple for loop.
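A minimal sketch of that idea, assuming the source file is ./assets/0.gif and ten copies as in the question (using the synchronous variants for brevity):
const fs = require('fs');

const source = './assets/0.gif';
const data = fs.readFileSync(source); // read the original file once

for (let i = 1; i <= 10; i++) {
  fs.writeFileSync(`./assets/${i}.gif`, data); // write each duplicate under a new name
}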
not sure which framework you're on but fs.copyFile() is the standard way for node.js https://nodejs.org/api/fs.html#fscopyfilesrc-dest-mode-callback
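For reference, a minimal fs.copyFile call could look like this (the file names are just an illustration, matching the question's assets folder):
const fs = require("fs");

fs.copyFile("assets/0.gif", "assets/1.gif", (err) => {
  if (err) throw err;
  console.log("assets/0.gif was copied to assets/1.gif");
});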

How to write a data as file names in a folder to CSV file using Nodejs

Did anybody write the file names in a folder to a CSV file using JavaScript?
my folder structure is
Data
  +IMG
    +test
      -1.png
      -2.png
    +train
      -3.png
      -4.png
an output CSV file will be like this
Data/IMG/test/1.png Data/IMG/train/3.png
Data/IMG/test/2.png Data/IMG/train/4.png
You just need to loop through all folders and find all files.
You can refer to this answer to do this.
When you are finding all the files' paths, you can write these paths into a string for the CSV file:
const fs = require('fs');
const path = require('path');
let csvStr = "";
async function loop(startPath) {
// Our starting point
try {
// Get the files as an array
const files = await fs.promises.readdir(startPath);
// Loop them all with the new for...of
for (const file of files) {
// Get the full paths
const currentPath = path.join(startPath, file);
// Stat the file to see if we have a file or dir
const stat = await fs.promises.stat(currentPath);
if (stat.isFile()) {
console.log("'%s' is a file.", currentPath);
// put the file into csv string
csvStr += currentPath + ", "
} else if (stat.isDirectory()) {
console.log("'%s' is a directory.", currentPath);
// enter the dictionary and loop
await loop(currentPath);
}
} // End for...of
} catch (e) {
// Catch anything bad that happens
console.error("We've thrown! Whoops!", e);
}
}
// Make an async function that gets executed immediately
(async () => {
// start loop from the path where you run node
await loop("./");
fs.writeFileSync("your.csv", csvStr);
})();
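The snippet above joins every path into a single comma-separated string; if you need the two-column layout from the question (a test path next to a train path per row), a rough sketch along these lines could work (directory names and pairing logic are assumed from the example output):
const fs = require('fs');
const path = require('path');

function listFiles(dir) {
  return fs.readdirSync(dir).map(file => path.join(dir, file));
}

const testFiles = listFiles('Data/IMG/test');
const trainFiles = listFiles('Data/IMG/train');

// Pair the i-th test file with the i-th train file on each CSV row
const rows = testFiles.map((file, i) => `${file}, ${trainFiles[i] || ''}`);
fs.writeFileSync('your.csv', rows.join('\n'));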

What is the most efficient way to read only the first line of a file in Node JS?

Imagine you have many long text files, and you need to only extract data from the first line of each one (without reading any further content). What is the best way in Node JS to do it?
Thanks!
I ended up adopting this solution, which seems the most performant I've seen so far:
var fs = require('fs');
var Q = require('q');
function readFirstLine (path) {
return Q.promise(function (resolve, reject) {
var rs = fs.createReadStream(path, {encoding: 'utf8'});
var acc = '';
var pos = 0;
var index;
rs
.on('data', function (chunk) {
index = chunk.indexOf('\n');
acc += chunk;
index !== -1 ? rs.close() : pos += chunk.length;
})
.on('close', function () {
resolve(acc.slice(0, pos + index));
})
.on('error', function (err) {
reject(err);
})
});
}
I created an npm module for convenience, named "firstline".
Thanks to @dandavis for the suggestion to use String.prototype.slice()!
There's a built-in module almost for this case - readline. It avoids messing with chunks and so forth. The code would look like the following:
const fs = require('fs');
const readline = require('readline');
async function getFirstLine(pathToFile) {
const readable = fs.createReadStream(pathToFile);
const reader = readline.createInterface({ input: readable });
const line = await new Promise((resolve) => {
reader.on('line', (line) => {
reader.close();
resolve(line);
});
});
readable.close();
return line;
}
I know this doesn't exactly answer the question but for those who are looking for a READABLE and simple way to do so:
const fs = require('fs').promises;
async function getFirstLine(filePath) {
const fileContent = await fs.readFile(filePath, 'utf-8');
return (fileContent.match(/(^.*)/) || [])[1] || '';
}
NOTE:
Naturally, this will only work with text files, which I assumed you used from your description.
This will work with empty files and will return an empty string.
This regexp is very performant since it is simple (no OR conditions or complex matches) and only reads the first line.
Please try this:
https://github.com/yinrong/node-line-stream-util#get-head-lines
It unpipes the upstream once it has got the head lines.
Node.js >= 16
In all current versions of Node.js, readline.createInterface can be used as an async iterable, to read a file line by line - or just for the first line. This is also safe to use with empty files.
Unfortunately, the error handling logic is broken in versions of Node.js before 16, where certain file system errors may go uncaught even if the code is wrapped in a try-catch block because of the way asynchronous errors are propagated in streams. So I recommend using this method only in Node.js >= 16.
import { createReadStream } from "fs";
import { createInterface } from "readline";
async function readFirstLine(path) {
const inputStream = createReadStream(path);
try {
for await (const line of createInterface(inputStream)) return line;
return ''; // If the file is empty.
}
finally {
inputStream.destroy(); // Destroy file stream.
}
}
const firstLine = await readFirstLine("path/to/file");
// Here you go;
var lineReader = require('line-reader');
var async = require('async');

exports.readManyFiles = function(files) {
  async.map(files,
    function(file, callback) {
      lineReader.open(file, function(reader) {
        if (reader.hasNextLine()) {
          reader.nextLine(function(line) {
            callback(null, line);
          });
        }
      });
    },
    function(err, allLines) {
      // do whatever you want to with the lines
    });
};

Read a file one line at a time in node.js?

I am trying to read a large file one line at a time. I found a question on Quora that dealt with the subject but I'm missing some connections to make the whole thing fit together.
var Lazy=require("lazy");
new Lazy(process.stdin)
.lines
.forEach(
function(line) {
console.log(line.toString());
}
);
process.stdin.resume();
The bit that I'd like to figure out is how I might read one line at a time from a file instead of STDIN as in this sample.
I tried:
fs.open('./VeryBigFile.csv', 'r', '0666', Process);
function Process(err, fd) {
if (err) throw err;
// DO lazy read
}
but it's not working. I know that in a pinch I could fall back to using something like PHP, but I would like to figure this out.
I don't think the other answer would work as the file is much larger than the server I'm running it on has memory for.
Since Node.js v0.12 and as of Node.js v4.0.0, there is a stable readline core module. Here's the easiest way to read lines from a file, without any external modules:
const fs = require('fs');
const readline = require('readline');
async function processLineByLine() {
const fileStream = fs.createReadStream('input.txt');
const rl = readline.createInterface({
input: fileStream,
crlfDelay: Infinity
});
// Note: we use the crlfDelay option to recognize all instances of CR LF
// ('\r\n') in input.txt as a single line break.
for await (const line of rl) {
// Each line in input.txt will be successively available here as `line`.
console.log(`Line from file: ${line}`);
}
}
processLineByLine();
Or alternatively:
var lineReader = require('readline').createInterface({
input: require('fs').createReadStream('file.in')
});
lineReader.on('line', function (line) {
console.log('Line from file:', line);
});
The last line is read correctly (as of Node v0.12 or later), even if there is no final \n.
UPDATE: this example has been added to Node's API official documentation.
For such a simple operation there shouldn't be any dependency on third-party modules. Go easy.
var fs = require('fs'),
readline = require('readline');
var rd = readline.createInterface({
input: fs.createReadStream('/path/to/file'),
output: process.stdout,
terminal: false
});
rd.on('line', function(line) {
console.log(line);
});
Update in 2019
An awesome example is already posted in the official Node.js documentation, here.
This requires Node.js >= 11.4 to be installed on your machine.
const fs = require('fs');
const readline = require('readline');
async function processLineByLine() {
const fileStream = fs.createReadStream('input.txt');
const rl = readline.createInterface({
input: fileStream,
crlfDelay: Infinity
});
// Note: we use the crlfDelay option to recognize all instances of CR LF
// ('\r\n') in input.txt as a single line break.
for await (const line of rl) {
// Each line in input.txt will be successively available here as `line`.
console.log(`Line from file: ${line}`);
}
}
processLineByLine();
You don't have to open the file, but instead, you have to create a ReadStream.
fs.createReadStream
Then pass that stream to Lazy
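A minimal sketch of what that suggestion looks like, reusing the Lazy pattern from the question but with a file stream instead of stdin:
var Lazy = require('lazy');
var fs = require('fs');

new Lazy(fs.createReadStream('./VeryBigFile.csv'))
  .lines
  .forEach(function(line) {
    console.log(line.toString());
  });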
require('fs').readFileSync('file.txt', 'utf-8').split(/\r?\n/).forEach(function(line){
console.log(line);
})
there is a very nice module for reading a file line by line, it's called line-reader
with it you simply just write:
var lineReader = require('line-reader');
lineReader.eachLine('file.txt', function(line, last) {
console.log(line);
// do whatever you want with line...
if(last){
// or check if it's the last one
}
});
you can even iterate the file with a "java-style" interface, if you need more control:
lineReader.open('file.txt', function(reader) {
if (reader.hasNextLine()) {
reader.nextLine(function(line) {
console.log(line);
});
}
});
Old topic, but this works:
var rl = readline.createInterface({
input : fs.createReadStream('/path/file.txt'),
output: process.stdout,
terminal: false
})
rl.on('line',function(line){
console.log(line) //or parse line
})
Simple. No need for an external module.
You can always roll your own line reader. I haven't benchmarked this snippet yet, but it correctly splits the incoming stream of chunks into lines without the trailing '\n':
var last = "";
process.stdin.on('data', function(chunk) {
var lines, i;
lines = (last+chunk).split("\n");
for(i = 0; i < lines.length - 1; i++) {
console.log("line: " + lines[i]);
}
last = lines[i];
});
process.stdin.on('end', function() {
console.log("line: " + last);
});
process.stdin.resume();
I came up with this when working on a quick log parsing script that needed to accumulate data during the log parsing, and I felt that it would be nice to try doing this using js and node instead of using perl or bash.
Anyway, I do feel that small nodejs scripts should be self-contained and not rely on third-party modules, so after reading all the answers to this question, each using various modules to handle line parsing, a 13 SLOC native nodejs solution might be of interest.
With the carrier module:
var carrier = require('carrier');
process.stdin.resume();
carrier.carry(process.stdin, function(line) {
console.log('got one line: ' + line);
});
I ended up with a massive, massive memory leak using Lazy to read line by line when trying to then process those lines and write them to another stream due to the way drain/pause/resume in node works (see: http://elegantcode.com/2011/04/06/taking-baby-steps-with-node-js-pumping-data-between-streams/ (i love this guy btw)). I haven't looked closely enough at Lazy to understand exactly why, but I couldn't pause my read stream to allow for a drain without Lazy exiting.
I wrote the code to process massive csv files into xml docs, you can see the code here: https://github.com/j03m/node-csv2xml
If you run the previous revisions with Lazy line it leaks. The latest revision doesn't leak at all and you can probably use it as the basis for a reader/processor. Though I have some custom stuff in there.
Edit: I guess I should also note that my code with Lazy worked fine until I found myself writing large enough xml fragments that drain/pause/resume became a necessity. For smaller chunks it was fine.
In most cases this should be enough:
const fs = require("fs")
fs.readFile('./file', 'utf-8', (err, file) => {
const lines = file.split('\n')
for (let line of lines)
console.log(line)
});
Edit:
Use a transform stream.
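A minimal sketch of that transform-stream idea, assuming a UTF-8 text file (this is not the original author's code, just one way to do it with the built-in stream module):
const fs = require('fs');
const { Transform } = require('stream');

let leftover = '';
const lineSplitter = new Transform({
  readableObjectMode: true, // emit one line per 'data' event downstream
  transform(chunk, encoding, callback) {
    const lines = (leftover + chunk.toString()).split('\n');
    leftover = lines.pop(); // keep the trailing partial line for the next chunk
    for (const line of lines) this.push(line);
    callback();
  },
  flush(callback) {
    if (leftover) this.push(leftover); // emit the final line, if any
    callback();
  }
});

fs.createReadStream('./file.txt')
  .pipe(lineSplitter)
  .on('data', line => console.log(line));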
With a BufferedReader you can read lines.
new BufferedReader ("lorem ipsum", { encoding: "utf8" })
.on ("error", function (error){
console.log ("error: " + error);
})
.on ("line", function (line){
console.log ("line: " + line);
})
.on ("end", function (){
console.log ("EOF");
})
.read ();
I was frustrated by the lack of a comprehensive solution for this, so I put together my own attempt (git / npm). Copy-pasted list of features:
Interactive line processing (callback-based, no loading the entire file into RAM)
Optionally, return all lines in an array (detailed or raw mode)
Interactively interrupt streaming, or perform map/filter like processing
Detect any newline convention (PC/Mac/Linux)
Correct eof / last line treatment
Correct handling of multi-byte UTF-8 characters
Retrieve byte offset and byte length information on per-line basis
Random access, using line-based or byte-based offsets
Automatically map line-offset information, to speed up random access
Zero dependencies
Tests
NIH? You decide :-)
Since posting my original answer, I found that split is a very easy to use node module for line reading in a file; it also accepts optional parameters.
var split = require('split');
fs.createReadStream(file)
.pipe(split())
.on('data', function (line) {
//each chunk now is a separate line!
});
Haven't tested on very large files. Let us know if you do.
function createLineReader(fileName){
var EM = require("events").EventEmitter
var ev = new EM()
var stream = require("fs").createReadStream(fileName)
var remainder = null;
stream.on("data",function(data){
if(remainder != null){//append newly received data chunk
var tmp = new Buffer(remainder.length+data.length)
remainder.copy(tmp)
data.copy(tmp,remainder.length)
data = tmp;
}
var start = 0;
for(var i=0; i<data.length; i++){
if(data[i] == 10){ //\n new line
var line = data.slice(start,i)
ev.emit("line", line)
start = i+1;
}
}
if(start<data.length){
remainder = data.slice(start);
}else{
remainder = null;
}
})
stream.on("end",function(){
if(null!=remainder) ev.emit("line",remainder)
})
return ev
}
//---------main---------------
fileName = process.argv[2]
lineReader = createLineReader(fileName)
lineReader.on("line",function(line){
console.log(line.toString())
//console.log("++++++++++++++++++++")
})
I wanted to tackle this same problem, basically what in Perl would be:
while (<>) {
process_line($_);
}
My use case was just a standalone script, not a server, so synchronous was fine. These were my criteria:
The minimal synchronous code that could be reused in many projects.
No limits on file size or number of lines.
No limits on length of lines.
Able to handle full Unicode in UTF-8, including characters beyond the BMP.
Able to handle *nix and Windows line endings (old-style Mac not needed for me).
Line endings character(s) to be included in lines.
Able to handle last line with or without end-of-line characters.
Not use any external libraries not included in the node.js distribution.
This is a project for me to get a feel for low-level scripting type code in node.js and decide how viable it is as a replacement for other scripting languages like Perl.
After a surprising amount of effort and a couple of false starts this is the code I came up with. It's pretty fast but less trivial than I would've expected: (fork it on GitHub)
var fs = require('fs'),
StringDecoder = require('string_decoder').StringDecoder,
util = require('util');
function lineByLine(fd) {
var blob = '';
var blobStart = 0;
var blobEnd = 0;
var decoder = new StringDecoder('utf8');
var CHUNK_SIZE = 16384;
var chunk = new Buffer(CHUNK_SIZE);
var eolPos = -1;
var lastChunk = false;
var moreLines = true;
var readMore = true;
// each line
while (moreLines) {
readMore = true;
// append more chunks from the file onto the end of our blob of text until we have an EOL or EOF
while (readMore) {
// do we have a whole line? (with LF)
eolPos = blob.indexOf('\n', blobStart);
if (eolPos !== -1) {
blobEnd = eolPos;
readMore = false;
// do we have the last line? (no LF)
} else if (lastChunk) {
blobEnd = blob.length;
readMore = false;
// otherwise read more
} else {
var bytesRead = fs.readSync(fd, chunk, 0, CHUNK_SIZE, null);
lastChunk = bytesRead !== CHUNK_SIZE;
blob += decoder.write(chunk.slice(0, bytesRead));
}
}
if (blobStart < blob.length) {
processLine(blob.substring(blobStart, blobEnd + 1));
blobStart = blobEnd + 1;
if (blobStart >= CHUNK_SIZE) {
// blobStart is in characters, CHUNK_SIZE is in octets
var freeable = blobStart / CHUNK_SIZE;
// keep blob from growing indefinitely, not as deterministic as I'd like
blob = blob.substring(CHUNK_SIZE);
blobStart -= CHUNK_SIZE;
blobEnd -= CHUNK_SIZE;
}
} else {
moreLines = false;
}
}
}
It could probably be cleaned up further, it was the result of trial and error.
Generator based line reader: https://github.com/neurosnap/gen-readlines
var fs = require('fs');
var readlines = require('gen-readlines');
fs.open('./file.txt', 'r', function(err, fd) {
if (err) throw err;
fs.fstat(fd, function(err, stats) {
if (err) throw err;
for (var line of readlines(fd, stats.size)) {
console.log(line.toString());
}
});
});
A new function was added in Node.js v18.11.0 to read files line by line
filehandle.readLines([options])
This is how you use this with a text file you want to read
import { open } from 'node:fs/promises';
myFileReader();
async function myFileReader() {
const file = await open('./TextFileName.txt');
for await (const line of file.readLines()) {
console.log(line)
}
}
To understand more, read the Node.js documentation; here is the link for filehandle.readLines():
https://nodejs.org/api/fs.html#filehandlereadlinesoptions
If you want to read a file line by line and write it into another file:
var fs = require('fs');
var readline = require('readline');
var Stream = require('stream');
function readFileLineByLine(inputFile, outputFile) {
var instream = fs.createReadStream(inputFile);
var outstream = new Stream();
outstream.readable = true;
outstream.writable = true;
var rl = readline.createInterface({
input: instream,
output: outstream,
terminal: false
});
rl.on('line', function (line) {
fs.appendFileSync(outputFile, line + '\n');
});
};
var fs = require('fs');
function readfile(name,online,onend,encoding) {
var bufsize = 1024;
var buffer = new Buffer(bufsize);
var bufread = 0;
var fd = fs.openSync(name,'r');
var position = 0;
var eof = false;
var data = "";
var lines = 0;
encoding = encoding || "utf8";
function readbuf() {
bufread = fs.readSync(fd,buffer,0,bufsize,position);
position += bufread;
eof = bufread ? false : true;
data += buffer.toString(encoding,0,bufread);
}
function getLine() {
var nl = data.indexOf("\r"), hasnl = nl !== -1;
if (!hasnl && eof) return fs.closeSync(fd), online(data,++lines), onend(lines);
if (!hasnl && !eof) readbuf(), nl = data.indexOf("\r"), hasnl = nl !== -1;
if (!hasnl) return process.nextTick(getLine);
var line = data.substr(0,nl);
data = data.substr(nl+1);
if (data[0] === "\n") data = data.substr(1);
online(line,++lines);
process.nextTick(getLine);
}
getLine();
}
I had the same problem and came up with the above solution.
It looks similar to others but is async and can read large files very quickly.
Hope this helps.
Two questions we must ask ourselves while doing such operations are:
What's the amount of memory used to perform it?
Is the memory consumption increasing drastically with the file size?
Solutions like require('fs').readFileSync() load the whole file into memory. That means that the amount of memory required to perform operations will be almost equivalent to the file size. We should avoid these for anything larger than 50 MB.
We can easily track the amount of memory used by a function by placing these lines of code after the function invocation:
const used = process.memoryUsage().heapUsed / 1024 / 1024;
console.log(
`The script uses approximately ${Math.round(used * 100) / 100} MB`
);
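For example, wrapping a readFileSync-based read like this (the file name is just the one from the question) shows how the heap usage grows with the file size:
const fs = require('fs');

// Loads the entire file into memory before splitting it into lines
const lines = fs.readFileSync('./VeryBigFile.csv', 'utf-8').split('\n');
console.log(`Read ${lines.length} lines`);

const used = process.memoryUsage().heapUsed / 1024 / 1024;
console.log(`The script uses approximately ${Math.round(used * 100) / 100} MB`);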
Right now the best way to read particular lines from a large file is using node's readline. The documentation has amazing examples.
This is my favorite way of going through a file, a simple native solution for a progressive (as in not a "slurp" or all-in-memory way) file read with modern async/await. It's a solution that I find "natural" when processing large text files without having to resort to the readline package or any non-core dependency.
let buf = '';
for await ( const chunk of fs.createReadStream('myfile') ) {
const lines = buf.concat(chunk).split(/\r?\n/);
buf = lines.pop() ?? '';
for( const line of lines ) {
console.log(line);
}
}
if(buf.length) console.log(buf); // last line, if file does not end with newline
You can adjust the encoding in fs.createReadStream or use chunk.toString(<arg>). Also this lets you better fine-tune the line splitting to your taste, i.e. use .split(/\n+/) to skip empty lines and control the chunk size with fs.createReadStream('myfile', { highWaterMark: <chunkSize> }).
Don't forget to create a function like processLine(line) to avoid repeating the line processing code twice due to the ending buf leftover. Unfortunately, the ReadStream instance does not update its end-of-file flags in this setup, so there's no way, afaik, to detect within the loop that we're in the last iteration without some more verbose tricks like comparing the file size from a fs.Stats() with .bytesRead. Hence the final buf processing solution, unless you're absolutely sure your file ends with a newline \n, in which case the for await loop should suffice.
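A small sketch of the processLine(line) refactor mentioned above (the readLines wrapper name is arbitrary):
const fs = require('fs');

function processLine(line) {
  console.log(line); // your per-line logic goes here
}

async function readLines(path) {
  let buf = '';
  for await (const chunk of fs.createReadStream(path, { encoding: 'utf8' })) {
    const lines = buf.concat(chunk).split(/\r?\n/);
    buf = lines.pop() ?? '';
    lines.forEach(processLine);
  }
  if (buf.length) processLine(buf); // last line, if the file does not end with a newline
}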
Performance Considerations
Chunk sizes are important for performance, the default is 64k for text files and, for multi MB files, larger chunks can improve speed by an order of magnitude.
The above snippet runs at least at the same speed (or even 5% faster sometimes) as code based on Node.js v18's filehandle.readLines() or on the readline module (the accepted answer), once you tune highWaterMark to something that your machine can handle; i.e. setting it to the same size as the file, if your available memory allows it, is the fastest.
In any case, any of the Node.js line-reading answers here are an order of magnitude slower than the Perl or native *nix solutions.
Similar alternatives
★ If you prefer the evented asynchronous version, this would be it:
let buf = '';
fs.createReadStream('myfile')
.on('data', chunk => {
const lines = buf.concat(chunk).split(/\r?\n/);
buf = lines.pop();
for( const line of lines ) {
console.log(line);
}
})
.on('end', () => buf.length && console.log(buf) );
★ Now if you don't mind importing the stream core package, then this is the equivalent piped stream version, which allows for chaining transforms like gzip decompression:
const { Writable } = require('stream');
let buf = '';
fs.createReadStream('myfile').pipe(
new Writable({
write: (chunk, enc, next) => {
const lines = buf.concat(chunk).split(/\r?\n/);
buf = lines.pop();
for (const line of lines) {
console.log(line);
}
next();
}
})
).on('finish', () => buf.length && console.log(buf) );
I have a little module which does this well and is used by quite a few other projects: npm readline. Note that in node v10 there is a native readline module, so I republished my module as linebyline: https://www.npmjs.com/package/linebyline
If you don't want to use the module, the function is very simple:
var fs = require('fs'),
EventEmitter = require('events').EventEmitter,
util = require('util'),
newlines = [
13, // \r
10 // \n
];
var readLine = module.exports = function(file, opts) {
if (!(this instanceof readLine)) return new readLine(file);
EventEmitter.call(this);
opts = opts || {};
var self = this,
line = [],
lineCount = 0,
emit = function(line, count) {
self.emit('line', new Buffer(line).toString(), count);
};
this.input = fs.createReadStream(file);
this.input.on('open', function(fd) {
self.emit('open', fd);
})
.on('data', function(data) {
for (var i = 0; i < data.length; i++) {
if (0 <= newlines.indexOf(data[i])) { // Newline char was found.
lineCount++;
if (line.length) emit(line, lineCount);
line = []; // Empty buffer.
} else {
line.push(data[i]); // Buffer new line data.
}
}
}).on('error', function(err) {
self.emit('error', err);
}).on('end', function() {
// Emit last line if anything left over since EOF won't trigger it.
if (line.length){
lineCount++;
emit(line, lineCount);
}
self.emit('end');
}).on('close', function() {
self.emit('close');
});
};
util.inherits(readLine, EventEmitter);
Another solution is to run the logic via the sequential executor nsynjs. It reads the file line-by-line using node's readline module, and it doesn't use promises or recursion, so it is not going to fail on large files. Here is how the code will look:
var nsynjs = require('nsynjs');
var textFile = require('./wrappers/nodeReadline').textFile; // this file is part of nsynjs
function process(textFile) {
var fh = new textFile();
fh.open('path/to/file');
var s;
while (typeof(s = fh.readLine(nsynjsCtx).data) != 'undefined')
console.log(s);
fh.close();
}
var ctx = nsynjs.run(process,{},textFile,function () {
console.log('done');
});
The code above is based on this example: https://github.com/amaksr/nsynjs/blob/master/examples/node-readline/index.js
I use this:
function emitLines(stream, re) {
  re = re || /\n/;
  var buffer = '';

  stream.on('data', stream_data);
  stream.on('end', stream_end);

  function stream_data(data) {
    buffer += data;
    flush();
  } // stream_data

  function stream_end() {
    if (buffer) stream.emit('line', buffer);
  } // stream_end

  function flush() {
    var match;
    while (match = re.exec(buffer)) {
      var index = match.index + match[0].length;
      stream.emit('line', buffer.substring(0, index));
      buffer = buffer.substring(index);
      re.lastIndex = 0;
    }
  } // flush
} // emitLines
Use this function on a stream and listen to the line events that it will emit.
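For example, a hypothetical usage with a file stream (file name assumed):
const fs = require('fs');

const stream = fs.createReadStream('./file.txt', 'utf8');
emitLines(stream); // attach the line-splitting logic from above
stream.on('line', line => console.log(line));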
While you should probably use the readline module as the top answer suggests, readline appears to be oriented toward command line interfaces rather than line reading. It's also a little bit more opaque regarding buffering. (Anyone who needs a streaming line oriented reader probably will want to tweak buffer sizes). The readline module is ~1000 lines while this, with stats and tests, is 34.
const EventEmitter = require('events').EventEmitter;
class LineReader extends EventEmitter{
constructor(f, delim='\n'){
super();
this.totalChars = 0;
this.totalLines = 0;
this.leftover = '';
f.on('data', (chunk)=>{
this.totalChars += chunk.length;
let lines = chunk.split(delim);
if (lines.length === 1){
this.leftover += chunk;
return;
}
lines[0] = this.leftover + lines[0];
this.leftover = lines[lines.length-1];
if (this.leftover) lines.pop();
this.totalLines += lines.length;
for (let l of lines) this.onLine(l);
});
// f.on('error', ()=>{});
f.on('end', ()=>{console.log('chars', this.totalChars, 'lines', this.totalLines)});
}
onLine(l){
this.emit('line', l);
}
}
//Command line test
const f = require('fs').createReadStream(process.argv[2], 'utf8');
const delim = process.argv[3];
const lineReader = new LineReader(f, delim);
lineReader.on('line', (line)=> console.log(line));
Here's an even shorter version, without the stats, at 19 lines:
class LineReader extends require('events').EventEmitter{
constructor(f, delim='\n'){
super();
this.leftover = '';
f.on('data', (chunk)=>{
let lines = chunk.split(delim);
if (lines.length === 1){
this.leftover += chunk;
return;
}
lines[0] = this.leftover + lines[0];
this.leftover = lines[lines.length-1];
if (this.leftover)
lines.pop();
for (let l of lines)
this.emit('line', l);
});
}
}
const fs = require("fs")
fs.readFile('./file', 'utf-8', (err, data) => {
var innerContent;
console.log("Asynchronous read: " + data.toString());
const lines = data.toString().split('\n')
for (let line of lines)
innerContent += line + '<br>';
});
I wrapped the whole logic of everyday line processing into an npm module: line-kit
https://www.npmjs.com/package/line-kit
// example
var count = 0
require('line-kit')(require('fs').createReadStream('/etc/issue'),
(line) => { count++; },
() => {console.log(`seen ${count} lines`)})
I use the code below to read lines, after verifying that it is not a directory and that it is not included in the list of files that need not be checked.
(function () {
var fs = require('fs');
var glob = require('glob-fs')();
var path = require('path');
var result = 0;
var exclude = ['LICENSE',
path.join('e2e', 'util', 'db-ca', 'someother-file'),
path.join('src', 'favicon.ico')];
var files = [];
files = glob.readdirSync('**');
var allFiles = [];
var patternString = [
'trade',
'order',
'market',
'securities'
];
files.map((file) => {
try {
if (!fs.lstatSync(file).isDirectory() && exclude.indexOf(file) === -1) {
fs.readFileSync(file).toString().split(/\r?\n/).forEach(function(line){
patternString.map((pattern) => {
if (line.indexOf(pattern) !== -1) {
console.log(file + ' contain `' + pattern + '` in in line "' + line +'";');
result = 1;
}
});
});
}
} catch (e) {
console.log('Error:', e.stack);
}
});
process.exit(result);
})();
I have looked through all the above answers; all of them use a third-party library to solve it. There is a simple solution in Node's API, e.g.
const fs = require('fs')
let stream = fs.createReadStream('<filename>', { autoClose: true })
stream.on('data', chunk => {
  let row = chunk.toString('ascii')
})
