I am trying to use the File System API to create permanent files, and to write and read data from them.
Although I succeeded in creating the file and writing data to it, after calling flush() the file becomes empty (and its size is 0).
The files that I created still exist and I can see them in a different run of Safari, but the data is lost and the files are all 0-sized.
Even if I try to read the file right after writing to it and flushing, the data is lost and its size returns to 0.
Does anybody know what I am doing wrong?
I tried running this:
console.log("Starting");
async function files() {
// Set a Message
const message = "Thank you for reading this.";
const fileName = "Draft.txt";
// Get handle to draft file
const root = await navigator.storage.getDirectory();
const draftHandle = await root.getFileHandle(fileName, { create: true });
console.log("File Name: ", fileName);
// Get sync access handle
const accessHandle = await draftHandle.createSyncAccessHandle();
// Get size of the file.
const fileSize = await accessHandle.getSize();
console.log("File Size: ", fileSize); // getting 0 here
// Read file content to a buffer.
const buffer = new DataView(new ArrayBuffer(fileSize));
const readBuffer = accessHandle.read(buffer, { at: 0 });
console.log("Read: ", readBuffer); // getting 0 here because the file was just created
// Write the message to the file.
const encoder = new TextEncoder();
const encodedMessage = encoder.encode(message);
const writeBuffer = accessHandle.write(encodedMessage, { at: readBuffer });
console.log("Write: ", writeBuffer); // writing 27 bytes here, succeeding
// Persist changes to disk.
accessHandle.flush();
// Always close FileSystemSyncAccessHandle if done.
accessHandle.close();
console.log("Closed file ");
// Find files under root/ and print their details.
for await (const handle of root.values()) {
console.log('Item in root: ', handle.name);
if (handle.kind == "file") {
let f = await handle.getFile();
console.log("File Details: ", f); // I can see here the file I created now and earlier created files, but all of them are 0 sized
}
}
}
files();
I have an HTML form and I want to create a JSON file with the data entered in the HTML fields.
Right now the JSON text is visible in the console, but it doesn't create a new JSON file with this content. Also, I get an error: Uncaught ReferenceError: require is not defined.
// get the form element from dom
const formElement = document.querySelector('form#forms')

// convert the form to JSON
const getFormJSON = (form) => {
  const data = new FormData(form);
  return Array.from(data.keys()).reduce((result, key) => {
    if (result[key]) {
      result[key] = data.getAll(key)
      return result
    }
    result[key] = data.get(key);
    return result;
  }, {});
};

// handle the form submission event, prevent default form behaviour, check validity, convert form to JSON
const handler = (event) => {
  event.preventDefault();
  const valid = formElement.reportValidity();
  if (valid) {
    const result = getFormJSON(formElement);
    // handle one, multiple or no files uploaded
    const images = [result.images].flat().filter((file) => !!file.name)
    // handle one, multiple or no languages selected
    const languages = [result.languages || []].flat();
    // convert the checkbox to a boolean
    const isHappyReader = !!(result.isHappyReader && result.isHappyReader === 'on')
    // use spread function, but override the keys we've made changes to
    const output = {
      ...result,
      images,
      languages,
      isHappyReader
    }
    console.log(output)
  }
}

formElement.addEventListener("submit", handler)

const fs = require('fs');
const dataNew = JSON.stringify(output);
fs.writeFile('output.json', dataNew, (err) => {
  if (err) {
    console.log("error")
    throw err;
  }
  console.log("JSON data is saved.");
});
</script>
</body>
It seems you are on the frontend. You can't write files like this because of security restrictions: otherwise every website running some JavaScript would potentially be able to write files to your system, and you really don't want that.
Additionally, fs is a Node.js API that is not available in the browser.
One option would be to download the JSON file from the frontend, which you could do using the following code:
/**
 * Download a JSON file.
 * @param {string} filename filename
 * @param {any} obj any serializable object
 */
function downloadJson(filename, obj) {
// serialize to JSON and pretty print with indent of 4
const text = JSON.stringify(obj, null, 4);
// create anchor tag
var element = document.createElement("a");
element.setAttribute(
"href",
"data:application/json;charset=utf-8," + encodeURIComponent(text)
);
element.setAttribute("download", filename);
// don't display the tag
element.style.display = "none";
// add tag to document
document.body.appendChild(element);
// click it: this starts the download
element.click();
// remove the tag again
document.body.removeChild(element);
}
window.addEventListener("DOMContentLoaded", (event) => {
// Start file download.
downloadJson("helloWorld.json", { hello: "World" });
});
If you add that to your page, the save dialog will appear on the user's system (screenshots of the dialog on Ubuntu and of the downloaded file's contents omitted).
Please read this thread on the pros and cons of using an approach like this.
I have a file which stores many JavaScript objects in JSON form and I need to read the file, create each of the objects, and do something with them (insert them into a db in my case). The JavaScript objects can be represented in one of the following formats:
Format A:
[{name: 'thing1'},
....
{name: 'thing999999999'}]
or Format B:
{name: 'thing1'} // <== My choice.
...
{name: 'thing999999999'}
Note that the ... indicates a lot of JSON objects. I am aware I could read the entire file into memory and then use JSON.parse() like this:
fs.readFile(filePath, 'utf-8', function (err, fileContents) {
if (err) throw err;
console.log(JSON.parse(fileContents));
});
However, the file could be really large, I would prefer to use a stream to accomplish this. The problem I see with a stream is that the file contents could be broken into data chunks at any point, so how can I use JSON.parse() on such objects?
Ideally, each object would be read as a separate data chunk, but I am not sure on how to do that.
var importStream = fs.createReadStream(filePath, {flags: 'r', encoding: 'utf-8'});

importStream.on('data', function(chunk) {
    var pleaseBeAJSObject = JSON.parse(chunk);
    // insert pleaseBeAJSObject in a database
});

importStream.on('end', function(item) {
    console.log("Woot, imported objects into the database!");
});
Note, I wish to avoid reading the entire file into memory. Time efficiency does not matter to me. Yes, I could try to read a number of objects at once and insert them all at once, but that's a performance tweak - I need a way that is guaranteed not to cause a memory overload, no matter how many objects are contained in the file.
I can choose to use FormatA or FormatB or maybe something else, just please specify in your answer. Thanks!
To process a file line-by-line, you simply need to decouple the reading of the file and the code that acts upon that input. You can accomplish this by buffering your input until you hit a newline. Assuming we have one JSON object per line (basically, format B):
var stream = fs.createReadStream(filePath, {flags: 'r', encoding: 'utf-8'});
var buf = '';

stream.on('data', function(d) {
    buf += d.toString(); // when data is read, stash it in a string buffer
    pump(); // then process the buffer
});

function pump() {
    var pos;
    while ((pos = buf.indexOf('\n')) >= 0) { // keep going while there's a newline somewhere in the buffer
        if (pos == 0) { // if there's more than one newline in a row, the buffer will now start with a newline
            buf = buf.slice(1); // discard it
            continue; // so that the next iteration will start with data
        }
        processLine(buf.slice(0, pos)); // hand off the line
        buf = buf.slice(pos + 1); // and slice the processed data off the buffer
    }
}

function processLine(line) { // here's where we do something with a line
    if (line[line.length - 1] == '\r') line = line.substr(0, line.length - 1); // discard CR (0x0D)
    if (line.length > 0) { // ignore empty lines
        var obj = JSON.parse(line); // parse the JSON
        console.log(obj); // do something with the data here!
    }
}
Each time the file stream receives data from the file system, it's stashed in a buffer, and then pump is called.
If there's no newline in the buffer, pump simply returns without doing anything. More data (and potentially a newline) will be added to the buffer the next time the stream gets data, and then we'll have a complete object.
If there is a newline, pump slices off the buffer from the beginning to the newline and hands it off to processLine. It then checks again whether there's another newline in the buffer (the while loop). In this way, we can process all of the lines that were read in the current chunk.
Finally, processLine is called once per input line. If present, it strips off the carriage return character (to avoid issues with line endings – LF vs CRLF), and then calls JSON.parse on the line. At this point, you can do whatever you need to with your object.
Note that JSON.parse is strict about what it accepts as input; you must quote your identifiers and string values with double quotes. In other words, {name:'thing1'} will throw an error; you must use {"name":"thing1"}.
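For instance, a quick check you can run in Node shows the difference (the exact error message varies by version):

try {
  JSON.parse("{name:'thing1'}");   // unquoted key, single-quoted value
} catch (e) {
  console.log(e.message);          // e.g. "Unexpected token n in JSON at position 1"
}

console.log(JSON.parse('{"name":"thing1"}')); // { name: 'thing1' }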
Because no more than a chunk of data will ever be in memory at a time, this will be extremely memory efficient. It will also be extremely fast. A quick test showed I processed 10,000 rows in under 15ms.
Just as I was thinking that it would be fun to write a streaming JSON parser, I also thought that maybe I should do a quick search to see if there's one already available.
Turns out there is.
JSONStream "streaming JSON.parse and stringify"
Since I just found it, I've obviously not used it, so I can't comment on its quality, but I'll be interested to hear if it works.
It does work. Consider the following JavaScript, which uses lodash's _.isString to check the type of each parsed item (stream is the readable stream you are piping in):

var JSONStream = require('JSONStream');
var _ = require('lodash');

stream.pipe(JSONStream.parse('*'))
    .on('data', (d) => {
        console.log(typeof d);
        console.log("isString: " + _.isString(d));
    });
This will log objects as they come in if the stream is an array of objects. Therefore the only thing being buffered is one object at a time.
As of October 2014, you can just do something like the following (using JSONStream) - https://www.npmjs.org/package/JSONStream
var fs = require('fs'),
    JSONStream = require('JSONStream');

var getStream = function () {
    var jsonData = 'myData.json',
        stream = fs.createReadStream(jsonData, { encoding: 'utf8' }),
        parser = JSONStream.parse('*');
    return stream.pipe(parser);
};
getStream().pipe(MyTransformToDoWhateverProcessingAsNeeded).on('error', function (err) {
// handle any errors
});
To demonstrate with a working example:
npm install JSONStream event-stream
data.json:
{
"greeting": "hello world"
}
hello.js:
var fs = require('fs'),
JSONStream = require('JSONStream'),
es = require('event-stream');
var getStream = function () {
var jsonData = 'data.json',
stream = fs.createReadStream(jsonData, { encoding: 'utf8' }),
parser = JSONStream.parse('*');
return stream.pipe(parser);
};
getStream()
.pipe(es.mapSync(function (data) {
console.log(data);
}));
$ node hello.js
// hello world
I had a similar requirement: I needed to read a large JSON file in Node.js, process the data in chunks, call an API, and save the results in MongoDB.
inputFile.json is like:
{
"customers":[
{ /*customer data*/},
{ /*customer data*/},
{ /*customer data*/}....
]
}
Now I used JSONStream and event-stream to achieve this synchronously.
var JSONStream = require("JSONStream");
var es = require("event-stream");
fileStream = fs.createReadStream(filePath, { encoding: "utf8" });
fileStream.pipe(JSONStream.parse("customers.*")).pipe(
es.through(function(data) {
console.log("printing one customer object read from file ::");
console.log(data);
this.pause();
processOneCustomer(data, this);
return data;
}),
function end() {
console.log("stream reading ended");
this.emit("end");
}
);
function processOneCustomer(data, es) {
DataModel.save(function(err, dataModel) {
es.resume();
});
}
I realize that you want to avoid reading the whole JSON file into memory if possible, however if you have the memory available it may not be a bad idea performance-wise. Using node.js's require() on a json file loads the data into memory really fast.
I ran two tests to see what the performance looked like on printing out an attribute from each feature from a 81MB geojson file.
In the 1st test, I read the entire geojson file into memory using var data = require('./geo.json'). That took 3330 milliseconds and then printing out an attribute from each feature took 804 milliseconds for a grand total of 4134 milliseconds. However, it appeared that node.js was using 411MB of memory.
In the second test, I used @arcseldon's answer with JSONStream + event-stream. I modified the JSONPath query to select only what I needed. This time the memory never went higher than 82MB; however, the whole thing now took 70 seconds to complete!
I wrote a module that can do this, called BFJ. Specifically, the method bfj.match can be used to break up a large stream into discrete chunks of JSON:
const bfj = require('bfj');
const fs = require('fs');
const stream = fs.createReadStream(filePath);
bfj.match(stream, (key, value, depth) => depth === 0, { ndjson: true })
.on('data', object => {
// do whatever you need to do with object
})
.on('dataError', error => {
// a syntax error was found in the JSON
})
.on('error', error => {
// some kind of operational error occurred
})
.on('end', error => {
// finished processing the stream
});
Here, bfj.match returns a readable, object-mode stream that will receive the parsed data items. It is called with three arguments:
A readable stream containing the input JSON.
A predicate that indicates which items from the parsed JSON will be pushed to the result stream.
An options object indicating that the input is newline-delimited JSON (this is to process format B from the question, it's not required for format A).
Upon being called, bfj.match will parse JSON from the input stream depth-first, calling the predicate with each value to determine whether or not to push that item to the result stream. The predicate is passed three arguments:
The property key or array index (this will be undefined for top-level items).
The value itself.
The depth of the item in the JSON structure (zero for top-level items).
Of course a more complex predicate can also be used as necessary according to requirements. You can also pass a string or a regular expression instead of a predicate function, if you want to perform simple matches against property keys.
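For example, based on the string form described above, a sketch that pushes every value stored under a given property key (filePath and the key name "name" are just placeholders):

const bfj = require('bfj');
const fs = require('fs');

const stream = fs.createReadStream(filePath);

// push every value whose property key is "name" to the result stream
bfj.match(stream, 'name')
  .on('data', value => {
    // each matched value arrives here
  })
  .on('end', () => {
    // finished processing the stream
  });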
If you have control over the input file, and it's an array of objects, you can solve this more easily. Arrange to output the file with each record on one line, like this:
[
{"key": value},
{"key": value},
...
This is still valid JSON.
Then, use the node.js readline module to process them one line at a time.
var fs = require("fs");
var lineReader = require('readline').createInterface({
input: fs.createReadStream("input.txt")
});
lineReader.on('line', function (line) {
line = line.trim();
if (line.charAt(line.length-1) === ',') {
line = line.substr(0, line.length-1);
}
if (line.charAt(0) === '{') {
processRecord(JSON.parse(line));
}
});
function processRecord(record) {
// Process the records one at a time here!
}
I solved this problem using the split npm module. Pipe your stream into split, and it will "Break up a stream and reassemble it so that each line is a chunk".
Sample code:
var fs = require('fs')
  , split = require('split')
  ;

var stream = fs.createReadStream(filePath, {flags: 'r', encoding: 'utf-8'});
var lineStream = stream.pipe(split());

lineStream.on('data', function(chunk) {
    var json = JSON.parse(chunk);
    // ...
});
Using @josh3736's answer, but for ES2021 and Node.js 16+ with async/await + AirBnb rules:
import fs from 'node:fs';
const file = 'file.json';
/**
* #callback itemProcessorCb
* #param {object} item The current item
*/
/**
* Process each data chunk in a stream.
*
* #param {import('fs').ReadStream} readable The readable stream
* #param {itemProcessorCb} itemProcessor A function to process each item
*/
async function processChunk(readable, itemProcessor) {
  let data = '';
  let total = 0;
  // eslint-disable-next-line no-restricted-syntax
  for await (const chunk of readable) {
    // join with the leftover from the last chunk, remove CRs and split into lines
    const lines = (data + chunk).replace(/\r/g, '').split('\n');
    // clear the leftover
    data = '';
    // process lines
    let line = lines.shift();
    const items = [];
    while (line !== undefined) {
      // skip empty lines and the array bracket lines ("[" and "]")
      if (line !== '' && !/[\[\]]+/.test(line)) {
        try {
          // remove the trailing comma and parse the json
          const json = JSON.parse(line.replace(/\s?(,)+\s?$/, ''));
          items.push(json);
        } catch (error) {
          // the last line may be only a partial line from this chunk,
          // so keep it and join it with the next chunk
          data += line;
        }
      }
      // continue
      line = lines.shift();
    }
    total += items.length;
    // Process items in parallel
    await Promise.all(items.map(itemProcessor));
  }
  console.log(`${total} items processed.`);
}
// Process each item
async function processItem(item) {
console.log(item);
}
// Init
try {
const readable = fs.createReadStream(file, {
flags: 'r',
encoding: 'utf-8',
});
processChunk(readable, processItem);
} catch (error) {
console.error(error.message);
}
For a JSON like:
[
{ "name": "A", "active": true },
{ "name": "B", "active": false },
...
]
https.get(url1 , function(response) {
var data = "";
response.on('data', function(chunk) {
data += chunk.toString();
})
.on('end', function() {
console.log(data)
});
});
I think you need to use a database. MongoDB is a good choice in this case because it is JSON compatible.
UPDATE:
You can use mongoimport tool to import JSON data into MongoDB.
mongoimport --collection collection --file collection.json
My ultimate goal is the following: my gardener has several devices that can send data to my Node.js server via TCP. This data is in JSON format, and looks something like the following:
Device A:
{"pagename": "spacekittens", "count": 11};
Device B:
{"pagename": "norwegiansultans", "count": 22};
As each of these is streamed to my server via TCP, I have added a ; to separate each message. In addition, the count in each device stream is randomly generated.
Now, I want to add dynamic routes for each TCP packet that comes my way, and display content from that stream to that route.
So my route myserver:4000/spacekittens should show the following:
{"pagename": "spacekittens", "count": [random number every second]};
And my route myserver:4000/norwegiansultans should show:
{"pagename": "norwegiansultans", "count": [random number every second]};
In order to accomplish this I have set up the following code:
server.on("connection", function(socket) {
let chunk = "";
socket.on('data', function(data) {
chunk += data.toString(); // Add string on the end of the variable 'chunk'
let d_index = chunk.indexOf(';'); // Find the delimiter
// While loop to keep going until no delimiter can be found
while (d_index > -1) {
try {
let string = chunk.substring(0, d_index); // Create string up until the delimiter
// define local variables that can be used in a closure
let json = JSON.parse(string); // Parse the current string
let localData = data;
console.log(json.pagename); // Function that does something with the current chunk of valid json.
app.get("/" + json.pagename, function(req, res) {
res.writeHead(200, {
'Content-Type': 'text/plain'
});
res.write(JSON.stringify(json));
res.end();
});
} catch (e) {
console.log(e);
}
chunk = chunk.substring(d_index + 1); // Cuts off the processed chunk
d_index = chunk.indexOf(';'); // Find the new delimiter
}
});
socket.on("close", function() {
console.log("connection closed");
});
});
I appreciate any thoughts and comments on methodology with regards to what I am trying to accomplish. However, I did not post only for this reason. I have a problem.
Currently, my res.write() line only populates the data in my routes one time. Adding new data via sockets does not replace the content on my route.
So under the myserver:4000/spacekittens route my count shows 11, and even though I stream an updated number ("count": 12) my route myserver:4000/spacekittens still only shows 11. Console logging gives me the correct response each time data is sent. So I am not using the res.write() correctly since it does not override old data.
Unsure how to rectify.
I would separate the page data and route setup from the connection handling. You can't set up the same route every time you receive a JSON blob, so the code below instead modifies the data that the route returns to the user; the data changes with each blob of JSON.
class Pages {
    constructor(app){
        this._store = {}
        this.app = app
    }
    get( name ){
        return this._store[name]
    }
    set( name, data ){
        if ( !this.exists(name) ) this.setupRoute(name)
        return this._store[name] = data
    }
    exists( name ){
        return this._store.hasOwnProperty(name)
    }
    addJSON( json_string ){
        let data = JSON.parse(json_string)
        if ( !data.pagename ) throw new Error('No pagename in data: "%s"', json_string)
        if ( !data.count ) throw new Error('No count in data "%s"', json_string)
        return this.set(data.pagename, data)
    }
    setupRoute( name ){
        let route = `/${name}`
        this.app.get(route, (req, res)=>{
            res.json(this.get(name))
        })
        console.log('setup route: %s', route)
        return this.app
    }
}
Then the connection handling just deals with pulling out the JSON strings for Pages.
const pages = new Pages(app)
server.on("connection", function(socket) {
let chunk = "";
socket.on('data', function(data) {
chunk += data.toString(); // Add string on the end of the variable 'chunk'
let d_index = chunk.indexOf(';'); // Find the delimiter
// While loop to keep going until no delimiter can be found
while (d_index > -1) {
try {
let string = chunk.substring(0, d_index); // Create string up until the delimiter
pages.addJSON(string)
} catch (e) {
console.error(e);
}
chunk = chunk.substring(d_index + 1); // Cuts off the processed chunk
d_index = chunk.indexOf(';'); // Find the new delimiter
}
});
socket.on("close", function() {
console.log("connection closed");
});
});
You could also use one of the line delimited JSON parsing libraries which will take a binary stream and emit the JSON out as plain objects:
ndjson or ld-jsonstream
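For instance, a minimal ndjson sketch (assuming the devices send newline-delimited JSON rather than the ;-separated messages used above; server and pages are the objects from the code above):

const ndjson = require('ndjson');

server.on('connection', (socket) => {
  socket
    .pipe(ndjson.parse())
    .on('data', (obj) => {
      // obj is already a plain object, e.g. { pagename: 'spacekittens', count: 11 }
      pages.set(obj.pagename, obj);
    })
    .on('error', (err) => console.error(err));
});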
I've been trying to find a way to write to a file when using Node.js, but with no success. How can I do that?
There are a lot of details in the File System API. The most common way is:
const fs = require('fs');
fs.writeFile("/tmp/test", "Hey there!", function(err) {
if(err) {
return console.log(err);
}
console.log("The file was saved!");
});
// Or
fs.writeFileSync('/tmp/test-sync', 'Hey there!');
Currently there are three ways to write a file:
fs.write(fd, buffer, offset, length, position, callback)
You need to wait for the callback to ensure that the buffer is written to disk. It's not buffered. (A minimal sketch of this option follows this list.)
fs.writeFile(filename, data, [encoding], callback)
All data must be stored at the same time; you cannot perform sequential writes.
fs.createWriteStream(path, [options])
Creates a WriteStream, which is convenient because you don't need to wait for a callback. But again, it's not buffered.
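As a quick illustration of the first option, a minimal fs.write sketch using an explicit file descriptor (the path is just a placeholder):

const fs = require('fs');

fs.open('/tmp/test-fd', 'w', (err, fd) => {
  if (err) throw err;
  const buffer = Buffer.from('Hey there!');
  // offset 0, length buffer.length, position null (write at the current position)
  fs.write(fd, buffer, 0, buffer.length, null, (err, bytesWritten) => {
    if (err) throw err;
    console.log('wrote %d bytes', bytesWritten);
    fs.close(fd, () => {});
  });
});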
A WriteStream, as the name says, is a stream. A stream by definition is “a buffer” containing data which moves in one direction (source ► destination). But a writable stream is not necessarily “buffered”. A stream is “buffered” when you write n times, and at time n+1, the stream sends the buffer to the kernel (because it's full and needs to be flushed).
In other words: “A buffer” is the object. Whether or not it “is buffered” is a property of that object.
If you look at the code, the WriteStream inherits from a writable Stream object. If you pay attention, you’ll see how they flush the content; they don't have any buffering system.
If you write a string, it’s converted to a buffer, and then sent to the native layer and written to disk. When writing strings, they're not filling up any buffer. So, if you do:
write("a")
write("b")
write("c")
You're doing:
fs.write(new Buffer("a"))
fs.write(new Buffer("b"))
fs.write(new Buffer("c"))
That’s three calls to the I/O layer. Although you're using “buffers”, the data is not buffered. A buffered stream would do: fs.write(new Buffer ("abc")), one call to the I/O layer.
As of Node.js v0.12 (stable version announced 2015-02-06), two new functions are supported: cork() and uncork(). It seems that these functions will finally allow you to buffer/flush the write calls.
For example, in Java there are some classes that provide buffered streams (BufferedOutputStream, BufferedWriter...). If you write three bytes, these bytes will be stored in the buffer (memory) instead of doing an I/O call just for three bytes. When the buffer is full the content is flushed and saved to disk. This improves performance.
I'm not discovering anything, just remembering how a disk access should be done.
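To illustrate, here is a minimal cork()/uncork() sketch (assuming a Node version that has them; the stream docs recommend deferring uncork() to the next tick so the queued writes are flushed together):

const fs = require('fs');

const ws = fs.createWriteStream('out.txt');

ws.cork();      // start buffering writes in memory
ws.write('a');
ws.write('b');
ws.write('c');

process.nextTick(() => {
  ws.uncork();  // flush the three queued writes together
  ws.end();
});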
You can of course make it a little more advanced. Non-blocking, writing bits and pieces, not writing the whole file at once:
var fs = require('fs');
var stream = fs.createWriteStream("my_file.txt");
stream.once('open', function(fd) {
stream.write("My first row\n");
stream.write("My second row\n");
stream.end();
});
Synchronous Write
fs.writeFileSync(file, data[, options])
fs = require('fs');
fs.writeFileSync("foo.txt", "bar");
Asynchronous Write
fs.writeFile(file, data[, options], callback)
fs = require('fs');
fs.writeFile('foo.txt', 'bar', (err) => { if (err) throw err; });
Where
file <string> | <Buffer> | <URL> | <integer> filename or file descriptor
data <string> | <Buffer> | <Uint8Array>
options <Object> | <string>
callback <Function>
It's worth reading the official File System (fs) docs.
Update: async/await
const fs = require('fs');
const util = require('util');
const writeFile = util.promisify(fs.writeFile);
const fn = async () => { await writeFile('foo.txt', 'bar'); };
fn();
var path = 'public/uploads/file.txt',
    buffer = new Buffer("some content\n");

fs.open(path, 'w', function(err, fd) {
    if (err) {
        throw 'error opening file: ' + err;
    }

    fs.write(fd, buffer, 0, buffer.length, null, function(err) {
        if (err) throw 'error writing file: ' + err;
        fs.close(fd, function() {
            console.log('file written');
        })
    });
});
The answers provided are dated and a newer way to do this is:
const fsPromises = require('fs').promises
await fsPromises.writeFile('/path/to/file.txt', 'data to write')
see documents here for more info
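Since await is only valid inside an async function (or an ES module with top-level await), a runnable variant of the above might look like this:

const fsPromises = require('fs').promises;

(async () => {
  await fsPromises.writeFile('/path/to/file.txt', 'data to write');
  console.log('done');
})().catch(console.error);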
I liked Index of ./articles/file-system.
It worked for me.
See also How do I write files in node.js?.
fs = require('fs');
fs.writeFile('helloworld.txt', 'Hello World!', function (err) {
if (err)
return console.log(err);
console.log('Wrote Hello World in file helloworld.txt, just check it');
});
Contents of helloworld.txt:
Hello World!
Update:
On Linux, Node writes to the current directory; it seems that on some other platforms it doesn't, so I add this comment just in case:
Use ROOT_APP_PATH = fs.realpathSync('.'); console.log(ROOT_APP_PATH); to see where the file is written.
I know the question asked about "write", but in a more general sense "append" might be useful in some cases, as it is easy to use in a loop to add text to a file (whether the file exists or not). Use a "\n" if you want to add lines, e.g.:
var fs = require('fs');
for (var i=0; i<10; i++){
fs.appendFileSync("junk.csv", "Line:"+i+"\n");
}
OK, it's quite simple, as Node has built-in functionality for this: the fs module, which stands for File System and is basically the NodeJS File System module...
So first require it in your server.js file like this:
var fs = require('fs');
fs has a few methods for writing to a file, but my preferred way is using appendFile: it appends the content to the file, and if the file doesn't exist it creates one. The code could look like below:
fs.appendFile('myFile.txt', 'Hi Ali!', function (err) {
if (err) throw err;
console.log('Thanks, It\'s saved to the file!');
});
You may write to a file using the fs (file system) module.
Here is an example of how you may do it:
const fs = require('fs');
const writeToFile = (fileName, newData, callback) => {
    fs.open(fileName, 'wx', (error, fileDescriptor) => {
        if (!error && fileDescriptor) {
            // Do something with the file here ...
            fs.writeFile(fileDescriptor, newData, (error) => {
                if (!error) {
                    fs.close(fileDescriptor, (error) => {
                        if (!error) {
                            callback(false);
                        } else {
                            callback('Error closing the file');
                        }
                    });
                } else {
                    callback('Error writing to new file');
                }
            });
        } else {
            callback('Could not create new file, it may already exist');
        }
    });
};
You might also want to get rid of this callback-inside-callback code structure by using Promises and async/await statements. This will make the asynchronous code structure much flatter. To do that, there is a handy util.promisify(original) function that can be used. It allows us to switch from callbacks to promises. Take a look at the example with fs functions below:
// Dependencies.
const util = require('util');
const fs = require('fs');
// Promisify "error-back" functions.
const fsOpen = util.promisify(fs.open);
const fsWrite = util.promisify(fs.writeFile);
const fsClose = util.promisify(fs.close);
// Now we may create 'async' function with 'await's.
async function doSomethingWithFile(fileName, newData) {
    const fileDescriptor = await fsOpen(fileName, 'wx');
    // Do something with the file here...
    await fsWrite(fileDescriptor, newData);
    await fsClose(fileDescriptor);
}
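Calling it might then look like this (a sketch; the file name and data are just placeholders):

doSomethingWithFile('draft.txt', 'some new data')
    .then(() => console.log('File written and closed'))
    .catch(console.error);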
You can write to files with streams.
Just do it like this:
const fs = require('fs');
const stream = fs.createWriteStream('./test.txt');
stream.write("Example text");
var fs = require('fs');
fs.writeFile(path + "\\message.txt", "Hello", function(err){
if (err) throw err;
console.log("success");
});
For example: read a file and write its contents to another file:

var fs = require('fs');
var path = process.cwd();

fs.readFile(path + "\\from.txt", function(err, data) {
    if (err)
        console.log(err);
    else {
        fs.writeFile(path + "\\to.text", data, function(erro) {
            if (erro)
                console.log("error : " + erro);
            else
                console.log("success");
        });
    }
});
Here we use w+ to open the file for both reading and writing; if the file path is not found, the file is created automatically.

var fs = require('fs');

fs.open(path, 'w+', function(err, fd) {
    if (err) {
        console.log("ERROR !! " + err);
    } else {
        var content = Buffer.from('content');
        fs.write(fd, content, 0, content.length, null, function(err) {
            if (err)
                console.log("ERROR !! " + err);
            fs.close(fd, function() {
                console.log('written success');
            })
        });
    }
});

Here content is what you have to write to the file, and content.length is its length in bytes.
Here is a sample of how to read a CSV file from the local disk and write a CSV file back to the local disk.
var csvjson = require('csvjson'),
    fs = require('fs'),
    mongodb = require('mongodb'),
    MongoClient = mongodb.MongoClient,
    mongoDSN = 'mongodb://localhost:27017/test',
    collection;

function uploadcsvModule() {
    var data = fs.readFileSync('/home/limitless/Downloads/orders_sample.csv', { encoding: 'utf8' });
    var importOptions = {
            delimiter: ',', // optional
            quote: '"'      // optional
        },
        ExportOptions = {
            delimiter: ",",
            wrap: false
        };
    var myobj = csvjson.toSchemaObject(data, importOptions);
    var exportArr = [], importArr = [];

    myobj.forEach(d => {
        if (d.orderId == undefined || d.orderId == '') {
            exportArr.push(d);
        } else {
            importArr.push(d);
        }
    });

    var csv = csvjson.toCSV(exportArr, ExportOptions);

    MongoClient.connect(mongoDSN, function(error, db) {
        collection = db.collection("orders");
        collection.insertMany(importArr, function(err, result) {
            fs.writeFile('/home/limitless/Downloads/orders_sample1.csv', csv, { encoding: 'utf8' }, function(err) {
                if (err) console.log(err);
            });
            db.close();
        });
    });
}

uploadcsvModule();
fs.createWriteStream(path[,options])
options may also include a start option to allow writing data at some position past the beginning of the file. Modifying a file rather than replacing it may require a flags mode of r+ rather than the default mode w. The encoding can be any one of those accepted by Buffer.
If autoClose is set to true (default behavior) on 'error' or 'finish' the file descriptor will be closed automatically. If autoClose is false, then the file descriptor won't be closed, even if there's an error. It is the application's responsibility to close it and make sure there's no file descriptor leak.
Like ReadStream, if fd is specified, WriteStream will ignore the path argument and will use the specified file descriptor. This means that no 'open' event will be emitted. fd should be blocking; non-blocking fds should be passed to net.Socket.
If options is a string, then it specifies the encoding.
After reading this long article, you should understand how it works.
So, here's an example of createWriteStream().
/* fs.createWriteStream() returns a WriteStream (a Writable stream), and we want the encoding to be 'utf-8' */
/* The WriteStream has the method write() */

const fs = require('fs');

fs.createWriteStream('out.txt', 'utf-8')
    .write('hello world');
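To illustrate the start and flags options quoted above, here is a sketch that patches bytes in the middle of an existing file instead of replacing it (it assumes out.txt already exists and is at least 6 bytes long):

const fs = require('fs');

// r+ keeps the existing content; start: 6 begins writing at byte offset 6
const ws = fs.createWriteStream('out.txt', { flags: 'r+', start: 6 });
ws.write('patched');
ws.end();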
Point 1:
If you want to write something to a file, meaning it should replace anything already saved in the file with the new content, use fs.promises.writeFile().
Point 2:
If you want to append something to a file, meaning it should keep what is already saved and add the new item to it, first read the file, add the new content to the value you read, and then write it back. So use fs.promises.readFile() together with fs.promises.writeFile().
Example 1: I want to write a JSON object to my JSON file.
const fs = require('fs');
const data = {table:[{id: 1, name: 'my name'}]}
const file_path = './my_data.json'
writeFile(file_path, data)
async function writeFile(filename, writedata) {
try {
await fs.promises.writeFile(filename, JSON.stringify(writedata, null, 4), 'utf8');
console.log('data is written successfully in the file')
}
catch (err) {
console.log('not able to write data in the file ')
}
}
Example 2:
If you want to append data to a JSON file.
You want to add the data {id:1, name:'my name'} to the file my_data.json in the same folder root. Just call the append_data(file_path, data) function.
It will append the data to the JSON file if the file exists, or it will create the file and add the data to it.
const fs = require('fs');
const data = {id: 2, name: 'your name'}
const file_path = './my_data.json'
append_data(file_path, data)
async function append_data(filename, data) {
    if (fs.existsSync(filename)) {
        var read_data = await readFile(filename);
        if (read_data == false) {
            console.log('not able to read file');
        } else {
            read_data.table.push(data); //data must have the table array in it like example 1
            var dataWrittenStatus = await writeFile(filename, read_data);
            if (dataWrittenStatus == true) {
                console.log('data added successfully');
            } else {
                console.log('data adding failed');
            }
        }
    } else {
        // file does not exist yet: create it with the data as the first table entry
        var dataWrittenStatus = await writeFile(filename, { table: [data] });
        if (dataWrittenStatus == true) {
            console.log('data added successfully');
        } else {
            console.log('data adding failed');
        }
    }
}

async function readFile(filePath) {
    try {
        const data = await fs.promises.readFile(filePath, 'utf8');
        return JSON.parse(data);
    }
    catch (err) {
        return false;
    }
}

async function writeFile(filename, writedata) {
    try {
        await fs.promises.writeFile(filename, JSON.stringify(writedata, null, 4), 'utf8');
        return true;
    }
    catch (err) {
        return false;
    }
}
You can use the library easy-file-manager.
Install it first from npm:
npm install easy-file-manager
Sample code to upload and remove files:
var filemanager = require('easy-file-manager')
var path = "/public"
var filename = "test.jpg"
var data; // buffered image

filemanager.upload(path, filename, data, function(err){
    if (err) console.log(err);
});

filemanager.remove(path, filename, function(isSuccess){
    if (!isSuccess) console.log("error");
});
You can write to a file with the following code example:
var data = [{ 'test': '123', 'test2': 'Lorem Ipsem ' }];

fs.open(datapath + '/data/topplayers.json', 'wx', function (error, fileDescriptor) {
    if (!error && fileDescriptor) {
        var stringData = JSON.stringify(data);
        fs.writeFile(fileDescriptor, stringData, function (error) {
            if (!error) {
                fs.close(fileDescriptor, function (error) {
                    if (!error) {
                        callback(false);
                    } else {
                        callback('Error in close file');
                    }
                });
            } else {
                callback('Error in writing file.');
            }
        });
    }
});