I am quite new to Node and gulp, which I am trying to learn.
As an experiment, I am trying to write a little gulp task that just prints to the console the contents of the streams it receives from gulp.src(). I have implemented it as shown below (the commented-out code works; the uncommented code does not). What am I doing wrong?
var gulp = require('gulp');
var through = require('through2');
var connect = require('gulp-connect');
var fs = require('fs');
var PATHS = {
src : 'src/**/*.ts',
html : 'src/**/*.html',
css : 'src/**/*.css'
};
gulp.task('test', function() {
// This is working !!!
/*
fs.createReadStream('ex.txt').pipe(through(function(chunk, enc, callback) {
    console.log("Text is: " + chunk.toString());
    this.push(chunk);
    callback();
})).pipe(fs.createWriteStream('out.txt')).on('finish', function() {
    console.log("Working");
});
*/
// But this is not working, why !!!
var writable = through.obj(function(chunk, enc, callback) {
if (chunk.isStream()) {
console.log("Chunk is stream ");
console.log("Path is: " + chunk.path);
} else if (chunk.isBuffer()) {
console.log("Chunk is buffer ");
} else {
console.log("Chunk is something else then stream or buffer");
}
this.push(chunk);
callback();
});
gulp.src('ex.txt').pipe(writable);
});
BTW, how can I tell what kind of object 'chunk' is and what methods are available to call on it? Where are they documented? (I am used to Java-style documentation.)
These might be trivial questions for experienced Node.js and gulp users, but I am having trouble understanding them.
If anybody can help me get through this learning curve, I would greatly appreciate it.
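For reference: the 'chunk' objects that gulp.src() emits are Vinyl file objects, documented in the vinyl package on npm; that is where methods like isStream(), isBuffer(), and isNull() come from. By default gulp.src() reads file contents into memory, so isBuffer() is the branch that fires. Here is a minimal sketch along those lines, assuming an ex.txt exists next to the gulpfile:
var gulp = require('gulp');
var through = require('through2');

gulp.task('inspect', function() {
    // Returning the stream lets gulp know when the task finishes
    return gulp.src('ex.txt')
        .pipe(through.obj(function(file, enc, callback) {
            // file is a Vinyl object; with gulp.src() defaults its contents are a Buffer
            console.log("Path is: " + file.path);
            if (file.isBuffer()) {
                console.log("Text is: " + file.contents.toString());
            }
            callback(null, file); // equivalent to this.push(file); callback();
        }));
});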
Is anyone able to explain what I'm doing wrong with my use of asynchronous functions in JavaScript?
Basically, I must use an asynchronous function in my Node.js code to grab an open port for me to use. There is a local variable being set outside of the asynchronous call that I can access and use just fine until I await the asynchronous function's return. After that, the local variable is undefined.
(async () => {
console.log("CHECK AFTER ASYNC1: " + csvFilePath);
// First, grab a valid open port
var port;
while (!port || portsInProcess.indexOf(port) >= 0) {
console.log("CHECK AFTER ASYNC2: " + csvFilePath);
port = await getPort();
console.log(port);
}
console.log("CHECK AFTER ASYNC3: " + csvFilePath);
portsInProcess.push(port);
// ... more code below...
Checks #1 and #2 are fine for the csvFilePath variable, but check #3 shows that it's undefined. The port number, however, is fine. This led me to believe that there's some weirdness with asynchronous function calls in JavaScript that ONLY affects local variables; the global variables I use further down are just fine. Unfortunately, I cannot make the csvFilePath variable global, since that would introduce race conditions on that variable too (which I'm preventing elsewhere; the while loop is to help prevent race conditions on the port number, which is basically unused in my simple tests on localhost).
Just in case it's helpful, here's the output I'm getting:
CHECK AFTER ASYNC1: data/text/crescent_topics.csv
CHECK AFTER ASYNC2: data/text/crescent_topics.csv
58562
CHECK AFTER ASYNC3: null
It might also be worth mentioning that it's really only those first few lines of code to dynamically grab an open port that I added. The code I had before, which used a fixed port number, worked just fine (including the csvFilePath variable remaining stable).
My understanding of await was that it makes the asynchronous function act more or less synchronously, which is what seems to be happening here: the code I have farther down that uses the port number does not run until after the port number is set. (But even if that weren't the case, why is the csvFilePath variable being unset, since I'm not altering or using it in any way here?)
EDIT: Here's some more code to provide additional context
var spawn = require('child_process').spawn;
var fs = require("fs");
var async = require('async');
var zmq = require('zmq');
var readline = require('readline');
const getPort = require('get-port');
/* Export the Nebula class */
module.exports = Nebula;
/* Location of the data for the Crescent dataset */
var textDataPath = "data/text/";
var crescentRawDataPath = textDataPath + "crescent_raw";
var crescentTFIDF = textDataPath + "crescent tfidf.csv";
var crescentTopicModel = textDataPath + "crescent_topics.csv";
/* Location of the data for the UK Health dataset */
var ukHealthRawDataPath = textDataPath + "uk_health_raw";
var ukHealthTFIDF = textDataPath + "uk_health.csv";
/* Map CSV files for text data to raw text location */
var textRawDataMappings = {};
textRawDataMappings[crescentTFIDF] = crescentRawDataPath;
textRawDataMappings[crescentTopicModel] = crescentRawDataPath;
textRawDataMappings[ukHealthTFIDF] = ukHealthRawDataPath;
textRawDataMappings[textDataPath + "uk_health_sm.csv"] = ukHealthRawDataPath;
/* The pipelines available to use */
var flatTextUIs = ["cosmos", "composite", "sirius", "centaurus"];
var pipelines = {
andromeda: {
file: "pipelines/andromeda.py",
defaultData: "data/highD/Animal_Data_study.csv"
},
cosmos: {
file: "pipelines/cosmos.py",
defaultData: textDataPath + "crescent tfidf.csv"
},
sirius: {
file: "pipelines/sirius.py",
defaultData: "data/highD/Animal_Data_paper.csv"
},
centaurus: {
file: "pipelines/centaurus.py",
defaultData: "data/highD/Animal_Data_paper.csv"
},
twitter: {
file: "pipelines/twitter.py",
},
composite: {
file: "pipelines/composite.py",
defaultData: textDataPath + "crescent tfidf.csv"
},
elasticsearch: {
file: "pipelines/espipeline.py",
args: []
}
};
/* The locations of the different types of datasets on the server */
var textDataFolder = "data/text/";
var highDDataFolder = "data/highD/";
var customCSVFolder = "data/customCSV/";
var sirius_prototype = 2;
// An array to track the ports being processed to eliminate race conditions
// as much as possible
var portsInProcess = [];
var nextSessionNumber = 0;
var usedSessionNumbers = [];
/* Nebula class constructor */
function Nebula(io, pipelineAddr) {
/* This allows you to use "Nebula(obj)" as well as "new Nebula(obj)" */
if (!(this instanceof Nebula)) {
return new Nebula(io);
}
/* The group of rooms currently active, each with a string identifier
* Each room represents an instance of a visualization that can be shared
* among clients.
*/
this.rooms = {};
this.io = io;
/* For proper use in callback functions */
var self = this;
/* Accept new WebSocket clients */
io.on('connection', function(socket) {
// Skipped some irrelevant Socket.io callbacks
**// Use the csvFilePath to store the name of a user-defined CSV file
var csvFilePath = null;**
/* Helper function to tell the client that the CSV file is now ready for them
* to use. They are also sent a copy of the data
*/
var csvFileReady = function(csvFilePath) {
// Let the client know that the CSV file is now ready to be used on
// the server
socket.emit("csvDataReady");
// Prepare to parse the CSV file
var csvData = [];
const rl = readline.createInterface({
input: fs.createReadStream(csvFilePath),
crlfDelay: Infinity
});
// Print any error messages we encounter
rl.on('error', function (err) {
console.log("Error while parsing CSV file: " + csvFilePath);
console.log(err);
});
// Read each line of the CSV file one at a time and parse it
var columnHeaders = [];
var firstColumnName;
rl.on('line', function (data) {
var dataColumns = data.split(",");
// If we haven't saved any column names yet, do so first
if (columnHeaders.length == 0) {
columnHeaders = dataColumns;
firstColumnName = columnHeaders[0];
}
// Process each individual line of data in the CSV file
else {
var dataObj = {};
var i;
for (i = 0; i < dataColumns.length; i++) {
var key = columnHeaders[i];
var value = dataColumns[i];
dataObj[key] = value
}
csvData.push(dataObj);
}
});
// All lines are read, file is closed now.
rl.on('close', function () {
// On certain OSs, like Windows, an extra, blank line may be read
// Check for this and remove it if it exists
var lastObservation = csvData[csvData.length-1];
var lastObservationKeys = Object.keys(lastObservation);
if (lastObservationKeys.length == 1 && lastObservation[lastObservationKeys[0]] == "") {
csvData.pop();
}
// Provide the CSV data to the client
socket.emit("csvDataReadComplete", csvData, firstColumnName);
});
};
**/* Allows the client to specify a CSV file already on the server to use */
socket.on("setCSV", function(csvName) {
console.log("setCSV CALLED");
csvFilePath = "data/" + csvName;
csvFileReady(csvFilePath);
console.log("CSV FILE SET: " + csvFilePath);
});**
// Skipped some more irrelevant callbacks
/* Allows a client to join a room. If the room doesn't exist yet,
* initiate it and send the new room to the client. Otherwise, send
* the client the current state of the room.
*/
socket.on('join', function(roomName, user, pipeline, args) {
console.log("Join called for " + pipeline + " pipeline; room " + roomName);
socket.roomName = roomName;
socket.user = user;
socket.join(roomName);
console.log("CSV FILE PATH: " + csvFilePath);
var pipelineArgsCopy = [];
if (!self.rooms[roomName]) {
var room = {};
room.name = roomName;
room.count = 1;
room.points = new Map();
room.similarity_weights = new Map();
if (pipeline == "sirius" || pipeline == "centaurus") {
room.attribute_points = new Map();
room.attribute_similarity_weights = new Map();
room.observation_data = [];
room.attribute_data = [];
}
/* Create a pipeline client for this room */
console.log("CHECK BEFORE ASYNC: " + csvFilePath);
**// Here's the code snippet I provided above**
**(async () => {
console.log("CHECK AFTER ASYNC1: " + csvFilePath);
// First, grab a valid open port
var port;
while (!port || portsInProcess.indexOf(port) >= 0) {
console.log("CHECK AFTER ASYNC2: " + csvFilePath);
port = await getPort();
console.log(port);
}
console.log("CHECK AFTER ASYNC3: " + csvFilePath);**
portsInProcess.push(port);
console.log("CHECK AFTER ASYNC4: " + csvFilePath);
if (!pipelineAddr) {
var pythonArgs = ["-u"];
if (pipeline in pipelines) {
// A CSV file path should have already been set. This
// file path should be used to indicate where to find
// the desired file
console.log("LAST CHECK: " + csvFilePath);
if (!csvFilePath) {
csvFilePath = pipelines[pipeline].defaultData;
}
console.log("FINAL CSV FILE: " + csvFilePath);
pipelineArgsCopy.push(csvFilePath);
// If the UI supports reading flat text files, tell the
// pipeline where to find the files
if (flatTextUIs.indexOf(pipeline) >= 0) {
pipelineArgsCopy.push(textRawDataMappings[csvFilePath]);
}
// Set the remaining pipeline args
pythonArgs.push(pipelines[pipeline].file);
pythonArgs.push(port.toString());
if (pipeline != "twitter" && pipeline != "elasticsearch") {
pythonArgs = pythonArgs.concat(pipelineArgsCopy);
}
}
else {
pythonArgs.push(pipelines.cosmos.file);
pythonArgs.push(port.toString());
pythonArgs.push(pipelines.cosmos.defaultData);
pythonArgs.push(crescentRawDataPath);
}
// used in case of CosmosRadar
for (var key in args) {
if (args.hasOwnProperty(key)) {
pythonArgs.push("--" + key);
pythonArgs.push(args[key]);
}
}
// Dynamically determine which distance function should be
// used
if (pythonArgs.indexOf("--dist_func") < 0) {
if (pipeline === "twitter" || pipeline === "elasticsearch" ||
csvFilePath.startsWith(textDataPath)) {
pythonArgs.push("--dist_func", "cosine");
}
else {
pythonArgs.push("--dist_func", "euclidean");
}
}
console.log(pythonArgs);
console.log("");
var pipelineInstance = spawn("python2.7", pythonArgs, {stdout: "inherit"});
pipelineInstance.on("error", function(err) {
console.log("python2.7.exe not found. Trying python.exe");
pipelineInstance = spawn("python", pythonArgs,{stdout: "inherit"});
pipelineInstance.stdout.on("data", function(data) {
console.log("Pipeline: " + data.toString());
});
pipelineInstance.stderr.on("data", function(data) {
console.log("Pipeline error: " + data.toString());
});
});
/* Data received by node app from python process,
* output this data to the output stream (on 'data'),
* we want to convert that received data into a string and
* append it to the overall data String
*/
pipelineInstance.stdout.on("data", function(data) {
console.log("Pipeline STDOUT: " + data.toString());
});
pipelineInstance.stderr.on("data", function(data) {
console.log("Pipeline error: " + data.toString());
});
room.pipelineInstance = pipelineInstance;
}
/* Connect to the pipeline */
pipelineAddr = pipelineAddr || "tcp://127.0.0.1:" + port.toString();
room.pipelineSocket = zmq.socket('pair');
room.pipelineSocket.connect(pipelineAddr);
pipelineAddr = null;
portsInProcess.splice(portsInProcess.indexOf(port), 1);
/* Listens for messages from the pipeline */
room.pipelineSocket.on('message', function (msg) {
self.handleMessage(room, msg);
});
self.rooms[roomName] = socket.room = room;
invoke(room.pipelineSocket, "reset");
})();
}
else {
socket.room = self.rooms[roomName];
socket.room.count += 1;
if (pipeline == "sirius" || pipeline == "centaurus") {
socket.emit('update', sendRoom(socket.room, true), true);
socket.emit('update', sendRoom(socket.room, false), false);
}
else {
socket.emit('update', sendRoom(socket.room));
}
}
// Reset the csvFilePath to null for future UIs...
// I don't think this is actually necessary since
// csvFilePath is local to the "connections" message,
// which is called for every individual room
csvFilePath = null;
});
// Skipped the rest of the code; it's irrelevant
});
}
Full printouts:
setCSV CALLED
CSV FILE SET: data/text/crescent_topics.csv
Join called for sirius pipeline; room sirius0
CSV FILE PATH: data/text/crescent_topics.csv
CHECK BEFORE ASYNC: data/text/crescent_topics.csv
CHECK AFTER ASYNC1: data/text/crescent_topics.csv
CHECK AFTER ASYNC2: data/text/crescent_topics.csv
58562
CHECK AFTER ASYNC3: null
CHECK AFTER ASYNC4: null
LAST CHECK: null
FINAL CSV FILE: data/highD/Animal_Data_paper.csv
[ '-u',
'pipelines/sirius.py',
'58562',
'data/highD/Animal_Data_paper.csv',
undefined,
'--dist_func',
'euclidean' ]
Since bolding of code doesn't work, just search for the "**" to find the relevant pieces I've marked.
TL;DR There's a lot of communication happening between the client and server to establish an individualized communication that is directly linked to a specific dataset. The user has the ability to upload a custom CSV file to the system, but the code I'm working with right now is just trying to select an existing CSV file on the server, so I omitted the callbacks for the custom CSV file. Once the file has been selected, the client asks to "join" a room/session. The case I'm working with right now assumes that this is a new room/session as opposed to trying to do some shared room/session with another client. (Yes, I know, the code is messy for sharing rooms/sessions, but it works for the most part for now and is not my main concern.) Again, all this code worked just fine before the asynchronous code was added (and using a static port variable), so I don't know what changed so much by adding it.
Since you've now included the whole code context, we can see that the code after your async IIFE is what's causing the problem.
An async function returns a promise as soon as it hits an await. And, while that await is waiting for its asynchronous operation, the code following the call to the async function runs. In your case, you're essentially doing this:
var csvFilePath = someGoodValue;
(async () => {
port = await getPort();
console.log(csvFilePath); // this will be null
})();
csvFilePath = null; // this runs as soon as the above code hits the await
So, as soon as you hit your first await, the async function returns a promise and the code following it continues to run, hitting the line of code that resets your csvFilePath.
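Here is a minimal, self-contained sketch of that ordering, with Promise.resolve() standing in for getPort():
var csvFilePath = "someGoodValue";

(async () => {
    console.log("before await: " + csvFilePath); // "someGoodValue"
    await Promise.resolve(); // control returns to the caller here
    console.log("after await: " + csvFilePath); // null -- the line below already ran
})();

csvFilePath = null; // runs synchronously, before the async function resumes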
There are probably cleaner ways to restructure your code, but a simple thing you could do is this:
var csvFilePath = someGoodValue;
(async () => {
port = await getPort();
console.log(csvFilePath); // this will be null
})().finally(() => {
csvFilePath = null;
});
Note: .finally() is supported in node v10+. If you're using an older version, you can reset the path in both .then() and .catch().
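For example, something along these lines:
(async () => {
    port = await getPort();
    // ...
})().then(() => {
    csvFilePath = null;
}).catch((err) => {
    console.log(err);
    csvFilePath = null;
});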
Or, as your comment says, maybe you can just remove the resetting of the csvFilePath entirely.
I realized after some silly tests that I was resetting csvFilePath to null outside the asynchronous call, which is what was causing the error... Oops!
I am running Node.js on Raspbian and trying to save/update a file every 2-3 seconds using the following code:
var saveFileSaving = false;
function loop() {
mainLoop = setTimeout(function() {
// update data
saveSaveFile(data, function() {
//console.log("Saved data to file");
loop();
});
}, 1500);
}
function saveSaveFile(data, callback) {
if(!saveFileSaving) {
saveFileSaving = true;
var wstream = fs.createWriteStream(path.join(__dirname, 'save.json'));
wstream.on('finish', function () {
saveFileSaving = false;
callback(data);
});
wstream.on('error', function (error) {
console.log(error);
saveFileSaving = false;
wstream.end();
callback(null);
});
wstream.write(JSON.stringify(data));
wstream.end();
} else {
callback(null);
}
}
When I run this it works fine for an hour, then starts spitting out:
[25/May/2016 11:3:4 am] { [Error: EROFS, open '<path to file>']
errno: 56,
code: 'EROFS',
path: '<path to file>' }
I have tried the jsonfile plugin, which also throws a similar write error after an hour.
I have tried both fileSystem.writeFile and fileSystem.writeFileSync; both give the same error after an hour.
I was thinking it had to do with the file handle not being released before a new save occurs, which is why I started using the saveFileSaving flag.
Resetting the system via a hard reset fixes the issue (a soft reset does not work, as the system seems to be locked up).
Any suggestions? I have searched the web and only found one other slightly similar question from 4 years ago, which was left in limbo.
Note: I am using the callback function from the code to continue with the main loop.
I was able to get this working by unlinking the file before saving it each time. While it is not pretty, it works and shouldn't cause too much overhead.
I also added a backup solution which saves a backup every 5 minutes in case the save file has issues.
Thank you for everyone's help.
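For anyone curious, here is a rough sketch of the unlink-then-write approach, using the same save.json path as above (the unlink error is deliberately ignored, since the file may not exist on the first save):
var fs = require('fs');
var path = require('path');

function saveSaveFile(data, callback) {
    var file = path.join(__dirname, 'save.json');
    fs.unlink(file, function() {
        // ignore the unlink error; the file may simply not exist yet
        fs.writeFile(file, JSON.stringify(data), function(err) {
            if (err) console.log(err);
            callback(err ? null : data);
        });
    });
}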
Here are my ideas. (For context, EROFS means "read-only file system"; on a Raspberry Pi this often happens when the kernel detects SD card errors and remounts the filesystem read-only.)
1) Check free space when this problem happens by typing in terminal:
df -h
2) Also check whether the file is editable when the problem occurs, with nano or vim, etc.
3) Your code is too complicated for simply scheduling data manipulation and writing it to a file. Also, whenever the file is busy (saveFileSaving), you lose that iteration's data until the next one. Try this code instead:
var
async = require('async'),
fs = require('fs'),
path = require('path');
async.forever(function(next) {
// some data manipulation
try {
fs.writeFileSync(path.join(__dirname, 'save.json'), JSON.stringify(data));
}
catch(ex) {
console.error('Error writing data to file:', ex);
}
setTimeout(next, 2000);
});
4) How about keeping the file descriptor open?
var
async = require('async'),
fs = require('fs'),
path = require('path');
var file = fs.createWriteStream(path.join(__dirname, 'save.json'));
async.forever(function(next) {
// some data manipulation
file.write(JSON.stringify(data));
setTimeout(next, 2000);
});
var handleSignal = function (exc) {
// close file
file.end();
if(exc) {
console.log('STOPPING PROCESS BECAUSE OF:', exc);
}
process.exit(-1);
}
process.on('uncaughtException', handleSignal);
process.on('SIGHUP', handleSignal);
5) There may be hardware or software problems (maybe OS driver issues) with the Raspberry Pi's storage controller.
I am trying to write a newer watch module that uses the fs.watch method instead of the watchFile approach.
So far it works beautifully, but only when I run it outside of mocha. I can't figure out why my unit test is throwing a tantrum; maybe someone here can?
Here is my class code:
/**
* requirements
*/
var fs, path, events;
fs = require('fs');
path = require('path');
events = require('events');
/**
* private
*/
var Monitor = function(directory, options) {
this.directory = directory;
this.options = options || {};
(this.options.lazy && this.empty()) || this.walk(this.directory);
this.watch(this.directory);
};
Monitor.prototype = new events.EventEmitter();
Monitor.prototype.watch = function(directory, stats) {
var stats = stats || {};
if (!this.directories[directory]) {
var w = fs.watch(directory, this.options, this.justlookatit.bind(this));
}
this.directories[directory] = { 'stats': stats, 'w': w };
};
Monitor.prototype.directories = function() {
if (!Object.keys(this.directories).length) {
this.walk(this.directory);
}
return this.directories;
};
Monitor.prototype.files = function() {
if (!Object.keys(this.files).length) {
this.walk(this.directory);
}
return this.files;
};
Monitor.prototype.unwatch = function() {
if (!Object.keys(this.directories).length) {
for (var dir in this.directories) {
dir.w.close();
}
}
};
Monitor.prototype.empty = function() {
this.unwatch();
this.files = {};
this.directories = {};
};
Monitor.prototype.walk = function(directory) {
var monitor = this;
this.empty();
fs.readdir(directory, function(err, files) {
if (err) return;
for (var file in files) {
var fullname = path.resolve(files[file]);
if (!monitor.options.filter || monitor.options.filter(fullname)) {
fs.stat(fullname, function(err, stats) {
if (err) return;
if (stats.isDirectory()) {
monitor.walk(fullname);
monitor.watch(fullname, stats);
} else {
monitor.files[fullname] = stats;
}
});
}
}
});
};
Monitor.prototype.justlookatit = function(action, file) {
var monitor = this;
var fullname = path.resolve(file);
if (this.options.filter && !this.options.filer(fullname)) return;
fs.exists(fullname, function(exists) {
if (exists) {
fs.stat(fullname, function(err, stats) {
if (stats.isDirectory()) {
monitor.watch(fullname, stats);
} else {
if (monitor.files[fullname]) {
if (stats.mtime.getTime() > monitor.files[fullname].mtime.getTime()) {
monitor.emit('modified', fullname, stats);
}
} else {
monitor.emit('added', fullname, stats);
}
monitor.files[fullname] = stats;
}
});
} else {
if (monitor.files[fullname]) {
delete monitor.files[fullname];
monitor.emit('deleted', fullname);
} else if (monitor.directories[fullname]) {
monitor.directories[fullname].w.close();
delete monitor.directories[fullname];
}
}
});
};
/**
* exports
*/
exports.start = function(directory, options) {
return new Monitor(directory, options);
};
Here is my working external test code:
var watch = require("./watch.js");
var fs = require('fs');
monitor = watch.start(__dirname);
monitor.on('added', function(file, stats) {
console.log("Caught Added: " + file);
});
monitor.on('modified', function(file, stats) {
console.log("Caught Modified: " + file);
});
monitor.on('deleted', function(file) {
console.log("Caught deleted: " + file);
});
// try creating a file immediately
fs.openSync('v.md', 'w');
The first test file runs perfectly fine, and I've tried both openSync and open. Finally, here is a version of the same test code, wrapped in a mocha unit test, which times out:
/**
* requirements
*/
var watch, Q, fs, path, mocha, chai, assert;
watch = require('../lib/watch.js');
Q = require('q');
fs = require('fs');
path = require('path');
mocha = require('mocha');
chai = require('chai');
assert = chai.assert;
/**
* variables
*/
var watch_directory = path.join(__dirname, './watch');
/**
* tests
*/
describe('test watch', function() {
it('should create a monitor and run callbacks after fs changes', function(done) {
// I had planned to implement promises that chained the three callbacks
// but couldn't get one of them working in general
var added = function(file, stats) {
console.log("added");
done();
};
var modified = function(file, stats) {
console.log("modified");
};
var deleted = function(file, stats) {
console.log("deleted");
};
// create our service
var monitor = watch.start(watch_directory);
// assert it is defined
assert.isDefined(monitor);
// establish a listener
monitor.on('added', added);
monitor.on('modified', modified);
monitor.on('deleted', deleted);
// here is a file name using the current date to prevent duplication during tests
var file = path.join(watch_directory, (new Date()).getTime() + '.md');
// let's create the file, then delete it
fs.open(file, 'w+', function(err, fileDescriptor) {
// this prints before console output from the watch.js's `justlookatit` method
console.log(err);
console.log("writing to file");
// we probably don't want to try closing the fileDescriptor if the open failed
if (err) return;
// close the file descriptor
fs.close(fileDescriptor, function() {
// delete the file we just created
// fs.unlink(file, function() { /* not a big deal */ });
});
});
// modify a known-existing test file
fs.open('test.md', 'w+', function() {/* we don't care about this */});
})
});
I checked with console.log(fullname) inside the justlookatit method in the watch code, and it prints the correct file name, matching the one generated by the unit test.
However, fs.exists then returns false. As I understand it, that means the file system is notifying me of a file's existence before it exists, which doesn't really make sense. So I tried adding an additional delay by wrapping my fs.exists call in a setTimeout, and that didn't change the results. I have also tried using both openSync and existsSync, and that made no difference.
I'm stumped. Does anyone have any ideas why the mocha code isn't working?
So, the solution was to go for a walk. I came back, looked at the code again, and figured out the cause of the problem with mocha, along with many other bugs.
The problem was a lack of context. The justlookatit method does not have a context, and in the test.js scenario it is watching the current directory, while the mocha test is watching a sub-directory.
path.resolve was receiving only the file name, not the directory, and therefore resolved it against the default (executable's) directory, i.e. the level of test.js, or of watch_test.js for mocha. It then failed to locate any of the files in the mocha test case, because they were all one level below the executable.
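In other words, the fix was roughly to anchor the resolution to the watched directory instead of the process's working directory:
// before: resolves against process.cwd(), i.e. wherever the tests were launched from
var fullname = path.resolve(file);

// after: resolves against the directory being watched
var fullname = path.resolve(this.directory, file);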
I won't go into detail about all the other bugs, but I may come back and post the repository link when I get to a point that I want to push it online.
You're missing the done() callback at the end of your test. Unless you call that callback, Mocha will time out every time.
I created a plugin that writes JSON data to a JSON file.
But I don't understand why I should send my JSON object down the pipe rather than writing the file directly in my plugin.
I want to use my plugin with this syntax:
gulp.task('js-hash', function()
{
// Get all js in redis
gulp.src('./build/js/**/*.js')
.pipe(getHashFile('/build/js/'))
.pipe(gulp.dest('./build/js/hash.json'));
});
And not this:
gulp.task('js-hash', function()
{
// Get all js in redis
gulp.src('./build/js/**/*.js')
.pipe(getHashFile('./build/js/hash.json', '/build/js/'));
});
This is my plugin:
var through = require('through2');
var gutil = require('gulp-util');
var crypto = require('crypto');
var fs = require('fs');
var PluginError = gutil.PluginError;
// Consts
const PLUGIN_NAME = 'get-hash-file';
var json = {};
function getHashFile(filename, basename)
{
if (!filename) {
throw PluginError(PLUGIN_NAME, "Missing filename !");
}
// Creating a stream through which each file will pass
var stream = through.obj(function (file, enc, callback) {
if (file.isNull()) {
this.push(file); // Do nothing if no contents
return callback();
}
if (file.isBuffer()) {
var hash = crypto.createHash('sha256').update(String(file.contents)).digest('hex');
json[file.path.replace(file.cwd+basename, '')] = hash;
return callback();
}
if (file.isStream()) {
this.emit('error', new PluginError(PLUGIN_NAME, 'Stream not supported!'));
return callback();
}
}).on('finish', function () {
fs.writeFile(filename, JSON.stringify(json), function(err) {
if (err) {
throw err;
}
});
});
// returning the file stream
return stream;
}
// Exporting the plugin main function
module.exports = getHashFile;
What are your ideas?
Nothing prevents you from doing this... besides not respecting the plugin guidelines!
Users actually assume a plugin will stream files and that they can pipe them to other plugins.
If I read your code right, you're trying to generate a file that contains all the SHA hashes of the inbound files. Why not let users take this file and pipe it to other plugins? You'd be surprised what people can do.
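As a rough sketch of what that could look like (the file name and structure below are illustrative, not a fixed API): collect the hashes in the transform function, then use through2's flush callback to push a single new Vinyl file downstream, so users can gulp.dest() it wherever they like. gutil.File is a Vinyl file constructor.
var through = require('through2');
var crypto = require('crypto');
var gutil = require('gulp-util');

function getHashFile(basename) {
    var json = {};
    return through.obj(function(file, enc, callback) {
        if (file.isBuffer()) {
            var hash = crypto.createHash('sha256').update(String(file.contents)).digest('hex');
            json[file.path.replace(file.cwd + basename, '')] = hash;
        }
        callback(); // inbound files are consumed; only the summary file is emitted
    }, function(callback) {
        // flush callback: runs once every file has passed through
        this.push(new gutil.File({
            path: 'hash.json',
            contents: new Buffer(JSON.stringify(json))
        }));
        callback();
    });
}

// usage:
// gulp.src('./build/js/**/*.js')
//     .pipe(getHashFile('/build/js/'))
//     .pipe(gulp.dest('./build/js/'));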
While this question looks a bit opinion-based, you could definitely put the focus on how to deal with files that may not belong in the main stream of files. Issues like this can be found in many plugins; for example, the gulp-uglify authors are wondering how they can add source maps without mixing JS and source maps downstream.
In Meteor I can read a file like this:
myjson = JSON.parse(Assets.getText("lib/myfile.json"))
Now I want to iterate through a folder and read all the available JSON files. What would be the best way to do this without installing extra NPM packages?
Thank you for your time.
I'm not sure if this is the best way, but it is certainly an easy one:
var fs = Npm.require('fs');
fs.readdir('./assets/app/myFolder', function(e, r) {
_.each(r, function(filename) {
Assets.getText('myFolder/' + filename);
});
});
I wrapped Hubert OG's code into a function with Meteor.bindEnvironment. I believe this is necessary because Fibers are not available outside of the Meteor environment.
See https://www.eventedmind.com/feed/49CkbYeyKoa7MyH5R
Beware that external Node packages have a different document root than Meteor.
var done, files;
var fs = Npm.require('fs');
files = fs.readdirSync("../../../../../server/collections/lib/");
done = Meteor.bindEnvironment(function(files) {
return _.each(files, function(filename) {
var myjson;
myjson = JSON.parse(Assets.getText("lib/" + filename));
/* do Something */
});
}, function(e) {
throw e;
});
done(files);