IPFS in JavaScript 'cat' function doesn't work

I created this test case to prove that the cat method is not working for me using the IPFS JavaScript library. What am I doing wrong? My console output never shows anything from inside the node.files.cat function; it's as if the (err, filestream) callback is never called at all. I know my multihash is at least partly valid, because if I change it I get a fatal error. Right now, though, it seemingly just locks up and pauses after NODE READY.
const IPFS = require('ipfs')
const path = require('path')
const os = require('os')
const fs = require('fs')

console.log('ipfs test ')

var mhash = "Qmc5LfkMVAhvzip2u2RjRBRhgVthtSokHsz4Y5bgaBCW2R";

// Create the IPFS node instance
const node = new IPFS()

node.on('ready', () => {
  // Your node is now ready to use \o/
  console.log('NODE READY')

  /*
  THIS WORKS
  var test_rstream = fs.createReadStream( path.join(__dirname, '.', '/public/sample_land_file.json') )
  var wstream = fs.createWriteStream(os.tmpdir() + '/lobc_cache/'+'Qmc5LfkMVAhvzip2u2RjRBRhgVthtSokHsz4Y5bgaBCW2R');
  wstream.on('finish', function() {
    console.log('Written ' + wstream.bytesWritten + ' ' + wstream.path);
    test_rstream.close()
  });
  test_rstream.pipe(wstream);
  */

  node.files.cat("Qmc5LfkMVAhvzip2u2RjRBRhgVthtSokHsz4Y5bgaBCW2R", function (err, filestream) {
    console.log('WHY ISNT THIS FIRING ') // i never see this logged
    console.log(filestream)
    console.log(os.tmpdir())
    if (!fs.existsSync(os.tmpdir() + '/lobc_cache')) {
      fs.mkdirSync(os.tmpdir() + '/lobc_cache');
    }
    var wstream = fs.createWriteStream(os.tmpdir() + '/lobc_cache/'+'Qmc5LfkMVAhvzip2u2RjRBRhgVthtSokHsz4Y5bgaBCW2R');
    result = '';
    wstream.on('finish', function() {
      console.log('Written ' + wstream.bytesWritten + ' ' + wstream.path);
      filestream.close()
    });
    filestream.pipe(wstream);
    // wstream.end();
    // file will be a stream containing the data of the file requested
  })

  // stopping a node
  node.stop(() => {
    // node is now 'offline'
  })
})

node.on('start', () => {
  console.log('NODE START')
})

This looks like a bug. A quick way to work around it is to put the node.files.cat call inside the .on('ready') callback; it seems that bitswap drops requests made before the node is online.
Let me know if this works.
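For reference, here is a minimal sketch of that structure, assuming the same callback-style files.cat API your code already uses; the one extra idea (my addition, not part of your snippet) is to stop the node only after the cat callback has fired, so it stays online while the data is fetched:

const IPFS = require('ipfs')

const node = new IPFS()

node.on('ready', () => {
  // Issue the cat only once the node reports ready...
  node.files.cat('Qmc5LfkMVAhvzip2u2RjRBRhgVthtSokHsz4Y5bgaBCW2R', (err, filestream) => {
    if (err) throw err
    // ...consume filestream here (e.g. pipe it into a write stream)...
    // ...and only stop the node once you are done with it (assumption, not from the original snippet)
    node.stop(() => console.log('node stopped'))
  })
})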

Related

Javascript Await Changes Local Variables? [closed]

Can anyone explain what I'm doing wrong with my use of asynchronous functions in JavaScript?
Basically, I have to use an asynchronous call in my Node.js code to grab an open port for me to use. There is a local variable set outside of the asynchronous call that I can access and use just fine until I await the asynchronous function's return. After that, the local variable is undefined.
(async () => {
  console.log("CHECK AFTER ASYNC1: " + csvFilePath);

  // First, grab a valid open port
  var port;
  while (!port || portsInProcess.indexOf(port) >= 0) {
    console.log("CHECK AFTER ASYNC2: " + csvFilePath);
    port = await getPort();
    console.log(port);
  }

  console.log("CHECK AFTER ASYNC3: " + csvFilePath);
  portsInProcess.push(port);

  // ... more code below...
Checks #1 and 2 are fine for the csvFilePath variable, but check #3 shows that it's undefined. The port number, however, is fine. This leads me to believe that there's some weirdness with asynchronous function calls in Javascript that ONLY affects local variables; the global variables I use further down are just fine. Unfortunately here, I cannot make the csvFilePath variable global since that will introduce race conditions on that variable too (which I'm preventing elsewhere; the while loop is to help prevent race conditions on the port number, which is basically unused in my simple tests on localhost).
Just in case it's helpful, here's the output I'm getting:
CHECK AFTER ASYNC1: data/text/crescent_topics.csv
CHECK AFTER ASYNC2: data/text/crescent_topics.csv
58562
CHECK AFTER ASYNC3: null
It might also be worth mentioning it's really only those first few lines of code to dynamically grab an open port that are the lines of code I added. The code that I had before which used a fixed port number worked just fine (including this csvFilePath variable remaining stable).
My understanding of the await functionality was that it makes the asynchronous function act more or less synchronously, which is what seems to be happening here; the code I have farther down that uses the port number is not running until after the port number is set. (But even if that wasn't the case, why is the csvFilePath variable being unset since I'm not altering it or using it in any way here?)
EDIT: Here's some more code to provide additional context
var spawn = require('child_process').spawn;
var fs = require("fs");
var async = require('async');
var zmq = require('zmq');
var readline = require('readline');
const getPort = require('get-port');
/* Export the Nebula class */
module.exports = Nebula;
/* Location of the data for the Crescent dataset */
var textDataPath = "data/text/";
var crescentRawDataPath = textDataPath + "crescent_raw";
var crescentTFIDF = textDataPath + "crescent tfidf.csv";
var crescentTopicModel = textDataPath + "crescent_topics.csv";
/* Location of the data for the UK Health dataset */
var ukHealthRawDataPath = textDataPath + "uk_health_raw";
var ukHealthTFIDF = textDataPath + "uk_health.csv";
/* Map CSV files for text data to raw text location */
var textRawDataMappings = {};
textRawDataMappings[crescentTFIDF] = crescentRawDataPath;
textRawDataMappings[crescentTopicModel] = crescentRawDataPath;
textRawDataMappings[ukHealthTFIDF] = ukHealthRawDataPath;
textRawDataMappings[textDataPath + "uk_health_sm.csv"] = ukHealthRawDataPath;
/* The pipelines available to use */
var flatTextUIs = ["cosmos", "composite", "sirius", "centaurus"];
var pipelines = {
andromeda: {
file: "pipelines/andromeda.py",
defaultData: "data/highD/Animal_Data_study.csv"
},
cosmos: {
file: "pipelines/cosmos.py",
defaultData: textDataPath + "crescent tfidf.csv"
},
sirius: {
file: "pipelines/sirius.py",
defaultData: "data/highD/Animal_Data_paper.csv"
},
centaurus: {
file: "pipelines/centaurus.py",
defaultData: "data/highD/Animal_Data_paper.csv"
},
twitter: {
file: "pipelines/twitter.py",
},
composite: {
file: "pipelines/composite.py",
defaultData: textDataPath + "crescent tfidf.csv"
},
elasticsearch: {
file: "pipelines/espipeline.py",
args: []
}
};
/* The locations of the different types of datasets on the server */
var textDataFolder = "data/text/";
var highDDataFolder = "data/highD/";
var customCSVFolder = "data/customCSV/";
var sirius_prototype = 2;
// An array to track the ports being processed to eliminate race conditions
// as much as possible
var portsInProcess = [];
var nextSessionNumber = 0;
var usedSessionNumbers = [];
/* Nebula class constructor */
function Nebula(io, pipelineAddr) {
/* This allows you to use "Nebula(obj)" as well as "new Nebula(obj)" */
if (!(this instanceof Nebula)) {
return new Nebula(io);
}
/* The group of rooms currently active, each with a string identifier
* Each room represents an instance of a visualization that can be shared
* among clients.
*/
this.rooms = {};
this.io = io;
/* For proper use in callback functions */
var self = this;
/* Accept new WebSocket clients */
io.on('connection', function(socket) {
// Skipped some irrelevant Socket.io callbacks
**// Use the csvFilePath to store the name of a user-defined CSV file
var csvFilePath = null;**
/* Helper function to tell the client that the CSV file is now ready for them
* to use. They are also sent a copy of the data
*/
var csvFileReady = function(csvFilePath) {
// Let the client know that the CSV file is now ready to be used on
// the server
socket.emit("csvDataReady");
// Prepare to parse the CSV file
var csvData = [];
const rl = readline.createInterface({
input: fs.createReadStream(csvFilePath),
crlfDelay: Infinity
});
// Print any error messages we encounter
rl.on('error', function (err) {
console.log("Error while parsing CSV file: " + csvFilePath);
console.log(err);
});
// Read each line of the CSV file one at a time and parse it
var columnHeaders = [];
var firstColumnName;
rl.on('line', function (data) {
var dataColumns = data.split(",");
// If we haven't saved any column names yet, do so first
if (columnHeaders.length == 0) {
columnHeaders = dataColumns;
firstColumnName = columnHeaders[0];
}
// Process each individual line of data in the CSV file
else {
var dataObj = {};
var i;
for (i = 0; i < dataColumns.length; i++) {
var key = columnHeaders[i];
var value = dataColumns[i];
dataObj[key] = value
}
csvData.push(dataObj);
}
});
// All lines are read, file is closed now.
rl.on('close', function () {
// On certain OSs, like Windows, an extra, blank line may be read
// Check for this and remove it if it exists
var lastObservation = csvData[csvData.length-1];
var lastObservationKeys = Object.keys(lastObservation);
if (lastObservationKeys.length == 1 && lastObservation[lastObservationKeys[0]] == "") {
csvData.pop();
}
// Provide the CSV data to the client
socket.emit("csvDataReadComplete", csvData, firstColumnName);
});
};
**/* Allows the client to specify a CSV file already on the server to use */
socket.on("setCSV", function(csvName) {
console.log("setCSV CALLED");
csvFilePath = "data/" + csvName;
csvFileReady(csvFilePath);
console.log("CSV FILE SET: " + csvFilePath);
});**
// Skipped some more irrelevant callbacks
/* Allows a client to join a room. If the room doesn't exist yet,
* initiate it and send the new room to the client. Otherwise, send
* the client the current state of the room.
*/
socket.on('join', function(roomName, user, pipeline, args) {
console.log("Join called for " + pipeline + " pipeline; room " + roomName);
socket.roomName = roomName;
socket.user = user;
socket.join(roomName);
console.log("CSV FILE PATH: " + csvFilePath);
var pipelineArgsCopy = [];
if (!self.rooms[roomName]) {
var room = {};
room.name = roomName;
room.count = 1;
room.points = new Map();
room.similarity_weights = new Map();
if (pipeline == "sirius" || pipeline == "centaurus") {
room.attribute_points = new Map();
room.attribute_similarity_weights = new Map();
room.observation_data = [];
room.attribute_data = [];
}
/* Create a pipeline client for this room */
console.log("CHECK BEFORE ASYNC: " + csvFilePath);
**// Here's the code snippet I provided above**
**(async () => {
console.log("CHECK AFTER ASYNC1: " + csvFilePath);
// First, grab a valid open port
var port;
while (!port || portsInProcess.indexOf(port) >= 0) {
console.log("CHECK AFTER ASYNC2: " + csvFilePath);
port = await getPort();
console.log(port);
}
console.log("CHECK AFTER ASYNC3: " + csvFilePath);**
portsInProcess.push(port);
console.log("CHECK AFTER ASYNC4: " + csvFilePath);
if (!pipelineAddr) {
var pythonArgs = ["-u"];
if (pipeline in pipelines) {
// A CSV file path should have already been set. This
// file path should be used to indicate where to find
// the desired file
console.log("LAST CHECK: " + csvFilePath);
if (!csvFilePath) {
csvFilePath = pipelines[pipeline].defaultData;
}
console.log("FINAL CSV FILE: " + csvFilePath);
pipelineArgsCopy.push(csvFilePath);
// If the UI supports reading flat text files, tell the
// pipeline where to find the files
if (flatTextUIs.indexOf(pipeline) >= 0) {
pipelineArgsCopy.push(textRawDataMappings[csvFilePath]);
}
// Set the remaining pipeline args
pythonArgs.push(pipelines[pipeline].file);
pythonArgs.push(port.toString());
if (pipeline != "twitter" && pipeline != "elasticsearch") {
pythonArgs = pythonArgs.concat(pipelineArgsCopy);
}
}
else {
pythonArgs.push(pipelines.cosmos.file);
pythonArgs.push(port.toString());
pythonArgs.push(pipelines.cosmos.defaultData);
pythonArgs.push(crescentRawDataPath);
}
// used in case of CosmosRadar
for (var key in args) {
if (args.hasOwnProperty(key)) {
pythonArgs.push("--" + key);
pythonArgs.push(args[key]);
}
}
// Dynamically determine which distance function should be
// used
if (pythonArgs.indexOf("--dist_func") < 0) {
if (pipeline === "twitter" || pipeline === "elasticsearch" ||
csvFilePath.startsWith(textDataPath)) {
pythonArgs.push("--dist_func", "cosine");
}
else {
pythonArgs.push("--dist_func", "euclidean");
}
}
console.log(pythonArgs);
console.log("");
var pipelineInstance = spawn("python2.7", pythonArgs, {stdout: "inherit"});
pipelineInstance.on("error", function(err) {
console.log("python2.7.exe not found. Trying python.exe");
pipelineInstance = spawn("python", pythonArgs,{stdout: "inherit"});
pipelineInstance.stdout.on("data", function(data) {
console.log("Pipeline: " + data.toString());
});
pipelineInstance.stderr.on("data", function(data) {
console.log("Pipeline error: " + data.toString());
});
});
/* Data received by node app from python process,
* output this data to the output stream (on 'data'),
* we want to convert that received data into a string and
* append it to the overall data String
*/
pipelineInstance.stdout.on("data", function(data) {
console.log("Pipeline STDOUT: " + data.toString());
});
pipelineInstance.stderr.on("data", function(data) {
console.log("Pipeline error: " + data.toString());
});
room.pipelineInstance = pipelineInstance;
}
/* Connect to the pipeline */
pipelineAddr = pipelineAddr || "tcp://127.0.0.1:" + port.toString();
room.pipelineSocket = zmq.socket('pair');
room.pipelineSocket.connect(pipelineAddr);
pipelineAddr = null;
portsInProcess.splice(portsInProcess.indexOf(port), 1);
/* Listens for messages from the pipeline */
room.pipelineSocket.on('message', function (msg) {
self.handleMessage(room, msg);
});
self.rooms[roomName] = socket.room = room;
invoke(room.pipelineSocket, "reset");
})();
}
else {
socket.room = self.rooms[roomName];
socket.room.count += 1;
if (pipeline == "sirius" || pipeline == "centaurus") {
socket.emit('update', sendRoom(socket.room, true), true);
socket.emit('update', sendRoom(socket.room, false), false);
}
else {
socket.emit('update', sendRoom(socket.room));
}
}
// Reset the csvFilePath to null for future UIs...
// I don't think this is actually necessary since
// csvFilePath is local to the "connections" message,
// which is called for every individual room
csvFilePath = null;
});
// Skipped the rest of the code; it's irrelevant
});
}
Full printouts:
setCSV CALLED
CSV FILE SET: data/text/crescent_topics.csv
Join called for sirius pipeline; room sirius0
CSV FILE PATH: data/text/crescent_topics.csv
CHECK BEFORE ASYNC: data/text/crescent_topics.csv
CHECK AFTER ASYNC1: data/text/crescent_topics.csv
CHECK AFTER ASYNC2: data/text/crescent_topics.csv
58562
CHECK AFTER ASYNC3: null
CHECK AFTER ASYNC4: null
LAST CHECK: null
FINAL CSV FILE: data/highD/Animal_Data_paper.csv
[ '-u',
'pipelines/sirius.py',
'58562',
'data/highD/Animal_Data_paper.csv',
undefined,
'--dist_func',
'euclidean' ]
Since bolding of code doesn't work, just search for the "**" to find the relevant pieces I've marked.
TL;DR There's a lot of communication happening between the client and server to establish an individualized communication that is directly linked to a specific dataset. The user has the ability to upload a custom CSV file to the system, but the code I'm working with right now is just trying to select an existing CSV file on the server, so I omitted the callbacks for the custom CSV file. Once the file has been selected, the client asks to "join" a room/session. The case I'm working with right now assumes that this is a new room/session as opposed to trying to do some shared room/session with another client. (Yes, I know, the code is messy for sharing rooms/sessions, but it works for the most part for now and is not my main concern.) Again, all this code worked just fine before the asynchronous code was added (and using a static port variable), so I don't know what changed so much by adding it.
Now that you've included the whole code context, we can see that the code after your async IIFE is what is causing the problem.
An async function returns a promise as soon as it hits an await. And, while that await is waiting for its asynchronous operation, the code following the call to the async function runs. In your case, you're essentially doing this:
var csvFilePath = someGoodValue;

(async () => {
  port = await getPort();
  console.log(csvFilePath); // this will be null
})();

csvFilePath = null; // this runs as soon as the above code hits the await
So, as soon as you hit your first await, the async function returns a promise and the code following it continues to run, hitting the line of code that resets your csvFilePath.
There are probably cleaner ways to restructure your code, but a simple thing you could do is this:
var csvFilePath = someGoodValue;

(async () => {
  port = await getPort();
  console.log(csvFilePath); // still set here, because the reset below hasn't run yet
})().finally(() => {
  csvFilePath = null;
});
Note: .finally() is supported in node v10+. If you're using an older version, you can reset the path in both .then() and .catch().
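For older versions, a minimal sketch of that variant (same idea, just spelled out with .then()/.catch() so the reset runs after the async work settles either way):

var csvFilePath = someGoodValue;

(async () => {
  port = await getPort();
  // ...rest of the async work...
})().then(() => {
  csvFilePath = null;   // reset after the async function has finished
}).catch((err) => {
  console.log(err);
  csvFilePath = null;   // reset even if something in the async function threw
});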
Or, as your comment says, maybe you can just remove the resetting of the csvFilePath entirely.
I realized after some silly tests I tried that I'm resetting csvFilePath to null outside the asynchronous call, which is what is causing the error... Oops!

Sending image buffer to node.js tcp server

I am trying to send image data from my TCP client to my TCP server, both written in Node.js.
I have already tried doing it this way:
client:
function onData(socket, data) {
  var data = Buffer.from(data).toString()
  var arg = data.split(',')
  var event = arg[0]
  console.log(event)
  if (event == 'screenshot') {
    console.log(hostname)
    console.log('control client uid ' + arg[1] + 'then we screenshot')
    screenshot()
      .then(img => {
        console.log(img)
        socket.write('screenshotData,' + ',' + hostname + ',' + img)
        socket.write('stdout,' + arg[2] + ',Screenshot')
      })
      .catch(err => {
        console.log(err)
        socket.write('error', err)
      })
  }
}
server:
sock.on('data', function(data) {
  // right here i need to parse the first 'EVENT' part of the text so i can get custom tcp events
  var data = Buffer.from(data).toString()
  var arg = data.split(',')
  var event = arg[0]
  if (event == 'screenshotData') {
    agentName = arg[1]
    img = arg[2]
    console.log('agent-name ' + agentName)
    console.log('screnshotdata' + img)
    var dt = dateTime.create()
    var formattedTime = dt.format('Y-m-d-H-M-S')
    var folder = 'adminPanel/screenshots/'
    var filename = formattedTime + '-' + agentName + '.png'
    console.log(filename)
    fs.writeFile(folder + filename, img, function(err) {
      console.log(err)
    })
  }
})
I had to build a rudimentary event system on top of TCP; if you know a better way, let me know. Anyway, the client takes a screenshot and then calls socket.write('screenshotData,' + ',' + hostname + ',' + img).
But the data arrives in multiple chunks: my console shows random gibberish being treated as new events many times, so I don't even know how I would parse this. Any help would be great.
You are treating your TCP stream as a message-oriented protocol, and on top of that you are mixing encodings (your image Buffer is simply concatenated into the string).
I suggest you switch from raw TCP streams to WebSockets. The interface remains largely the same (read is replaced with message events, and so on), but it actually behaves the way you are expecting.
Working server:
const WebSocket = require('ws');
const fs = require('fs');

const PORT = 3000;

const handleMessage = (data) => {
  const [action, payload] = data.split(',');
  const imageData = Buffer.from(payload, 'base64');
  const imageHandle = fs.createWriteStream('screenshot.jpg');
  imageHandle.write(imageData);
  imageHandle.end();
  console.log(`Saved screenshot (${imageData.length} bytes)`);
};

const wss = new WebSocket.Server({port: PORT});
wss.on('connection', (ws) => {
  console.log('Opened client');
  ws.on('message', (data) => handleMessage(data));
});

console.log('Server started');
client:
const WebSocket = require('ws');
const screenshot = require('screenshot-desktop');

const PORT = 3000;

const sendImage = (client, image) => {
  const payload = image.toString('base64');
  const message = ["screenshot", payload].join(',');
  console.log(`Sending ${image.length} bytes in message ${message.length} bytes`);
  client.send(
    message,
    () => {
      console.log('Done');
      process.exit(0);
    }
  );
};

const client = new WebSocket('ws://localhost:' + PORT + '/');
client.on('open', () => {
  console.log('Connected');
  screenshot().then(image => sendImage(client, image));
});
If you specifically want to transfer an image file, a commonly suggested approach is to work with base64 data: convert the image to a base64 string, send it over the channel, and convert it back into a .jpg/.png on the server.
For reference: https://www.npmjs.com/package/image-to-base64
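A minimal sketch of that round trip using Node's built-in Buffer (the file names are just illustrative):

const fs = require('fs');

// Sender: read the image and encode it as a base64 string
const b64 = fs.readFileSync('screenshot.png').toString('base64');
// ...send b64 over the channel as ordinary text...

// Receiver: decode the string back into binary and write it out
fs.writeFileSync('received.png', Buffer.from(b64, 'base64'));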

Promise.all() not resolving when running server - otherwise works fine

I've written a small tool that returns a promise after calling several other promises. This tool works great when I test it solo; it takes about 10 seconds in the example below. However, when I try to run it alongside an HTTP server instance, it takes on the order of several minutes to return, if it returns at all!
I'm fairly sure I'm just misunderstanding something here, as I'm not extremely proficient in Node. If anyone can spot an issue, or suggest an alternative to using promises for handling asynchronous methods, please let me know!
Just to clarify, it's the Promise.all returned by the traceRoute function which is hanging. The sub-promises are all resolving as expected.
Edit: As suggested in the comments, I have also tried a recursive version with no call to Promise.all; same issue.
This is a working standalone version being called without any http server instance running:
const dns = require('dns');
const ping = require('net-ping');
var traceRoute = (host, ttl, interval, duration) => {
var session = ping.createSession({
ttl:ttl,
timeout: 5000
});
var times = new Array(ttl);
for (var i=0; i<ttl; i++){
times[i] = {'ttl': null, 'ipv4': null, 'hostnames': [], 'times': []}
};
var feedCb = (error, target, ttl, sent, rcvd) => {
var ms = rcvd - sent;
if (error) {
if (error instanceof ping.TimeExceededError) {
times[ttl-1].ttl = ttl;
times[ttl-1].ipv4 = error.source;
times[ttl-1].times.push(ms)
} else {
console.log(target + ": " +
error.toString () +
" (ttl=" + ttl + " ms=" + ms +")");
}
} else {
console.log(target + ": " +
target + " (ttl=" + ttl + " ms=" + ms +")");
}
}
var proms = new Array();
var complete = 0
while(complete < duration){
proms.push(
new Promise((res, rej) => {
setTimeout(function(){
session.traceRoute(
host,
{ maxHopTimeouts: 5 },
feedCb,
function(e,t){
console.log('traceroute done: resolving promise')
res(); // resolve inner promise
}
);
}, complete);
})
)
complete += interval;
}
return Promise.all(proms)
.then(() => {
console.log('resolving traceroute');
return times.filter((t)=> t.ttl != null);
});
}
traceRoute('195.146.144.8', 20, 500, 5000)
.then( (times) => console.log(times) )
Below is the same logic being called from inside the server instance; this is not working as it should. See the inline comment for where exactly it hangs.
const express = require('express');
const http = require('http');
const WebSocket = require('ws');
const app = express();
const server = http.createServer(app);
const wss = new WebSocket.Server({server: server, path: "/wss"});
const dns = require('dns');
const ping = require('net-ping');
var traceRoute = (host, ttl, interval, duration) => {
var session = ping.createSession({
ttl:ttl,
timeout: 5000
});
var times = new Array(ttl);
for (var i=0; i<ttl; i++){
times[i] = {'ttl': null, 'ipv4': null, 'hostnames': [], 'times': []}
};
var feedCb = (error, target, ttl, sent, rcvd) => {
var ms = rcvd - sent;
if (error) {
if (error instanceof ping.TimeExceededError) {
times[ttl-1].ttl = ttl;
times[ttl-1].ipv4 = error.source;
times[ttl-1].times.push(ms)
} else {
console.log(target + ": " +
error.toString () + " (ttl=" + ttl + " ms=" + ms +")");
}
} else {
console.log(target + ": " + target +
" (ttl=" + ttl + " ms=" + ms +")");
}
}
var proms = new Array();
var complete = 0
while(complete < duration){
proms.push(
new Promise((res, rej) => {
setTimeout(function(){
session.traceRoute(
host,
{ maxHopTimeouts: 5 },
feedCb,
function(e,t){
console.log('traceroute done: resolving promise')
res(); // resolve inner promise
}
);
}, complete);
})
)
complete += interval;
}
console.log('Promise all:', proms);
// #####################
// Hangs on this promise
// i.e. console.log('resolving traceroute') is not called for several minutes.
// #####################
return Promise.all(proms)
.then(() => {
console.log('resolving traceroute')
return times.filter((t)=> t.ttl != null)
});
}
wss.on('connection', function connection(ws, req) {
traceRoute('195.146.144.8', 20, 500, 5000)
.then((data) => ws.send(data));
});
app.use('/tools/static', express.static('./public/static'));
app.use('/tools/templates', express.static('./public/templates'));
app.get('*', function (req, res) {
res.sendFile(__dirname + '/public/templates/index.html');
});
server.listen(8081);
Note: I have tried calling it before server.listen, after server.listen, and from inside wss.on('connection', ...); none of these makes a difference. Calling it anywhere while the server is listening causes it to behave in a non-deterministic manner.
I'm not going to accept this answer as it's only a workaround; it was just too long to put in the comments...
None of the promises, including the Promise.all, are throwing exceptions. However, Node seems to be parking the call to Promise.all. I accidentally discovered that if I keep a timeout loop running while waiting for the promise.all to resolve, then it will in fact resolve as and when expected.
I'd love if someone could explain exactly what is happening here as I don't really understand.
var holdDoor = true

var ps = () => {
  setTimeout(function(){
    console.log('status:', proms);
    if (holdDoor) ps();
  }, 500);
}
ps();

return Promise.all(proms)
  .then(() => {
    holdDoor = false
    console.log('Resolving all!')
    return times.filter((t) => t.ttl != null)
  });
Your code is working perfectly fine!
To reproduce this I've created a Dockerfile with a working version. You can find it in this git repository, or you can pull it with docker pull luxferresum/promise-all-problem.
You can run the docker image with docker run -ti -p 8081:8081 luxferresum/promise-all-problem. This will expose the webserver on localhost:8081.
You can also just run the problematic.js with node problematic.js and then opening localhost:8081 in the web browser.
The web socket will be opened by const ws = new WebSocket('ws://localhost:8081/wss'); which then triggers the code to run.
It's just very important to actually open the web socket; without that, the code will not run.
I would suggest replacing the trace route with something else, like a DNS lookup, and seeing if the issue remains. At this point you cannot be sure it relates to raw-socket, since that uses libuv handles directly and does not affect other parts of the Node.js event loop.
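A minimal sketch of that test, assuming a Node version that exposes dns.promises and the same wss setup from the question (the hostname is just illustrative):

const dns = require('dns');

// Temporarily swap traceRoute for a plain DNS lookup wrapped in a promise;
// if this resolves promptly, the stall is specific to the traceroute/raw-socket path.
wss.on('connection', function connection(ws, req) {
  Promise.all([dns.promises.lookup('example.com')])
    .then((results) => ws.send(JSON.stringify(results)));
});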

Node - child process spawn path

I would like to run an exe in Windows from my Node Webkit app.
I am trying the below code but it is not working.
document.getElementById('play').onclick = function() {
  var spawn = require('child_process').spawn;
  var child = spawn(__dirname + '/core.exe', ['/arg1']);
  var snd = new Audio("min.wav");
  snd.play();
  win.minimize();
  child.stdout.on('data', function (data) {
    console.log('stdout: ' + data);
  });
  child.stderr.on('data', function (data) {
    console.log('stderr: ' + data);
  });
  child.on('close', function (code) {
    console.log('child process exited with code ' + code);
    var snd = new Audio("restore.wav");
    snd.play();
    win.restore();
  });
}
Am I getting the path wrong? I need it to be the current directory, and to run the exe with that name and the example arg.
The output SHOULD be a message box, but nothing loads.
Managed to figure it out: it wasn't defined because I was using it in the browser context. (I didn't get the NW.js SDK version for some reason, and found that __dirname was undefined.) Came up with this solution instead.
var path = require('path');
var nwDir = path.dirname(process.execPath);
var spawn = require('child_process').spawn;
var child = spawn(nwDir + '/app/core.exe', ['/arg1']);
Now working as intended.

How do I make JakeJS watch work reliably with Vim?

I'm using a Jakefile to help me update WordPress pages from the command line. I'm using Jake's watch task to re-build every time I edit a file. When I edit a file with Vim, after the first successful build, Jake fails with the following error:
WatchTask started for: default
cp home.html dist/home.html
exec wp --path=../wordpress post list --post_type=page --format=json --fields=ID,post_name { silent: true }
exec wp --path=../wordpress post update 2 dist/home.html --post_type=page
Success: Updated post 2.
jake aborted.
Error: File-task home.html has no existing file, and no action to create one.
at FileBase.isNeeded (/usr/local/lib/node_modules/jake/lib/task/file_task.js:50:17)
at TaskBase.run (/usr/local/lib/node_modules/jake/lib/task/task.js:256:26)
(See full trace by running task with --trace)
I've tried using a sleep function in the rule to delay the rebuild. I tried this because Vim, when saving a file, writes the contents to a new temp file and then renames the new temp file to the original file name. I think the build fails because it's trying to build before the file is fully renamed. Using the sleep doesn't work reliably; it may work once or twice, but then it fails the same way as above.
Here is my Jakefile:
var shell = require('shelljs');
var sleep = require('sleep');
shell.config.verbose = true;
const destDir = 'dist';
const wpDir = '../wordpress';
var files = new jake.FileList();
files.include('*.html');
var outputFiles = files.toArray().map(function(fileName){
return destDir + '/' + fileName;
});
var sourceFile = function(name) {
return name.substr(name.lastIndexOf('/') + 1);
}
function objectToStr(object) {
var s = '';
for(var property in object){
s += property + ': ' + object[property] + '\n';
}
return s;
}
function rmExt(name) {
return name.substr(0, name.lastIndexOf('.'));
}
directory(destDir);
task('default', [destDir].concat(outputFiles));
task('clean', function() {
jake.rmRf(destDir);
});
rule('dist/%.html', sourceFile, function() {
shell.cp(this.source, this.name);
var pages = JSON.parse(shell.exec('wp --path=' + wpDir
+ ' post list --post_type=page --format=json --fields=ID,post_name',
{ silent: true }).stdout);
var postId = null;
var l = pages.length;
for(var i = 0; i < l; i++){
if(pages[i].post_name === rmExt(this.source)){
postId = pages[i].ID;
break;
}
}
if(postId !== null){
shell.exec('wp --path=' + wpDir + ' post update ' + postId
+ ' ' + this.name + ' --post_type=page');
}else{
shell.echo('Unable to find matching post ID for file: ' + this.name);
}
shell.echo('1');
sleep.sleep(2);
shell.echo('2');
});
watchTask('watch', ['default'], function() {
  this.watchFiles.include('*.html');
});
I found a reliable solution using the library node-watch. I replaced the watchTask with a normal Jake task and invoked the task I wanted to call, using node-watch to watch the current working directory. The node-watch library is able to filter files based on a regex, so the watch won't be triggered by the Vim swap file and the like. Below is the code I used:
task('watch', function() {
  var defaultTask = jake.Task['default'];
  defaultTask.invoke();
  defaultTask.reenable(true);
  watch('./', { filter: /.*.html$/ }, function(evt, name) {
    defaultTask.invoke();
    defaultTask.reenable(true);
  });
});
Note: I also invoked the task I want to call when the watch task is first run to make sure any changes made before the watch started are built.
