Node.js is not publishing all MQTT messages inside setInterval - javascript

I am testing my MQTT broker. I have created 1000 clients, and each client publishes one message every 2 seconds, so 1000 messages should be published every 2 seconds. But when I use setInterval, not all 1000 messages are published - only about half of them arrive. When I publish without setInterval, all of the messages are delivered.
It seems to be caused by my Node.js code, but I am unable to understand what exactly is happening. I am using the emqtt broker.
Here is my code:
publish.js
var mqtt = require('mqtt')
var client = [];

for (let index = 0; index < 1000; index++) {
    client[index] = mqtt.connect('mqtt://localhost:1883', { qos: 2 })
}

setInterval(function () {
    for (let index = 0; index < 1000; index++) {
        var payload = JSON.stringify({
            id: index,
            timestamp: Date.now()
        });
        client[index].publish("drivers", payload, { qos: 2 }, function (err) {
            if (err) {
                console.log("There is an error in publishing ", err);
            }
        });
    }
}, 2000)
Subscriber.js
var mqtt = require('mqtt');
var client2 = mqtt.connect('mqtt://localhost:1883', { qos: 2 });
var fs = require('fs');

client2.subscribe("drivers", {
    qos: 2
});

client2.on("message", function (topic, payload) {
    payload = JSON.parse(payload.toString());
    fs.appendFile('drivers', payload.id + "\n", function (err) {
        if (err) throw err;
        console.log('Saved!');
    });
})
What is happening with this code?
Thanks
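One thing worth ruling out before blaming the broker (a minimal sketch, not a confirmed fix): the interval may fire before all 1000 clients have completed their connect and QoS 2 handshakes, so some publishes may still be queued while clients are connecting. The sketch below uses the standard mqtt.js 'connect' event to start publishing only after every client is connected, and counts publish acknowledgements so they can be compared against the lines the subscriber writes. The 1000-client count and the 'drivers' topic are taken from the question; everything else is an assumption.
// Diagnostic sketch: wait for every client's 'connect' event before starting
// the interval, and count successful publish callbacks.
var mqtt = require('mqtt');
var clients = [];
var connectedCount = 0;
var ackedCount = 0;

for (let index = 0; index < 1000; index++) {
    clients[index] = mqtt.connect('mqtt://localhost:1883');
    clients[index].on('connect', function () {
        connectedCount++;
        if (connectedCount === 1000) {
            startPublishing();
        }
    });
}

function startPublishing() {
    setInterval(function () {
        for (let index = 0; index < 1000; index++) {
            var payload = JSON.stringify({ id: index, timestamp: Date.now() });
            clients[index].publish('drivers', payload, { qos: 2 }, function (err) {
                if (err) {
                    console.log('There is an error in publishing ', err);
                } else {
                    ackedCount++; // compare this against the number of lines the subscriber appends
                }
            });
        }
        console.log('publish acks so far:', ackedCount);
    }, 2000);
}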

Related

UDP pinger timeout in javascript dgram node

So, for a course I'm taking, we're coding a UDP pinger in JavaScript, using Node.js and dgram. We've been given the following assignment:
Create the client code for an application. Your client should send 10 ping messages to the target UDP server. For each message, your client should calculate the round trip time from when the package is sent to when the response is received. Should a package be dropped along the way, the client is to handle this as well. This should be done by having the client wait 1 second for a response after sending each package. If no reply is received, the client should log accordingly (package lost, no response, timeout, etc.) and send a new package to try again. However, the total amount of packages sent should still only be 10. The client should also calculate a percentage of packages lost/no response received, and log this before connection is closed.
This of course seems rather straightforward, and I thought so too. I've been coding it for a while, and I'm almost finished, but I'm having issues with the part where the client sends a package, awaits a response, and then acts accordingly.
So far, what my code does is basically to send a ping, and when a pong is received, it sends another ping. What I can't figure out is how to make it log that a response wasn't received before sending the next package. In other words, I know how to make it react to a received response, I just don't know how to make it respond if no response is given within a set timeframe. I've tried playing around with if-statements and loops, as well as async functions, but I haven't made it work yet, so now I'm asking for help.
Code is here:
const dgram = require("dgram");
const ms = require("ms");

var client = dgram.createSocket("udp4");
const PORT = 8000;
const HOST = "localhost";

let today = "";
let t0 = "";
let t1 = "";
let RTT = "";
let sentPackages = "";
let receivedPackages = "";

const messageOutbound = Buffer.from("You Up?");

sendPackage();
const x = setInterval(sendPackage, 1000);

client.on("message", (message, remote) => {
    receivedPackages++
    today = new Date();
    t1 = today.getTime();
    console.log(
        `Message from: ${remote.address}:${remote.port} saying: ${message}`
    );
    RTT = ms(t1 - t0, { long: true });
    console.log(RTT);
    const x = setInterval(sendPackage, 1000);
});

client.on('error', (err) => {
    console.log(`server error:\n${err.stack}`);
    server.close();
});

async function sendPackage() {
    if (sentPackages < 10) {
        client.send(messageOutbound, 0, messageOutbound.length, PORT, HOST, () => {
            sentPackages++
            let today = new Date();
            t0 = today.getTime();
            console.log(
                `message has been sent to ${HOST}:${PORT}. Message sent at: ${t0}`
            );
        });
    } else {
        calculateLoss();
        client.close();
    }
};

function calculateLoss() {
    let amountLost = sentPackages - receivedPackages;
    let percentageLoss = amountLost / sentPackages * 100
    console.log(amountLost);
    console.log(percentageLoss + "% of packages lost");
};
I would use async / await to simply wait 1000ms / 1s between messages, then keep track of all messages in an array.
We identify messages with a uuid, so we can ensure that messages we receive can be matched to those we send.
We can then log all the required statistics afterwards:
const dgram = require("dgram");
const uuid = require('uuid');

const PORT = 8000;
const HOST = "localhost";

const client = dgram.createSocket("udp4");

// Array that keeps track of the messages we send
let messages = [];

// When we get a message, decode it and update our message list accordingly...
client.on("message", (messageBuffer, remote) => {
    let receivedMessage = bufferToMessage(messageBuffer);
    // Find the message we sent and set the response time accordingly.
    let message = messages.find(message => message.uuid === (receivedMessage || {}).uuid);
    if (message) {
        message.responseTimestamp = new Date().getTime();
    }
});

client.on('error', (err) => {
    console.log(`client error:\n${err.stack}`);
    client.close();
});

function createMessage() {
    return { uuid: uuid.v4() };
}

function messageToBuffer(message) {
    return Buffer.from(JSON.stringify(message), "utf-8");
}

function bufferToMessage(buffer) {
    try {
        return JSON.parse(buffer.toString("utf-8"));
    } catch (error) {
        return null;
    }
}

// Wait for timeout milliseconds
function wait(timeout) {
    return new Promise(resolve => setTimeout(resolve, timeout));
}

function sendMessage(message, port, host) {
    // Save the message to our list...
    messages.push(message);
    console.log(`Sending message #${messages.length}...`);
    // Set the time we send our message...
    message.sentTimestamp = new Date().getTime();
    let messageBuffer = messageToBuffer(message);
    return new Promise((resolve, reject) => {
        client.send(messageBuffer, 0, messageBuffer.length, port, host, (error, bytes) => {
            if (error) {
                reject(error);
            } else {
                resolve(bytes);
            }
        })
    });
}

async function sendMessages(messageCount, port, host, timeout) {
    for (let messageIndex = 0; messageIndex < messageCount; messageIndex++) {
        let message = createMessage();
        await sendMessage(message, port, host);
        await wait(timeout);
        if (message.responseTimestamp) {
            console.log(`Response received after ${message.responseTimestamp - message.sentTimestamp} ms...`);
        } else {
            console.log(`No response received after ${timeout} ms...`);
        }
    }
    logStatistics(messages);
}

function logStatistics(messages) {
    let messagesSent = messages.length;
    let messagesReceived = messages.filter(m => m.responseTimestamp).length;
    let messagesLost = messagesSent - messagesReceived;
    console.log(`Total messages sent: ${messagesSent}`);
    console.log(`Total messages received: ${messagesReceived}`);
    console.log(`Total messages lost: ${messagesLost} / ${(100 * messagesLost / (messages.length || 1)).toFixed(2)}%`);
    if (messagesReceived > 0) {
        console.log(`Average response interval:`, messages.filter(m => m.responseTimestamp).reduce((averageTime, message) => {
            averageTime += (message.responseTimestamp - message.sentTimestamp) / messagesReceived;
            return averageTime;
        }, 0) + " ms");
    }
}

sendMessages(10, PORT, HOST, 1000);

Client side is rendering faster than my data loads on the server side (Node.js)

I am trying to build a traffic analysis app, but my client side is rendering faster than my data loads on the server side. (More specifically, it is the visual recognition data that is not being rendered on the page; I am using the IBM Visual Recognition cloud service.)
How can I solve this issue? I have been stuck on this for a few days.
This is the code where I call my function.
fetchAPI(10).then((data) => {
    var data = data["items"][0]["cameras"]
    for (let i = 0; i < data.length; i++) {
        image = data[i]["image"]
        images.push(image);
        coordinates.push([data[i]["location"]["latitude"], data[i]["location"]["longitude"]]);
    }
}).then(recognition(10, pageRender))
This is where I do my visual recognition
async function recognition(res, callback) {
    // Fetch api to get the images to be analyse
    var data = await fetchAPI(10)
    var info = await data["items"][0]["cameras"]
    // Loop through the images
    for (let i = 0; i < info.length; i++) {
        var image = await info[i]["image"]
        // Params to be analyse
        var params = await {
            url: image,
            classifier_ids: classifier_ids
        };
        // The part where it analyse the image
        await visualRecognition.classify(params, function (err, response) {
            if (err)
                console.log(err);
            else
                try {
                    var status = (JSON.stringify(response.images[0]["classifiers"][0]["classes"][0]["class"], null, 2));
                    var prob = (JSON.stringify(response.images[0]["classifiers"][0]["classes"][0]["score"], null, 2));
                } catch (error) {
                    status = "Error getting status"
                    prob = "Error getting probability"
                }
            value.push([status, prob]);
        });
    }
    await callback()
}
This is the callback function which I use to render my page.
function pageRender() {
    app.get("/", (req, res) => {
        res.render("index", {
            images: JSON.stringify(images),
            coordinates: JSON.stringify(coordinates),
            value: JSON.stringify(value),
        })
        console.log("I have finish rendering")
    })
}
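Two details in the snippets above are worth a second look (a sketch, not a confirmed diagnosis): visualRecognition.classify is used with a Node-style callback, so awaiting it does not actually wait for the result, and .then(recognition(10, pageRender)) calls recognition immediately instead of passing it as a function. One way this is often handled (classifyAsync is a hypothetical helper name):
// Wrap the callback-based classify call in a Promise so await really waits
function classifyAsync(params) {
    return new Promise((resolve, reject) => {
        visualRecognition.classify(params, (err, response) => {
            if (err) reject(err);
            else resolve(response);
        });
    });
}

// Inside recognition(): const response = await classifyAsync(params);

// And pass a function to .then rather than the result of calling one:
fetchAPI(10)
    .then((data) => {
        // ...build images and coordinates as above...
    })
    .then(() => recognition(10, pageRender));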

Running node-rdkafka code on a server

I'm running the node-rdkafka code below in Eclipse as a Node.js application. It is the sample code from https://blizzard.github.io/node-rdkafka/current/tutorial-producer_.html
I want to run this on a test server and call it from an iOS mobile application.
I know about running a Node.js app in AWS.
Question I: Are there any other options to run it in a free test server environment, like Tomcat?
Question II: Even if I am able to run this Node.js app on a server, how do I call it from a mobile application? Do I need to call producer.on('ready', function(arg)), or what function do I need to call from the mobile app?
var Kafka = require('node-rdkafka');
//console.log(Kafka.features);
//console.log(Kafka.librdkafkaVersion);

var producer = new Kafka.Producer({
    'metadata.broker.list': 'localhost:9092',
    'dr_cb': true
});

var topicName = 'MyTest';

//logging debug messages, if debug is enabled
producer.on('event.log', function(log) {
    console.log(log);
});

//logging all errors
producer.on('event.error', function(err) {
    console.error('Error from producer');
    console.error(err);
});

//counter to stop this sample after maxMessages are sent
var counter = 0;
var maxMessages = 10;

producer.on('delivery-report', function(err, report) {
    console.log('delivery-report: ' + JSON.stringify(report));
    counter++;
});

//Wait for the ready event before producing
producer.on('ready', function(arg) {
    console.log('producer ready.' + JSON.stringify(arg));
    for (var i = 0; i < maxMessages; i++) {
        var value = new Buffer('MyProducerTest - value-' + i);
        var key = "key-" + i;
        // if partition is set to -1, librdkafka will use the default partitioner
        var partition = -1;
        producer.produce(topicName, partition, value, key);
    }
    //need to keep polling for a while to ensure the delivery reports are received
    var pollLoop = setInterval(function() {
        producer.poll();
        if (counter === maxMessages) {
            clearInterval(pollLoop);
            producer.disconnect();
        }
    }, 1000);
});

/*
producer.on('disconnected', function(arg) {
    console.log('producer disconnected. ' + JSON.stringify(arg));
});*/

//starting the producer
producer.connect();
First of all, you need an HTTP server. ExpressJS can be used. Then, just tack on the Express code basically at the end, but move the producer loop into the request route.
So, start with what you had:
var Kafka = require('node-rdkafka');
//console.log(Kafka.features);
//console.log(Kafka.librdkafkaVersion);

var producer = new Kafka.Producer({
    'metadata.broker.list': 'localhost:9092',
    'dr_cb': true
});

var topicName = 'MyTest';

//logging debug messages, if debug is enabled
producer.on('event.log', function(log) {
    console.log(log);
});

//logging all errors
producer.on('event.error', function(err) {
    console.error('Error from producer');
    console.error(err);
});

producer.on('delivery-report', function(err, report) {
    console.log('delivery-report: ' + JSON.stringify(report));
});

//Wait for the ready event before producing
producer.on('ready', function(arg) {
    console.log('producer ready.' + JSON.stringify(arg));
});

producer.on('disconnected', function(arg) {
    console.log('producer disconnected. ' + JSON.stringify(arg));
});

//starting the producer
producer.connect();
Then, you can add this in the same file.
var express = require('express')
var app = express()

app.get('/', (req, res) => res.send('Ready to send messages!'))

app.post('/:maxMessages', function (req, res) {
    if (req.params.maxMessages) {
        var maxMessages = parseInt(req.params.maxMessages);
        for (var i = 0; i < maxMessages; i++) {
            var value = new Buffer('MyProducerTest - value-' + i);
            var key = "key-" + i;
            // if partition is set to -1, librdkafka will use the default partitioner
            var partition = -1;
            producer.produce(topicName, partition, value, key);
        } // end for
    } // end if
    res.send('Produced ' + (maxMessages || 0) + ' messages'); // respond so the caller is not left hanging
}); // end app.post()

app.listen(3000, () => console.log('Example app listening on port 3000!'))
I don't think the poll loop is necessary since you don't care about the counter anymore.
Now, connect your mobile app to http://<your server IP>:3000/ and send test messages with a POST request to http://<your server IP>:3000/10, for example; adjust the path parameter to change the number of messages to send.
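For a quick check without the mobile app, any HTTP client can exercise the route; a minimal Node sketch using the built-in http module (the host and port are the assumed defaults from the code above):
// Send 10 test messages by POSTing to the /:maxMessages route
const http = require('http');

const req = http.request(
    { host: 'localhost', port: 3000, path: '/10', method: 'POST' },
    (res) => {
        console.log('status:', res.statusCode);
        res.resume(); // drain the response
    }
);
req.on('error', (err) => console.error('request failed:', err));
req.end();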
I might be late on this, but this is how I did it using promises, and I found it better than having a timeout, etc.
const postMessageToPublisher = (req, res) => {
    return new Promise((resolve, reject) => {
        producer.connect();
        producer.setPollInterval(globalConfigs.producerPollingTime);

        const requestBody = req.body;
        const topicName = requestBody.topicName;
        const key = requestBody.key || uuid();
        const partition = requestBody.partition || undefined;
        const data = Buffer.from(JSON.stringify(requestBody.data));

        /**
         * Actual messages are sent here when the producer is ready
         */
        producer.on(kafkaEvents.READY, () => {
            try {
                producer.produce(
                    topicName,
                    partition,
                    data,
                    key // key provided by the user, or a UUID
                );
            } catch (error) {
                reject(error);
            }
        });

        // Register listener for debug information; only invoked if debug option set in driver_options
        producer.on(kafkaEvents.LOG, log => {
            logger.info('Producer event log notification for debugging:', log);
        });

        // Register error listener
        producer.on(kafkaEvents.ERROR, err => {
            logger.error('Error from producer: ' + JSON.stringify(err));
            reject(err);
        });

        // Register delivery report listener
        producer.on(kafkaEvents.PUBLISH_ACKNOWLEDGMENT, (err, ackMessage) => {
            if (err) {
                logger.error(
                    'Delivery report: Failed sending message ' + ackMessage.value
                );
                logger.error('and the error is :', err);
                reject({ value: ackMessage.value, error: err });
            } else {
                resolve({
                    teamName: globalConfigs.TeamNameService,
                    topicName: ackMessage.topic,
                    key: ackMessage.key.toString()
                });
            }
        });
    });
};
Please note that kafkaEvents contains my constants for the events we listen to; it is just a reference, so for example kafkaEvents.LOG is the same as 'event.log'.
Also, the calling function expects this to return a promise, so it uses .then(data => 'send your response to the user from here') and .catch(error => 'send the error response to the user from here').
This is how I achieved it using promises.
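For completeness, a minimal sketch of the surrounding pieces implied above; the constant values mirror the node-rdkafka event names used earlier in this thread, and the route path is a placeholder:
// Assumed shape of the constants referenced in the answer
const kafkaEvents = {
    READY: 'ready',
    LOG: 'event.log',
    ERROR: 'event.error',
    PUBLISH_ACKNOWLEDGMENT: 'delivery-report'
};

// The caller treats postMessageToPublisher as a promise
app.post('/publish', (req, res) => {
    postMessageToPublisher(req, res)
        .then(data => res.status(200).json(data))          // send your response to the user from here
        .catch(error => res.status(500).json({ error }));  // send the error response to the user from here
});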

Node.js Websocket chat app: clients receiving duplicate message

I am new to Node.js. I am trying to implement a chat app using Node.js and WebSockets. This is the code I have used on the server side:
var config = require('someconfigure'),
    socketServer = require('somewebsocketapi').server;

var clients = [];

socketServer('example', function (connection, server) {
    var index;

    connection.on('open', function (id) {
        index = clients.push(connection) - 1;
        console.log('[open]');
    });

    connection.on('message', function (msg) {
        console.log(msg);
        var content = JSON.parse(msg.utf8Data);
        var author = content.action.data[0].author;
        var channel = content.action.data[0].channel;
        var text = content.action.data[0].text;
        var time = (new Date()).getTime();
        console.log(author + text + channel);

        // send message to all clients
        var obj = JSON.stringify({
            command: "messages",
            data: [
                {
                    timestamp: time,
                    text: text,
                    author: author,
                    channel: channel
                }
            ]
        });

        console.log("Number of clients " + clients.length);
        for (var i = 0; i < clients.length; i++) {
            if (clients[i] != connection)
                clients[i].send(obj);
        }
    });

    connection.on('error', function (err) {
        console.log(err);
    });

    connection.on('close', function () {
        console.log((new Date()) + " Peer "
            + index + " disconnected.");
        // remove user from the list of connected clients
        clients.splice(index, 1);
    });
}).config(config);
The problem I am facing is that the clients are getting duplicate messages. For example, if 5 clients are connected, then every client gets the same message 5 times.
I know I am missing something, but how can I fix this? Thank you in advance for the help.

Seeking Guidance on troubleshooting node memory issue

I'm trying to implement a message queue using Node, socket.io, and Redis. I am attempting to follow the reliable queue pattern outlined here. I am trying to read a logfile (60M in size) line by line (I will be changing this later) and pump the lines into the queue for processing later. However, I am running into a memory allocation issue. I'm not sure how to troubleshoot this and would like some guidance on where to start. I can't tell if the issue is in reading the file or in the Redis client. I have been able to add messages to the queue one by one like this:
socket.emit('message', 'some sort of log line here');
Therefore I know the listener is working, but when I run the socketClient.js file, it spins for a bit and then ultimately fails with the following generic error message:
FATAL ERROR: JS Allocation failed - process out of memory
Is there some error handling, or profiling I can add to get more information on where this is failing?
Here is the code:
socketListener.js
var util = require("util"),
    redis = require("redis"),
    io = require('socket.io').listen(8088),
    client = redis.createClient("7777", "localhost");

util.log("Established connection to redis");

io.sockets.on('connection', function (socket) {
    util.log("socket connection established for socket : " + socket);

    socket.on('message', function (data) {
        util.log("received the following data : ");
        util.log(JSON.stringify(data, 0, 3));

        client.on("error", function (err) {
            util.log("Error " + err);
        });

        try {
            // reliable queue pattern implementation
            util.log("queuing up the data in the list");
            client.rpush('logList', data);
            client.brpoplpush('logList', 'dequeueList', 10);
        } catch (err) {
            util.log("An error occurred : ");
            util.log(JSON.stringify(err, 0, 3));
        }
    });

    socket.on('disconnect', function () {});
});
socketClient.js
var io = require("socket.io-client");
var socket = io.connect('http://localhost:8088');
var redis = require('redis');
var util = require('util');
var fs = require('fs');
var readline = require('readline');

socket.on('connect', function () {
    client = redis.createClient("7777", "localhost");

    var rd = readline.createInterface({
        input: fs.createReadStream('someLogFile.log'),
        terminal: false
    });

    rd.on('line', function (line) {
        util.log("reading line " + line);
        socket.emit('message', line);
    });

    client.lrange('dequeueList', 0, -1, function (err, results) {
        if (err) {
            util.log(err);
        } else {
            var multi = client.multi();
            for (var i = 0; i < results.length; i++) {
                util.log('got : ' + results[i]);
                multi.hgetall(results[i]);
            }
            multi.exec(function (err, logs) {
                util.log("executing the multi commands");
                util.log(JSON.stringify(logs, 0, 3));
            });
        }
    });
});
Thank you in advance for the help!
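Two hedged suggestions, based only on what is shown above. First, for the profiling question: logging process.memoryUsage() on an interval (built into Node) shows whether the heap grows while the file is being read. Second, a common cause of this exact error is that readline produces lines far faster than socket.io can flush them, so the pending emits pile up in memory; a sketch of throttling with readline's pause()/resume() and socket.io acknowledgement callbacks follows. The diagnosis is an assumption, not a confirmed cause.
// socketClient.js - quick memory profiling while the file streams
setInterval(function () {
    var m = process.memoryUsage();
    util.log('rss=' + (m.rss / 1048576).toFixed(1) + 'MB heapUsed=' + (m.heapUsed / 1048576).toFixed(1) + 'MB');
}, 1000);

// socketClient.js - throttle reading: pause until the server acknowledges each line
rd.on('line', function (line) {
    rd.pause();                                  // stop reading more lines for now
    socket.emit('message', line, function () {   // last argument is socket.io's ack callback
        rd.resume();                             // server acknowledged, read the next line
    });
});

// socketListener.js - invoke the acknowledgement once the line has been queued in Redis
socket.on('message', function (data, ack) {
    client.rpush('logList', data, function () {
        if (typeof ack === 'function') ack();
    });
});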
