I have a Node.js socket.io application with a few different events and listeners. Right now this is how I am doing it:
const events = require('events');
class testEmitterClass extends events {
}
const testEmitter = new testEmitterClass();
io.on('connection', function (socket) {
    console.log('connected');
    let dnsInactiveTermsListener = function (dnsInactiveTerms) {
        socket.emit(socketEvents.DNS_INACTIVE_TERMS, dnsInactiveTerms);
    };
    let checkpointInactiveTermsListener = function (checkpointInactiveTerms) {
        socket.emit(socketEvents.CHECKPOINT_INACTIVE_TERMS, checkpointInactiveTerms);
    };
    let dnsActiveTermsListener = function (dnsActiveTerms) {
        socket.emit(socketEvents.DNS_ACTIVE_TERMS, dnsActiveTerms);
    };
    let checkpointActiveTermsListener = function (checkpointActiveTerms) {
        socket.emit(socketEvents.CHECKPOINT_ACTIVE_TERMS, checkpointActiveTerms);
    };
    let dnsCountListener = function (dnsCountStreaming) {
        socket.emit(socketEvents.DNS_COUNT, dnsCountStreaming);
    };
    testEmitter.on(socketEvents.CHECKPOINT_ACTIVE_TERMS, checkpointActiveTermsListener);
    testEmitter.on(socketEvents.DNS_INACTIVE_TERMS, dnsInactiveTermsListener);
    testEmitter.on(socketEvents.CHECKPOINT_INACTIVE_TERMS, checkpointInactiveTermsListener);
    testEmitter.on(socketEvents.DNS_ACTIVE_TERMS, dnsActiveTermsListener);
    testEmitter.on(socketEvents.DNS_COUNT, dnsCountListener);
    socket.on('disconnect', function () {
        console.log('disconnected');
        testEmitter.removeListener(socketEvents.DNS_INACTIVE_TERMS, dnsInactiveTermsListener);
        testEmitter.removeListener(socketEvents.DNS_ACTIVE_TERMS, dnsActiveTermsListener);
        testEmitter.removeListener(socketEvents.DNS_COUNT, dnsCountListener);
        testEmitter.removeListener(socketEvents.CHECKPOINT_INACTIVE_TERMS, checkpointInactiveTermsListener);
        testEmitter.removeListener(socketEvents.CHECKPOINT_ACTIVE_TERMS, checkpointActiveTermsListener);
    });
});
The testEmitter is a single instance that emits events from somewhere else; those events are then sent to the client using socket.io.
Is there a way to maintain a single list of the listeners somewhere so that this code can be maintained better? How can I map events to listeners so that they can be added and removed as clients connect to and disconnect from socket.io, without making a mess?
socketEvents is just an object of event names:
const DNS_COUNT = 'dnsCount';
const DNS_INACTIVE_TERMS = 'dnsInactiveTerms';
const DNS_ACTIVE_TERMS = 'dnsActiveTerms';
const CHECKPOINT_INACTIVE_TERMS = 'checkpointInactiveTerms';
const CHECKPOINT_ACTIVE_TERMS = 'checkpointActiveTerms';
module.exports = {
    DNS_COUNT,
    DNS_INACTIVE_TERMS,
    CHECKPOINT_INACTIVE_TERMS,
    DNS_ACTIVE_TERMS,
    CHECKPOINT_ACTIVE_TERMS
};
Hope I made myself clear, thanks!
I think you can change the whole way you do things. Rather than register an event handler for every single socket that connects, you can just broadcast the message to all connected sockets. So, I think you can replace everything you show with just this:
class testEmitterClass extends events {
}
const testEmitter = new testEmitterClass();
const notifications = [
    "CHECKPOINT_ACTIVE_TERMS",
    "DNS_INACTIVE_TERMS",
    "CHECKPOINT_INACTIVE_TERMS",
    "DNS_ACTIVE_TERMS",
    "DNS_COUNT"
];
for (let msg of notifications) {
    testEmitter.on(socketEvents[msg], function (data) {
        // send this message and data to all currently connected sockets
        io.emit(socketEvents[msg], data);
    });
}
Also notice that the code has been DRYed by using a table of messages that you can loop through rather than repeating the same statements over and over again. So, now to add, remove or edit one of your notification messages, you just modify the table in one place.
If socketEvents is just an object with these five properties on it, then you could even remove the notifications array by just iterating the properties of socketEvents.
That would further reduce the code to this:
class testEmitterClass extends events {
}
const testEmitter = new testEmitterClass();
for (let msg of Object.keys(socketEvents)) {
    testEmitter.on(socketEvents[msg], function (data) {
        // send this message and data to all currently connected sockets
        io.emit(socketEvents[msg], data);
    });
}
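If you ever do need per-socket listeners again (for example, to filter what each client receives before emitting), a map keyed by event name keeps the add and remove sides symmetric. A minimal sketch, assuming the same io, testEmitter, and socketEvents from above:
io.on('connection', function (socket) {
    // build one forwarding listener per event, keyed by event name
    const listeners = {};
    for (const key of Object.keys(socketEvents)) {
        const eventName = socketEvents[key];
        listeners[eventName] = data => socket.emit(eventName, data);
        testEmitter.on(eventName, listeners[eventName]);
    }
    socket.on('disconnect', function () {
        // remove exactly the listeners this socket registered
        for (const eventName of Object.keys(listeners)) {
            testEmitter.removeListener(eventName, listeners[eventName]);
        }
    });
});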
I have written the script below, which creates multiple WebSocket connections with a smart contract to listen to events. It's working fine, but I feel this is not an optimized solution and it could probably be done in a better way.
const main = async (PAIR_NAME, PAIR_ADDRESS_UNISWAP, PAIR_ADDRESS_SUSHISWAP) => {
    const PairContractHTTPUniswap = new Blockchain.web3http.eth.Contract(
        UniswapV2Pair.abi,
        PAIR_ADDRESS_UNISWAP
    );
    const PairContractWSSUniswap = new Blockchain.web3ws.eth.Contract(
        UniswapV2Pair.abi,
        PAIR_ADDRESS_UNISWAP
    );
    const PairContractHTTPSushiswap = new Blockchain.web3http.eth.Contract(
        UniswapV2Pair.abi,
        PAIR_ADDRESS_SUSHISWAP
    );
    const PairContractWSSSushiswap = new Blockchain.web3ws.eth.Contract(
        UniswapV2Pair.abi,
        PAIR_ADDRESS_SUSHISWAP
    );
    var Price_Uniswap = await getReserves(PairContractHTTPUniswap);
    var Price_Sushiswap = await getReserves(PairContractHTTPSushiswap);
    // subscribe to the Sync event of each Pair
    PairContractWSSUniswap.events.Sync({}).on("data", (data) => {
        Price_Uniswap = (Big(data.returnValues.reserve0)).div(Big(data.returnValues.reserve1));
        priceDifference(Price_Uniswap, Price_Sushiswap, PAIR_NAME);
    });
    PairContractWSSSushiswap.events.Sync({}).on("data", (data) => {
        Price_Sushiswap = (Big(data.returnValues.reserve0)).div(Big(data.returnValues.reserve1));
        priceDifference(Price_Uniswap, Price_Sushiswap, PAIR_NAME);
    });
};
for (let i = 0; i < pairsArray.length; i++) {
    main(pairsArray[i].tokenPair, pairsArray[i].addressUniswap, pairsArray[i].addressSushiswap);
}
In the end, I call the main function once per pair from the pairs array, in a for loop. I think this approach is brute force and there must be a better way of doing it.
Any suggestions/opinions would be really appreciated.
Just to clear up the terms: You're opening a websocket connection to the WSS node provider - not to the smart contracts. But yes, your JS snippet subscribes to multiple channels (one for each contract) within this one connection (to the node provider).
You can collect event logs from multiple contracts through just one WSS channel using the web3.eth.subscribe("logs") function (docs), passing it the list of contract addresses as a param. Example:
const options = {
    // list of contract addresses whose event logs you want to subscribe to
    address: ["0x123", "0x456"]
};
web3.eth.subscribe("logs", options, (err, data) => {
    console.log(data);
});
But it has a drawback: it doesn't decode the event log data for you, so your code will need to determine the expected data types from the event signature (returned in data.topics[0]). Once you know which event log was emitted, based on the topics[0] event signature (a real-life example value is in this answer), you can use the decodeLog() function (docs) to get the decoded values.
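For example, here is a rough sketch for the Sync event (my own illustration, not from the original answer), using web3 1.x's encodeEventSignature() and decodeLog() together with the question's web3ws instance:
// topic[0] value for UniswapV2's Sync(uint112 reserve0, uint112 reserve1)
const SYNC_TOPIC = Blockchain.web3ws.eth.abi.encodeEventSignature("Sync(uint112,uint112)");
const options = {
    address: [PAIR_ADDRESS_UNISWAP, PAIR_ADDRESS_SUSHISWAP]
};
Blockchain.web3ws.eth.subscribe("logs", options, (err, log) => {
    if (err || log.topics[0] !== SYNC_TOPIC) return;
    // Sync's two params are non-indexed, so only the signature topic is present
    const decoded = Blockchain.web3ws.eth.abi.decodeLog(
        [{ type: "uint112", name: "reserve0" }, { type: "uint112", name: "reserve1" }],
        log.data,
        log.topics.slice(1)
    );
    // log.address tells you which pair contract emitted the event
    const price = Big(decoded.reserve0).div(Big(decoded.reserve1));
});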
I'm trying to do a couple of things in the IndexedDB database inside the 'fetch' event of a service worker, when the application asks the server for a new page. Here's what I'm going for:
Create a new object store (they need to be created dynamically, according to the data that 'fetch' picks up);
Store an element on the store.
Or, if the store already exists:
Get an element from the store;
Update the element and store it back on the store.
The problem is that the callbacks (onupgradeneeded, onsuccess, etc) never get executed.
I've tried nesting the callbacks inside each other, though I know that may not be the best approach. I've also tried placing an event.waitUntil() on 'fetch', but it didn't help.
The 'fetch' event, where the function registerPageAccess is called:
self.addEventListener('fetch', function (event) {
    event.respondWith(
        caches.match(event.request)
            .then(function (response) {
                event.waitUntil(function () {
                    const nextPageURL = new URL(event.request.url);
                    if (event.request.destination == 'document') {
                        if (currentURL) {
                            registerPageAccess(currentURL, nextPageURL);
                        }
                        currentURL = nextPageURL;
                    }
                }());
                /*
                 * some other operations
                 */
                return response || fetch(event.request);
            })
    );
});
registerPageAccess, the function with the callbacks.
I know it's a lot of code, but just look at secondRequest.onupgradeneeded in the 5th line. It is never executed, let alone the ones that follow.
function registerPageAccess(currentPageURL, nextPageURL) {
    var newVersion = parseInt(db.version) + 1;
    var secondRequest = indexedDB.open(DB_NAME, newVersion);
    secondRequest.onupgradeneeded = function (e) {
        db = e.target.result;
        db.createObjectStore(currentPageURL, { keyPath: "pageURL" });
        var transaction = request.result.transaction([currentPageURL], 'readwrite');
        var store = transaction.objectStore(currentPageURL);
        var getRequest = store.get(nextPageURL);
        getRequest.onsuccess = function (event) {
            var obj = getRequest.result;
            if (!obj) {
                // Insert element into the database
                console.debug('ServiceWorker: No matching object in the database');
                const addRes = putInObjectStore(nextPageURL, 1, store);
                addRes.onsuccess = function (event) {
                    console.debug('ServiceWorker: Element was successfully added in the Object Store');
                }
                addRes.onerror = function (event) {
                    console.error('ServiceWorker error adding element to the Object Store: ' + addRes.error);
                }
            }
            else {
                // Updating database element
                const updRes = putInObjectStore(obj.pageURL, obj.nVisits + 1, store);
                updRes.onsuccess = function (event) {
                    console.debug('ServiceWorker: Element was successfully updated in the Object Store');
                }
                updRes.onerror = function (event) {
                    console.error('ServiceWorker error updating element of the Object Store: ' + updRes.error);
                }
            }
        };
    };
    secondRequest.onsuccess = function (e) {
        console.log('ServiceWorker: secondRequest onsuccess');
    };
    secondRequest.onerror = function (e) {
        console.error('ServiceWorker: error on the secondRequest.open: ' + secondRequest.error);
    };
}
I need a way to perform the operations in registerPageAccess, which involve executing a couple of callbacks, but the browser seems to kill the Service Worker before they get to occur.
All asynchronous logic inside of a service worker needs to be promise-based. Because IndexedDB is callback-based, you're going to find yourself needing to wrap the relevant callbacks in a promise.
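For illustration only, the wrapping looks roughly like this (a hand-rolled sketch; the libraries below do the same thing for you with much better edge-case handling):
function openDatabase(name, version, onUpgrade) {
    return new Promise((resolve, reject) => {
        const request = indexedDB.open(name, version);
        // fires first when the version is bumped; create object stores here
        request.onupgradeneeded = e => onUpgrade(e.target.result, e);
        request.onsuccess = () => resolve(request.result);
        request.onerror = () => reject(request.error);
    });
}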
I'd strongly recommend not attempting to do this on your own, and instead using one of the following libraries, which are well-tested, efficient, and lightweight:
idb-keyval, if you're okay with a simple key-value store.
idb, if you need the full IndexedDB API.
I'd also recommend that you consider using the async/await syntax inside of your service worker's fetch handler, as it tends to make promise-based code more readable.
Put together, this would look roughly like:
self.addEventListener('fetch', (event) => {
    event.waitUntil((async () => {
        // Your IDB cleanup logic here.
        // Basically, anything that can execute separately
        // from response generation.
    })());
    event.respondWith((async () => {
        // Your response generation logic here.
        // Return a Response object at the end of the function.
    })());
});
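And as a rough sketch of the question's registerPageAccess flow on top of the idb library (assuming its openDB API; DB_NAME and the pageURL/nVisits fields are the question's, and the version still has to be bumped for upgrade() to run):
import { openDB } from 'idb';

async function registerPageAccess(currentPageURL, nextPageURL, newVersion) {
    const db = await openDB(DB_NAME, newVersion, {
        upgrade(db) {
            // create the per-page store if this version bump introduced it
            if (!db.objectStoreNames.contains(currentPageURL)) {
                db.createObjectStore(currentPageURL, { keyPath: 'pageURL' });
            }
        }
    });
    const existing = await db.get(currentPageURL, nextPageURL);
    const nVisits = existing ? existing.nVisits + 1 : 1;
    await db.put(currentPageURL, { pageURL: nextPageURL, nVisits });
    db.close();
}
That said, a single store with pageURL as the key is usually simpler than a store per page, since creating a store always requires a version change.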
This is a long post, so I appreciate those who answer it. I am trying to understand the websocket communication in the blockchain example below.
Here is the source code for a node in a blockchain:
const BrewChain = require('./brewChain');
const WebSocket = require('ws');
const BrewNode = function (port) {
    let brewSockets = [];
    let brewServer;
    let _port = port
    let chain = new BrewChain();
    const REQUEST_CHAIN = "REQUEST_CHAIN";
    const REQUEST_BLOCK = "REQUEST_BLOCK";
    const BLOCK = "BLOCK";
    const CHAIN = "CHAIN";
    function init() {
        chain.init();
        brewServer = new WebSocket.Server({ port: _port });
        brewServer.on('connection', (connection) => {
            console.log('connection in');
            initConnection(connection);
        });
    }
    const messageHandler = (connection) => {
        connection.on('message', (data) => {
            const msg = JSON.parse(data);
            switch (msg.event) {
                case REQUEST_CHAIN:
                    connection.send(JSON.stringify({ event: CHAIN, message: chain.getChain() }))
                    break;
                case REQUEST_BLOCK:
                    requestLatestBlock(connection);
                    break;
                case BLOCK:
                    processedRecievedBlock(msg.message);
                    break;
                case CHAIN:
                    processedRecievedChain(msg.message);
                    break;
                default:
                    console.log('Unknown message ');
            }
        });
    }
    const processedRecievedChain = (blocks) => {
        let newChain = blocks.sort((block1, block2) => (block1.index - block2.index))
        if (newChain.length > chain.getTotalBlocks() && chain.checkNewChainIsValid(newChain)) {
            chain.replaceChain(newChain);
            console.log('chain replaced');
        }
    }
    const processedRecievedBlock = (block) => {
        let currentTopBlock = chain.getLatestBlock();
        // Is the same or older?
        if (block.index <= currentTopBlock.index) {
            console.log('No update needed');
            return;
        }
        // Is claiming to be the next in the chain
        if (block.previousHash == currentTopBlock.hash) {
            // Attempt to add the top block to our chain
            chain.addToChain(block);
            console.log('New block added');
            console.log(chain.getLatestBlock());
        } else {
            // It is ahead.. we are therefore a few behind, request the whole chain
            console.log('requesting chain');
            broadcastMessage(REQUEST_CHAIN, "");
        }
    }
    const requestLatestBlock = (connection) => {
        connection.send(JSON.stringify({ event: BLOCK, message: chain.getLatestBlock() }))
    }
    const broadcastMessage = (event, message) => {
        brewSockets.forEach(node => node.send(JSON.stringify({ event, message })))
    }
    const closeConnection = (connection) => {
        console.log('closing connection');
        brewSockets.splice(brewSockets.indexOf(connection), 1);
    }
    const initConnection = (connection) => {
        console.log('init connection');
        messageHandler(connection);
        requestLatestBlock(connection);
        brewSockets.push(connection);
        connection.on('error', () => closeConnection(connection));
        connection.on('close', () => closeConnection(connection));
    }
    const createBlock = (teammember) => {
        let newBlock = chain.createBlock(teammember)
        chain.addToChain(newBlock);
        broadcastMessage(BLOCK, newBlock);
    }
    const getStats = () => {
        return {
            blocks: chain.getTotalBlocks()
        }
    }
    const addPeer = (host, port) => {
        let connection = new WebSocket(`ws://${host}:${port}`);
        connection.on('error', (error) => {
            console.log(error);
        });
        connection.on('open', (msg) => {
            initConnection(connection);
        });
    }
    return {
        init,
        broadcastMessage,
        addPeer,
        createBlock,
        getStats
    }
}
module.exports = BrewNode;
When a new block is created by the node with the createBlock() function, a message is broadcast from the node to all connected sockets with the broadcastMessage() function to tell them a new block has been created. The connected sockets will receive the message, and in messageHandler() it will hit the BLOCK option in the switch statement for each of them. I have a grasp of this process, and have drawn up a graph to show my understanding.
FIGURE 1
As stated earlier, when A creates a new block it will send the new block to its connected nodes, where each node will verify it and possibly add it to its chain. This processing is done by the processedRecievedBlock() function. Let's say B and C decide to add the block to their chain, but D is several blocks behind, so it must request the whole chain from A. This is where I am confused. I expected that D would send a message back to A requesting the whole chain, like this:
FIGURE 2
However, according to the processedRecievedBlock() function, in this situation D will broadcast a REQUEST_CHAIN message to all its connected sockets when this line is run:
broadcastMessage(REQUEST_CHAIN,"");
Let's say D is connected to E and F. Instead of requesting the chain from A like in FIGURE 2, it seems as though it will send the REQUEST_CHAIN message to its connected sockets, like this:
FIGURE 3
In the messageHandler() function, the REQUEST_CHAIN option in the switch statement will be run for E and F, and they will hit this line of code:
connection.send(JSON.stringify({ event: CHAIN, message: chain.getChain()}));
It is my understanding that this will cause E and F to send their own chain back to themselves, like this:
FIGURE 4
I want to know why FIGURE 2 does not occur when D needs to request the whole chain from A. Tracing the code has led me to believe that FIGURE 3 and FIGURE 4 occur instead, neither of which seems useful.
I am trying to understand what exactly happens in this code when a node must request the whole chain from another node. I must be misunderstanding what these sockets are doing.
Complete source code: https://github.com/dbjsdev/BrewChain/blob/master/brewNode.js
Thanks for a descriptive question. :)
You are right for the most part and Figure 3 is the correct description of that part of the process. But Figure 4 is wrong.
Note that every socket connection between peers leads to a different connection instance; these are collectively maintained in brewSockets.
So, when A/E/F receive a request on their connection from D, they respond to D with the whole chain, as in the code below:
connection.send(JSON.stringify({ event: CHAIN, message: chain.getChain()}));
D then processes the CHAIN message:
const processedRecievedChain = (blocks) => {
    let newChain = blocks.sort((block1, block2) => (block1.index - block2.index))
    if (newChain.length > chain.getTotalBlocks() && chain.checkNewChainIsValid(newChain)) {
        chain.replaceChain(newChain);
        console.log('chain replaced');
    }
}
Now, onto the 'why'!
Firstly, the underlying principle is that we trust the network, not just one node. So you want to verify the authenticity of the chain from as many sources as possible.
Secondly, you want the latest chain from your peers, not just any random chain.
By doing so, we ensure that any node is as up to date as its peers. So node D fetches the chain from multiple sources and stores the latest verified chain.
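As an aside, if you wanted the FIGURE 2 behaviour (ask only the node whose block revealed that we are behind), processedRecievedBlock would need the inbound connection passed through from messageHandler. A hypothetical tweak, not the repo's code:
// in messageHandler:
//     case BLOCK:
//         processedRecievedBlock(msg.message, connection);
//         break;
const processedRecievedBlock = (block, connection) => {
    // ...same index/previousHash checks as before...
    // reply only to the peer that sent the ahead block,
    // rather than broadcasting REQUEST_CHAIN to every connected socket
    connection.send(JSON.stringify({ event: REQUEST_CHAIN, message: "" }));
}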
Hope that helps!
I've got an app I'm writing in React Native. It's socketed and I have a file that controls all socket information.
import {Alert, AppState} from 'react-native';
import store from '../store/store';
import {updateNotifications} from '../reducers/notifications';
import {setError, clearError} from '../reducers/error';
import {updateCurrentEvent, updateEventStatus, setCurrentEvent} from '../reducers/event_details';
import {setAlert} from '../reducers/alert';
import {ws_url} from '../api/urls'
let conn = new WebSocket(ws_url);
/*
handleSocketConnections handles any actions that require rerouting. The rest are passed off to handleOnMessage
This is being called from authLogin on componentDidMount. It would be ideal to only initialize a socket conn
when a user logs in somehow, but this package gets run when a user opens the app, meaning there are socket
connections that don't need to exist yet.
*/
function setAppStateHandler() {
    AppState.addEventListener('change', cstate => {
        if (cstate === 'active') {
            reconnect()
        }
    })
}
export const handleSocketConnections = (navigator, route) => {
    setAppStateHandler();
    conn.onmessage = e => {
        const state = store.getState();
        const msg = JSON.parse(e.data);
        const { type, payload, event_id } = msg;
        const { event } = state.event_details.event_details;
        if (type == "SET_EVENT_STATUS" && payload == "CLOSED" && event_id == event.event_id) {
            navigator.push(route)
            // store.dispatch(setAlert({
            //     message: "Event is closed, click to navigate to checkout."
            //     , scene: null
            // }))
            store.dispatch(updateEventStatus(payload));
        } else {
            handleOnMessage(msg, state)
        }
    }
}
export function reconnect() {
    //TODO: Fatal errors should redirect the mainNav to a fatal error screen. Not dismount the nav entirely, as it does now
    //and this should pop the error screen when it's fixed.
    let state = store.getState();
    conn = new WebSocket(ws_url);
    setTimeout(function () {
        if (conn.readyState == 1) {
            if (typeof state.event_details.event_details != 'undefined') {
                setSocketedEventInfo(state.event_details.event_details.event.event_id);
            }
            store.dispatch(clearError());
        } else {
            store.dispatch(setError('fatal', `Socket readyState should be 1 but it's ${conn.readyState}`))
        }
    }, 1000);
}
//Perform function on ES close.
conn.onclose = e => {
    console.log("Closing wsbidder, ", `${e.code} -- ${e.reason}`);
    //TODO: Set error here saying they need to restart the app. Maybe a 'reconnect' somehow?
    //Maybe set a store variable to socketErr and if null, all is good. Else, panic the app?
    //Use Case: Server is not started and user tries to connect to the app. String of e.message contains "Connection refused"
    store.dispatch(setError("fatal", `Socket onclose: ${e.code} -- ${e.reason}`))
};
conn.onerror = e => {
    console.log("Error at socket, ", e);
    store.dispatch(setError("fatal", `Socket onerror: ${e.message}`))
};
//Initialization function for websocket.
// conn.onopen = e => console.log("Opening wsbidder, ", e)
function handleOnMessage(msg, state) {
    switch (msg.type) {
        //These types come from the SocketWrappers on the server.
        //updateCurrentEvent should be filtering the event by event_id.
        case "EVENT_ITEMS":
            store.dispatch(updateCurrentEvent(
                msg.payload
                , state.user_info.uid
                , state.event_details.event_details.event.event_id));
            break;
        case "NOTIFICATIONS":
            //bug: this needs to filter notifications per event on the client-side.
            store.dispatch(updateNotifications(
                msg.payload
                , state.event_details.event_details.event.event_id
                , state.user_info.uid)
            );
            break;
        case "NOT_BIDDABLE":
            if (msg.event_id == state.event_details.event_details.event.event_id) {
                store.dispatch(updateEventStatus("CLOSED"));
            }
            break;
        case "PUSH_NOTIFICATION":
            const { title, message } = msg.payload;
            Alert.alert(title, message);
            break;
        default:
            console.warn(`Unrecognized socket action type: ${msg.type}`);
    }
}
//closes the socket connection and sends a reason to the server.
export const closeConn = reason => conn.close(null, reason);
export const setSocketedEventInfo = event_id => {
    //Gives the event ID to the socketed connection, which pulls end dates.
    const msg = {
        type: "UPDATE_EVENT_DETAILS"
        , payload: { event_id }
    }
    conn.send(JSON.stringify(msg));
}
export const createBid = (bid, cb) => {
    /*
    Expects:
    const new_bid = {
        item_id: item.item_id,
        bid: amount, //Storage keeps storing it as a string
        uid: 0, //Not needed here, but can't be null since the server wants an int.
        event_id, key, bidder
    };
    */
    const new_bid = {
        type: 'BID'
        , payload: bid
    };
    // Send this to the server socket
    conn.send(JSON.stringify(new_bid));
    //Returning the callback so the front-end knows to flip the card back over.
    return cb()
};
Some of the code is crap, I know. Unless you're giving true advice, which I'm always glad to follow, no need to bash it :-)
The issue I'm having is that when the socket dies (the conn variable), I can't re-initialize the socket and assign it to that conn variable. What I think is happening is that all the functions using the conn variable aren't using the new one; they're still stuck on the old one.
Line 9 -- Creating the original one.
Line 28 -- Creating an onMessage function for the conn object, within the handleSocketConnections function that gets called elsewhere at the start of the program
Line 57 -- Trying to re-assign a new connection to the conn variable in the reconnect function, that gets run whenever the app goes on standby (killing the socket connections).
Line 131 -- This gets called correctly from the reconnect function, connecting the socket to the server again
The reconnect() function runs correctly - the server registers the new connection with all the right info - but the app seems to still be in a weird state where there's no conn error (possibly looking at the new one??) but no actions are performed on the conn (possibly looking at the old one?).
Any ideas?
If you have to start a replacement webSocket connection, then you will need to rerun all the code that hooks up to the webSocket (installs event handlers, etc...). Because it's a new object, the old event listeners aren't associated with the new webSocket object.
The simplest way to do that is usually to create a single webSocketInit() function that you call both when you first create your webSocket connection and then call again any time you have to replace it with a new one. You can pass the latest webSocket object to webSocketInit() so any other code can see the new object. Individual blocks of code can register for onclose themselves if they want to know when the old one closes.
There are also more event-driven ways to do this by creating an EventEmitter that gets notified whenever the webSocket has been replaced and individual blocks of code can subscribe to that event if they want to get notified of that occurrence.
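A rough sketch of the webSocketInit() idea, using the names from the question (my own sketch; the handler bodies would be the ones you already have):
let conn;
export function webSocketInit(onMessage) {
    conn = new WebSocket(ws_url);
    // every handler must be re-attached: listeners installed on the
    // old WebSocket object are not carried over to the new one
    conn.onmessage = onMessage;
    conn.onclose = e => store.dispatch(setError("fatal", `Socket onclose: ${e.code} -- ${e.reason}`));
    conn.onerror = e => store.dispatch(setError("fatal", `Socket onerror: ${e.message}`));
}
handleSocketConnections would pass its onmessage body in, and reconnect() would call webSocketInit(...) instead of assigning conn = new WebSocket(ws_url) itself, so every consumer sees the replacement socket.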
I have an Observable coming from an EventEmitter, which is really just an http connection streaming events.
Occasionally I have to disconnect from the underlying stream and reconnect. I am not sure how to handle this with RxJS.
I am not sure if I can complete a source and then dynamically add another "source" to it, or if I have to do something like what I have at the very bottom.
var Rx = require('rx'),
    EventEmitter = require('events').EventEmitter;
var eventEmitter = new EventEmitter();
var eventEmitter2 = new EventEmitter();
var source = Rx.Observable.fromEvent(eventEmitter, 'data')
var subscription = source.subscribe(function (data) {
    console.log('data: ' + data);
});
setInterval(function () {
    eventEmitter.emit('data', 'foo');
}, 500);
// eventEmitter stops emitting data, underlying connection closed
// now attach second eventemitter (new connection)
// something like this, but obviously doesn't work
source
    .fromEvent(eventEmitter2, 'data')
Pseudo code that is closer to what I am doing: I am creating a second stream connection before I close the first, so I don't "lose" any data. Here I am not sure how to stop the Observable without "losing" records due to onNext not being called because of the buffer.
var streams = [], notifiers = [];
// create initial stream
createNewStream();
setInterval(function () {
    if (params of stream have changed) createNewStream();
}, $1minutes / 3);
function createNewStream() {
    var stream = new eventEmitterStream();
    stream.once('connected', function () {
        stopOthers();
        streams.push(stream);
        createSource(stream, 'name', 'id');
    });
}
function stopOthers() {
    while (streams.length > 0) {
        streams.pop().stop(); // stop the old stream
    }
    while (notifiers.length > 0) {
        // if I call this, the buffer may lose records before onNext() is called
        //notifiers.pop()(Rx.Notification.createOnCompleted());
    }
}
function createObserver(tag) {
    return Rx.Observer.create(
        function (x) {
            console.log('Next: ', tag, x.length, x[0], x[x.length - 1]);
        },
        function (err) {
            console.log('Error: ', tag, err);
        },
        function () {
            console.log('Completed', tag);
        });
}
function createSource(stream, event, id) {
    var source = Rx.Observable
        .fromEvent(stream, event)
        .bufferWithTimeOrCount(time, max);
    var subscription = source.subscribe(createObserver(id));
    var notifier = subscription.toNotifier();
    notifiers.push(notifier);
}
First and foremost, you need to make sure you can remove all listeners from your previously "dead" emitter. Otherwise you'll create a leaky application.
It seems like the only way you'll know that an EventEmitter has died is to watch frequency, unless you have an event that fires on error or completion (for disconnections). The latter is much, much more preferable.
Regardless, the secret sauce of Rx is making sure to wrap your data stream creation and teardown in your observable. If you wrap the creation of the emitter in your observable, as well as a means to tear it down, you'll be able to use awesome things like the retry operator to recreate that observable.
So if you have no way of knowing if it died, and you want to reconnect it, you can use something like this:
// I'll presume you have some function to get an EventEmitter that
// is already set up
function getEmitter() {
    var emitter = new EventEmitter();
    setInterval(function () {
        emitter.emit('data', 'foo');
    }, 500)
    return emitter;
}
var emitterObservable = Observable.create(function (observer) {
    // setup the data stream
    var emitter = getEmitter();
    var handler = function (d) {
        observer.onNext(d);
    };
    emitter.on('data', handler);
    return function () {
        // tear down the data stream in your disposal function
        emitter.removeListener('data', handler);
    };
});
// Now you can do Rx magic!
emitterObservable
    // if it doesn't emit in 700ms, throw a timeout error
    .timeout(700)
    // catch all* errors and retry
    // this means the emitter will be torn down and recreated
    // if it times out!
    .retry()
    // do something with the values
    .subscribe(function (x) { console.log(x); });
* NOTE: retry catches all errors, so you may want to add a catch above it to handle non-timeout errors. Up to you.
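For instance (my own sketch, in RxJS 4 style to match the answer; the error shape produced by timeout varies between Rx versions, so adjust the predicate accordingly):
emitterObservable
    .timeout(700)
    .catch(function (err) {
        if (/timeout/i.test(err.message)) {
            // rethrow so retry() below tears down and recreates the emitter
            return Rx.Observable.throw(err);
        }
        // swallow (or log/report) non-timeout errors without retrying
        return Rx.Observable.empty();
    })
    .retry()
    .subscribe(function (x) { console.log(x); });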