RxJs Dynamically add events from another EventEmitter - javascript

I have an Observable coming from an EventEmitter which is really just a http connection, streaming events.
Occasionally I have to disconnect from the underlying stream and reconnect. I am not sure how to handle this with rxjs.
I am not sure if I can complete a source and then dynamically add another "source" to it, or if I have to do something like what I have at the very bottom.
var Rx = require('rx'),
EventEmitter = require('events').EventEmitter;
var eventEmitter = new EventEmitter();
var eventEmitter2 = new EventEmitter();
var source = Rx.Observable.fromEvent(eventEmitter, 'data')
var subscription = source.subscribe(function (data) {
console.log('data: ' + data);
});
setInterval(function() {
eventEmitter.emit('data', 'foo');
}, 500);
// eventEmitter stop emitting data, underlying connection closed
// now attach seconds eventemitter (new connection)
// something like this, but obviously doesn't work
source
.fromEvent(eventEmitter2, 'data')
Pseudo code that is closer to what I am doing: I am creating a second stream connection before I close the first, so I don't "lose" any data. Here I am not sure how to stop the Observable without "losing" records due to onNext not being called because of the buffer.
var streams = [], notifiers = [];
// create initial stream
createNewStream();
setInterval(function() {
if (params of stream have changed) createNewStream();
}, $1minutes / 3);
function createNewStream() {
var stream = new eventEmitterStream();
stream.once('connected', function() {
stopOthers();
streams.push(stream);
createSource(stream, 'name', 'id');
});
}
function stopOthers() {
while(streams.length > 0) {
streams.pop().stop(); // stop the old stream
}
while(notifiers.length > 0) {
// if i call this, the buffer may lose records, before onNext() called
//notifiers.pop()(Rx.Notification.createOnCompleted());
}
}
function createObserver(tag) {
return Rx.Observer.create(
function (x) {
console.log('Next: ', tag, x.length, x[0], x[x.length-1]);
},
function (err) {
console.log('Error: ', tag, err);
},
function () {
console.log('Completed', tag);
});
}
function createSource(stream, event, id) {
var source = Rx.Observable
.fromEvent(stream, event)
.bufferWithTimeOrCount(time, max);
var subscription = source.subscribe(createObserver(id));
var notifier = subscription.toNotifier();
notifiers.push(notifier);
}

First and foremost, you need to make sure you can remove all listeners from your previously "dead" emitter. Otherwise you'll create a leaky application.
It seems like the only way you'll know that an EventEmitter has died is to watch frequency, unless you have an event that fires on error or completion (for disconnections). The latter is much, much more preferable.
Regardless, the secret sauce of Rx is making sure to wrap your data stream creation and teardown in your observable. If you wrap the creation of the emitter in your observable, as well as a means to tear it down, you'll be able to use awesome things like the retry operator to recreate that observable.
So if you have no way of knowing if it died, and you want to reconnect it, you can use something like this:
// I'll presume you have some function to get an EventEmitter that
// is already set up
// Construct a demo emitter that fires a 'data' event with the payload
// 'foo' every 500ms (stands in for a real, already-configured source).
function getEmitter() {
  const emitter = new EventEmitter();
  const announce = () => {
    emitter.emit('data', 'foo');
  };
  setInterval(announce, 500);
  return emitter;
}
// Wrap the emitter's lifecycle inside the Observable: creation happens on
// subscribe, teardown in the returned disposal function. This is what lets
// operators like retry() rebuild the stream from scratch.
var emitterObservable = Observable.create(function(observer) {
  // setup the data stream
  var emitter = getEmitter();
  var handler = function(d) {
    observer.onNext(d);
  };
  emitter.on('data', handler);
  return function() {
    // tear down the data stream in your disposal function.
    // BUG FIX: the original called removeListener('on', handler), which
    // removes nothing — the handler was registered for the 'data' event,
    // so it must also be removed from 'data' (otherwise every retry leaks
    // a listener on the old emitter).
    emitter.removeListener('data', handler);
  };
});
// Now you can do Rx magic!
emitterObservable
// if it doesn't emit in 700ms, throw a timeout error
.timeout(700)
// catch all* errors and retry
// this means the emitter will be torn down and recreated
// if it times out!
.retry()
// do something with the values
.subscribe(function(x) { console.log(x); });
* NOTE: retry catches all errors, so you may want to add a catch above it to handle non-timeout errors. Up to you.

Related

Javascript - websocket message stuck in switch case [duplicate]

I'm trying to implement a WebSocket with a fallback to polling. If the WebSocket connection succeeds, readyState becomes 1, but if it fails, readyState is 3, and I should begin polling.
I tried something like this:
var socket = new WebSocket(url);
socket.onmessage = onmsg;
while (socket.readyState == 0)
{
}
if (socket.readyState != 1)
{
// fall back to polling
setInterval(poll, interval);
}
I was expecting socket.readyState to update asynchronously, and allow me to read it immediately. However, when I run this, my browser freezes (I left it open for about half a minute before giving up).
I thought perhaps there was an onreadyStateChanged event, but I didn't see one in the MDN reference.
How should I be implementing this? Apparently an empty loop won't work, and there is no event for this.
This is simple and it works perfectly... you can add a condition about the maximum time, or the number of tries, to make it more robust...
// Defer sending `msg` until the socket connection reports it is ready.
function sendMessage(msg){
  const deliver = function () {
    console.log("message sent!!!");
    ws.send(msg);
  };
  // Hold off until the socket is connected, then deliver the message.
  waitForSocketConnection(ws, deliver);
}
// Poll every 5 milliseconds until the socket's readyState is OPEN (1),
// then invoke the callback (if one was supplied).
function waitForSocketConnection(socket, callback){
  const check = function () {
    if (socket.readyState !== 1) {
      console.log("wait for connection...")
      waitForSocketConnection(socket, callback);
      return;
    }
    console.log("Connection is made")
    if (callback != null){
      callback();
    }
  };
  setTimeout(check, 5); // re-check after 5 milliseconds
}
Here is a more elaborate explanation. First off, check the specific browser API, as not all browsers will be on the latest RFC. You can consult the specification for the full details.
You don't want to run a loop to constantly check the readystate, it's extra overhead you don't need. A better approach is to understand all of the events relevant to a readystate change, and then wire them up appropriately. They are as follows:
onclose An event listener to be called when the WebSocket connection's readyState changes to CLOSED. The listener receives a CloseEvent named "close".
onerror An event listener to be called when an error occurs. This is a simple event named "error".
onmessage An event listener to be called when a message is received from the server. The listener receives a MessageEvent named "message".
onopen An event listener to be called when the WebSocket connection's readyState changes to OPEN; this indicates that the connection is ready to send and receive data. The event is a simple one with the name "open".
JS is entirely event driven, so you need to just wire up all of these events and check for the readystate, this way you can switch from WS to polling accordingly.
I recommend you look at the Mozilla reference, it's easier to read than the RFC document and it will give you a good overview of the API and how it works (link).
Don't forget to do a callback for a retry if you have a failure and poll until the callback for a successful reconnect is fired.
I am not using polling at all. Instead, I use queuing.
First I create new send function and a queue:
// Outbound queue: messages written before the socket is OPEN are held
// here and flushed later (see the onopen handler below).
var msgs = []

// Send `msg` immediately when the socket is OPEN (readyState === 1),
// otherwise park it in the queue.
function send (msg) {
  const socketIsOpen = ws.readyState === 1
  if (socketIsOpen) {
    ws.send(msg)
  } else {
    msgs.push(msg)
  }
}
Then I need to read and send when the connection is first established:
// Click handler: lazily open the WebSocket on first use, flush any queued
// messages once it connects, then route the new message through send().
function my_element_click () {
  if (ws == null){
    ws = new WebSocket(websocket_url)
    ws.onopen = function () {
      // BUG FIX: drain the queue in FIFO order with shift(). The original
      // used msgs.pop(), which sends the buffered messages in reverse
      // order of how they were queued.
      while (msgs.length > 0) {
        ws.send(msgs.shift())
      }
    }
    ws.onerror = function(error) {
      // do sth on error
    }
  }
  // BUG FIX: declare `msg` locally; the original assigned an implicit
  // global, which is a ReferenceError in strict mode / ES modules.
  const msg = {type: 'mymessage', data: my_element.value}
  send(JSON.stringify(msg))
}
The WebSocket connection in this example is created only on the first click. From the second message onwards, messages are usually sent directly.
Look on http://dev.w3.org/html5/websockets/
Search for "Event handler" and find the Table.
onopen -> open
onmessage -> message
onerror ->error
onclose ->close
function update(e){ /*Do Something*/};
var ws = new WebSocket("ws://localhost:9999/");
ws.onmessage = update;
If you use async/await and you just want to wait until the connection is available I would suggest this function :
// Wait until the given WebSocket has left the CONNECTING state, polling
// every 100ms for up to `timeout` milliseconds, and report whether the
// socket ended up OPEN.
// BUG FIX: declared as a standalone function. The original used object
// method shorthand (`async connection (...) {}`), which is a syntax error
// outside an object literal or class, yet the usage example calls it as a
// free function: `await connection(websocket)`.
async function connection (socket, timeout = 10000) {
  const isOpened = () => (socket.readyState === WebSocket.OPEN)
  if (socket.readyState !== WebSocket.CONNECTING) {
    // already settled (OPEN, CLOSING or CLOSED) — no need to wait
    return isOpened()
  }
  else {
    const intrasleep = 100
    const ttl = timeout / intrasleep // number of poll iterations allowed
    let loop = 0
    while (socket.readyState === WebSocket.CONNECTING && loop < ttl) {
      await new Promise(resolve => setTimeout(resolve, intrasleep))
      loop++
    }
    return isOpened()
  }
}
Usage (in async function) :
const websocket = new WebSocket('...')
const opened = await connection(websocket)
if (opened) {
websocket.send('hello')
}
else {
console.log("the socket is closed OR couldn't have the socket in time, program crashed");
return
}
tl;dr
A simple proxy wrapper to add state event to WebSocket which will be emitted when its readyState changes:
// Proxy around the native WebSocket constructor: every instance it creates
// gains a synthetic 'state' event (and an optional `onstate` handler
// property) that fires whenever a readyState change is observed.
const WebSocketProxy = new Proxy(WebSocket, {
construct: function(target, args) {
// create WebSocket instance
const instance = new target(...args);
//internal function to dispatch 'state' event when readyState changed
function _dispatchStateChangedEvent() {
instance.dispatchEvent(new Event('state'));
if (instance.onstate && typeof instance.onstate === 'function') {
instance.onstate();
}
}
//dispatch event immediately after websocket was initiated
//obviously it will be CONNECTING event
// (the setTimeout(…, 0) defers the dispatch so the caller has a chance
// to attach its 'state' listeners before the first notification fires)
setTimeout(function () {
_dispatchStateChangedEvent();
}, 0);
// WebSocket "onopen" handler
const openHandler = () => {
_dispatchStateChangedEvent();
};
// WebSocket "onclose" handler
const closeHandler = () => {
_dispatchStateChangedEvent();
// NOTE(review): only 'open' and 'close' are listened for here — 'error'
// is not. A failed connection is only surfaced via whatever 'close'
// follows it; confirm that is acceptable for the use case.
instance.removeEventListener('open', openHandler);
instance.removeEventListener('close', closeHandler);
};
// add event listeners
instance.addEventListener('open', openHandler);
instance.addEventListener('close', closeHandler);
return instance;
}
});
A long explanation:
You can use a Proxy object to monitor inner WebSocket state.
This is a good article which explains how to do it Debugging WebSockets using JS Proxy Object
And here is an example of code snippet from the article above in case the site won't be available in the future:
// proxy the window.WebSocket object
var WebSocketProxy = new Proxy(window.WebSocket, {
construct: function(target, args) {
// create WebSocket instance
const instance = new target(...args);
// WebSocket "onopen" handler
const openHandler = (event) => {
console.log('Open', event);
};
// WebSocket "onmessage" handler
const messageHandler = (event) => {
console.log('Message', event);
};
// WebSocket "onclose" handler
const closeHandler = (event) => {
console.log('Close', event);
// remove event listeners
instance.removeEventListener('open', openHandler);
instance.removeEventListener('message', messageHandler);
instance.removeEventListener('close', closeHandler);
};
// add event listeners
instance.addEventListener('open', openHandler);
instance.addEventListener('message', messageHandler);
instance.addEventListener('close', closeHandler);
// proxy the WebSocket.send() function
const sendProxy = new Proxy(instance.send, {
apply: function(target, thisArg, args) {
console.log('Send', args);
target.apply(thisArg, args);
}
});
// replace the native send function with the proxy
instance.send = sendProxy;
// return the WebSocket instance
return instance;
}
});
// replace the native WebSocket with the proxy
window.WebSocket = WebSocketProxy;
Just like you defined an onmessage handler, you can also define an onerror handler. This one will be called when the connection fails.
var socket = new WebSocket(url);
socket.onmessage = onmsg;
socket.onerror = function(error) {
// connection failed - try polling
}
Your while loop is probably locking up your thread. Try using:
// Check the socket shortly after creation without blocking the thread.
// BUG FIX: the original ran the check exactly once after 50ms and did
// nothing when the socket was still CONNECTING (readyState 0), so the
// polling fallback could never trigger in that case. Re-schedule the
// check until the connection attempt has settled.
function checkSocketState() {
  if (socket.readyState === 0) {
    // still CONNECTING — look again shortly
    setTimeout(checkSocketState, 50);
  } else if (socket.readyState !== 1) {
    // settled but not OPEN — fall back to polling
    setInterval(poll, interval);
  }
}
setTimeout(checkSocketState, 50);
In my use case, I wanted to show an error on screen if the connection fails.
let $connectionError = document.getElementById("connection-error");
setTimeout( () => {
if (ws.readyState !== 1) {
$connectionError.classList.add( "show" );
}
}, 100 ); // ms
Note that in Safari (9.1.2) no error event gets fired - otherwise I would have placed this in the error handler.

IndexedDB's callbacks not being executed inside the 'fetch' event of a Service Worker

I'm trying to do a couple of things in the IndexedDB database inside the 'fetch' event of a service worker, when the aplication asks the server for a new page. Here's what I'm going for:
Create a new object store (they need to be created dynamically, according to the data that 'fetch' picks up);
Store an element on the store.
Or, if the store already exists:
Get an element from the store;
Update the element and store it back on the store.
The problem is that the callbacks (onupgradeneeded, onsuccess, etc) never get executed.
I've been trying with the callbacks inside of each other, though I know that may not be the best approach. I've also tried placing an event.waitUntil() on 'fetch' but it didn't help.
The 'fetch' event, where the function registerPageAccess is called:
// Service worker fetch handler: serve from cache when possible and record
// page-to-page navigation in IndexedDB as a side effect.
self.addEventListener('fetch', function (event) {
event.respondWith(
caches.match(event.request)
.then(function (response) {
// NOTE(review): event.waitUntil() expects a Promise, but this IIFE
// returns undefined (the function body has no return statement), so it
// does NOT extend the service worker's lifetime — a likely reason the
// IndexedDB callbacks in registerPageAccess never get to run.
event.waitUntil(function () {
const nextPageURL = new URL(event.request.url);
// only track top-level document navigations, not subresources
if (event.request.destination == 'document') {
if (currentURL) {
registerPageAccess(currentURL, nextPageURL);
}
currentURL = nextPageURL;
}
}());
/*
* some other operations
*/
// fall through to the network when the cache has no match
return response || fetch(event.request);
})
);
});
registerPageAccess, the function with the callbacks.
I know it's plenty of code, but just look at secondRequest.onupgradeneeded in the 5th line. It is never executed, let alone the following ones.
// Bump the database version, create an object store named after the current
// page URL during the upgrade, and insert/update a visit counter keyed by
// the next page URL.
function registerPageAccess(currentPageURL, nextPageURL) {
// NOTE(review): db.version is already a number; parseInt without a radix
// works here but is redundant — confirm `db` is an open IDBDatabase.
var newVersion = parseInt(db.version) + 1;
// NOTE(review): a versioned open stays blocked (and onupgradeneeded never
// fires) while any other connection to DB_NAME — including the existing
// `db` — remains open. This is a likely cause of "callbacks never execute";
// confirm the old connection is closed before opening with a new version.
var secondRequest = indexedDB.open(DB_NAME, newVersion);
secondRequest.onupgradeneeded = function (e) {
db = e.target.result;
db.createObjectStore(currentPageURL, { keyPath: "pageURL" });
// NOTE(review): `request` is not defined in this scope — presumably
// `secondRequest` (or the upgrade transaction from e.target) was meant.
var transaction = request.result.transaction([currentPageURL], 'readwrite');
var store = transaction.objectStore(currentPageURL);
var getRequest = store.get(nextPageURL);
getRequest.onsuccess = function (event) {
var obj = getRequest.result;
if (!obj) {
// Insert element into the database
console.debug('ServiceWorker: No matching object in the database');
const addRes = putInObjectStore(nextPageURL, 1, store);
addRes.onsuccess = function (event) {
console.debug('ServiceWorker: Element was successfully added in the Object Store');
}
addRes.onerror = function (event) {
console.error('ServiceWorker error adding element to the Object Store: ' + addRes.error);
}
}
else {
// Updating database element
const updRes = putInObjectStore(obj.pageURL, obj.nVisits + 1, store);
updRes.onsuccess = function (event) {
console.debug('ServiceWorker: Element was successfully updated in the Object Store');
}
updRes.onerror = function (event) {
// NOTE(review): `putRes` is not defined — this should read `updRes.error`.
console.error('ServiceWorker error updating element of the Object Store: ' + putRes.error);
}
}
};
};
secondRequest.onsuccess = function (e) {
console.log('ServiceWorker: secondRequest onsuccess');
};
secondRequest.onerror = function (e) {
console.error('ServiceWorker: error on the secondRequest.open: ' + secondRequest.error);
};
}
I need a way to perform the operations in registerPageAccess, which involve executing a couple of callbacks, but the browser seems to kill the Service Worker before they get to occur.
All asynchronous logic inside of a service worker needs to be promise-based. Because IndexedDB is callback-based, you're going to find yourself needing to wrap the relevant callbacks in a promise.
I'd strongly recommend not attempting to do this on your own, and instead using one of the following libraries, which are well-tested, efficient, and lightweight:
idb-keyval, if you're okay with a simple key-value store.
idb if you're need the full IndexedDB API.
I'd also recommend that you consider using the async/await syntax inside of your service worker's fetch handler, as it tends to make promise-based code more readable.
Put together, this would look roughly like:
self.addEventListener('fetch', (event) => {
event.waitUntil((async () => {
// Your IDB cleanup logic here.
// Basically, anything that can execute separately
// from response generation.
})());
event.respondWith((async () => {
// Your response generation logic here.
// Return a Response object at the end of the function.
})());
});

Standard way to maintain socket listeners

I have an node.js socket.io application where I have a few different events and listeners. Right now this is how I am doing it.
// One shared emitter instance; events emitted on it elsewhere in the app
// are relayed to each connected socket.io client below.
class testEmitterClass extends events {
}
const testEmitter = new testEmitterClass();
io.on('connection', function (socket) {
console.log('connected');
// One forwarding listener per event type, kept in named variables so they
// can be detached on disconnect (removeListener requires the exact same
// function reference that was passed to on()).
let dnsInactiveTermsListener = function (dnsInactiveTerms) {
socket.emit(socketEvents.DNS_INACTIVE_TERMS, dnsInactiveTerms);
};
let checkpointInactiveTermsListener = function(checkpointInactiveTerms) {
socket.emit(socketEvents.CHECKPOINT_INACTIVE_TERMS, checkpointInactiveTerms);
};
let dnsActiveTermsListener = function (dnsActiveTerms) {
socket.emit(socketEvents.DNS_ACTIVE_TERMS, dnsActiveTerms);
};
let checkpointActiveTermsListener = function(checkpointActiveTerms) {
socket.emit(socketEvents.CHECKPOINT_ACTIVE_TERMS, checkpointActiveTerms);
};
let dnsCountListener = function (dnsCountStreaming) {
socket.emit(socketEvents.DNS_COUNT, dnsCountStreaming);
};
// attach all five forwarders for this client
testEmitter.on(socketEvents.CHECKPOINT_ACTIVE_TERMS, checkpointActiveTermsListener);
testEmitter.on(socketEvents.DNS_INACTIVE_TERMS, dnsInactiveTermsListener);
testEmitter.on(socketEvents.CHECKPOINT_INACTIVE_TERMS, checkpointInactiveTermsListener);
testEmitter.on(socketEvents.DNS_ACTIVE_TERMS, dnsActiveTermsListener);
testEmitter.on(socketEvents.DNS_COUNT, dnsCountListener);
// Detach this socket's listeners again so the shared emitter does not
// accumulate handlers for disconnected clients.
socket.on('disconnect', function () {
console.log('disconnected');
testEmitter.removeListener(socketEvents.DNS_INACTIVE_TERMS, dnsInactiveTermsListener);
testEmitter.removeListener(socketEvents.DNS_ACTIVE_TERMS, dnsActiveTermsListener);
testEmitter.removeListener(socketEvents.DNS_COUNT, dnsCountListener);
testEmitter.removeListener(socketEvents.CHECKPOINT_INACTIVE_TERMS, checkpointInactiveTermsListener);
testEmitter.removeListener(socketEvents.CHECKPOINT_ACTIVE_TERMS, checkpointActiveTermsListener);
})
});
The testemitter is a single instance which is emitting events somewhere else and being sent to the client using socket.io
Is there a way to maintain single list of the listeners somewhere so that this code can be maintained better? How can I map events to the listeners so that they can be added and removed as a client disconnected from socket.io without making a mess.
socketEvents is just an object of event names.
const DNS_COUNT = 'dnsCount';
const DNS_INACTIVE_TERMS = 'dnsInactiveTerms';
const DNS_ACTIVE_TERMS = 'dnsActiveTerms';
const CHECKPOINT_INACTIVE_TERMS = 'checkpointInactiveTerms';
const CHECKPOINT_ACTIVE_TERMS = 'checkpointActiveTerms';
module.exports = {
DNS_COUNT,
DNS_INACTIVE_TERMS,
CHECKPOINT_INACTIVE_TERMS,
DNS_ACTIVE_TERMS,
CHECKPOINT_ACTIVE_TERMS
};
Hope I made myself clear, thanks!
I think you can change the whole way you do things. Rather than register an event handler for every single socket that connects, you can just broadcast the message to all connected sockets. So, I think you can replace everything you show with just this:
class testEmitterClass extends events {
}
const testEmitter = new testEmitterClass();

// Names of the socketEvents keys to forward, written as strings because
// they are used to index socketEvents below (the original listed bare
// identifiers, which are undefined in this scope).
// BUG FIX: the original listed CHECKPOINT_INACTIVE_TERMS twice (which
// would register a duplicate listener and double-emit that event) and
// omitted DNS_ACTIVE_TERMS entirely.
const notifications = [
  'CHECKPOINT_ACTIVE_TERMS',
  'DNS_INACTIVE_TERMS',
  'CHECKPOINT_INACTIVE_TERMS',
  'DNS_ACTIVE_TERMS',
  'DNS_COUNT'
];
for (const msg of notifications) {
  testEmitter.on(socketEvents[msg], function (data) {
    // broadcast this message and its payload to all connected sockets
    io.emit(socketEvents[msg], data);
  });
}
Also notice that the code has been DRYed by using a table of messages that you can loop through rather than repeating the same statements over and over again. So, now to add, remove or edit one of your notification messages, you just modify the table in one place.
If socketEvents (which you don't show) is just an object with these 5 properties on it, then you could even remove the notifications array by just iterating the properties of socketEvents.
That would further reduce the code to this:
// Forward every event defined in socketEvents from the single shared
// emitter instance to all connected socket.io clients.
class testEmitterClass extends events {
}
const testEmitter = new testEmitterClass();

Object.keys(socketEvents).forEach((key) => {
  testEmitter.on(socketEvents[key], (payload) => {
    // broadcast this message and its payload to every connected socket
    io.emit(socketEvents[key], payload);
  });
});

How do I ensure that a call to subscribe on the observer initially receives the most recent value?

I am using rxjs together with Angular 2 and Typescript. I would like to share a common web-resource (a "project" in the context of my app, essentially a JSON document) between multiple components. To achieve this I introduced a service that exposes an observable, which will be shared by all clients:
/**
* Handed out to clients so they can subscribe to something.
*/
private _observable : Observable<Project>;
/**
* Used to emit events to clients.
*/
private _observer : Observer<Project>;
constructor(private _http: Http) {
// Create observable and observer once and for all. These instances
// are not allowed to changed as they are passed on to every subscriber.
this._observable = Observable.create( (obs : Observer<Project>) => {
this._observer = obs;
});
}
Clients now simply get a reference to that one _observable and subscribe to it.
/**
* Retrieves an observable that always points to the active
* project.
*/
get ActiveProject() : Observable<Project> {
return (this._observable);
}
When some component decides to actually load a project, it calls the following method:
/**
* #param id The id of the project to set for all subscribers
*/
setActiveProject(id : string) {
// Projects shouldn't change while other requests are in progress
if (this._httpRequest) {
throw { "err" : "HTTP request in progress" };
}
this._httpRequest = this._http.get('/api/project/' + id)
.catch(this.handleError)
.map(res => new Project(res.json()));
this._httpRequest.subscribe(res => {
// Cache the project
this._cachedProject = res;
// Show that there are no more requests
this._httpRequest = null;
// Inform subscribers
this._observer.next(this._cachedProject)
console.log("Got project");
});
}
It basically does a HTTP request, transforms the JSON document into a "proper" instance and calls this._observer.next() to inform all subscribers about the change.
But if something subscribes after the HTTP request has already taken place, they see nothing until a new HTTP request is issued. I have found out that there is some kind of caching (or replay?) mechanism in rxjs that seems to address this, but I couldn't figure out how to use it.
tl;dr: How do I ensure that a call to subscribe on the observer initially receives the most recent value?
Extra question: By "pulling the observer out of the observable" (in the constructor), have I essentially created a subject?
That's what BehaviorSubject does
import { BehaviorSubject } from 'rxjs/subject/BehaviorSubject';
...
obs=new BehaviourSubject(4);
obs.subscribe(); //prints 4
obs.next(3); //prints 3
obs.subscribe(); //prints 3
I usually achieve this with shareReplay(1). Using this operator with 1 as parameter will ensure that the latest value emitted will be kept in a buffer, so when there is a new subscriber that value is immediately passed on to it. You can have a look at the documentation :
var interval = Rx.Observable.interval(1000);
var source = interval
.take(4)
.doAction(function (x) {
console.log('Side effect');
});
var published = source
.shareReplay(3);
published.subscribe(createObserver('SourceA'));
published.subscribe(createObserver('SourceB'));
// Creating a third subscription after the previous two subscriptions have
// completed. Notice that no side effects result from this subscription,
// because the notifications are cached and replayed.
Rx.Observable
.return(true)
.delay(6000)
.flatMap(published)
.subscribe(createObserver('SourceC'));
// Build an Rx observer that logs next/error/completed notifications,
// prefixing next values with the given tag.
function createObserver(tag) {
  const onNext = (value) => console.log('Next: ' + tag + value);
  const onError = (err) => console.log('Error: ' + err);
  const onCompleted = () => console.log('Completed');
  return Rx.Observer.create(onNext, onError, onCompleted);
}
// => Side effect
// => Next: SourceA0
// => Next: SourceB0
// => Side effect
// => Next: SourceA1
// => Next: SourceB1
// => Side effect
// => Next: SourceA2
// => Next: SourceB2
// => Side effect
// => Next: SourceA3
// => Next: SourceB3
// => Completed
// => Completed
// => Next: SourceC1
// => Next: SourceC2
// => Next: SourceC3
// => Completed
Extra question: By "pulling the observer out of the observable" (in
the constructor), have I essentially created a subject?
I am not sure what you mean by that, but no. A subject is both an observer and an observable and have specific semantics. It is not enough to 'pull the observer out of the observable' as you say. For subjects semantics, have a look here : What are the semantics of different RxJS subjects?

forcing completion of an rxjs observer

I've got an rxjs observer (really a Subject) that tails a file forever, just like tail -f. It's awesome for monitoring logfiles, for example.
This "forever" behavior is great for my application, but terrible for testing. Currently my application works but my tests hang forever.
I'd like to force an observer change to complete early, because my test code knows how many lines should be in the file. How do I do this?
I tried calling onCompleted on the Subject handle I returned but at that point it's basically cast as an observer and you can't force it to close, the error is:
Object #&lt;Object&gt; has no method 'onCompleted'
Here's the source code:
// Wrap a `tail -f`-style follower of `filename` in an Rx Subject,
// exposed to callers as this.source.
function ObserveTail(filename) {
// NOTE(review): `source` here and `tail` below are assigned without
// var/let — they leak as implicit globals (and throw in strict mode).
source = new Rx.Subject();
if (fs.existsSync(filename) == false) {
console.error("file doesn't exist: " + filename);
}
var lineSep = /[\r]{0,1}\n/;
tail = new Tail(filename, lineSep, {}, true);
// forward each tailed line to subscribers
tail.on("line", function(line) {
source.onNext(line);
});
// a closed tail terminates the stream
tail.on('close', function(data) {
console.log("tail closed");
source.onCompleted();
});
// NOTE(review): errors are only logged here, never pushed through
// source.onError(), so subscribers cannot observe them.
tail.on('error', function(error) {
console.error(error);
});
this.source = source;
}
And here's the test code that can't figure out how to force forever to end (tape style test). Note the "ILLEGAL" line:
test('tailing a file works correctly', function(tid) {
var lines = 8;
var i = 0;
var filename = 'tape/tail.json';
var handle = new ObserveTail(filename);
touch(filename);
handle.source
.filter(function (x) {
try {
JSON.parse(x);
return true;
} catch (error) {
tid.pass("correctly caught illegal JSON");
return false;
}
})
.map(function(x) { return JSON.parse(x) })
.map(function(j) { return j.name })
.timeout(10000, "observer timed out")
.subscribe (
function(name) {
tid.equal(name, "AssetMgr", "verified name field is AssetMgr");
i++;
if (i >= lines) {
handle.onCompleted(); // XXX ILLEGAL
}
},
function(err) {
console.error(err)
tid.fail("err leaked through to subscriber");
},
function() {
tid.end();
console.log("Completed");
}
);
})
It sounds like you solved your problem, but to your original question
I'd like to force an observer change to complete early, because my test code knows how many lines should be in the file. How do I do this?
In general the use of Subjects is discouraged when you have better alternatives, since they tend to be a crutch for people to use programming styles they are familiar with. Instead of trying to use a Subject I would suggest that you think about what each event would mean in an Observable life cycles.
Wrap Event Emitters
There already exists wrapper for the EventEmitter#on/off pattern in the form of Observable.fromEvent. It handles clean up and keeping the subscription alive only when there are listeners. Thus ObserveTail can be refactored into
// Wrap `tail -f`-style file watching in an Rx Observable: each tailed
// line is emitted as onNext, a "close" event ends the stream, and an
// "error" event is surfaced as an Rx error.
function ObserveTail(filename) {
  return Rx.Observable.create(function (observer) {
    var newlinePattern = /[\r]{0,1}\n/;
    tail = new Tail(filename, newlinePattern, {}, true);
    var lines = Rx.Observable.fromEvent(tail, "line");
    var closed = Rx.Observable.fromEvent(tail, "close");
    var failures = Rx.Observable
      .fromEvent(tail, "error")
      .flatMap(function (e) { return Rx.Observable.throw(e); });
    // "close" and "error" are the terminal events here: stop taking lines
    // once the tail closes, and fold errors into the stream itself.
    return lines.takeUntil(closed).merge(failures).subscribe(observer);
  });
}
Which has several benefits over the vanilla use of Subjects, one, you will now actually see the error downstream, and two, this will handle clean up of your events when you are done with them.
Avoid *Sync Methods
Then this can be rolled into your file existence checking without the use of readSync
//If it doesn't exist then we are done here
//You could also throw from the filter if you want an error tracked
var source = Rx.Observable.fromNodeCallback(fs.exists)(filename)
.filter(function(exists) { return exists; })
.flatMap(ObserveTail(filename));
Next you can simplify your filter/map/map sequence down by using flatMap instead.
var result = source.flatMap(function(x) {
try {
return Rx.Observable.just(JSON.parse(x));
} catch (e) {
return Rx.Observable.empty();
}
},
//This allows you to map the result of the parsed value
function(x, json) {
return json.name;
})
.timeout(10000, "observer timed out");
Don't signal, unsubscribe
How do you "signal" a stop when streams only travel in one direction? We rarely actually want to have an Observer directly communicate with an Observable, so a better pattern is to not actually "signal" a stop but to simply unsubscribe from the Observable and leave it up to the Observable's behavior to determine what it should do from there.
Essentially your Observer really shouldn't care about your Observable more than to say "I'm done here".
To do that you need to declare a condition you want to reach in when stopping.
In this case since you are simply stopping after a set number in your test case you can use take to unsubscribe. Thus the final subscribe block would look like:
result
//After lines is reached this will complete.
.take(lines)
.subscribe (
function(name) {
tid.equal(name, "AssetMgr", "verified name field is AssetMgr");
},
function(err) {
console.error(err)
tid.fail("err leaked through to subscriber");
},
function() {
tid.end();
console.log("Completed");
}
);
Edit 1
As pointed out in the comments, In the case of this particular api there isn't a real "close" event since Tail is essentially an infinite operation. In this sense it is no different from a mouse event handler, we will stop sending events when people stop listening. So your block would probably end up looking like:
// Observable wrapper around Tail for a source with no real "close" event:
// the file is watched for as long as at least one subscriber remains, and
// unwatched once the last one disposes (hence the trailing share()).
function ObserveTail(filename) {
  return Rx.Observable.create(function (observer) {
    var newlinePattern = /[\r]{0,1}\n/;
    tail = new Tail(filename, newlinePattern, {}, true);
    var lines = Rx.Observable.fromEvent(tail, "line");
    var failures = Rx.Observable
      .fromEvent(tail, "error")
      .flatMap(function (e) { return Rx.Observable.throw(e); });
    // Stop watching the file when the subscription is disposed, and
    // surface tail errors as stream errors.
    return lines
      .finally(function () { tail.unwatch(); })
      .merge(failures)
      .subscribe(observer);
  }).share();
}
The addition of the finally and the share operators creates an object which will attach to the tail when a new subscriber arrives and will remain attached as long as there is at least one subscriber still listening. Once all the subscribers are done however we can safely unwatch the tail.

Categories