When I run the following code 9999999+ times, Node exits with:
FATAL ERROR: CALL_AND_RETRY_2 Allocation failed - process out of memory
Aborted (core dumped)
What's the best way to get around this issue, other than increasing the max allocation size or using command line arguments?
I'd like to improve the code quality rather than hack in a solution.
The following is the main bulk of recursion within the application.
The application is a load testing tool.
a.prototype.createClients = function(){
    for(var i = 0; i < 999999999; i++){
        this.recursiveRequest();
    }
}

a.prototype.recursiveRequest = function(){
    var self = this;
    self.hrtime = process.hrtime();
    if(!this.halt){
        self.reqMade++;
        this.http.get(this.options, function(resp){
            resp.on('data', function(){})
                .on("connection", function(){
                })
                .on("end", function(){
                    self.onSuccess();
                });
        })
        .on("error", function(e){
            self.onError();
        });
    }
}
a.prototype.onSuccess = function(){
    var elapsed = process.hrtime(this.hrtime),
        ms = elapsed[0] * 1000000 + elapsed[1] / 1000;
    this.times.push(ms);
    this.successful++;
    this.recursiveRequest();
}
Looks like you should really be using a queue instead of recursive calls. async.queue offers a fantastic mechanism for processing asynchronous queues. You should also consider using the request module to make your http client connections simpler.
var async = require('async');
var request = require('request');
var load_test_url = 'http://www.testdomain.com/';
var parallel_requests = 1000;
function requestOne(task, callback) {
    request.get(task.url, function(err, connection, body) {
        if(err) return callback(err);
        q.push({url: load_test_url});
        callback();
    });
}

var q = async.queue(requestOne, parallel_requests);

for(var i = 0; i < parallel_requests; i++){
    q.push({url: load_test_url});
}
You can set the parallel_requests variable according to how many simultaneous requests you want to hit the test server with.
You are launching 1 billion "clients" in parallel, and having each of them perform an HTTP GET request in an endless recursion.
A few remarks:
while your question mentions 10 million clients, your code creates 1 billion clients.
You should replace the for loop with a recursive function to get rid of the out-of-memory error.
Something along these lines:
a.prototype.createClients = function(i){
    if (i < 999999999) {
        this.recursiveRequest();
        this.createClients(i+1);
    }
}
Then you probably want to include some delay between the client creations, or between the calls to recursiveRequest. Use setTimeout, as in the sketch after these remarks.
You should have a way to stop the recursion (onSuccess and recursiveRequest keep calling each other).
A flow control library like the async Node.js module may help.
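For illustration, here is a minimal sketch of what the paced, stoppable creation could look like (the targetClients field and the 10 ms delay are assumptions, not part of the original code):

a.prototype.createClients = function(i){
    var self = this;
    if (i >= self.targetClients || self.halt) return; // stop condition (targetClients is an assumed field)
    self.recursiveRequest();
    setTimeout(function(){
        self.createClients(i + 1); // yields to the event loop between client creations
    }, 10); // assumed pacing delay
};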
10 million is very large... Assuming that the stack supported any number of calls, it should work, but you are asking the JavaScript interpreter to hold 10 million times quite a bit of memory... and the result is out of memory.
Also, I personally do not see why you'd want so many requests at the same time (testing a heavy load on a server?). One way to optimize is to NOT create "floating functions", which you are doing a lot. "Floating functions" use their own set of memory on each instantiation.
this.http.get(this.options, function(resp){ ... });
                            ^^^^
                            ++++--- allocates memory x 10 million
Here the function(resp)... declaration allocates more memory on each call. What you want to do is:
// either in global scope:
function r(resp) {...}
this.http.get(this.options, r ...);

// or as a static member:
a.r = function(resp) {...};
this.http.get(this.options, a.r ...);
At least you'll save on all that function memory. That goes for all the functions you declare within the r function too, of course, especially if they are quite large.
If you want to use the this pointer (make r a prototype function), then you can do this:
a.prototype.r = function(resp) {...};

// note that we have to have a small function to use 'that'... probably not a good idea
var that = this;
this.http.get(this.options, function(resp){ that.r(resp); });
To avoid the that reference, you may use an instance saved in a global. That defeats the use of an object as such though:
a.instance = new a;

// r() is static, but can access the object as follows:
a.r = function(resp) { a.instance.<func>(); };
Using the instance you can access the object's functions from the static r function. That could be the actual implementation which could make full use of the this reference:
a.r = function(resp) { a.instance.r_impl(); }
According to a comment by Daniel, your problem is that you misuse a for() loop to count the total number of requests you want to send. This means you can apply a very simple fix to your code, as follows:
a.prototype.createClients = function(){
    this.recursiveRequest();
};

a.prototype.recursiveRequest = function(){
    var self = this;
    self.hrtime = process.hrtime();
    if(!this.halt && this.successful < 10000000){
        ...
Your recursion is enough to run the test any number of times.
What you never do is quit, though. You have a halt variable, but it does not look like you ever set it to true. However, to test 10 million times, you want to check the number of requests you have already sent.
My "fix" supposes that onError() fails (is not recursive). You could also change the code to make use of the halt flag, as in:
a.prototype.onSuccess = function(){
    var elapsed = process.hrtime(this.hrtime),
        ms = elapsed[0] * 1000000 + elapsed[1] / 1000;
    this.times.push(ms);
    this.successful++;
    if(this.successful >= 10000000)
    {
        this.halt = true;
    }
    this.recursiveRequest();
}
Note here that you will be pushing ms into the times array 10 million times. That's a big array! You may want to keep a running total instead and compute an average at the end:
this.time += ms;
// at the end:
this.average_time = this.time / this.successful;
I have a set of API endpoints in Express. One of them receives a request and starts a long running process that blocks other incoming Express requests.
My goal is to make this process non-blocking. To better understand the inner logic of the Node event loop and how I can do this properly, I want to replace the long running function with a dummy long running blocking function that starts when I send a request to its endpoint.
I suppose that different ways of making the dummy function blocking could cause Node to manage the blocking differently.
So, my question is: how can I make a basic blocking process as a function that runs infinitely?
You can use node-webworker-threads.
var Worker, i$, x$, spin;
Worker = require('webworker-threads').Worker;

for (i$ = 0; i$ < 5; ++i$) {
    x$ = new Worker(fn$);
    x$.onmessage = fn1$;
    x$.postMessage(Math.ceil(Math.random() * 30));
}

(spin = function(){
    return setImmediate(spin);
})();

function fn$(){
    var fibo;
    fibo = function(n){
        if (n > 1) {
            return fibo(n - 1) + fibo(n - 2);
        } else {
            return 1;
        }
    };
    return this.onmessage = function(arg$){
        var data;
        data = arg$.data;
        return postMessage(fibo(data));
    };
}

function fn1$(arg$){
    var data;
    data = arg$.data;
    console.log("[" + this.thread.id + "] " + data);
    return this.postMessage(Math.ceil(Math.random() * 30));
}
https://github.com/audreyt/node-webworker-threads
So, my question is: how can I make a basic blocking process as a function that runs infinitely?
function block() {
    // not sure why you need that though
    while(true);
}
I suppose that different ways of making the dummy function blocking could cause Node to manage the blocking differently.
Not really. I can't think of a "special way" to block the engine differently.
My goal is to make this process non-blocking.
If it is really that long-running, you should offload it to another thread.
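For example, here is a minimal sketch using Node's built-in worker_threads module (available in modern Node versions); the Fibonacci computation is just an arbitrary CPU-heavy stand-in:

// offload.js: run a CPU-heavy computation on another thread
const { Worker, isMainThread, parentPort } = require('worker_threads');

if (isMainThread) {
    const worker = new Worker(__filename); // re-runs this file as the worker
    worker.on('message', (result) => console.log('result:', result));
    worker.postMessage(30);
    // the main thread stays free to handle other requests meanwhile
} else {
    const fib = (n) => n > 1 ? fib(n - 1) + fib(n - 2) : 1;
    parentPort.on('message', (n) => parentPort.postMessage(fib(n)));
}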
There are shortcut ways to do a quick fix if it's a one-time thing; you can do it using an npm module that does the job.
But the right way to do it is to set up a common design pattern called 'work queues'. You will need to set up a queuing mechanism, like RabbitMQ, ZeroMQ, etc. It works like this: whenever you get a computation-heavy task, instead of doing it in the same thread, you send it to the queue with the relevant id values. Then a separate Node process, commonly called a 'worker' process, listens for new actions on the queue and processes them as they arrive. This is the worker queue pattern, and you can read up on it here:
https://www.rabbitmq.com/tutorials/tutorial-one-javascript.html
I would strongly advise you to learn this pattern, as you will come across many tasks that require this kind of mechanism. With this in place you can also scale your Node servers and your workers independently. A minimal sketch of the pattern follows.
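For instance, a sketch with the amqplib module from that tutorial (the queue name and task payload are assumptions):

var amqp = require('amqplib/callback_api');

// Producer (web server): push the heavy task onto the queue instead of running it.
amqp.connect('amqp://localhost', function(err, conn) {
    if (err) throw err;
    conn.createChannel(function(err, ch) {
        if (err) throw err;
        ch.assertQueue('heavy_tasks', { durable: true });
        ch.sendToQueue('heavy_tasks', Buffer.from(JSON.stringify({ taskId: 42 })), { persistent: true });
    });
});

// Worker (separate Node process): consume tasks and process them as they arrive.
amqp.connect('amqp://localhost', function(err, conn) {
    if (err) throw err;
    conn.createChannel(function(err, ch) {
        if (err) throw err;
        ch.assertQueue('heavy_tasks', { durable: true });
        ch.consume('heavy_tasks', function(msg) {
            var task = JSON.parse(msg.content.toString());
            // ...do the heavy computation here...
            ch.ack(msg); // acknowledge only once the work is done
        });
    });
});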
I am not sure what exactly your 'long processing' is, but in general you can approach this kind of problem in two different ways.
Option 1:
Use the webworker-threads module as #serkan pointed out. The usual 'thread' limitations apply in this scenario. You will need to communicate with the Worker in messages.
This method should be preferable only when the logic is too complicated to be broken down into smaller independent problems (explained in option 2). Depending on complexity you should also consider if native code would better serve the purpose.
Option 2:
Break down the problem into smaller problems. Solve a part of the problem, schedule the next part to be executed later, and yield to let NodeJS process other events.
For example, consider the following example for calculating the factorial of a number.
Sync way:
function factorial(inputNum) {
    let result = 1;
    while(inputNum) {
        result = result * inputNum;
        inputNum--;
    }
    return result;
}
Async way:
function factorial(inputNum) {
    return new Promise(resolve => {
        let result = 1;
        const calcFactOneLevel = () => {
            result = result * inputNum;
            inputNum--;
            if(inputNum) {
                return process.nextTick(calcFactOneLevel);
            }
            resolve(result);
        };
        calcFactOneLevel();
    });
}
The code in the second example will not block the Node process. You can send the response when the returned promise resolves.
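For instance, a hypothetical Express endpoint using it (the app, route, and response shape are assumptions):

app.get('/factorial/:n', (req, res) => {
    factorial(Number(req.params.n))
        .then(result => res.json({ result: String(result) }));
});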
I have a method that runs every 2 seconds to capture a video stream to canvas and write it to file:
function capture(streamName, callback) {
    var buffer,
        dataURL,
        dataSplit,
        _ctx;

    _ctx = _canvas[streamName].getContext('2d');
    _ctx.drawImage(_video[streamName], 0, 0);
    dataURL = _canvas[streamName].toDataURL('image/png');
    dataSplit = dataURL.split(",")[1];
    buffer = new Buffer(dataSplit, 'base64');
    fs.writeFileSync(directory + streamName + '.png', buffer);
}

setInterval(function() {
    // Called from here
    captureState.capture(activeScreens[currentScreenIndex]);
    gameState.pollForState(processId, activeScreens[currentScreenIndex], function() {
        // do things...
    });
}, 2000);
Assume _video[streamName] exists as a running <video> and _canvas[streamName] exists as a <canvas>. The method works; it just causes a memory leak.
The issue:
Garbage collection can't keep up with the amount of memory the method uses, and a memory leak ensues.
I have narrowed it down to this line:
buffer = new Buffer(dataSplit, 'base64');
If I comment that out, there is some accumulation of memory (~100MB) but it drops back down every 30s or so.
What I've tried:
Some posts suggested buffer = null; to remove the reference and mark for garbage collection, but that hasn't changed anything.
Any suggestions?
Timeline:
https://i.imgur.com/wH7yFjI.png
https://i.imgur.com/ozFwuxY.png
Allocation Profile:
https://www.dropbox.com/s/zfezp46um6kin7g/Heap-20160929T140250.heaptimeline?dl=0
Just to quantify. After about 30 minutes of run time it sits at 2 GB memory used. This is an Electron (chromium / desktop) app.
SOLVED
Pre-allocating the buffer is what fixed it. This means that in addition to scoping buffer outside of the function, you need to reuse the created buffer with buffer.write. In order to keep proper headers, make sure that you use the encoding parameter of buffer.write.
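For reference, here is a minimal sketch of that fix, reusing the names from the question (the 10 MB buffer size is an assumption; size it for your largest frame):

// Allocate once, outside the function, and reuse it on every capture.
var sharedBuffer = new Buffer(10 * 1024 * 1024); // assumed max frame size

function capture(streamName, callback) {
    _canvas[streamName].getContext('2d').drawImage(_video[streamName], 0, 0);
    var dataSplit = _canvas[streamName].toDataURL('image/png').split(',')[1];
    // write into the existing buffer instead of allocating a new one;
    // the 'base64' encoding argument decodes the PNG bytes correctly
    var written = sharedBuffer.write(dataSplit, 0, 'base64');
    fs.writeFileSync(directory + streamName + '.png', sharedBuffer.slice(0, written));
}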
Matt, I am not sure what was not working with the pre-allocated buffers, so I've posted an algorithm of how such pre-allocated buffers could be used. The key thing here is that the buffers are allocated only once, so there should not be any memory leak.
var buffers = [];
var bsize = 10000;

// allocate buffer pool
for(var i = 0; i < 10; i++){
    buffers.push({free: true, buf: new Buffer(bsize)});
}

// sample method that picks one of the buffers into use
function useOneBuffer(data){
    // find a free buffer
    var theBuf;
    var i = 0;
    while((typeof theBuf === 'undefined') && i < 10){
        if(buffers[i].free){
            theBuf = buffers[i];
        }
        i++;
    }
    if(typeof theBuf === 'undefined'){
        // return or throw... no free buffers left for now
        return;
    }
    theBuf.free = false;

    // start doing whatever you need with the buffer; write data in the needed format to it first,
    // BUT do not allocate.
    // Also, you may want to clear-write the existing data in the buffer, just in case, before reuse or after use.
    theBuf.buf.write(data);
    // .... continue using
    // don't forget to pass the reference to the buffers member along, because
    // when you are done you have to mark it as free so that it can be used again:
    // theBuf.free = true;
}
Did you try something like this? Where did it fail?
There is no leak of buffer object in your code.
Any Buffer objects that you no longer retain a reference to in your code will be immediately available for garbage collection.
The problem is caused by the callback and how you use it outside of the capture function.
Note that the GC cannot clean the buffer, or any other variable, as long as the callback is running.
I have narrowed it down to this line:
buffer = new Buffer(dataSplit, 'base64');
A short solution is to not use Buffer at all: it is not necessary for writing the file to the filesystem, since the file contents already exist in the base64 portion of the data URI. Also, setInterval does not appear to be cleared anywhere. You can keep a reference to the setInterval and call clearInterval() at the <video> element's ended event.
You can perform the function without declaring any variables. Remove the data, MIME type, and base64 portions of the data URI returned by HTMLCanvasElement.prototype.toDataURL(), as described at NodeJS: Saving a base64-encoded image to disk and this Answer at NodeJS write base64 image-file.
function capture(streamName, callback) {
    _canvas[streamName].getContext("2d")
        .drawImage(_video[streamName], 0, 0);
    fs.writeFileSync(directory + streamName + ".png"
        , _canvas[streamName].toDataURL("image/png").split(",")[1], "base64");
}

var interval = setInterval(function() {
    // Called from here
    captureState.capture(activeScreens[currentScreenIndex]);
    gameState.pollForState(processId, activeScreens[currentScreenIndex]
        , function() {
            // do things...
        });
}, 2000);

video[/* streamName */].addEventListener("ended", function(e) {
    clearInterval(interval);
});
I was having a similar issue recently with an app that uses ~500MB of data in ArrayBuffer form. I thought I had a memory leak, but it turned out Chrome was trying to optimize a set of large-ish ArrayBuffers and corresponding operations (each buffer ~60MB in size, plus some slightly larger objects). The CPU usage appeared to never allow the GC to run, or at least that's how it appeared; I have not read any specific spec for when the GC gets scheduled, so I cannot prove or disprove that. I had to do two things to resolve my issues:
I had to break the reference to the data in my arrayBuffers and some other large objects.
I had to force Chrome to have downtime, which appeared to give it time to schedule and then run the GC.
After applying those two steps, things ran for me and were garbage collected. Unfortunately, when applying those two things independently from each other, my app kept on crashing (exploding into GB of memory used before doing so). The following would be my thoughts on what I'd try on your code.
The problem with the garbage collector is that you cannot force it to run. So you can have objects that are ready to be collected, but for whatever reason the browser doesn't give the garbage collector the opportunity. Another approach to buffer = null would be to break the reference explicitly with the delete operator; this is what I did, though in theory ... = null is equivalent. It's important to note that delete cannot be used on any variable declared with var. So something like the following would be my suggestion:
function capture(streamName, callback) {
    this._ctx = _canvas[streamName].getContext('2d');
    this._ctx.drawImage(_video[streamName], 0, 0);
    this.dataURL = _canvas[streamName].toDataURL('image/png');
    this.dataSplit = this.dataURL.split(",")[1];
    this.buffer = new Buffer(this.dataSplit, 'base64');
    fs.writeFileSync(directory + streamName + '.png', this.buffer);

    delete this._ctx;      // because the context with the image used still exists
    delete this.dataURL;   // because the data used in dataSplit exists here
    delete this.dataSplit; // because the data used in buffer exists here
    delete this.buffer;
    // again, ... = null likely would work as well; I used delete
}
Second, the small break. It appears you've got some intensive processes going on, and the system cannot keep up. It's not actually hitting the 2s save mark because it needs more than 2 seconds per save: there is always a function on the queue executing the captureState.capture(...) method, and it never has time to garbage collect. Some helpful posts on the scheduler and the differences between setInterval and setTimeout:
http://javascript.info/tutorial/settimeout-setinterval
http://ejohn.org/blog/how-javascript-timers-work/
If that is for sure the case, why not use setTimeout and simply check that roughly 2 seconds (or more) have passed before executing? That check always forces your code to wait a set period of time between saves, giving the browser time to schedule/run GC. Something like what follows (100 ms setTimeout in the pollForState):
var MINIMUM_DELAY_BETWEEN_SAVES = 100;
var POLLING_DELAY = 100;

// get the time in ms
var ts = Date.now();

function intervalCheck(){
    // check if 2000 ms have passed
    if(Date.now() - ts > 2000){
        // reset the timestamp of the last time save was run
        ts = Date.now();
        // Called from here
        captureState.capture(activeScreens[currentScreenIndex]);
        // upon callback, force the system to take a break.
        setTimeout(function(){
            gameState.pollForState(processId, activeScreens[currentScreenIndex], function() {
                // do things...
                // and then schedule the intervalCheck again, but give it some time
                // to potentially garbage collect.
                setTimeout(intervalCheck, MINIMUM_DELAY_BETWEEN_SAVES);
            });
        }, MINIMUM_DELAY_BETWEEN_SAVES);
    } else {
        // reschedule the check in 1/10th of a second,
        // or after whatever may be executing next.
        setTimeout(intervalCheck, POLLING_DELAY);
    }
}

intervalCheck(); // kick off the loop
This means that a capture will happen no more than once every 2 seconds, but will also in some sense trick the browser into having the time to GC and remove any data that was left.
Last thoughts, entertaining a more traditional definition of memory leak: the candidates for a leak, based on what I see in your code, would be activeScreens, _canvas, or _video, which appear to be objects of some sort. It might be worthwhile to explore those if the above doesn't resolve your issue (I wouldn't be able to make any assessment based on what is currently shared).
Hope that helps!
In general, I would recommend using a local map keyed by UUID, or something similar that allows you to control your memory when dealing with getImageData and other buffers.
The UUID can be a pre-defined identifier e.g: "current-image" and "prev-image" if comparing between slides
E.g
existingBuffers: Record<string, Uint8ClampedArray> = {}
existingBuffers[ptrUid] = ImageData.data // (or something equivalent)
then if you want to override ("current-image") you can (overkill here):
existingBuffers[ptrUid] = new Uint8ClampedArray();
delete existingBuffers[ptrUid]
In addition, you will always be able to check your buffers and make sure they are not going out of control.
Maybe it is a bit old-school, but I found it comfortable.
I have a long-running for loop in my code, and I'd like to delay the loop to handle other tasks in the event queue (like a button press). Does JavaScript or jQuery have anything that could help me? Basically, I'm trying to do something similar to delaying loops as described here (https://support.microsoft.com/en-us/kb/118468).
If your application really requires long-running JavaScript code, one of the best ways to deal with it is by using JavaScript web workers. JavaScript code normally runs on the foreground thread, but by creating a web worker you can effectively keep a long-running process on a background thread, and your UI thread will be free to respond to user input.
As an example, you create a new worker like this:
var myWorker = new Worker("worker.js");
You can then post messages to it from the js in the main page like this:
myWorker.postMessage([first.value,second.value]);
console.log('Message posted to worker');
And respond to the messages in worker.js like this:
onmessage = function(e) {
    console.log('Message received from main script');
    var workerResult = 'Result: ' + (e.data[0] * e.data[1]);
    console.log('Posting message back to main script');
    postMessage(workerResult);
}
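Back on the main page you also handle the worker's reply, continuing the same example:

myWorker.onmessage = function(e) {
    console.log('Result received from worker: ' + e.data);
};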
With the introduction of generators in ES6, you can write a helper method that uses yield to emulate DoEvents without much syntactic overhead:
doEventsOnYield(function*() {
    // ... synchronous stuff ...
    yield; // Pump the event loop. DoEvents() equivalent.
    // ... synchronous stuff ...
});
Here's the helper method, which also exposes the completion/failure of the function as a Promise:
function doEventsOnYield(generator) {
    return new Promise((resolve, reject) => {
        let g = generator();
        let advance = () => {
            try {
                let r = g.next();
                if (r.done) {
                    resolve();
                    return; // stop pumping once the generator completes
                }
            } catch (ex) {
                reject(ex);
                return;
            }
            setTimeout(advance, 0);
        };
        advance();
    });
}
Note that, at this time, you probably need to run this through an ES6-to-ES5 transpiler for it to run on common browsers.
You can use setTimeout:
setTimeout(function() { }, 3600);
3600 is the time in milliseconds:
http://www.w3schools.com/jsref/met_win_settimeout.asp
There is no exact equivalent to DoEvents. Something close is using setTimeout for each iteration:
(function next(i) {
    // exit condition
    if (i >= 10) {
        return;
    }

    // body of the for loop goes here

    // queue up the next iteration
    setTimeout(function () {
        // increment
        next(i + 1);
    }, 0);
})(0); // initial value of i
However, that’s rarely a good solution, and is almost never necessary in web applications. There might be an event you could use that you’re missing. What’s your real problem?
Here's a tested example of how to use yield as a direct replacement for DoEvents.
(I've used Web Workers, and they're great, but they're far removed from DoEvents, and it's near impossible to access global variables.) This has been formatted for ease of understanding, and it attempts to show how the extras required (to make the function handle yield) can be treated as an insertion within the original function. yield has all sorts of other features, but used this way it is a near-direct replacement for DoEvents.
// Replace DoEvents with yield ( https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/yield )
var misc = 0; // sample external var

function myfunction() { // This is the beginning of your original function, which is effectively replaced by a handler inserted as follows..
    // -----------------------------------Insert Handler..
    var obj = myfuncGen.next(); // start it
    if (obj.done == false) {
        setTimeout(myfunction, 150); // adjust for the amount of time you wish to yield (depends how much screen drawing is required etc.)
    }
}

var myfuncGen = myfuncSurrogate(); // creates a "Generator" out of next.

function* myfuncSurrogate() { // This is the original function repackaged! Note the asterisk.
    // -------------------------------------End Insert
    var ms; // ...your original function continues here....
    for (var i = 1; i <= 9; i++) { // sample 9x loop
        ms = new Date().getTime();
        while (new Date().getTime() < ms + 500); // PAUSE (get time & wait 500ms) as an example of being busy
        misc++; // example of manipulating an external var
        outputdiv.innerHTML = "Output Div<br>demonstrating progress.. " + misc;
        yield; // replacement for your DoEvents; all internal stack state and variables effectively hibernate.
    }
    console.log("done");
}

myfunction(); // and start by calling here. Note that you cannot use "return" to get a value back, except via callbacks.
<div id='outputdiv' align='center'></div>
If you are new to all this, be aware that without the insertion and the yield keyword, you would simply wait about 5 seconds while nothing happened, and then the progress div would read "9" (because all the other changes to the div were invisible).
I'm having trouble using Mongoskin to perform bulk inserting (MongoDB 2.6+) on Node.
var dbURI = urigoeshere;
var db = mongo.db(dbURI, {safe:true});
var bulk = db.collection('collection').initializeUnorderedBulkOp();

for (var i = 0; i < 200000; i++) {
    bulk.insert({number: i}, function() {
        console.log('bulk inserting: ', i);
    });
}

bulk.execute(function(err, result) {
    res.json('send response statement');
});
The above code gives the following warnings/errors:
(node) warning: possible EventEmitter memory leak detected. 51 listeners added. Use emitter.setMaxListeners() to increase limit.
TypeError: Object #<SkinClass> has no method 'execute'
Is it possible to use Mongoskin to perform unordered bulk operations? If so, what am I doing wrong?
You can do it, but you need to change your calling convention: only the "callback" form will actually return a collection object from which the .initializeUnorderedBulkOp() method can be called. There are also some usage differences from how you might think this works:
var dbURI = urigoeshere;
var db = mongo.db(dbURI, {safe:true});

db.collection('collection',function(err,collection) {
    var bulk = collection.initializeUnorderedBulkOp();
    var count = 0;

    for (var i = 0; i < 200000; i++) {
        bulk.insert({number: i});
        count++;

        if ( count % 1000 == 0 )
            bulk.execute(function(err,result) {
                // maybe do something with results
                bulk = collection.initializeUnorderedBulkOp(); // reset after execute
            });
    }

    // If your loop was not a round divisor of 1000
    if ( count % 1000 != 0 )
        bulk.execute(function(err,result) {
            // maybe do something here
        });
});
So the actual "Bulk" methods themselves don't require callbacks and work exactly as shown in the documentation. The exeception is .execute() which actually sends the statements to the server.
While the driver will sort this out for you somewhat, it probably is not a great idea to queue up too many operations before calling execute. This basically builds up in memory, and though the driver will only send in batches of 1000 at a time ( this is a server limit as well as the complete batch being under 16MB ), you probably want a little more control here, at least to limit memory usage.
That is the point of the modulo tests as shown, but if memory for building the operations and a possibly really large response object are not a problem for you then you can just keep queuing up operations and call .execute() once.
The "response" is in the same format as given in the documentation for BulkWriteResult.
I am developing a web application that gets user updates from a web service (in another domain). I want to get the updates every 10 seconds.
For calling the service the first time, I dynamically insert a script tag in the page; the service is JSONP. Then I would like to define a trigger that inserts a script every 10 seconds to get the updates. Is this correct? Can I do that without affecting the user experience on the website? I mean the site performance... It would be great if I could make the call async and update the status when I have the results.
Is there any better solution for accessing remote services? Is there an efficient way of dynamically reusing the same script with a trigger? I am pretty new to JavaScript. Can you give me a short sample of how to define a trigger that calls a remote web service, or suggest a better solution?
I suggest that in your AJAX callback, when you get the result, you schedule a timer (window.setTimeout(fn, t)) so that your updating script is called again.
The reason to set the timer in the AJAX callback is that you don't know exactly how long the AJAX call will take to complete. This way, you ensure a smooth 10 second delay between successive updates.
About performance, you will have to check that. It depends on the amount of data and the kind of processing you do on it (and the rest of your page), but you can try it and check processor usage.
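A minimal sketch of that pattern, assuming jQuery's JSONP support (the URL and the updateStatus helper are hypothetical):

function pollUpdates() {
    // 'callback=?' makes jQuery issue a JSONP request to the other domain
    $.getJSON('http://service.example.com/updates?callback=?', function(data) {
        updateStatus(data);                    // hypothetical UI update
        window.setTimeout(pollUpdates, 10000); // schedule the next call only after this one completes
    });
}
pollUpdates();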
The following will dynamically create the script tags, and delete them (and the global each call requires) after a given call has finished.
You could also use CORS to allow requests besides GET ones, though as you may be aware, that is not supported in older browsers.
To avoid race conditions or performance problems on a slow network, you could let the JSONP callback recursively call the function, making a new call only once the previous callback has returned, with an optional setTimeout call to ensure there is at least a minimum delay.
The following uses Wikipedia's API to grab a specific page revision and its user.
<script>
var JSONP = function(global){
    // (C) WebReflection Essential - Mit Style ( http://webreflection.blogspot.com/2011/02/all-you-need-for-jsonp.html )
    // 202 bytes minified + gzipped via Google Closure Compiler
    function JSONP(uri, callback) {
        function JSONPResponse() {
            try { delete global[src] } catch(e) { global[src] = null }
            documentElement.removeChild(script);
            callback.apply(this, arguments);
        }
        var
            src = prefix + id++,
            script = document.createElement("script")
        ;
        global[src] = JSONPResponse;
        documentElement.insertBefore(
            script,
            documentElement.lastChild
        ).src = uri + "=" + src;
    }
    var
        id = 0,
        prefix = "__JSONP__",
        document = global.document,
        documentElement = document.documentElement
    ;
    return JSONP;
}(this);
// Be sure to include the callback parameter at the end
function startAPI (start) {
    start = start || new Date();
    var url = 'http://en.wikipedia.org/w/api.php?action=query&prop=revisions&titles=Main%20Page&rvprop=timestamp|user|comment|content&format=json&callback';
    var minimum = 10000;

    function execute (str) {
        alert(str);
    }

    JSONP(url, function (obj) {
        for (var pageNo in obj.query.pages) {
            var page = obj.query.pages[pageNo];
            var str = 'The user ' + page.revisions[0]['user'] + ' left the page with this code ' + page.revisions[0]['*'];
            execute(str);
            var elapsed = (new Date().getTime()) - start;
            setTimeout(startAPI, (elapsed < minimum) ? (minimum - elapsed) : 0);
            break;
        }
    });
}

startAPI();
</script>
I would make use of JavaScript's setInterval method
function getUpdate () {
    // AJAX goes here
}

var myInterval = setInterval(getUpdate, 10000);
This way you'll need to inject the script tag only once.