Checking if file exists on server without using XMLHttpRequest - javascript

I am trying to use a promise with a setInterval function to keep checking whether a file exists (it could still be generating on the backend) and then, once it is available, run the renderPage function as shown in my setupFunction(). Currently the program just keeps running until the setInterval maxes out, and then it resolves even if the file is there.
async checkFileExist(path, timeout = 20000) {
    let totalTime = 0;
    let checkTime = timeout / 10;
    var self = this;
    return await new Promise((resolve, reject) => {
        const timer = setInterval(function () {
            totalTime += checkTime;
            let fileExists = self.fileExist(path);
            if (fileExists || totalTime >= timeout) {
                alert(totalTime);
                clearInterval(timer);
                resolve(fileExists);
            }
        }, checkTime);
    })
},
fileExist(urlToFile) {
    var xhr = new XMLHttpRequest();
    xhr.open('HEAD', urlToFile, false);
    xhr.send();
    if (xhr.status == "404") {
        alert('false 404 ' + urlToFile)
        return false;
    } else {
        alert('true file found ' + urlToFile)
        return true;
    }
},
setupFunction() {
    var self = this
    if (this.filedir == null || this.filedir == "") {
        self.loadingFile = false
    } else {
        this.checkFileExist(this.filedir).then(function (response) {
            self.loadingFile = false;
            self.renderPage(self.pageNum); // `this` is not the component inside this callback, so use `self`
        }, function (error) {
            console.error("Failed!", error);
        })
    }
}
},
mounted() {
    this.setupFunction()
},
watch: {
    filedir: function () { // watch it
        this.setupFunction()
    },
}

That won't tell you if a file exists. It will tell you if a URL finds a resource. It's an important distinction since a URL could be handled by server-side code and not a simple static file, while a file might exist and not have a URL.
Making an HTTP request is the simplest way to find out if a URL finds a resource. There are other ways to make HTTP requests (such as the fetch API), but they aren't faster, just different.
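For reference, the same HEAD check can be sketched with the fetch API; this is a minimal, untested sketch, and it assumes the backend answers HEAD requests for the generated file:

// Minimal sketch using fetch instead of a synchronous XMLHttpRequest.
// Assumes the server responds to HEAD requests for the generated file.
async function fileExistsViaFetch(urlToFile) {
    try {
        const response = await fetch(urlToFile, { method: 'HEAD' });
        return response.ok; // true for 2xx, false for 404 and other error statuses
    } catch (err) {
        return false; // network failure - treat as "not there yet"
    }
}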
A potentially faster (but much more complicated) way would be to use a Websocket. You would need to have it open before you want to know if the URL exists (otherwise any time savings are lost to establishing the Websocket connection), and you would need to write the server-side code that reacts to the Websocket message by working out whether the desired URL exists and telling the client.
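A rough client-side sketch of that idea; the endpoint URL and the { path, exists } message format are invented purely for illustration, and the matching server-side code is not shown:

// Hypothetical protocol: the client asks about a path, the server replies
// with a JSON message { path, exists } once it knows the answer.
const socket = new WebSocket('wss://example.com/file-status'); // assumed endpoint

function waitForFile(path) {
    return new Promise((resolve) => {
        socket.addEventListener('message', function handler(event) {
            const msg = JSON.parse(event.data);
            if (msg.path === path && msg.exists) {
                socket.removeEventListener('message', handler);
                resolve(true);
            }
        });
        // Ask the server to watch this path, waiting for the connection if needed.
        if (socket.readyState === WebSocket.OPEN) {
            socket.send(JSON.stringify({ path }));
        } else {
            socket.addEventListener('open', () => socket.send(JSON.stringify({ path })), { once: true });
        }
    });
}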

Related

Node.JS: Best way to handle max requests per minute to a server

I'm sorry if this question has already been answered, and I hope I'm not breaking any SO rule; if so, I apologise in advance...
I was wondering what is the best way to handle a request limiter? I've seen a bunch of throttles and rate-limiters online but I'm not sure how, or if, they could apply in my case.
I'm making a bunch of [OUTGOING] request-promises based on an array, and the server I'm calling only allows 90 requests per minute. My request-promises are generated by this command: return Promise.all(array.map(request)).
I was thinking of handling it like this:
var i = 0;
return rp({
    url: uri,
    json: true,
}).then((data) => {
    if (i <= 90) {
        i++;
        return data;
    } else {
        return i;
    }
});
but I'm not sure it will be a really effective way to handle it, and I'm not sure how to handle the time relation yet... :S
Thanks in advance for your help, and sorry, I'm still a huge beginner...
You can use setInterval. Check out the documentation here.
var requestCount = 0;
setInterval(function () {
    // Every 60 seconds, reset the count
    requestCount = 0;
}, 60000);

// There needs to be an additional check before calling rp,
// that checks for requestCount > 90, and returns before starting the request.
rp({
    url: uri,
    json: true,
})
.then((data) => {
    requestCount++;
    return data;
});
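A possible shape for that additional check, written as a small wrapper around rp; the hard-coded limit of 90 and the choice to reject (rather than queue) over-budget calls are assumptions for illustration:

// Hypothetical gate around rp: refuses to start a request once the
// per-minute budget is spent. requestCount is reset by the interval above.
function limitedRp(options) {
    if (requestCount >= 90) {
        return Promise.reject(new Error('Rate limit reached, retry next minute'));
    }
    requestCount++;
    return rp(options);
}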
If the requests are started from different parts of the code, it might be useful to implement something like a server queue which holds a request back until it is allowed to run. The general handler:
// Assumes a promise-returning HTTP client such as request-promise.
var request = require("request-promise");

var fromUrl = new Map();

function Server(url, maxPerMinute) {
    if (fromUrl.has(url)) return fromUrl.get(url);
    fromUrl.set(url, this);
    this.tld = url;
    this.maxPerMinute = maxPerMinute;
    this.queue = [];
    this.running = false;
}

Server.prototype = {
    run(d) {
        if (this.running && !d) return;
        var curr = this.queue.shift();
        if (!curr) {
            this.running = false;
            return;
        }
        this.running = true;
        var [url, resolve] = curr;
        // Wait for both the response and the minimum spacing between requests.
        Promise.all([
            request(this.tld + url),
            new Promise(res => setTimeout(res, 1000 * 60 / this.maxPerMinute))
        ]).then(([res]) => {
            resolve(res);
            this.run(true);
        });
    },
    request(url) {
        return new Promise(res => {
            this.queue.push([url, res]);
            this.run();
        });
    }
};

module.exports = Server;
Usable like this:
var Server = require("./server"); // the module defined above
var google = new Server("http://google.com");
google.maxPerMinute = 90;
google.request("/api/v3/hidden/service").then(res => ...);
Or just do the requests in batches of 90 per minute. You could use a pseudo-recursive, promise-based function:
function multiRequest(urls, maxPerMinute) {
    return new Promise(function (cb) {
        var result = [];
        // iterate recursively
        (function next(i) {
            // if finished, resolve the promise
            if (i >= urls.length) return cb(result);
            // start the next batch of requests
            var requests = Promise.all(urls.slice(i, i + maxPerMinute).map(request));
            // once the requests are done, add them to the result
            requests.then(data => result.push(...data));
            // once the requests plus one minute are done, continue with the next batch
            Promise.all([
                requests,
                new Promise(res => setTimeout(res, 1000 * 60))
            ]).then(_ => next(i + maxPerMinute));
        })(0);
    });
}
Use it like this:
multiRequests(["google.com","stackoverflow.com"],90)
.then(([google,so])=>...);

Retry when no response from website

I use the recursive function below in order to re-open a website if httpstatus != 200:
retryOpen = function () {
    this.thenOpen("http://www.mywebsite.com", function (response) {
        utils.dump(response.status);
        var httpstatus = response.status;
        if (httpstatus != 200) {
            this.echo("FAILED GET WEBSITE, RETRY");
            this.then(retryOpen);
        } else {
            var thisnow = hello[variable];
            this.evaluate(function (valueOptionSelect) {
                $('select#the_id').val(valueOptionSelect);
                $('select#the_id').trigger('change');
            }, thisnow);
        }
    });
}
The problem is that sometimes the retryOpen function does not even get as far as the callback function(response){}. Then my script freezes.
I wonder how one could change the function so that it recursively tries to open the website again when there is no response at all (not even an error code such as 404)? In other words, how do I rewrite the retryOpen function so that it reruns when the callback is not reached within a certain amount of time?
I would try something like this. Please note this is untested code, but should get you on the correct path
retryOpen = function (maxretry) {
    var count = 0;

    function makeCall(url) {
        this.thenOpen(url, function (response) {
            utils.dump(response.status);
        });
    }

    function openIt() {
        makeCall.call(this, "http://www.mywebsite.com");
        this.waitFor(function check() {
            var res = this.status(false);
            return res.currentHTTPStatus === 200;
        }, function then() {
            var thisnow = hello[variable];
            this.evaluate(function (valueOptionSelect) {
                $('select#the_id').val(valueOptionSelect);
                $('select#the_id').trigger('change');
            }, thisnow);
        }, function timeout() { // step to execute if the check has failed
            if (count < maxretry) {
                openIt.call(this);
            }
            count++;
        },
        1000 // wait 1 sec
        );
    }

    openIt.call(this); // keep the casper instance as `this`
}
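Since the sketch relies on this.thenOpen and this.waitFor, it presumably has to be invoked with the casper instance as this; a hedged usage example:

// Assumed usage - retryOpen needs the casper instance as `this`.
casper.start("about:blank");
casper.then(function () {
    retryOpen.call(this, 5); // give up after 5 retries
});
casper.run();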

What's the best(right) way to write a polling method (with Typescript & AngularJS)?

I am trying to write a polling method that polls a server periodically to check whether a zip file has already been created or not.
What I want to accomplish are the following:
Calls(ajax) an API that creates a zip file on server
Calls(ajax) another API that checks if the zip file has already been created (polling method)
Some subsequent process
Here is my code snippet ↓
var success: boolean = false;

//1. requests a server to create a zip file
this.apiRequest.downloadRequest(params, ApiUrl.URL_FOR_DOWNLOAD_REQUEST)
    .then((resObj) => {
        var apiRes: IDownloadService = resObj.data;
        if (apiRes.status[0].statusCode == "000") {
            success = true;
        } else {
            //Error
        }
    }).then(() => {
        if (success) {
            //2. polls the server to check if the zip file is ready
            // <- Polling method ↓ ->
            this.polling(params).then((zipUrl) => {
                console.log(zipUrl); //always logs zipUrl
                //some subsequent process...
            });
        }
    });
Could anyone give some examples of polling method that would work in this case?
Added:
private polling(params: any): ng.IPromise<any> {
    var poller = () => this.apiRequest.polling(params, ApiUrl.URL_FOR_POLLING);
    var continuation = () => poller().then((resObj) => {
        var apiRes: IDownloadService = resObj.data;
        if (apiRes.zipFilePath == "") {
            return this.$timeout(continuation, 1000);
        } else {
            return apiRes.zipFilePath;
        }
    })
    var result: ng.IPromise<any> = continuation();
    return result;
}
Basically abstract the methods out as shown below:
let poll = () => this.apiRequest.downloadRequest(params, ApiUrl.URL_FOR_DOWNLOAD_REQUEST)
let continuation = () => poll().then((/* something */) => {
    /* if still bad */ return continuation();
    /* else         */ return good;
})
continuation().then((/* definitely good */));
Update
As requested in the comment below:
return this.$timeout(continuation, 1000);
This is needed to get angular to kick off a digest cycle.
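To illustrate the difference (the names vm.zipUrl and url here are hypothetical):

// With plain setTimeout the model changes, but the view won't refresh
// until something else happens to trigger a digest.
setTimeout(() => { vm.zipUrl = url; }, 1000);
// $timeout wraps the callback so a digest runs after it fires.
this.$timeout(() => { vm.zipUrl = url; }, 1000);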

memory leak in ajax - setInterval

I have some AJAX code which causes a memory leak (especially in IE).
function setStatus() {
    var formInput = $(this).serialize();
    $.getJSON('CheckStatus.action', formInput, function (data) {
        if (data == false) {
            function getEventsPeriodicaly() {
                getEvents();
            };
            var timer = setInterval(function () { getEventsPeriodicaly(); }, 5000);
        }
    });
}
function getEvents() {
    var formInput = $(this).serialize();
    $.getJSON('StartEP.action', formInput, function (data) {
        var txt = $("#txtEventsArea");
        if (data != null && data.toString().length != 0) {
            txt.val(data.join('\n') + '\n' + txt.val());
            data = null;
        }
    });
}
StartEP
public String startEP() throws Exception {
    logger.info("[EP] In startEP");
    try {
        synchronized (status) {
            if (!getStatus()) {
                EventProcessor amiep = EventProcessor.getInstance();
                amiep.addObserver(this);
                new Thread(amiep).start();
                setStatus(true);
            }
        }
    } catch (Exception ex) {
        logger.error("Unable to start EP", ex);
        return ERROR;
    }
    logger.info("[EP] In startEP, before loop");
    while (!gotNewData) {
        Thread.sleep(4000);
    }
    gotNewData = false;
    logger.info("[EP] Out startEP");
    return SUCCESS;
}
The StartEP action returns messages (about 5KB on each request). At first I thought the leak was related to setting the text in the textarea, but after some tests I found that is not the reason. Could it be the setInterval method?
Are there any other considerations?
Thanks
I would say this looks pretty suspect:
while (!gotNewData) {
    Thread.sleep(4000);
}
Where is gotNewData set? If you call the web service once and set gotNewData to true, and then call another web service and set gotNewData to false, I don't think there is any guarantee that you are setting the variable within the same instance of the application. If you are not, then every time you hit the web service you are starting a new thread and then repeatedly putting it back to sleep.
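On the client side it is also worth noting that every CheckStatus response with data == false sets up a new interval, and the timer handle is never cleared, so if setStatus runs more than once the intervals pile up. A minimal sketch of a guard, assuming a module-level handle is acceptable in your page:

var pollTimer = null; // hypothetical module-level handle

function startPolling() {
    if (pollTimer !== null) return; // already polling - don't stack intervals
    pollTimer = setInterval(getEvents, 5000);
}

function stopPolling() {
    if (pollTimer !== null) {
        clearInterval(pollTimer);
        pollTimer = null;
    }
}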

How to make a Firefox addon listen to xmlhttprequests from a page?

Background
I have an existing extension designed to accompany a browser-based game (the extension is mine, the game is not). The extension had been scraping the pages as they came in for the data it needed and making ajax requests to take any actions.
Problem
The game developers recently changed a number of actions on the site to use ajax requests and I am thus far unable to get the data from those requests.
What I have so far
function TracingListener() {
}

TracingListener.prototype =
{
    originalListener: null,
    receivedData: [], // array for incoming data.

    onDataAvailable: function (request, context, inputStream, offset, count)
    {
        var binaryInputStream = CCIN("@mozilla.org/binaryinputstream;1",
                                     "nsIBinaryInputStream");
        var storageStream = CCIN("@mozilla.org/storagestream;1", "nsIStorageStream");
        binaryInputStream.setInputStream(inputStream);
        storageStream.init(8192, count, null);
        var binaryOutputStream = CCIN("@mozilla.org/binaryoutputstream;1",
                                      "nsIBinaryOutputStream");
        binaryOutputStream.setOutputStream(storageStream.getOutputStream(0));

        // Copy received data as they come.
        var data = binaryInputStream.readBytes(count);
        this.receivedData.push(data);
        binaryOutputStream.writeBytes(data, count);

        this.originalListener.onDataAvailable(request, context, storageStream.newInputStream(0), offset, count);
    },

    onStartRequest: function (request, context) {
        this.originalListener.onStartRequest(request, context);
    },

    onStopRequest: function (request, context, statusCode)
    {
        try {
            if (request.originalURI && piratequesting.baseURL == request.originalURI.prePath && request.originalURI.path.indexOf("/index.php?ajax=") == 0) {
                dump("\nProcessing: " + request.originalURI.spec + "\n");
                var date = request.getResponseHeader("Date");
                var responseSource = this.receivedData.join();
                dump("\nResponse: " + responseSource + "\n");
                piratequesting.ProcessRawResponse(request.originalURI.spec, responseSource, date);
            }
        } catch (e) { dumpError(e); }
        this.originalListener.onStopRequest(request, context, statusCode);
    },

    QueryInterface: function (aIID) {
        if (aIID.equals(Ci.nsIStreamListener) ||
            aIID.equals(Ci.nsISupports)) {
            return this;
        }
        throw Components.results.NS_NOINTERFACE;
    }
}
hRO = {
    observe: function (aSubject, aTopic, aData) {
        try {
            if (aTopic == "http-on-examine-response") {
                if (aSubject.originalURI && piratequesting.baseURL == aSubject.originalURI.prePath && aSubject.originalURI.path.indexOf("/index.php?ajax=") == 0) {
                    var newListener = new TracingListener();
                    aSubject.QueryInterface(Ci.nsITraceableChannel);
                    newListener.originalListener = aSubject.setNewListener(newListener);
                    dump("\n\nObserver Processing: " + aSubject.originalURI.spec + "\n");
                    for (var i in aSubject) {
                        dump("\n\trequest." + i);
                    }
                }
            }
        } catch (e) {
            dumpError(e);
        }
    },

    QueryInterface: function (aIID) {
        if (aIID.equals(Ci.nsIObserver) ||
            aIID.equals(Ci.nsISupports)) {
            return this;
        }
        throw Components.results.NS_NOINTERFACE;
    }
};
var observerService = Cc["@mozilla.org/observer-service;1"].getService(Ci.nsIObserverService);
observerService.addObserver(hRO, "http-on-examine-response", false);
What's happening
The above code is notified properly when an http request is processed. The uri is also available and is correct (it passes the domain/path check) but the responseSource that gets dumped is, as far as I can tell, always the contents of the first http request made after the browser opened and, obviously, not what I was expecting.
The code above comes in large part from http://www.softwareishard.com/blog/firebug/nsitraceablechannel-intercept-http-traffic/. I'm really hoping that it's just something small that I've overlooked but I've been banging my head against the desk for days on this one, and so now I turn to the wisdom of SO. Any ideas?
"but the responseSource that gets dumped is, as far as I can tell, always the contents of the first http request made after the browser opened and, obviously, not what I was expecting."
There is a problem with the code above. The receivedData member is declared on the prototype object and has an empty array assigned, which means every instance of the TracingListener class shares the same array in memory for receivedData. Changing your code as follows might solve the problem:
function TracingListener() {
    this.receivedData = [];
}

TracingListener.prototype =
{
    originalListener: null,
    receivedData: null, // array for incoming data.
    /* skipped */
}
Not sure though if this will solve your original problem.
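The pitfall is easy to reproduce in isolation; a minimal illustration:

// Arrays defined on the prototype are shared by every instance.
function Listener() {}
Listener.prototype.receivedData = [];

var a = new Listener();
var b = new Listener();
a.receivedData.push("chunk from a");
console.log(b.receivedData); // ["chunk from a"] - both instances see the same array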
