Axios create shorter duration for ETIMEDOUT - javascript

I have a server application (we'll call it ServerApp1) which is going to be running on an Azure VM instance. I have a separate server (we'll call it ServerApp2) on a different machine which will be communicating with ServerApp1 and a separate client. The Azure VM is going to be spun up and/or down depending on need, so it's quite possible that the VM (and thus ServerApp1) isn't even alive to respond to requests from ServerApp2. My client is polling ServerApp2 to ask for the status of ServerApp1, but if the VM is currently down then that request hangs for about 20 seconds before failing with the error code ETIMEDOUT.

What I'd like is for ServerApp2 to make the request to ServerApp1 to see if it's alive, but after about 1 or 2 seconds of not getting a response, to simply stop and tell the client that it's not currently running. I thought I could get away with adding a {timeout: 2000} parameter to my axios call, but this doesn't seem to change the behavior in any noticeable way.
Here's the function that gets called when the client asks ServerApp2 what the status is of ServerApp1:
router.get('/getCurrentConsoleStatus', function(req, res) {
  async function getStatus() {
    try {
      const result = await consoleDataService.getConsoleStatus();
      if (result.message === 'Begin listen for job.') {
        console.log('The app is ready!');
      } else {
        console.log('The console app is not ready');
      }
    } catch (error) {
      console.log(`Error communicating with console app: ${error}`);
    }
  }
  getStatus();
});
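As written, the route only logs the result and never sends anything back to the polling client. A minimal sketch of how the handler might reply (the response shape is an assumption, not from the question):

router.get('/getCurrentConsoleStatus', async function(req, res) {
  try {
    const result = await consoleDataService.getConsoleStatus();
    // Same readiness check as above, reported back to the client
    res.json({ ready: result.message === 'Begin listen for job.' });
  } catch (error) {
    // e.g. ETIMEDOUT / ECONNREFUSED while the VM is down
    res.json({ ready: false, error: String(error) });
  }
});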
I have a function which creates the root Axios object:
var axios = require('axios');

module.exports = axios.create({
  baseURL: 'baseURL',
  timeout: 1000,
  headers: {
    'Content-type': 'application/json'
  }
});
And then the function that gets called in consoleDataService.getConsoleStatus() looks like this:
exports.getConsoleStatus = async function() {
  const res = await axios({
    method: 'get',
    url: '/status'
  });
  return res.data;
};
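For reference, the timeout can also be set per request instead of on the instance; a minimal sketch (the 2000 ms value mirrors the goal stated in the question). In this case the option reportedly did not change the observed behavior, which is what motivated the Promise.race approach below.

exports.getConsoleStatus = async function() {
  // Per-request timeout override (2000 ms); the instance-level
  // timeout of 1000 ms above would otherwise apply
  const res = await axios({
    method: 'get',
    url: '/status',
    timeout: 2000
  });
  return res.data;
};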

Thanks to #JonEdwards for the suggestion to use Promise.race. I ended up solving the issue with this function, which takes whichever promise resolves first.
exports.getConsoleStatus = () => {
  // Resolves with { status: false } after 2 seconds
  var sleep = function() {
    return new Promise(resolve => {
      setTimeout(function() {
        resolve({ status: false });
      }, 2000);
    });
  };

  // `http` refers to the axios instance created with axios.create() above
  var fetch = async function() {
    const res = await http({
      method: 'get',
      url: '/status'
    });
    return res.data;
  };

  // Whichever settles first wins: the 2-second fallback or the real response
  async function getStatus() {
    const asyncFunctions = [sleep(), fetch()];
    const result = await Promise.race(asyncFunctions);
    return result;
  }

  return getStatus();
};
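One thing worth noting about the race: when the 2-second fallback wins, the underlying HTTP request keeps running until it fails on its own. A sketch (not part of the original answer) of how the losing request could be aborted instead, assuming an axios version new enough to accept an AbortController signal:

exports.getConsoleStatus = async () => {
  const controller = new AbortController();
  // Abort the status request if it hasn't answered within 2 seconds
  const timer = setTimeout(() => controller.abort(), 2000);
  try {
    const res = await http({ method: 'get', url: '/status', signal: controller.signal });
    return res.data;
  } catch (err) {
    // An aborted or failed request is reported as "not running"
    return { status: false };
  } finally {
    clearTimeout(timer);
  }
};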

Related

Firebase function timeout when calling external api

I'm trying to call an external API in Firebase Functions, but I always get a timeout.
What could be causing this?
Here is my code
exports.getCountryData = functions.https.onRequest((request, response) => {
  const https = require('https');
  const options = {
    hostname: "api-football-v1.p.rapidapi.com/v3",
    path: '/fixtures?next=5',
    headers: {
      "x-rapidapi-host": "api-football-v1.p.rapidapi.com/v3",
      "x-rapidapi-key": "my-api-key"
    }
  };
  var req = https.get(options, (resp) => {
    let data = '';
    resp.on('data', (chunk) => { data += chunk; });
    resp.on('end', () => {
      var result = JSON.parse(data);
      console.log("Api fetched successfully");
      console.log(result);
      response.send({ fulfillmentText: result });
    });
  }).on("error", (err) => { console.log("Error: " + err.message); });
});
An event-driven function may fail to successfully complete due to errors thrown in the function code itself. Some of the reasons this might happen are as follows:
The function contains a bug and the runtime throws an exception.
The function cannot reach a service endpoint, or times out while trying to reach the endpoint.
The function intentionally throws an exception (for example, when a parameter fails validation).
The function (when written in Node.js) returns a rejected promise or passes a non-null value to a callback.
In any of the above cases, the function stops executing by default and the event is discarded. If you want to retry the function when an error occurs, you can change the default retry policy by setting the "retry on failure" property. This causes the event to be retried repeatedly for up to multiple days until the function successfully completes.
In this question, the service endpoint 'api-football-v1.p.rapidapi.com/v3' itself was taking too long to respond (it was not reachable), and that was the issue. Changing the API endpoint to v3.football.api-sports.io and then calling the external API in Firebase Functions solved the issue for our user #tate_xy
It turns out that using their Rapid API url (api-football-v1.p.rapidapi.com/v3) was resulting in a timeout. Using the direct API url (v3.football.api-sports.io) with their domain name in it did the trick for me.
Here is my working code.
exports.getCountryData = functions.https.onRequest((request, response) => {
  const https = require('https');
  const options = {
    hostname: "v3.football.api-sports.io",
    path: '/fixtures?next=5',
    headers: {
      "x-rapidapi-host": "v3.football.api-sports.io",
      "x-apisports-key": "my-api-key"
    }
  };
  var req = https.get(options, (resp) => {
    let data = '';
    resp.on('data', (chunk) => { data += chunk; });
    resp.on('end', () => {
      var result = JSON.parse(data);
      console.log("Api fetched successfully");
      console.log(result);
      response.send({ fulfillmentText: result });
    });
  }).on("error", (err) => { console.log("Error: " + err.message); });
});
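Separately from the endpoint fix, it can help to put an explicit timeout on the outgoing request so that an unreachable host fails fast instead of hanging until the function itself times out. A rough sketch, not from the thread (the 5-second value is arbitrary):

exports.getCountryData = functions.https.onRequest((request, response) => {
  const https = require('https');
  const options = {
    hostname: "v3.football.api-sports.io",
    path: '/fixtures?next=5',
    headers: {
      "x-rapidapi-host": "v3.football.api-sports.io",
      "x-apisports-key": "my-api-key"
    }
  };

  const req = https.get(options, (resp) => {
    let data = '';
    resp.on('data', (chunk) => { data += chunk; });
    resp.on('end', () => {
      response.send({ fulfillmentText: JSON.parse(data) });
    });
  });

  // Abort the outgoing request if no response arrives within 5 seconds
  req.setTimeout(5000, () => {
    req.destroy(new Error('External API request timed out'));
  });

  req.on('error', (err) => {
    console.log('Error: ' + err.message);
    response.status(500).send({ error: err.message });
  });
});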

Sinon fake server doesn't respond when APIs are invoked from a callback of setTimeout

As the title of this question says, the Sinon fake server is not responding to the API/AJAX calls made from a callback of setTimeout.
Status.js
_checkStatus() {
  if (...) {
    ....
  } else if (...) {
    ....
  } else {
    setTimeout(async () => {
      this.status = await services.getStatus();
      this._checkStatus();
    }, 100);
  }
}
Status.test.js
const API_STATUS = /\/status/;
const cType = { 'Content-Type': 'application/json' };
const mocks = {};
mocks.get = { code: 'ready' };

const server = fakeServer.create();
server.respondImmediately = true;
server.respondWith(API_STATUS, [200, cType, JSON.stringify(mocks.get)]);

el._checkStatus();

setTimeout(function(done) {
  expect(el.status).to.equal('ready');
  done();
}, 500);
});
When we execute this test, it invokes the mocks that are defined in the repo folder but does not pick up the fake server mock defined above. However, if I move the getStatus() call outside the setTimeout, it works with the fake server. Just an FYI: getStatus() is what invokes the status API call.
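The thread does not include an accepted answer, but one common way to make this kind of test deterministic is to drive the 100 ms setTimeout with Sinon's fake timers instead of waiting with a real timer. A rough sketch, assuming a recent Sinon version (for clock.tickAsync) and that el is the component under test, as in the snippet above:

const sinon = require('sinon');

it('picks up the fake server response from inside setTimeout', async () => {
  const clock = sinon.useFakeTimers();
  const server = sinon.fakeServer.create();
  server.respondImmediately = true;
  server.respondWith(/\/status/, [
    200,
    { 'Content-Type': 'application/json' },
    JSON.stringify({ code: 'ready' })
  ]);

  el._checkStatus();

  // Advance the faked clock past the 100 ms delay; tickAsync also flushes
  // the promises created by the async setTimeout callback
  await clock.tickAsync(100);

  expect(el.status).to.equal('ready');

  server.restore();
  clock.restore();
});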

How do I call Airtable rest API from inside an AWS Lambda?

I am trying to call my REST API endpoint in Airtable from inside an AWS Lambda with no success. I get no errors and no output.
If I call the same code using Node directly, it works.
I am able to use Axios in my code.
Pure airtable code (works)
var Airtable = require('airtable');
var base = new Airtable({apiKey: 'keyoMYSECRETKEY'}).base('Mybaseid');
base('MyBase').select({maxRecords: 3,view: "MyView"}).eachPage(function page(records, fetchNextPage) {
// This function (`page`) will get called for each page of records.
records.forEach(function(record) {
console.log('Retrieved',JSON.stringify(record.get('Session Information')));
});
fetchNextPage();
}, function done(err) {
if (err) { console.error(err); return; }
});
If I put it inside a Lambda handler - I get nothing.
const axios = require('axios');
const url = 'https://checkip.amazonaws.com/';
var Airtable = require('airtable');
var base = new Airtable({ apiKey: 'keySECRETKEY' }).base('MYBASEID');

let response;

exports.lambdaHandler = async (event, context) => {
  try {
    base('MyBase').select({ maxRecords: 3, view: "MyView" }).eachPage(function page(records, fetchNextPage) {
      records.forEach(function(record) { // HERE - NOTHING HAPPENS
        console.log('Retrieved', JSON.stringify(record.get('Session Information')));
      });
      fetchNextPage();
    }, function done(err) {
      if (err) { console.error(err); return; }
    });

    const ret = await axios(url); // THIS WORKS
    response = {
      'statusCode': 200,
      'body': JSON.stringify({
        message: 'hello world - boo',
        location: ret.data.trim()
      })
    };
  } catch (err) {
    console.log(err);
    return err;
  }
  return response;
};
What am I missing so I can call Airtable API from inside an AWS Lambda?
It seems that your Lambda terminates before the API call you're trying to perform has completed.
I believe this can be solved by making the handler wait for the call, i.e. with correct usage of promises and await.
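One way to make the handler actually wait for Airtable is to switch from the callback-based eachPage() to the promise-based all() and await it before building the response. A rough sketch along those lines (not the original poster's code):

const Airtable = require('airtable');
const base = new Airtable({ apiKey: 'keySECRETKEY' }).base('MYBASEID');

exports.lambdaHandler = async (event, context) => {
  try {
    // select().all() returns a promise, so the handler does not return
    // until the records have actually been fetched
    const records = await base('MyBase').select({ maxRecords: 3, view: 'MyView' }).all();
    records.forEach((record) => {
      console.log('Retrieved', JSON.stringify(record.get('Session Information')));
    });
    return {
      statusCode: 200,
      body: JSON.stringify({ count: records.length })
    };
  } catch (err) {
    console.log(err);
    return err;
  }
};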
Best way to troubleshoot this is to go back to the basics.
See if you can at least get a meaningful console log by wrapping a simpler fetch request into a lambda handler:
const baseId = 'exampleAppId123';
const tableName = 'Table 1';
const api_key = 'keyExample123';
const url = `https://api.airtable.com/v0/${baseId}/${tableName}?api_key=${api_key}`;

exports.lambdaHandler = async () => {
  const res = await fetch(url)
    .then(res => res.json())
    .then(data => console.log(data))
    .then(() => {
      // do more stuff
    });
};
Then report back if you can't. Or better yet, report back either way as that's bound to help more people in the future.
Worst case? The above code still doesn't do anything. If that happens, I suggest going with #Shoty's first instinct and turning this code into a synchronous fetch request by removing the async/await syntax and returning chained thenables. Not that blocking behavior of this sort is acceptable from a UX perspective, but it should at least help with debugging.

How to avoid sending multiple duplicate AJAX requests in axios

Is it possible to automatically throttle all requests going to a particular list of endpoints using axios? Perhaps using axios interceptor?
Currently I throttle the user action that sends the axios request, but the problem with that is that I have to write this everywhere I have a user action that results in some AJAX request, like this:
const throttledDismissNotification = throttle(dismissNotification, 1000);

const dismiss = (event: any) => {
  throttledDismissNotification();
};

render() {
  return (
    <Button onClick={dismiss}>Dismiss Notification</Button>
  );
}
This results in a lot of clutter and I was wondering if this could be automated.
Something like:
if (request.url in listOfEndpointsToThrottle && request.params in cacheOfPreviousRequestsToThisEndpoint) {
  StopRequest();
}
Obviously this is pseudocode but you get the idea.
Perhaps you could try to use the Cancellation feature that axios provides.
With it, you can ensure that you don't have any two (or more, depending on your implementation) similar requests in a pending state.
Below, you will find a small simplified example of how to ensure that only the latest request is processed. You can adjust it a bit to make it function like a pool of requests.
import axios, { CancelToken } from 'axios';

const pendingRequests = {};

// Attach a cancel token to the request config; if a request with the same id
// is already pending, cancel it first so only the latest one stays in flight.
const makeCancellable = (config, requestId) => {
  if (!requestId) {
    return config;
  }
  if (pendingRequests[requestId]) {
    // cancel an existing request
    pendingRequests[requestId].cancel();
  }
  const source = CancelToken.source();
  pendingRequests[requestId] = source;
  // note: the cancel token must live on the request config itself, not inside headers
  return { ...config, cancelToken: source.token };
};

const request = ({
  url,
  method = 'GET',
  headers,
  id
}) => {
  const requestConfig = makeCancellable({ url, method, headers: headers || {} }, id);

  return axios.request(requestConfig)
    .then((res) => {
      delete pendingRequests[id];
      return ({ data: res.data });
    })
    .catch((error) => {
      delete pendingRequests[id];
      if (axios.isCancel(error)) {
        console.log(`A request to url ${url} was cancelled`); // cancelled
      } else {
        return handleReject(error); // handleReject is assumed to be defined elsewhere
      }
    });
};

export default request;
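Usage might look like this; the id is whatever key you choose for grouping duplicate requests (the URL and id below are just examples):

// Two rapid calls with the same id: the first is cancelled, only the second resolves
request({ url: '/api/notifications/dismiss', method: 'POST', id: 'dismiss-notification' });
request({ url: '/api/notifications/dismiss', method: 'POST', id: 'dismiss-notification' })
  .then((result) => console.log(result));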
It's quite easy to throttle an axios request itself. The real headache is how to handle the promises that are returned from nullified requests. What is considered sane behavior when dealing with promises that are returned from a nullified axios request? Should they stay pending forever?
I don't see any perfect solution to this problem, but I came to a solution that is kind of cheating:
What if we don't throttle the axios call, instead we throttle the actual XMLHttpRequest?
This makes things way easier, because it avoids the promise problem, and it's easier to implement. The idea is to implement a cache for recent requests, and if a new request matches a recent one, you just pull the result from cache and skip the XMLHttpRequest.
Because of the way axios interceptors work, the following snippet can be used to skip a certain XHR call conditionally:
// This should be the *last* request interceptor to add
axios.interceptors.request.use(function (config) {
/* check the cache, if hit, then intentionally throw
* this will cause the XHR call to be skipped
* but the error is still handled by response interceptor
* we can then recover from error to the cached response
**/
if (requestCache.isCached(config)) {
const skipXHRError = new Error('skip')
skipXHRError.isSkipXHR = true
skipXHRError.request = config
throw skipXHRError
} else {
/* if not cached yet
* check if request should be throttled
* then open up the cache to wait for a response
**/
if (requestCache.shouldThrottle(config)) {
requestCache.waitForResponse(config)
}
return config;
}
});
// This should be the *first* response interceptor to add
axios.interceptors.response.use(function (response) {
requestCache.setCachedResponse(response.config, response)
return response;
}, function (error) {
/* recover from error back to normality
* but this time we use an cached response result
**/
if (error.isSkipXHR) {
return requestCache.getCachedResponse(error.request)
}
return Promise.reject(error);
});
I had a similar problem; through my research it seems there is no good solution. All I saw were some ad hoc solutions, so I opened an issue for axios, hoping someone can answer my question: https://github.com/axios/axios/issues/2118
I also found this article, Throttling Axios requests, but I did not try the solution it suggests.
And I have a related discussion: My implementation of debounce axios request left the promise in pending state forever, is there a better way?
I finished one; #hackape, thank you for your answer. The code is as follows:
const pendings = {};
const caches = {};

const cacheUtils = {
  getUniqueUrl: function (config) {
    // you can set the rule based on your own requirement
    return config.url + '&' + config.method;
  },
  isCached: function (config) {
    let uniqueUrl = this.getUniqueUrl(config);
    return caches[uniqueUrl] !== undefined;
  },
  isPending: function (config) {
    let uniqueUrl = this.getUniqueUrl(config);
    if (!pendings[uniqueUrl]) {
      pendings[uniqueUrl] = [config];
      return false;
    } else {
      console.log(`cache url: ${uniqueUrl}`);
      pendings[uniqueUrl].push(config);
      return true;
    }
  },
  setCachedResponse: function (config, response) {
    let uniqueUrl = this.getUniqueUrl(config);
    caches[uniqueUrl] = response;
    if (pendings[uniqueUrl]) {
      pendings[uniqueUrl].forEach(configItem => {
        configItem.isFinished = true;
      });
    }
  },
  getError: function (config) {
    const skipXHRError = new Error('skip');
    skipXHRError.isSkipXHR = true;
    skipXHRError.requestConfig = config;
    return skipXHRError;
  },
  getCachedResponse: function (config) {
    let uniqueUrl = this.getUniqueUrl(config);
    return caches[uniqueUrl];
  }
};

// This should be the *last* request interceptor to add
axios.interceptors.request.use(function (config) {
  // to avoid careless bugs, only requests that explicitly declare the *canCache* parameter use the cache
  if (config.canCache) {
    if (cacheUtils.isCached(config)) {
      let error = cacheUtils.getError(config);
      throw error;
    }
    if (cacheUtils.isPending(config)) {
      return new Promise((resolve, reject) => {
        let interval = setInterval(() => {
          if (config.isFinished) {
            clearInterval(interval);
            let error = cacheUtils.getError(config);
            reject(error);
          }
        }, 200);
      });
    } else {
      // the head of the cacheable request queue; get the response via a real http request
      return config;
    }
  } else {
    return config;
  }
});
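With the canCache flag above, a call that opts into the cache might look like this (the URL is just an example):

// Opts into the request cache via the custom canCache config flag defined above
axios.get('/api/status', { canCache: true })
  .then((res) => console.log(res.data));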

Increase timeout time in React - neo4j app

I am trying to increase the timeout time of my React app. I am using axios, so initially I tried:
axios.post('/gene_info', postData, {timeout: timeoutVal});
It did not work; here is the relevant thread that deals with it:
https://github.com/axios/axios/issues/647
So, I tried the following code:
let CancelToken = axios.CancelToken;
const source = CancelToken.source();
try {
  let response = null;
  setTimeout(() => {
    if (response === null) {
      source.cancel();
    }
  }, 60 * 1500 * 1000);
  response = await axios.post('/gene_info', postData, { cancelToken: source.token });
  console.log(response);
} catch (error) {
  console.log(error);
}
And it is not working either. The request times out and I see the empty response error, even though on the Node.js backend I see that the result is returned correctly. On the backend I am making a very long-running request to a Neo4j database. I suspected that maybe Neo4j times out, so I added the following lines to the neo4j.config file:
unsupported.dbms.executiontime_limit.enabled=true
unsupported.dbms.executiontime_limit.time=99999999999999s
which I found here:
How to configure a query timeout in Neo4j 3.0.1
I then restarted neo4j, but it did not help either. Here is what I see in the terminal:
I am not sure what this POST /gene_info - - ms - - means, or whether the problem is on the front end or the back end, but I have a suspicion that neo4j now times out while it is still calculating the result, which I can see from my console.log() statements. Any suggestions would be greatly appreciated.
Update
I tried using React's fetch, but it is still not working. Here is the code:
fetchWithTimeout = (url, postData, timeout) => {
  let didTimeOut = false;
  // Return the promise so callers can chain .then() on the result
  return new Promise(function(resolve, reject) {
    // Named `timer` so it does not shadow the `timeout` argument
    const timer = setTimeout(function() {
      didTimeOut = true;
      reject(new Error('Request timed out'));
    }, timeout);

    fetch(url, {
      method: 'POST',
      headers: {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(postData)
    })
      .then(function(response) {
        // Clear the timeout as cleanup
        clearTimeout(timer);
        if (!didTimeOut) {
          console.log('fetch good! ', response);
          resolve(response);
        }
      })
      .catch(function(err) {
        console.log('fetch failed! ', err);
        // Rejection already happened with setTimeout
        if (didTimeOut) return;
        // Reject with error
        reject(err);
      });
  })
    .then(function(response) {
      // Request succeeded and no timeout; pass the response through
      console.log('good promise, no timeout! ');
      return response;
    })
    .catch(function(err) {
      // Error: response error, request timeout or runtime error
      console.log('promise error! ', err);
      throw err;
    });
};
Then I am calling this function like that:
let postData = {
  "jsonData": geneNameArr,
  "datasetName": this.props.datasetName
};

this.fetchWithTimeout('/gene_info', postData, timeout).then((response) => {
  console.log("fetchWithTimeout is done!");
  console.log(response);
});
Update
I tried using axios.create() function with no success:
const axiosInstance = axios.create({
  baseURL: '/gene_info',
  timeout: timeout
});

axiosInstance.post('', postData).then((response) => {
  console.log("axios request is done with create() method");
  console.log(response);
});
If nothing seems to work on the front end, I would think it is the timeout that comes from the neo4j driver, even though somehow the results are returned. Here is the code I am using for the driver:
router.post('/gene_info', function(req, res) {
  ...
  ...
  var driver = dbUtils.driver;
  const session = driver.session();
  session.run(
    full_query,
    {}
  ).then(result => {
    const exprData = chartService.prepareGeneInfoData(result, '');
    res.json({
      exprData
    });
    session.close();
  });
});
Or maybe it could also be express.Router(), which I am using for handling GET and POST requests on the Node.js backend.
If you want to configure your timeout in axios, you can use:
const axiosInstance = axios.create({
  baseURL: "http://example.com/api/",
  timeout: 5000
});
Replace 5000 with the timeout value you need.
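The timeout can also be overridden for an individual request if only one route needs a longer limit; a small sketch (the route and the two-minute value are just examples):

// Per-request override of the instance-level timeout (two minutes here)
axiosInstance.post('/gene_info', postData, { timeout: 120000 })
  .then((response) => console.log(response.data));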
Ultimately I found the solution that worked here:
Node Express specific timeout value per route
And I used the setConnectionTimeout() function in the following way:
router.post('/gene_info', setConnectionTimeout('12h'), function(req, res) {
  ...
});
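setConnectionTimeout() itself comes from the linked answer and is not reproduced in the thread; a sketch of what such a per-route timeout middleware might look like (an assumption about its implementation, using the ms package to parse the '12h' string):

const ms = require('ms'); // assumed helper for parsing duration strings like '12h'

// Hypothetical per-route middleware: raises the socket timeout for this request/response only
function setConnectionTimeout(duration) {
  const msecs = typeof duration === 'string' ? ms(duration) : duration;
  return function (req, res, next) {
    req.setTimeout(msecs);
    res.setTimeout(msecs);
    next();
  };
}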
