Promises nodejs readability improvements - javascript

I have written a few functions that each return a promise, to get data from the Google Analytics API. I think what I've written is what's called callback hell.
Can someone help me optimise this code (or give tips/best practices) so that it is more readable?
var express = require('express');
var router = express.Router();
var googleAuth = require('google-oauth-jwt');
var google = require('googleapis');
var app = express();

module.exports.getGoogleData = function (jwtClient, analytics, VIEW_ID) {
    return new Promise(function (resolve, reject) {
        return getOrdersToday(jwtClient, analytics, VIEW_ID).then(function (orders) {
            return getOnlineUsersToday(jwtClient, analytics, VIEW_ID).then(function (users) {
                return getSearchedToday(jwtClient, analytics, VIEW_ID).then(function (searched) {
                    return getPageviewsTodayAndUsersToday(jwtClient, analytics, VIEW_ID).then(function (pageviews) {
                        var returndata = [
                            {
                                "Orders": orders,
                                "Onlineusers": users,
                                "searched": searched,
                                "pageviews": pageviews[0].pageviews,
                                "usersToday": pageviews[0].users
                            }
                        ];
                        resolve(returndata);
                    });
                });
            });
        });
    });
}
An example of one of the get functions:
function getOrdersToday(jwtClient, analytics, view_id) {
    return new Promise(function (resolve, reject) {
        analytics.data.ga.get({
            'auth': jwtClient,
            'ids': view_id,
            'metrics': 'ga:totalEvents',
            'start-date': 'today',
            'end-date': 'today',
            'filters': 'ga:eventAction==Bestelling geplaatst',
            'max-results': '1'
        }, function (err, response) {
            // handle the errors (if any)
            if (err) {
                console.log(err)
                reject(err)
            } else
                console.log('Response:', response)
            resolve(response.totalsForAllResults["ga:totalEvents"]);
        });
    });
}

There's no need for the new Promise in there at all, and in fact by using it, you leave it open to never settling if an error occurs in one of your calls. Remember that then returns a new promise, so you can just chain all of those together if you want them to run sequentially:
module.exports.getGoogleData = function (jwtClient, analytics, VIEW_ID) {
    var result = {};
    return getOrdersToday(jwtClient, analytics, VIEW_ID)
        .then(function (orders) {
            result.Orders = orders;
            return getOnlineUsersToday(jwtClient, analytics, VIEW_ID);
        })
        .then(function (users) {
            result.Onlineusers = users;
            return getSearchedToday(jwtClient, analytics, VIEW_ID);
        })
        .then(function (searched) {
            result.searched = searched;
            return getPageviewsTodayAndUsersToday(jwtClient, analytics, VIEW_ID);
        })
        .then(function (pageviews) {
            result.pageviews = pageviews[0].pageviews;
            result.usersToday = pageviews[0].users;
            return [result]; // (Seems a bit odd that it's wrapped in an array, but
                             // that's what the original code did...)
        });
}
But, those operations look independent of one another. If that's really true, run them in parallel with Promise.all:
module.exports.getGoogleData = function (jwtClient, analytics, VIEW_ID) {
    return Promise.all([
        getOrdersToday(jwtClient, analytics, VIEW_ID),
        getOnlineUsersToday(jwtClient, analytics, VIEW_ID),
        getSearchedToday(jwtClient, analytics, VIEW_ID),
        getPageviewsTodayAndUsersToday(jwtClient, analytics, VIEW_ID)
    ]).then(results => {
        return [{
            Orders: results[0],
            Onlineusers: results[1],
            searched: results[2],
            pageviews: results[3][0].pageviews,
            usersToday: results[3][0].users
        }];
    });
}
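Whichever of these two shapes you pick, the caller consumes the returned promise the same way. As a sketch only (the './googleData' module path and the Express route are assumptions, not part of the original code), a route handler might look like this:
var googleData = require('./googleData'); // hypothetical path to the module above

router.get('/dashboard', function (req, res) {
    googleData.getGoogleData(jwtClient, analytics, VIEW_ID)
        .then(function (data) {
            res.json(data); // [{ Orders: ..., Onlineusers: ..., searched: ..., pageviews: ..., usersToday: ... }]
        })
        .catch(function (err) {
            console.error(err);
            res.status(500).send('Could not fetch Google Analytics data');
        });
});
Note the .catch at the end: with the new Promise wrapper removed, any error from the underlying calls now propagates to the caller, and this is where it should be handled.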

Another way to get rid of that nesting is to drop the manual promise chaining altogether and use:
ES2017 async/await syntax
So, take a look at this new code:
var express = require('express');
var router = express.Router();
var googleAuth = require('google-oauth-jwt');
var google = require('googleapis');
var app = express();

module.exports.getGoogleData = foo;

async function foo(jwtClient, analytics, VIEW_ID) {
    var orders = await getOrdersToday(jwtClient, analytics, VIEW_ID);
    var users = await getOnlineUsersToday(jwtClient, analytics, VIEW_ID);
    var searched = await getSearchedToday(jwtClient, analytics, VIEW_ID);
    var pageviews = await getPageviewsTodayAndUsersToday(jwtClient, analytics, VIEW_ID);
    return [{
        "Orders": orders,
        "Onlineusers": users,
        "searched": searched,
        "pageviews": pageviews[0].pageviews,
        "usersToday": pageviews[0].users
    }];
}
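The same caveat applies here: if any awaited call rejects, foo rejects, so the caller still needs to handle the error. A minimal sketch (the caller shown is hypothetical):
// Hypothetical caller of the async version above
foo(jwtClient, analytics, VIEW_ID)
    .then(data => console.log('Analytics data:', data))
    .catch(err => console.error('One of the analytics calls failed:', err));
Also note that the four awaits run one after another; if the calls are independent, you can combine async/await with Promise.all (as in the other answers) to run them in parallel.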

Since your get functions already return promises, you can use Promise.all() and pass it the array of promises. The calls run concurrently, and the results array preserves the order in which you passed the promises.
Refer this for more details - https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/all
Alternatively, you can use async/await as well - https://javascript.info/async-await
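To be clear, Promise.all starts all the requests at once; what is preserved is the order of the results array, which always matches the order of the input array regardless of which request finishes first. A small sketch (not from the original answer) illustrating that:
// The results array keeps the input order even though 'fast' settles first
const slow = new Promise(resolve => setTimeout(() => resolve('slow'), 200));
const fast = new Promise(resolve => setTimeout(() => resolve('fast'), 50));

Promise.all([slow, fast]).then(results => {
    console.log(results); // ['slow', 'fast']
});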

Below is a correct and readable way to chain your promises. Proper chaining reduces nesting by expanding the chain vertically. Also, you don't need to instantiate a new Promise, because all your get methods already return promises.
module.exports.getGoogleData = function (jwtClient, analytics, VIEW_ID) {
    var returndata = [{}];
    return getOrdersToday(jwtClient, analytics, VIEW_ID).then(function (orders) {
        returndata[0].Orders = orders;
        return getOnlineUsersToday(jwtClient, analytics, VIEW_ID);
    }).then(function (users) {
        returndata[0].Onlineusers = users;
        return getSearchedToday(jwtClient, analytics, VIEW_ID);
    }).then(function (searched) {
        returndata[0].searched = searched;
        return getPageviewsTodayAndUsersToday(jwtClient, analytics, VIEW_ID);
    }).then(function (pageviews) {
        returndata[0].pageviews = pageviews[0].pageviews;
        returndata[0].usersToday = pageviews[0].users;
        return returndata;
    });
}

As others have noted, these calls do not depend on each other, which means you don't need to sequence them and can instead execute them all simultaneously. Here is what that might look like:
module.exports.getGoogleData = function (jwtClient, analytics, VIEW_ID) {
    return Promise.all([
        getOrdersToday(jwtClient, analytics, VIEW_ID),
        getOnlineUsersToday(jwtClient, analytics, VIEW_ID),
        getSearchedToday(jwtClient, analytics, VIEW_ID),
        getPageviewsTodayAndUsersToday(jwtClient, analytics, VIEW_ID)
    ])
    .then(function (results) {
        return {
            "Orders": results[0],
            "Onlineusers": results[1],
            "searched": results[2],
            "pageviews": results[3][0].pageviews,
            "usersToday": results[3][0].users
        };
    });
}
But I think your question is a really good one, and I hope your interest in good patterns doesn't end just because this specific case calls for a different solution. As I have noted elsewhere, async patterns are a complicated problem domain.
So, let's pretend you need to make 4 calls, each depending on the results of the previous one, so they must be executed in sequence. We have access to this fictional API:
function fetchOne()       // takes nothing, returns opaque ONE
function fetchTwo(ONE)    // must feed it ONE, returns opaque TWO
function fetchThree(TWO)  // must feed it TWO, returns opaque THREE
function fetchFour(THREE) // must feed it THREE, returns opaque FOUR
And furthermore, let's pretend that at the end of this process, you wish to have access to all four of these return values so you can craft a response with all of them. After all, what good is a pattern if it can't accommodate the kinds of complicated workflows we see in the real world?
Thus, our goal is to flesh out the implementation of this function:
function doTheWork() {
    // DO STUFF somehow, finally...
    return {
        one: ONE,
        two: TWO,
        three: THREE,
        four: FOUR,
        checksum: ONE + TWO + THREE + FOUR
    };
}
A naively straightforward attempt might look like this:
function doTheWork() {
    return fetchOne()
        .then((ONE) => {
            return fetchTwo(ONE)
        })
        .then((TWO) => {
            return fetchThree(TWO)
        })
        .then((THREE) => {
            return fetchFour(THREE)
        })
        .then((FOUR) => {
            // all work is done, let's package up the results
            return {
                one: ONE // uh-oh...
            };
        })
}
This will execute, but the problem is that we no longer have access to the earlier values in the final handler.
There are really only two ways around this: (1) declare variables with a scope that will be shared by all the callbacks, or (2) pass all data between handlers in some kind of structure. I think solution 1 is really inelegant, and I recommend against it, but let's see what that might look like:
function doTheWork() {
    var A, B, C
    return fetchOne()
        .then((ONE) => {
            A = ONE // store ONE in A
            return fetchTwo(ONE)
        })
        // ...
        .then((FOUR) => {
            // all work is done, let's package up the results
            // at this point, A & B & C contain the results of previous calls
            // and are within scope here
            return {
                one: A,
                two: B,
                three: C,
                four: FOUR,
                checksum: A + B + C + FOUR
            };
        })
}
This will work, but I think it's bad practice and I dislike it. It's bad practice because these variables are now "public" in a limited kind of way, exposing them to everything else within doTheWork. They can be clobbered by statements anywhere in this function. And unless they are scalars, they will be passed by reference, which means any mutation bugs associated with them can manifest themselves in sometimes bizarre ways.
Also, each of these "temporary" variables now competes for good names in a shared namespace (the scope of doTheWork). Ideally, you should create as few variables as possible, and each should live only as long as necessary. This might save memory, but it definitely saves your "naming pool." Naming things is mentally exhausting. I am not kidding. And every name must be good -- never name variables in a slapdash way. The names of things are often the only clues within the code about what is happening. If some of those names are throwaway garbage, you are making your life and the lives of future maintainers harder.
So, let's look at solution 2. At this point, I want to point out that this approach works best when you can use ES6 destructuring syntax. You can do this without it, but it's a bit clunkier.
We're going to construct an array that will slowly accumulate all the data fetched by each of these async calls. At the end, the array will look like this:
[ ONE , TWO , THREE , FOUR ]
By chaining promises efficiently, and by passing this array from one handler to the next, we can both avoid the Pyramid of Doom and share async results among all of these methods easily. See below:
function doTheWork() {
    return fetchOne()
        .then((ONE) => {
            return fetchTwo(ONE)
                .then((TWO) => [ ONE , TWO ])
        })
        .then(([ ONE , TWO ]) => {
            return fetchThree(TWO)
                .then((THREE) => [ ONE , TWO , THREE ])
        })
        .then(([ ONE , TWO , THREE ]) => {
            return fetchFour(THREE)
                .then((FOUR) => [ ONE , TWO , THREE , FOUR ])
        })
        .then(([ ONE , TWO , THREE , FOUR ]) => {
            return {
                one: ONE,
                two: TWO,
                three: THREE,
                four: FOUR,
                checksum: ONE + TWO + THREE + FOUR
            }
        })
}
That's the whole thing, but let's step through just the first part:
return fetchOne()
    .then((ONE) => {
        return fetchTwo(ONE)
            .then((TWO) => [ ONE , TWO ])
    })
As usual, fetchOne returns ONE -- it's the third-party API we have no control over. And as you might expect, we use ONE to make the second call, making sure to return its promise. But that last line is the real magic:
.then((TWO) => [ ONE , TWO ])
The second API call still returns just TWO, but rather than us simply returning TWO alone, we instead return an array that contains both ONE and TWO. That value -- the array -- becomes the argument to the next .then handler.
This works because whatever a .then handler returns becomes the resolution value of the promise it produces, and any nested promises are automatically unwrapped. Here's a simple example showing that at work:
function getDogName() {
    return fetchCatName()
        .then((catName) => 'Fido')
}
getDogName()
    .then((dogName) => {
        console.log(dogName);
    })
// logs 'Fido'
This illustrates that you can attach .then handlers to nested promises, and the outer promises will return the results of those handlers. This is a really common pattern when using fetch to get JSON:
function makeApiCall() {
    return fetch(api_url) // resolves with a Response object
        .then((response) => response.json()) // extracts just the JSON and returns that instead!
}
Going back to our code, since we want to avoid the Pyramid, we leave this handler alone (rather than nesting the next call inside it, as you originally did). The next line looks like this:
.then(([ ONE , TWO ]) => {
This is ES6 destructuring at work. By destructuring the array argument, we can name the elements as they enter the function. And if we're doing that, we might as well give them the names we like best, the ones they came into the world with: ONE and TWO.
We then repeat the pattern, using TWO to invoke fetchThree, making sure to return its promise, but not before tacking on a tiny .then handler that bundles the newly-available THREE into the package that we pass forward.
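For comparison only, and assuming the same fictional fetch functions plus an environment that supports ES2017, the whole accumulator dance can also be expressed with async/await, since every awaited value stays in scope for the rest of the function:
// Sketch: same fictional API, async/await instead of the accumulator array
async function doTheWork() {
    const ONE = await fetchOne();
    const TWO = await fetchTwo(ONE);
    const THREE = await fetchThree(TWO);
    const FOUR = await fetchFour(THREE);
    return {
        one: ONE,
        two: TWO,
        three: THREE,
        four: FOUR,
        checksum: ONE + TWO + THREE + FOUR
    };
}
The array-passing pattern above is still worth knowing for codebases or branching workflows where async/await isn't available or doesn't fit.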
I hope this helps. It's a pattern I worked out to deal with some complicated branching workflows in AWS, making calls against S3 and Dynamo and other platforms with a lot of parallel conditionals, mixing blocking and non-blocking behavior.
Async patterns are a special problem domain. Even with the right tech, it can be hard to find clear ways to express behavior, especially when the underlying workflow is convoluted. This is often the case when exploring the data graphs that are so common these days.
Happy coding.

Related

Executing two create functions cloud functions

I'm creating a Cloud Function for when a user signs up: first it creates a score collection for them, but I also want to add some dummy starter data to their account. The code below won't work because the second return statement is never reached.
If there are any suggestions on a better way to do this entirely, then I welcome that feedback.
// auth trigger (new user signup)
exports.newUserSignup = functions.auth.user().onCreate((user) => {
    return admin.firestore().collection('users').doc(user.uid).collection('score').add({
        gems: 0,
    })
    // this line isn't reached
    return admin.firestore().collection('users').doc(user.uid).collection('sampleData').add({
        ...
    });
})
You will need to wait until the promises from both operations resolve. One way to do this is to generate a new promise that resolves only after the other two resolve.
exports.newUserSignup = functions.auth.user().onCreate((user) => {
    const p1 = admin.firestore().collection('users').doc(user.uid).collection('score').add({
        gems: 0,
    })
    const p2 = admin.firestore().collection('users').doc(user.uid).collection('sampleData').add({
        ...
    });
    return Promise.all([p1, p2]);
})
In order to make effective use of JavaScript and Cloud Functions, you will definitely need to understand how asynchronous programming with JavaScript promises works. I suggest taking some time to learn about that.
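If your runtime supports it (Node 8 or later for Cloud Functions), the same fix reads naturally with async/await as well. This is only a sketch of the accepted approach above, with the sampleData fields left elided exactly as in the question:
// Sketch: same Promise.all fix, written with async/await (needs a Node 8+ runtime)
exports.newUserSignup = functions.auth.user().onCreate(async (user) => {
    const userRef = admin.firestore().collection('users').doc(user.uid);
    await Promise.all([
        userRef.collection('score').add({ gems: 0 }),
        userRef.collection('sampleData').add({ /* ... */ })
    ]);
});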

Avoid callback hell with angular, HttpClient and Observables

I am currently struggling to wrap my head around angular (2+), the HttpClient and Observables.
I come from a promise async/await background, and what I would like to achieve in angular, is the equivalent of:
//(...) Some boilerplate to showcase how to avoid callback hell with promises and async/await
async function getDataFromRemoteServer() {
    this.result = await httpGet(`/api/point/id`);
    this.dependentKey = someComplexSyncTransformation(this.result);
    this.dependentResult = await httpGet(`/api/point/id/dependent/keys/${this.dependentKey}`);
    this.deeplyNestedResult = await httpGet(`/api/point/id/dependent/keys/${this.dependentResult.someValue}`);
}
The best I could come up with in Angular is:
import { HttpClient } from '@angular/common/http';
//(...) boilerplate to set the component up.
constructor(private http: HttpClient) {}
// somewhere in a component.
getDataFromRemoteServer() {
    this.http.get(`/api/point/id`).subscribe(result => {
        this.result = result;
        this.dependentKey = someComplexSyncTransformation(this.result);
        this.http.get(`/api/point/id/dependent/keys/${this.dependentKey}`).subscribe(dependentResult => {
            this.dependentResult = dependentResult;
            this.http.get(`/api/point/id/dependent/keys/${this.dependentResult.someValue}`).subscribe(deeplyNestedResult => {
                this.deeplyNestedResult = deeplyNestedResult;
            });
        });
    });
}
//...
//...
As you might have noticed, I am entering the Pyramid of Doom with this approach, which I would like to avoid.
So how could I write the angular snippet in a way as to avoid this?
Thx!
Ps: I am aware of the fact that you can call .toPromise on the result of the .get call.
But let's just assume I want to go the total Observable way, for now.
When working with observables, you won't call subscribe very often. Instead, you'll use the various operators to combine observables together, forming a pipeline of operations.
To take the output of one observable and turn it into another, the basic operator is map. This is similar to how you can .map an array to produce another array. For a simple example, here's doubling all the values of an observable:
import { of } from 'rxjs';
import { map } from 'rxjs/operators';

const myObservable = of(1, 2, 3).pipe(
    map(val => val * 2)
);
// myObservable is an observable which will emit 2, 4, 6
Mapping is also what you do to take an observable for one http request, and then make another http request. However, we will need one additional piece, so the following code is not quite right:
const myObservable = http.get('someUrl').pipe(
    map(result => http.get('someOtherUrl?id=' + result.id))
);
The problem with this code is that it creates an observable that spits out other observables. A 2-dimensional observable if you like. We need to flatten this down so that we have an observable that spits out the results of the second http.get. There are a few different ways to do the flattening, depending on what order we want the results to be in if multiple observables are emitting multiple values. This is not much of an issue in your case since each of these http observables will only emit one item. But for reference, here are the options:
mergeMap will let all the observables run in whatever order, and outputs in whatever order the values arrive. This has its uses, but can also result in race conditions
switchMap will switch to the latest observable, and cancel old ones that may be in progress. This can eliminate race conditions and ensure you have only the latest data.
concatMap will finish the entirety of the first observable before moving on to the second. This can also eliminate race conditions, but won't cancel old work.
Like I said, it doesn't matter much in your case, but I'd recommend using switchMap. So my little example above would become:
const myObservable = http.get('someUrl').pipe(
    switchMap(result => http.get('someOtherUrl?id=' + result.id))
);
Now here's how I can use those tools with your code. In this code example, I'm not saving all the this.result, this.dependentKey, etc:
getDataFromRemoteServer() {
    return this.http.get(`/api/point/id`).pipe(
        map(result => someComplexSyncTransformation(result)),
        switchMap(dependentKey => this.http.get(`/api/point/id/dependent/keys/${dependentKey}`)),
        switchMap(dependentResult => this.http.get(`/api/point/id/dependent/keys/${dependentResult.someValue}`))
    );
}
// to be used like:
getDataFromRemoteServer()
.subscribe(deeplyNestedResult => {
// do whatever with deeplyNestedResult
});
If it's important to you to save those values, then I'd recommend using the tap operator to highlight the fact that you're generating side effects. tap will run some code whenever the observable emits a value, but will not mess with the value:
getDataFromRemoteServer() {
    return this.http.get(`/api/point/id`).pipe(
        tap(result => this.result = result),
        map(result => someComplexSyncTransformation(result)),
        tap(dependentKey => this.dependentKey = dependentKey),
        // ... etc
    );
}
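Putting those pieces together, and assuming the same endpoints and component fields as in the question, the full pipeline with every intermediate value saved might look roughly like this (a sketch, not a drop-in implementation):
// tap, map and switchMap all come from 'rxjs/operators'
getDataFromRemoteServer() {
    return this.http.get(`/api/point/id`).pipe(
        tap(result => this.result = result),
        map(result => someComplexSyncTransformation(result)),
        tap(dependentKey => this.dependentKey = dependentKey),
        switchMap(dependentKey => this.http.get(`/api/point/id/dependent/keys/${dependentKey}`)),
        tap(dependentResult => this.dependentResult = dependentResult),
        switchMap(dependentResult => this.http.get(`/api/point/id/dependent/keys/${dependentResult.someValue}`)),
        tap(deeplyNestedResult => this.deeplyNestedResult = deeplyNestedResult)
    );
}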

Functional Programming and async/promises

I'm refactoring some old node modules into a more functional style. I'm like a second year freshman when it comes to FP :) Where I keep getting hung up is handling large async flows. Here is an example where I'm making a request to a db and then caching the response:
// Some external xhr/promise lib
const fetchFromDb = make => {
    return new Promise(resolve => {
        console.log('Simulate async db request...'); // just simulating a async request/response here.
        setTimeout(() => {
            console.log('Simulate db response...');
            resolve({ make: 'toyota', data: 'stuff' });
        }, 100);
    });
};

// memoized fn
// this caches the response to getCarData(x) so that whenever it is invoked with 'x' again, the same response gets returned.
const getCarData = R.memoizeWith(R.identity, (carMake, response) => response.data);

// Is this function pure? Or is it setting something outside the scope (i.e., getCarData)?
const getCarDataFromDb = (carMake) => {
    return fetchFromDb(carMake).then(getCarData.bind(null, carMake));
    // Note: This return statement is essentially the same as:
    // return fetchFromDb(carMake).then(result => getCarData(carMake, result));
};

// Initialize the request for 'toyota' data
const toyota = getCarDataFromDb('toyota'); // must be called no matter what

// Approach #1 - Just rely on thenable
console.log(`Value of toyota is: ${toyota.toString()}`);
toyota.then(d => console.log(`Value in thenable: ${d}`)); // -> Value in thenable: stuff

// Approach #2 - Just make sure you do not call this fn before db response.
setTimeout(() => {
    const car = getCarData('toyota'); // so nice!
    console.log(`later, car is: ${car}`); // -> 'later, car is: stuff'
}, 200);
<script src="https://cdnjs.cloudflare.com/ajax/libs/ramda/0.25.0/ramda.min.js"></script>
I really like memoization for caching large JSON objects and other computed properties. But with a lot of asynchronous requests whose responses are dependent on each other for doing work, I'm having trouble keeping track of what information I have and when. I want to get away from using promises so heavily to manage flow. It's a node app, so making things synchronous to ensure availability was blocking the event loop and really affecting performance.
I prefer approach #2, where I can get the car data simply with getCarData('toyota'). But the downside is that I have to be sure that the response has already been returned. With approach #1 I'll always have to use a thenable which alleviates the issue with approach #2 but introduces its own problems.
Questions:
Is getCarDataFromDb a pure function as it is written above? If not, how is that not a side-effect?
Is using memoization in this way an FP anti-pattern? That is, calling it from a thenable with the response so that future invocations of that same method return the cached value?
Question 1
It's almost a philosophical question whether there are side-effects here. Calling it does update the memoization cache. But that itself has no observable side-effects. So I would say that this is effectively pure.
Update: a comment pointed out that as this calls IO, it can never be pure. That is correct. But that's the essence of this behavior. It's not meaningful as a pure function. My answer above is only about side-effects, and not about purity.
Question 2
I can't speak for the whole FP community, but I can tell you that the Ramda team (disclaimer: I'm a Ramda author) prefers to avoid Promises, preferring more lawful types such as Futures or Tasks. But the same questions you have here would be in play with those types substituted for Promises. (More on these issues below.)
In General
There is a central point here: if you're doing asynchronous programming, it will spread to every bit of the application that touches it. There is nothing you will do that changes this basic fact. Using Promises/Tasks/Futures helps avoid some of the boilerplate of callback-based code, but it requires you to put the post-response/rejection code inside a then/map function. Using async/await helps you avoid some of the boilerplate of Promise-based code, but it requires you to put the post-response/rejection code inside async functions. And if one day we layer something else on top of async/await, it will likely have the same characteristics.
(While I would suggest that you look at Futures or Tasks instead of Promises, below I will only discuss Promises. The same ideas should apply regardless.)
My suggestion
If you're going to memoize anything, memoize the resulting Promises.
However you deal with your asynchrony, you will have to put the code that depends on the result of an asynchronous call into a function. I assume that the setTimeout of your second approach was just for demonstration purposes: using timeout to wait for a DB result over the network is extremely error-prone. But even with setTimeout, the rest of your code is running from within the setTimeout callback.
So rather than trying to separate the cases for when your data has already been cached and when it hasn't, simply use the same technique everywhere: myPromise.then(... my code ... ). That could look something like this:
// getCarData :: String -> Promise AutoInfo
const getCarData = R.memoizeWith(R.identity, make => new Promise(resolve => {
    console.log('Simulate async db request...')
    setTimeout(() => {
        console.log('Simulate db response...')
        resolve({ make: 'toyota', data: 'stuff' });
    }, 100)
}))

getCarData('toyota').then(carData => {
    console.log('now we can go', carData)
    // any code which depends on carData
})

// later
getCarData('toyota').then(carData => {
    console.log('now it is cached', carData)
})
<script src="//cdnjs.cloudflare.com/ajax/libs/ramda/0.25.0/ramda.min.js"></script>
In this approach, whenever you need car data, you call getCarData(make). Only the first time will it actually call the server. After that, the Promise is served out of the cache. But you use the same structures everywhere to deal with it.
I only see one reasonable alternative. I couldn't tell if your discussion about having to wait for the data before making the remaining calls means that you would be able to pre-fetch your data. If that's the case, then there is one additional possibility, one which would allow you to skip the memoization as well:
// getCarData :: String -> Promise AutoInfo
const getCarData = make => new Promise(resolve => {
    console.log('Simulate async db request...')
    setTimeout(() => {
        console.log('Simulate db response...')
        resolve({ make: 'toyota', data: 'stuff' });
    }, 100)
})

const makes = ['toyota', 'ford', 'audi']

Promise.all(makes.map(getCarData)).then(allAutoInfo => {
    const autos = R.zipObj(makes, allAutoInfo)
    console.log('cooking with gas', autos)
    // remainder of app that depends on auto data here
})
<script src="//cdnjs.cloudflare.com/ajax/libs/ramda/0.25.0/ramda.min.js"></script>
But this one means that nothing will be available until all your data has been fetched. That may or may not be all right with you, depending on all sorts of factors. And for many situations, it's not even remotely possible or desirable. But it is possible that yours is one where it is helpful.
One technical point about your code:
const getCarDataFromDb = (carMake) => {
    return fetchFromDb(carMake).then(getCarData.bind(null, carMake));
};
Is there any reason to use getCarData.bind(null, carMake) instead of result => getCarData(carMake, result)? The arrow function seems much more readable.
Is getCarDataFromDb a pure function as it is written above?
No. Pretty much anything that uses I/O is impure. The data in the DB could change, the request could fail, so it doesn't give any reliable guarantee that it will return consistent values.
Is using memoization in this way an FP anti-pattern? That is, calling it from a thenable with the response so that future invocations of that same method return the cached value?
It's definitely an asynchrony antipattern. In your approach #2 you are creating a race condition where the operation will succeed if the DB query completes in less than 200 ms, and fail if it takes longer than that. You've labeled a line in your code "so nice!" because you're able to retrieve data synchronously. That suggests to me that you're looking for a way to skirt the issue of asynchrony rather than facing it head-on.
The way you're using bind and "tricking" memoizeWith into storing the value you're passing into it after the fact also looks very awkward and unnatural.
It is possible to take advantage of caching and still use asynchrony in a more reliable way.
For example:
// Some external xhr/promise lib
const fetchFromDb = make => {
    return new Promise(resolve => {
        console.log('Simulate async db request...')
        setTimeout(() => {
            console.log('Simulate db response...')
            resolve({ make: 'toyota', data: 'stuff' });
        }, 2000);
    });
};

const getCarDataFromDb = R.memoizeWith(R.identity, fetchFromDb);

// Initialize the request for 'toyota' data
const toyota = getCarDataFromDb('toyota'); // must be called no matter what

// Finishes after two seconds
toyota.then(d => console.log(`Value in thenable: ${d.data}`));

// Wait for 5 seconds before getting Toyota data again.
// This time, there is no 2-second wait before the data comes back.
setTimeout(() => {
    console.log('About to get Toyota data again');
    getCarDataFromDb('toyota').then(d => console.log(`Value in thenable: ${d.data}`));
}, 5000);
<script src="https://cdnjs.cloudflare.com/ajax/libs/ramda/0.25.0/ramda.min.js"></script>
The one potential pitfall here is that if a request should fail, you'll be stuck with a rejected promise in your cache. I'm not sure what would be the best way to address that, but you'd surely need some way of invalidating that part of the cache or implementing some sort of retry logic somewhere.
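One way to handle that pitfall is to skip R.memoizeWith (which doesn't expose its cache) and memoize by hand with a Map, evicting the entry whenever the promise rejects so that a later call retries. This is only a sketch of that idea:
// Sketch: memoize the promise, but forget it again if the request fails
const carDataCache = new Map();

const getCarDataFromDb = make => {
    if (!carDataCache.has(make)) {
        const promise = fetchFromDb(make).catch(err => {
            carDataCache.delete(make); // drop the rejected promise so the next call retries
            throw err;                 // re-throw so callers still see the failure
        });
        carDataCache.set(make, promise);
    }
    return carDataCache.get(make);
};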

How do I use Zapier's StoreClient to set and retrieve a single value?

Following the documentation, I am using this in my JS:
var store = StoreClient('my secret');
store.set('active', true);
var status = store.get('active');
The variable status never has a value. I'm clearly not using the library correctly.
For context, this is inside a switch statement that does something like this for many of the cases, where some of them need to set or get a value from the StoreClient.
The documentation uses this example:
var store = StoreClient('your secret here');
store
    .set('hello', 'world')
    .then(function() {
        return store.get('hello');
    })
    .then(function(value) {
        // value === 'world'
        return store.delete('hello');
    })
    .then(function() {
        callback();
    })
    .catch(callback);
Because I'm on the amateur side, I'm not super familiar with promises. In that example, it's unclear to me which parts of the chain are required in order to [a] set, and eventually, [b] get a value. I suggest including an example that doesn't have set/get/delete combined into one.
I tried this:
var store = StoreClient('my secret');
store
    .set('active', true)
    .then(function() {
        return store.get('active');
    })
    .then(function() {
        callback();
    })
    .catch(callback);
... but then I get an error that there is no output variable, even though I haven't touched the output variable at the bottom of the script.
David from Zapier's Platform team here.
Sorry about the confusion in the docs. I'll give you a quick answer on how to fix your code and a long one as to why. In the meantime, I'll make a note to update the docs with more sensical examples.
Short
Two big things:
A. Each .then handler receives whatever was returned from the previous function. If you don't bring values along, they're lost. Your code should read:
.then(function(storedVal) { // <- that variable is missing in your code
    console.log('stored val is', storedVal);
})
B. You need to provide a value to the second argument of callback. There's a better example here.
.then(function(storedVal) {
    callback(null, {active: storedVal});
})
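Putting A and B together, your snippet might look like this (the output key name active is just an example):
var store = StoreClient('my secret');
store
    .set('active', true)
    .then(function() {
        return store.get('active');
    })
    .then(function(storedVal) {
        callback(null, {active: storedVal});
    })
    .catch(callback);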
Long
Here's some of the nitty gritty on how to make all Zapier code work great.
Callback
Your code runs inside AWS Lambda, which always needs to know when you're finished. It executes all of your code in a special function with a certain set of arguments. The pertinent one here is callback, a function that you can call when you're ready to exit (or have an error). You can read more about that setup here.
Like most node callbacks, the callback has the function signature callback (error, result). To throw an error, you pass something in the first spot:
callback({msg: 'thing went wrong'});
To pass a result, use the second (and nothing in the first)
callback(null, {myData: 4});
So, not passing anything there is why the zap result isn't seeing any data.
Promises
In general, callbacks suck and are confusing to work with, so we designed StoreClient to return promises. There's a lot of materials about promises online so I won't go into the details here. The important thing is that whatever gets returned from a promise's function is the argument in the next one. For example:
Promise.resolve(1)
    .then(function(val) {
        // val === 1
        return Promise.resolve(val + 1)
    })
    .then(function(val) {
        // val === 2
    })
There's a more practical example in these docs:
var store = StoreClient('your secret here');
var outCount;
store
    .get('some counter')
    .then(function(count) {
        count = (count || 0) + 1;
        outCount = count;
        return store.set('some counter', count);
    })
    .then(function() {
        callback(null, {'the count': outCount});
    })
    .catch(callback);
Hopefully that clears things up a bit!
Also, if you want to give Python a try, you can do the same code, but much simpler (example here).
Either way, let us know if there's anything else we can do to help!

Invoking http.get sequentially when the list length is unknown

Let's suppose I have the following:
var myurls = ['http://server1.com', 'http://server2.com', 'http:sever2.com', etc ]
Each url is a "fallback" and should be used only if the previous one cannot be reached. In other words, this list specifies a priority. Let's also assume that this list can be of any length - I don't know it in advance and must iterate.
How do I go about writing a function, let's say "reachability", that loops through this array and returns the first reachable server?
I can't do $http.all as it runs in parallel. I can't run a while loop with an $http.get inside, because the result may come later, and in the meantime my UI will freeze.
Please note I am not using jQuery. I am using ionic, which has a version of jQuery-lite in it.
Various examples I've seen talk about chaining them in .then, which is fine if you know the number of URLs beforehand, but I don't.
Thanks.
Just reduce over the array:
myurls.reduce((p, url) => p.catch(() => http.get(url).then(() => url)),
Promise.reject());
Flow explained:
It's based off the perhaps more common pattern of using reduce to build a promise chain, like so: [func1, func2].reduce((p, f) => p.then(f), Promise.resolve()); is equivalent to Promise.resolve().then(func1).then(func2) (the last arg of reduce is the initial value).
In your case, since you're retrying on failure, you want to build a retry (or reject) chain, so we must start with Promise.reject() instead: Promise.reject().catch(func1).catch(func2).
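Written out by hand for a three-element array (a sketch of what the reduce builds up), the resulting chain is equivalent to:
// Each .catch is a fallback attempt; the first successful get skips the remaining catches
Promise.reject()
    .catch(() => http.get(myurls[0]).then(() => myurls[0]))
    .catch(() => http.get(myurls[1]).then(() => myurls[1]))
    .catch(() => http.get(myurls[2]).then(() => myurls[2]))
    .then(firstReachableUrl => console.log('Reachable:', firstReachableUrl))
    .catch(() => console.log('No reachable URL'));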
I guess recursion and chaining could suit your needs:
var findFirstReachableUrl = function (urls) {
    if (urls.length > 0) {
        return $http.get(urls[0]).then(function () {
            return urls[0];
        }, function () {
            return findFirstReachableUrl(urls.slice(1));
        });
    } else {
        return $q.reject("No reachable URL");
    }
}
Call:
findFirstReachableUrl(myurls).then(function (firstReachableUrl) {
    // OK: do something with firstReachableUrl
}, function () {
    // KO: no url could be reached
});
