Concurrent async requests - keep track of necessary timeout - javascript

With Nominatim requests I need to keep a delay of 1500 ms between requests. How do I handle that when multiple concurrently running processes keep calling the function?
Code:
osmService:
export const getLocation = async (zipCode, street) => {
  if (!zipCode) {
    return {
      address: null,
      cacheHit: false,
    };
  }
  const cacheData = await getAddress(zipCode, street);
  if (cacheData && cacheData.length === 1) {
    return { address: cacheData[0].address, cacheHit: true };
  }
  if (cacheData && cacheData.length > 1) {
    logger.error('Found multiple addresses, this should not be', zipCode + street);
  }
  try {
    const responseObj = await getNominatimLocation(zipCode, street);
    if (responseObj) {
      const returnObj = {
        ...responseObj.address,
        lat: responseObj.lat,
        lon: responseObj.lon,
        displayName: responseObj.display_name,
      };
      await insertAddress(zipCode, street, null, returnObj);
      return {
        address: returnObj,
        cacheHit: false,
      };
    }
    return {
      address: null,
      cacheHit: false,
    };
  } catch (ex) {
    logger.error(`Error getting location from ${zipCode} ${street}`, ex);
  }
  return {
    address: null,
    cacheHit: false,
  };
};
As you can see, there is a caching layer in between, so when the request is served from the cache I do not need to wait 1500 ms.
export const getNominatimLocation = async (zipCode, street, query, retries = 0) => {
  if (retries > 5) {
    return null;
  }
  try {
    const qs = {
      format: 'json',
      q: query,
      postalcode: zipCode,
      addressdetails: 1,
      country: 'Deutschland',
      street,
      'accept-language': 'de',
    };
    const response = await requestPromise()
      .get({
        url: OSM_SEARCH,
        qs,
        timeout: 12000,
        headers: {
          'User-Agent': 'xxxxxxx',
        },
      });
    return JSON.parse(response)[0];
  } catch (ex) {
    logger.info(`Nominatim timed out - retry ${retries}`, ex);
    await timeout(9000);
    return await getNominatimLocation(zipCode, street, query, retries + 1);
  }
};
Since Nominatim often times out, I retry via this recursive call (it does not need to be recursive; that was just easier). A non-recursive version is sketched below.
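For reference, the same retry logic written as a plain loop; a sketch of an alternative version, assuming the same helpers (requestPromise, timeout, OSM_SEARCH, logger) as the code above:

export const getNominatimLocation = async (zipCode, street, query) => {
  const qs = {
    format: 'json',
    q: query,
    postalcode: zipCode,
    addressdetails: 1,
    country: 'Deutschland',
    street,
    'accept-language': 'de',
  };
  for (let retries = 0; retries <= 5; retries++) {
    try {
      const response = await requestPromise().get({
        url: OSM_SEARCH,
        qs,
        timeout: 12000,
        headers: { 'User-Agent': 'xxxxxxx' },
      });
      return JSON.parse(response)[0];
    } catch (ex) {
      logger.info(`Nominatim timed out - retry ${retries}`, ex);
      await timeout(9000); // back off before the next attempt
    }
  }
  return null; // all retries exhausted
};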
Now let's suppose I have the following jobs (backend engine) that want to get locations:
const jobA = async () => {
  const { address, cacheHit } = await getLocation(10243);
  if (!cacheHit) {
    await timeout(1500);
  }
};
const jobB = async () => {
  const { address, cacheHit } = await getLocation(10245);
  if (!cacheHit) {
    await timeout(1500);
  }
};
const startJobs = async () => {
  await Promise.all([jobA(), jobB()]);
  console.log('all jobs done');
};
The jobs partially represent my current code structure. In my code the jobs do more (call other services etc.).
Now, with this layout, how can I make sure to keep 1500 ms between Nominatim calls when there is no cacheHit?

You could use a lock that only unlocks every 1.5 seconds:
let lock = Promise.resolve();
let acquireLock = () => (lock = lock.then(() => new Promise(res => setTimeout(res, 1500))));
Then
await acquireLock();
// will only run every 1.5 seconds
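To connect this to the code above, a minimal sketch of how getLocation could acquire a slot only on the non-cached path (assuming the lock lives in the same module), so cache hits stay instant:

let lock = Promise.resolve();
const acquireLock = () => (lock = lock.then(() => new Promise(res => setTimeout(res, 1500))));

export const getLocation = async (zipCode, street) => {
  const cacheData = await getAddress(zipCode, street);
  if (cacheData && cacheData.length === 1) {
    return { address: cacheData[0].address, cacheHit: true }; // no throttling needed
  }
  await acquireLock(); // wait for our 1.5 s slot; concurrent callers queue up here
  const responseObj = await getNominatimLocation(zipCode, street);
  // ...cache and return as in the original getLocation...
};

With this in place, the jobs no longer need their own timeout(1500); the spacing between Nominatim calls is enforced centrally, no matter how many jobs run concurrently.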

Related

How to update RTK Query cache when Firebase RTDB change event fired (update, write, create, delete)

I am using redux-toolkit, rtk-query (for querying other APIs, not just Firebase) and Firebase (for authentication and db).
The code below works just fine for retrieving and caching the data, but I wish to take advantage of both rtk-query caching and Firebase event subscribing, so that whenever a change is made in the DB (from any source, even directly in the Firebase console) the cache is updated.
I have tried both updateQueryCache and invalidateTags but so far I am not able to find an ideal approach that works.
Any assistance in pointing me in the right direction would be greatly appreciated.
// firebase.ts
export const onRead = (
  collection: string,
  callback: (snapshot: DataSnapshot) => void,
  options: ListenOptions = { onlyOnce: false }
) => onValue(ref(db, collection), callback, options);

export async function getCollection<T>(
  collection: string,
  onlyOnce: boolean = false
): Promise<T> {
  let timeout: NodeJS.Timeout;
  return new Promise<T>((resolve, reject) => {
    timeout = setTimeout(() => reject('Request timed out!'), ASYNC_TIMEOUT);
    onRead(collection, (snapshot) => resolve(snapshot.val()), { onlyOnce });
  }).finally(() => clearTimeout(timeout));
}
// awards.ts
const awards = dbApi
  .enhanceEndpoints({ addTagTypes: ['Themes'] })
  .injectEndpoints({
    endpoints: (builder) => ({
      getThemes: builder.query<ThemeData[], void>({
        async queryFn(arg, api) {
          try {
            const { auth } = api.getState() as RootState;
            const programme = auth.user?.unit.guidingProgramme!;
            const path = `/themes/${programme}`;
            const themes = await getCollection<ThemeData[]>(path, true);
            return { data: themes };
          } catch (error) {
            return { error: error as FirebaseError };
          }
        },
        providesTags: ['Themes'],
        keepUnusedDataFor: 1000 * 60
      }),
      getTheme: builder.query<ThemeData, string | undefined>({
        async queryFn(slug, api) {
          try {
            const initiate = awards.endpoints.getThemes.initiate;
            const getThemes = api.dispatch(initiate());
            const { data } = (await getThemes) as ApiResponse<ThemeData[]>;
            const name = slug
              ?.split('-')
              .map(
                (value) =>
                  value.substring(0, 1).toUpperCase() +
                  value.substring(1).toLowerCase()
              )
              .join(' ');
            return { data: data?.find((theme) => theme.name === name) };
          } catch (error) {
            return { error: error as FirebaseError };
          }
        },
        keepUnusedDataFor: 0
      })
    })
  });
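One pattern that may fit here is RTK Query's onCacheEntryAdded lifecycle, which can hold a Firebase listener open for as long as the cache entry lives and push every RTDB change straight into the cache. A hedged sketch for getThemes, assuming the same db, ref, onValue imports as firebase.ts and the same /themes/${programme} path (not verified against this exact store setup):

getThemes: builder.query<ThemeData[], void>({
  async queryFn(arg, api) {
    try {
      const { auth } = api.getState() as RootState;
      const path = `/themes/${auth.user?.unit.guidingProgramme}`;
      return { data: await getCollection<ThemeData[]>(path, true) };
    } catch (error) {
      return { error: error as FirebaseError };
    }
  },
  providesTags: ['Themes'],
  async onCacheEntryAdded(
    arg,
    { updateCachedData, cacheDataLoaded, cacheEntryRemoved, getState }
  ) {
    await cacheDataLoaded; // wait until the initial queryFn has resolved
    const { auth } = getState() as RootState;
    const path = `/themes/${auth.user?.unit.guidingProgramme}`;
    // every RTDB change (from any source) rewrites the cached data in place
    const unsubscribe = onValue(ref(db, path), (snapshot) => {
      updateCachedData(() => snapshot.val());
    });
    await cacheEntryRemoved; // resolves when the last subscriber goes away
    unsubscribe();
  }
}),

This keeps the rtk-query cache as the single source of truth while the subscription is active, instead of invalidating tags and refetching.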

Solana method cannot convert string to Base58 format

I'm using @solana/web3.js and have this code:
const web3 = require("@solana/web3.js");
const clusterApi = process.env.SOLANA_CLUSTER;

module.exports = {
  getConfirmedSignaturesForAddress: async address => {
    try {
      const connection = new web3.Connection(web3.clusterApiUrl(clusterApi), "confirmed");
      const result = await connection.getSignaturesForAddress(address, {
        limit: 25
      });
      return {
        tx: result,
        status: true
      };
    } catch (e) {
      return {
        status: false,
        error: e.message
      };
    }
  }
};
And every time I call this function I get this error:
{ status: false, error: 'address.toBase58 is not a function' }
I was trying to send it already converted to Base58, but it just doesn't work. What's wrong?
This is how I solved the problem. Generally speaking, you need to convert the address not just to plain Base58, but into a key object, like this:
const web3 = require("@solana/web3.js");
const bip39 = require("bip39");

const getKeyFromMnemonic = async mnemonic => {
  return new Promise((resolve, reject) => {
    bip39
      .mnemonicToSeed(mnemonic)
      .then(buffer => {
        const a = new Uint8Array(buffer.toJSON().data.slice(0, 32));
        const key = web3.Keypair.fromSeed(a);
        resolve(key);
      })
      .catch(err => reject(err));
  });
};

getSignaturesForAddress: async address => {
  try {
    const key = await getKeyFromMnemonic(address);
    const connection = new web3.Connection(web3.clusterApiUrl(clusterApi), "confirmed");
    const result = await connection.getSignaturesForAddress(key.publicKey);
    return {
      tx: result,
      status: true
    };
  } catch (e) {
    return {
      status: false,
      error: e.message
    };
  }
}
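For what it's worth, the error 'address.toBase58 is not a function' usually just means a plain string was passed where the SDK expects a PublicKey instance. If the value is already a base-58 address string (rather than a mnemonic, as above), wrapping it may be all that's required; a minimal sketch (getSignatures is just an illustrative name):

const web3 = require("@solana/web3.js");

const getSignatures = async (address) => {
  const connection = new web3.Connection(web3.clusterApiUrl(process.env.SOLANA_CLUSTER), "confirmed");
  // PublicKey accepts a base-58 string and provides the toBase58() the SDK calls internally
  return connection.getSignaturesForAddress(new web3.PublicKey(address), { limit: 25 });
};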

How to Periodically Check Whether Data from API Has Changed

I'm fetching weather data from the OpenWeather API for a given location and want to check, every minute, whether that data is still current (and if not, change it). I've used setInterval, but the data doesn't seem to update every minute. Here are the functions in question.
In the controller...
const controlStation = async function (station) {
  try {
    // Updates weather given station
    const weather = await model.updateWeather(station);
    // Periodically checks if weather data is current
    // If not, updates weather
    let checkWeather = await model.checkWeather(station);
    setInterval(checkWeather, 1 * MINUTES);
    // Renders weather
    weatherView.render(model.stations[station], weather);
  } catch (err) {
    console.log(err);
  }
};
controlStation("fbi");
In the model...
export const state = {};
export const stations = {
  fbi: {
    name: "fbi",
    city: "Sydney, Australia",
    coordinates: [-33.5346, 151.12],
  },
  kutx: {
    name: "kutx",
    city: "Austin, Texas, United States of America",
    coordinates: [30.1721, -97.4402],
  },
  cism: {
    name: "cism",
    city: "Montreal, Quebec, Canada",
    coordinates: [45.3023, -73.3644],
  },
};

export const updateWeather = async function (station) {
  try {
    const [lat, lng] = stations[station].coordinates;
    const url = `${API_WEATHER_URL}lat=${lat}&lon=${lng}&appid=${API_WEATHER_KEY}&units=imperial`;
    const data = await fetch(url);
    const weather = await data.json();
    state.station = station;
    state.weather = weather;
    return weather;
  } catch (err) {
    console.error(err);
  }
};

export const checkWeather = async function (station) {
  try {
    console.log("Checking weather!");
    let needsUpdate = false; // was const, which would throw on the reassignment below
    // shallow copy: updateWeather reassigns state.weather, so prev.weather keeps the old object
    const prev = { ...state };
    console.log("prev", prev.weather);
    const cur = await updateWeather(state.station);
    console.log("cur", cur);
    if (
      prev.weather.wind.speed !== cur.wind.speed ||
      prev.weather.wind.dir !== cur.wind.dir ||
      prev.weather.main.temp !== cur.main.temp ||
      prev.weather.weather[0].description !== cur.weather[0].description
    ) {
      console.log("Changing now");
      needsUpdate = true;
    } else console.log(`They were same at ${Date.now()}`);
    return needsUpdate;
  } catch (err) {
    console.error(err);
  }
};
I know I still need to do something if the weather data has changed and is different from what's in state, but I don't even see it making a new comparison through the checkWeather function every minute.
/////////////////
UPDATE:
I discovered that the issue was that I passed setInterval the awaited result of calling model.checkWeather(station), rather than a function reference (and plain setInterval doesn't await an async callback anyway). There's a node package for creating a setInterval with an async callback function, but I don't know Node yet, so instead I grabbed this workaround off another StackOverflow answer.
async function execute1() {
  while (true) {
    await new Promise((resolve) => setTimeout(resolve, 2 * MINUTES));
    await model.checkWeather(station);
  }
}
execute1();
Now my program successfully compares the results from a new API call to the data stored in state. It recognizes when things have changed and when they haven't, and now I'm going to update state when the weather conditions have changed. Thanks all for the help!
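For the record, plain setInterval can also drive an async function, as long as it is handed a function reference rather than the awaited result of a call; a minimal sketch, reusing model and MINUTES from above:

// Pass a function that starts the async work; don't call-and-await it first.
setInterval(() => {
  model.checkWeather(station).catch(console.error);
}, 1 * MINUTES);

The while/await workaround above has one advantage over this: runs can never overlap, because each check finishes before the next delay starts.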

Twitter API cursor navigation with Async node.js

I am trying to use Twitter's API with node.js using async/await (which I admit I am new to), but I am struggling to get to the next cursor value.
Why does my getFollowers function below always return before the await block?
require('dotenv').config();
const Twitter = require('twitter');
const client = new Twitter({
  consumer_key: process.env.API_KEY,
  consumer_secret: process.env.API_KEY_SECRET,
  access_token_key: process.env.ACCESS_TOKEN,
  access_token_secret: process.env.ACCESS_TOKEN_SECRET
});

const getFollowers = async (screen_name, count, cursor) => {
  console.log("Cursor: " + cursor);
  const params = {
    screen_name: screen_name,
    count: count,
    cursor: cursor
  };
  const promise = await client.get('followers/list', params)
    .then(data => {
      console.log("This promise is never executed...");
      return data.next_cursor;
    })
    .catch(err => console.error(err));
  return promise;
}

const main = async () => {
  let cursor = -1;
  while (cursor != 0) {
    getFollowers(process.env.SCREEN_NAME, 200, cursor)
      .then(next_cursor => {
        cursor = next_cursor;
        console.log("This promise is never executed either... " + cursor);
      });
  }
}
main();
With your .then statement in main(), you weren't waiting for client.get() to resolve, but for data.next_cursor, so the promise of client.get() remained pending.
Instead, return the promise of client.get() from getFollowers(). This makes sure that when you call getFollowers().then() in main(), you are referring to client.get().
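Concretely, the smallest fix may be to await inside the loop, so main() yields to the event loop between requests instead of spinning synchronously; a hedged sketch:

const getFollowers = async (screen_name, count, cursor) => {
  const params = { screen_name, count, cursor };
  const data = await client.get('followers/list', params);
  return data.next_cursor;
};

const main = async () => {
  let cursor = -1;
  while (cursor !== 0) {
    // awaiting here lets each request complete before the next iteration
    cursor = await getFollowers(process.env.SCREEN_NAME, 200, cursor);
  }
};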
Edit:
Following the line of thought in the answer in this question, I have modified getFollowers(). It now includes a promise that is resolved when cursor hits the value of 0. Every other value, a request will be made.
I have a concern though with the rate limit of requests, which is set to 15 per 15 minutes. Since a new request is made for every non 0 next_cursor value, you'll reach this limit quite soon for accounts with many followers.
Also note that the data retrieved will be stored in an array. I am not sure what your use case exactly is.
const Twitter = require('twitter');
const client = new Twitter({
  consumer_key: '',
  consumer_secret: '',
  bearer_token: ''
});

let output = [];
const getFollowers = (screen_name, count) => {
  let cursor = -1;
  const params = {
    screen_name: screen_name,
    count: count,
    cursor: cursor
  };
  return new Promise((resolve, reject) => {
    client.get('followers/list', params, function getData(err, data, response) {
      if (err) return reject(response.body);
      output.push(data);
      cursor = data.next_cursor;
      // keep the request params in sync with the new cursor before re-requesting
      params.cursor = cursor;
      if (cursor > 0) {
        client.get('followers/list', params, getData);
      }
      if (cursor === 0) { // was `cursor = 0`, an assignment whose falsy result meant the promise never resolved
        resolve('done');
      }
    });
  });
};

const main = async () => {
  await getFollowers('MozDevNet', 200);
  console.log(output);
};
I gave up on the implementation using the Twitter package and switched to using axios instead.
require('dotenv').config();
const axios = require('axios');

const credentials = {
  consumer_key: process.env.API_KEY,
  consumer_secret: process.env.API_KEY_SECRET,
  access_token_key: process.env.ACCESS_TOKEN,
  access_token_secret: process.env.ACCESS_TOKEN_SECRET
};
const FOLLOWERS_LIST_ENDPOINT = "https://api.twitter.com/1.1/followers/list.json";

// documentation: https://developer.twitter.com/en/docs/authentication/oauth-2-0/application-only
const generateToken = async () => {
  return process.env.BEARER_TOKEN;
}

// documentation: https://developer.twitter.com/en/docs/twitter-api/v1/accounts-and-users/follow-search-get-users/api-reference/get-followers-list
const getFollowers = async (screen_name, count, cursor) => {
  let token = await generateToken();
  let requestConfig = {
    params: {
      screen_name: screen_name,
      count: count,
      cursor: cursor,
      include_user_entities: false
    },
    headers: {
      Authorization: `Bearer ${token}`
    }
  };
  let response = await axios.get(FOLLOWERS_LIST_ENDPOINT, requestConfig);
  let users = response.data.users;
  processUsers(users);
  return response.data.next_cursor;
};

const processUsers = (users) => {
  users.map(user => {
    console.log(user.screen_name);
  });
}

const main = async () => {
  let cursor = -1;
  while (cursor != 0) {
    cursor = await getFollowers(process.env.SCREEN_NAME, 200, cursor);
  }
}
main();
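Given the 15-requests-per-15-minutes limit mentioned earlier, it may also be worth backing off when the API answers HTTP 429; a hedged sketch wrapping the axios-based getFollowers above (getFollowersWithBackoff is an illustrative name; x-rate-limit-reset is the v1.1 header carrying the window reset time as a Unix timestamp):

const getFollowersWithBackoff = async (screen_name, count, cursor) => {
  try {
    return await getFollowers(screen_name, count, cursor);
  } catch (err) {
    if (err.response && err.response.status === 429) {
      // sleep until the rate-limit window resets, then retry once
      const resetMs = Number(err.response.headers['x-rate-limit-reset']) * 1000;
      await new Promise((resolve) => setTimeout(resolve, Math.max(resetMs - Date.now(), 0)));
      return getFollowers(screen_name, count, cursor);
    }
    throw err;
  }
};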

How can I update more than 500 docs in Firestore using Batch?

I'm trying to update a field timestamp with the Firestore admin timestamp in a collection with more than 500 docs.
const batch = db.batch();
const serverTimestamp = admin.firestore.FieldValue.serverTimestamp();

db
  .collection('My Collection')
  .get()
  .then((docs) => {
    docs.forEach((doc) => {
      batch.set(doc.ref, {
        serverTimestamp,
      }, {
        merge: true,
      });
    });
    return batch.commit();
  })
  .then(() => res.send('All docs updated'))
  .catch(console.error);
This throws an error
{ Error: 3 INVALID_ARGUMENT: cannot write more than 500 entities in a single call
at Object.exports.createStatusError (C:\Users\Growthfile\Desktop\cf-test\functions\node_modules\grpc\src\common.js:87:15)
at Object.onReceiveStatus (C:\Users\Growthfile\Desktop\cf-test\functions\node_modules\grpc\src\client_interceptors.js:1188:28)
at InterceptingListener._callNext (C:\Users\Growthfile\Desktop\cf-test\functions\node_modules\grpc\src\client_interceptors.js:564:42)
at InterceptingListener.onReceiveStatus (C:\Users\Growthfile\Desktop\cf-test\functions\node_modules\grpc\src\client_interceptors.js:614:8)
at callback (C:\Users\Growthfile\Desktop\cf-test\functions\node_modules\grpc\src\client_interceptors.js:841:24)
code: 3,
metadata: Metadata { _internal_repr: {} },
details: 'cannot write more than 500 entities in a single call' }
Is there a way to write a recursive method that creates a batch object updating batches of 500 docs one by one until all the docs are updated?
From the docs I know that delete operation is possible with the recursive approach as mentioned here:
https://firebase.google.com/docs/firestore/manage-data/delete-data#collections
But, for updating, I'm not sure how to end the execution since the docs are not being deleted.
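One way to get a natural stopping point for updates (a sketch of the paging approach, analogous to the recursive delete; updateAll is an illustrative name) is to page the query with a cursor, so each pass is bounded at 500 writes and the loop ends when a page comes back empty:

const updateAll = async (collectionRef, updateData) => {
  let last = null;
  for (;;) {
    // page through the collection 500 docs at a time, ordered by document ID
    let query = collectionRef.orderBy('__name__').limit(500);
    if (last) query = query.startAfter(last);
    const snapshot = await query.get();
    if (snapshot.empty) return; // nothing left to update: this ends the execution
    const batch = db.batch();
    snapshot.docs.forEach((doc) => batch.update(doc.ref, updateData));
    await batch.commit();
    last = snapshot.docs[snapshot.docs.length - 1];
  }
};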
I also ran into the problem of updating more than 500 documents inside a Firestore collection, and I would like to share how I solved it.
I use Cloud Functions to update my collection inside Firestore, but this should also work in client-side code.
The solution counts every operation made on the batch, and after the limit is reached a new batch is created and pushed to the batchArray.
After all updates are staged, the code loops through the batchArray and commits every batch inside the array.
It is important to count every operation set(), update(), delete() made on the batch, because they all count toward the 500-operation limit.
const documentSnapshotArray = await firestore.collection('my-collection').get();

const batchArray = [];
batchArray.push(firestore.batch());
let operationCounter = 0;
let batchIndex = 0;

documentSnapshotArray.forEach(documentSnapshot => {
  const documentData = documentSnapshot.data();
  // update document data here...
  batchArray[batchIndex].update(documentSnapshot.ref, documentData);
  operationCounter++;
  if (operationCounter === 499) {
    batchArray.push(firestore.batch());
    batchIndex++;
    operationCounter = 0;
  }
});

// commit all batches and wait for them (forEach with an async callback would not be awaited)
await Promise.all(batchArray.map(batch => batch.commit()));
return;
I liked this simple solution:
const users = await db.collection('users').get()

const batches = _.chunk(users.docs, 500).map(userDocs => {
  const batch = db.batch()
  userDocs.forEach(doc => {
    batch.set(doc.ref, { field: 'myNewValue' }, { merge: true })
  })
  return batch.commit()
})

await Promise.all(batches)
Just remember to add import * as _ from "lodash" at the top. Based on this answer.
You can use the default BulkWriter. It applies the 500/50/5 rule (start at 500 writes per second and ramp up by 50% every 5 minutes).
Example:
let bulkWriter = firestore.bulkWriter();

bulkWriter.create(documentRef, { foo: 'bar' });
bulkWriter.update(documentRef2, { foo: 'bar' });
bulkWriter.delete(documentRef3);
await bulkWriter.close().then(() => {
  console.log('Executed all writes');
});
As mentioned above, @Sebastian's answer is good and I upvoted it too, although I faced an issue while updating 25000+ documents in one go.
The tweak to the logic is as below.
console.log(`Updating documents...`);
let collectionRef = db.collection('cities');
try {
  let batch = db.batch();
  const documentSnapshotArray = await collectionRef.get();
  const records = documentSnapshotArray.docs;
  const index = documentSnapshotArray.size;
  console.log(`TOTAL SIZE=====${index}`);
  for (let i = 0; i < index; i++) {
    const docRef = records[i].ref;
    // YOUR UPDATES
    batch.update(docRef, { isDeleted: false });
    if ((i + 1) % 499 === 0) {
      await batch.commit();
      batch = db.batch();
    }
  }
  // For committing the final partial batch
  if (index % 499 !== 0) {
    await batch.commit();
  }
  console.log('write completed');
} catch (error) {
  console.error(`updateWorkers() errored out : ${error.stack}`);
  reject(error); // assumes an enclosing Promise executor that provides reject
}
The previous answers already explain the issue.
I'm sharing the final code that I built and that worked for me, since I needed something that worked in a more decoupled manner than most of the solutions above.
import { FireDb } from "#services/firebase"; // = firebase.firestore();

type TDocRef = FirebaseFirestore.DocumentReference;
type TDocData = FirebaseFirestore.DocumentData;

let fireBatches = [FireDb.batch()];
let batchSizes = [0];
let batchIdxToUse = 0;

export default class FirebaseUtil {
  static addBatchOperation(
    operation: "create",
    ref: TDocRef,
    data: TDocData
  ): void;
  static addBatchOperation(
    operation: "update",
    ref: TDocRef,
    data: TDocData,
    precondition?: FirebaseFirestore.Precondition
  ): void;
  static addBatchOperation(
    operation: "set",
    ref: TDocRef,
    data: TDocData,
    setOpts?: FirebaseFirestore.SetOptions
  ): void;
  static addBatchOperation(
    operation: "create" | "update" | "set",
    ref: TDocRef,
    data: TDocData,
    opts?: FirebaseFirestore.Precondition | FirebaseFirestore.SetOptions
  ): void {
    // Lines below make sure we stay below the limit of 500 writes per batch
    if (batchSizes[batchIdxToUse] === 500) {
      fireBatches.push(FireDb.batch());
      batchSizes.push(0);
      batchIdxToUse++;
    }
    batchSizes[batchIdxToUse]++;

    const batchArgs: [TDocRef, TDocData] = [ref, data];
    if (opts) batchArgs.push(opts);

    switch (operation) {
      // Specific case for "set" is required because of some weird TS
      // glitch that doesn't allow me to use the arg "operation" to
      // call the function
      case "set":
        fireBatches[batchIdxToUse].set(...batchArgs);
        break;
      default:
        fireBatches[batchIdxToUse][operation](...batchArgs);
        break;
    }
  }

  public static async runBatchOperations() {
    // The lines below clear the globally available batches so we
    // don't run them twice if we call this function more than once
    const currentBatches = [...fireBatches];
    fireBatches = [FireDb.batch()];
    batchSizes = [0];
    batchIdxToUse = 0;

    await Promise.all(currentBatches.map((batch) => batch.commit()));
  }
}
Based on all the above answers, I put together the following pieces of code that one can put into a module in JavaScript back-end and front-end to easily use Firestore batch writes, without worrying about the 500 writes limit.
Back-end (Node.js)
// The Firebase Admin SDK to access Firestore.
const admin = require("firebase-admin");
admin.initializeApp();

// Firestore does not accept more than 500 writes in a transaction or batch write.
const MAX_TRANSACTION_WRITES = 499;

const isFirestoreDeadlineError = (err) => {
  console.log({ err });
  const errString = err.toString();
  return (
    errString.includes("Error: 13 INTERNAL: Received RST_STREAM") ||
    errString.includes("Error: 4 DEADLINE_EXCEEDED: Deadline exceeded")
  );
};

const db = admin.firestore();

// How many transactions/batchWrites out of 500 so far.
// I wrote the following functions to easily use batchWrites without worrying about the 500 limit.
let writeCounts = 0;
let batchIndex = 0;
let batchArray = [db.batch()];

// Commit and reset batchWrites and the counter.
const makeCommitBatch = async () => {
  console.log("makeCommitBatch");
  await Promise.all(batchArray.map((bch) => bch.commit()));
};

// Commit the batchWrite; if you get a Firestore Deadline Error, try again every 4 seconds until it gets resolved.
const commitBatch = async () => {
  try {
    await makeCommitBatch();
  } catch (err) {
    console.log({ err });
    if (isFirestoreDeadlineError(err)) {
      const theInterval = setInterval(async () => {
        try {
          await makeCommitBatch();
          clearInterval(theInterval);
        } catch (err) {
          console.log({ err });
          if (!isFirestoreDeadlineError(err)) {
            clearInterval(theInterval);
            throw err;
          }
        }
      }, 4000);
    }
  }
};

// If the batchWrite exceeds 499 possible writes, start a new batch object and reset the counter.
const checkRestartBatchWriteCounts = () => {
  writeCounts += 1;
  if (writeCounts >= MAX_TRANSACTION_WRITES) {
    batchIndex++;
    batchArray.push(db.batch());
    writeCounts = 0;
  }
};

const batchSet = (docRef, docData) => {
  batchArray[batchIndex].set(docRef, docData);
  checkRestartBatchWriteCounts();
};
const batchUpdate = (docRef, docData) => {
  batchArray[batchIndex].update(docRef, docData);
  checkRestartBatchWriteCounts();
};
const batchDelete = (docRef) => {
  batchArray[batchIndex].delete(docRef);
  checkRestartBatchWriteCounts();
};

module.exports = {
  admin,
  db,
  MAX_TRANSACTION_WRITES,
  checkRestartBatchWriteCounts,
  commitBatch,
  isFirestoreDeadlineError,
  batchSet,
  batchUpdate,
  batchDelete,
};
Front-end
// Firestore does not accept more than 500 writes in a transaction or batch write.
const MAX_TRANSACTION_WRITES = 499;

const isFirestoreDeadlineError = (err) => {
  return (
    err.message.includes("DEADLINE_EXCEEDED") ||
    err.message.includes("Received RST_STREAM")
  );
};

class Firebase {
  constructor(fireConfig, instanceName) {
    let app = fbApp;
    if (instanceName) {
      app = app.initializeApp(fireConfig, instanceName);
    } else {
      app.initializeApp(fireConfig);
    }
    this.name = app.name;
    this.db = app.firestore();
    this.firestore = app.firestore;
    // How many transactions/batchWrites out of 500 so far.
    // I wrote the following functions to easily use batchWrites without worrying about the 500 limit.
    this.writeCounts = 0;
    this.batch = this.db.batch();
    this.isCommitting = false;
  }

  async makeCommitBatch() {
    console.log("makeCommitBatch");
    if (!this.isCommitting) {
      this.isCommitting = true;
      await this.batch.commit();
      this.writeCounts = 0;
      this.batch = this.db.batch();
      this.isCommitting = false;
    } else {
      const batchWaitInterval = setInterval(async () => {
        if (!this.isCommitting) {
          this.isCommitting = true;
          await this.batch.commit();
          this.writeCounts = 0;
          this.batch = this.db.batch();
          this.isCommitting = false;
          clearInterval(batchWaitInterval);
        }
      }, 400);
    }
  }

  async commitBatch() {
    try {
      await this.makeCommitBatch();
    } catch (err) {
      console.log({ err });
      if (isFirestoreDeadlineError(err)) {
        const theInterval = setInterval(async () => {
          try {
            await this.makeCommitBatch();
            clearInterval(theInterval);
          } catch (err) {
            console.log({ err });
            if (!isFirestoreDeadlineError(err)) {
              clearInterval(theInterval);
              throw err;
            }
          }
        }, 4000);
      }
    }
  }

  async checkRestartBatchWriteCounts() {
    this.writeCounts += 1;
    if (this.writeCounts >= MAX_TRANSACTION_WRITES) {
      await this.commitBatch();
    }
  }

  async batchSet(docRef, docData) {
    if (!this.isCommitting) {
      this.batch.set(docRef, docData);
      await this.checkRestartBatchWriteCounts();
    } else {
      const batchWaitInterval = setInterval(async () => {
        if (!this.isCommitting) {
          this.batch.set(docRef, docData);
          await this.checkRestartBatchWriteCounts();
          clearInterval(batchWaitInterval);
        }
      }, 400);
    }
  }

  async batchUpdate(docRef, docData) {
    if (!this.isCommitting) {
      this.batch.update(docRef, docData);
      await this.checkRestartBatchWriteCounts();
    } else {
      const batchWaitInterval = setInterval(async () => {
        if (!this.isCommitting) {
          this.batch.update(docRef, docData);
          await this.checkRestartBatchWriteCounts();
          clearInterval(batchWaitInterval);
        }
      }, 400);
    }
  }

  async batchDelete(docRef) {
    if (!this.isCommitting) {
      this.batch.delete(docRef);
      await this.checkRestartBatchWriteCounts();
    } else {
      const batchWaitInterval = setInterval(async () => {
        if (!this.isCommitting) {
          this.batch.delete(docRef);
          await this.checkRestartBatchWriteCounts();
          clearInterval(batchWaitInterval);
        }
      }, 400);
    }
  }
}
No citations or documentation; I invented this code myself, it worked for me, and it is clean and simple to read and use. If someone likes it, they can use it too.
Better write an autotest for it, because the code uses the private var _ops, which can change after a package upgrade. For example, in old versions it was _mutations.
async function commitBatch(batch) {
  const MAX_OPERATIONS_PER_COMMIT = 500;
  while (batch._ops.length > MAX_OPERATIONS_PER_COMMIT) {
    const batchPart = admin.firestore().batch();
    batchPart._ops = batch._ops.splice(0, MAX_OPERATIONS_PER_COMMIT - 1);
    await batchPart.commit();
  }
  await batch.commit();
}
Usage:
const batch = admin.firestore().batch();
batch.delete(someRef);
batch.update(someRef);
...
await commitBatch(batch);
Simple solution: just fire twice?
My array is resultsFinal.
I fire the batch once with a limit of 490, and a second time with a limit of the length of the array (results.length).
Works fine for me :)
How do you check it?
Go to Firebase and delete your collection; Firebase says you have deleted XXX docs, the same as the length of your array? Then you are good to go.
async function quickstart(results) {
  // we receive results as a parameter so the data is available inside quickstart
  const resultsFinal = results;
  // console.log(resultsFinal.length);
  let batch = firestore.batch();
  // limit of firebase is 500 requests per transaction/batch/send
  for (let i = 0; i < 490; i++) {
    const doc = firestore.collection('testMore490').doc();
    const object = resultsFinal[i];
    batch.set(doc, object);
  }
  await batch.commit();
  // const batchTwo = firestore.batch();
  batch = firestore.batch();
  // note: start at 490, not 491, or one element would be skipped
  for (let i = 490; i < 776; i++) {
    const objectPartTwo = resultsFinal[i];
    const doc = firestore.collection('testMore490').doc();
    batch.set(doc, objectPartTwo);
  }
  await batch.commit();
}
