How to use promises with IndexedDB without transactions auto-committing? - javascript

Is there any way to use IndexedDB with promises and async/await without the transactions auto-committing? I understand that you can't do stuff like fetch network data in the middle of a transaction, but everything I was able to find online on the subject indicates that IndexedDB should still work if you simply wrap it in promises.
However, in my testing (Firefox 73), I found that simply wrapping the request's onsuccess method in a Promise is enough to cause the transaction to auto-commit before the promise executes, while the same code works when using the raw IndexedDB API. What can I do?
Here is a simplified minimal example of my code.
const {log, error, trace, assert} = console;
const VERSION = 1;
const OBJ_STORE_NAME = 'test';
const DATA_KEY = 'data';
const META_KEY = 'last-updated';
function open_db(name, version) {
    return new Promise((resolve, reject) => {
        const req = indexedDB.open(name, version);
        req.onerror = reject;
        req.onupgradeneeded = e => {
            const db = e.target.result;
            for (const name of db.objectStoreNames) {db.deleteObjectStore(name);}
            db.createObjectStore(OBJ_STORE_NAME);
        };
        req.onsuccess = e => resolve(e.target.result);
    });
}
function idbreq(objs, method, ...args) {
    return new Promise((resolve, reject) => {
        const req = objs[method](...args);
        req.onsuccess = e => resolve(req.result);
        req.onerror = e => reject(req.error);
    });
}
async function update_db(db) {
    const new_time = (new Date).toISOString();
    const new_data = 42; // simplified for sake of example
    const [old_data, last_time] = await (() => {
        const t = db.transaction([OBJ_STORE_NAME], 'readonly');
        t.onabort = e => error('trans1 abort', e);
        t.onerror = e => error('trans1 error', e);
        t.oncomplete = e => log('trans1 complete', e);
        const obj_store = t.objectStore(OBJ_STORE_NAME);
        return Promise.all([
            idbreq(obj_store, 'get', DATA_KEY),
            idbreq(obj_store, 'get', META_KEY),
        ]);
    })();
    log('fetched data from db');
    // do stuff with data before writing it back
    (async () => {
        log('beginning write callback');
        const t = db.transaction([OBJ_STORE_NAME], 'readwrite');
        t.onabort = e => error('trans2 abort', e);
        t.onerror = e => error('trans2 error', e);
        t.oncomplete = e => log('trans2 complete', e);
        const obj_store = t.objectStore(OBJ_STORE_NAME);
        // This line works when using onsuccess directly, but simply wrapping it in a Promise causes the
        // transaction to autocommit before the rest of the code executes, resulting in an error.
        obj_store.get(META_KEY).onsuccess = ({target: {result: last_time2}}) => {
            log('last_time', last_time, 'last_time2', last_time2, 'new_time', new_time);
            // Check if some other transaction updated the db in the meantime so we don't overwrite newer data
            if (!last_time2 || last_time2 < new_time) {
                obj_store.put(new_time, META_KEY);
                obj_store.put(new_data, DATA_KEY);
            }
            log('finished write callback');
        };
        // This version of the above code using a Promise wrapper results in an error
        // idbreq(obj_store, 'get', META_KEY).then(last_time2 => {
        //     log('last_time', last_time, 'last_time2', last_time2, 'new_time', new_time);
        //     if (!last_time2 || last_time2 < new_time) {
        //         obj_store.put(new_time, META_KEY);
        //         obj_store.put(new_data, DATA_KEY);
        //     }
        //     log('finished write callback');
        // });
        // Ideally, I'd be able to use await like a civilized person, but the above example
        // shows that IndexedDB breaks when simply using promises, even without await.
        // const last_time2 = await idbreq(obj_store, 'get', META_KEY);
        // log('last_time', last_time, 'last_time2', last_time2, 'new_time', new_time);
        // if (!last_time2 || last_time2 < new_time) {
        //     obj_store.put(new_time, META_KEY);
        //     obj_store.put(new_data, DATA_KEY);
        // }
        // log('finished write callback');
    })();
    return [last_time, new_time];
}
open_db('test').then(update_db).then(([prev, new_]) => log(`updated db timestamp from ${prev} to ${new_}`));

Orchestrate promises around transactions, not individual requests.
If that causes problems with your design and you still want to use IndexedDB, then design around it. Reevaluate whether you actually need transactional safety, or whether you really need to reuse one transaction for several requests instead of creating several transactions with only a couple of requests each.
There is little to no overhead in spawning a large number of transactions with a small number of requests per transaction in comparison to spawning a small number of transactions with a large number of requests. The only real concern is consistency.
Any await is a yield in disguise. IndexedDB transactions time out when no requests are pending, and a yield creates a gap in time where nothing is pending, so the transaction will time out.
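In practice that means awaiting one promise per transaction rather than per request. A minimal sketch (the run_transaction helper and its callback signature are my own illustration, not the asker's code): the callback queues every request synchronously, and the returned promise only settles on the transaction's oncomplete/onabort, so nothing yields while requests are still being added.
// Sketch: one promise per transaction; `body` must issue all of its requests synchronously.
function run_transaction(db, store_name, mode, body) {
    return new Promise((resolve, reject) => {
        const t = db.transaction([store_name], mode);
        let result;
        body(t.objectStore(store_name), value => { result = value; });
        t.oncomplete = () => resolve(result); // only surface the value once the transaction has committed
        t.onabort = () => reject(t.error);
        t.onerror = () => reject(t.error);
    });
}
// Usage for the question's second (readwrite) transaction, reusing the question's names:
// await run_transaction(db, OBJ_STORE_NAME, 'readwrite', (store, done) => {
//     store.get(META_KEY).onsuccess = e => {
//         const last_time2 = e.target.result;
//         if (!last_time2 || last_time2 < new_time) {
//             store.put(new_time, META_KEY);
//             store.put(new_data, DATA_KEY);
//         }
//         done(last_time2);
//     };
// });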

It turns out that the problem was in a completely different part of my code.
At the end of my top level code, I had
.catch(e => {
    error('caught error', e);
    alert(e);
});
I'm not sure about the details, but showing an alert appears to cause all the transactions to auto-commit while the promises are still pending, leading to the errors I saw once the user clicks "OK" on the alert popup and the pending promises continue. Removing the alert call from my global error handler fixed the issue.
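For reference, a sketch of the handler after that change (the only difference from the snippet above is dropping the blocking dialog):
.catch(e => {
    error('caught error', e);
    // alert(e); // removed: the blocking dialog let open transactions auto-commit
    //           // while the wrapped requests' promises were still pending
});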

Related

Rate Limit: Add a buffer between API calls in map JavaScript

I'm using Google Gmail API to get sent emails.
I'm using 2 APIs for this -
list (https://developers.google.com/gmail/api/reference/rest/v1/users.messages/list)
get (https://developers.google.com/gmail/api/reference/rest/v1/users.messages/get)
The list API gives a list of messages IDs which I use to get specific data from the get API.
Here's the code for this -
await Promise.all(
    messages?.map(async (message) => {
        const messageData = await contacts.getSentGmailData(
            accessToken,
            message.id
        );
        return messageData;
    })
);
getSentGmailData is the get API here.
The problem here is, while mapping and making requests to this API continuously, I get a 429 (rateLimitExceeded) error.
What I tried is adding a buffer between each request like this -
function delay(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
const messageData = await contacts.getSentGmailData(accessToken, message.id);
await delay(200);
But this doesn't seem to work.
How can I work around this?
You can use something like the code below to add extra buffer time whenever you get a 429 (too many requests) from the Google API.
Basically, this code backs off and retries instead of continuing to call the API once you have exceeded the rate limit.
Note: this doesn't mean you can bypass the Google API rate limiter.
async function getSentGmailDataWithBackoff(accessToken, messageId) {
    const MAX_RETRIES = 5;
    let retries = 0;
    let delayMs = 200; // renamed so it doesn't shadow the delay() helper below
    while (true) {
        try {
            const messageData = await contacts.getSentGmailData(accessToken, messageId);
            return messageData;
        } catch (error) {
            if (error.response && error.response.status === 429 && retries < MAX_RETRIES) {
                retries++;
                console.log(`Rate limit exceeded. Retrying in ${delayMs}ms.`);
                await delay(delayMs);
                delayMs *= 2; // exponential backoff
            } else {
                throw error;
            }
        }
    }
}
async function getSentGmailDataWithBackoffBatch(accessToken, messageIds) {
    return Promise.all(
        messageIds.map(async (messageId) => {
            const messageData = await getSentGmailDataWithBackoff(accessToken, messageId);
            return messageData;
        })
    );
}
function delay(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
The reason the delay is not working is that map does not wait for each Promise to resolve before moving on to the next element, so all the requests are started at once. The same reasoning applies to forEach, filter, reduce, etc. You can get some idea here: https://gist.github.com/joeytwiddle/37d2085425c049629b80956d3c618971
If you had used a for-of loop or another for loop for this purpose, it would have worked, because each iteration waits:
for (let message of messages) {
    const messageData = await contacts.getSentGmailData(accessToken, message.id);
    await delay(200);
}
You could also write your own rate-limiting function (commonly called a throttling function) or use one provided by libraries like Lodash: https://lodash.com/docs#throttle
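For example, a rough hand-rolled sketch (the name mapWithRateLimit, the batch size, and the 200 ms pause are illustrative choices, not Gmail API requirements): process the IDs in small batches and pause between batches instead of firing every request at once.
async function mapWithRateLimit(items, fn, { batchSize = 5, pauseMs = 200 } = {}) {
    const results = [];
    for (let i = 0; i < items.length; i += batchSize) {
        const batch = items.slice(i, i + batchSize);
        // run one small batch in parallel...
        results.push(...await Promise.all(batch.map(fn)));
        // ...then pause before starting the next batch
        if (i + batchSize < items.length) {
            await new Promise((resolve) => setTimeout(resolve, pauseMs));
        }
    }
    return results;
}
// e.g. const data = await mapWithRateLimit(messages, (m) => contacts.getSentGmailData(accessToken, m.id));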

Asynchronicity in a reduce() function WITHOUT using async/await

I am patching the exec() function to allow subpopulating in Mongoose, which is why I am not able to use async/await here -- my function will be chained off a db call, so there is no opportunity to call await on it, and within the submodule itself I can't use await outside of an async function.
With that out of the way, let's look at what I'm trying to do. I have two separate arrays (matchingMealPlanFoods and matchingMealPlanRecipeFoods) full of IDs that I need to populate. Both of them reside on the same array, foods. They each require a db call with aggregation, and the problem in my current scenario is that only one of the arrays populates because they are happening asynchronously.
What I am trying to do now is use the reduce function to return the updated foods array to the next run of reduce so that when the final result is returned, I can replace the entire foods array once on my doc. The problem of course is that my aggregate/exec has not yet returned a value by the time the reduce function goes into its next run. Is there a way I can achieve this without async/await here? I'm including the high-level structure here so you can see what needs to happen, and why using .then() is probably not viable.
EDIT: Updating code with async suggestion
function execute(model, docs, options, lean, cb) {
    options = formatOptions(options);
    let resolvedCount = 0;
    let error = false;
    (async () => {
        for (let doc of docs) {
            let newFoodsArray = [...doc.foods];
            for (let option of options) {
                const path = option.path.split(".");
                // ... various things happen here to prep the data
                const aggregationOptions = [
                    // $match, then $unwind, then $replaceRoot
                ];
                await rootRefModel
                    .aggregate(aggregationOptions)
                    .exec((err, refSubDocuments) => {
                        // more stuff happens
                        console.log('newFoodsArray', newFoodsArray); // this is to check whether the second iteration is using the updated newFoodsArray
                        const arrToReturn = newFoodsArray.map((food) => {
                            const newMatchingArray = food[nests[1]].map((matchingFood) => {
                                // more stuff
                                return matchingFood;
                            });
                            const updatedFood = food;
                            updatedFood[`${nests[1]}`] = newMatchingArray;
                            return updatedFood;
                        });
                        console.log('arrToReturn', arrToReturn);
                        newFoodsArray = [...arrToReturn];
                    });
            }
            console.log('finalNewFoods', newFoodsArray); // this should log after the other two, but it is logging first.
            const document = doc.toObject();
            document.foods = newFoodsArray;
            if (resolvedCount === options.length) cb(null, [document]);
        }
    })()
}
EDIT: Since it seems it will help, here is what is calling the execute function I have excerpted above.
/**
 * This will populate sub refs
 * @param {import('mongoose').ModelPopulateOptions[]|
 *        import('mongoose').ModelPopulateOptions|String[]|String} options
 * @returns {Promise}
 */
schema.methods.subPopulate = function (options = null) {
    const model = this.constructor;
    if (options) {
        return new Promise((resolve, reject) => execute(model, [this], options, false, (err, docs) => {
            if (err) return reject(err);
            return resolve(docs[0]);
        }));
    }
    return Promise.resolve();
};
};
We can use async/await just fine here, as long as we remember that async is the same as "returning a Promise" and await is the same as "resolving a Promise's .then or .catch".
So let's turn all those "synchronous but callback-based" calls into awaitables: our outer code has to keep obeying the API contract, but since it's not meant to return a value, we can safely mark our own version of it as async, and then we can use await, in combination with promises wrapped around any other callback-based calls, in our own code just fine:
async function execute(model, docs, options, lean, andThenContinueToThis) {
    options = formatOptions(options);
    let option, resolvedCount = 0;
    for (let doc of docs) {
        let newFoodsArray = [...doc.foods];
        for (option of options) {
            // ...things happen here...
            const aggregationOptions = [/*...data...*/];
            try {
                const refSubDocuments = await new Promise((resolve, reject) => rootRefModel
                    .aggregate(aggregationOptions)
                    .exec((err, result) => err ? reject(err) : resolve(result)));
                // ...do some work based on refSubDocuments...
            }
            // remember to forward errors and then stop:
            catch (err) {
                return andThenContinueToThis(err);
            }
        }
        // remember: bind newFoodsArray somewhere so it doesn't get lost next iteration
    }
    // As our absolutely last action, when all went well, we trigger the call forwarding:
    andThenContinueToThis(null, dataToForward);
}

Firebase Functions: Why do they sometimes fail? Why do they often complete without error but don't fulfill all tasks?

This perplexes me. I'm six months into a Firebase project and have been using JavaScript for Firebase Functions. I've learned a lot along the way by adding transactions, promises, batch writes and neat tricks. However, it seems like complete luck whether a function executes correctly. More often than not the functions do execute correctly, but there are strange periods with bursts of consecutive function calls where functions half-complete with no errors in the logs.
For example, I have a function for when a new user joins my app. It does a little bit of server data construction and also notifies the two admins that a new user has joined. Last night I did a test run with two new users and got no notification, but their user profiles were constructed correctly in the server database. I checked the function logs and there were no errors.
Am I not handling Promises in the correct way? If a firebase function hangs, does it mess up the next few function calls?
exports.onNewUser = functions.firestore
    .document('/users/{userId}')
    .onCreate(async (snapshot, context) => {
        const user = snapshot.data().username;
        // Notification payload
        const payload = {
            notification: {
                title: `New user!`,
                body: `${user} has joined [AppName]`
            }
        };
        var promises = [];
        // Check if username is unique
        var passed = true;
        promises.push(db.runTransaction(async t => {
            const docRef = db.collection('users').doc('index');
            const doc = await t.get(docRef);
            var newIndex = doc.data().usernames;
            if (newIndex[user.toUpperCase()] == true) {
                t.delete(snapshot.ref);
                passed = false;
                return null;
            } else {
                newIndex[user.toUpperCase()] = true;
                t.set(docRef, { 'usernames': newIndex });
            }
        }));
        if (!passed) return Promise.all(promises);
        // Add new user to Algolia database
        const algoliasearch = require('algoliasearch');
        const algoliaClient = algoliasearch(functions.config().algolia.appid, functions.config().algolia.apikey);
        const collectionIndex = algoliaClient.initIndex(collectionIndexName);
        await saveDocumentInAlgolia(snapshot, collectionIndex);
        // Notify admins
        db.collection('notificationTokens')
            .doc(admin1)
            .get().then((doc) => {
                if (doc.exists && doc.data().notificationToken != null)
                    promises.push(pushNotification(doc.data().notificationToken, payload));
            });
        db.collection('notificationTokens')
            .doc(admin2)
            .get().then((doc) => {
                if (doc.exists && doc.data().notificationToken != null)
                    promises.push(pushNotification(doc.data().notificationToken, payload));
            });
        return Promise.all(promises);
    });
Just change
return Promise.all(promises);
to
return await Promise.all(promises);
You have to wait until the promises resolve before you return from the function, because returning tells Cloud Functions that the work is done and the instance can be stopped.
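Along the same lines, note that the two admin lookups above push into promises from their own .then callbacks, which may happen after Promise.all(promises) has already been built. A sketch of awaiting them first (same names as the question, not the original answer's code):
// Await the token lookups so their notification promises are in the array before it is awaited.
const adminDocs = await Promise.all([
    db.collection('notificationTokens').doc(admin1).get(),
    db.collection('notificationTokens').doc(admin2).get(),
]);
for (const doc of adminDocs) {
    if (doc.exists && doc.data().notificationToken != null) {
        promises.push(pushNotification(doc.data().notificationToken, payload));
    }
}
return Promise.all(promises);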

Using composition in JavaScript

I want to make a request and cache it, in a functional style.
const req = (uri) =>
    (console.log(`requesting: ${uri}`), Promise.resolve({ status: 200 }));
const cache = (fn) => (...args) =>
    fn(...args).then((result) => { console.log('caching:', result) });
const cachedReq = cache(req);
cachedReq('example.com/foo');
Two questions:
Is this code idiomatic?
How can I supply logic to generate the cache key from the result, while maintaining separation of concerns? For example, I might use req to retrieve different kinds of resource which need different logic to generate the key to be used in the cache. How should I supply this key-generation logic to the cache function?
Edit:
In reality, the URI should be the key (thanks to @epascarello). I chose a poor example. But I'd like to ask about the more general case, where logic needs to be supplied "down composition", while maintaining decent separation of concerns.
You are close to achieving your goal, and you are headed in the right direction with the composition concept. Maybe this code can help you get there.
Let's simulate your req function like so:
var req = (uri) => {
    console.log("inside req", uri);
    return new Promise((resolve, reject) => {
        setTimeout(() => {
            resolve({ status: 200 });
        }, 3000);
    });
}
Then you have the cached version as:
var withCache = (promiseFunc) => {
    const cache = {};
    return (...args) => {
        // suppose the first param is the uri
        var uri = args[0];
        return new Promise((resolve, reject) => {
            if (cache.hasOwnProperty(uri)) {
                return resolve(cache[uri]);
            }
            promiseFunc(...args).then((data) => {
                cache[uri] = data;
                resolve(data);
            }).catch(reject);
        });
    }
}
As you can see, you need to create the cache object inside the wrapping function, which is a little similar to currying in JS. You then wrap your req (which returns a promise) in another promise from the cached version: before executing the req function, you check whether a response already exists in the cache under the same uri key. If it does, resolve the promise immediately; otherwise execute the req function, and once you receive the response, cache it and resolve the cached promise version.
So you can use it like so:
var cacheReq = withCache(req);
cacheReq('https://anywhere.com').then(console.log.bind(null, 'response')).catch(console.log.bind(null, 'error response'));
You will notice that the first time, the promise waits 3 seconds before resolving the req; on the second call the promise resolves almost immediately because of the cache. If you try another uri it will wait 3 seconds again and cache that response to use the next time.
Hope it can help you.
You can use a combination of a Map and the Request constructor:
// I'll be using ramda for object equality, but any
// deepEquals checker should work.
const R = window.R;
const genRequest = ((cache, eqComparator) => {
    return (url, fetchOpts = {}) => {
        const key = {url, fetchOpts};
        const alreadyHave = [...cache.keys()].find(x => eqComparator(x, key));
        if (alreadyHave) return cache.get(alreadyHave);
        const req = new Request(url, fetchOpts);
        cache.set(key, req);
        return req;
    };
})(new Map(), R.equals);
const req = genRequest('http://www.google.com');
fetch(req)
    .then(...)
    .catch(...);
Some nice properties fall out of this:
Each request is constructed only once but can be repeatedly fetched.
No side-effects until you fetch: creating the request and fetching it are separate.
...thus, concerns are about as separated as they can be.
You could re-jigger parameter application to easily support custom equality comparisons using the same cache.
You can use the same strategy to cache the results of a fetch, separately from caching the requests.
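As a sketch of that last point (illustrative names, not part of the answer above): cache the parsed response instead of the Request, keyed the same way.
// Memoize the parsed result of a fetch; concurrent callers share the same in-flight promise.
const cachedFetchJson = ((cache) => (url, fetchOpts = {}) => {
    const key = JSON.stringify({ url, fetchOpts }); // simple key; swap in a deepEquals scan as above if needed
    if (!cache.has(key)) {
        cache.set(key, fetch(url, fetchOpts).then((res) => res.json()));
    }
    return cache.get(key);
})(new Map());
// cachedFetchJson('https://example.com/api').then(...);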

Calculate total elapsed time of Promises till reject?

I want to test how many requests I can do and get their total elapsed time. My Promise function:
async execQuery(response, query) {
    let request = new SQL.Request();
    return new Promise((resolve, reject) => {
        request.query(query, (error, result) => {
            if (error) {
                reject(error);
            } else {
                resolve(result);
            }
        });
    });
}
And my api
app.get('/api/bookings/:uid', (req, res) => {
    let st = new stopwatch();
    let id = req.params.uid;
    let query = `SELECT * FROM booking.TransactionDetails WHERE UID='${id}'`;
    for (let i = 0; i < 10000; i++) {
        st.start();
        db.execQuery(res, query);
    }
});
I can't stop the for loop since it's async, but I also don't know how I can stop executing the other calls after the first one rejects, so that I can get the count and the elapsed time of all the successful promises. How can I achieve that?
You can easily create a composable wrapper for this, or a subclass:
Inheritance:
class TimedPromise extends Promise {
    // make .then/.catch return plain Promises so the timing hook in the
    // constructor doesn't recursively construct more TimedPromises
    static get [Symbol.species]() { return Promise; }
    constructor(executor) {
        super(executor);
        this.startTime = performance.now(); // or Date.now
        let end = () => this.endTime = performance.now();
        this.then(end, end); // replace with finally when available
    }
    get time() {
        return this.endTime - this.startTime; // time in milliseconds it took
    }
}
Then you can use methods like:
TimedPromise.all(promises);
TimedPromise.race(promises);
var foo = new TimedPromise(resolve => setTimeout(resolve, 100));
let res = await foo;
console.log(foo.time); // how long foo took
Plus, then chaining would work; async functions won't (since they always return native promises).
Composition:
function time(promise) {
    var startTime = performance.now(), endTime;
    let end = () => endTime = performance.now();
    promise.then(end, end); // replace with finally when appropriate.
    return () => endTime - startTime;
}
Then usage is:
var foo = new Promise(resolve => setTimeout(resolve, 100));
var timed = time(foo);
await foo;
console.log(timed()); // how long foo took
This has the advantage of working everywhere, but the disadvantage of manually having to time every promise. I prefer this approach for its explicitness and arguably nicer design.
As a caveat, since a rejection handler is attached, you have to be 100% sure you're adding your own .catch or then handler since otherwise the error will not log to the console.
Wouldn't this work in your promise?
var time = Date.now();
new Promise((resolve, reject) => {
    request.query(query, (error, result) => {
        if (error) {
            reject(error);
        } else {
            resolve(result);
        }
    });
}).then(function(r){
    //code
}).catch(function(e){
    console.log('it took : ', Date.now() - time);
});
Or put the .then and .catch after your db.execQuery() call
You made 2 comments that would indicate you want to stop all ongoing queries when a promise fails, but you don't mention what SQL is or whether request.query is something that you can cancel.
In your for loop you already ran all the request.query statements; if you want to run only one query and then the next, you have to do request.query(query).then(() => request.query(query)).then(...), but it'll take longer because you don't start them all at once.
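A sketch of that sequential version using the question's own promise wrapper (db.execQuery), just to show the shape: each query starts only after the previous one settles, and the first rejection exits the loop.
async function runSequentially(db, res, queries) {
    const results = [];
    for (const q of queries) {
        results.push(await db.execQuery(res, q)); // waits for each query; a rejection stops the loop
    }
    return results;
}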
Here is code that would tell you how long all the queries took, but I think you should tell us what SQL is so we can figure out how to set up connection pooling and caching (probably the biggest performance gainer).
// removed the async; this function does not await anything
// so there is no need for async
// removed initializing request; you can reuse the one created in
// the run function, that may shave some time off total runtime
// but not sure if request can share connections (in that case
// it's better to create a couple and pass them along as their
// connection becomes available (connection pooling))
const execQuery = (response, query, request) =>
    new Promise(
        (resolve, reject) =>
            request.query(
                query
                ,(error, result) =>
                    (error)
                        ? reject(error)
                        : resolve(result)
            )
    );
// save failed queries and resolve them with a Fail object
const Fail = function(detail){this.detail=detail;};
// let request = new SQL.Request();
const run = (numberOfTimes) => {
    const start = new Date().getTime();
    const request = new SQL.Request();
    Promise.all(
        (()=>{
            const queries = [];
            for (let i = 0; i < numberOfTimes; i++) {
                let query = `SELECT * FROM booking.TransactionDetails WHERE UID='${i}'`;
                queries.push(
                    execQuery(null, query, request) // response isn't used by the wrapper
                        .then(
                            x=>[x,query]
                            ,err=>new Fail([err,query]) // resolve failures with a Fail so Promise.all keeps going
                        )
                );
            }
            return queries;
        })()//IIFE creating the array of promises
    )
    .then(
        results => {
            const totalRuntime = new Date().getTime()-start;
            const failed = results.filter(r=>(r&&r.constructor)===Fail);
            console.log(`Total runtime in ms:${totalRuntime}
Failed:${failed.length}
Succeeded:${results.length-failed.length}`);
        }
    )
};
//start the whole thing with:
run(10000);
