Asynchronously update IndexedDB in upgrade event - javascript

In the onupgradeneeded event for IndexedDB I am trying to update each record in an object store. In order to update them I need to first perform an async operation, but this causes the upgrade transaction to become inactive and I get the error:
Failed to execute 'update' on 'IDBCursor': The transaction is not active.
In the following code I am simulating the async operation with setTimeout():
let openRequest = indexedDB.open('myDb', 1);
openRequest.onupgradeneeded = function (versionEvent) {
  let db = versionEvent.target['result'];
  let upgradeTransaction = versionEvent.target['transaction'];
  if (versionEvent.oldVersion < 1) {
    let objStore = db.createObjectStore('sample');
    objStore.add('one', '1');
    objStore.add('two', '2');
  }
  if (versionEvent.oldVersion >= 1) {
    let getCursor = upgradeTransaction.objectStore('sample').openCursor();
    getCursor.onsuccess = (e) => {
      let cursor = e.target['result'];
      if (cursor) {
        setTimeout(() => {
          cursor.update(cursor.value + ' updated');
          cursor.continue();
        });
      }
    };
  }
};
https://plnkr.co/edit/DIIzLduZT1dwOEHAdzSf?p=preview
If you run this plunker it will initialize IndexedDB. Then if you increase the version number to 2 and run it again you will get the error.
How can I update IndexedDB in an upgrade event if my update relies on an async operation?

You need a different approach. Options include:
Making the schema changes immediately, but deferring the data changes to a subsequent transaction.
Fetching the data before trying to perform the upgrade. Since fetching the data is slow, this is likely not desirable.
Conditionally fetching the data, only if an upgrade is needed.
There are two approaches for the latter. You could do an open() with no version number, check the version, and then fetch/upgrade if it is lower than desired. Or you can open at the new version and, in upgradeneeded, abort the upgrade (get the transaction from the request and call abort() on it), then fetch the data and re-attempt the upgrade. A sketch of that second variant follows.
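Here is a minimal sketch of the abort-and-retry idea; fetchNewValues() is a hypothetical stand-in for your async operation and is assumed to resolve with whatever data the update needs:
// Minimal sketch of "abort the upgrade, fetch, then re-open".
// fetchNewValues() is a hypothetical stand-in for your async operation.
function upgradeWithAsyncData() {
  let openRequest = indexedDB.open('myDb', 2);
  openRequest.onerror = () => { /* expected AbortError from the aborted upgrade */ };
  openRequest.onupgradeneeded = (e) => {
    // We cannot await anything inside the upgrade transaction, so abort it,
    // fetch the data, and re-attempt the upgrade with the data in hand.
    e.target.transaction.abort();
    fetchNewValues().then((values) => {
      let retry = indexedDB.open('myDb', 2);
      retry.onupgradeneeded = (e2) => {
        let cursorRequest = e2.target.transaction.objectStore('sample').openCursor();
        cursorRequest.onsuccess = (ev) => {
          let cursor = ev.target.result;
          if (cursor) {
            // No async gap here, so the upgrade transaction stays active.
            cursor.update(cursor.value + ' ' + values[cursor.key]);
            cursor.continue();
          }
        };
      };
    });
  };
}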

Related

How to check what the throttling limit is for your access to an endpoint with JS

I need to implement code to check what my throttling limit is on an endpoint (I know it's x times per minute). I've only been able to find an example of this in python, which I have never used. It seems like my options are to run a script to send the request repeatedly until it throttles me or, if possible, query the API to see what the limit is.
Does anyone have a good idea on how to go about this?
Thanks.
This starts concurrency workers (I'm using workers as a loose term here; don't # me). Each one makes as many requests as possible until one of the requests is rate-limited or it runs out of time. It then reports how many of the requests completed successfully inside the given time window.
If you know the rate-limit window (1 minute, based on your question), this will find the rate limit. If you need to discover the window, you would want to intentionally exhaust the limit, then slow down the requests and measure the time until they started going through again. The provided code does not do this.
// Call apiCall() a bunch of times, stopping when an apiCall() resolves
// false or when the "until" time is reached, whichever comes first. For example,
// if your limit is 50 req/min (and you give "until" enough time to
// actually complete 50+ requests) this will call apiCall() 50 times. Each
// call should return a promise resolving to TRUE, so it will be counted as
// a success. On the 51st call you will presumably hit the limit, the API
// will return an error, apiCall() will detect that, and resolve to false.
// This will cause the worker to stop making requests and return 50.
async function workerThread(apiCall, until) {
  let successfulRequests = 0;
  while (true) {
    const success = await apiCall();
    // only count it if the request was successful
    // AND finished within the timeframe
    if (success && Date.now() < until) {
      successfulRequests++;
    } else {
      break;
    }
  }
  return successfulRequests;
}

// This just runs a bunch of workerThreads in parallel, since by doing a
// single request at a time, you might not be able to hit the limit
// depending on how slow the API is to return. It returns the sum of each
// workerThread(), AKA the total number of apiCall()s that resolved to TRUE
// across all threads.
async function testLimit(apiCall, concurrency, time) {
  const endTime = Date.now() + time;
  // launch "concurrency" number of workers
  const workers = [];
  while (workers.length < concurrency) {
    workers.push(workerThread(apiCall, endTime));
  }
  // sum the number of requests that succeeded from each worker.
  // this implicitly waits for them to finish.
  let total = 0;
  for (const worker of workers) {
    total += await worker;
  }
  return total;
}

// Put in your own code to make a trial API call.
// Return true for success or false if you were throttled.
async function yourAPICall() {
  try {
    // this is a really sloppy example API
    // the limit is ROUGHLY 5/min, but because of the sloppy server-side
    // implementation you might get 4-6
    const resp = await fetch("https://9072997.com/demos/rate-limit/");
    return resp.ok;
  } catch {
    return false;
  }
}

// This is a demo of how to use the function.
(async function() {
  // run 2 requests at a time for 5 seconds
  const limit = await testLimit(yourAPICall, 2, 5 * 1000);
  console.log("limit is " + limit + " requests in 5 seconds");
})();
Note that this method measures the quota available to itself. If other clients or previous requests have already depleted the quota, it will affect the result.
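If you do need to discover the window length rather than assume it, here is a rough sketch (not part of the code above) of the idea described earlier: exhaust the limit, then poll slowly and measure how long it takes before requests start succeeding again. pollIntervalMs is an assumed tuning knob.
// Rough sketch: measure the rate-limit window by exhausting the limit,
// then polling slowly until requests succeed again.
async function discoverWindow(apiCall, pollIntervalMs = 1000) {
  // burn through the quota until we get throttled
  while (await apiCall()) {}
  const throttledAt = Date.now();
  // poll slowly until a request goes through again
  while (!(await apiCall())) {
    await new Promise(resolve => setTimeout(resolve, pollIntervalMs));
  }
  // the gap approximates the window length in milliseconds
  return Date.now() - throttledAt;
}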

Firebase update Function is sometimes slower executed

I have a simple update function which sometimes executes really slowly compared to other times.
// Five executions with very different execution times
Finished in 1068ms // Almost six times slower than the next execution
Finished in 184ms
Finished in 175ms
Finished in 854ms
Finished in 234ms
The Function is triggered from the frontend and doesn't run on Firebase Cloud Functions.
const startAt = performance.now();
const db = firebase.firestore();
const ref = db.doc(`random/nested/document/${id}`);
ref.update({
  na: boolean // a calculated boolean with array.includes(...)
    ? firebase.firestore.FieldValue.arrayRemove(referenceId)
    : firebase.firestore.FieldValue.arrayUnion(referenceId)
})
  .then(() => {
    let endAt = performance.now();
    console.log("Finished in " + (endAt - startAt) + "ms");
  });
Is there anything I can improve to fix these performance differences?
Also, the longer execution times don't appear only when removing something from an array or only when adding something to one; they appear for both adding and removing. Sometimes these execution times go up to 3000ms.
Similar to cold-starting a Cloud Function, where everything is spun up, initialized, and made ready for use, a connection to Cloud Firestore also needs to be resolved through DNS, ID tokens need to be obtained to authenticate the request, a socket to the server opened, and handshakes exchanged between the server and the SDK.
Any new operations on the database can make use of the previous work taken to initialize the connection and that is why they look like they are faster.
Showing this as loose pseudocode:
let connection = undefined;

async function initConnectionToFirestore() {
  if (!connection) {
    await loadFirebaseConfig();
    await Promise.all([
      resolveIpAddressOfFirebaseAuth(),
      resolveIpAddressOfFirestoreInstance()
    ]);
    await getIdTokenFromFirebaseAuth();
    await addAuthenticationToRequest();
    connection = await openConnection();
  }
  return connection;
}

async function doUpdate(...args) {
  const connection = await initConnectionToFirestore();
  // do the work
  connection.send(/* ... */);
}

await doUpdate(); // has to do the work of initConnectionToFirestore
await doUpdate(); // reuses previous work
await doUpdate(); // reuses previous work
await doUpdate(); // reuses previous work
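One way to hide that cost from the user (just a sketch, assuming the same namespaced v8-style SDK as in the question and an arbitrary warm-up document path) is to trigger the connection setup with a cheap read when the app loads, so the first real update() only pays the per-operation cost:
const db = firebase.firestore();

// Fire-and-forget warm-up at app start; 'warmup/ping' is just an example path.
// We only care about forcing DNS/auth/socket setup, not about the result.
db.doc('warmup/ping').get().catch(() => { /* ignore */ });

// Later, the first real update reuses the already-open connection.
function toggleReference(id, shouldRemove, referenceId) {
  return db.doc(`random/nested/document/${id}`).update({
    na: shouldRemove
      ? firebase.firestore.FieldValue.arrayRemove(referenceId)
      : firebase.firestore.FieldValue.arrayUnion(referenceId)
  });
}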

Should I open an IDBDatabase each time or keep one instance open?

I have a SPA application that will make multiple reads/writes to IndexedDB.
Opening the DB is an asynchronous operation with a callback:
var db;
var request = window.indexedDB.open("MyDB", 2);
request.onupgradeneeded = function(event) {
  // Upgrade to latest version...
};
request.onerror = function(event) {
  // Uh oh...
};
request.onsuccess = function(event) {
  // DB open, now do something
  db = event.target.result;
};
There are two ways I can use this db instance:
Keep a single db instance for the life of the page/SPA?
Call db.close() once the current operation is done and open a new one on the next operation?
Are there pitfalls of either pattern? Does keeping the indexedDB open have any risks/issues? Is there an overhead/delay (past the possible upgrade) to each open action?
I have found that opening a connection per operation does not substantially degrade performance. I have been running a local Chrome extension for over a year now that involves a ton of indexedDB operations and have analyzed its performance hundreds of times and have never witnessed opening a connection as a bottleneck. The bottlenecks come in doing things like not using an index properly or storing large blobs.
Basically, do not base your decision here on performance. It really isn't the issue in terms of connecting.
The issue is really the ergonomics of your code: how much you are fighting against the APIs, how intuitive your code feels when you look at it, how understandable you think the code is, and how welcoming it is to fresh eyes (your own a month later, or someone else's). This is very notable when dealing with the blocking issue, which is indirectly about dealing with application modality.
My personal opinion is that if you are comfortable with writing async Javascript, use whatever method you prefer. If you struggle with async code, choosing to always open the connection will tend to avoid any issues. I would never recommend using a single global page-lifetime variable to someone who is newer to async code. You are also leaving the variable there for the lifetime of the page. On the other hand, if you find async trivial, and find the global db variable more amenable, by all means use it.
Edit - based on your comment I thought I would share some pseudocode of my personal preference:
function connect(name, version) {
  return new Promise((resolve, reject) => {
    const request = indexedDB.open(name, version);
    request.onupgradeneeded = onupgradeneeded; // your upgrade handler, defined elsewhere
    request.onsuccess = () => resolve(request.result);
    request.onerror = () => reject(request.error);
    request.onblocked = () => console.warn('pending till unblocked');
  });
}

async function foo(bar) {
  let conn;
  try {
    conn = await connect(DBNAME, DBVERSION);
    await storeBar(conn, bar);
  } finally {
    if (conn)
      conn.close();
  }
}

function storeBar(conn, bar) {
  return new Promise((resolve, reject) => {
    const tx = conn.transaction('store', 'readwrite');
    const store = tx.objectStore('store');
    const request = store.put(bar);
    request.onsuccess = () => resolve(request.result);
    request.onerror = () => reject(request.error);
  });
}
With async/await, there isn't too much friction in having the extra conn = await connect() line in your operational functions.
Opening a connection each time is likely to be slower just because the browser is doing more work (e.g. it may need to read data from disk). Otherwise, no real down sides.
Since you mention upgrades, either pattern requires a different approach to the scenario where the user opens your page in another tab and it tries to open the database with a higher version (because it downloaded newer code from your server). Let's say the old tab was version 3 and the new tab is version 4.
In the one-connection-per-operation case you'll find that your open() on version 3 fails, because the other tab was able to upgrade to version 4. You can detect that the open failed (e.g. with VersionError) and inform the user that they need to refresh the page.
In the one-connection-per-page case your connection at version 3 will block the other tab. The v4 tab can respond to the "blocked" event on the request and let the user know that older tabs should be closed. Or the v3 tab can respond to the "versionchange" event on the connection and tell the user that it needs to be closed. Or both.
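A small sketch of the one-connection-per-page handling described above (the version numbers and messages are just examples):
// The newer tab (v4) warns if an older tab still holds the database open.
const request = indexedDB.open('MyDB', 4);
request.onblocked = () => {
  alert('Please close other tabs with this site open, then reload.');
};

request.onsuccess = (event) => {
  const db = event.target.result;
  // The older tab (v3) gets this event when a newer tab wants to upgrade.
  db.onversionchange = () => {
    db.close(); // release the connection so the upgrade can proceed
    alert('This tab is outdated. Please refresh the page.');
  };
};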

TransactionInactiveError with subsequent put calls

I can't figure out if I'm doing something wrong or if I'm just pushing it too hard.
I'm trying to sync ~70000 records from my online db to IndexedDB in combination with EventSource and a Worker.
So I get 2000 records per package and then use the following code to store them in IndexedDB:
eventSource.addEventListener('package', function(e) {
  var data = JSON.parse(e.data);
  putData(data.type, data.records);
});

function putData(storeName, data) {
  var store = db.transaction([storeName], 'readwrite').objectStore(storeName);
  return new Promise(function(resolve, reject) {
    putRecord(data, store, 0);
    store.transaction.oncomplete = resolve;
    store.transaction.onerror = reject;
  });
}

function putRecord(data, store, recordIndex) {
  if (recordIndex < data.length) {
    var req = store.put(data[recordIndex]);
    req.onsuccess = function(e) {
      recordIndex += 1;
      putRecord(data, store, recordIndex);
    };
    req.onerror = function() {
      self.postMessage(this.error.name);
      recordIndex += 1;
      putRecord(data, store, recordIndex);
    };
  }
}
It all works for about ~10000 records; I didn't really test where the limit is, though. I suspect that at some point there are too many transactions in parallel, which makes a single transaction very slow and thus causes trouble because of some timeout. According to the dev tools, the 70000 records are around 20MB.
Complete error:
Uncaught TransactionInactiveError: Failed to execute 'put' on
'IDBObjectStore': The transaction has finished.
Any ideas?
I don't see an obvious error in your code, but you can make it much simpler and faster. There's no need to wait for the success of a previous put() to issue a second put() request.
function putData(storeName, data) {
  var store = db.transaction([storeName], 'readwrite').objectStore(storeName);
  return new Promise(function(resolve, reject) {
    for (var i = 0; i < data.length; ++i) {
      var req = store.put(data[i]);
      req.onerror = function(e) {
        self.postMessage(e.target.error.name);
      };
    }
    store.transaction.oncomplete = resolve;
    store.transaction.onerror = reject;
  });
}
It is possible that the error you are seeing is because the browser has implemented an arbitrary time limit on the transaction. But again, your code looks correct, including the use of Promises (which are tricky with IDB, but so far as I can tell you're doing it correctly!)
If this is still occurring I second the comment to file a bug against the browser(s) with a stand-alone repro. (If it's happening in Chrome I'd be happy to take a look.)
I think this is due to the implementation. If you read the spec, a transaction must keep a list of all the requests made in it. When the transaction is committed, all these changes are persisted; otherwise the transaction is aborted. Specs
The maximum request list in your case is probably 1000 requests. You can easily test that by trying to insert 1001 records. So my guess is that when the 1000th request is reached, the transaction is set to inactive.
Maybe change your strategy: only make 1000 requests in each transaction, and start a new transaction once the previous one has completed.
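A sketch of that strategy, reusing the shapes from the question (CHUNK_SIZE is just the guess from above, not a documented limit):
var CHUNK_SIZE = 1000;

// Put one chunk per transaction and only start the next chunk
// once the previous transaction has completed.
function putDataChunked(storeName, data, offset) {
  offset = offset || 0;
  if (offset >= data.length) {
    return Promise.resolve();
  }
  var store = db.transaction([storeName], 'readwrite').objectStore(storeName);
  return new Promise(function(resolve, reject) {
    var chunk = data.slice(offset, offset + CHUNK_SIZE);
    for (var i = 0; i < chunk.length; i++) {
      store.put(chunk[i]);
    }
    store.transaction.oncomplete = resolve;
    store.transaction.onerror = reject;
  }).then(function() {
    return putDataChunked(storeName, data, offset + CHUNK_SIZE);
  });
}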

PhoneGap Windows Phone 8 IndexedDB AbortError when opening db

Using essentially the example from the MDN IndexedDB tutorial, I can see that my test IndexedDB code works in Chrome. When I load the app onto my Windows Phone 8 device, inside of the deviceready handler I get an AbortError in the error handler for the database open request.
The only other related SO question was solved by fixing errors in onupgradeneeded, but this handler is never even called in my code.
In this simple example, you have to run the fiddle twice because apparently onsuccess is called (where I read a test value) before onupgradeneeded (where I write the value when the db is initialized). I was going to deal with this once I got this first test to work.
http://jsfiddle.net/WDUVx/2/
// In the following line, you should include the prefixes of
// implementations you want to test.
window.indexedDB = window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
window.IDBTransaction = window.IDBTransaction || window.webkitIDBTransaction || window.msIDBTransaction;
window.IDBKeyRange = window.IDBKeyRange || window.webkitIDBKeyRange || window.msIDBKeyRange;

if (!window.indexedDB) {
  window.alert("Your browser doesn't support a stable version of IndexedDB. Such and such feature will not be available.");
}

// open the database
var db;
var request = window.indexedDB.open("MyTestDatabase", 1);

request.onerror = function(e) {
  alert("Couldn't open database: " + kc.util.getObjectString(e.target));
};

request.onsuccess = function(e) {
  db = e.target.result;
  var getRequest =
    db.transaction("data")
      .objectStore("data")
      .get("firstObject")
      .onsuccess = function(event) {
        alert("Got: " + event.target.result.test);
      };
};

request.onupgradeneeded = function(e) {
  var db = event.target.result;
  var objectStore = db.createObjectStore("data", {
    autoincrement: false
  });
  objectStore.transaction.oncomplete = function(event) {
    var myObjectStore = db.transaction("data", "readwrite").objectStore("data");
    var addRequest = myObjectStore.add({
      test: true
    }, "firstObject");
    addRequest.onerror = function(e) {
      console.log("Error adding");
    };
    addRequest.onsuccess = function(e) {
      console.log("Added!");
    };
  };
};
Questions:
What is my stupid mistake?
Are there any Windows Phone 8 examples of IndexedDb and PhoneGap? I could not find any after some searching. There were a few for an IndexedDb API Android and IOS polyfill, but none for wp8.
Is there something special that I have to do because I'm on a phone? Again, the code works in chrome.
Are there any other plugins that support wp8 storage > 5mb?
LocalStorage has a 5mb size limit
WebSQL is not supported
The FileSystem plugin does not support filewriter.write(blob), which is what I am using for Android/iOS. It's strange that they say wp8 is supported by this plugin when that is the only way to actually write data, so there is nothing to read because nothing can be written. I found that although the web API does not support it, the devices do support filewriter.write(string). Windows Phone 8 is still not writing/reading things entirely correctly, but that is a separate question.
Recently, I faced a similar issue with IndexedDB. My indexedDB.open request was throwing an abort error.
After doing some search, I found suggestions to separate the requests for database creation and store creation.
Separating the code prevented the abort error. However, I noticed that the transaction to create the stores was sometimes run even before the completion of the database creation request.
This meant that my database connection was not closed from the first request, when the second request was run.
A minor fix was required to overcome this error. I moved the code to create stores to the success event for the first call.
Here is the code for reference.
function create_db(db_name) {
  var request = indexedDB.open(db_name);
  request.onupgradeneeded = function(e) {
    console.log("1. creating database");
    db = e.target.result;
  };
  request.onsuccess = function(e) {
    db = e.target.result;
    console.log("1.1 database created successfully");
    db.close();
    add_tables(db_name);
  };
  request.onerror = function(e) {
    alert("error: " + e.target.error.name + " failed creating db");
    console.log("1.2 error creating db");
  };
}

function add_tables(db_name) {
  var request = indexedDB.open(db_name, 2);
  request.onsuccess = function(e) {
    db = e.target.result;
    console.log("2.2 table creation request successful");
  };
  request.onupgradeneeded = function(e) {
    db = e.target.result;
    table = db.createObjectStore("table_name");
    table.createIndex("id", "id");
    console.log("2.2 creating a single object store");
  };
  request.onerror = function(e) {
    console.log("2.3 error occurred when creating tables");
  };
}
Just some ideas, hope they help:
Don't use a global db variable. Do all of your work in callbacks. Using a global db variable can lead to numerous inexplicable situations, some of which include getting abort errors. Looking at your code, it actually looks like you are properly just using e.target, so I am not sure why you have a global var db.
Don't perform read/write requests on the version change transaction that occurs in the onupgradeneeded callback. Instead, perform requests when they are appropriate and let indexeddb worry about calling onupgradeneeded. In other words, don't retrieve the transaction in onupgradeneeded. Instead, just initiate some later transaction in a new connection as if the onupgradeneeded callback already completed.
An abort event on the openDBRequest precedes the onupgradeneeded callback.
onupgradeneeded won't be called unless you make an attempt to connect to a database using a higher version
Listen for abort events. Add a callback to the open database request for onabort. Abort events sometimes occur because of things like opening two pages in the same context that try to access the same database. There could be something funky going on there.
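A small sketch of that last idea; note that in the standard API an aborted upgrade usually surfaces as an AbortError in the request's onerror, while abort events from later transactions bubble up to the connection:
var openRequest = indexedDB.open("MyTestDatabase", 1);

openRequest.onblocked = function() {
  console.log("open blocked: another connection holds an older version");
};

openRequest.onerror = function(e) {
  // an aborted versionchange transaction shows up here as an AbortError
  console.log("open failed: " + e.target.error.name);
};

openRequest.onsuccess = function(e) {
  var db = e.target.result;
  db.onabort = function(ev) {
    // abort events from any transaction bubble up to the connection
    console.log("a transaction was aborted");
  };
};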
