How can I `put` new values in an already existing objectStore in an already existing indexedDB? - javascript

I am tying myself up in knots trying to update a series of four entries in an objectStore in an indexedDB.
This is what I want to achieve (in pseudo-code):
let myDatabase = indexedDB('myDatabase', 1);
let myObjectStore = myDatabase.myObjectStore;
myObjectStore.entry1 = 'newValue1';
myObjectStore.entry2 = 'newValue2';
myObjectStore.entry3 = 'newValue3';
myObjectStore.entry4 = 'newValue4';
But of course, it isn't anything like that straightforward.
I understand I need to use put. But, despite numerous attempted approaches, I can't get further than that.
I have got as far as successfully setting up and populating the objectStore in the first place when the indexedDB is first created:
// SET UP VALUES OBJECT
let valuesObject = {
    entry1: 'a',
    entry2: 'b',
    entry3: 'c',
    entry4: 'd'
};
// SET UP INDEXED DATABASE
const setUpIndexedDatabase = (valuesObject) => {
    let database;
    const databaseVersion = 1;
    const databaseName = 'myDatabase';
    const databaseOpenRequest = indexedDB.open(databaseName, databaseVersion);
    databaseOpenRequest.onupgradeneeded = () => {
        database = databaseOpenRequest.result;
        let myObjectStore = database.createObjectStore('myObjectStore');
        myObjectStore.transaction.oncomplete = () => {
            let objectStoreValues = database.transaction('myObjectStore', 'readwrite').objectStore('myObjectStore');
            const valuesEntries = Object.entries(valuesObject);
            for (let i = 0; i < valuesEntries.length; i++) {
                objectStoreValues.add(valuesEntries[i][1], valuesEntries[i][0]);
            }
        }
    }
    databaseOpenRequest.onsuccess = () => {
        database = databaseOpenRequest.result;
        // >>> THIS IS THE BIT THAT I NEED TO WRITE <<<
        database.close();
    }
}
setUpIndexedDatabase(valuesObject);
So far, so good. The code above fires the onupgradeneeded event if no database exists yet, which creates myObjectStore and populates it with four key-value pairs.
But if the database does exist and already contains myObjectStore, then every variation of code I have written using put fails to update the values for the keys - sometimes returning various errors, and quite often no error at all.
All I want to do is update values in the database.
I think the problem is that I don't know how to use put properly when the database version remains unchanged and onupgradeneeded doesn't fire.

If you want to update an already existing value in the database, you can do so with the following code (as an example, I am updating the entry1 entry):
databaseOpenRequest.onsuccess = function(event) {
    const db = event.target.result;
    const objectStore = db.transaction('myObjectStore', 'readwrite').objectStore('myObjectStore');
    const request = objectStore.put('e', 'entry1');
    request.onerror = function(event) {
        // There was an error while updating.
    };
    request.onsuccess = function(event) {
        // The update was successful.
    };
};
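If you need to write several entries in one go, you can issue all the puts inside a single readwrite transaction and listen for that transaction to complete. A minimal sketch, assuming the same database and store names as above and the same out-of-line keys (value first, key second, matching the add calls in the question):
const openRequest = indexedDB.open('myDatabase', 1);
openRequest.onsuccess = () => {
    const db = openRequest.result;
    const transaction = db.transaction('myObjectStore', 'readwrite');
    const store = transaction.objectStore('myObjectStore');
    // put() overwrites whatever value is currently stored under each key
    store.put('newValue1', 'entry1');
    store.put('newValue2', 'entry2');
    store.put('newValue3', 'entry3');
    store.put('newValue4', 'entry4');
    transaction.oncomplete = () => {
        // All four writes committed together.
        db.close();
    };
    transaction.onerror = () => {
        console.error(transaction.error);
    };
};
Since all four puts share one transaction, they commit (or fail) together.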

Related

Override Mongoose save method to retry on `duplicate key error`

My Mongoose schema uses a custom _id value, and the code I inherited does something like this:
const sampleSchema = new mongoose.Schema({
    _id: String,
    key: String,
});
sampleSchema.statics.generateId = async function() {
    let id;
    do {
        id = randomStringGenerator.generate({length: 8, charset: 'hex', capitalization: 'uppercase'});
    } while (await this.exists({_id: id}));
    return id;
};
let SampleModel = mongoose.model('Sample', sampleSchema);
A simple usage looks like this:
let mySample = new SampleModel({_id: await SampleModel.generateId(), key: 'a' });
await mySample.save();
There are at least three problems with this:
Every save will require at least two trips to the database, one to test for a unique id and one to save the document.
For this to work, it is necessary to manually call generateId() before each save. An ideal solution would handle that for me, like Mongoose does with ids of type ObjectId.
Most significantly, there is a potential race condition that will result in a duplicate key error. Consider two clients running this code: both coincidentally generate the same id at the same time, both look in the database and find the id absent, and both try to write the record to the database. The second will fail.
An ideal solution would, on save, generate an id, save it to the database and on duplicate key error, generate a new id and retry. Do this in a loop until the document is stored successfully.
The trouble is, I don't know how to get Mongoose to let me do this.
Here's what I tried: based on this SO question, I found a rather old sample (using a very old Mongoose version) of overriding the save function to accomplish something similar, and based this attempt on it.
// First, change generateId() to force a collision
let ids = ['a', 'a', 'a', 'b'];
let index = 0;
let generateId = function() {
    return ids[index++];
};
// Configure middleware to generate the id before a save
sampleSchema.pre('validate', function(next) {
    if (this.isNew)
        this._id = generateId();
    next();
});
// Now override the save function
SampleModel.prototype.save_original = SampleModel.prototype.save;
SampleModel.prototype.save = function(options, callback) {
    let self = this;
    let retryOnDuplicate = function(err, savedDoc) {
        if (err) {
            if (err.code === 11000 && err.name === 'MongoError') {
                self.save(options, retryOnDuplicate);
                return;
            }
        }
        if (callback) {
            callback(err, savedDoc);
        }
    };
    return self.save_original(options, retryOnDuplicate);
};
This gets me close but I'm leaking a promise and I'm not sure where.
let sampleA = new SampleModel({key: 'a'});
let sampleADoc = await sampleA.save();
console.log('sampleADoc', sampleADoc); // prints undefined, but should print the document
let sampleB = new SampleModel({key: 'b'});
let sampleBDoc = await sampleB.save();
console.log('sampleBDoc', sampleBDoc); // prints undefined, but should print the document
let all = await SampleModel.find();
console.log('all', all); // prints `[]`, but should be an array of two documents
Output
sampleADoc undefined
sampleBDoc undefined
all []
The documents eventually get written to the database, but not before the console.log calls are made.
Where am I leaking a promise? Is there an easier way to do this that addresses the three problems I outlined?
Edit 1:
Mongoose version: 5.11.15
I fixed the problem by changing the save override. The full solution looks like this:
const sampleSchema = new mongoose.Schema({
    _id: String,
    color: String,
});
let generateId = function() {
    return randomStringGenerator.generate({length: 8, charset: 'hex', capitalization: 'uppercase'});
};
sampleSchema.pre('validate', function() {
    if (this.isNew)
        this._id = generateId();
});
let SampleModel = mongoose.model('Sample', sampleSchema);
SampleModel.prototype.save_original = SampleModel.prototype.save;
SampleModel.prototype.save = function(options, callback) {
    let self = this;
    let isDupKeyError = (error, field) => {
        // Determine whether the error is a duplicate key error on the given field
        return error?.code === 11000 && error?.name === 'MongoError' && error?.keyValue[field];
    };
    let saveWithRetries = (options, callback) => {
        // save() returns undefined if used with callback or a Promise otherwise.
        // https://mongoosejs.com/docs/api/document.html#document_Document-save
        let promise = self.save_original(options, callback);
        if (promise) {
            return promise.catch((error) => {
                if (isDupKeyError(error, '_id')) {
                    return saveWithRetries(options, callback);
                }
                throw error;
            });
        }
    };
    let retryCallback;
    if (callback) {
        retryCallback = (error, saved, rows) => {
            if (isDupKeyError(error, '_id')) {
                saveWithRetries(options, retryCallback);
            } else {
                callback(error, saved, rows);
            }
        };
    }
    return saveWithRetries(options, retryCallback);
};
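With this override in place, a save no longer needs a manual generateId() call. A quick usage sketch (inside an async function, using the color field from the schema above):
let sample = new SampleModel({ color: 'red' });
let savedDoc = await sample.save(); // _id generated in pre('validate'), regenerated on collision
console.log(savedDoc._id);          // an 8-character uppercase hex string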
This will generate an _id repeatedly until a successful save is called and addresses the three problems outlined in the original question:
The minimum number of trips to the database has been reduced from two to one. Of course, if there are collisions, more trips will occur, but that's the exceptional case.
This implementation takes care of generating the id itself, with no manual step before saving. This reduces complexity and removes the need to know the prerequisites for saving that the original method imposed.
The race condition has been addressed. It won't matter if two clients attempt to use the same key. One will succeed and the other will generate a new key and save again.
To improve this:
There ought to be a maximum number of save attempts for a single document, followed by failure; reaching it suggests you've used up all the available keys in whatever domain you're using (see the sketch after this list).
The unique field may not be named _id or you might have multiple fields that require a unique generated value. The embedded helper function isDupKeyError() could be updated to look for multiple keys. Then on error you could add logic to regenerate just the failed key.
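As a sketch of the first improvement, the promise branch of saveWithRetries could carry an attempt counter; MAX_ATTEMPTS and the attempt parameter are illustrative names, not part of the solution above:
let saveWithRetries = (options, callback, attempt = 0) => {
    const MAX_ATTEMPTS = 5; // illustrative cap; tune to the size of your key space
    let promise = self.save_original(options, callback);
    if (promise) {
        return promise.catch((error) => {
            if (isDupKeyError(error, '_id') && attempt + 1 < MAX_ATTEMPTS) {
                return saveWithRetries(options, callback, attempt + 1);
            }
            throw error; // a different error, or the key space looks exhausted
        });
    }
};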

Delete same value from multiple locations Firebase Functions

I have a firebase function that deletes old messages after 24 hours, as in my old question here. I now have just the messageIds stored in an array under the user, such that the path is /User/objectId/myMessages with an array of all the messageIds under myMessages. All of the messages get deleted after 24 hours, but the IDs under the user's profile stay there. Is there a way to extend the function so that it also deletes the messageIds from the array under the user's account?
I'm new to Firebase functions and javascript so I'm not sure how to do this. All help is appreciated!
Building upon #frank-van-puffelen's accepted answer on the old question, this will now delete the message IDs from their sender's user data as part of the same atomic delete operation without firing off a Cloud Function for every message deleted.
Method 1: Restructure for concurrency
Before being able to use this method, you must restructure how you store entries in /User/someUserId/myMessages, following best practices for concurrently updated lists by using the message IDs as keys:
{
    "/User/someUserId/myMessages": {
        "-Lfq460_5tm6x7dchhOn": true,
        "-Lfq483gGzmpB_Jt6Wg5": true,
        ...
    }
}
This allows you to modify the previous function to:
// Cut off time. Child nodes older than this will be deleted.
const CUT_OFF_TIME = 24 * 60 * 60 * 1000; // 24 hours in milliseconds.
exports.deleteOldMessages = functions.database.ref('/Message/{chatRoomId}').onWrite(async (change) => {
    const rootRef = admin.database().ref(); // top-level reference, needed for the multi-path update
    const now = Date.now();
    const cutoff = (now - CUT_OFF_TIME) / 1000; // convert to seconds
    const oldItemsQuery = rootRef.child('Message').orderByChild('seconds').endAt(cutoff);
    const snapshot = await oldItemsQuery.once('value');
    // create a map with all children that need to be removed
    const updates = {};
    snapshot.forEach(messageSnapshot => {
        let senderId = messageSnapshot.child('senderId').val();
        updates['Message/' + messageSnapshot.key] = null; // to delete message
        updates['User/' + senderId + '/myMessages/' + messageSnapshot.key] = null; // to delete entry in user data
    });
    // execute all updates in one go and return the result to end the function
    return rootRef.update(updates);
});
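For completeness, whatever code creates a message would then record its ID as a key under the sender, not as an array index. A sketch under the structures above (the sendMessage helper is illustrative):
async function sendMessage(senderId, text) {
    const db = admin.database();
    const messageRef = db.ref('Message').push(); // auto-generated message ID
    await messageRef.set({
        senderId: senderId,
        text: text,
        seconds: Math.floor(Date.now() / 1000) // the field the cleanup query orders by
    });
    // Store the ID as a key (value true) so concurrent writers can't clash
    await db.ref('User/' + senderId + '/myMessages/' + messageRef.key).set(true);
}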
Method 2: Use an array
Warning: This method falls prey to concurrency issues. If a user were to post a new message during the delete operation, its ID could be removed while evaluating the deletion. Use method 1 where possible to avoid this.
This method assumes your /User/someUserId/myMessages object looks like this (a plain array):
{
    "/User/someUserId/myMessages": {
        "0": "-Lfq460_5tm6x7dchhOn",
        "1": "-Lfq483gGzmpB_Jt6Wg5",
        ...
    }
}
The leanest, most cost-effective, anti-collision function I can come up with for this data structure is the following:
// Cut off time. Child nodes older than this will be deleted.
const CUT_OFF_TIME = 24 * 60 * 60 * 1000; // 24 hours in milliseconds.
exports.deleteOldMessages = functions.database.ref('/Message/{chatRoomId}').onWrite(async (change) => {
    const rootRef = admin.database().ref(); // top-level reference, needed for the multi-path update
    const now = Date.now();
    const cutoff = (now - CUT_OFF_TIME) / 1000; // convert to seconds
    const oldItemsQuery = rootRef.child('Message').orderByChild('seconds').endAt(cutoff);
    const snapshot = await oldItemsQuery.once('value');
    // create a map with all children that need to be removed
    const updates = {};
    const messagesByUser = {};
    snapshot.forEach(messageSnapshot => {
        updates['Message/' + messageSnapshot.key] = null; // to delete message
        // cache message IDs by user for next step
        let senderId = messageSnapshot.child('senderId').val();
        if (!messagesByUser[senderId]) { messagesByUser[senderId] = []; }
        messagesByUser[senderId].push(messageSnapshot.key);
    });
    // Get each user's list of message IDs and remove those that were deleted.
    let pendingOperations = [];
    for (let [senderId, messageIdsToRemove] of Object.entries(messagesByUser)) {
        pendingOperations.push(admin.database().ref('User/' + senderId + '/myMessages').once('value')
            .then((messageArraySnapshot) => {
                let messageIds = messageArraySnapshot.val();
                messageIds = messageIds.filter((id) => !messageIdsToRemove.includes(id));
                updates['User/' + senderId + '/myMessages'] = messageIds; // to update array with non-deleted values
            }));
    }
    // wait for each user's new /myMessages value to be added to the pending updates
    await Promise.all(pendingOperations);
    // execute all updates in one go and return the result to end the function
    return rootRef.update(updates);
});
Update: DO NOT USE THIS ANSWER (I will leave it as it may still be handy for detecting a delete operation for some other need, but do not use for the purpose of cleaning up an array in another document)
Thanks to #samthecodingman for providing an atomic and concurrency safe answer.
If using Firebase Realtime Database you can add an onDelete event listener:
const functions = require('firebase-functions');
const admin = require('firebase-admin');
admin.initializeApp();
exports.onDeletedMessage = functions.database.ref('Message/{messageId}').onDelete(async (snapshot) => {
    // snapshot holds the data as it existed just before deletion
    const data = snapshot.val();
    const userId = data.userId; // hopefully you have this in the message document
    const messageId = snapshot.key;
    // once('value') is useful for data that only needs to be loaded once and isn't
    // expected to change frequently or require active listening
    const userSnapshot = await admin.database().ref('/users/' + userId).once('value');
    const myMessages = userSnapshot.val().myMessages;
    if (!myMessages || !myMessages.length) {
        // nothing to do, myMessages array is undefined or empty
        return;
    }
    var index = myMessages.indexOf(messageId);
    if (index === -1) {
        // nothing to delete, messageId is not in myMessages
        return;
    }
    // splice returns the removed elements, which we do not need
    myMessages.splice(index, 1);
    await admin.database().ref('/users/' + userId).update({ myMessages: myMessages });
});
If using Cloud Firestore, you can add an event listener on the document being deleted to handle cleanup in your user document:
exports.onDeletedMessage = functions.firestore.document('Message/{messageId}').onDelete(async (snapshot, context) => {
    const data = snapshot.data();
    if (!data) {
        return;
    }
    const userId = data.userId; // hopefully you have this in the message document
    const messageId = context.params.messageId;
    // now you can do cleanup for the /users/{userId} document, like removing the messageId from the myMessages property
    const userSnapshot = await admin.firestore().collection('users').doc(userId).get();
    const userData = userSnapshot.data();
    if (!userData.myMessages || !userData.myMessages.length) {
        // nothing to do, myMessages array is undefined or empty
        return;
    }
    var index = userData.myMessages.indexOf(messageId);
    if (index === -1) {
        // nothing to delete, messageId is not in myMessages
        return;
    }
    // splice returns the removed elements, which we do not need
    userData.myMessages.splice(index, 1);
    // To update some fields of a document without overwriting the entire document, use the update() method
    await admin.firestore().collection('users').doc(userId).update({ myMessages: userData.myMessages });
});

How to increase your limit of Github API uses per hour in Javascript

I'm trying to work with pull requests, issues, and commits for repos, and I have the following code:
const axios = require('axios');
var gitPullApiLink = "https://api.github.com/repos/elixir-lang/elixir/pulls";
var listOfCommits = [];
var listOfSHAs = [];
var mapOfInfoObjects = new Map();
var mapPullRequestNumberToCommits = new Map();
var mapPRNumbersToCommitObjects = new Map();
var listOfPrObjects = [];
var setOfFileObjects = new Set();
var listOfNumbersOfTargetedIssues = [];
var mapPRnumberToCloseOpenDateObjects = new Map();
class PullRequestParser {
async getListOfPullRequests(pullrequestLink) {
const message = await axios.get(pullrequestLink);
//console.log(message);
listOfPrObjects = message['data'];
}
async getCommitsForEachPullRequestAndPRinformation() {
var listOfPrNumbers = [];
var k;
// this loop will just make a list of Pull Request Numbers
for (k = 0; k < listOfPrObjects.length; k++){
var currPrNumber = listOfPrObjects[k]['number'];
listOfPrNumbers.push(currPrNumber);
}
// I created a separate list just because... I did it this way because on the github API website it seems
// like the pull request has the same number as the issue it affects. I explain how you can see this down below
listOfNumbersOfTargetedIssues = listOfPrNumbers;
// next loop will make objects that contain information about each pull request.
var n;
for (n = 0; n < listOfPrNumbers.length; n++){
var ApiLinkForEachPullRequest = gitPullApiLink + "/" + listOfPrNumbers[n];
const mes = await axios.get(ApiLinkForEachPullRequest);
var temp = {OpeningDate: mes['data']['created_at'],
ClosingDate: mes['data']['closed_at'],
IssueLink: mes['data']['_links']['issue']['href']};
//mapPRnumberToCloseOpenDateObjects will be a map where the key is the pull request number and the value
// is the object that stores the open date, close date, and issue link for that pull request. The reason
// why I said I think the pull request number is the same as the number of the issue it affects is because
// if you take any object from the map, say you do mapPRnumberToCloseOpenDateObjects.get(10). You'll
// get an object with a pull request number 10. Now if you take this object and look at it's "IssueLink"
// field, the very last part of the link will have the number 10, and if you look at the github API
// it says for a single issue, you do: /repos/:owner/:repo/issues/:issue_number <---- As you can see,
// the IssueLink field will have this structure and in place of the issue_number, the field will be 10
// for our example object.
mapPRnumberToCloseOpenDateObjects.set(listOfPrNumbers[n], temp);
}
//up to this point, we have the pull request numbers. we will now start getting the commits associated with
//each pull request
var j;
for (j = 0; j < listOfPrNumbers.length; j++){
var currentApiLink = gitPullApiLink + "/" + listOfPrNumbers[j] + "/commits";
const res = await axios.get(currentApiLink);
//here we map a single pull request to the information containing the commits. I'll just warn you in
// advance: there's another object called mapPRNumbersToCommitObjects. THIS MAP IS DIFFERENT! I know it's
// subtle, but I hope the language can make the distinction: mapPullRequestNumberToCommits will just
// map a pull request number to some data about the commits it's linked to. In contrast,
// mapPRNumbersToCommitObjects will be the map that actually maps pull request numbers to objects
// containing information about the commits a pull request is associated with!
mapPullRequestNumberToCommits.set(listOfPrNumbers[j], res['data']);
}
// console.log("hewoihoiewa");
}
async createCommitObjects(){
var x;
// the initial loop using x will loop over all pull requests and get the associated commits
for (x = 0; x < listOfPrObjects.length; x++){
//here we will get the commits
var currCommitObjects = mapPullRequestNumberToCommits.get(listOfPrObjects[x]['number']);
//console.log('dhsiu');
// the loop using y will iterate over all commits that we get from a single pull request
var y;
for (y = 0; y < currCommitObjects.length; y++){
var currentSHA = currCommitObjects[y]['sha'];
listOfSHAs.push(currentSHA);
var currApiLink = "https://api.github.com/repos/elixir-lang/elixir/commits/" + currentSHA;
const response = await axios.get(currApiLink);
//console.log("up to here");
// here we start extracting some information from a single commit
var currentAuthorName = response['data']['commit']['committer']['name'];
var currentDate = response['data']['commit']['committer']['date'];
var currentFiles = response['data']['files'];
// this loop will iterate over all changed files for a single commit. Remember, every commit has a list
// of changed files, so this loop will iterate over all those files, get the necessary information
// from those files.
var z;
// we create this temporary list of file objects because for every file, we want to make an object
// that will store the necessary information for that one file. after we store all the objects for
// each file, we will add this list of file objects as a field for our bigger commit object (see down below)
var tempListOfFileObjects = [];
for (z = 0; z < currentFiles.length; z++){
var fileInConsideration = currentFiles[z];
var nameOfFile = fileInConsideration['filename'];
var numberOfAdditions = fileInConsideration['additions'];
var numberOfDeletions = fileInConsideration['deletions'];
var totalNumberOfChangesToFile = fileInConsideration['changes'];
//console.log("with file");
var tempFileObject = {fileName: nameOfFile, totalAdditions: numberOfAdditions,
totalDeletions: numberOfDeletions, numberOfChanges: totalNumberOfChangesToFile};
// we add the same file objects to both a temporary, local list and a global set. Don't be tripped
// up by this; they're doing the same thing!
setOfFileObjects.add(tempFileObject);
tempListOfFileObjects.push(tempFileObject);
}
// here we make an object that stores information for a single commit. sha, authorName, date are single
// values, but files will be a list of file objects and these file objects will store further information
// for each file.
var tempObj = {sha: currentSHA, authorName: currentAuthorName, date: currentDate, files: tempListOfFileObjects};
var currPrNumber = listOfPrObjects[x]['number'];
console.log(currPrNumber);
// here we will make a single pull request number to an object that will contain all the information for
// every single commit associated with that pull request. So for every pull request, it will map to a list
// of objects where each object stores information about a commit associated with the pull request.
mapPRNumbersToCommitObjects.set(currPrNumber, tempObj);
}
}
return mapPRNumbersToCommitObjects;
}
async startParsingPullRequests() {
this.getListOfPullRequests(gitPullApiLink + "?state=all").then(() => {
this.getCommitsForEachPullRequestAndPRinformation().then(() => {
this.createCommitObjects().then((response) => {
console.log("functions were successful");
return mapPRNumbersToCommitObjects;
}).catch((error) => {
console.log("printing first error");
console.log(error);
})
}).catch((error2) => {
console.log("printing the second error");
console.log(error2);
})
}).catch((error3) => {
console.log("printing the third error");
console.log(error3);
});
}
//adding some getter methods so they can be used to work with whatever information people may need.
//I start all of them with the this.startParsingPullRequests() method because by calling that method
it gets all
// the information for the global variables.
async getSetOfFileObjects(){
var dummyMap = await this.startParsingPullRequests();
return {files: setOfFileObjects, prMap: mapPRnumberToCloseOpenDateObjects};
}
async OpenCloseDateObjects(){
var dummyMap = await this.startParsingPullRequests();
return mapPRnumberToCloseOpenDateObjects;
}
async getNumbersOfTargetedIssues(){
var dummyMap = await this.startParsingPullRequests();
return listOfNumbersOfTargetedIssues;
}
}
var dummy = new PullRequestParser();
var dummyMap = dummy.startParsingPullRequests().then((message) => {
console.log("dummyMap is defined! :)");
console.log(dummyMap);
});
module.exports = PullRequestParser;
Whenever I run the code in the WebStorm terminal, though, with:
node PullRequestParser.js
I get a 403 error, followed by a bunch of error output, with the following statement:
data: {
message: "API rate limit exceeded for 138.186.17.173. (But here's the good news: Authenticated
requests get a higher rate limit. Check out the documentation for more details.)"
I looked up the documentation for this and found out that without authentication, I can make 60 requests per hour to a repo. To get authenticated, however, the only example the documentation provides uses the command line. I don't think that would be enough, though, because I want to do some further analysis with the results I get. Does anybody know how I can increase the number of requests I can make? Where in the code would I need to make changes, and what kind of changes would I need to make? Thanks!
The first line of the documentation says everything you need to know.
For API requests using Basic Authentication or OAuth, you can make up to 5000 requests per hour.
Using Basic Authentication is pretty simple, so that may be the easiest thing to get you up and running. OAuth is more complicated, but more desirable in production.
The axios library supports basic auth requests out of the box.
async getListOfPullRequests(pullrequestLink) {
    const message = await axios.get(pullrequestLink, {
        auth: {
            username: 'username',
            password: 'password'
        }
    });
    //console.log(message);
    listOfPrObjects = message['data'];
}
You just need to supply the correct username and password information.
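Note that GitHub has since deprecated account passwords for API calls; a personal access token works in their place. A minimal sketch sending the token in an Authorization header (the token string is a placeholder you generate in your GitHub settings):
async getListOfPullRequests(pullrequestLink) {
    const message = await axios.get(pullrequestLink, {
        headers: {
            Authorization: 'token YOUR_PERSONAL_ACCESS_TOKEN' // placeholder token
        }
    });
    listOfPrObjects = message['data'];
}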

Any way to ensure I get the return value from a sequence of async functions

I'm working with repos and trying to get the pull requests, issues and commits for a repo. I have the following code:
const axios = require('axios');
var gitPullApiLink = "https://api.github.com/repos/alirezadir/Production-Level-Deep-Learning/pulls"
var listOfCommits = [];
var listOfSHAs = [];
var mapOfInfoObjects = new Map();
var mapPullRequestNumberToCommits = new Map();
var mapPRNumbersToCommitObjects = new Map();
var listOfPrObjects = [];
var setOfFileObjects = new Set();
var listOfNumbersOfTargetedIssues = [];
var mapPRnumberToCloseOpenDateObjects = new Map();
class PullRequestParser {
async getListOfPullRequests(pullrequestLink) {
const message = await axios.get(pullrequestLink);
//console.log(message);
listOfPrObjects = message['data'];
}
async getCommitsForEachPullRequestAndPRinformation() {
var listOfPrNumbers = [];
var k;
// this loop will just make a list of Pull Request Numbers
for (k = 0; k < listOfPrObjects.length; k++){
var currPrNumber = listOfPrObjects[k]['number'];
listOfPrNumbers.push(currPrNumber);
}
// I created a separate list just because... I did it this way because on the github API website it seems
// like the pull request has the same number as the issue it affects. I explain how you can see this down below
listOfNumbersOfTargetedIssues = listOfPrNumbers;
// next loop will make objects that contain information about each pull request.
var n;
for (n = 0; n < listOfPrNumbers.length; n++){
var ApiLinkForEachPullRequest = gitPullApiLink + "/" + listOfPrNumbers[n];
const mes = await axios.get(ApiLinkForEachPullRequest);
var temp = {OpeningDate: mes['data']['created_at'],
ClosingDate: mes['data']['closed_at'],
IssueLink: mes['data']['_links']['issue']['href']};
//mapPRnumberToCloseOpenDateObjects will be a map where the key is the pull request number and the value
// is the object that stores the open date, close date, and issue link for that pull request. The reason
// why I said I think the pull request number is the same as the number of the issue it affects is because
// if you take any object from the map, say you do mapPRnumberToCloseOpenDateObjects.get(10). You'll
// get an object with a pull request number 10. Now if you take this object and look at it's "IssueLink"
// field, the very last part of the link will have the number 10, and if you look at the github API
// it says for a single issue, you do: /repos/:owner/:repo/issues/:issue_number <---- As you can see,
// the IssueLink field will have this structure and in place of the issue_number, the field will be 10
// for our example object.
mapPRnumberToCloseOpenDateObjects.set(listOfPrNumbers[n], temp);
}
//up to this point, we have the pull request numbers. we will now start getting the commits associated with
//each pull request
var j;
for (j = 0; j < listOfPrNumbers.length; j++){
var currentApiLink = gitPullApiLink + "/" + listOfPrNumbers[j] + "/commits";
const res = await axios.get(currentApiLink);
//here we map a single pull request to the information containing the commits. I'll just warn you in
// advance: there's another object called mapPRNumbersToCommitObjects. THIS MAP IS DIFFERENT! I know it's
// subtle, but I hope the language can make the distinction: mapPullRequestNumberToCommits will just
// map a pull request number to some data about the commits it's linked to. In contrast,
// mapPRNumbersToCommitObjects will be the map that actually maps pull request numbers to objects
// containing information about the commits a pull request is associated with!
mapPullRequestNumberToCommits.set(listOfPrNumbers[j], res['data']);
}
// console.log("hewoihoiewa");
}
async createCommitObjects(){
var x;
// the initial loop using x will loop over all pull requests and get the associated commits
for (x = 0; x < listOfPrObjects.length; x++){
//here we will get the commits
var currCommitObjects = mapPullRequestNumberToCommits.get(listOfPrObjects[x]['number']);
//console.log('dhsiu');
// the loop using y will iterate over all commits that we get from a single pull request
var y;
for (y = 0; y < currCommitObjects.length; y++){
var currentSHA = currCommitObjects[y]['sha'];
listOfSHAs.push(currentSHA);
var currApiLink = "https://api.github.com/repos/alirezadir/Production-Level-Deep-Learning/commits/" + currentSHA;
const response = await axios.get(currApiLink);
//console.log("up to here");
// here we start extracting some information from a single commit
var currentAuthorName = response['data']['commit']['committer']['name'];
var currentDate = response['data']['commit']['committer']['date'];
var currentFiles = response['data']['files'];
// this loop will iterate over all changed files for a single commit. Remember, every commit has a list
// of changed files, so this loop will iterate over all those files, get the necessary information
// from those files.
var z;
// we create this temporary list of file objects because for every file, we want to make an object
// that will store the necessary information for that one file. after we store all the objects for
// each file, we will add this list of file objects as a field for our bigger commit object (see down below)
var tempListOfFileObjects = [];
for (z = 0; z < currentFiles.length; z++){
var fileInConsideration = currentFiles[z];
var nameOfFile = fileInConsideration['filename'];
var numberOfAdditions = fileInConsideration['additions'];
var numberOfDeletions = fileInConsideration['deletions'];
var totalNumberOfChangesToFile = fileInConsideration['changes'];
//console.log("with file");
var tempFileObject = {fileName: nameOfFile, totalAdditions: numberOfAdditions,
totalDeletions: numberOfDeletions, numberOfChanges: totalNumberOfChangesToFile};
// we add the same file objects to both a temporary, local list and a global set. Don't be tripped
// up by this; they're doing the same thing!
setOfFileObjects.add(tempFileObject);
tempListOfFileObjects.push(tempFileObject);
}
// here we make an object that stores information for a single commit. sha, authorName, date are single
// values, but files will be a list of file objects and these file objects will store further information
// for each file.
var tempObj = {sha: currentSHA, authorName: currentAuthorName, date: currentDate, files: tempListOfFileObjects};
var currPrNumber = listOfPrObjects[x]['number'];
console.log(currPrNumber);
// here we will make a single pull request number to an object that will contain all the information for
// every single commit associated with that pull request. So for every pull request, it will map to a list
// of objects where each object stores information about a commit associated with the pull request.
mapPRNumbersToCommitObjects.set(currPrNumber, tempObj);
}
}
return mapPRNumbersToCommitObjects;
}
startParsingPullRequests() {
this.getListOfPullRequests(gitPullApiLink + "?state=all").then(() => {
this.getCommitsForEachPullRequestAndPRinformation().then(() => {
this.createCommitObjects().then((response) => {
console.log("functions were successful");
return mapPRNumbersToCommitObjects;
}).catch((error) => {
console.log("printing first error");
console.log(error);
})
}).catch((error2) => {
console.log("printing the second error");
console.log(error2);
})
}).catch((error3) => {
console.log("printing the third error");
console.log(error3);
});
}
//adding some getter methods so they can be used to work with whatever information people may need.
//I start all of them with the this.startParsingPullRequests() method because by calling that method it gets all
// the information for the global variables.
async getSetOfFileObjects(){
var dummyMap = this.startParsingPullRequests();
return setOfFileObjects;
}
async OpenCloseDateObjects(){
var dummyMap = this.startParsingPullRequests();
return mapPRnumberToCloseOpenDateObjects;
}
async getNumbersOfTargetedIssues(){
var dummyMap = this.startParsingPullRequests();
return listOfNumbersOfTargetedIssues;
}
}
I'm trying to make a "dummy map" that gets the return value of calling startParsingPullRequests() in the following way:
var dummy = new PullRequestParser();
var dummyMap = dummy.startParsingPullRequests();
console.log(dummyMap);
But I end up with the following output:
undefined
3
1
functions were successful
I understand dummyMap is undefined because startParsingPullRequests() makes a series of async calls, but I'm wondering how I can make sure dummyMap gets its value before it is printed. Thanks!
Since you already have code using async/await, why are you changing to .then? When you are using .then/.catch, it can be a lot harder to tell where things are waiting and what they are waiting on.
Here is your function converted to use async/await. With this change, you can await this function (though in that case, you might want to not catch the error and let the caller handle it; otherwise you need to check the return value for undefined to "detect" the error). I've also simplified the error handling (but again, unless the code here can "fix" the problem, there's no reason to handle it here).
async startParsingPullRequests() {
    try {
        await this.getListOfPullRequests(gitPullApiLink + "?state=all");
        await this.getCommitsForEachPullRequestAndPRinformation();
        const response = await this.createCommitObjects();
        console.log("functions were successful");
        return mapPRNumbersToCommitObjects;
    } catch (error) {
        console.log("printing error");
        console.log(error);
    }
}
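The caller then has to await the returned promise as well (or chain .then), for example:
var dummy = new PullRequestParser();
dummy.startParsingPullRequests().then((dummyMap) => {
    // dummyMap now holds the resolved map instead of undefined
    console.log(dummyMap);
});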

Create index on already existing objectStore

As an example, on basic setup one index is created:
db.onupgradeneeded = function(event) {
    var db = event.target.result;
    var store = db.createObjectStore('name', { keyPath: 'id' });
    store.createIndex('by name', 'name', { unique: false });
};
Question:
Is it possible to create/append more indexes to the same objectStore on a future version update? Because if I try:
db.onupgradeneeded = function(event) {
    var db = event.target.result;
    var store = db.createObjectStore('name', { keyPath: 'id' });
    store.createIndex('by newName', 'newName', { unique: false });
};
It throws an error that the objectStore already exists. And if I try to create a store reference using a transaction:
db.onupgradeneeded = function(event) {
    var db = event.target.result;
    var store = db.transaction('name', 'readwrite').objectStore('name');
    store.createIndex('by newName', 'newName', { unique: false });
};
It throws that a version change transaction is currently running.
Yes it is possible. It can be a bit confusing at first. You want to get the existing object store via the implicit transaction created for you within onupgradeneeded. This is a transaction of type versionchange which is basically like a readwrite transaction but specific to the onupgradeneeded handler function.
Something like this:
var request = indexedDB.open(name, oldVersionPlusOne);
request.onupgradeneeded = myOnUpgradeNeeded;
function myOnUpgradeNeeded(event) {
    // Get a reference to the request related to this event
    // #type IDBOpenRequest (a specialized type of IDBRequest)
    var request = event.target;
    // Get a reference to the IDBDatabase object for this request
    // #type IDBDatabase
    var db = request.result;
    // Get a reference to the implicit transaction for this request
    // #type IDBTransaction
    var txn = request.transaction;
    // Now, get a reference to the existing object store
    // #type IDBObjectStore
    var store = txn.objectStore('myStore');
    // Now, optionally inspect index names, or create a new index
    console.log('existing index names in store', store.indexNames);
    // Add a new index to the existing object store
    store.createIndex(...);
}
You also will want to take care to increment the version so as to guarantee the onupgradeneeded handler function is called, and to represent that your schema (basically the set of object stores, indexes, and properties) has changed in the new version.
You will also need to rewrite the function so that you only create or make changes based on the version. You can use event.oldVersion to help with this, or things like db.objectStoreNames.contains.
Something like this:
function myOnUpgradeNeeded(event) {
    var is_new_db = isNaN(event.oldVersion) || event.oldVersion === 0;
    if (is_new_db) {
        var db = event.target.result;
        var store = db.createObjectStore(...);
        store.createIndex('my-initial-index');
        // Now that you decided you want a second index, you also need
        // to do this for brand new databases
        store.createIndex('my-second-new-index');
    }
    // But if the database already exists, we are not creating things,
    // instead we are modifying the existing things to get into the
    // new state of things we want
    var is_old_db_not_yet_current_version = !isNaN(event.oldVersion) && event.oldVersion < 2;
    if (is_old_db_not_yet_current_version) {
        var txn = event.target.transaction;
        var store = txn.objectStore('store');
        store.createIndex('my-second-new-index');
    }
}
Pay close attention to the fact that I used event.target.transaction instead of db.transaction(...). These are not at all the same thing. One references an existing transaction, and one creates a new one.
Finally, and in addition, a personal rule of mine and not a formal coding requirement, you should never be using db.transaction() from within onupgradeneeded. Stick to modifying the schema when doing upgrades, and do all data changes outside of it.
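Putting it together, a compact sketch (store, index, and database names here are illustrative) that bumps the version from 1 to 2 and adds a second index to the store created in version 1:
var request = indexedDB.open('myDatabase', 2); // was version 1
request.onupgradeneeded = function(event) {
    var db = event.target.result;
    var txn = event.target.transaction; // the implicit versionchange transaction
    if (event.oldVersion < 1) {
        // Brand new database: create the store with both indexes
        var store = db.createObjectStore('myStore', { keyPath: 'id' });
        store.createIndex('by name', 'name', { unique: false });
        store.createIndex('by newName', 'newName', { unique: false });
    } else if (event.oldVersion < 2) {
        // Existing version-1 database: only add the new index
        txn.objectStore('myStore').createIndex('by newName', 'newName', { unique: false });
    }
};
request.onsuccess = function() {
    request.result.close();
};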
