API request not updating a variable on the first request; the second request outputs the correct information - javascript

I am building an application to collate all the indicators that I use while investing in the stock market, as a personal project. I am currently making use of the yahoo-finance API. Given below is my code for one of the data loading functions, which will later be used with the Tulind package.
var yahooFinance = require("yahoo-finance");

var returnValue = [];

const loadData = (symbol, from, to, freq) => {
  let open = [];
  let close = [];
  let high = [];
  let low = [];
  let volume = [];
  let updateValues = () => {
    returnValue.forEach((e) => {
      open.push(e.open);
      close.push(e.adjClose);
      high.push(e.high);
      low.push(e.low);
      volume.push(e.volume);
    });
  };
  const data = yahooFinance.historical(
    {
      symbol: symbol,
      from: from,
      to: to,
      freq: freq,
    },
    function (error, quotes) {
      if (error)
        console.log("Error in server/indicatorCalc/parent_calc.js", error);
      else {
        returnValue = quotes;
      }
      console.log("Data loaded in parent_calc.js");
    }
  );
  updateValues();
  return {
    open,
    close,
    high,
    low,
    volume,
  };
};

// console.log(tulind.indicators);
module.exports = { loadData };
The function above is exposed through an API endpoint at localhost:5000/indicatorParent/
For testing purposes, the API is being called using the Thunder Client extension in VS Code.
The first request returns empty arrays (or the previous request's values, if this is not the first request). When I click the send button a second time, the response contains the correct/expected values. I want to know how to fix this apparent "lag" in the updating of the returnValue variable. I am open to suggestions for changing the flow of the code as well.
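The root cause is that yahooFinance.historical() is asynchronous: updateValues() runs before the callback has assigned returnValue, so the first response is built from the still-empty (or stale) arrays. Below is a minimal sketch of an async rewrite; it assumes the library's promise interface (historical() returns a Promise when called without a callback), so verify that against the version you use:
// Sketch: wait for the API result before building the arrays.
const loadData = async (symbol, from, to, freq) => {
  const quotes = await yahooFinance.historical({ symbol, from, to, freq });
  return {
    open: quotes.map((q) => q.open),
    close: quotes.map((q) => q.adjClose),
    high: quotes.map((q) => q.high),
    low: quotes.map((q) => q.low),
    volume: quotes.map((q) => q.volume),
  };
};
// The route handler must await it too, e.g.:
// app.get("/indicatorParent", async (req, res) => {
//   res.json(await loadData(req.query.symbol, req.query.from, req.query.to, req.query.freq));
// });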


Override Mongoose save method to retry on `duplicate key error`

My Mongoose schema uses a custom _id value and the code I inherited does something like this:
const sampleSchema = new mongoose.Schema({
  _id: String,
  key: String,
});

sampleSchema.statics.generateId = async function() {
  let id;
  do {
    id = randomStringGenerator.generate({length: 8, charset: 'hex', capitalization: 'uppercase'});
  } while (await this.exists({_id: id}));
  return id;
};

let SampleModel = mongoose.model('Sample', sampleSchema);
A simple usage looks like this:
let mySample = new SampleModel({_id: await SampleModel.generateId(), key: 'a' });
await mySample.save();
There are at least three problems with this:
1. Every save will require at least two trips to the database: one to test for a unique id and one to save the document.
2. For this to work, it is necessary to manually call generateId() before each save. An ideal solution would handle that for me, like Mongoose does with ids of type ObjectId.
3. Most significantly, there is a potential race condition that will result in a duplicate key error. Consider two clients running this code. Both coincidentally generate the same id at the same time, both look in the database and find the id absent, and both try to write the record to the database. The second will fail.
An ideal solution would, on save, generate an id, save it to the database and on duplicate key error, generate a new id and retry. Do this in a loop until the document is stored successfully.
The trouble is, I don't know how to get Mongoose to let me do this.
Here's what I tried: based on this SO question, I found a rather old sample (using a very old Mongoose version) of overriding the save function to accomplish something similar, and based this attempt on it.
// First, change generateId() to force a collision
let ids = ['a', 'a', 'a', 'b'];
let index = 0;
let generateId = function() {
  return ids[index++];
};

// Configure middleware to generate the id before a save
sampleSchema.pre('validate', function(next) {
  if (this.isNew)
    this._id = generateId();
  next();
});

// Now override the save function
SampleModel.prototype.save_original = SampleModel.prototype.save;
SampleModel.prototype.save = function(options, callback) {
  let self = this;
  let retryOnDuplicate = function(err, savedDoc) {
    if (err) {
      if (err.code === 11000 && err.name === 'MongoError') {
        self.save(options, retryOnDuplicate);
        return;
      }
    }
    if (callback) {
      callback(err, savedDoc);
    }
  };
  return self.save_original(options, retryOnDuplicate);
};
This gets me close but I'm leaking a promise and I'm not sure where.
let sampleA = new SampleModel({key: 'a'});
let sampleADoc = await sampleA.save();
console.log('sampleADoc', sampleADoc); // prints undefined, but should print the document
let sampleB = new SampleModel({key: 'b'});
let sampleBDoc = await sampleB.save();
console.log('sampleBDoc', sampleBDoc); // prints undefined, but should print the document
let all = await SampleModel.find();
console.log('all', all); // prints `[]`, but should be an array of two documents
Output
sampleADoc undefined
sampleBDoc undefined
all []
The documents eventually get written to the database, but not before the console.log calls are made.
Where am I leaking a promise? Is there an easier way to do this that addresses the three problems I outlined?
Edit 1:
Mongoose version: 5.11.15
I fixed the problem by changing the save override. The full solution looks like this:
const sampleSchema = new mongoose.Schema({
  _id: String,
  color: String,
});

let generateId = function() {
  return randomStringGenerator.generate({length: 8, charset: 'hex', capitalization: 'uppercase'});
};

sampleSchema.pre('validate', function() {
  if (this.isNew)
    this._id = generateId();
});

let SampleModel = mongoose.model('Sample', sampleSchema);

SampleModel.prototype.save_original = SampleModel.prototype.save;
SampleModel.prototype.save = function(options, callback) {
  let self = this;
  let isDupKeyError = (error, field) => {
    // Determine whether the error is a duplicate key error on the given field
    return error?.code === 11000 && error?.name === 'MongoError' && error?.keyValue?.[field];
  };
  let saveWithRetries = (options, callback) => {
    // save() returns undefined if used with callback or a Promise otherwise.
    // https://mongoosejs.com/docs/api/document.html#document_Document-save
    let promise = self.save_original(options, callback);
    if (promise) {
      return promise.catch((error) => {
        if (isDupKeyError(error, '_id')) {
          return saveWithRetries(options, callback);
        }
        throw error;
      });
    }
  };
  let retryCallback;
  if (callback) {
    retryCallback = (error, saved, rows) => {
      if (isDupKeyError(error, '_id')) {
        saveWithRetries(options, retryCallback);
      } else {
        callback(error, saved, rows);
      }
    };
  }
  return saveWithRetries(options, retryCallback);
};
This will generate an _id repeatedly until a successful save is called and addresses the three problems outlined in the original question:
The minimum number of trips to the database has been reduced from two to one. Of course, if there are collisions, more trips will occur, but that's the exceptional case.
This implementation takes care of generating the id itself with no manual step to take before saving. This reduces complexity and removes the required knowledge of prerequisites for saving that are present in the original method.
The race condition has been addressed. It won't matter if two clients attempt to use the same key. One will succeed and the other will generate a new key and save again.
To improve this:
There ought to be a maximum number of save attempts for a single document followed by failure. In this case, you've perhaps used up all the available keys in whatever domain you're using.
The unique field may not be named _id, or you might have multiple fields that require a unique generated value. The embedded helper function isDupKeyError() could be updated to look for multiple keys; then, on error, you could regenerate just the failed key, as in the sketch below.
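For illustration, here is a minimal sketch of that multi-field variant; the generators map and findDupKeyField() are hypothetical names, not part of the solution above:
// Hypothetical extension: map each unique generated field to its generator
// so only the colliding field is regenerated on a duplicate key error.
const generators = {
  _id: generateId,
  // slug: generateSlug,  // one entry per unique generated field
};

let findDupKeyField = (error) => {
  // Return the first configured field named in the error's keyValue, if any
  if (error?.code !== 11000) return undefined;
  return Object.keys(generators).find((f) => error.keyValue?.[f] !== undefined);
};

// Inside saveWithRetries, regenerate just the failed key before retrying:
// const field = findDupKeyField(error);
// if (field) { self[field] = generators[field](); return saveWithRetries(options, callback); }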

Firestore listener removes a message from pagination when adding a new message in React Native

I am trying to do Firestore reactive pagination. I know there are posts, comments, and articles saying that it's not possible but anyways...
When I add a new message, it kicks off or "removes" the previous message
Here's the main code. I'm paginating 4 messages at a time:
async getPaginatedRTLData(queryParams: TQueryParams, onChange: Function) {
  let collectionReference = collection(firestore, queryParams.pathToDataInCollection);
  let collectionReferenceQuery = this.modifyQueryByOperations(collectionReference, queryParams);
  // Turn the query into a snapshot listener to track changes
  const unsubscribe = onSnapshot(collectionReferenceQuery, (snapshot: QuerySnapshot) => {
    snapshot.docChanges().forEach((change: DocumentChange<DocumentData>) => {
      // Now save the data to format later
      let formattedData = this.storeData(change, queryParams);
      onChange(formattedData);
    });
  });
  this.unsubscriptions.push(unsubscribe);
}
For completeness, this is how I'm building my query:
let queryParams: TQueryParams = {
  limitResultCount: 4,
  uniqueKey: '_id',
  pathToDataInCollection: messagePath,
  orderBy: {
    docField: orderByKey,
    direction: orderBy
  }
}

modifyQueryByOperations(
  collectionReference: CollectionReference<DocumentData> = this.collectionReference,
  queryParams: TQueryParams) {
  // Extract query params
  let {orderBy, where: where_param, limitResultCount = PAGINATE} = queryParams;
  let queryCall: Query<DocumentData> = collectionReference;
  if (where_param) {
    let {searchByField, whereFilterOp, valueToMatch} = where_param;
    //collectionReferenceQuery = collectionReference.where(searchByField, whereFilterOp, valueToMatch)
    queryCall = query(queryCall, where(searchByField, whereFilterOp, valueToMatch));
  }
  if (orderBy) {
    let {docField, direction} = orderBy;
    //collectionReferenceQuery = collectionReference.orderBy(docField, direction)
    queryCall = query(queryCall, fs_orderBy(docField, direction));
  }
  if (limitResultCount) {
    //collectionReferenceQuery = collectionReference.limit(limitResultCount)
    queryCall = query(queryCall, limit(limitResultCount));
  }
  if (this.lastDocInSortedOrder) {
    //collectionReferenceQuery = collectionReference.startAt(this.lastDocInSortedOrder)
    queryCall = query(queryCall, startAt(this.lastDocInSortedOrder));
  }
  return queryCall;
}
The last message is removed when I add a new message to the collection. What's worse, it's not consistent. I debugged this, and Firestore is removing the message.
I almost feel like this is a bug in Firestore's handling of listeners.
As mentioned in the comments and confirmed by you, the problem you are facing occurs because some values of the fields you are querying on changed while the listener was still active, which makes the listener treat the document as removed.
This is supported by the fact that the records are not being deleted from Firestore itself, but are just being excluded from the listener's results.
This can be fixed by creating a better query structure, separating the old data from the new data coming in from the listener, which you mentioned you've already done in the comments as well.
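For readers hitting the same symptom, here is a minimal sketch of that separation; the messageCache map and the onChange contract are assumptions for illustration, not part of the original code:
// Sketch: keep a local cache keyed by doc id so a "removed" change
// (the doc fell out of the limited query window) is not treated as a delete.
const messageCache = new Map();

onSnapshot(collectionReferenceQuery, (snapshot) => {
  snapshot.docChanges().forEach((change) => {
    if (change.type === 'added' || change.type === 'modified') {
      messageCache.set(change.doc.id, change.doc.data());
    }
    // 'removed' with a limit() in play usually means the doc left the
    // query window, not that it was deleted; keep the cached copy.
  });
  onChange([...messageCache.values()]);
});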

socket.io: send data to matching sockets

When a user connects to my socket, I add it to a session map:
io.on('connection', function (socket) {
  sessionMap.set(socket.id, socket);
});
My session map:
var SessionMap = {};

module.exports = {
  set: function(key, value) {
    SessionMap[key] = value;
  },
  get: function(key) {
    return SessionMap[key];
  },
  delete: function(key) {
    delete SessionMap[key];
  },
  all: function() {
    return SessionMap;
  }
}
I also save the user's socket id in a Player class:
socket.on('addPlayer-Queue', (result) => {
  sessionMap.set(socket.id, socket);
  queue.addPlayer(new Player({
    id: result.id,
    name: result.name,
    mmr: result.mmr
  }, socket.id));
});
I have a function that selects two connected players (which I keep in an array) and creates a battle; I then want to send the battle data to the sockets that were matched for that battle.
This is the function that selects both players and creates a match:
searching() {
  const firstPlayer = this.getRandomPlayer();
  const secondPlayer = this.players.find(
    playerTwo =>
      playerTwo.mmr < this.calculateLessThanPercentage(firstPlayer) &&
      playerTwo.mmr > this.calculateGreaterThanPercentage(firstPlayer) &&
      playerTwo.id != firstPlayer.id
  );
  if (!secondPlayer) {
    return null;
  }
  const matchedPlayers = [firstPlayer, secondPlayer];
  this.removePlayers(matchedPlayers);
  return new Match(matchedPlayers);
}
When connecting, I also use a setInterval to run this function every second.
My difficulty is how to send the data from this battle to the corresponding sockets.
Regarding the relation between socket and player: when a player enters my event, I create a Player, passing in the socket id, and I also add the socket to the session map:
sessionMap.set(socket.id,socket);
My Player class:
class Player {
  constructor(player, socketId) {
    this.id = player.id;
    this.socketId = socketId;
    this.name = player.name;
    this.mmr = player.mmr;
  }
}

module.exports = Player;
const getMatchConfigurationFor = player => {
  /* configure and return the payload notifying the player of the match */
};

const configurePlayersForNewMatch = () => matchedPlayers.forEach(player =>
  sessionMap.get(player.socketId)
    // emit directly on the stored socket; broadcast.to(socket.id) from that
    // same socket would exclude the sender and reach nobody in a room of one
    .emit(messageTags.MATCH_CONFIGURATION,
      getMatchConfigurationFor(player)));
Regarding where to do this work: the single responsibility principle says that a function should have a single clear purpose, so the search method should search for matching players, not configure the match. Do this work in another function that is called while configuring the match, which itself is called after the search returns successfully. I've provided the wrapper function for that above; it is written to expect the relevant pieces to be in scope. You could rewrite it as a proper function with parameters if you prefer.
This is a work in progress solution for Felipe, posted by request.
After a match is found, you'd probably want to emit a MatchFound object to both clients detailing information about the match (including information about their opponent). Once a client gets this, you can initiate anything the client needs for a match (load a level, display names, or a lobby).
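A minimal sketch of that notification step follows; the match.players and match.id fields and the MatchFound payload shape are assumptions for illustration:
// Hypothetical glue: notify both matched players over their own sockets.
const notifyMatchFound = (match) => {
  const [p1, p2] = match.players;  // assumed field on Match
  const payload = (me, opponent) => ({
    matchId: match.id,  // assumed field
    you: { id: me.id, name: me.name, mmr: me.mmr },
    opponent: { id: opponent.id, name: opponent.name, mmr: opponent.mmr },
  });
  sessionMap.get(p1.socketId).emit('MatchFound', payload(p1, p2));
  sessionMap.get(p2.socketId).emit('MatchFound', payload(p2, p1));
};

// Called from the interval loop after searching() returns a Match:
// const match = queue.searching();
// if (match) notifyMatchFound(match);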

How to increase your limit of Github API uses per hour in Javascript

I'm trying to work with pull requests, issues, and commits from repos, and I have the following code:
const axios = require('axios');

var gitPullApiLink = "https://api.github.com/repos/elixir-lang/elixir/pulls";
var listOfCommits = [];
var listOfSHAs = [];
var mapOfInfoObjects = new Map();
var mapPullRequestNumberToCommits = new Map();
var mapPRNumbersToCommitObjects = new Map();
var listOfPrObjects = [];
var setOfFileObjects = new Set();
var listOfNumbersOfTargetedIssues = [];
var mapPRnumberToCloseOpenDateObjects = new Map();

class PullRequestParser {
  async getListOfPullRequests(pullrequestLink) {
    const message = await axios.get(pullrequestLink);
    //console.log(message);
    listOfPrObjects = message['data'];
  }

  async getCommitsForEachPullRequestAndPRinformation() {
    var listOfPrNumbers = [];
    var k;
    // this loop will just make a list of pull request numbers
    for (k = 0; k < listOfPrObjects.length; k++) {
      var currPrNumber = listOfPrObjects[k]['number'];
      listOfPrNumbers.push(currPrNumber);
    }
    // I created a separate list because, on the GitHub API website, it seems like a pull
    // request has the same number as the issue it affects. I explain how you can see this below.
    listOfNumbersOfTargetedIssues = listOfPrNumbers;
    // the next loop will make objects that contain information about each pull request.
    var n;
    for (n = 0; n < listOfPrNumbers.length; n++) {
      var ApiLinkForEachPullRequest = gitPullApiLink + "/" + listOfPrNumbers[n];
      const mes = await axios.get(ApiLinkForEachPullRequest);
      var temp = {
        OpeningDate: mes['data']['created_at'],
        ClosingDate: mes['data']['closed_at'],
        IssueLink: mes['data']['_links']['issue']['href']
      };
      // mapPRnumberToCloseOpenDateObjects is a map where the key is the pull request number and
      // the value is the object that stores the open date, close date, and issue link for that
      // pull request. The reason I think the pull request number is the same as the number of the
      // issue it affects: if you take any object from the map, say
      // mapPRnumberToCloseOpenDateObjects.get(10), you get the object for pull request number 10.
      // Its "IssueLink" field ends with the number 10, and the GitHub API says a single issue
      // lives at /repos/:owner/:repo/issues/:issue_number <---- the IssueLink field has this
      // structure, with 10 in place of issue_number for our example object.
      mapPRnumberToCloseOpenDateObjects.set(listOfPrNumbers[n], temp);
    }
    // up to this point, we have the pull request numbers. We will now start getting the commits
    // associated with each pull request
    var j;
    for (j = 0; j < listOfPrNumbers.length; j++) {
      var currentApiLink = gitPullApiLink + "/" + listOfPrNumbers[j] + "/commits";
      const res = await axios.get(currentApiLink);
      // here we map a single pull request to the raw data about its commits. Fair warning:
      // there's another object called mapPRNumbersToCommitObjects. THIS MAP IS DIFFERENT!
      // It's subtle: mapPullRequestNumberToCommits just maps a pull request number to the raw
      // commit data it's linked to, while mapPRNumbersToCommitObjects maps pull request numbers
      // to processed objects containing information about those commits.
      mapPullRequestNumberToCommits.set(listOfPrNumbers[j], res['data']);
    }
  }

  async createCommitObjects() {
    var x;
    // the outer loop using x iterates over all pull requests and gets the associated commits
    for (x = 0; x < listOfPrObjects.length; x++) {
      // here we get the commits
      var currCommitObjects = mapPullRequestNumberToCommits.get(listOfPrObjects[x]['number']);
      // the loop using y iterates over all commits from a single pull request
      var y;
      for (y = 0; y < currCommitObjects.length; y++) {
        var currentSHA = currCommitObjects[y]['sha'];
        listOfSHAs.push(currentSHA);
        var currApiLink = "https://api.github.com/repos/elixir-lang/elixir/commits/" + currentSHA;
        const response = await axios.get(currApiLink);
        // here we start extracting some information from a single commit
        var currentAuthorName = response['data']['commit']['committer']['name'];
        var currentDate = response['data']['commit']['committer']['date'];
        var currentFiles = response['data']['files'];
        // every commit has a list of changed files, so the loop below iterates over all those
        // files and gets the necessary information from each one. We create a temporary list of
        // file objects because, for every file, we want an object storing that file's
        // information; after storing all of them, we add this list as a field on the bigger
        // commit object (see below).
        var tempListOfFileObjects = [];
        var z;
        for (z = 0; z < currentFiles.length; z++) {
          var fileInConsideration = currentFiles[z];
          var tempFileObject = {
            fileName: fileInConsideration['filename'],
            totalAdditions: fileInConsideration['additions'],
            totalDeletions: fileInConsideration['deletions'],
            numberOfChanges: fileInConsideration['changes']
          };
          // we add the same file objects to both a temporary, local list and a global set.
          // Don't be tripped up by this; they're doing the same thing!
          setOfFileObjects.add(tempFileObject);
          tempListOfFileObjects.push(tempFileObject);
        }
        // here we make an object that stores information for a single commit. sha, authorName,
        // and date are single values, but files is a list of file objects, each storing further
        // information for one file.
        var tempObj = {sha: currentSHA, authorName: currentAuthorName, date: currentDate, files: tempListOfFileObjects};
        var currPrNumber = listOfPrObjects[x]['number'];
        console.log(currPrNumber);
        // here we map a single pull request number to an object containing all the information
        // for every commit associated with that pull request.
        mapPRNumbersToCommitObjects.set(currPrNumber, tempObj);
      }
    }
    return mapPRNumbersToCommitObjects;
  }

  async startParsingPullRequests() {
    // return the promise chain so callers can await the finished map
    return this.getListOfPullRequests(gitPullApiLink + "?state=all").then(() => {
      return this.getCommitsForEachPullRequestAndPRinformation().then(() => {
        return this.createCommitObjects().then((builtMap) => {
          console.log("functions were successful");
          return builtMap;
        }).catch((error) => {
          console.log("printing first error");
          console.log(error);
        });
      }).catch((error2) => {
        console.log("printing the second error");
        console.log(error2);
      });
    }).catch((error3) => {
      console.log("printing the third error");
      console.log(error3);
    });
  }

  // adding some getter methods so they can be used to work with whatever information people may
  // need. I start all of them with the this.startParsingPullRequests() method because calling
  // that method populates all the global variables.
  async getSetOfFileObjects() {
    var dummyMap = await this.startParsingPullRequests();
    return {files: setOfFileObjects, prMap: mapPRnumberToCloseOpenDateObjects};
  }

  async OpenCloseDateObjects() {
    var dummyMap = await this.startParsingPullRequests();
    return mapPRnumberToCloseOpenDateObjects;
  }

  async getNumbersOfTargetedIssues() {
    var dummyMap = await this.startParsingPullRequests();
    return listOfNumbersOfTargetedIssues;
  }
}

var dummy = new PullRequestParser();
dummy.startParsingPullRequests().then((prMap) => {
  console.log("the map is defined! :)");
  console.log(prMap);
});

module.exports = PullRequestParser;
Whenever I run the code in the WebStorm terminal, though, with:
node PullRequestParser.js
I get a 403 error, followed by a bunch of error output, with the following statement:
data: {
  message: "API rate limit exceeded for 138.186.17.173. (But here's the good news: Authenticated requests get a higher rate limit. Check out the documentation for more details.)"
}
I looked up the documentation and found that without authentication I can make 60 requests per hour to a repo. The only authentication example the documentation provides, however, uses the command line, and I don't think that is enough because I want to do further analysis with the results I get. Does anybody know how I can increase the number of requests I can make? Where in the code would I need to make changes, and what kind of changes? Thanks!
The first line of the documentation says everything you need to know.
For API requests using Basic Authentication or OAuth, you can make up to 5000 requests per hour.
Using Basic Authentication is pretty simple, so that may be the easiest thing to get you up and running. OAuth is more complicated, but more desirable in production.
The axios library supports basic auth requests out of the box.
async getListOfPullRequests(pullrequestLink) {
  const message = await axios.get(pullrequestLink, {
    auth: {
      username: 'username',
      password: 'password'
    }
  });
  //console.log(message);
  listOfPrObjects = message['data'];
}
You just need to supply the correct username and password information.
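Note that GitHub has since deprecated password-based Basic Authentication for the API in favor of personal access tokens; here is a sketch of the token variant (YOUR_TOKEN is a placeholder):
const message = await axios.get(pullrequestLink, {
  headers: {
    // Generate a personal access token at https://github.com/settings/tokens
    Authorization: 'token YOUR_TOKEN'
  }
});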

Is there a way to trigger validation manually in monaco editor?

I'm using the default TypeScript service and the models are initialized asynchronously with one model depending on the other. There's a case where the two models cannot detect each other so it shows a semantic error. If I make some edits in the dependent model, which causes the model to be re-validated, the errors disappear.
I have tried calling setModel manually, which solves the problem; however, it destroys the undo history.
Is there a way to re-validate the model manually?
Here's my solution, extracted from monaco-typescript:
async function revalidateModel(model) {
  if (!model || model.isDisposed()) return;
  const getWorker = await monaco.languages.typescript.getTypeScriptWorker();
  const worker = await getWorker(model.uri);
  const diagnostics = (await Promise.all([
    worker.getSyntacticDiagnostics(model.uri.toString()),
    worker.getSemanticDiagnostics(model.uri.toString())
  ])).reduce((a, it) => a.concat(it));
  const markers = diagnostics.map(d => {
    const start = model.getPositionAt(d.start);
    const end = model.getPositionAt(d.start + d.length);
    return {
      severity: monaco.MarkerSeverity.Error,
      startLineNumber: start.lineNumber,
      startColumn: start.column,
      endLineNumber: end.lineNumber,
      endColumn: end.column,
      // flattenDiagnosticMessageText is also extracted from monaco-typescript;
      // it joins a possibly chained messageText into a single string
      message: flattenDiagnosticMessageText(d.messageText, "\n")
    };
  });
  const owner = model.getLanguageIdentifier().language;
  monaco.editor.setModelMarkers(model, owner, markers);
}
Call the function above whenever a model is created asynchronously.
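For example, a usage sketch (the file name and source text are made up):
// Hypothetical usage: create a dependent model, then force revalidation.
const uri = monaco.Uri.parse("file:///dependent.ts");
const model = monaco.editor.createModel('import { x } from "./other";', "typescript", uri);
revalidateModel(model); // async; markers refresh once the worker responds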
This is what I did to fix it:
setInterval(() => {
  const range = new monaco.Range(1, 1, 1, 1);
  const addEmptySpace = {forceMoveMarkers: true, range, text: ' '};
  for (const m of monaco.editor.getModels()) {
    const toInvert = m.applyEdits([addEmptySpace]);
    m.applyEdits(toInvert);
  }
}, 50 * 1000);
Every fifty seconds you insert and immediately remove a space. I don't like it, but it works.
