I have a static local webpage that is supposed to be updated with data from a CSV file (so as data is written to the file, the page should update and display the new data). My issue is that with many data points it becomes very slow, and after a certain number of points it stops updating altogether.
Currently, the page updates by re-reading the entire file every 3 seconds. I imagine a much more efficient way would be to detect the most recent additions to the file and append just those new points to the current data set. I'm just not sure how to do that.
The code below parses the CSV file and separates the data into arrays used by the charts:
function parseCSVData(csvFile) {
    time = [];
    altitude = [];
    outsideTemp = [];
    insideTemp = [];
    voltage = [];
    state = [];
    velocity = [];
    degrees = [];
    // cut the CSV file into lines
    var lines = csvFile.split("\n");
    $.each(lines, function (lineNumber, line) {
        if (lineNumber != 0) { // skip header line
            var fields = line.split(",");
            var missionTime = parseInt(fields[1]);
            var altitude2 = parseInt(fields[2]);
            var outsideTemp2 = parseInt(fields[3]);
            var insideTemp2 = parseInt(fields[4]);
            var voltage2 = parseInt(fields[5]);
            var state2 = parseInt(fields[6]);
            var velocity2 = parseInt(fields[7]);
            var degrees2 = parseInt(fields[8]);
            time.push(missionTime);
            altitude.push(altitude2);
            outsideTemp.push(outsideTemp2);
            insideTemp.push(insideTemp2);
            voltage.push(voltage2);
            state.push(state2);
            velocity.push(velocity2);
            degrees.push(degrees2);
        }
    });
}
This is the code to update the charts every 3 seconds:
setInterval(function blah() {
    var file = fileName + '?q=' + Math.random();
    fillCharts(file); // calls the parseCSVData function and fills the charts
}, 3000);
EDIT: pastebin with entire code: http://pastebin.com/Qmzn8azY
EDIT2: sample csv data:
TEAM_ID,MISSION_TIME,ALT_SENSOR,OUTSIDE_TEMP,INSIDE_TEMP,VOLTAGE,FSW_STATE,VELOCITY,DEGREES
ubArtemis,0,36,20,20,9,1,0,0
ubArtemis,1,45,18,20,9,1,6,2
ubArtemis,2,200,16,20,9,1,10,5
ubArtemis,3,65,14,19,9,1,15,3
ubArtemis,4,79,12,17,8,2,22,4
ubArtemis,5,100,10,16,8,3,30,2
ubArtemis,6,120,8,15,8,4,39,0
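A minimal sketch of the incremental approach described in the question, assuming the same global arrays and a jQuery $.get fetch (jQuery is already in use via $.each; the per-chart update call is left as a placeholder): remember how many lines have already been parsed and append only the new ones on each tick.
var linesParsed = 1; // line 0 is the header, so start past it

function appendNewPoints(csvFile) {
    var lines = csvFile.split("\n");
    if (lines[lines.length - 1] === "") lines.pop(); // drop trailing-newline artifact
    for (var i = linesParsed; i < lines.length; i++) {
        var fields = lines[i].split(",");
        if (fields.length < 9) continue; // skip partially written lines
        time.push(parseInt(fields[1]));
        altitude.push(parseInt(fields[2]));
        outsideTemp.push(parseInt(fields[3]));
        insideTemp.push(parseInt(fields[4]));
        voltage.push(parseInt(fields[5]));
        state.push(parseInt(fields[6]));
        velocity.push(parseInt(fields[7]));
        degrees.push(parseInt(fields[8]));
        // append just this point to each chart here instead of rebuilding them
    }
    linesParsed = lines.length;
}

setInterval(function () {
    $.get(fileName + '?q=' + Math.random(), appendNewPoints);
}, 3000);
This still re-downloads the whole file every 3 seconds, but it avoids re-parsing and re-plotting every existing point, which is usually where the slowdown comes from.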
Related
I get data from my Firebase database every ten seconds and want to plot a graph with highcharts.js, but when I access the web page I get all the data prior to the moment the page was loaded. At this point my web page freezes, and it crashes after a few minutes while the data is being retrieved. (I also have a second piece of code that retrieves new data each time it is added to the database, but the page crashes before that comes into play.)
I think it's my plotValues function that is crashing the page: when I remove plotValues(...) I can read my data in a few seconds, even when there are 15k+ points. It may be because I'm redrawing the chart each time I receive a data point instead of waiting until everything has been retrieved.
I would like to know how to draw the chart once, after all of my data has been retrieved from the database.
I thank you in advance for your answers.
Here is the code:
setInterval(test, 10000);

function test() {
    if (it === 0) {
        console.log("it=", it);
        dbRef.orderByKey().once('value', snapshot => { // "value" to get all existing data
            var jsonData = snapshot.toJSON();
            //console.log(jsonData); // Test purpose
            var coco = Object.values(jsonData); // Extract the numerical values of the object
            // Save values in variables
            //console.log(Défaut_communication_automate); // Test purpose
            coco.forEach(elt => { // for each element in coco
                var Puissance_batterie = elt.Puissance_batterie; // take the value of the element named "Puissance_batterie"
                var Puissance_AC_Onduleurs = elt.Puissance_AC_Onduleurs;
                var Tension_réseau = elt.Tension_réseau;
                var timestamp = elt.timestamp;
                //console.log(Puissance_batterie);
                plotValues(chartALM, timestamp, Puissance_batterie, 0);
                plotValues(chartALM, timestamp, Puissance_AC_Onduleurs, 1);
                plotValues(chartALM, timestamp, Tension_réseau, 2);
            })
            ++it;
            console.log("it=", it);
        })
    }
}
// function to plot values on charts
function plotValues(chart, timestamp, value, series) {
    var x = epochToJsDate(timestamp).getTime();
    var y = Number(value);
    chart.series[series].addPoint([x, y], false, false);
    chart.redraw();
}
// Create the charts when the web page loads
window.addEventListener('load', onload);

function onload(event) {
    chartALM = createALMChart();
    barALM = createALMBar();
}
// Create Chart
function createALMChart() {
    var chart = new Highcharts.Chart({
        chart: {
            renderTo: 'chart-ALM',
            type: 'spline'
        }
        // ...the remaining options are only about the visual aspect...
    });
    return chart;
}
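Since the per-point redraw is the likely culprit, here is a minimal sketch of batching it (names as in the question): addPoint() already receives redraw = false, so dropping the chart.redraw() inside plotValues and redrawing once after the whole snapshot has been processed should stop the freeze.
// plotValues without the per-point redraw
function plotValues(chart, timestamp, value, series) {
    var x = epochToJsDate(timestamp).getTime();
    var y = Number(value);
    chart.series[series].addPoint([x, y], false, false); // redraw = false
}

// inside the once('value') handler:
coco.forEach(elt => {
    plotValues(chartALM, elt.timestamp, elt.Puissance_batterie, 0);
    plotValues(chartALM, elt.timestamp, elt.Puissance_AC_Onduleurs, 1);
    plotValues(chartALM, elt.timestamp, elt.Tension_réseau, 2);
});
chartALM.redraw(); // one redraw once all 15k+ points are added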
I'm trying to make an interface that lets a user upload CSV files and plots them with Plotly, using only JavaScript and, obviously, the Plotly library. I'm close, but my suspicion is that there's an issue with the asynchronous reading of the CSV files.
As you can probably see, I'm relatively new to JavaScript, so any feedback is welcome. I cannot, however, use any libraries or packages other than Plotly.
The problem is that the resulting figure only shows the initialized values (1).
EDIT: The heatmap function works on test data, or if I modify specific elements of the data_y object, just not when I update the information from the file.
There's a button that allows uploading of the CSV files. On the change event, this code triggers:
<script>
    let picker = document.getElementById('picker');
    picker.addEventListener('change', event => {
        file_list = event.target.files;
        var fig_y = [];
        for (let i = 0; i < file_list.length; i++) {
            if (file_list[i].name == ".DS_Store") { continue }
            else {
                var ready = read_data(file_list[i]);
                fig_y.push(ready);
            }
        }
        console.log(fig_y);
        plot_heatmap(fig_y);
    });
</script>
The data is read using this code.
<script>
    function read_data(input) {
        var xs = 1212; // length of the data
        file_contents = [];
        var data_y = Array(xs).fill(1);
        let file = input;
        let reader = new FileReader();
        reader.readAsText(file);
        reader.onload = function () {
            file_contents = reader.result.split('\n');
            // open the data file. First two lines contain a description of the data.
            for (let j = 2; j < file_contents.length - 1; j++) {
                // the relevant data is the third number in the column
                var nr = file_contents[j].split(",").map(Number)[2];
                data_y[j - 2] = nr;
            }
        }
        return data_y;
    }
</script>
The code that makes the Plotly heatmap:
<script>
    function plot_heatmap(data_z) {
        var data = [
            {
                z: data_z,
                type: 'heatmap'
            }
        ];
        Plotly.newPlot('raw_data', data);
    }
</script>
OK, so I figured out the answer. It comes from the asynchronous reading of the text files. Putting the plot_heatmap call in the following setTimeout solved the issue (well, maybe it's more of a workaround):
setTimeout(() => { plot_heatmap(fig_y); }, 100);
Actually, by changing the length of the timeout, I could catch JS in the act and see half the heatmap filled in with the real values and the other half still with the initialized value!
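A more robust alternative to the timeout is to wrap FileReader in a Promise and only plot once every file has loaded. The sketch below mirrors the original parsing (skip two header lines, take the third number of each row) and keeps the fixed length 1212 from the question.
function read_data(file) {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onerror = reject;
        reader.onload = () => {
            const lines = reader.result.split('\n');
            const data_y = Array(1212).fill(1);
            for (let j = 2; j < lines.length - 1; j++) {
                data_y[j - 2] = lines[j].split(',').map(Number)[2];
            }
            resolve(data_y);
        };
        reader.readAsText(file);
    });
}

picker.addEventListener('change', event => {
    const pending = Array.from(event.target.files)
        .filter(file => file.name !== '.DS_Store')
        .map(read_data);
    Promise.all(pending).then(fig_y => plot_heatmap(fig_y));
});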
I'm trying to work with pull requests, issues, and commits for repos, and I have the following code:
const axios = require('axios');

var gitPullApiLink = "https://api.github.com/repos/elixir-lang/elixir/pulls";
var listOfCommits = [];
var listOfSHAs = [];
var mapOfInfoObjects = new Map();
var mapPullRequestNumberToCommits = new Map();
var mapPRNumbersToCommitObjects = new Map();
var listOfPrObjects = [];
var setOfFileObjects = new Set();
var listOfNumbersOfTargetedIssues = [];
var mapPRnumberToCloseOpenDateObjects = new Map();

class PullRequestParser {
    async getListOfPullRequests(pullrequestLink) {
        const message = await axios.get(pullrequestLink);
        //console.log(message);
        listOfPrObjects = message['data'];
    }
    async getCommitsForEachPullRequestAndPRinformation() {
        var listOfPrNumbers = [];
        var k;
        // this loop will just make a list of Pull Request Numbers
        for (k = 0; k < listOfPrObjects.length; k++) {
            var currPrNumber = listOfPrObjects[k]['number'];
            listOfPrNumbers.push(currPrNumber);
        }
        // I created a separate list just because... I did it this way because on the github API website it seems
        // like the pull request has the same number as the issue it affects. I explain how you can see this down below
        listOfNumbersOfTargetedIssues = listOfPrNumbers;
        // next loop will make objects that contain information about each pull request.
        var n;
        for (n = 0; n < listOfPrNumbers.length; n++) {
            var ApiLinkForEachPullRequest = gitPullApiLink + "/" + listOfPrNumbers[n];
            const mes = await axios.get(ApiLinkForEachPullRequest);
            var temp = {OpeningDate: mes['data']['created_at'],
                ClosingDate: mes['data']['closed_at'],
                IssueLink: mes['data']['_links']['issue']['href']};
            // mapPRnumberToCloseOpenDateObjects will be a map where the key is the pull request number and the value
            // is the object that stores the open date, close date, and issue link for that pull request. The reason
            // why I said I think the pull request number is the same as the number of the issue it affects is because
            // if you take any object from the map, say you do mapPRnumberToCloseOpenDateObjects.get(10). You'll
            // get an object with a pull request number 10. Now if you take this object and look at its "IssueLink"
            // field, the very last part of the link will have the number 10, and if you look at the github API
            // it says for a single issue, you do: /repos/:owner/:repo/issues/:issue_number <---- As you can see,
            // the IssueLink field will have this structure and in place of the issue_number, the field will be 10
            // for our example object.
            mapPRnumberToCloseOpenDateObjects.set(listOfPrNumbers[n], temp);
        }
        // up to this point, we have the pull request numbers. we will now start getting the commits associated with
        // each pull request
        var j;
        for (j = 0; j < listOfPrNumbers.length; j++) {
            var currentApiLink = gitPullApiLink + "/" + listOfPrNumbers[j] + "/commits";
            const res = await axios.get(currentApiLink);
            // here we map a single pull request to the information containing the commits. I'll just warn you in
            // advance: there's another object called mapPRNumbersToCommitObjects. THIS MAP IS DIFFERENT! I know it's
            // subtle, but I hope the language can make the distinction: mapPullRequestNumberToCommits will just
            // map a pull request number to some data about the commits it's linked to. In contrast,
            // mapPRNumbersToCommitObjects will be the map that actually maps pull request numbers to objects
            // containing information about the commits a pull request is associated with!
            mapPullRequestNumberToCommits.set(listOfPrNumbers[j], res['data']);
        }
        // console.log("hewoihoiewa");
    }
    async createCommitObjects() {
        var x;
        // the initial loop using x will loop over all pull requests and get the associated commits
        for (x = 0; x < listOfPrObjects.length; x++) {
            // here we will get the commits
            var currCommitObjects = mapPullRequestNumberToCommits.get(listOfPrObjects[x]['number']);
            //console.log('dhsiu');
            // the loop using y will iterate over all commits that we get from a single pull request
            var y;
            for (y = 0; y < currCommitObjects.length; y++) {
                var currentSHA = currCommitObjects[y]['sha'];
                listOfSHAs.push(currentSHA);
                var currApiLink = "https://api.github.com/repos/elixir-lang/elixir/commits/" + currentSHA;
                const response = await axios.get(currApiLink);
                //console.log("up to here");
                // here we start extracting some information from a single commit
                var currentAuthorName = response['data']['commit']['committer']['name'];
                var currentDate = response['data']['commit']['committer']['date'];
                var currentFiles = response['data']['files'];
                // this loop will iterate over all changed files for a single commit. Remember, every commit has a list
                // of changed files, so this loop will iterate over all those files and get the necessary information
                // from them.
                var z;
                // we create this temporary list of file objects because for every file, we want to make an object
                // that will store the necessary information for that one file. after we store all the objects for
                // each file, we will add this list of file objects as a field for our bigger commit object (see down below)
                var tempListOfFileObjects = [];
                for (z = 0; z < currentFiles.length; z++) {
                    var fileInConsideration = currentFiles[z];
                    var nameOfFile = fileInConsideration['filename'];
                    var numberOfAdditions = fileInConsideration['additions'];
                    var numberOfDeletions = fileInConsideration['deletions'];
                    var totalNumberOfChangesToFile = fileInConsideration['changes'];
                    //console.log("with file");
                    var tempFileObject = {fileName: nameOfFile, totalAdditions: numberOfAdditions,
                        totalDeletions: numberOfDeletions, numberOfChanges: totalNumberOfChangesToFile};
                    // we add the same file objects to both a temporary, local list and a global set. Don't be tripped
                    // up by this; they're doing the same thing!
                    setOfFileObjects.add(tempFileObject);
                    tempListOfFileObjects.push(tempFileObject);
                }
                // here we make an object that stores information for a single commit. sha, authorName, date are single
                // values, but files will be a list of file objects and these file objects will store further information
                // for each file.
                var tempObj = {sha: currentSHA, authorName: currentAuthorName, date: currentDate, files: tempListOfFileObjects};
                var currPrNumber = listOfPrObjects[x]['number'];
                console.log(currPrNumber);
                // here we map a single pull request number to an object that will contain all the information for
                // every single commit associated with that pull request. So for every pull request, it will map to a list
                // of objects where each object stores information about a commit associated with the pull request.
                mapPRNumbersToCommitObjects.set(currPrNumber, tempObj);
            }
        }
        return mapPRNumbersToCommitObjects;
    }
    async startParsingPullRequests() {
        this.getListOfPullRequests(gitPullApiLink + "?state=all").then(() => {
            this.getCommitsForEachPullRequestAndPRinformation().then(() => {
                this.createCommitObjects().then((response) => {
                    console.log("functions were successful");
                    return mapPRNumbersToCommitObjects;
                }).catch((error) => {
                    console.log("printing first error");
                    console.log(error);
                })
            }).catch((error2) => {
                console.log("printing the second error");
                console.log(error2);
            })
        }).catch((error3) => {
            console.log("printing the third error");
            console.log(error3);
        });
    }
    // adding some getter methods so they can be used to work with whatever information people may need.
    // I start all of them with the this.startParsingPullRequests() method because calling that method gets all
    // the information for the global variables.
    async getSetOfFileObjects() {
        var dummyMap = await this.startParsingPullRequests();
        return {files: setOfFileObjects, prMap: mapPRnumberToCloseOpenDateObjects};
    }

    async OpenCloseDateObjects() {
        var dummyMap = await this.startParsingPullRequests();
        return mapPRnumberToCloseOpenDateObjects;
    }

    async getNumbersOfTargetedIssues() {
        var dummyMap = await this.startParsingPullRequests();
        return listOfNumbersOfTargetedIssues;
    }
}
var dummy = new PullRequestParser();
var dummyMap = dummy.startParsingPullRequests().then((message) => {
    console.log("dummyMap is defined! :)");
    console.log(dummyMap);
});

module.exports = PullRequestParser;
Whenever I run the code in the WebStorm terminal, though, with:
node PullRequestParser.js
I get a 403 error, followed by a bunch of error output, with the following statement:
data: {
    message: "API rate limit exceeded for 138.186.17.173. (But here's the good news: Authenticated requests get a higher rate limit. Check out the documentation for more details.)"
I looked up the documentation for this and found out that without authentication, I can make 60 requests per hour to a repo. The only authentication example provided, however, uses the command line. I don't think that would be enough, because I want to do some further analysis with the results I get. Does anybody know how I can increase the number of requests I can make? Where in the code would I need to make changes, and what kind of changes would I need to make? Thanks!
The first line of the documentation says everything you need to know.
For API requests using Basic Authentication or OAuth, you can make up
to 5000 requests per hour.
Using Basic Authentication is pretty simple, so that may be the easiest thing to get you up and running. OAuth is more complicated, but more desirable in production.
The axios library supports basic auth requests out of the box.
async getListOfPullRequests(pullrequestLink) {
    const message = await axios.get(pullrequestLink, {
        auth: {
            username: 'username',
            password: 'password'
        }
    });
    //console.log(message);
    listOfPrObjects = message['data'];
}
You just need to supply the correct username and password information.
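One caveat: GitHub has since removed password-based Basic Authentication for the API (in November 2020), so on current setups you would pass a personal access token instead, for example via the Authorization header (the token string below is a placeholder):
const message = await axios.get(pullrequestLink, {
    headers: {
        Authorization: 'token YOUR_PERSONAL_ACCESS_TOKEN'
    }
});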
I'm fetching the top news from the Hacker News API, which is backed by Firebase. I'm planning to build a progressive web app, hence I'm planning to cache the results in localStorage.
The current code, which fetches and renders the top stories, is below:
var ref = new Firebase("http://hacker-news.firebaseio.com/v0/");
var itemRef = ref.child('item');
var topStories = [];

var storyCallback = function(snapshot) {
    var story = snapshot.val();
    var html = '';
    if (story.score) {
        html = '<i>' + story.score + '</i> ' + story.title + '';
    }
    document.getElementById(topStories.indexOf(story.id)).innerHTML = html;
};

ref.child('topstories').once('value', function(snapshot) {
    topStories = snapshot.val();
    for (var i = 0; i < topStories.length; i++) {
        var element = document.createElement("P");
        element.id = i;
        document.getElementById('items').appendChild(element);
        itemRef.child(topStories[i]).on('value', storyCallback);
    }
});

ref.child('topstories').on('child_changed', function(snapshot, prevChildName) {
    var ref = snapshot.ref();
    var index = ref.name();
    var oldItemId = topStories[index];
    itemRef.child(oldItemId).off();
    var newItemId = snapshot.val();
    topStories[index] = newItemId;
    itemRef.child(newItemId).on('value', storyCallback);
});
If I add each story to localStorage (by tweaking the above code), how can I skip fetching it from Firebase the next time (if it is already present and hasn't changed)?
Note that the "hasn't changed" part is important: I can easily read from localStorage using the key, but it should stay in sync with Firebase as well. So I'm wondering whether Firebase has some way to handle this.
If I'm not missing any point, you can simply check whether the data already exists in localStorage or not:
if (localStorage.getItem('key')) {
    // display here
} else {
    // fetch
    // save for next time
    localStorage.setItem('key', JSON.stringify('yourdata'));
    // and display here
}
Also, you can generalize your fetching, rendering, and display logic into functions so you can call them from multiple places.
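Applied to the code in the question, a sketch might look like this (assuming story ids as localStorage keys; scores on Hacker News do change, so the existing child_changed handler should still overwrite cached entries to keep them in sync):
function renderStory(story) {
    var html = '';
    if (story.score) {
        html = '<i>' + story.score + '</i> ' + story.title;
    }
    document.getElementById(topStories.indexOf(story.id)).innerHTML = html;
}

function loadStory(id) {
    var cached = localStorage.getItem('story-' + id);
    if (cached) {
        renderStory(JSON.parse(cached)); // cache hit: no Firebase read
    } else {
        itemRef.child(id).once('value', function(snapshot) {
            var story = snapshot.val();
            localStorage.setItem('story-' + id, JSON.stringify(story));
            renderStory(story);
        });
    }
}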
So I have Faye set up on Node.js doing pubsub-type stuff for a time-series chart. With Smoothie Charts it looks like this:
// set up faye subscription
var subscription = client.subscribe('/myfeed', function(message) {
    // add data to the appropriate series
    mySeries.append(new Date(), message.value);
});
That's pretty much it. Now I'm trying to replace Smoothie with Cubism.js. Here is an example of pushing a single value that I have working:
// create context and horizon
var context = cubism.context().size(960);
var horizon = context.horizon().extent([0, 2]);

function testFeed(name) {
    return context.metric(function(start, stop, step, callback) {
        var values = [];
        // static value I added
        values.push(85);
        // original random example:
        // while (+start < +stop) { start = +start + step; values.push(Math.random()); }
        callback(null, values);
    }, name);
}

// draw graph
var metrics = ["Metric1", "Metric2", "Metric3"];
horizon.metric(testFeed);
Sadly I have no idea what most of this means. So 2 things:
How do I integrate my subscription into the feed?
How do I send individual values to each "metric"? Right now that 85 shows up on all 3 metrics.
I tried the following and I get my data logged to the console but nothing appears on the chart:
function testFeed(name) {
    return context.metric(function(start, stop, step, callback) {
        var values = [];
        var subscription = client.subscribe('/myfeed', function(message) {
            values.push(message.messageCount);
            console.log(message.messageCount);
        });
        callback(null, values);
    }, name);
}
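For what it's worth, context.metric pulls values on Cubism's schedule rather than accepting pushes as they arrive, which is why the data gets logged but never drawn: callback fires before any message has arrived. One way to bridge the two, sketched under the assumption that each Faye message carries a metric name and a value (the message format isn't shown in the question), is to buffer incoming messages per metric and have each metric's callback read its own buffer for the requested window:
var buffers = {}; // metric name -> [{ time, value }, ...]

var subscription = client.subscribe('/myfeed', function(message) {
    var buf = buffers[message.name] || (buffers[message.name] = []);
    buf.push({ time: Date.now(), value: message.value });
    if (buf.length > 1000) buf.shift(); // cap memory
});

function feed(name) {
    return context.metric(function(start, stop, step, callback) {
        var values = [];
        var buf = buffers[name] || [];
        var i = 0, last = NaN;
        // report the most recent buffered value at or before each step boundary
        for (var t = +start; t < +stop; t += step) {
            while (i < buf.length && buf[i].time <= t) last = buf[i++].value;
            values.push(last);
        }
        callback(null, values);
    }, name);
}

// then, instead of horizon.metric(testFeed):
horizon.metric(feed);
This also answers the second question: each metric reads only the buffer for its own name, so individual values go to individual metrics instead of the same 85 appearing on all three.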