I'm trying to make an interface that lets a user upload CSV files and plots them with Plotly, using only JavaScript and, of course, the Plotly library. I'm close, but my suspicion is that there's an issue with the asynchronous reading of the CSV files.
As you can probably see, I'm relatively new to JavaScript, so any feedback is welcome. I cannot, however, use any libraries or packages other than Plotly.
The problem is that the resulting figure only shows the initialized values (1).
EDIT: The heatmap function works on test data, or if I modify specific elements of the data_y object, just not when I update the information from the file.
There's a button that allows uploading of the CSV files. On the change event, this code runs:
<script>
let picker = document.getElementById('picker');
picker.addEventListener('change', event => {
    const file_list = event.target.files;
    var fig_y = [];
    for (let i = 0; i < file_list.length; i++) {
        if (file_list[i].name == ".DS_Store") { continue; }
        else {
            var ready = read_data(file_list[i]);
            fig_y.push(ready);
        }
    }
    console.log(fig_y);
    plot_heatmap(fig_y);
});
</script>
The data is read using this code.
<script>
function read_data(input) {
    var xs = 1212; // length of the data
    var file_contents = [];
    var data_y = Array(xs).fill(1);
    let file = input;
    let reader = new FileReader();
    reader.readAsText(file);
    reader.onload = function () {
        file_contents = reader.result.split('\n');
        // open the data file. First two lines contain a description of the data.
        for (let j = 2; j < file_contents.length - 1; j++) {
            // the relevant data is the third number in the column
            var nr = file_contents[j].split(",").map(Number)[2];
            data_y[j - 2] = nr;
        }
    };
    return data_y; // note: this returns before reader.onload has fired
}
</script>
The code that makes the Plotly heatmap:
<script>
function plot_heatmap(data_z) {
    var data = [
        {
            z: data_z,
            type: 'heatmap'
        }
    ];
    Plotly.newPlot('raw_data', data);
}
</script>
OK, so I figured out the answer. It comes from the asynchronous reading of the text files. Wrapping the plot_heatmap call in the following setTimeout solved the issue (well, maybe it's more of a workaround).
setTimeout(() => { plot_heatmap(fig_y); }, 100);
Actually, by changing the length of the timeout, I could catch JS in its act and see half the heatmap filled in with the real values and the other half still with the initialized value!
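A more robust fix than the timeout is to make the asynchrony explicit: wrap the FileReader in a Promise and plot only once every file has resolved. A minimal sketch of that approach (it reuses the parsing logic and the 1212-sample length from read_data above):
<script>
// Wrap FileReader in a Promise so each file's parsed column resolves when ready.
function read_data_async(file) {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onerror = reject;
        reader.onload = () => {
            const lines = reader.result.split('\n');
            const data_y = Array(1212).fill(1);
            // first two lines are a description; the third number in each row is the value
            for (let j = 2; j < lines.length - 1; j++) {
                data_y[j - 2] = lines[j].split(',').map(Number)[2];
            }
            resolve(data_y);
        };
        reader.readAsText(file);
    });
}

document.getElementById('picker').addEventListener('change', event => {
    const files = [...event.target.files].filter(f => f.name !== '.DS_Store');
    // Promise.all waits for every read to finish before plotting.
    Promise.all(files.map(read_data_async)).then(fig_y => plot_heatmap(fig_y));
});
</script>
Promise.all also preserves file order, so each row of the heatmap corresponds to the file at the same index.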
Sooooo, I want to get data from a Google Sheet into a website that appends the data dynamically, so the number of items on the page equals the number of rows. I pretty much have everything done separately, but I just can't figure out how to combine the pieces.
I set up a Node.js project that uses my API key to retrieve the data, manipulate it, and push it into an array.
There may also be a more efficient way to pass the data along, but the only thing I could figure out was saving it to a file and then reading it back. I then pumped that into an AWS API Gateway + Lambda function, which didn't really work because I couldn't get Lambda to require google/api, but I think I could sort that out.
I also already did all the HTML/JS on the website to push the data to the page; I just can't figure out how to get the data from my AWS HTTP endpoint into the website. Whether I use async, promises, or neither, it just can't get the data.
Issues: using google/api inside Lambda, getting the data from the API into an array, and maybe a different way to get the data from the Google API.
Thank you in advance!
AWS-link: https://po7bu16g2i.execute-api.eu-central-1.amazonaws.com/live/celldata
//JS code on website
var receivearray = [];

function createEntry(data) {
    for (var i = 0; i < data.length; i++) {
        var outdt = document.createElement('div');
        outdt.className = 'outwrap';
        var pers_name = document.createElement('div');
        pers_name.className = 'name';
        var pers_name_txt = document.createElement('h3');
        pers_name_txt.className = 'name_head';
        pers_name_txt.innerHTML = data[i][0];
        outdt.append(pers_name);
        pers_name.append(pers_name_txt);
        for (var k = 1; k < data[i].length; k++) {
            var top = document.createElement('div');
            top.className = 'text_wrap';
            var q = document.createElement('p');
            q.className = 'frage';
            q.innerHTML = data[i][k][0];
            var a = document.createElement('p');
            a.innerHTML = data[i][k][1];
            outdt.append(top);
            top.append(q, a);
        }
        document.body.appendChild(outdt);
    }
}
createEntry(receivearray);
//Node.js Code
//requirements
var GoogleSpreadsheet = require('google-spreadsheet');
var crds = require('./credt.json');
const fs = require('fs');

//updates the data file
function update(sheetid) {
    //google api setup
    var doc = new GoogleSpreadsheet(sheetid);
    doc.useServiceAccountAuth(crds, function (err) {
        doc.getRows(1, function (err, rows) {
            //saves all retrieved data into a text file
            fs.writeFile('ting.txt', JSON.stringify(rows), function (err) {
                //checks if the file was saved successfully
                if (err) throw err;
                console.log('updated');
            });
        });
    });
}

function prepdata() {
    //define variables
    var text = fs.readFileSync("ting.txt", "utf8");
    var findata = [];
    var hold = [];
    //single out all row elements
    for (var x = 0; x < text.length; x++) {
        if (text.slice(x, x + 7) == '"name":') {
            hold.push(text.slice(x + 7, x + 120));
        }
    }
    //split data by , into sets
    for (var x = 0; x < hold.length; x++) {
        findata.push(hold[x].split(','));
    }
    //push sets into arrays
    for (var k = 0; k < findata.length; k++) {
        for (var i = 0; i < findata[k].length; i++) {
            findata[k][i] = findata[k][i].replace(/"/g, " ");
            findata[k][i] = findata[k][i].split(':');
        }
    }
    return findata;
}

update("1W6tVuj0krrwI7PyTRJha2ZOX72kGGfFAI8eqXOirWHo");
console.log(prepdata());
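On the website side, the missing piece is to fetch the endpoint asynchronously and call createEntry only inside the callback. A minimal sketch, assuming the Lambda returns the prepared array as JSON and that CORS is enabled on the API Gateway stage:
// Sketch: load the array from the API Gateway endpoint, then render it.
fetch('https://po7bu16g2i.execute-api.eu-central-1.amazonaws.com/live/celldata')
    .then(function (response) {
        if (!response.ok) throw new Error('HTTP ' + response.status);
        return response.json();
    })
    .then(function (data) {
        receivearray = data;
        createEntry(receivearray); // render only after the data has arrived
    })
    .catch(function (err) {
        console.error('could not load cell data:', err);
    });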
I am using PDFJS to get textual data from PDF files, but occasionally encountering the following error:
Error: Invalid XRef table: unexpected first object.
I would prefer that my code just skip over problem files and continue on to the next file in the list. According to the PDFJS documentation, setting stopAtErrors to true in the DocumentInitParameters should cause getTextContent to reject when the associated PDF data cannot be successfully parsed. I am not finding that to be the case: even with stopAtErrors set to true, I continue to get the above error, and the code seems to be "spinning" on the problem file rather than moving on to the next in the list. It is possible that I haven't actually set stopAtErrors the way I think I have. A snippet of my code is below to illustrate what I think I've done (code based on this example):
// set up the variables to pass to getDocument, including the pdf file's url:
var obj = {};
obj.url = 'http://www.whatever.com/thefile.pdf'; // the specific url linked to the desired pdf file goes here
obj.stopAtErrors = true;
// now have PDF JS read in the file:
PDFJS.getDocument(obj).then(function (pdf) {
    var pdfDocument = pdf;
    var pagesPromises = [];
    for (var i = 0; i < pdf.pdfInfo.numPages; i++) {
        (function (pageNumber) {
            pagesPromises.push(getPageText(pageNumber, pdfDocument));
        })(i + 1);
    }
    Promise.all(pagesPromises).then(function (pagesText) {
        // display text of all the pages in the console
        console.log(pagesText);
    });
}, function (reason) {
    console.log('Error! ' + reason);
});

function getPageText(pageNum, PDFDocumentInstance) {
    return new Promise(function (resolve, reject) {
        PDFDocumentInstance.getPage(pageNum).then(function (pdfPage) {
            pdfPage.getTextContent().then(function (textContent) { // should stopAtErrors somehow be passed here to getTextContent instead of to getDocument??
                var textItems = textContent.items;
                var finalString = '';
                for (var i = 0; i < textItems.length; i++) {
                    var item = textItems[i];
                    finalString += item.str + ' ';
                }
                resolve(finalString);
            });
        });
    }).catch(function (err) {
        console.log('Error! ' + err);
    });
}
One thing I am wondering is whether the stopAtErrors parameter should instead somehow be passed to getTextContent. I have not found any examples illustrating the use of stopAtErrors, and the PDFJS documentation does not show a working example either. Given that I am still at the stage of needing examples to get PDFJS to function, I am at a loss as to how to make PDFJS stop trying to parse a problem PDF file and just move on to the next one.
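For what it's worth, one way to get the skip-and-continue behavior is to make every stage of the chain propagate its rejection, then catch per file so one bad PDF cannot stall the loop. A sketch under those assumptions (the URL list is a placeholder, and depending on the PDFJS build the page count is pdf.numPages rather than pdf.pdfInfo.numPages):
// Sketch: extract text from each file in turn, skipping any that fail to parse.
function getPageText(pageNum, pdfDocument) {
    // returning each promise chains any rejection up to the per-file catch
    return pdfDocument.getPage(pageNum)
        .then(function (pdfPage) { return pdfPage.getTextContent(); })
        .then(function (textContent) {
            return textContent.items.map(function (item) { return item.str; }).join(' ');
        });
}

function getDocumentText(url) {
    return PDFJS.getDocument({ url: url, stopAtErrors: true }).then(function (pdf) {
        var pagesPromises = [];
        for (var i = 1; i <= pdf.pdfInfo.numPages; i++) {
            pagesPromises.push(getPageText(i, pdf));
        }
        return Promise.all(pagesPromises);
    });
}

var urls = ['one.pdf', 'two.pdf']; // placeholder list of pdf urls
urls.reduce(function (chain, url) {
    return chain.then(function () {
        return getDocumentText(url)
            .then(function (pagesText) { console.log(pagesText); })
            .catch(function (err) {
                console.log('Skipping ' + url + ': ' + err); // continue with the next file
            });
    });
}, Promise.resolve());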
I have a static local webpage that is supposed to be updated with data from a CSV file (as data is written to the file, the page should update and display the new data). My issue is that with many data points it becomes very slow, and after a certain number of points it stops updating altogether.
Currently, the page updates by re-reading the entire file every 3 seconds. I imagine a much more efficient way would be to detect the most recent additions to the file and append only those new points to the current data set. I'm just not sure how to do that.
The code below parses the CSV file and separates the data into arrays used by the charts:
function parseCSVData(csvFile) {
    time = [];
    altitude = [];
    outsideTemp = [];
    insideTemp = [];
    voltage = [];
    state = [];
    velocity = [];
    degrees = [];
    // cut the CSV file into lines
    var lines = csvFile.split("\n");
    $.each(lines, function (lineNumber, line) {
        if (lineNumber != 0) { // skip header line
            var fields = line.split(",");
            var missionTime = parseInt(fields[1]);
            var altitude2 = parseInt(fields[2]);
            var outsideTemp2 = parseInt(fields[3]);
            var insideTemp2 = parseInt(fields[4]);
            var voltage2 = parseInt(fields[5]);
            var state2 = parseInt(fields[6]);
            var velocity2 = parseInt(fields[7]);
            var degrees2 = parseInt(fields[8]);
            time.push(missionTime);
            altitude.push(altitude2);
            outsideTemp.push(outsideTemp2);
            insideTemp.push(insideTemp2);
            voltage.push(voltage2);
            state.push(state2);
            velocity.push(velocity2);
            degrees.push(degrees2);
        }
    });
}
This is the code to update the charts every 3 seconds:
setInterval(function () {
    var file = fileName + '?q=' + Math.random();
    fillCharts(file); // calls the parseCSVData function and fills the charts
}, 3000);
EDIT: pastebin with entire code: http://pastebin.com/Qmzn8azY
EDIT2: sample csv data:
TEAM_ID,MISSION_TIME,ALT_SENSOR,OUTSIDE_TEMP,INSIDE_TEMP,VOLTAGE,FSW_STATE,VELOCITY,DEGREES
ubArtemis,0,36,20,20,9,1,0,0
ubArtemis,1,45,18,20,9,1,6,2
ubArtemis,2,200,16,20,9,1,10,5
ubArtemis,3,65,14,19,9,1,15,3
ubArtemis,4,79,12,17,8,2,22,4
ubArtemis,5,100,10,16,8,3,30,2
ubArtemis,6,120,8,15,8,4,39,0
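One way to implement the append-only idea from the question: remember how many lines have already been parsed, and on each poll push only the rows past that point into the existing arrays. A minimal sketch, assuming the arrays were created once by an initial parseCSVData call and are not reset between polls:
var linesProcessed = 1; // skip the header row on the first pass

function appendNewCSVData(csvFile) {
    var lines = csvFile.split("\n");
    // handle only rows that were not seen on a previous poll
    for (var i = linesProcessed; i < lines.length; i++) {
        var fields = lines[i].split(",");
        if (fields.length < 9) continue; // skip a blank or partially written trailing line
        time.push(parseInt(fields[1]));
        altitude.push(parseInt(fields[2]));
        outsideTemp.push(parseInt(fields[3]));
        insideTemp.push(parseInt(fields[4]));
        voltage.push(parseInt(fields[5]));
        state.push(parseInt(fields[6]));
        velocity.push(parseInt(fields[7]));
        degrees.push(parseInt(fields[8]));
        linesProcessed = i + 1;
    }
}
This keeps the parsing cost proportional to the new rows instead of the whole file.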
I'm trying to load the content of several CSV files into a new array. The CSV files have a typical structure, with labels in the first row and values (both strings and real numbers) separated by commas. This part of the code is responsible for loading the data for future use with the Google Maps API (not a problem for now, since I'm stuck on just loading the data). I would like a structure in which I can call an element by its name, which is why var nodedata = {}; is created.
The thing I totally don't get is why some part of the code is not being executed at all. console.log(nodedata); shows an empty object, at least in my Firefox console.
Here is my attempt at the problem; links to the CSV files are in the code.
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.js"></script>
<script src="http://jquery-csv.googlecode.com/files/jquery.csv-0.71.js"></script>
<script type="text/javascript">
var nodes = {};
var generation = {};
var nodedata = {};

$.get('https://dl.dropboxusercontent.com/u/25575808/energy/nodes.csv', function (response) {
    nodes = $.csv.toObjects(response);
    console.log(nodes);
});

$.get('https://dl.dropboxusercontent.com/u/25575808/energy/generation.csv', function (response) {
    generation = $.csv.toObjects(response);
    console.log(generation);
});

function getGeneration(nodename) {
    var gen = 0;
    for (var i = 0; i < generation.length; i++) {
        if (generation[i].datetime == "2013-01-01 01:00") {
            if (generation[i].node == nodename) {
                gen = gen + Number(generation[i]["output (MW)"]);
            }
        }
    }
    return gen;
}

for (var i = 0; i < nodes.length; i++) {
    nodedata[nodes[i].Node] = {
        center: new google.maps.LatLng(nodes[i].Latitude, nodes[i].Longitude),
        nodegen: getGeneration(nodes[i].Node)
    };
}
console.log(nodedata);
</script>
I believe the problem you're having is unrelated to the CSV data; rather, it is the fact that the data is being loaded asynchronously.
You are executing two $.get() requests to load the files, which take some time to complete. The browser does not wait for them to finish before continuing through the rest of the code.
Therefore, it is possible for console.log(nodedata) to be executed before any data exists inside the nodes array.
An easy way to handle this is to stack your callback functions so that the first GET request completes -> run the 2nd GET request -> finally, run the processing code.
Check out this reorganization of the code: http://jsfiddle.net/Vr7sw/
(I removed the Google Maps line since I don't have the library loaded)
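As a sketch of an alternative to nesting, jQuery's $.when can run both requests in parallel and fire a single callback once both are done; with multiple requests, each argument to the done callback is the [data, statusText, jqXHR] triple of the corresponding $.get:
$.when(
    $.get('https://dl.dropboxusercontent.com/u/25575808/energy/nodes.csv'),
    $.get('https://dl.dropboxusercontent.com/u/25575808/energy/generation.csv')
).done(function (nodesResult, generationResult) {
    nodes = $.csv.toObjects(nodesResult[0]);
    generation = $.csv.toObjects(generationResult[0]);
    // both datasets are now loaded, so it is safe to build nodedata
    for (var i = 0; i < nodes.length; i++) {
        nodedata[nodes[i].Node] = {
            center: new google.maps.LatLng(nodes[i].Latitude, nodes[i].Longitude),
            nodegen: getGeneration(nodes[i].Node)
        };
    }
    console.log(nodedata);
});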
The problem is that the $.get requests are asynchronous (see the jQuery documentation). Call a function from inside your callback body, like this:
function nodesToJson(nodes) {
    for (var i = 0; i < nodes.length; i++) {
        nodedata[nodes[i].Node] = {
            center: new google.maps.LatLng(nodes[i].Latitude, nodes[i].Longitude),
            nodegen: getGeneration(nodes[i].Node)
        };
    }
    console.log(nodedata);
}

$.get('https://dl.dropboxusercontent.com/u/25575808/energy/nodes.csv', function (response) {
    nodes = $.csv.toObjects(response);
    //when the request is ready, process the nodes
    nodesToJson(nodes);
});
I have a bunch of text files on the server side with file names 0.txt, 1.txt, 2.txt, 3.txt, and so forth. I want to read the content of all the files and store them in an array A, such that A[0] has 0.txt's content, A[1] has 1.txt's, ...
How can I do this in JavaScript/jQuery?
Originally, I used $.ajax({}) in jQuery to load those text files, but it didn't work because of the asynchronous nature of Ajax. I tried setting $.ajax({...async=false...}), but it was very slow: I have ~1000 10 KB files to read in total.
From your question, you want to load the txt files from the server into the browser:
var done = 0, resultArr = [], numberOfFiles = 1000;

function getHandler(idx) {
    return function (data) {
        resultArr[idx] = data;
        done++;
        if (done === numberOfFiles) {
            // tell your other part all files are loaded
        }
    };
}

for (var i = 0; i < numberOfFiles; i++) {
    $.ajax(i + ".txt").done(getHandler(i));
}
jsFiddle: http://jsfiddle.net/LtQYF/1/
What you're looking for is the File API introduced in HTML5 (working draft).
The examples in this article will point you in the right direction. Remember that the end user has to initiate the action and manually select the files; anything else would be a terrible idea privacy- and security-wise.
Update:
I found (yet again) the Mozilla docs to be more readable! Quick HTML mockup:
<input type="file" id="files" name="files[]" onchange="loadTextFile();" multiple/>
<button id="test" onclick="test();">What have we read?</button>
...and the JavaScript:
var testArray = []; //your array

function loadTextFile() {
    //this would be tidier with jQuery, but whatever
    var _filesContainer = document.getElementById("files");
    //check how many files have been selected and iterate over them
    var _filesCount = _filesContainer.files.length;
    for (var i = 0; i < _filesCount; i++) {
        //create a new FileReader instance; I have to read more into it,
        //but I was unable to just recycle one
        var oFReader = new FileReader();
        //when the file has been "read" by the FileReader locally,
        //log its contents and push them into an array
        oFReader.onload = function (oFREvent) {
            console.log(oFREvent.target.result);
            testArray.push(oFREvent.target.result);
        };
        //actually initiate the read
        oFReader.readAsText(_filesContainer.files[i]);
    }
}

//sanity check
function test() {
    for (var i = 0; i < testArray.length; i++) {
        console.warn(testArray[i]);
    }
}
Fiddled
You don't give enough information for a specific answer. However, in my opinion "it doesn't work because of the asynchronous nature of ajax" is not correct. You should be able to allocate an array of the correct size and use a callback for each file. You might also try other options, such as bundling the files on the server and unbundling them on the client. The designs that address the problem well depend on specifics you have not provided.
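As a sketch of that one-callback-per-file idea in more modern terms: fetch returns a promise per file, and Promise.all preserves the index-to-file mapping regardless of completion order (the file count of 1000 is taken from the question):
const numberOfFiles = 1000; // from the question
const requests = [];
for (let i = 0; i < numberOfFiles; i++) {
    // each request resolves to the text content of i.txt
    requests.push(fetch(i + '.txt').then(response => response.text()));
}
Promise.all(requests).then(A => {
    console.log(A[0]); // content of 0.txt; A[i] holds i.txt's content
});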