I'm trying to make an extension that adds some HTML to a page as it loads. Long story short, it gets a list of links from a table, loads each of those links, grabs a specific string from the result, and adds it to the original page.
Here is the code. Due to the way the page was coded, I couldn't rely on unique IDs because they don't exist, so ignore that part.
var xhr = new XMLHttpRequest();
for (i = 0; i < length; i++) {
    url = rows[i].getElementsByTagName("td")[0].getElementsByTagName('a')[0].getAttribute('href');
    insert = rows[i].insertCell(-1);
    xhr.onreadystatechange = function() {
        if (xhr.readyState == 4 && xhr.status == 200) {
            //var Data = JSON.parse(xhr.responseText);
            var htmlString = xhr.responseText
              , parser = new DOMParser()
              , doc = parser.parseFromString(htmlString, "text/html")
              , test = doc.getElementById('page-wrapper').children[1]
              , ip_address = doc.getElementsByClassName('col-md-5')[0].children[5].children[0].children[0].children[10].innerHTML;
            insert.innerHTML = ip_address;
        }
    };
    xhr.open("GET", url, false);
    xhr.send();
}
When I call the function it works, but it takes a really long time to load, and everything appears at the end rather than updating as each iteration of the for loop completes. I'd like to decrease the load time by a significant amount, and ideally have each row update as soon as its data arrives instead of all at once at the end. I've tried searching for async JavaScript, but the bit of code I tried didn't help me much. Any assistance would be appreciated.
Using modern JavaScript, every request runs in parallel and each row can be updated as soon as its own response arrives:
Array.from(rows).forEach(row => {
    const url = row.querySelector('td:first-of-type a:first-of-type').getAttribute('href');
    fetch(url)
        .then(res => res.text()) // the linked pages are HTML, so read them as text rather than JSON
        .then(html => {
            const doc = new DOMParser().parseFromString(html, 'text/html');
            // do manipulations with the DOM here, e.g. insert the extracted string into this row
        })
        .catch(console.log)
})
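If you also need to know when every row has finished, a minimal sketch (assuming the same rows collection as above) is to keep the promises and wait for all of them:
// Hedged sketch: collect one promise per row, then act once all have settled.
const pending = Array.from(rows).map(row => {
    const url = row.querySelector('td:first-of-type a:first-of-type').getAttribute('href');
    return fetch(url).then(res => res.text());
});

Promise.all(pending).then(pages => {
    // pages is an array of HTML strings in the same order as the rows
    console.log('All ' + pages.length + ' rows loaded');
});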
First of all, I would like to say that I'm a student learning programming for around a month, so expect to see many mistakes.
I'm working on a website where I use a chart from the ChartJs library. The data used for this chart is fetched from a server.
What I want to do is update the content of the studenGesamt variable every 20 seconds. My main idea was to use setInterval, but it didn't work. I'm thinking I could make a new request to the server every 20 seconds, but I'm kind of lost on how to do that, or whether it's actually a good idea. If someone could help me I would really appreciate it!
let serverData;
let studenGesamt;
let date;
const http = new XMLHttpRequest();
const url = 'https://url.com/'; // I have hidden this URL as it is the actual server from my company
http.open("GET", url);
http.setRequestHeader('key', 'sample-key'); // I have hidden this key for the same reason as above
http.send();
const chart = document.getElementById("multie-pie-chart");
// Function that calculates the workdays passed up until today
const workdaysCount = () => [...new Array(new Date().getDate())]
    .reduce((acc, _, monthDay) => {
        const date = new Date()
        date.setDate(1 + monthDay)
        ![0, 6].includes(date.getDay()) && acc++
        return acc
    }, 0)
http.onload = (e) => {
    // Parsing the JSON file and storing it into a variable (console.log() to make sure it works)
    serverData = JSON.parse(http.responseText);
    console.log(serverData);
    // Storing the value of total hours from the database in a variable
    studenGesamt = serverData.abzurechnen.gesamt;
    chartRender(); // Function that calls and renders the chart
    setInterval(dataLoop, 5000); // 5 seconds to be able to test it, will be changed to 20 when finished
};
let dataLoop = () => {
    studenGesamt = serverData.abzurechnen.gesamt;
    console.log('test'); // Logging test to see if it is working
};
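For what it's worth, the re-request idea from the question could be sketched roughly like this (untested; it assumes the same url and 'key' header as above, and refreshChart is a hypothetical stand-in for however the page redraws the existing chart):
// Hedged sketch: request the data again every 20 seconds and refresh the chart.
const refreshData = () => {
    const req = new XMLHttpRequest();
    req.open("GET", url);
    req.setRequestHeader('key', 'sample-key');
    req.onload = () => {
        serverData = JSON.parse(req.responseText);
        studenGesamt = serverData.abzurechnen.gesamt;
        refreshChart(studenGesamt); // hypothetical: update the existing chart rather than rendering a new one
    };
    req.send();
};

setInterval(refreshData, 20000); // 20 seconds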
I'm working on a Google Chrome extension, part of whose purpose is to gather all Amazon user reviews for a specific product listed on Amazon.com or Amazon.co.uk so they can be manipulated. Amazon reviews are not all stored under one URL; instead, Amazon lists up to 10 reviews per page. My initial approach was to use the fetch API to gather the 10 reviews on a page, add them to an array, and continue on to the next page, stopping when no next page is defined. The only problem is that this can require hundreds of fetch calls per product and takes a long time to complete.
let contentArray = [];
let reviewArray = [];

function collectProductComments() {
    let parser = new DOMParser();
    let url = document.getElementsByClassName("a-link-emphasis")[0].href;
    getFirstTen(url, parser);
}

function getFirstTen(url, parser) { // Collects the first 10 elements containing a user review of a specific product
    if (isGood(url)) {
        fetch(url) // Fetch the page specified by the 'url' variable
            .then(response => response.text()) // Read the response as text
            .then(data => {
                console.log("Collecting reviews...");
                let doc = parser.parseFromString(data, "text/html"); // Parse the response into a DOM
                let reviews = doc.getElementsByClassName("review");
                for (let i = 0; i < reviews.length; i++) {
                    reviewArray.push(reviews[i]); // Append each review element to the array
                }
                if (doc.getElementById("cm_cr-pagination_bar") != undefined) { // Check whether the pagination bar exists
                    nextURL(doc, parser); // Handle the next pages (the parser has to be passed along)
                } else {
                    collectionResolved(); // No "next page" button, so treat all reviews as collected
                }
            })
            .catch(function (error) {
                console.log(error);
            });
    }
}
function nextURL(doc, parser) {
    let url = doc.getElementById("cm_cr-pagination_bar").children[0].lastChild.children[0].href; // URL of the page containing the next 10 reviews
    if (isGood(url)) { // If the next page exists...
        fetch(url)
            .then(response => response.text()) // Read the response as text
            .then(data => {
                doc = parser.parseFromString(data, "text/html"); // Parse the response into a DOM
                let reviews = doc.getElementsByClassName("review");
                for (let i = 0; i < reviews.length; i++) {
                    reviewArray.push(reviews[i]); // Append each review element to the array
                }
                nextURL(doc, parser); // Recurse, assuming there is a next page
            })
            .catch(function (error) {
                console.log(error);
            });
    } else { // Fired when there is no next page to check
        collectionResolved(); // Treat all reviews as collected
    }
}
function collectionResolved() {
    console.log("Review collection resolved.");
    contentArray = handleReviews(reviewArray); // Logic for searching through the DOM of the reviews
    console.log(contentArray);
    saveReviews(contentArray);
}

function isGood(url) {
    if (url == undefined) {
        return false;
    } else {
        return true;
    }
}

function handleReviews(elementsToCheck) {
    let tempContentArray = [];
    for (let i = 0; i < elementsToCheck.length; i++) {
        tempContentArray[i] = [getUser(elementsToCheck[i]), getTitle(elementsToCheck[i]), getComment(elementsToCheck[i])]; // Dissect each review element into the appropriate text
    }
    return tempContentArray;
}
I'm very new to this sort of thing - please feel free to suggest any corrections or improvements, or point out any instances of bad practice.
Does anyone know of a way either to optimise this code, or of a better approach that achieves the same result?
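One common way to cut the wall-clock time, sketched below rather than taken from the original code, is to work out the total number of review pages up front (for example from the review count shown on the first page) and fetch them all in parallel with Promise.all instead of walking them one by one. buildPageUrl is a hypothetical helper, since the real URL pattern depends on Amazon's markup:
// Hedged sketch: fetch all review pages in parallel rather than one after another.
// buildPageUrl(n) is hypothetical; it would produce the URL of review page n for the current product.
function collectAllPages(totalPages, parser) {
    const requests = [];
    for (let page = 1; page <= totalPages; page++) {
        requests.push(
            fetch(buildPageUrl(page))
                .then(response => response.text())
                .then(html => {
                    const doc = parser.parseFromString(html, "text/html");
                    return Array.from(doc.getElementsByClassName("review"));
                })
        );
    }
    // Wait for every page, then flatten the per-page arrays into one list of review elements.
    return Promise.all(requests).then(pages => pages.flat());
}

// Usage sketch:
// collectAllPages(pageCount, new DOMParser()).then(reviews => saveReviews(handleReviews(reviews)));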
I am trying to create a Chrome extension for Twitch and I'm facing a small issue which I cannot figure out how to solve. I must say my JavaScript knowledge is not the best (I am a C# developer), and this time not even Google could help me (maybe because I don't know the correct search terms).
So, to the problem. The goal of this extension is to notify you when a streamer changes games during a live stream, and I would like to notify the user only once per game change. I have an array of objects; each object is an instance of a class with some properties, one of which is game. This array is serialized into a JSON string and saved in the browser's localStorage. Below is the code I'm using to check what game the streamer is playing, set the game property to that value, serialize the array back to a JSON string, and write it to localStorage.
function checkPlaying() {
    var twitchUsers = JSON.parse(localStorage.twitchUsers);
    if (twitchUsers.length > 0) {
        var f = (function () {
            var xhr = [];
            for (i = 0; i < twitchUsers.length; i++) {
                (function (i) {
                    if (twitchUsers[i].isOnline) {
                        xhr[i] = new XMLHttpRequest();
                        xhr[i].open('GET', REQUEST_BASE + REQUEST_CHANNELS + twitchUsers[i].id, true);
                        xhr[i].setRequestHeader('Accept', 'application/vnd.twitchtv.v5+json')
                        xhr[i].setRequestHeader('Client-ID', CLIENT_ID)
                        xhr[i].onreadystatechange = function () {
                            if (xhr[i].readyState == 4 && xhr[i].status == 200) {
                                var response = JSON.parse(xhr[i].responseText);
                                var currentGame = response.game;
                                if (twitchUsers[i].game != currentGame) {
                                    twitchUsers[i].game = currentGame;
                                    if (twitchUsers[i].notify) {
                                        browser.notifications.create({
                                            "type": "basic",
                                            "iconUrl": twitchUsers[i].logo,
                                            "title": "TwitchWatcher",
                                            "message": twitchUsers[i].name + ' is currently playing ' + twitchUsers[i].game
                                        });
                                    }
                                }
                            }
                        };
                        xhr[i].send();
                    }
                    localStorage.twitchUsers = JSON.stringify(twitchUsers);
                })(i);
            }
        })();
    }
}
The issue is that localStorage.twitchUsers never gets the new values, and this causes the extension to keep notifying the user with the same game.
I have tried moving localStorage.twitchUsers = JSON.stringify(twitchUsers); inside the for loop and outside the f function, but I had no success.
Does anyone know what I am doing wrong?
Thank you.
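For reference, the assignment to localStorage.twitchUsers runs synchronously while the XHR callbacks only fire later, so the serialized array never contains the updated game values. A minimal sketch of one way around that, assuming the same REQUEST_BASE, REQUEST_CHANNELS and CLIENT_ID constants as above, is to wrap each request in a promise and persist the array once, after all responses have arrived:
// Hedged sketch: fetch each channel, update the array in memory,
// and write it to localStorage only after every response has arrived.
function checkPlaying() {
    var twitchUsers = JSON.parse(localStorage.twitchUsers);
    var requests = twitchUsers
        .filter(function (user) { return user.isOnline; })
        .map(function (user) {
            return fetch(REQUEST_BASE + REQUEST_CHANNELS + user.id, {
                headers: {
                    'Accept': 'application/vnd.twitchtv.v5+json',
                    'Client-ID': CLIENT_ID
                }
            })
            .then(function (res) { return res.json(); })
            .then(function (response) {
                if (user.game != response.game) {
                    user.game = response.game;
                    if (user.notify) {
                        browser.notifications.create({
                            "type": "basic",
                            "iconUrl": user.logo,
                            "title": "TwitchWatcher",
                            "message": user.name + ' is currently playing ' + user.game
                        });
                    }
                }
            });
        });

    // Persist the updated array once, after all requests have completed.
    Promise.all(requests).then(function () {
        localStorage.twitchUsers = JSON.stringify(twitchUsers);
    });
}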
I'm learning FRP using Bacon.js, and would like to assemble data from a paginated API in a stream.
The module that uses the data has a consumption API like this:
// UI module, displays unicorns as they arrive
beautifulUnicorns.property.onValue(function(allUnicorns){
  console.log("Got "+ allUnicorns.length +" Unicorns");
  // ... some real display work
});
The module that assembles the data requests sequential pages from an API and pushes onto the stream every time it gets a new data set:
// beautifulUnicorns module
var curPage = 1
var stream = new Bacon.Bus()
var property = stream.toProperty()
property.onValue(function(){}) // You have to add an empty subscriber, otherwise future onValues will not receive the initial value. https://github.com/baconjs/bacon.js/wiki/FAQ#why-isnt-my-property-updated
var allUnicorns = [] // !!! stateful list of all unicorns ever received. Is this idiomatic for FRP?
var getNextPage = function(){
  /* get data for subsequent pages.
     Skipping for clarity */
}
var gotNextPage = function (resp) {
  Array.prototype.push.apply(allUnicorns, resp) // just adds the responses to the existing array reference
  stream.push(allUnicorns)
  curPage++
  if (curPage <= pageLimit) { getNextPage() }
}
How do I subscribe to the stream in a way that gives me the full list of all unicorns ever received? Is this flatMap or something similar? I don't think I need a new stream out of it, but I don't know; I'm new to the FRP way of thinking. To be clear, assembling the array works, it just feels like I'm not doing it the idiomatic way.
I'm not using jQuery or another AJAX library for this, which is why I'm not using Bacon.fromPromise.
You may also wonder why my consuming module wants the whole set instead of just the incremental update. If it were only appending rows that would be fine, but in my case it's an infinite scroll, and it should draw data only when both (1) the data is available and (2) the area is on screen.
This can be done with the .scan() method. You will also need a stream that emits the items of each page, which you can create with .repeat().
Here is a draft of the code (sorry, not tested):
var itemsPerPage = Bacon.repeat(function(index) {
  var pageNumber = index + 1;
  if (pageNumber < PAGE_LIMIT) {
    return Bacon.fromCallback(function(callback) {
      // your method that talks to the server
      getDataForAPage(pageNumber, callback);
    });
  } else {
    return false;
  }
});

var allItems = itemsPerPage.scan([], function(allItems, itemsFromAPage) {
  return allItems.concat(itemsFromAPage);
});

// Here you go
allItems.onValue(function(allUnicorns){
  console.log("Got "+ allUnicorns.length +" Unicorns");
  // ... some real display work
});
As you noticed, you also won't need the .onValue(function(){}) hack or the external curPage state.
Here is a solution using flatMap and fold. When dealing with the network you have to remember that the data can come back in a different order than the requests were sent, which is why fold is combined with map.
var pages = Bacon.fromArray([1,2,3,4,5])

var requests = pages.flatMap(function(page) {
  return doAjax(page)
    .map(function(value) {
      return {
        page: page,
        value: value
      }
    })
}).log("Data received")

var allData = requests.fold([], function(arr, data) {
  return arr.concat([data])
}).map(function(arr) {
  // I would normally write this as a one-liner
  var sorted = _.sortBy(arr, "page")
  var onlyValues = _.pluck(sorted, "value")
  var inOneArray = _.flatten(onlyValues)
  return inOneArray
})

allData.log("All data")

function doAjax(page) {
  // This would actually be Bacon.fromPromise($.ajax...)
  // Math.random to simulate the fact that requests can return out of order
  return Bacon.later(Math.random() * 3000, [
    "Page" + page + "Item1",
    "Page" + page + "Item2"])
}
http://jsbin.com/damevu/4/edit
I want to fetch a list in JSON format from a certain URL and then use the DATA_ID from each item in that list to call a new URL. I'm new to PhantomJS and I can't figure out why nested loops inside page.open() act so strangely. The way phantom.exit() has to be used also seems odd for what I want to achieve.
Here's my code:
console.log('Loading recipes');
console.log('===============================================================');
var page = require('webpage').create();
var url = 'http://www.hiddenurl.com/recipes/all';
page.open(url, function (status) {
    //Page is loaded!
    var js = page.evaluate(function () {
        return document.getElementsByTagName('pre')[0];
    });
    var recipes = JSON.parse(js.innerHTML).results;
    //console.log(recipes[0].name.replace('[s]', ''));
    for (i = 0; i < recipes.length; i++) {
        console.log(recipes[i].name.replace('[s]', ''));
        var craft_page = require('webpage').create();
        var craft_url = 'http://www.hiddenurl.com/recipe/' + recipes[i].data_id;
        craft_page.open(craft_url, function (craft_status) {
            //Page is loaded!
            var craft_js = craft_page.evaluate(function () {
                return document.getElementsByTagName('body')[0];
            });
            var craftp = craft_js.innerHTML;
            console.log('test');
        });
        if (i == 5) {
            console.log('===============================================================');
            phantom.exit();
            //break;
        }
    }
});
The thing that happens here is that this line:
console.log(recipes[i].name.replace('[s]', ''));
..prints the following:
===============================================================
Item from DATA_ID 1
Item from DATA_ID 2
Item from DATA_ID 3
Item from DATA_ID 4
Item from DATA_ID 5
..then it just prints the next:
===============================================================
..followed by:
'test'
'test'
'test'
'test'
'test'
Why is this not happening serially? The data from the inner page.open() requests gets queued up and dumped at the end, even after phantom.exit() should already have been called.
Also, when I loop freely over the whole data set I get this error:
QEventDispatcherUNIXPrivate(): Unable to create thread pipe: Too many open files
2013-01-31T15:35:18 [FATAL] QEventDispatcherUNIXPrivate(): Can not continue without a thread pipe
Abort trap: 6
Is there any way I can set GLOBAL_PARAMETERS or direct the process in some way so that I can handle hundreds of page requests?
Thanks in advance!
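For context, page.open() is asynchronous, so the for loop fires all of the inner requests at once (which is also why the "Too many open files" error appears), and phantom.exit() runs before any of them have returned. A common pattern, sketched here and not tested against the hidden URLs, is to open the recipe pages one at a time with a recursive helper and only exit once the queue is empty:
// Hedged sketch: open recipe pages one at a time instead of all at once.
function processRecipes(recipes, index) {
    if (index >= recipes.length) {
        console.log('===============================================================');
        phantom.exit(); // exit only after every page has been handled
        return;
    }
    var craft_page = require('webpage').create();
    var craft_url = 'http://www.hiddenurl.com/recipe/' + recipes[index].data_id;
    craft_page.open(craft_url, function (craft_status) {
        var craftp = craft_page.evaluate(function () {
            return document.getElementsByTagName('body')[0].innerHTML;
        });
        console.log(recipes[index].name.replace('[s]', ''));
        craft_page.close(); // free the page before opening the next one
        processRecipes(recipes, index + 1);
    });
}

// Inside the outer page.open callback, instead of the for loop:
// processRecipes(recipes, 0);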
I've made a workaround with Python by calling PhantomJS separately through the shell, like this:
import os
import json
cmd = "./phantomjs fetch.js"
fin,fout = os.popen4(cmd)
result = fout.read()
recipes = json.loads(result)
print recipes['count']
Not the actual solution for the PhantomJS issue, but it is a working solution and has fewer problems with memory and code structure.