Dear JavaScript Gurus:
I have the following requirements:
Process a large array in batches of 1000 (or any arbitrary size).
When each batch is processed, update the UI to show our progress.
When all batches have been processed, continue with the next step.
For example:
function process_array(batch_size) {
  var da_len = data_array.length;
  var idx = 0;

  function process_batch() {
    var idx_end = Math.min(da_len, idx + batch_size);
    while (idx < idx_end) {
      // do the voodoo we need to do
    }
  }

  // This loop kills the browser ...
  while (idx < da_len) {
    setTimeout(process_batch, 10);
    // Show some progress (no luck) ...
    show_progress(idx);
  }
}

// Process array ...
process_array(1000);

// Continue with next task ...
// BUT NOT UNTIL WE HAVE FINISHED PROCESSING THE ARRAY!!!
Since I am new to JavaScript, I have discovered that everything runs on a single thread, so one needs to get a little creative about interleaving processing with UI updates. I have found some examples using recursive setTimeout calls (one key difference is that I have to wait until the array has been fully processed before continuing), but I cannot seem to get things working as described above.
Also -- I need a "pure" JavaScript solution: no third-party libraries, and no web workers (since they are not fully supported).
Any (and all) guidance would be appreciated.
Thanks in advance.
You can make a stream from the array and use batch-stream to split it into batches, so that you can stream the data to the UI in batches.
stream-array
and
batch-stream
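For reference, a rough sketch of how those two packages might be wired together (Node-style streams; process_item and show_progress are placeholders, and the exact option names should be checked against the batch-stream README):

const streamify = require('stream-array');   // turns an array into an object-mode Readable
const BatchStream = require('batch-stream'); // Transform that groups items into arrays
const { Writable } = require('stream');

streamify(data_array)
  .pipe(new BatchStream({ size: 1000 }))     // emit groups of 1000 items
  .pipe(new Writable({
    objectMode: true,
    write(batch, _enc, done) {
      batch.forEach(process_item);           // do the per-item work (placeholder)
      show_progress(batch.length);           // update the UI between batches (placeholder)
      done();
    }
  }))
  .on('finish', () => {
    // all batches processed -- continue with the next step here
  });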
In JavaScript, while a script is executing in an HTML page, the page stays unresponsive until the script finishes. This is because JavaScript is single-threaded.
You could consider using a web worker: a script that runs in the background, independently of other scripts, without affecting the performance of the page.
In that case the user can continue to do whatever they want in the UI.
You can send and receive messages from the web worker.
More info on Web Worker here.
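For illustration, here is a minimal sketch of that approach applied to the batch-processing question (the worker file name "process-worker.js" and the message shapes are assumptions, not a fixed API):

// main.js
const worker = new Worker('process-worker.js');
worker.onmessage = (e) => {
  if (e.data.type === 'progress') {
    show_progress(e.data.processed);   // UI stays responsive while the worker crunches
  } else if (e.data.type === 'done') {
    // continue with the next step here
  }
};
worker.postMessage({ data: data_array, batchSize: 1000 });

// process-worker.js
onmessage = (e) => {
  const { data, batchSize } = e.data;
  for (let i = 0; i < data.length; i += batchSize) {
    const batch = data.slice(i, i + batchSize);
    // do the voodoo we need to do on `batch` ...
    postMessage({ type: 'progress', processed: Math.min(i + batchSize, data.length) });
  }
  postMessage({ type: 'done' });
};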
So part of the magic of recursion is really thinking about the things that you need to pass in, to make it work.
And in JS (and other functional languages) that frequently involves functions.
function processBatch (remaining, processed, batchSize,
                       transform, onComplete, onProgress) {
  if (!remaining.length) {
    return onComplete(processed);
  }
  const batch = remaining.slice(0, batchSize);
  const tail = remaining.slice(batchSize);
  const totalProcessed = processed.concat(batch.map(transform));
  return scheduleBatch(tail, totalProcessed, batchSize,
                       transform, onComplete, onProgress);
}

function scheduleBatch (remaining, processed, batchSize,
                        transform, onComplete, onProgress) {
  onProgress(processed, remaining, batchSize);
  setTimeout(() => processBatch(remaining, processed, batchSize,
                                transform, onComplete, onProgress));
}
const noop = () => {};
const identity = x => x;

function processArray (array, batchSize, transform, onComplete, onProgress) {
  scheduleBatch(
    array,
    [],
    batchSize,
    transform || identity,
    onComplete || noop,
    onProgress || noop
  );
}
This can be simplified considerably, and the reality is that I'm just having a little fun here, but if you follow the trail, you should see recursion in a closed system that works with an arbitrary transform, on arbitrary objects, over arbitrary array lengths, with arbitrary code run on completion and after each batch, and that schedules the next run itself.
To be honest, you could even swap this implementation out for a custom scheduler by changing three lines of code or so, and then you could log whatever you wanted...
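For example, here is roughly what that swap might look like if you wanted batches scheduled on animation frames instead of timer ticks (just a sketch; only scheduleBatch changes, everything else stays the same):

function scheduleBatch (remaining, processed, batchSize,
                        transform, onComplete, onProgress) {
  onProgress(processed, remaining, batchSize);
  // requestAnimationFrame instead of setTimeout: the next batch runs
  // right before the next repaint, so UI updates stay in step with the work
  requestAnimationFrame(() => processBatch(remaining, processed, batchSize,
                                           transform, onComplete, onProgress));
}

Note that requestAnimationFrame stops firing in background tabs, so the setTimeout scheduler above is the safer default.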
const numbers = [1, 2, 3, 4, 5, 6];
const batchSize = 2;

const showWhenDone = numbers => console.log(`Done with: ${numbers}`);
const showProgress = (processed, remaining) =>
  console.log(`${processed.length} done; ${remaining.length} to go`);
const quintuple = x => x * 5;

processArray(
  numbers,
  batchSize,
  quintuple,
  showWhenDone,
  showProgress
);

// 0 done; 6 to go
// 2 done; 4 to go
// 4 done; 2 to go
// 6 done; 0 to go
// Done with: 5,10,15,20,25,30
Overkill? Oh yes. But worth familiarizing yourself with the concepts, if you're going to spend some time in the language.
Thank you all for your comments and suggestions.
Below is the code I settled on. It works for any task (in my case, processing an array) and gives the browser time to update the UI when needed.
The do_task function starts an anonymous function via setInterval that alternates between two steps -- processing the array in batches and showing progress -- and this continues until all elements in the array have been processed.
function do_task() {
  const k_task_process_array = 1;
  const k_task_show_progress = 2;

  var working = false;
  var task_step = k_task_process_array;
  var batch_size = 1000;
  var idx = 0;
  var idx_end = 0;
  var da_len = data_array.length;

  // Start the task ...
  var task_id = setInterval(function () {
    if (!working) {
      working = true;
      switch (task_step) {
        case k_task_process_array:
          idx_end = Math.min(idx + batch_size, da_len);
          while (idx < idx_end) {
            // do the voodoo we need to do ...
            idx++;
          }
          task_step = k_task_show_progress;
          break;
        default:
          // Show progress here ...
          // Continue processing array ...
          task_step = k_task_process_array;
      }
      // Check if done ...
      if (idx >= da_len) {
        clearInterval(task_id);
        task_id = null;
      }
      working = false;
    }
  }, 1);
}
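For the record, the original requirement 3 (continue only once the whole array is done) can be covered by handing the task a completion callback. A hedged variant of the same pattern (the on_done callback is hypothetical, not part of the snippet above):

function do_task(data_array, on_done) {
  var batch_size = 1000;
  var idx = 0;
  var da_len = data_array.length;
  var task_id = setInterval(function () {
    var idx_end = Math.min(idx + batch_size, da_len);
    while (idx < idx_end) {
      // do the voodoo we need to do ...
      idx++;
    }
    show_progress(idx); // let the UI update between batches
    if (idx >= da_len) {
      clearInterval(task_id);
      if (typeof on_done === 'function') { on_done(); } // continue with the next step
    }
  }, 1);
}

// Usage:
do_task(data_array, function () {
  // Continue with next task ... the array is now fully processed.
});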
Following is the code to create a 2D matrix in JavaScript:
function Create2DArray(rows) {
  var arr = [];
  for (var i = 0; i < rows; i++) {
    arr[i] = [];
  }
  return arr;
}
Now I have a number of 2D matrices inside an array:
const matrices = []
for (let i = 1; i < 10000; i++) {
  matrices.push(new Create2DArray(i * 100))
}
// I'm just mocking it here. In reality we have data available in matrix form.
I want to do operations on each matrix like this:
for (let i = 0; i < matrices.length; i++) {
  ...doAnythingWithEachMatrix()
}
And since it will be a computationally expensive process, I would like to do it via a web worker so that the main thread is not blocked.
I'm using Parallel.js for this purpose since it provides a nice API for multithreading. (Or should I use a native Web Worker? Please suggest.)
update() {
  for (let i = 0; i < matrices.length; i++) {
    var p = new Parallel(matrices[i]);
    p.spawn(function (matrix) {
      return doanythingOnMatrix(matrix)
      // can be anything like transpose, scaling, translate etc...
    }).then(function (matrix) {
      // return back so that I can use those values to update the DOM,
      // or directly update the DOM here.
      // suggest a best way so that I can prevent crashes and improve performance.
    });
  }
  requestAnimationFrame(update)
}
So my question is what is the best way of doing this?
Is it ok to use a new Webworker or Parallel instance inside a for loop?
Would it cause memory issues?
Or is it ok to create a global instance of Parallel or Webworker and use it for manipulating each matrix?
Or suggest a better approach.
I'm using Parallel.js as an alternative to a Web Worker.
Is it OK to use Parallel.js for multithreading? (Or do I need to use a native Web Worker?)
In reality, the matrices would contain position data; this data is processed by the Web Worker or Parallel.js instance behind the scenes, and the processed result is returned to the main app, which then uses it to draw items / update the canvas.
UPDATE NOTE
Actually, this is an animation. So it will have to be updated for each matrix during each tick.
Currently, I'm creating a new instance of Parallel inside the for loop. I fear that this is an unconventional approach, or that it could cause memory leaks. I need the best way of doing this. Please suggest.
UPDATE
This is my example:
Following our discussion in the comments, here is an attempt at using chunks. The data is processed in groups of 10 (a chunk) so that you can receive their results regularly, and we only start the animation after receiving 200 of them (the buffer) to get a head start (think of it like a video stream). These values may need to be adjusted depending on how long each matrix takes to process.
That being said, you added details afterwards about the lag you get. I'm not sure if this will solve it, or if the problem lies in your canvas update function. It's just a path to explore:
/*
 * A helper function to process data in chunks
 */
async function processInChunks({ items, processingFunc, chunkSize, bufferSize, onData, onComplete }) {
  const results = [];
  // For each group of {chunkSize} items
  for (let i = 0; i < items.length; i += chunkSize) {
    // Process this group in parallel
    const p = new Parallel(items.slice(i, i + chunkSize));
    // p.map does not return a real Promise, so we create one
    // to be able to await it
    const chunkResults = await new Promise(resolve => {
      return p.map(processingFunc).then(resolve);
    });
    // Add to the results
    results.push(...chunkResults);
    // Pass the results to a callback if we're above the {bufferSize}
    if (i >= bufferSize && typeof onData === 'function') {
      // Flush the results
      onData(results.splice(0, results.length));
    }
  }
  // In case there was less data than the wanted {bufferSize},
  // pass the results anyway
  if (results.length) {
    onData(results.splice(0, results.length));
  }
  if (typeof onComplete === 'function') {
    onComplete();
  }
}
/*
 * Usage
 */

// For the demo, a fake matrix Array
const matrices = new Array(3000).fill(null).map((_, i) => i + 1);

const results = [];
let animationRunning = false;

// For the demo, a function which takes time to complete
function doAnythingWithMatrix(matrix) {
  const start = new Date().getTime();
  while (new Date().getTime() - start < 30) { /* sleep */ }
  return matrix;
}

processInChunks({
  items: matrices,
  processingFunc: doAnythingWithMatrix,
  chunkSize: 10,   // Receive results after each group of 10
  bufferSize: 200, // But wait for at least 200 before starting to receive them
  onData: (chunkResults) => {
    results.push(...chunkResults);
    if (!animationRunning) { runAnimation(); }
  },
  onComplete: () => {
    console.log('All the matrices were processed');
  }
});

function runAnimation() {
  animationRunning = results.length > 0;
  if (animationRunning) {
    updateCanvas(results.shift());
    requestAnimationFrame(runAnimation);
  }
}

function updateCanvas(currentMatrixResult) {
  // Just for the demo, we're not really using a canvas
  canvas.innerHTML = `Frame ${currentMatrixResult} out of ${matrices.length}`;
  info.innerHTML = results.length;
}
<script src="https://unpkg.com/paralleljs@1.0/lib/parallel.js"></script>
<h1 id="canvas">Buffering...</h1>
<h3>(we've got a headstart of <span id="info">0</span> matrix results)</h3>
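Since the question also asks whether a native Worker would do, here is a rough sketch of that alternative: one long-lived worker reused for every matrix rather than a new Parallel instance per iteration (the "matrix-worker.js" file name, the message shape and the drawResultOnCanvas helper are assumptions):

// matrix-worker.js (hypothetical file; assumes doAnythingWithMatrix is defined here too)
self.onmessage = (e) => {
  const { id, matrix } = e.data;
  const result = doAnythingWithMatrix(matrix); // transpose, scale, translate, ...
  self.postMessage({ id, result });
};

// main thread -- one worker, reused for every matrix
const worker = new Worker('matrix-worker.js');
let next = 0;

worker.onmessage = (e) => {
  drawResultOnCanvas(e.data.result); // placeholder: update the canvas / DOM here
  if (next < matrices.length) {
    worker.postMessage({ id: next, matrix: matrices[next] });
    next++;
  }
};

// Kick off the first matrix; each reply triggers the next one, so only one
// matrix is in flight at a time and no worker is created per iteration.
worker.postMessage({ id: next, matrix: matrices[next] });
next++;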
Problem
I'm trying to scan a drive directory (recursively walk all the paths) and write all the paths to a file (as it's finding them) using fs.createWriteStream in order to keep the memory usage low, but it doesn't work: the memory usage reaches 2 GB during the scan.
Expected
I was expecting fs.createWriteStream to automatically handle memory/disk usage at all times, keeping memory usage at a minimum with back-pressure.
Code
const fs = require('fs')
const walkdir = require('walkdir')

let dir = 'C:/'
let options = {
  "max_depth": 0,
  "track_inodes": true,
  "return_object": false,
  "no_return": true,
}

const wstream = fs.createWriteStream("C:/Users/USERNAME/Desktop/paths.txt")
let walker = walkdir(dir, options)

walker.on('path', (path) => {
  wstream.write(path + '\n')
})

walker.on('end', (path) => {
  wstream.end()
})
Is it because I'm not using .pipe()? I tried creating a new Stream.Readable({ read() {} }) and then, inside the .on('path') handler, pushing paths into it with readable.push(path), but that didn't really work.
UPDATE:
Method 2:
I tried the drain method proposed in the answers, but it doesn't help much. It does reduce memory usage to 500 MB (which is still too much for a stream), but it slows the code down significantly (from seconds to minutes).
Method 3:
I also tried using readdirp; it uses even less memory (~400 MB) and is faster, but I don't know how to pause it and use the drain method there to reduce the memory usage further:
const fs = require('fs')
const readdirp = require('readdirp')

let dir = 'C:/'
const wstream = fs.createWriteStream("C:/Users/USERNAME/Desktop/paths.txt")

readdirp(dir, {alwaysStat: false, type: 'files_directories'})
  .on('data', (entry) => {
    wstream.write(`${entry.fullPath}\n`)
  })
Method 4:
I also tried doing this operation with a custom recursive walker; even though it uses only 30 MB of memory (which is what I wanted), it is about 10 times slower than the readdirp method, and it is synchronous, which is undesirable:
const fs = require('fs')
const path = require('path')

let dir = 'C:/'

function customRecursiveWalker(dir) {
  fs.readdirSync(dir).forEach(file => {
    let fullPath = path.join(dir, file)
    // Folders
    if (fs.lstatSync(fullPath).isDirectory()) {
      fs.appendFileSync("C:/Users/USERNAME/Desktop/paths.txt", `${fullPath}\n`)
      customRecursiveWalker(fullPath)
    }
    // Files
    else {
      fs.appendFileSync("C:/Users/USERNAME/Desktop/paths.txt", `${fullPath}\n`)
    }
  })
}

customRecursiveWalker(dir)
Preliminary observation: you've attempted to get the results you want using multiple approaches. One complication when comparing them is that they do not all do the same work. If you run tests on a file tree that contains only regular files and no mount points, you can probably compare the approaches fairly, but once you start adding mount points, symbolic links, etc., you may get different memory and time statistics merely because one approach excludes files that another approach includes.
I initially attempted a solution using readdirp, but unfortunately that library appears buggy to me. Running it on my system here, I got inconsistent results: one run would output 10 MB of data, another run with the same input parameters would output 22 MB, then I'd get another number, and so on. I looked at the code and found that it does not respect the return value of push:
_push(entry) {
  if (this.readable) {
    this.push(entry);
  }
}
As per the documentation, the push method may return false, in which case the Readable stream should stop producing data and wait until _read is called again. readdirp entirely ignores that part of the specification. It is crucial to pay attention to the return value of push to get proper handling of back-pressure. There were also other things in that code that seemed questionable to me.
So I abandoned that and worked on a proof of concept showing how it could be done. The crucial parts are:
When the push method returns false, it is imperative to stop adding data to the stream. Instead, we record where we were, and stop.
We start again only when _read is called.
If you uncomment the console.log statements that print START and STOP, you'll see them printed out in succession on the console: we start, produce data until Node tells us to stop, then stop until Node tells us to start again, and so on.
const stream = require("stream");
const fs = require("fs");
const { readdir, lstat } = fs.promises;
const path = require("path");
class Walk extends stream.Readable {
constructor(root, maxDepth = Infinity) {
super();
this._maxDepth = maxDepth;
// These fields allow us to remember where we were when we have to pause our
// work.
// The path of the directory to process when we resume processing, and the
// depth of this directory.
this._curdir = [root, 1];
// The directories still to process.
this._dirs = [this._curdir];
// The list of files to process when we resume processing.
this._files = [];
// The location in `this._files` were to continue processing when we resume.
this._ix = 0;
// A flag recording whether or not the fetching of files is currently going
// on.
this._started = false;
}
async _fetch() {
// Recall where we were by loading the state in local variables.
let files = this._files;
let dirs = this._dirs;
let [dir, depth] = this._curdir;
let ix = this._ix;
while (true) {
// If we've gone past the end of the files we were processing, then
// just forget about them. This simplifies the code that follows a bit.
if (ix >= files.length) {
ix = 0;
files = [];
}
// Read directories until we have files to process.
while (!files.length) {
// We've read everything, end the stream.
if (dirs.length === 0) {
// This is how the stream API requires us to indicate the stream has
// ended.
this.push(null);
// We're no longer running.
this._started = false;
return;
}
// Here, we get the next directory to process and get the list of
// files in it.
[dir, depth] = dirs.pop();
try {
files = await readdir(dir, { withFileTypes: true });
}
catch (ex) {
// This is a proof-of-concept. In a real application, you should
// determine what exceptions you want to ignore (e.g. EPERM).
}
}
// Process each file.
for (; ix < files.length; ++ix) {
const dirent = files[ix];
// Don't include in the results those files that are not directories,
// files or symbolic links.
if (!(dirent.isFile() || dirent.isDirectory() || dirent.isSymbolicLink())) {
continue;
}
const fullPath = path.join(dir, dirent.name);
if (dirent.isDirectory() && depth < this._maxDepth) {
// Keep track that we need to walk this directory.
dirs.push([fullPath, depth + 1]);
}
// Finally, we can put the data into the stream!
if (!this.push(`${fullPath}\n`)) {
// If the push returned false, we have to stop pushing results to the
// stream until _read is called again, so we have to stop.
// Uncomment this if you want to see when the stream stops.
// console.log("STOP");
// Record where we were in our processing.
this._files = files;
// The element at ix *has* been processed, so ix + 1.
this._ix = ix + 1;
this._curdir = [dir, depth];
// We're stopping, so indicate that!
this._started = false;
return;
}
}
}
}
async _read() {
// Do not start the process that puts data on the stream over and over
// again.
if (this._started) {
return;
}
this._started = true; // Yep, we've started.
// Uncomment this if you want to see when the stream starts.
// console.log("START");
await this._fetch();
}
}
// Change the paths to something that makes sense for you.
stream.pipeline(new Walk("/home/", 5),
fs.createWriteStream("/tmp/paths3.txt"),
(err) => console.log("ended with", err));
When I run the first attempt you made with walkdir here, I get the following statistics:
Elapsed time (wall clock): 59 sec
Maximum resident set size: 2.90 GB
When I use the code I've shown above:
Elapsed time (wall clock): 35 sec
Maximum resident set size: 0.1 GB
The file tree I use for the tests produces a file listing of 792 MB.
You could exploit the return value of WritableStream.write(): it essentially tells you whether you should continue to read or not. A WritableStream has an internal property that stores the threshold after which the buffer should be processed by the OS. The drain event is emitted when the buffer has been flushed, i.e. when you can safely call WritableStream.write() again without risking excessively filling the buffer (which means the RAM). Luckily for you, walkdir lets you control the process: you can call pause (pause the walk; no more events will be emitted until resume) and resume (resume the walk) on the walker object, pausing and resuming the writing process on your stream accordingly. Try this:
let is_emitter_paused = false;

wstream.on('drain', (evt) => {
  if (is_emitter_paused) {
    walker.resume();
  }
});

walker.on('path', function (path, stat) {
  is_emitter_paused = !wstream.write(path + '\n');
  if (is_emitter_paused) {
    walker.pause();
  }
});
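The same idea can be applied to the readdirp attempt from the question, since readdirp's streaming API returns a Readable that can be paused and resumed (a sketch, assuming the readdirp version used in the question):

const fs = require('fs')
const readdirp = require('readdirp')

const wstream = fs.createWriteStream("C:/Users/USERNAME/Desktop/paths.txt")
const entries = readdirp('C:/', { alwaysStat: false, type: 'files_directories' })

entries.on('data', (entry) => {
  if (!wstream.write(`${entry.fullPath}\n`)) {
    entries.pause()                         // writable buffer is full: stop producing
  }
})
wstream.on('drain', () => entries.resume()) // buffer flushed: keep walking
entries.on('end', () => wstream.end())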
Here's an implementation inspired by Louis's answer above. I think it's a bit easier to follow, and in my minimal testing it performs about the same.
const fs = require('fs');
const path = require('path');
const stream = require('stream');
class Walker extends stream.Readable {
constructor(root = process.cwd(), maxDepth = Infinity) {
super();
// Dirs to process
this._dirs = [{ path: root, depth: 0 }];
// Max traversal depth
this._maxDepth = maxDepth;
// Files to flush
this._files = [];
}
_drain() {
while (this._files.length > 0) {
const file = this._files.pop();
if (file.isFile() || file.isDirectory() || file.isSymbolicLink()) {
const filePath = path.join(this._dir.path, file.name);
if (file.isDirectory() && this._maxDepth > this._dir.depth) {
// Add directory to be walked at a later time
this._dirs.push({ path: filePath, depth: this._dir.depth + 1 });
}
if (!this.push(`${filePath}\n`)) {
// Halt walking
return false;
}
}
}
if (this._dirs.length === 0) {
// Walking complete
this.push(null);
return false;
}
// Continue walking
return true;
}
async _step() {
try {
this._dir = this._dirs.pop();
this._files = await fs.promises.readdir(this._dir.path, { withFileTypes: true });
} catch (e) {
this.emit('error', e); // Uh oh...
}
}
async _walk() {
this.walking = true;
while (this._drain()) {
await this._step();
}
this.walking = false;
}
_read() {
if (!this.walking) {
this._walk();
}
}
}
stream.pipeline(new Walker('some/dir/path', 5),
fs.createWriteStream('output.txt'),
(err) => console.log('ended with', err));
I have a stream of events and I would like to call a function that returns a promise for each of those events. The problem is that this function is very expensive, so I would like to process at most n events at a time.
This marble diagram is probably wrong, but this is what I would like:
---x--x--xxxxxxx-------------x-------------> //Events
---p--p--pppp------p-p-p-----p-------------> //In Progress
-------d--d--------d-d-dd------dddd--------> //Promise Done
---1--21-2-34-----------3----4-3210-------- //QUEUE SIZE CHANGES
This is the code that I have so far:
var n = 4;
var inProgressCount = 0;

var events$ = Rx.Observable.fromEvent(produceEvent, 'click')
  .map((ev) => new Date().getTime());

var inProgress$ = events$.controlled();

var done$ = inProgress$
  .tap(() => inProgressCount++)
  .flatMap((timestamp) => Rx.Observable.fromPromise(expensiveComputation(getRandomInt(1, 5) * 1000, timestamp)));

done$.subscribeOnNext((timestamp) => {
  inProgressCount--;
  inProgress$.request(Math.max(1, n - inProgressCount));
});

inProgress$.request(n);
There are two issues with this code:
It's using the inProgressCount variable, which is updated with side-effecting functions.
The done$ subscription is only called once when I request more than one item from the controlled stream. This causes the inProgressCount variable to update incorrectly, which eventually limits the queue to one item at a time.
You can see it working here:
http://jsbin.com/wivehonifi/1/edit?js,console,output
Questions:
Is there a better approach?
How can I get rid of the inProgressCount variable?
Why is the done$ subscription only getting called once when requesting multiple items?
Update:
Answer to question #3: switchMap is the same as flatMapLatest, so that's why I was only getting the last one. I updated the code to use flatMap instead of switchMap.
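For reference, a small illustration of that difference (RxJS 4 operator names, as in the code above): flatMapLatest/switchMap drops the previous inner observable whenever a new event arrives, so only the most recent promise reports back, while flatMap keeps all of them alive:

const slow = (x) => Rx.Observable.timer(1000).map(() => x);

Rx.Observable.from([1, 2, 3])
  .flatMapLatest(slow)                               // switchMap behaviour: cancel the previous inner stream
  .subscribe((x) => console.log('latest only:', x)); // -> 3

Rx.Observable.from([1, 2, 3])
  .flatMap(slow)                                     // keep every inner stream
  .subscribe((x) => console.log('all of them:', x)); // -> 1, 2, 3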
You actually don't need to use backpressure at all. There is an operator called flatMapWithMaxConcurrent that does this for you. It is essentially an alias for calling .map().merge(concurrency) and it only allows a maximum number of streams to be in flight at a time.
I updated your jsbin here: http://jsbin.com/weheyuceke/1/edit?js,output
But I annotated the important bit below:
const concurrency = 4;

var done$ = events$
  // Only allows a maximum number of items to be subscribed to at a time
  .flatMapWithMaxConcurrent(concurrency,
    ({timestamp}) =>
      // This overload of `fromPromise` defers the execution of the lambda
      // until subscription
      Rx.Observable.fromPromise(() => {
        // Notify the UI that this task is in progress
        updatePanelAppend(inProgress, timestamp);
        removeFromPanel(pending, timestamp);
        // Return the task
        return expensiveComputation(getRandomInt(1, 5) * 1000, timestamp)
      }));
I'm learning FRP using Bacon.js, and would like to assemble data from a paginated API in a stream.
The module that uses the data has a consumption API like this:
// UI module, displays unicorns as they arrive
beautifulUnicorns.property.onValue(function (allUnicorns) {
  console.log("Got " + allUnicorns.length + " Unicorns");
  // ... some real display work
});
The module that assembles the data requests sequential pages from an API and pushes onto the stream every time it gets a new data set:
// beautifulUnicorns module
var curPage = 1
var stream = new Bacon.Bus()
var property = stream.toProperty()
// You have to add an empty subscriber, otherwise future onValues will not
// receive the initial value. https://github.com/baconjs/bacon.js/wiki/FAQ#why-isnt-my-property-updated
property.onValue(function () {})
var allUnicorns = [] // !!! stateful list of all unicorns ever received. Is this idiomatic for FRP?

var getNextPage = function () {
  /* get data for subsequent pages.
     Skipping for clarity */
}

var gotNextPage = function (resp) {
  Array.prototype.push.apply(allUnicorns, resp) // just adds the responses to the existing array reference
  stream.push(allUnicorns)
  curPage++
  if (curPage <= pageLimit) { getNextPage() }
}
How do I subscribe to the stream in a way that gives me the full list of all unicorns ever received? Is this flatMap or something similar? I don't think I need a new stream out of it, but I'm not sure. I'm sorry, I'm new to the FRP way of thinking. To be clear, assembling the array works; it just feels like I'm not doing the idiomatic thing.
I'm not using jQuery or another Ajax library for this, which is why I'm not using Bacon.fromPromise.
You may also wonder why my consuming module wants the whole set instead of just the incremental update. If it were only appending rows that would be OK, but in my case it's an infinite scroll, and it should draw data if both: 1. data is available and 2. the area is on screen.
This can be done with the .scan() method. You will also need a stream that emits the items of one page; you can create it with .repeat().
Here is a draft (sorry, not tested):
var itemsPerPage = Bacon.repeat(function (index) {
  var pageNumber = index + 1;
  if (pageNumber < PAGE_LIMIT) {
    return Bacon.fromCallback(function (callback) {
      // your method that talks to the server
      getDataForAPage(pageNumber, callback);
    });
  } else {
    return false;
  }
});

var allItems = itemsPerPage.scan([], function (allItems, itemsFromAPage) {
  return allItems.concat(itemsFromAPage);
});

// Here you go
allItems.onValue(function (allUnicorns) {
  console.log("Got " + allUnicorns.length + " Unicorns");
  // ... some real display work
});
As you noticed, you also won't need the .onValue(function(){}) hack, or the external curPage state.
Here is a solution using flatMap and fold. When dealing with the network, you have to remember that the data can come back in a different order than you sent the requests; that's why the combination of fold and map is needed.
var pages = Bacon.fromArray([1, 2, 3, 4, 5])

var requests = pages.flatMap(function (page) {
  return doAjax(page)
    .map(function (value) {
      return {
        page: page,
        value: value
      }
    })
}).log("Data received")

var allData = requests.fold([], function (arr, data) {
  return arr.concat([data])
}).map(function (arr) {
  // I would normally write this as a one-liner
  var sorted = _.sortBy(arr, "page")
  var onlyValues = _.pluck(sorted, "value")
  var inOneArray = _.flatten(onlyValues)
  return inOneArray
})

allData.log("All data")

function doAjax(page) {
  // This would actually be Bacon.fromPromise($.ajax...)
  // Math.random to simulate the fact that requests can return out of order
  return Bacon.later(Math.random() * 3000, [
    "Page" + page + "Item1",
    "Page" + page + "Item2"])
}
http://jsbin.com/damevu/4/edit
I am doing a heavy "scientific" (i.e., not displaying data) WebGL computation. WebGL can't be put in a worker, and doing a lot of WebGL blocks the whole browser, so I sliced my computation into chunks and I compute each chunk in a setTimeout() callback (after calling getError() to flush the OpenGL queue). I leave a bit of time between the chunks so that the browser has time to flush some UI events from the main UI queue, which makes the whole thing feel a bit less sluggish.
My problem is that when the tab is hidden, the setTimeout gets throttled to a one-second period, which is way too slow for me.
Is there a better solution than what I did? Obviously requestAnimationFrame() doesn't work, since it's never called back in hidden tabs (and it's too slow when the tab is visible).
Is there a non-throttled timer event in the hidden state? I tried to use window.postMessage(), but it's still too fast and the whole browser feels slow.
Here is the current state of my research:
function drawTile(sequenceIndex) {
  if (sequenceIndex < sequence.length) {
    var x = sequence[sequenceIndex][0];
    var y = sequence[sequenceIndex][1];
    setTilePos(x, y);
    modelStage.render(renderer, modelBuffer);
    minkowskiPass.render(renderer, minkowskiBuffer, modelBuffer);
    copyPass.quad.position.x = x;
    copyPass.quad.position.y = y;
    copyPass.render(renderer, null, minkowskiBuffer);
    var gl = renderer.getContext();
    gl.getError();
    sequenceIndex++;
    if (document.visibilityState != "hidden") {
      setTimeout(function () {
        drawTile(sequenceIndex);
      }, 10);
    } else {
      // window.postMessage is not rate limited when the tab is hidden;
      // we need to slow the computation with an event, otherwise the whole browser is unresponsive.
      $(window).one('message', function () {
        drawTile(sequenceIndex);
      });
      window.postMessage('lol', '*');
    }
  } else {
    console.timeEnd('computation');
  }
}

console.time('computation');
drawTile(0);
Here's another convoluted workaround for anyone who needs it; you can use the Web Audio API to generate function calls:
var setTimeout2 = (function () {
  var samples = 2048;
  var fns = [];
  var context = new AudioContext();
  var source = context.createBufferSource();
  var node = context.createScriptProcessor(samples, 1, 1);
  // This gets fired every ~46 milliseconds. You can change
  // `samples` to another valid value (256, 512, 1024, 2048,
  // 4096, 8192, or 16384); then it'll get called every
  // `samples / context.sampleRate` seconds (~46 ms for
  // `samples == 2048` and `context.sampleRate == 44100`).
  node.onaudioprocess = function (e) {
    fns = fns.filter(function (fn) {
      return !fn(Date.now() - fn.t);
    });
  };
  source.connect(node);
  node.connect(context.destination);
  window.do_not_garbage_collect = [context, source, node];
  return function (fn) {
    fn.t = Date.now();
    fns.push(fn);
  };
}());

// Use like this:
setTimeout2(function (t) {
  console.log(t);
  // End after 1 second.
  if (t > 1000)
    return true;
})
// Use like this:
setTimeout2(function (t) {
console.log(t);
// End after 1 second.
if (t > 1000)
return true;
})
Perhaps have a worker thread also run a postMessage loop and, a fraction of the time (every n iterations), either pause or resume the main thread?
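A rough sketch of that idea: let a dedicated worker's timer drive the main thread, since timers inside a worker have historically not been clamped to one second in hidden tabs. The inline Blob worker, the 10 ms period, and the renderOneTile() helper (one drawTile-style chunk of the WebGL work, returning true while tiles remain) are all assumptions:

// Worker that just posts a "tick" message every 10 ms (assumption: worker
// timers are not throttled the way the page's timers are in a hidden tab).
const tickerSource = "setInterval(function () { postMessage('tick'); }, 10);";
const ticker = new Worker(URL.createObjectURL(
  new Blob([tickerSource], { type: 'application/javascript' })
));

ticker.onmessage = function () {
  if (!renderOneTile()) {   // placeholder: do one chunk of WebGL work, false when finished
    ticker.terminate();     // all tiles done: stop ticking
    console.timeEnd('computation');
  }
};
console.time('computation');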