I'm currently having an issue when loading images with web workers. I want to batch-load a bunch of images and then do some processing on them (in my case, converting each source image to an ImageBitmap using createImageBitmap). The user has the ability to cancel the request, and this causes a crash when the worker is terminated before it has finished. I've created a fiddle here https://jsfiddle.net/e4wcro0o/18/ that crashes consistently.
The issue lies here:
function closeWorker() {
    if (!isClosed) {
        console.log("terminating worker");
        isClosed = true;
        worker.terminate();
    }
}

for (let i = 0; i < srcImages.length; i++) {
    loadImageWithWorker(new URL(srcImages[i], window.location).toString()).then(function(img) {
        closeWorker();
        console.log(img);
    });
}
It may look a bit funky to call closeWorker() on the first resolved promise, but it does mean the crash is reproducible. I've only tested on Chrome 64.0.3282.186 (Official Build) (64-bit).
Any ideas on what I'm doing wrong?
I have come across the same issue. I think the cause is terminating the worker while the createImageBitmap function is still running.
I have modified your JSFiddle to terminate the worker at the earliest safe opportunity, which avoids the crash.
const worker = createWorker(() => {
    const pendingBitmaps = {};
    var pendingKill = false;

    self.addEventListener("message", e => {
        const src = e.data;

        // "KILL" means the main thread wants to terminate the worker:
        // stop accepting work, and signal "READY" once every in-flight
        // createImageBitmap has settled so terminate() is safe to call.
        if (src == "KILL") {
            pendingKill = true;
            Promise.all(Object.values(pendingBitmaps)).then(_ => self.postMessage("READY"));
            return;
        }

        // Not accepting any more conversions once a kill is pending.
        if (pendingKill) {
            self.postMessage({src, bitmap: null});
            return;
        }

        pendingBitmaps[src] = fetch(src).then(response => response.blob())
            .then(blob => {
                if (pendingKill) return null;
                return createImageBitmap(blob);
            })
            .then(bitmap => {
                self.postMessage({src, bitmap});
                delete pendingBitmaps[src];
            });
    });
});
https://jsfiddle.net/wrf1sLbx/16/
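On the main thread, the terminate call then waits for the worker's handshake. A minimal sketch of the counterpart wiring, assuming the same message names as in the worker code above (the actual fiddle may differ in details):

let killRequested = false;

function closeWorkerSafely() {
    if (killRequested) return;
    killRequested = true;
    worker.postMessage("KILL"); // ask the worker to wind down first
}

worker.addEventListener("message", e => {
    if (e.data === "READY") {
        // All pending createImageBitmap calls have settled; safe to terminate.
        worker.terminate();
    }
});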
I'm using web workers to upload files.
It works fine with small files.
But with a large file, the upload gets very slow and my script makes the web page collapse.
The memory used by the web workers is never released.
See the attachment.
The dedicated workers keep accumulating and consume gigabytes of memory while a large file is being uploaded.
And I see this warning repeated whenever a web worker calls close():
Scripts may close only the windows that were opened by them.
I throttled threadsQuantity to 5, so the number of web workers should never exceed 5.
class Queue {
    constructor() {
        this.timestamp = new Date().getTime();
        this.activeConnections = {};
        this.threadsQuantity = 5;
    }

    async sendNext() {
        const activeConnections = Object.keys(this.activeConnections).length;
        if (activeConnections >= this.threadsQuantity) {
            return;
        }
        if (!this.chunksQueue.length) {
            if (!activeConnections) {
                this.complete();
            }
            return;
        }
        let chunkId = this.chunksQueue.pop();
        this.activeConnections[chunkId] = true;
        this.sendChunk(chunkId);
    }

    sendChunk(chunkId) {
        if (window.Worker) {
            let chunk = this.getChunk(chunkId);
            const myWorker = new Worker("/assets/js/worker.js?v=" + this.timestamp);
            myWorker.postMessage([this.timestamp, chunkId, chunk]);
            myWorker.onmessage = (e) => {
                var obj = JSON.parse(e.data);
                if (obj.success) {
                    delete this.activeConnections[chunkId];
                    this.sendNext();
                    close();
                } else {
                    this.sendChunk(chunkId);
                }
            };
        }
    }
}
I tried close() and self.close(), but both gave the same warning and failed.
I tried this.close(), but it causes this error:
app.0a4dcc55.js:32 Uncaught TypeError: this.close is not a function
    at _.onmessage
How can I safely kill web workers during the upload?
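For reference, the two termination APIs live in different scopes, which is why close() misbehaves here. A minimal sketch (worker path reused from the code above):

// Main thread: keep the reference and terminate the worker there.
const myWorker = new Worker("/assets/js/worker.js");
myWorker.terminate(); // immediately kills the worker

// Inside worker.js: a worker can close itself.
self.close();

A bare close() inside an onmessage handler assigned on the main thread (as in sendChunk above) resolves to window.close(), which is exactly what produces the "Scripts may close only the windows that were opened by them" warning.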
I'm trying to use Node.js to write PNG data to ffmpeg to create a video. I've verified that the following code works:
let ffmpeg = spawn(
"ffmpeg",
[
"-y",
"-loglevel", "info",
"-f", "image2pipe",
"-c:v", "png",
"-r", `${this.fps}`,
"-i", "-",
"-an",
"-vcodec", "libx264",
"-pix_fmt", "yuv420p",
"output.mp4",
],
);
ffmpeg.stdout.on('data', data => {
console.log(`ffmpeg stdout: ${data}`);
});
ffmpeg.stderr.on('data', data => {
console.log(`ffmpeg stderr: ${data}`);
});
let bufs = [];
let p = Promise.resolve();
for (let i = 0; i < this.frameData.length; i++) {
p = p.then(_ => new Promise(resolve => {
this.renderFrame(i);
this.renderer.domElement.toBlob(blob => {
blob.arrayBuffer().then(arr => {
console.log('writing...');
let uInt8Arr = new Uint8Array(arr);
// ffmpeg.stdin.write(uInt8Arr);
bufs.push(uInt8Arr);
resolve();
});
});
}));
}
p = p.then(_ => {
for (let buf of bufs) {
ffmpeg.stdin.write(buf);
}
ffmpeg.stdin.end();
});
Sorry if it seems complicated; the promises are a workaround for the fact that toBlob() runs asynchronously, but the frames must be written in order.
That code works correctly as written, but it's needlessly inefficient because it buffers the frame data in an array only to write it to ffmpeg later. However, if I uncomment ffmpeg.stdin.write(uInt8Arr); and comment out the loop where the array data is copied to ffmpeg, ffmpeg simply hangs at the end without generating any video. If I then perform a "large enough" action, like saving a file or spawning a new process, ffmpeg generates the video correctly.
I suspect some sort of buffering issue is causing this, but I don't fully understand it or know how to fix it. I've already tried sending various signals to ffmpeg with ffmpeg.kill() and running it through stdbuf as suggested here, to no avail. Is there any workaround for this?
For anyone who chanced upon this: the solution was to replace the call to ffmpeg.stdin.end() with ffmpeg.stdin.destroy().
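So the streaming variant of the loop in the question ends up looking something like this (a sketch; the only substantive changes are writing each frame directly and the final destroy()):

let p = Promise.resolve();
for (let i = 0; i < this.frameData.length; i++) {
    p = p.then(_ => new Promise(resolve => {
        this.renderFrame(i);
        this.renderer.domElement.toBlob(blob => {
            blob.arrayBuffer().then(arr => {
                // Stream each frame straight to ffmpeg instead of buffering it.
                ffmpeg.stdin.write(new Uint8Array(arr));
                resolve();
            });
        });
    }));
}
p = p.then(_ => {
    // end() left ffmpeg hanging; destroy() tears the pipe down outright.
    ffmpeg.stdin.destroy();
});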
I have a service worker in sw.js that uses a template engine to get the commit hash as a version number. I set the cache name like this:
var version = {{ commit_hash }};
self.cacheName = `cache-` + version;
I have some scripts being added to the cache on the worker's install, but there are scripts that are dynamically loaded on the page. I would like to load all the scripts/css on the first load without forcing the user to wait for the app to install first.
I can get all the content on the page with the following code in the bottom of index.html:
var toCache = ['/'];

var css = document.getElementsByTagName("link");
for (const el of css) {
    var href = el.getAttribute("href");
    if (href) {
        toCache.push(href);
    }
}

var js = document.getElementsByTagName("script");
for (const el of js) {
    var src = el.getAttribute("src");
    if (src) {
        toCache.push(src);
    }
}
That works fine; now I would just need to open the correct cache, fetch the files that aren't already present, and store them. Something like:
toCache.forEach(function(url) {
    caches.match(url).then(function(result) {
        if (!result) {
            fetch(url).then(function(response) {
                caches.open(cacheName).then(cache => {
                    cache.put(url, response);
                });
            });
        }
    });
});
Is there a way to get the cacheName from the service worker inside a script tag in a different file?
And yes, I know that I could simplify this greatly by doing the check in the for/of loops. I broke it apart so it would be easier to describe.
No.
JavaScript executing in the window context cannot access the service worker's context and vice versa. You have to implement a workaround of some sort.
Remember that you can use postMessage to communicate between the two.
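For example, the page can ask the controlling service worker for its cacheName over a MessageChannel. A minimal sketch, with a made-up "getCacheName" message type (and note that navigator.serviceWorker.controller is null until the page is actually controlled):

// In the page:
function getCacheName() {
    return new Promise(function(resolve) {
        var channel = new MessageChannel();
        channel.port1.onmessage = function(event) {
            resolve(event.data);
        };
        navigator.serviceWorker.controller.postMessage("getCacheName", [channel.port2]);
    });
}

// In sw.js:
self.addEventListener("message", function(event) {
    if (event.data === "getCacheName") {
        event.ports[0].postMessage(self.cacheName);
    }
});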
Using this blog I was able to pass messages from the service worker to the page and back. First, I added the following function at the top of sw.js:
function clientPostMessage(client, message) {
    return new Promise(function(resolve, reject) {
        var channel = new MessageChannel();
        channel.port1.onmessage = function(event) {
            if (event.data.error) {
                reject(event.data.error);
            } else {
                resolve(event.data);
            }
        };
        client.postMessage(message, [channel.port2]);
    });
}
This allows my service worker to post a message to the window, and then do a callback with a promise.
Then, in my index.html file I added the following to a script tag:
navigator.serviceWorker.addEventListener('message', event => {
    switch (event.data) {
        case "addAll":
            var toCache = [];
            var css = document.getElementsByTagName("link");
            for (const el of css) {
                var href = el.getAttribute("href");
                if (href) {
                    toCache.push(href);
                }
            }
            var js = document.getElementsByTagName("script");
            for (const el of js) {
                var src = el.getAttribute("src");
                if (src) {
                    toCache.push(src);
                }
            }
            event.ports[0].postMessage(toCache);
            break;
        default:
            console.log(event.data);
    }
});
This listens for messages from the service worker; when an "addAll" message arrives, it gathers all the scripts and linked content on the page and sends back an array of their URLs.
Finally, I added the following to my activate event listener function in sw.js:
// Get all the clients, and for each post a message
clients.matchAll().then(clients => {
    clients.forEach(client => {
        // Post "addAll" to get a list of files to cache
        clientPostMessage(client, "addAll").then(message => {
            // For each file, check if it already exists in the cache
            message.forEach(url => {
                caches.match(url).then(result => {
                    // If there's nothing in the cache, fetch the file and cache it
                    if (!result) {
                        fetch(url).then(response => {
                            caches.open(cacheName).then(cache => {
                                cache.put(url, response);
                            });
                        });
                    }
                });
            });
        });
    });
});
For each client, the service worker sends an "addAll" message to the page and receives the result. For each item in the result, it checks whether the value is already in the cache and, if not, fetches and adds it.
With this method, the install listener of the service worker only needs to contain:
self.addEventListener('install', event => {
    if (self.skipWaiting) {
        self.skipWaiting();
    }
    event.waitUntil(
        caches.open(cacheName).then(cache => {
            return cache.addAll([
                '/',
                '/index.html',
            ]);
        })
    );
});
It seems to be working well so far; if anyone has any suggestions or sees any errors, I'd be happy to hear them! You can also tell me how improper this is, but it makes my life a lot easier when adding service workers to pre-existing projects that rely on scripts that aren't bundled together.
I have a service worker installed on my website and everything works fine, except when I push an update to the cached files: they stay cached forever, and I seem to be unable to invalidate the cache unless I unregister the worker from chrome://serviceworker-internals/.
const STATIC_CACHE_NAME = 'static-cache-v1';
const APP_CACHE_NAME = 'app-cache-#VERSION';

const CACHE_APP = [
    '/',
    '/app/app.js'
];

const CACHE_STATIC = [
    'https://fonts.googleapis.com/css?family=Roboto:400,300,500,700',
    'https://cdnjs.cloudflare.com/ajax/libs/normalize/4.1.1/normalize.min.css'
];

self.addEventListener('install', function(e) {
    e.waitUntil(
        Promise.all([caches.open(STATIC_CACHE_NAME), caches.open(APP_CACHE_NAME)]).then(function(storage) {
            var static_cache = storage[0];
            var app_cache = storage[1];
            return Promise.all([static_cache.addAll(CACHE_STATIC), app_cache.addAll(CACHE_APP)]);
        })
    );
});

self.addEventListener('activate', function(e) {
    e.waitUntil(
        caches.keys().then(function(cacheNames) {
            return Promise.all(
                cacheNames.map(function(cacheName) {
                    if (cacheName !== APP_CACHE_NAME && cacheName !== STATIC_CACHE_NAME) {
                        console.log('deleting', cacheName);
                        return caches.delete(cacheName);
                    }
                })
            );
        })
    );
});

self.addEventListener('fetch', function(e) {
    const url = new URL(e.request.url);
    if (url.hostname === 'static.mysite.co' || url.hostname === 'cdnjs.cloudflare.com' || url.hostname === 'fonts.googleapis.com') {
        e.respondWith(
            caches.match(e.request).then(function(response) {
                if (response) {
                    return response;
                }
                var fetchRequest = e.request.clone();
                return fetch(fetchRequest).then(function(response) {
                    if (!response || response.status !== 200 || response.type !== 'basic') {
                        return response;
                    }
                    var responseToCache = response.clone();
                    caches.open(STATIC_CACHE_NAME).then(function(cache) {
                        cache.put(e.request, responseToCache);
                    });
                    return response;
                });
            })
        );
    } else if (CACHE_APP.indexOf(url.pathname) !== -1) {
        e.respondWith(caches.match(e.request));
    }
});
where #VERSION is a version appended to the cache name at compile time; note that STATIC_CACHE_NAME never changes, as those files are meant to stay static forever.
The behavior is also erratic: I've been watching the delete logic (the part that logs), and it keeps reporting that it is deleting caches that were supposedly already deleted. When I run caches.keys().then(function(k){console.log(k)}) I get a whole bunch of old caches that should have been removed.
After googling and watching some videos on Udacity, I found that this is the intended behavior: an old worker stays in control until every page it controls is closed and reopened, and only then can the new service worker take over.
The solution was to force the new worker to take control, based on https://developer.mozilla.org/en-US/docs/Web/API/ServiceWorkerGlobalScope/skipWaiting. The following solved the issue, even though it takes two reloads for the new worker to reflect the changes (which makes sense, since the app is loaded before the new worker replaces the previous one).
self.addEventListener('install', function(e) {
    e.waitUntil(
        Promise.all([caches.open(STATIC_CACHE_NAME), caches.open(APP_CACHE_NAME), self.skipWaiting()]).then(function(storage) {
            var static_cache = storage[0];
            var app_cache = storage[1];
            return Promise.all([static_cache.addAll(CACHE_STATIC), app_cache.addAll(CACHE_APP)]);
        })
    );
});

self.addEventListener('activate', function(e) {
    e.waitUntil(
        Promise.all([
            self.clients.claim(),
            caches.keys().then(function(cacheNames) {
                return Promise.all(
                    cacheNames.map(function(cacheName) {
                        if (cacheName !== APP_CACHE_NAME && cacheName !== STATIC_CACHE_NAME) {
                            console.log('deleting', cacheName);
                            return caches.delete(cacheName);
                        }
                    })
                );
            })
        ])
    );
});
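As a follow-up to the two-reload note above: a common page-side pattern (my own suggestion, not part of the original setup) is to reload automatically when the new worker takes control:

// In the page: reload once the new service worker claims this client.
// Guard with a flag so a reload can only happen once per page load.
var refreshed = false;
navigator.serviceWorker.addEventListener('controllerchange', function() {
    if (refreshed) return;
    refreshed = true;
    window.location.reload();
});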
As a "service worker newbie" I encountered a related situation whereby a service worker wouldn't refresh even though the "JavaScript Console>Application>Update on reload" was enabled on Chrome Canary.
The problem was that I had working code in my /sw/index.js and then I introduced an error to /sw/index.js. When I introduced the error the browser refused to load the updated code and continued to load the earlier working service worker. When I corrected the code in index.js and refreshed the page the new code for the service worker appeared. I would have thought that the error filled code would throw an error, but it didn't. The browser just loaded the earlier error free version.
Scenario: You would like to know whether a TURN server is being used for a particular call, and which one from the array of TURN servers you provided during PeerConnection creation. Right now there are two options:
Wireshark: but when you are behind a corporate proxy and the TURN server is outside it, Wireshark shows the proxy IP as the destination (not to mention the inconvenience of running it in the background).
Going through the stats page: Chrome --> chrome://webrtc-internals and Firefox --> about:webrtc.
I would like an alternative to the above two: a way to determine this programmatically, so I do not have to leave my application page.
Update: I've updated the example to follow the latest spec, with maplike getStats.
The following approach follows the specification and currently only works in Firefox, because Chrome implements getStats() incorrectly at the moment. Hopefully, a version of the adapter.js polyfill should be available soon that will make this work in Chrome as well.
When you run this fiddle in Firefox, you'll see:
checking
connected
Does not use TURN
This is because the example provides both a STUN and a TURN server. But when I modify the config to use TURN only with iceTransportPolicy: "relay", I see:
checking
connected
Uses TURN server: 10.252.73.50
Note that the TURN server I use is behind a VPN, so it won't work for you; feel free to modify the fiddle with your own server (just don't save the fiddle unless you want the info to become public!).
While I haven't tested with more than one TURN server, the IP address shown matches the configured TURN server, so it should be possible to tell which server is in use with this approach.
// Turn server is on Mozilla's VPN.
var cfg = { iceTransportPolicy: "all", // set to "relay" to force TURN.
iceServers: [{ urls: "stun:stun.l.google.com:19302" },
{ urls: "turn:10.252.73.50",
username:"webrtc", credential:"firefox" }] };
var pc1 = new RTCPeerConnection(cfg), pc2 = new RTCPeerConnection(cfg);
pc1.onicecandidate = e => pc2.addIceCandidate(e.candidate);
pc2.onicecandidate = e => pc1.addIceCandidate(e.candidate);
pc2.oniceconnectionstatechange = () => log(pc2.iceConnectionState);
pc2.onaddstream = e => v2.srcObject = e.stream;
var findSelected = stats =>
[...stats.values()].find(s => s.type == "candidate-pair" && s.selected);
var start = () => navigator.mediaDevices.getUserMedia({ video: true })
.then(stream => pc1.addStream(v1.srcObject = stream))
.then(() => pc1.createOffer()).then(d => pc1.setLocalDescription(d))
.then(() => pc2.setRemoteDescription(pc1.localDescription))
.then(() => pc2.createAnswer()).then(d => pc2.setLocalDescription(d))
.then(() => pc1.setRemoteDescription(pc2.localDescription))
.then(() => waitUntil(() => pc1.getStats().then(s => findSelected(s))))
.then(() => pc1.getStats())
.then(stats => {
var candidate = stats.get(findSelected(stats).localCandidateId);
if (candidate.candidateType == "relayed") {
log("Uses TURN server: " + candidate.ipAddress);
} else {
log("Does not use TURN (uses " + candidate.candidateType + ").");
}
})
.catch(log);
var waitUntil = f => Promise.resolve(f())
.then(done => done || wait(200).then(() => waitUntil(f)));
var wait = ms => new Promise(resolve => setTimeout(resolve, ms));
var log = msg => div.innerHTML += msg +"<br>";
var failed = e => log(e +", line "+ e.lineNumber);
<video id="v1" width="108" height="81" autoplay></video>
<video id="v2" width="108" height="81" autoplay></video><br>
<button onclick="start()">Start!</button><br><div id="div"></div>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
I wrote and tested the piece of code below; it works in the latest versions of both Firefox and Chrome. getConnectionDetails returns a promise that resolves to the connection details:
function getConnectionDetails(peerConnection) {
    var connectionDetails = {}; // the final result object
    if (window.chrome) { // checking if Chrome
        var reqFields = [
            'googLocalAddress',
            'googLocalCandidateType',
            'googRemoteAddress',
            'googRemoteCandidateType'
        ];
        return new Promise(function(resolve, reject) {
            peerConnection.getStats(function(stats) {
                var filtered = stats.result().filter(function(e) {
                    return e.id.indexOf('Conn-audio') == 0 && e.stat('googActiveConnection') == 'true';
                })[0];
                if (!filtered) return reject('Something is wrong...');
                reqFields.forEach(function(e) {
                    connectionDetails[e.replace('goog', '')] = filtered.stat(e);
                });
                resolve(connectionDetails);
            });
        });
    } else { // assuming it is Firefox
        return peerConnection.getStats(null).then(function(stats) {
            var selectedKey = Object.keys(stats).filter(function(key) {
                return stats[key].selected;
            })[0];
            var selectedCandidatePair = stats[selectedKey];
            var localICE = stats[selectedCandidatePair.localCandidateId];
            var remoteICE = stats[selectedCandidatePair.remoteCandidateId];
            connectionDetails.LocalAddress = [localICE.ipAddress, localICE.portNumber].join(':');
            connectionDetails.RemoteAddress = [remoteICE.ipAddress, remoteICE.portNumber].join(':');
            connectionDetails.LocalCandidateType = localICE.candidateType;
            connectionDetails.RemoteCandidateType = remoteICE.candidateType;
            return connectionDetails;
        });
    }
}
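A usage sketch, assuming pc is an established RTCPeerConnection (for example, pc1 from the earlier snippet; the field values in the comment are only illustrative):

getConnectionDetails(pc).then(function(details) {
    // e.g. { LocalAddress: "10.0.0.2:53421", LocalCandidateType: "relay", ... }
    console.log(details);
});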
I would like to point out one thing: all three of these methods fail in one scenario, namely two TURN servers running on the same machine on different ports. The only reliable way I found to distinguish them was looking at the TURN server logs.