I'm recording some audio in the browser and then want to loop it seamlessly, avoiding clicks etc. when it starts. This means fading it in and out.
I can ramp the volume up and down once, but I can't find any way to trigger Web Audio's 'ramp to value at time' every time the loop starts again.
Is there an easy way to do this? I've got 10 of these buffers looping, so I'd like to avoid lots of costly setInterval checks if possible...
let source = audioContext.createBufferSource();
let gain = audioContext.createGain();
gain.gain.value = 0.01;
source.buffer = decodedData;
songLength = decodedData.duration;
source.loop = true;
source.connect(gain);
gain.connect(audioContext.destination);
source.start(0);
// fade in and out
gain.gain.exponentialRampToValueAtTime(0.2, audioContext.currentTime + 1);
gain.gain.exponentialRampToValueAtTime(0.01, audioContext.currentTime + songLength);
Consider listening to the ended event and re-triggering the playback:
class FadeInLoop {
  ctx
  audioBuffer
  gainNode
  isPlaying = true

  constructor(ctx, url) {
    this.ctx = ctx
    this.audioBuffer = fetch(url)
      .then(response => response.arrayBuffer())
      .then(arrayBuffer => ctx.decodeAudioData(arrayBuffer))
    this.gainNode = ctx.createGain()
    this.gainNode.connect(ctx.destination)
  }

  async start() {
    this.isPlaying = true
    const source = this.ctx.createBufferSource()
    this.source = source
    source.addEventListener('ended', e => {
      if (this.isPlaying) { // repeat unless stop() was called
        this.start()
      }
    })
    source.connect(this.gainNode)
    source.buffer = await this.audioBuffer
    const now = this.ctx.currentTime
    // exponential ramps can't start from 0, so start from a tiny non-zero value
    this.gainNode.gain.setValueAtTime(Number.EPSILON, now)
    this.gainNode.gain.exponentialRampToValueAtTime(1, now + 0.055)
    source.start(0)
  }

  stop() {
    this.isPlaying = false
    this.source?.stop()
  }
}
const ctx = new AudioContext({ latencyHint: 'interactive' })
const loop = new FadeInLoop(ctx, 'https://batman.dev/static/71474264/loop.mp3')
<button onclick="loop.start()">Start</button>
<button onclick="loop.stop()">Stop</button>
I am drawing large images (~20 MB in size) on the HTML5 Canvas and creating a small thumbnail out of them. This is how I'm doing it:
const img = new Image();
img.src = '20mb-image.jpg';
img.onload = () => {
  const canvas = document.createElement('canvas');
  const ctx = canvas.getContext('2d');
  ctx.canvas.width = 240;
  ctx.canvas.height = 240;
  ctx.drawImage(img, 0, 0, 240, 240);
  const base64 = encodeURIComponent(canvas.toDataURL('image/webp', 0.5));
  // Do something with the base64
};
While doing this, the page hangs for about 5 seconds before the image is completely drawn on the canvas, which is understandable because it is a very large image. So I tried to find out whether I could make use of web workers in this case. I found the function transferControlToOffscreen(), but it seems to have limited support and is even flagged as experimental on MDN.
I was wondering if there was a different way of drawing large images on the canvas without hanging up the page.
Also, while writing this question, one solution I have thought of is to draw the image piecewise from an N x N grid.
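For reference, here is a rough sketch of that piecewise idea, assuming the source image is already loaded and yielding to the browser between tiles (the 4x4 grid is an arbitrary example). Note that if the slow part is the initial decode rather than the painting, tiling alone may not remove the jank, which is what the answer below addresses:

// Hypothetical tiled draw: copies the image to the canvas one tile at a
// time, awaiting a frame between tiles so the page stays responsive.
async function drawTiled(img, ctx, n = 4) {
  const tileW = img.width / n;
  const tileH = img.height / n;
  const dstW = ctx.canvas.width / n;
  const dstH = ctx.canvas.height / n;
  for (let y = 0; y < n; y++) {
    for (let x = 0; x < n; x++) {
      ctx.drawImage(img,
        x * tileW, y * tileH, tileW, tileH,   // source tile
        x * dstW, y * dstH, dstW, dstH);      // destination tile
      // yield to the browser between tiles
      await new Promise(requestAnimationFrame);
    }
  }
}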
createImageBitmap() is supposed to offer this feature, however only Chrome currently seems to really do the decoding of the image in parallel...
This method creates an ImageBitmap, readily available to the GPU to be painted by the canvas. So once you have it, you can paint it on a canvas with almost no overhead.
It is somewhat expected that creating an ImageBitmap from sources like a canvas, an HTML video element, an ImageData, an ImageBitmap, or even an HTML image is done mostly synchronously, because the source's bitmap can actually change synchronously right after the call.
With a Blob source though, the source's bitmap won't change, and browsers can do everything in parallel with no problem.
That's exactly what Chromium browsers do. Unfortunately, Safari does everything synchronously, and Firefox does something quite odd where it apparently locks the UI thread but not the CPU...
// ignite an rAF loop to avoid false signals from "instant refresh ticks"
const rafloop = () => requestAnimationFrame(rafloop);
rafloop();
// start after some time, when the page is well ready
setTimeout(doTest, 250);

const counters = {
  tasks: 0,
  paintingFrames: 0
};
let stopped = false;

const { port1, port2 } = new MessageChannel();
const postTask = (cb) => {
  port1.addEventListener("message", () => cb(), { once: true });
  port1.start();
  port2.postMessage("");
};

function startTaskCounter() {
  postTask(check);
  function check() {
    counters.tasks++;
    if (!stopped) postTask(check);
  }
}

function startPaintingFrameCounter() {
  requestAnimationFrame(check);
  function check() {
    counters.paintingFrames++;
    if (!stopped) requestAnimationFrame(check);
  }
}

async function doTest() {
  const resp = await fetch("https://upload.wikimedia.org/wikipedia/commons/c/cf/Black_hole_-_Messier_87.jpg?r=" + Math.random());
  const blob = await resp.blob();
  startPaintingFrameCounter();
  startTaskCounter();
  const t1 = performance.now();
  const bmp = await createImageBitmap(blob);
  const t2 = performance.now();
  console.log(`decoded in ${t2 - t1}ms`);
  const ctx = document.createElement('canvas').getContext('2d');
  ctx.drawImage(bmp, 0, 0);
  const t3 = performance.now();
  console.log(`Drawn in ${t3 - t2}ms`);
  console.log(counters);
  stopped = true;
}
However, all hope is not lost yet, since current browsers now all "support" this method from Web Workers, so we can actually generate the bitmap in one and still use it on the main thread while waiting for better support of the OffscreenCanvas APIs.
Of course, Safari will not make our lives easy, and we have to special-case it since it can't reuse a transferred bitmap. (Note that they won't even allow us to fetch correctly from StackSnippets, but I can't do much about that.)
const support_bitmap_transfer = testSupportBitmapTransfer();

const getImageBitmapAsync = async (url) => {
  // to reuse the same worker every time, we store it as a property of the function
  const worker = getImageBitmapAsync.worker ??=
    new Worker(URL.createObjectURL(new Blob([`
      onmessage = async ({data: {url, canTransfer}, ports}) => {
        try {
          const resp = await fetch(url);
          const blob = await resp.blob();
          const bmp = await createImageBitmap(blob);
          ports[0].postMessage(bmp, canTransfer ? [bmp] : []);
        }
        catch(err) {
          setTimeout(() => { throw err });
        }
      };
    `], { type: "text/javascript" })));
  // we use a MessageChannel to build a "Promising" Worker
  const { port1, port2 } = new MessageChannel();
  const canTransfer = await support_bitmap_transfer;
  worker.postMessage({ url, canTransfer }, [port2]);
  return new Promise((res, rej) => {
    port1.onmessage = ({ data }) => res(data);
    worker.onerror = (evt) => rej(evt.message);
  });
};
// [demo only]
// ignite an rAF loop to avoid false signals from "instant refresh ticks"
const rafloop = () => requestAnimationFrame(rafloop);
rafloop();
// start after some time, when the page is well ready
setTimeout(() => doTest().catch(() => stopped = true), 250);
const counters = {
tasks: 0,
paintingFrames: 0
};
let stopped = false;
const {port1, port2} = new MessageChannel();
const postTask = (cb) => {
port1.addEventListener("message", () => cb(), { once: true });
port1.start();
port2.postMessage("");
};
function startTaskCounter() {
postTask(check);
function check() {
counters.tasks++;
if (!stopped) postTask(check);
}
}
function startPaintingFrameCounter() {
requestAnimationFrame(check);
function check() {
counters.paintingFrames++;
if (!stopped) requestAnimationFrame(check);
}
}
async function doTest() {
const url = "https://upload.wikimedia.org/wikipedia/commons/c/cf/Black_hole_-_Messier_87.jpg?r=" + Math.random();
startPaintingFrameCounter();
startTaskCounter();
const t1 = performance.now();
// Basically you'll only need this line
const bmp = await getImageBitmapAsync(url);
const t2 = performance.now();
console.log(`decoded in ${t2 - t1}ms`)
const ctx = document.createElement("canvas").getContext("2d");
ctx.drawImage(bmp, 0, 0);
const t3 = performance.now();
console.log(`Drawn in ${t3 - t2}ms`)
console.log(counters);
stopped = true;
}
// Safari doesn't support drawing back ImageBitmaps that have been transferred,
// but never transferring them would be a needless cost for the other browsers,
// so we need to test for it.
// Thanks once again, Safari, for doing things your own way...
async function testSupportBitmapTransfer() {
  const bmp = await createImageBitmap(new ImageData(5, 5));
  const { port1, port2 } = new MessageChannel();
  const transferred = new Promise((res) => port2.onmessage = ({ data }) => res(data));
  port1.postMessage(bmp, [bmp]);
  try {
    document.createElement("canvas")
      .getContext("2d")
      .drawImage(await transferred, 0, 0);
    return true;
  }
  catch (err) {
    return false;
  }
}
Or without all the measuring fluff and the Safari special handling:
const getImageBitmapAsync = async (url) => {
  // to reuse the same worker every time, we store it as a property of the function
  const worker = getImageBitmapAsync.worker ??=
    new Worker(URL.createObjectURL(new Blob([`
      onmessage = async ({data: {url}, ports}) => {
        try {
          const resp = await fetch(url);
          const blob = await resp.blob();
          const bmp = await createImageBitmap(blob);
          ports[0].postMessage(bmp, [bmp]);
        }
        catch(err) {
          setTimeout(() => { throw err });
        }
      };
    `], { type: "text/javascript" })));
  // we use a MessageChannel to build a "Promising" Worker
  const { port1, port2 } = new MessageChannel();
  worker.postMessage({ url }, [port2]);
  return new Promise((res, rej) => {
    port1.onmessage = ({ data }) => res(data);
    worker.onerror = (evt) => rej(evt.message);
  });
};

(async () => {
  const url = "https://upload.wikimedia.org/wikipedia/commons/c/cf/Black_hole_-_Messier_87.jpg?r=" + Math.random();
  const bmp = await getImageBitmapAsync(url);
  const canvas = document.querySelector("canvas");
  const ctx = canvas.getContext("2d");
  ctx.drawImage(bmp, 0, 0, canvas.width, canvas.height);
})();
<canvas width=250 height=146></canvas>
But notice how just starting a Web Worker is a heavy operation in itself, and how it may be total overkill to use one for only a single image. So be sure to reuse this Worker if you need to resize several images.
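As a side note (not part of the original answer): since the question is about producing a small thumbnail, createImageBitmap() also accepts resize options, so the worker could return an already-downscaled bitmap instead of the full-size one. Browser support for these options varies (Safari in particular), so treat this as a sketch to feature-test rather than rely on; it would replace the plain createImageBitmap(blob) call inside the worker:

// Hypothetical variant: ask for a 240x240 bitmap directly.
// resizeWidth/resizeHeight/resizeQuality are ImageBitmapOptions.
const bmp = await createImageBitmap(blob, {
  resizeWidth: 240,
  resizeHeight: 240,
  resizeQuality: "high"
});
// Drawing this small bitmap onto a 240x240 canvas is then cheap:
// ctx.drawImage(bmp, 0, 0);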
I have a web application of my own, based on the peerjs library (it is a video conference).
I'm trying to make a recording with MediaRecorder, but I'm facing a very unpleasant case.
The code for capturing my desktop stream is the following:
let chooseScreen = document.querySelector('.chooseScreenBtn')
chooseScreen.onclick = async () => {
  let desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
}
I then successfully apply the received desktopStream to a video element in the DOM:
const videoElement = document.querySelector('.videoElement')
videoElement.srcObject = desktopStream
videoElement.muted = false;
videoElement.onloadedmetadata = () => { videoElement.play(); }
For example, I get desktopStream on a page with an active conference where everyone hears and sees each other.
To check the video and audio in desktopStream, I play some video in a video player on the desktop.
I can hear the audio from my desktop, but the audio from the other participants cannot be heard.
Of course, when I put the desktopStream into MediaRecorder, I get a video file with no sound from anyone except my desktop. Any ideas on how to solve this?
Chrome's MediaRecorder API can only record a single audio track per stream.
createMediaStreamSource can take both the desktop audio stream and the microphone stream; by connecting both to one node created with createMediaStreamDestination, you get a single merged audio stream that you can pipe into the MediaRecorder API.
const mergeAudioStreams = (desktopStream, voiceStream) => {
  const context = new AudioContext();
  // Create a couple of sources
  const source1 = context.createMediaStreamSource(desktopStream);
  const source2 = context.createMediaStreamSource(voiceStream);
  const destination = context.createMediaStreamDestination();

  const desktopGain = context.createGain();
  const voiceGain = context.createGain();
  desktopGain.gain.value = 0.7;
  voiceGain.gain.value = 0.7;

  source1.connect(desktopGain).connect(destination);
  // Connect source2
  source2.connect(voiceGain).connect(destination);

  return destination.stream.getAudioTracks();
};
It is also possible to use two or more audio inputs + video input.
window.onload = () => {
  const warningEl = document.getElementById('warning');
  const videoElement = document.getElementById('videoElement');
  const captureBtn = document.getElementById('captureBtn');
  const startBtn = document.getElementById('startBtn');
  const stopBtn = document.getElementById('stopBtn');
  const download = document.getElementById('download');
  const audioToggle = document.getElementById('audioToggle');
  const micAudioToggle = document.getElementById('micAudioToggle');

  if ('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';

  let blobs;
  let blob;
  let rec;
  let stream;
  let voiceStream;
  let desktopStream;

  const mergeAudioStreams = (desktopStream, voiceStream) => {
    const context = new AudioContext();
    const destination = context.createMediaStreamDestination();
    let hasDesktop = false;
    let hasVoice = false;
    if (desktopStream && desktopStream.getAudioTracks().length > 0) {
      // If you don't want to share audio from the desktop, it should still work with just the voice.
      const source1 = context.createMediaStreamSource(desktopStream);
      const desktopGain = context.createGain();
      desktopGain.gain.value = 0.7;
      source1.connect(desktopGain).connect(destination);
      hasDesktop = true;
    }
    if (voiceStream && voiceStream.getAudioTracks().length > 0) {
      const source2 = context.createMediaStreamSource(voiceStream);
      const voiceGain = context.createGain();
      voiceGain.gain.value = 0.7;
      source2.connect(voiceGain).connect(destination);
      hasVoice = true;
    }
    return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
  };

  captureBtn.onclick = async () => {
    download.style.display = 'none';
    const audio = audioToggle.checked || false;
    const mic = micAudioToggle.checked || false;
    desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: audio });
    if (mic === true) {
      voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
    }
    const tracks = [
      ...desktopStream.getVideoTracks(),
      ...mergeAudioStreams(desktopStream, voiceStream)
    ];
    console.log('Tracks to add to stream', tracks);
    stream = new MediaStream(tracks);
    console.log('Stream', stream)
    videoElement.srcObject = stream;
    videoElement.muted = true;
    blobs = [];

    rec = new MediaRecorder(stream, { mimeType: 'video/webm; codecs=vp8,opus' });
    rec.ondataavailable = (e) => blobs.push(e.data);
    rec.onstop = async () => {
      blob = new Blob(blobs, { type: 'video/webm' });
      let url = window.URL.createObjectURL(blob);
      download.href = url;
      download.download = 'test.webm';
      download.style.display = 'block';
    };
    startBtn.disabled = false;
    captureBtn.disabled = true;
    audioToggle.disabled = true;
    micAudioToggle.disabled = true;
  };

  startBtn.onclick = () => {
    startBtn.disabled = true;
    stopBtn.disabled = false;
    rec.start();
  };

  stopBtn.onclick = () => {
    captureBtn.disabled = false;
    audioToggle.disabled = false;
    micAudioToggle.disabled = false;
    startBtn.disabled = true;
    stopBtn.disabled = true;
    rec.stop();
    stream.getTracks().forEach(s => s.stop())
    videoElement.srcObject = null
    stream = null;
  };
};
Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.
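Given those limitations, it can be worth checking whether the browser actually delivered an audio track and warning the user otherwise. A minimal sketch (the warning message is a placeholder, not part of the original answer):

// Hypothetical check inside an async click handler: on platforms that can't
// capture desktop audio, the returned stream simply has no audio track.
async function captureWithAudioCheck() {
  const desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
  if (desktopStream.getAudioTracks().length === 0) {
    // placeholder fallback: let the user know only video (and mic, if requested) will be recorded
    console.warn('Desktop audio could not be captured on this platform/browser.');
  }
  return desktopStream;
}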
I'm trying to capture webcam video on the client side and send frames to the server to process them. I'm a newbie in JS and I'm having some problems.
I tried to use OpenCV.js to get the data, but I didn't understand how to get it. In Python we can do
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
and the frame is a 2D array with the image, but how can I get each frame (as a 2D array) to send it using OpenCV.js?
I have this code on the client side:
<script type="text/javascript">
function onOpenCvReady() {
cv['onRuntimeInitialized'] = () => {
var socket = io('http://localhost:5000');
socket.on('connect', function () {
console.log("Connected...!", socket.connected)
});
const video = document.querySelector("#videoElement");
video.width = 500;
video.height = 400;
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia({ video: true })
.then(function (stream) {
video.srcObject = stream;
video.play();
})
.catch(function (err0r) {
console.log(err0r)
console.log("Something went wrong!");
});
}
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let cap = new cv.VideoCapture(video);
const FPS = 15;
function processVideo() {
let begin = Date.now();
cap.read(src);
handle_socket(src['data']);
// schedule next one.
let delay = 1000 / FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
}
// schedule first one.
setTimeout(processVideo, 0);
function handle_socket(src) {
socket.emit('event', { info: 'I\'m connected!', data: src });
}
}
}
</script>
My solution was:
// Select the HTML video element where the webcam data is
const video = document.querySelector("#videoElement");

// returns a frame encoded in base64
const getFrame = () => {
  const canvas = document.createElement('canvas');
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  canvas.getContext('2d').drawImage(video, 0, 0);
  const data = canvas.toDataURL('image/jpeg');
  return data;
}

// Send data over the socket
function handle_socket(data, dst) {
  socket.emit('event', data, function (res) {
    if (res !== 0) {
      results = res;
    }
  });
}

frame64 = getFrame()
handle_socket(frame64);
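To stream continuously rather than send a single frame, you could wrap the getFrame() and handle_socket() helpers above in a timed loop, similar to the processVideo() approach from the question. This is just a sketch; the 15 FPS target is an example value:

// Hypothetical capture loop: grab and send a frame roughly 15 times per second.
const FPS = 15;
function captureLoop() {
  const begin = Date.now();
  handle_socket(getFrame());
  const delay = Math.max(0, 1000 / FPS - (Date.now() - begin));
  setTimeout(captureLoop, delay);
}
captureLoop();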
I have the following AudioContext() sound object in JavaScript.
Its volume is 100%. I want to play it at 10% volume (where volume = 0.1).
How can I reduce its volume to 10%?
const aCtx = new AudioContext();
let source = aCtx.createBufferSource();
let buf;

fetch('https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav') // can be XHR as well
  .then(resp => resp.arrayBuffer())
  .then(buf => aCtx.decodeAudioData(buf)) // can be a callback as well
  .then(decoded => {
    source.buffer = buf = decoded;
    source.loop = true;
    source.connect(aCtx.destination);
    check.disabled = false;
  });

check.onchange = e => {
  if (check.checked) {
    source.start(0); // start our bufferSource
  } else {
    source.stop(0); // this destroys the buffer source
    source = aCtx.createBufferSource(); // so we need to create a new one
    source.buffer = buf;
    source.loop = true;
    source.connect(aCtx.destination);
  }
};
<label>Start Playing</label>
<input type="checkbox" id="check" disabled><br>
<br>Its volume is 100%. Please help me to reduce it to 10%.
We use GainNodes to control the volume.
var gainNode = aCtx.createGain()
gainNode.gain.value = 0.1 // 10 %
gainNode.connect(aCtx.destination)
// now instead of connecting to aCtx.destination, connect to the gainNode
source.connect(gainNode)
Solution:
const aCtx = new AudioContext();
const gainNode = aCtx.createGain();
gainNode.gain.value = 0.1; // setting it to 10%
gainNode.connect(aCtx.destination);

let source = aCtx.createBufferSource();
let buf;

fetch('https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav') // can be XHR as well
  .then(resp => resp.arrayBuffer())
  .then(buf => aCtx.decodeAudioData(buf)) // can be a callback as well
  .then(decoded => {
    source.buffer = buf = decoded;
    source.loop = true;
    source.connect(gainNode);
    check.disabled = false;
  });

check.onchange = e => {
  if (check.checked) {
    source.start(0); // start our bufferSource
  } else {
    source.stop(0); // this destroys the buffer source
    source = aCtx.createBufferSource(); // so we need to create a new one
    source.buffer = buf;
    source.loop = true;
    source.connect(gainNode);
  }
};
<label>Start Playing</label>
<input type="checkbox" id="check" disabled><br>
<br>Its volume is 100%. Please help me to reduce it to 10%.
You can use createGain of the AudioContext for that purpose, as shown below.
For more information, check out createGain:
https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createGain
const aCtx = new AudioContext();
let source = aCtx.createBufferSource();
let buf;

var gainNode = aCtx.createGain(); // Create a GainNode
gainNode.connect(aCtx.destination); // Connect the gain node to the context's destination

fetch('https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav') // can be XHR as well
  .then(resp => resp.arrayBuffer())
  .then(buf => aCtx.decodeAudioData(buf)) // can be a callback as well
  .then(decoded => {
    source.buffer = buf = decoded;
    source.loop = true;
    source.connect(gainNode); // connect the source to the gain node
    gainNode.gain.value = 1; // 100% volume (1 = full, 0.1 = 10%)
    check.disabled = false;
  });

check.onchange = e => {
  if (check.checked) {
    source.start(0); // start our bufferSource
  } else {
    source.stop(0); // this destroys the buffer source
    source = aCtx.createBufferSource(); // so we need to create a new one
    source.buffer = buf;
    source.loop = true;
    source.connect(gainNode); // connect the source to the gain node
    gainNode.gain.value = 0.1; // 10% volume
  }
};
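One caveat worth adding (not part of the original answer): setting gain.value abruptly while audio is playing can produce an audible click, which is exactly the problem from the first question above. If that matters, you can ramp the change over a few milliseconds instead, using the aCtx and gainNode from the snippet above; the 50 ms ramp is an example value:

// Hypothetical smooth volume change: ramp to 10% over ~50 ms instead of jumping.
const now = aCtx.currentTime;
gainNode.gain.setValueAtTime(gainNode.gain.value, now);
gainNode.gain.linearRampToValueAtTime(0.1, now + 0.05);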
I'm using the Angular $timeout function to call tick() every 512 ms in order to play the data in my audio queue. I'm using this to perform a live audio stream. Sometimes there are cuts in the sound, and I really need to maintain a delta of one second between emitting and receiving sound. So I want to drop some audio data from my queue, corresponding to the duration of each cut.
Do you know if there is a way to listen for those cuts on audioContext.destination, like:
audioContext.destination.oncuts = function(duration) {
  audioQueue.read(duration);
};
Here are my tick and audioQueue functions:
var tick = function() {
  $scope.soundclock = Date.now();
  $timeout(tick, $scope.tickInterval);
  if (startStream && isFocused) {
    if (isChrome === true || isOpera === true || isIE === true || isFirefox === true) {
      if (audioQueue.length() >= size) {
        float32 = audioQueue.read(size);
        source = audioContext.createBufferSource();
        audioBuffer = audioContext.createBuffer(1, size, sampleRate);
        data = audioBuffer.getChannelData(0);
        for (var i = 0; i < size; i++) {
          data[i] = float32[i];
        }
        source.buffer = audioBuffer;
        source.connect(audioContext.destination);
        source.start(0);
      }
    }
    if (isSafari === true) {
      if (audioQueue.length() >= size) {
        float32 = audioQueue.read(size);
        source = audioContext.createBufferSource();
        audioBuffer = audioContext.createBuffer(1, size, sampleRate);
        data = audioBuffer.getChannelData(0);
        for (var j = 0; j < size; j++) {
          data[j] = float32[j];
        }
        source.buffer = audioBuffer;
        source.connect(audioContext.destination);
        source.noteOn(0);
      }
    }
  }
};

var audioQueue = {
  buffer: new Float32Array(0),

  write: function(newAudio) {
    currentQLength = this.buffer.length;
    newBuffer = new Float32Array(currentQLength + newAudio.length);
    d = Date.now() - date;
    console.log('Queued ' + newBuffer.length + ' samples. ');
    date = Date.now();
    newBuffer.set(this.buffer, 0);
    newBuffer.set(newAudio, currentQLength);
    this.buffer = newBuffer;
  },

  read: function(nSamples) {
    samplesToPlay = this.buffer.subarray(0, nSamples);
    this.buffer = this.buffer.subarray(nSamples, this.buffer.length);
    console.log('Queue at ' + this.buffer.length + ' samples. ');
    return samplesToPlay;
  },

  length: function() {
    return this.buffer.length;
  }
};
You need to not rely on JavaScript timers (which are, for audio purposes, horribly inaccurate) and should schedule your ticks ahead of time. Check out http://www.html5rocks.com/en/tutorials/audio/scheduling/, which I wrote a while ago about scheduling audio.
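A minimal sketch of that look-ahead approach, adapted to the audioQueue above (LOOKAHEAD and TIMER_MS are example values; size, sampleRate, and audioContext are assumed to come from the question's code):

// Hypothetical look-ahead scheduler: a coarse JS timer wakes up often,
// but buffers are scheduled back to back on the precise audioContext clock.
var nextTime = 0;
var LOOKAHEAD = 0.2;  // schedule up to 200 ms ahead
var TIMER_MS = 25;    // coarse timer period

function scheduler() {
  if (nextTime < audioContext.currentTime) {
    nextTime = audioContext.currentTime; // we fell behind (a "cut"); resync
  }
  while (nextTime < audioContext.currentTime + LOOKAHEAD && audioQueue.length() >= size) {
    var float32 = audioQueue.read(size);
    var audioBuffer = audioContext.createBuffer(1, size, sampleRate);
    audioBuffer.getChannelData(0).set(float32);
    var source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(audioContext.destination);
    source.start(nextTime); // precise start time on the audio clock
    nextTime += audioBuffer.duration;
  }
  setTimeout(scheduler, TIMER_MS);
}
scheduler();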