How to fix “cracking” audio when using Audio Worklets? - javascript

I’m trying to understand how AudioWorklet works and have run some tests.
So far, I have a huge “cracking” problem when I let the browser play the sound in the background and do something else (e.g. opening a CPU-heavy application like Photoshop or VSCode and move the window around).
At first I thought it was a hardware problem. I upgraded to Catalina, removed any system audio extension I found, but it’s the same on Android, and some other friends’ computers (Mac, PC).
I’m using Version 1.0.1 Chromium: 78.0.3904.108 (Official Build) (64-bit) myself.
This YouTube video demonstrates the cracking audio issue.
I made two CodePen demos you can test here:
Web Audio Cracks (Vanilla + no Worklet):
// Demo 1 (no AudioWorklet): fetch an MP3 over XHR, decode it, and wire up
// Play/Stop buttons through a gain node.
const ctx = new(window.AudioContext || window.webkitAudioContext)();
const request = new XMLHttpRequest();
const gainNode = ctx.createGain();
// NOTE(review): the source node is created once at load time. An
// AudioBufferSourceNode can only be start()ed once, so Play will throw an
// InvalidStateError after Stop — acceptable for this one-shot crackle test.
const sourceNode = ctx.createBufferSource();
request.open('GET', 'https://poppublic.s3.amazonaws.com/other/2.mp3', true);
request.responseType = 'arraybuffer';
request.onload = () => {
// Decode the downloaded ArrayBuffer into an AudioBuffer (callback form).
ctx.decodeAudioData(request.response, buffer => {
sourceNode.buffer = buffer;
console.log(sourceNode.buffer.sampleRate);
});
};
request.onerror = function(e) {
console.log('HTTP error', e);
};
request.send();
// Implicit globals so the inline onClick attributes in the HTML can reach them.
play = () => {
sourceNode.connect(gainNode);
gainNode.connect(ctx.destination);
sourceNode.start(0);
}
stop = () => {
sourceNode.stop(0);
}
<button onClick="play()">Play</button>
<button onClick="stop()">Stop</button>
Web Audio Cracks (Vanilla + Worklet):
// Demo 2 (with AudioWorklet): load the worklet module first, then fetch and
// decode the MP3. Playback still goes source -> gain -> destination.
const ctx = new(window.AudioContext || window.webkitAudioContext)();
const request = new XMLHttpRequest();
let gainNode = null;
let sourceNode = null;
let buffer = null;
let worklet = null;
try {
const script = 'https://poppublic.s3.amazonaws.com/other/worklet/processor.js';
ctx.audioWorklet.addModule(script).then(() => {
// NOTE(review): the worklet node is created but never connected into the
// audio graph anywhere in this snippet, so it cannot affect the output.
worklet = new AudioWorkletNode(ctx, 'popscord-processor')
request.open('GET', 'https://poppublic.s3.amazonaws.com/other/2.mp3', true);
request.responseType = 'arraybuffer';
request.onload = () => {
ctx.decodeAudioData(request.response, buff => {
buffer = buff;
console.log(buff.sampleRate);
});
};
request.onerror = function(e) {
console.log('HTTP error', e);
};
request.send();
});
// NOTE(review): a synchronous try/catch does not catch rejections of the
// addModule() promise chain; a .catch() on the promise would be needed.
} catch (e) {
// NOTE(review): `this.setState` looks like leftover React component code;
// at top level `this` is not a component, so reaching this line would throw.
this.setState({
moduleLoaded: false
});
console.log('Failed to load module', e);
}
// Implicit globals used by the inline onClick attributes in the HTML.
// Fresh gain/source nodes are created on every play, so replay works here
// (unlike Demo 1, where the single source node can only be started once).
play = () => {
stop();
gainNode = ctx.createGain();
sourceNode = ctx.createBufferSource();
sourceNode.buffer = buffer;
sourceNode.connect(gainNode);
gainNode.connect(ctx.destination);
sourceNode.start(0);
}
stop = () => {
try {
sourceNode.disconnect();
gainNode.disconnect();
sourceNode.stop(0);
} catch (e) {
// First call happens before any node exists; swallow and log.
console.log(e.message)
}
}
<button onClick="play()">Play</button>
<button onClick="stop()">Stop</button>
The piano MP3 you’ll hear is a 48000Hz / 32bits / 320kb audio recorded in studio.
Before filing any bugs, I need to make sure my code is correct. Maybe I’m not chaining the things the way it should.

When using a worklet, the default priority of the audio thread is normal. This is not good for audio as you've seen by running webaudio and then moving a window around.
What you can do is go to chrome://flags, search for worklet and enable the flag named "Use realtime priority thread for Audio Worklet". This should help on mac and windows. I don't know if it will make a difference on Android.
If you are hearing cracks with WebAudio without a worklet, as you do in your first codepen example, then that's unexpected and you really should file an issue on that.

Related

javascript audio api play blob url

I worked out a testing function for Web Audio API to play url blob:
// trigger takes a sound play function
function loadSound(url, trigger) {
let context = new (window.AudioContext || window.webkitAudioContext)();
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
// Decode asynchronously
request.onload = function() {
context.decodeAudioData(request.response, function(buffer) {
trigger(()=>{
// play sound
var source = context.createBufferSource(); // creates a sound source
source.buffer = buffer; // tell the source which sound to play
source.connect(context.destination); // connect the source to the context's destination (the speakers)
source.start();
});
}, e=>{
console.log(e);
});
}
request.send();
}
loadSound(url, fc=>{
window.addEventListener('click', fc);
});
this is just for test, actually, I need a function to call to directly play the sound from url, if any current playing, quit it.
// Broken version (the subject of this question): a brand-new AudioContext is
// created on every click and the previous one is merely suspended.
// NOTE(review): browsers limit the number of live AudioContexts per page and
// may create new ones in a suspended state under autoplay policies —
// presumably why this version stays silent; the accepted answer below reuses
// a single context instead. TODO confirm in the target browser.
let ac;
function playSound(url) {
if(ac){ac.suspend()}
let context = new (window.AudioContext || window.webkitAudioContext)();
let request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
// Decode asynchronously
request.onload = function() {
context.decodeAudioData(request.response, function(buffer) {
// play sound
let source = context.createBufferSource(); // creates a sound source
source.buffer = buffer; // tell the source which sound to play
source.connect(context.destination); // connect the source to the context's destination (the speakers)
// source.noteOn(0); // play the source now
ac = context;
source.start();
}, e=>{
console.log(e);
});
}
request.send();
}
window.addEventListener('click',()=>{
playSound(url);
});
I did not modify much; however, in the second version the trigger fires fine, but it always produces no sound.
I suspect it may be this variable scope issue, I will be very glad if you can help me debug it.
since the blob url is too long, I put two versions in code pen.
working version
not working version
Instead of calling suspend on the stored AudioContext, save a reference to the AudioBufferSourceNode that is currently playing. Then check if the reference exists and call stop() whenever you play a new sound.
// One shared AudioContext; at most one source plays at a time.
const context = new AudioContext();
let bufferSource = null;

/**
 * Fetches and decodes the audio at `url`, stopping any currently playing
 * source first so only one sound plays at a time.
 * @param {string} url - URL of an audio file the browser can decode.
 */
function playSound(url) {
  if (bufferSource !== null) {
    bufferSource.stop();
    bufferSource = null;
  }
  let request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  request.onload = function() {
    context.decodeAudioData(request.response, (buffer) => {
      const source = context.createBufferSource();
      source.buffer = buffer;
      source.connect(context.destination);
      source.start();
      bufferSource = source;
      source.addEventListener('ended', () => {
        // FIX: only clear the shared reference if this source is still the
        // current one. The original unconditionally nulled `bufferSource`,
        // so a late 'ended' event (which fires asynchronously after stop())
        // could discard the reference to a newer, still-playing source.
        if (bufferSource === source) {
          bufferSource = null;
        }
      });
    }, (error) => {
      console.log(error);
    });
  }
  request.send();
}
window.addEventListener('click', () => {
  playSound(url);
});

XMLHTTPrequest plain text to blob (video)

I achieved to get a video from php using this code :
// Download the whole video, then hand it to the <video> element via a blob URL.
var some_video_element = document.querySelector('video')
var req = new XMLHttpRequest();
req.onload = function () {
var blob_uri = URL.createObjectURL(this.response);
some_video_element.src = blob_uri;
// NOTE(review): addEventListener takes the event name without the "on"
// prefix — 'canplaythrough', not 'oncanplaythrough' — so this listener
// never fires and the object URL is never revoked.
some_video_element.addEventListener('oncanplaythrough', (e) => {
URL.revokeObjectURL(blob_uri);
});
};
req.open("get", "vid.php", true);
// NOTE(review): overrideMimeType() does not change the response type; to get
// a Blob in this.response you would set `req.responseType = 'blob'` — verify
// this.response is actually a Blob here.
req.overrideMimeType('blob');
req.send(null);
However, the loading takes a long time, so I want to show the data as soon as I get it. The Mozilla documentation indicates we can use text/plain or "" as the MIME type to read the text while it is in progress. However, I can't manage to convert text/plain to video/mp4 using a blob. Below is the code that doesn't work: it tries to show the video once some of it is available while the rest is still downloading.
// Attempt at progressive playback (stated not to work in the question).
var some_video_element = document.querySelector('video')
var req = new XMLHttpRequest();
// NOTE(review): onprogress fires many times; each firing builds a new blob
// from the partial base64 text and reassigns video.src, which resets
// playback. Partial base64/MP4 data is also generally not decodable —
// progressive playback needs MediaSource (see the accepted answer).
req.onprogress = function () {
var text = b64toBlob(Base64.encode(this.response), "video/mp4");
var blob_uri = URL.createObjectURL(text);
some_video_element.src = blob_uri;
// NOTE(review): wrong event name — should be 'canplaythrough' (no "on"
// prefix), so the object URL is never revoked.
some_video_element.addEventListener('oncanplaythrough', (e) => {
URL.revokeObjectURL(blob_uri);
});
};
req.onload = function () {
var text = b64toBlob(this.response, "video/mp4");
var blob_uri = URL.createObjectURL(text);
some_video_element.src = blob_uri;
some_video_element.addEventListener('oncanplaythrough', (e) => {
URL.revokeObjectURL(blob_uri);
});
};
req.open("get", "vid.php", true);
req.overrideMimeType('text\/plain');
req.send(null);
Thanks.
NB : This JavaScript is fetching for this php code : https://codesamplez.com/programming/php-html5-video-streaming-tutorial
But echo data has been changed by echo base64_encode(data);
If you use the Fetch API instead of XMLHttpRequest you can consume the response as a ReadableStream which can be fed into a SourceBuffer. This will allow the video to be playable as soon as it starts to load instead of waiting for the full file to download. This does not require any special video container formats, back-end processing or third-party libraries.
// Stream a WebM file into a <video> element via MediaSource so playback can
// begin before the download finishes.
const vid = document.getElementById('vid');
const format = 'video/webm; codecs="vp8,vorbis"';
const mediaSource = new MediaSource();
let sourceBuffer = null;
// Once the MediaSource opens we can attach a SourceBuffer and start fetching.
mediaSource.addEventListener('sourceopen', event => {
sourceBuffer = mediaSource.addSourceBuffer(format);
fetch('https://bes.works/dev/samples/test.webm')
.then(response => process(response.body.getReader()))
.catch(err => console.error(err));
}); vid.src = URL.createObjectURL(mediaSource);
// Pumps chunks from the network reader into the SourceBuffer: append one
// chunk, wait for 'updateend' (appendBuffer is async and rejects overlapping
// appends), then read the next chunk until the stream is done.
function process(stream) {
return new Response(
new ReadableStream({
start(controller) {
async function read() {
let { done, value } = await stream.read();
if (done) { controller.close(); return; }
sourceBuffer.appendBuffer(value);
sourceBuffer.addEventListener(
'updateend', event => read(),
{ once: true }
);
} read();
}
})
);
}
video { width: 300px; }
<video id="vid" controls></video>
As indicated in the comments, you are missing some decent components.
You can implement what you are asking for but you need to make some changes. Following up on the HTML5 streaming API you can create a stream that will make the video using segments you fetch from the server.
Something to keep in mind is the HLS or DASH protocol that already exists can help, looking at the HLS protocol can help as it's simple to use with the idea of segments that can reach out to your server and just decode your base64'd feed.
https://videojs.github.io/videojs-contrib-hls/

Web Audio API source.start() only plays the second time it is called (in safari)

I am learning how to use the Web Audio API and I am experiencing a problem that I am not sure how to resolve. I am not sure if it is because I am a beginner at this, or there is something going on that I don't understand.
Basically I am creating a function that plays a short mp3. It should http GET to load MP3 into the audio buffer the first time the audio is played. (Subsequently requests for the same audio don't need to re-fetch the mp3)
The code below works completely correctly in Chrome. However in Safari playAudio('filename') will not play audio the very first time that it is invoked. Every single time after that it works fine. Any expert advise from someone more experienced than me would be very much appreciated.
<script>
// Shared AudioContext for the whole page, created on window load.
// NOTE(review): a context created outside a user gesture may start in a
// suspended state under autoplay policies — likely related to the Safari
// first-click issue this question describes; verify in Safari.
var context;
function init() {
try {
// Normalize the prefixed WebKit constructor onto window.AudioContext.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
context = new AudioContext();
}
catch(e) {
alert("Your browser doesn't support Web Audio API");
}
}
window.addEventListener('load', init);
// Fetches the MP3 for word `w`, decodes it, and immediately plays it via
// playAudioBuffer (which also caches the decoded buffer).
function loadAudio(w) {
var audioURL='https://biblicaltext.com/audio/' + w + '.mp3';
var request = new XMLHttpRequest();
request.open("GET", audioURL, true);
request.responseType = 'arraybuffer';
request.onload = function() {
//console.log("update", request.readyState, request.status)
// The readyState check is redundant inside onload (onload only fires at
// readyState 4), but harmless.
if (request.readyState === 4) {
if (request.status === 200) {
//take the audio from http request and decode it in an audio buffer
context.decodeAudioData(request.response, function(buffer) {
if(buffer) {
console.log(w, buffer);
playAudioBuffer(w, buffer);
} else {
console.log("audio buffer decoding failed")
}
}, function(ec) {
console.log("http request buffer decoding failed")
});
} else {
console.log("get", audioURL, "failed", request.status)
}
}
}
request.send();
}
// One-entry cache: the most recently decoded buffer and the word it belongs to.
var audioBuffer;
var currentWord;
// Plays word `w`: from the cache if it is the word last decoded, otherwise
// kicks off an async load-decode-play cycle.
// NOTE(review): on the first request playback happens inside async XHR/decode
// callbacks, outside the user-gesture call stack — presumably why Safari
// blocks the very first play; verify against Safari's gesture requirement.
function playAudio(w) {
if (typeof audioBuffer === 'undefined' || audioBuffer == null || currentWord != w) {
audioBuffer = null
currentWord = ""
console.log("reqest load of different word", w)
loadAudio(w)
} else {
playAudioBuffer(w, audioBuffer)
}
}
// Caches the decoded buffer for `w` and plays it through a fresh source node
// (AudioBufferSourceNodes are single-use, so one is created per playback).
function playAudioBuffer(w, buffer) {
audioBuffer = buffer
currentWord = w
var source = context.createBufferSource();
source.buffer = audioBuffer;
source.connect(context.destination);
source.start();
console.log('playing', currentWord);
}
</script>
Play ἦν
<br>
Play ὥστε
<br>
Play οὐρανός
<br>
Play υἱός
<br>
Play εἷς
<br>
Am I hitting some kind of weird situation where Safari is blocking sound because the clicking of the link is disconnected from the loading/playing of the audio by a fraction of a second?
It turns out the way to 'unlock' audio is to play a silent/hidden sound when the user first interacts with the page to make sure future requests for audio will function correctly.
https://paulbakaus.com/tutorials/html5/web-audio-on-ios/
Here is what I used:
// Unlock audio on iOS: play a short silent buffer inside the first user
// gesture so that later programmatic playback is allowed.
window.addEventListener('touchstart', function() {
// create empty buffer (1 channel, 1 frame, 22050 Hz — effectively silence)
var buffer = myContext.createBuffer(1, 1, 22050);
var source = myContext.createBufferSource();
source.buffer = buffer;
// connect to output (your speakers)
source.connect(myContext.destination);
// FIX: noteOn() was removed from the Web Audio spec; start() is the
// standard replacement. Keep noteOn as a fallback for very old WebKit.
if (source.start) {
source.start(0);
} else {
source.noteOn(0);
}
}, false);
I realize I am probably not the first person to have asked this on SO, however the search keywords (safari play first time vs second time) don't turn up anything on SO, so I am inclined to answer and leave this up. If the community thinks deleting my noob question would be better, then I'm happy to do that.

Decoding audio data of Mediarecorder is failed on chrome

I using MediaRecorder to record microphone. The default format of MediaRecorder in chrome is video/webm. Here is short example:
// Record ~3s of microphone audio with MediaRecorder, then round-trip the blob
// through a blob URL + XHR and try to decode it with the Web Audio API.
navigator.mediaDevices.getUserMedia({audio: true,video: false})
.then(function(stream) {
var recordedChunks = [];
var recorder = new MediaRecorder(stream);
// 10 ms timeslice: dataavailable fires roughly every 10 ms with a chunk.
recorder.start(10);
recorder.ondataavailable = function (event) {
if (event.data.size > 0) {
recordedChunks.push(event.data);
} else {
// ...
}
}
setTimeout(function(){
recorder.stop();
// NOTE(review): the blob is assembled synchronously right after stop(),
// but stop() delivers its final chunk via an async dataavailable event —
// that last chunk may be missing from recordedChunks here.
var blob = new Blob(recordedChunks, {
"type": recordedChunks[0].type
});
var blobUrl = URL.createObjectURL(blob);
var context = new AudioContext();
// Fetch the blob back as an ArrayBuffer so decodeAudioData can consume it.
var request = new XMLHttpRequest();
request.open("GET", blobUrl, true);
request.responseType = "arraybuffer";
request.onload = function () {
// Per the accepted answer: decoding MediaRecorder WebM output failed in
// Chrome before version 58 (a Chrome bug), not because of this code.
context.decodeAudioData(
request.response,
function (buffer) {
if (!buffer) {
alert("buffer is empty!");
}
var dataArray = buffer.getChannelData(0);
//process channel data...
context.close();
},
function (error) {
alert(error);
}
);
};
request.send();
}, 3000);
})
.catch(function(error) {
console.log('error: ' + error);
});
This code throws the error "Uncaught (in promise) DOMException: Unable to decode audio data" from context.decodeAudioData in Chrome only.
What is wrong here, and how can I fix it in Chrome?
Here is working example in plunker: plunker
Ok... There is no fix for the issue. This is a chrome bug and you can see it here
The fix of decoding audio from MediaRecorder will be available on chrome version 58. I already tested it on 58-beta and it works.

Web Audio Api - Download edited MP3

I'm currently editing my mp3 file with multiple effects like so
// Detach the source from the destination, thread it through every filter in
// filters1 in order, then reattach the tail of the chain to the destination.
const mainVerse = document.getElementById('audio1');
source.disconnect(audioCtx.destination);
let tail = source;
for (const key in filters1) {
  tail.connect(filters1[key]);
  tail = filters1[key];
}
tail.connect(audioCtx.destination);
The mp3 plays accordingly on the web with the filters on it. Is it possible to create and download a new mp3 file with these new effects, using web audio api or any writing to mp3 container javascript library ? If not whats the best to solve this on the web ?
UPDATE - Using OfflineAudioContext
Using the sample code from https://developer.mozilla.org/en-US/docs/Web/API/OfflineAudioContext/oncomplete
I've tried using the offline node like so;
// Decode the MP3 with a normal AudioContext, render it through an
// OfflineAudioContext, then play the rendered buffer and try to capture it
// with Recorder.js for download.
var audioCtx = new AudioContext();
// 2 channels, 40 s at 44.1 kHz of offline render capacity.
var offlineCtx = new OfflineAudioContext(2,44100*40,44100);
osource = offlineCtx.createBufferSource();
function getData() {
request = new XMLHttpRequest();
request.open('GET', 'Song1.mp3', true);
request.responseType = 'arraybuffer';
request.onload = function() {
var audioData = request.response;
audioCtx.decodeAudioData(audioData, function(buffer) {
myBuffer = buffer;
osource.buffer = myBuffer;
osource.connect(offlineCtx.destination);
osource.start();
//source.loop = true;
offlineCtx.startRendering().then(function(renderedBuffer) {
console.log('Rendering completed successfully');
// Play the offline-rendered result through a fresh live context.
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var song = audioCtx.createBufferSource();
song.buffer = renderedBuffer;
song.connect(audioCtx.destination);
song.start();
// NOTE(review): Recorder.js captures audio flowing through a live node,
// but exportWAV is called immediately after start(), before any audio
// has actually played — presumably why the downloaded file is empty.
// The answer below suggests recording from `osource` instead.
rec = new Recorder(song, {
workerPath: 'Recorderjs/recorderWorker.js'
});
rec.exportWAV(function(e){
rec.clear();
Recorder.forceDownload(e, "filename.wav");
});
}).catch(function(err) {
console.log('Rendering failed: ' + err);
// Note: The promise should reject when startRendering is called a second time on an OfflineAudioContext
});
});
}
request.send();
}
// Run getData to start the process off
getData();
The recorder still downloads an empty file; I'm using the song source as the source for the recorder. The song plays fine with this code, but the recorder doesn't capture it.
Use https://github.com/mattdiamond/Recorderjs to record a .wav file. Then use https://github.com/akrennmair/libmp3lame-js to encode it to .mp3.
There's a nifty guide here, if you need a hand: http://audior.ec/blog/recording-mp3-using-only-html5-and-javascript-recordmp3-js/
UPDATE
Try moving
rec = new Recorder(song, {
workerPath: 'Recorderjs/recorderWorker.js'
});
so that it is located above the call to start rendering, and connect it to osource instead, like so:
rec = new Recorder(osource, {
workerPath: 'Recorderjs/recorderWorker.js'
});
osource.connect(offlineCtx.destination);
osource.start();
offlineCtx.startRendering().then(function(renderedBuffer) {
.....

Categories

Resources