I have a dummy audio track which is defined as follows:
let silence = () => {
  let ctx = new AudioContext(), oscillator = ctx.createOscillator();
  let dst = oscillator.connect(ctx.createMediaStreamDestination());
  oscillator.start();
  return Object.assign(dst.stream.getAudioTracks()[0], {enabled: false});
}
The problem is that whenever I try to disable the corresponding WebRTC audio track in the senders, the audio keeps 'playing' on the other end, even though the track's reported status, i.e. its 'enabled' flag, is 'false'.
Are there any callbacks that need to be supported by the 'virtual' track, so that I can turn off the oscillator etc.?
Either I'm doing something terribly wrong, or it's not great that this kind of 'muting' needs to be supported from within the track's own source.
Ideas?
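For context, this is roughly how I disable the track on the sending side (just a sketch; pc is my RTCPeerConnection):
const sender = pc.getSenders().find(s => s.track && s.track.kind === 'audio');
sender.track.enabled = false; // reported as false, but the remote end still hears audio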
Maybe you can try the following code; it seems that setting the enabled flag to false is not enough:
let silence = () => {
  let ctx = new AudioContext(), oscillator = ctx.createOscillator();
  let dst = oscillator.connect(ctx.createMediaStreamDestination());
  oscillator.start();
  let track = Object.assign(dst.stream.getAudioTracks()[0], {enabled: false});
  return {
    track,
    stop: function() {
      oscillator.stop();
      oscillator.disconnect();
    }
  };
};
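A hypothetical usage sketch (pc stands for an existing RTCPeerConnection; the names are mine, not from the question):
const { track, stop } = silence();
pc.addTrack(track, new MediaStream([track]));
// later, when the silent track is no longer needed:
stop(); // the oscillator stops generating samples at the source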
To implement your own custom stopping mechanism, you can use something like this:
let silence = () => {
  let ctx = new AudioContext(), oscillator = ctx.createOscillator();
  let dst = oscillator.connect(ctx.createMediaStreamDestination());
  oscillator.start();
  let track = Object.assign(dst.stream.getAudioTracks()[0], {enabled: false});
  let trackPlaying = true;
  return {
    track,
    setEnabled: function(enabled) {
      trackPlaying = enabled;
      if (!trackPlaying) {
        // note: an OscillatorNode is one-shot - once stopped it cannot be
        // restarted, so re-enabling would require creating a new oscillator
        oscillator.stop();
        oscillator.disconnect();
      }
    }
  };
};
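Usage would then look something like this (again assuming an existing RTCPeerConnection named pc):
const silent = silence();
pc.addTrack(silent.track, new MediaStream([silent.track]));
// mute at the source instead of relying on the enabled flag alone:
silent.setEnabled(false);
Note that because the oscillator cannot be restarted once stopped, calling setEnabled(true) afterwards would require building a new track.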
I have a couple of issues - the app is a vanilla JavaScript app for school that maps audio to each alphabetical key. The issues are as follows:
1. It takes quite a while to load the audio - the app proceeds as normal but with no audio. It usually runs as intended after a few minutes or a few refreshes, but I want to get rid of that period. I tried to fix it using (document.readyState === "interactive") and if (allAudio.entries(audio => (audio.readyState === 4))).
2. It doesn't take the first key down event to change the intro slides, it takes the second, and I'm not sure how to fix this either. The slides are in an array that goes through each item and then sets its display to none. I also tried adding the keyboard event listener earlier, but to no avail.
To look at the bugs yourself, the live link is here: https://haeuncreative.github.io/mosatic/
relevant code:
if (document.readyState === "interactive") {
  const allAudio = document.querySelectorAll("audio")
  console.log(allAudio)
  if (allAudio.entries(audio => (audio.readyState === 4)))
    window.addEventListener('load', function() {
      const keysDown = new KeyDownHandler()
      const canvas = document.querySelector('canvas');
      const context = canvas.getContext('2d');
      context.fillStyle = '#967bb6'
      context.fillRect(0, 0, 1000, 562.5)
      const clickDown = new ClickHandler(keysDown)
    });
}
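(Something like this sketch is the kind of check I was going for, assuming every audio element eventually fires canplaythrough - though I'm not sure it's the right approach:)
const allAudio = document.querySelectorAll("audio");
// resolve once every element reports it can play through without stalling
Promise.all([...allAudio].map(audio => new Promise(resolve => {
  if (audio.readyState === 4) return resolve();
  audio.addEventListener("canplaythrough", resolve, { once: true });
}))).then(() => {
  // it should be safe to start the audio-dependent setup here
});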
export default class KeyDownHandler {
  constructor() {
    this.addPressListener()
    // audio
    this.soundBank = new AudioBank
    this.soundBank.createBank(CONSTANTS.KEY_ALPHABET)
    // visual // intro
    this.intro1 = document.querySelector('#intro1')
    this.intro2a = document.querySelector('#intro2a')
    this.intro2b = document.querySelector('#intro2b')
    this.intro2c = document.querySelector('#intro2c')
    this.intro3 = document.querySelector('#intro3')
    this.intro4 = document.querySelector('#intro4')
    this.introBank = [
      this.intro1,
      this.intro2a,
      this.intro2b,
      this.intro2c,
      this.intro3,
      this.intro4
    ]
    // visual // main
    this.body = document.querySelector('body')
    this.canvas = document.querySelector('canvas');
    this.context = this.canvas.getContext('2d');
    this.background_colors = ["#95c88c", "#967bb6", "#A7C7E7", "#FF6961"]
    this.aniBank = new AniBank;
    // recording/user interaction
    this.keys = [];
    this.durations = [];
    this.recording = false;
  }

  introSwitch() {
    this.soundBank.playSpace()
    if (this.currentSlide) {
      this.currentSlide.style.animation = "fadeOut 1s"
      this.currentSlide.style.display = "none"
      this.currentSlide = ""
    }
    if (!this.introBank.length) {
      slide.style.display = "none"
      this.introFinish = true
    }
    if (this.introBank.length) {
      let slide = (this.introBank.shift())
      slide.style.filter = "brightness(60%)"
      console.log(slide)
      if (slide === this.intro4 || !slide) {
        this.canvas.style.display = "flex";
        this.addKeyListeners()
      }
      if (slide.style.display = "none") {
        slide.style.display = "flex"
        slide.style.filter = "brightness(60%)"
      }
      this.currentSlide = slide;
      slide.style.filter = "brightness(100%)"
    }
  }

  addPressListener() {
    window.addEventListener('keypress', e => {
      e.preventDefault()
      e.stopImmediatePropagation()
      this.introSwitch()
      if (this.currentSlide === this.intro4) {
        this.currentSlide.style.display = "none"
        window.removeEventListener("keypress", this.introSwitch)
        this.addKeyListeners()
      }
    })
  }
}
Introduction
Greetings. I have looked at some similar questions on this platform and none of them seem to match my problem, or maybe I missed something, but I will give it my best try because I am new to this platform.
Overview
This audio app maps the keyboard keys to audio samples.
Problem
There is an audio delay when pressing the keys rapidly: after each press, the sample keeps playing for 4 to 6 seconds before another sound is called, and the sound is not killed immediately when the keyboard button is released.
Code
This is what I tried:
const dump = console.log.bind(console);

const sample = new Object
({
  drum: "/samples/drums/trance01/BD_Trance.wav",
  clap: "/samples/drums/trance01/Clap_trance.wav",
});

const keymap = new Object
({
  "KeyD": "drum",
  "KeyC": "clap",
});

Object.keys(sample).forEach((smpl)=>
{
  let node = document.createElement("audio");
  node.id = (smpl+"Sample");
  node.className = "instrument";
  node.src = sample[smpl];
  document.body.appendChild(node);
});

document.body.addEventListener("keydown", function keyListener(event)
{
  event.preventDefault(); // kill it
  event.stopPropagation(); // seal its ashes in a capsule
  event.stopImmediatePropagation(); // and hurl it into the sun!
  let key = event.code;
  // console.log("pressed: "+key);
  let tgt = keymap[key];
  if (!tgt){ dump(key+" - is unused"); return };
  var intervalID = setInterval(myCallback, 500, 'Parameter 1', 'Parameter 2');
  function myCallback(a, b)
  {
    let nde = document.getElementById(tgt+"Sample");
    nde.play();
    dump("play: "+tgt);
  }
});
Following what happens in the logic, and how elements respond in their own way (and time), is important for analysing what the issue may be.
In this case I believe it can be solved by cloning the source node, rather than expecting one element to play multiple instances of itself by itself (or that is what I believe should happen) - but we can force it to:
const dump = console.log.bind(console);

const sample = new Object
({
  drum: "/samples/drums/trance01/BD_Trance.wav",
  clap: "/samples/drums/trance01/Clap_trance.wav",
});

const keymap = new Object
({
  "KeyD": "drum",
  "KeyC": "clap",
});

(Object.keys(sample)).forEach((item,indx)=>
{
  let node = document.createElement("audio");
  node.id = (item+"Sample");
  node.className = "instrument";
  node.src = sample[item];
  document.body.appendChild(node);
});

function keyHandler(event)
{
  if (event.ctrlKey){ return }; // whew .. that was annoying as f*ck
  event.preventDefault(); // kill it
  event.stopPropagation(); // seal its ashes in a capsule
  event.stopImmediatePropagation(); // and hurl it into the sun!
  let key = event.code;
  let tag = keymap[key];
  let nde,tgt;
  tgt = document.getElementById(tag+"Sample");
  if (!tgt){ dump(key+" - is unused"); return };
  // clone the original element so every key press gets its own player
  nde = tgt.cloneNode(true);
  nde.id = (tgt.id+"Clone");
  nde.addEventListener("ended",function()
  {
    // remove the clone once it has finished playing
    this.parentNode.removeChild(this);
  });
  document.body.appendChild(nde);
  nde.play();
  dump("playing: "+tag);
}

document.body.addEventListener("keydown", keyHandler);
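One nice property of this approach is that each clone removes itself on its "ended" event, so rapid presses don't accumulate stale elements in the DOM. If latency still matters after this change, decoding the samples into AudioBuffers and playing them through the Web Audio API (as in the AudioBuffer-based answer further down this page) avoids the per-press element creation entirely.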
I would like to get webcam input as a ReadableStream in the browser to pipe to a WritableStream. I have tried using the MediaRecorder API, but that stream is chunked into separate blobs while I would like one continuous stream. I'm thinking the solution might be to pipe the MediaRecorder chunks to a unified buffer and read from that as a continuous stream, but I'm not sure how to get that intermediate buffer working.
mediaRecorder = new MediaRecorder(stream, recorderOptions);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.start(1000);
async function handleDataAvailable(event) {
  if (event.data.size > 0) {
    const data: Blob = event.data;
    // I think I need to pipe to an intermediate stream? Not sure how tho
    data.stream().pipeTo(writable);
  }
}
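(A sketch of the intermediate step I'm imagining - I'm just not sure whether re-emitting the encoded chunks like this really counts as one continuous stream:)
const { readable, writable: chunkSink } = new TransformStream();
const writer = chunkSink.getWriter();
mediaRecorder.ondataavailable = async (event) => {
  if (event.data.size > 0) {
    // re-emit each encoded chunk into a single unified stream
    writer.write(new Uint8Array(await event.data.arrayBuffer()));
  }
};
readable.pipeTo(writable); // `writable` is the destination WritableStream from my setup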
Currently we can't really access the raw data of a MediaStream; the closest we have for video is the MediaRecorder API, but this will encode the data and works by chunks, not as a stream.
However, there is a new MediaCapture Transform W3C group working on a MediaStreamTrackProcessor interface that does exactly what you want, and it is already available in Chrome under the chrome://flags/#enable-experimental-web-platform-features flag.
When reading the resulting stream, and depending on which kind of track you passed, you'll gain access to VideoFrames or AudioFrames, which are being added by the new WebCodecs API.
if( window.MediaStreamTrackProcessor ) {
  const track = getCanvasTrack();
  const processor = new MediaStreamTrackProcessor( track );
  const reader = processor.readable.getReader();
  readChunk();
  function readChunk() {
    reader.read().then( ({ done, value }) => {
      // value is a VideoFrame
      // we can read the data in each of its planes into an ArrayBufferView
      const channels = value.planes.map( (plane) => {
        const arr = new Uint8Array(plane.length);
        plane.readInto(arr);
        return arr;
      });
      value.close(); // close the VideoFrame when we're done with it
      log.textContent = "planes data (15 first values):\n" +
        channels.map( (arr) => JSON.stringify( [...arr.subarray(0,15)] ) ).join("\n");
      if( !done ) {
        readChunk();
      }
    });
  }
}
else {
  console.error("your browser doesn't support this API yet");
}

function getCanvasTrack() {
  // just some noise...
  const canvas = document.getElementById("canvas");
  const ctx = canvas.getContext("2d");
  const img = new ImageData(300, 150);
  const data = new Uint32Array(img.data.buffer);
  const track = canvas.captureStream().getVideoTracks()[0];
  anim();
  return track;

  function anim() {
    for( let i=0; i<data.length;i++ ) {
      data[i] = Math.random() * 0xFFFFFF + 0xFF000000;
    }
    ctx.putImageData(img, 0, 0);
    if( track.readyState === "live" ) {
      requestAnimationFrame(anim);
    }
  }
}
<pre id="log"></pre>
<p>
Source<br>
<canvas id="canvas"></canvas>
</p>
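Since the question specifically asked for piping into a WritableStream: processor.readable is a standard ReadableStream, so it can be connected directly. A minimal sketch, assuming your sink knows what to do with VideoFrame objects:
const processor = new MediaStreamTrackProcessor( track );
const sink = new WritableStream({
  write( frame ) {
    // ... consume the VideoFrame here (e.g. hand it to a WebCodecs encoder) ...
    frame.close(); // always release frames, they hold on to large resources
  }
});
processor.readable.pipeTo( sink );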
I am in the process of replacing RecordRTC with the built in MediaRecorder for recording audio in Chrome. The recorded audio is then played in the program with audio api. I am having trouble getting the audio.duration property to work. It says
If the video (audio) is streamed and has no predefined length, "Inf" (Infinity) is returned.
With RecordRTC, I had to use ffmpeg_asm.js to convert the audio from wav to ogg. My guess is somewhere in the process RecordRTC sets the predefined audio length. Is there any way to set the predefined length using MediaRecorder?
This is a Chrome bug.
FF does expose the duration of the recorded media, and if you set the currentTime of the recorded media to more than its actual duration, then the property becomes available in Chrome...
var recorder,
  chunks = [],
  ctx = new AudioContext(),
  aud = document.getElementById('aud');

function exportAudio() {
  var blob = new Blob(chunks);
  aud.src = URL.createObjectURL(blob);
  aud.onloadedmetadata = function() {
    // it should already be available here
    log.textContent = ' duration: ' + aud.duration;
    // handle chrome's bug
    if (aud.duration === Infinity) {
      // set it to bigger than the actual duration
      aud.currentTime = 1e101;
      aud.ontimeupdate = function() {
        this.ontimeupdate = () => {
          return;
        }
        log.textContent += ' after workaround: ' + aud.duration;
        aud.currentTime = 0;
      }
    }
  }
}

function getData() {
  var request = new XMLHttpRequest();
  request.open('GET', 'https://upload.wikimedia.org/wikipedia/commons/4/4b/011229beowulf_grendel.ogg', true);
  request.responseType = 'arraybuffer';
  request.onload = decodeAudio;
  request.send();
}

function decodeAudio(evt) {
  var audioData = this.response;
  ctx.decodeAudioData(audioData, startRecording);
}

function startRecording(buffer) {
  var source = ctx.createBufferSource();
  source.buffer = buffer;
  var dest = ctx.createMediaStreamDestination();
  source.connect(dest);
  recorder = new MediaRecorder(dest.stream);
  recorder.ondataavailable = saveChunks;
  recorder.onstop = exportAudio;
  source.start(0);
  recorder.start();
  log.innerHTML = 'recording...'
  // record only 5 seconds
  setTimeout(function() {
    recorder.stop();
  }, 5000);
}

function saveChunks(evt) {
  if (evt.data.size > 0) {
    chunks.push(evt.data);
  }
}

// we need user-activation
document.getElementById('button').onclick = function(evt){
  getData();
  this.remove();
}
<button id="button">start</button>
<audio id="aud" controls></audio><span id="log"></span>
So the advice here would be to star the bug report so that the Chromium team takes some time to fix it, even if this workaround can do the trick...
Thanks to @Kaiido for identifying the bug and offering the working fix.
I prepared an npm package called get-blob-duration that you can install to get a nice Promise-wrapped function to do the dirty work.
Usage is as follows:
// Returns Promise<Number>
getBlobDuration(blob).then(function(duration) {
  console.log(duration + ' seconds');
});
Or ECMAScript 6:
// yada yada async
const duration = await getBlobDuration(blob)
console.log(duration + ' seconds')
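Under the hood the package does essentially what the accepted answer describes. A rough sketch of the idea (not the package's actual source):
function getBlobDuration(blob) {
  return new Promise((resolve, reject) => {
    const el = document.createElement('audio');
    el.onerror = reject;
    el.onloadedmetadata = () => {
      if (el.duration !== Infinity) return resolve(el.duration);
      // Chrome workaround: seek far past the end, then read the real duration
      el.currentTime = Number.MAX_SAFE_INTEGER;
      el.ontimeupdate = () => {
        el.ontimeupdate = null;
        resolve(el.duration);
        el.currentTime = 0;
      };
    };
    el.src = URL.createObjectURL(blob);
  });
}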
A bug in Chrome, detected in 2016, but still open today (March 2019), is the root cause behind this behavior. Under certain scenarios audioElement.duration will return Infinity.
Chrome Bug information here and here
The following code provides a workaround to avoid the bug.
Usage: Create your audioElement and call this function a single time, providing a reference to your audioElement. When the returned promise resolves, the audioElement.duration property should contain the right value. (It also fixes the same problem with videoElements.)
/**
 * calculateMediaDuration()
 * Force media element duration calculation.
 * Returns a promise, that resolves when duration is calculated
 **/
function calculateMediaDuration(media){
  return new Promise( (resolve,reject)=>{
    media.onloadedmetadata = function(){
      // set the mediaElement.currentTime to a high value beyond its real duration
      media.currentTime = Number.MAX_SAFE_INTEGER;
      // listen to time position change
      media.ontimeupdate = function(){
        media.ontimeupdate = function(){};
        // setting player currentTime back to 0 can be buggy too, set it first to .1 sec
        media.currentTime = 0.1;
        media.currentTime = 0;
        // media.duration should now have its correct value, return it...
        resolve(media.duration);
      }
    }
  });
}

// USAGE EXAMPLE :
calculateMediaDuration( yourAudioElement ).then( ()=>{
  console.log( yourAudioElement.duration )
});
Thanks @colxi for the actual solution; I've added some validation steps (the solution was working fine but had problems with long audio files).
It took me about 4 hours to get it to work with long audio files; it turns out validation was the fix.
function fixInfinity(media) {
  return new Promise((resolve, reject) => {
    // wait for the media to load its metadata
    media.onloadedmetadata = () => {
      // changing the current time triggers ontimeupdate
      media.currentTime = Number.MAX_SAFE_INTEGER;
      // check if the duration is Infinity, NaN or undefined
      if (ifNull(media)) {
        media.ontimeupdate = () => {
          // if it is no longer null, resolve the promise with the duration
          if (!ifNull(media)) {
            resolve(media.duration);
          }
          // the second ontimeupdate is a fallback if the first one fails
          media.ontimeupdate = () => {
            if (!ifNull(media)) {
              resolve(media.duration);
            }
          };
        };
      } else {
        // if the media duration was never Infinity, return it directly
        resolve(media.duration);
      }
    };
  });
}

// check if the duration is unusable (Infinity, NaN or undefined)
function ifNull(media) {
  // note: `media.duration === NaN` is always false, so use Number.isNaN instead
  return media.duration === Infinity || Number.isNaN(media.duration) || media.duration === undefined;
}
// USAGE EXAMPLE
// Get audio player on html
const AudioPlayer = document.getElementById('audio');
const getInfinity = async () => {
  // Await for promise
  await fixInfinity(AudioPlayer).then(val => {
    // Reset audio current time
    AudioPlayer.currentTime = 0;
    // Log duration
    console.log(val)
  })
}
I wrapped the webm-duration-fix package to solve the webm length problem; it can be used in Node.js and web browsers, and supports video files over 2GB without using too much memory.
Usage is as follows:
import fixWebmDuration from 'webm-duration-fix';
import fs from 'fs'; // Node.js only, for the local-write example below

const mimeType = 'video/webm;codecs=vp9';
let blobSlice: BlobPart[] = []; // `let`, because it is reset after writing

mediaRecorder = new MediaRecorder(stream, {
  mimeType
});

mediaRecorder.ondataavailable = (event: BlobEvent) => {
  blobSlice.push(event.data);
}

mediaRecorder.onstop = async () => {
  // fix blob, supports fixing webm files larger than 2GB
  const fixBlob = await fixWebmDuration(new Blob([...blobSlice], { type: mimeType }));
  // to write locally, it is recommended to use fs.createWriteStream to reduce memory usage
  const fileWriteStream = fs.createWriteStream(inputPath); // inputPath is your destination file path
  const blobReadstream = fixBlob.stream();
  const blobReader = blobReadstream.getReader();
  while (true) {
    let { done, value } = await blobReader.read();
    if (done) {
      console.log('write done.');
      fileWriteStream.close();
      break;
    }
    fileWriteStream.write(value);
    value = null;
  }
  blobSlice = [];
};
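In a browser there is no fs module, so the fixed blob can simply be downloaded instead; a minimal sketch:
const url = URL.createObjectURL(fixBlob);
const a = document.createElement('a');
a.href = url;
a.download = 'recording.webm';
a.click();
URL.revokeObjectURL(url);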
If you want to modify the video file completely, you can use the package "webmFixDuration"; the other methods are applied at the display level only, on the video tag. With this method, the complete video file is modified.
webmFixDuration GitHub example
mediaRecorder.onstop = async () => {
  const duration = Date.now() - startTime;
  const buggyBlob = new Blob(mediaParts, { type: 'video/webm' });
  const fixedBlob = await webmFixDuration(buggyBlob, duration);
  displayResult(fixedBlob);
};
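For completeness, the surrounding setup that snippet assumes would look roughly like this (mediaParts, startTime and displayResult are the example's own placeholder names):
const mediaParts = [];
mediaRecorder.ondataavailable = (event) => {
  if (event.data.size > 0) mediaParts.push(event.data);
};
const startTime = Date.now(); // captured when recording starts
mediaRecorder.start();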
I am trying to integrate some panning effects into some sounds in a small testing app. It works fine, except for one important issue: each sound only plays once!
I have tried several ways to get around the issue, without any success. The thing is, I can't pinpoint where the problem comes from. Here is my code, with a little explanation below.
const audio = new Audio('audio/background.mp3');
const footstep = new Audio('audio/footstep1.mp3');
const bumpWall1 = new Audio(`audio/bump-wall1.mp3`);
const bumpWall2 = new Audio(`audio/bump-wall2.mp3`);
const bumpWall3 = new Audio(`audio/bump-wall3.mp3`);
const bumpWall4 = new Audio(`audio/bump-wall4.mp3`);
const bumpWallArray = [bumpWall1, bumpWall2, bumpWall3, bumpWall4];
audio.volume = 0.5;
function play(sound, dir) {
  let audioContext = new AudioContext();
  let pre = document.querySelector('pre');
  let myScript = document.querySelector('script');
  let source = audioContext.createMediaElementSource(sound);
  let panNode = audioContext.createStereoPanner();
  source.connect(panNode);
  panNode.connect(audioContext.destination);
  if (dir === "left") {
    panNode.pan.value = -0.8
  } else if (dir === "right") {
    panNode.pan.value = 0.8;
  } else {
    panNode.pan.value = 0;
  }
  sound.play();
}
So basically, when you call the play() function, it plays the sound either on the left, on the right, or in the middle. But each sound is only played once. For example, if the footstep was played one time, it is never played again even if I call the play() function on it.
Can anyone help me with that?
In your developer console, you should have a message stating something along the lines of:
Uncaught InvalidStateError: Failed to execute 'createMediaElementSource' on 'AudioContext': HTMLMediaElement already connected previously to a different MediaElementSourceNode.
(At least in Chrome,) you can't connect a MediaElement several times: an HTMLMediaElement can only ever be connected to a single MediaElementSourceNode in its lifetime.
To avoid this, you would have to disconnect the MediaElement from the MediaElementSourceNode, but this isn't possible...
The best option in your situation is probably to use AudioBuffers directly rather than HTMLAudioElements, especially since you don't append them to the document.
let audio;
const sel = document.getElementById( 'sel' );
// create a single AudioContext, these are not small objects
const audioContext = new AudioContext();

fetch( 'https://dl.dropboxusercontent.com/s/agepbh2agnduknz/camera.mp3' )
  .then( resp => resp.arrayBuffer() )
  .then( buf => audioContext.decodeAudioData( buf ) )
  .then( audioBuffer => audio = audioBuffer )
  .then( () => sel.disabled = false )
  .catch( console.error );

function play(sound, dir) {
  // autoplay policies may have left the context suspended,
  // resume it now that we are inside a user gesture
  if (audioContext.state === 'suspended') {
    audioContext.resume();
  }
  let source = audioContext.createBufferSource();
  source.buffer = sound;
  let panNode = audioContext.createStereoPanner();
  source.connect( panNode );
  panNode.connect( audioContext.destination );
  if (dir === "left") {
    panNode.pan.value = -0.8
  } else if (dir === "right") {
    panNode.pan.value = 0.8;
  } else {
    panNode.pan.value = 0;
  }
  source.start( 0 );
}

sel.onchange = evt => play( audio, sel.value );
<select id="sel" disabled>
<option>left</option>
<option>center</option>
<option>right</option>
</select>
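Note that an AudioBufferSourceNode is deliberately a one-shot, throwaway object: a new one is created on every call to play(), which is cheap by design, while the decoded AudioBuffer itself is reused. That is exactly what makes repeated playback work here where the single HTMLAudioElement could not.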