I have the following AudioContext() sound object in JavaScript.
Its volume is 100%, and I want to play it at 10% volume (where volume = 0.1).
How can I reduce its volume to 10%?
const aCtx = new AudioContext();
let source = aCtx.createBufferSource();
let buf;
fetch('https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav') // can be XHR as well
.then(resp => resp.arrayBuffer())
.then(buf => aCtx.decodeAudioData(buf)) // can be callback as well
.then(decoded => {
source.buffer = buf = decoded;
source.loop = true;
source.connect(aCtx.destination);
check.disabled = false;
});
check.onchange = e => {
if (check.checked) {
source.start(0); // start our bufferSource
} else {
source.stop(0); // this destroys the buffer source
source = aCtx.createBufferSource(); // so we need to create a new one
source.buffer = buf;
source.loop = true;
source.connect(aCtx.destination);
}
};
<label>Start Playing</label>
<input type="checkbox" id="check" disabled><br>
<br>Its volume is 100%. Please help me to reduce it to 10%.
We use a GainNode to control the volume:
var gainNode = aCtx.createGain();
gainNode.gain.value = 0.1; // 10%
gainNode.connect(aCtx.destination);
// now, instead of connecting the source to aCtx.destination, connect it to the gainNode
source.connect(gainNode);
Solution:
const aCtx = new AudioContext();
const gainNode = aCtx.createGain();
gainNode.gain.value = 0.1; // setting it to 10%
gainNode.connect(aCtx.destination);
let source = aCtx.createBufferSource();
let buf;
fetch('https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav') // can be XHR as well
.then(resp => resp.arrayBuffer())
.then(buf => aCtx.decodeAudioData(buf)) // can be callback as well
.then(decoded => {
source.buffer = buf = decoded;
source.loop = true;
source.connect(gainNode);
check.disabled = false;
});
check.onchange = e => {
if (check.checked) {
source.start(0); // start our bufferSource
} else {
source.stop(0); // this destroys the buffer source
source = aCtx.createBufferSource(); // so we need to create a new one
source.buffer = buf;
source.loop = true;
source.connect(gainNode);
}
};
<label>Start Playing</label>
<input type="checkbox" id="check" disabled><br>
<br>Its volume is now reduced to 10%.
You can use AudioContext's createGain for that purpose, as shown below.
For more information, check out createGain:
https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createGain
const aCtx = new AudioContext();
let source = aCtx.createBufferSource();
let buf;
var gainNode = aCtx.createGain(); // Create a GainNode.
gainNode.connect(aCtx.destination); // Connect the gainNode to the destination.
fetch('https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav') // can be XHR as well
.then(resp => resp.arrayBuffer())
.then(buf => aCtx.decodeAudioData(buf)) // can be callback as well
.then(decoded => {
source.buffer = buf = decoded;
source.loop = true;
source.connect(gainNode); // Connect the source to the gainNode.
gainNode.gain.value = 1; // 100% volume (the usual range is 0-1).
check.disabled = false;
});
check.onchange = e => {
if (check.checked) {
source.start(0); // start our bufferSource
} else {
source.stop(0); // this destroys the buffer source
source = aCtx.createBufferSource(); // so we need to create a new one
source.buffer = buf;
source.loop = true;
source.connect(gainNode); // Connect the new source to the gainNode.
gainNode.gain.value = 0.1; // 10% volume.
}
};
Related
I'm recording some audio in the browser and then want to loop it seamlessly, avoiding clicks etc. when it starts. This means fading it in and out.
I can ramp the volume up and down once, but I can't find any way to trigger Web Audio's 'ramp to value at time' every time the loop starts again.
Is there an easy way to do this? I've got 10 of these buffers looping, so I'd like to avoid lots of costly setInterval checks if possible...
let source = audioContext.createBufferSource();
let gain = audioContext.createGain();
gain.gain.value = 0.01;
source.buffer = decodedData;
songLength = decodedData.duration;
source.loop = true;
source.connect(gain);
gain.connect(audioContext.destination);
source.start(0);
// fade in and out
gain.gain.exponentialRampToValueAtTime(0.2, audioContext.currentTime + 1);
gain.gain.exponentialRampToValueAtTime(0.01, audioContext.currentTime + songLength);
Consider listening to the ended event and re-triggering the playback:
class FadeInLoop {
ctx
audioBuffer
gainNode
isPlaying = true
constructor(ctx, url) {
this.ctx = ctx
this.audioBuffer = fetch(url)
.then(response => response.arrayBuffer())
.then(arrayBuffer => ctx.decodeAudioData(arrayBuffer))
this.gainNode = ctx.createGain()
this.gainNode.connect(ctx.destination)
}
async start() {
this.isPlaying = true
const source = this.ctx.createBufferSource() // use the instance's context, not a global
this.source = source
source.addEventListener('ended', e => {
if (this.isPlaying) { // repeat unless stop() was called
this.start()
}
})
source.connect(this.gainNode)
source.buffer = await this.audioBuffer
const now = this.ctx.currentTime
this.gainNode.gain.setValueAtTime(Number.EPSILON, now);
this.gainNode.gain.exponentialRampToValueAtTime(1, now + 0.055)
source.start(0)
}
stop() {
this.isPlaying = false
this.source?.stop()
}
}
const ctx = new AudioContext({ latencyHint: 'interactive' })
const loop = new FadeInLoop(ctx, 'https://batman.dev/static/71474264/loop.mp3')
<button onclick="loop.start()">Start</button>
<button onclick="loop.stop()">Stop</button>
I have a web application of my own, based on the peerjs library (it is a video conference).
I'm trying to make a recording with MediaRecorder, but I'm facing a very unpleasant case.
The code for capturing my desktop stream is the following:
let chooseScreen = document.querySelector('.chooseScreenBtn')
chooseScreen.onclick = async () => {
let desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: true });
}
I then successfully attach the received desktopStream to a video element in the DOM:
const videoElement = doc.querySelector('.videoElement')
videoElement.srcObject = desktopStream
videoElement.muted = false;
videoElement.onloadedmetadata = ()=>{videoElement.play();}
For example, I capture desktopStream on a page with an active conference where everyone hears and sees each other.
To check the video and audio in desktopStream, I play some video in a player on the desktop.
I can hear the audio from my desktop, but the audio from the participants cannot be heard.
Of course, when I put the desktopStream into MediaRecorder, I get a video file with no sound from anyone except my desktop. Any ideas on how to solve it?
Chrome's MediaRecorder API can only output one audio track.
createMediaStreamSource can take both the desktop and the microphone streams; by connecting them to a single node created with createMediaStreamDestination, you get one merged stream that can be piped into the MediaRecorder API.
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
// Create a couple of sources
const source1 = context.createMediaStreamSource(desktopStream);
const source2 = context.createMediaStreamSource(voiceStream);
const destination = context.createMediaStreamDestination();
const desktopGain = context.createGain();
const voiceGain = context.createGain();
desktopGain.gain.value = 0.7;
voiceGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
// Connect source2
source2.connect(voiceGain).connect(destination);
return destination.stream.getAudioTracks();
};
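The merged audio tracks returned here can then be combined with the desktop video track into a single MediaStream and handed to MediaRecorder, as the full example below demonstrates.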
It is also possible to use two or more audio inputs + video input.
window.onload = () => {
const warningEl = document.getElementById('warning');
const videoElement = document.getElementById('videoElement');
const captureBtn = document.getElementById('captureBtn');
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const download = document.getElementById('download');
const audioToggle = document.getElementById('audioToggle');
const micAudioToggle = document.getElementById('micAudioToggle');
if('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';
let blobs;
let blob;
let rec;
let stream;
let voiceStream;
let desktopStream;
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
const destination = context.createMediaStreamDestination();
let hasDesktop = false;
let hasVoice = false;
if (desktopStream && desktopStream.getAudioTracks().length > 0) {
// If you don't want to share Audio from the desktop it should still work with just the voice.
const source1 = context.createMediaStreamSource(desktopStream);
const desktopGain = context.createGain();
desktopGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
hasDesktop = true;
}
if (voiceStream && voiceStream.getAudioTracks().length > 0) {
const source2 = context.createMediaStreamSource(voiceStream);
const voiceGain = context.createGain();
voiceGain.gain.value = 0.7;
source2.connect(voiceGain).connect(destination);
hasVoice = true;
}
return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
};
captureBtn.onclick = async () => {
download.style.display = 'none';
const audio = audioToggle.checked || false;
const mic = micAudioToggle.checked || false;
desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: audio });
if (mic === true) {
voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
}
const tracks = [
...desktopStream.getVideoTracks(),
...mergeAudioStreams(desktopStream, voiceStream)
];
console.log('Tracks to add to stream', tracks);
stream = new MediaStream(tracks);
console.log('Stream', stream)
videoElement.srcObject = stream;
videoElement.muted = true;
blobs = [];
rec = new MediaRecorder(stream, {mimeType: 'video/webm; codecs=vp8,opus'});
rec.ondataavailable = (e) => blobs.push(e.data);
rec.onstop = async () => {
blob = new Blob(blobs, {type: 'video/webm'});
let url = window.URL.createObjectURL(blob);
download.href = url;
download.download = 'test.webm';
download.style.display = 'block';
};
startBtn.disabled = false;
captureBtn.disabled = true;
audioToggle.disabled = true;
micAudioToggle.disabled = true;
};
startBtn.onclick = () => {
startBtn.disabled = true;
stopBtn.disabled = false;
rec.start();
};
stopBtn.onclick = () => {
captureBtn.disabled = false;
audioToggle.disabled = false;
micAudioToggle.disabled = false;
startBtn.disabled = true;
stopBtn.disabled = true;
rec.stop();
stream.getTracks().forEach(s=>s.stop())
videoElement.srcObject = null
stream = null;
};
};
Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.
I am trying to play a vehicle-driving sound continuously, without a break, in a browser-based game.
My .wav file is 1 second long and has the same frequency from beginning to end, but the sound takes a little break before the next iteration.
Here is the code:
function playSound()
{
//alert("");
myAudio = new Audio('http://ithmbwp.com/feedback/SoundsTest/sounds/tank_driven.wav');
if (typeof myAudio.loop == 'boolean')
{
myAudio.loop = true;
}
else
{
myAudio.addEventListener('ended', function() {
this.currentTime = 0;
this.play();
}, false);
}
myAudio.volume = 0.3;
myAudio.play();
}
Can anyone help me to play the sound continuously?
Edit
You can visit my page here to observe the problem.
window.onload = function() {
playSound();
};
function playSound()
{
//alert("");
myAudio = new Audio('http://ithmbwp.com/feedback/SoundsTest/sounds/tank_driven.wav');
if (typeof myAudio.loop == 'boolean')
{
myAudio.loop = true;
}
else
{
myAudio.addEventListener('ended', function() {
this.currentTime = 0;
this.play();
}, false);
}
myAudio.volume = 0.3;
myAudio.play();
}
<h3 style="font-family:verdana;">Please listen the sound break.</h3>
<h3 style="font-family:verdana;">It should be continuous.</h3>
Use the AudioContext API and its AudioBufferSourceNode interface to get seamlessly looped sounds.
Note that you'll also need your audio file to be correctly edited to avoid crackles and clicks, but yours seems fine.
const aCtx = new AudioContext();
let source = aCtx.createBufferSource();
let buf;
fetch('https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav') // can be XHR as well
.then(resp => resp.arrayBuffer())
.then(buf => aCtx.decodeAudioData(buf)) // can be callback as well
.then(decoded => {
source.buffer = buf = decoded;
source.loop = true;
source.connect(aCtx.destination);
check.disabled = false;
});
check.onchange = e => {
if (check.checked) {
source.start(0); // start our bufferSource
} else {
source.stop(0); // this destroys the buffer source
source = aCtx.createBufferSource(); // so we need to create a new one
source.buffer = buf;
source.loop = true;
source.connect(aCtx.destination);
}
};
<label>play audioBuffer</label>
<input type="checkbox" id="check" disabled><br><br>
Just to compare <audio src="https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav" loop controls>
Or with your snippet:
window.onload = function() {
playSound();
};
function playSound() {
if (window.AudioContext) { // check on window so old browsers don't throw a ReferenceError
out.textContent = "yeah now it's continuous !!!";
playAsAudioBuffer();
} else {
out.textContent = "you should consider updating your browser...";
playNormally();
}
}
function playAsAudioBuffer() {
var aCtx = new AudioContext();
// here is the real audioBuffer to sound part
function ondecoded(buf) {
var source = aCtx.createBufferSource();
source.buffer = buf;
source.loop = true;
var gainNode = aCtx.createGain();
gainNode.gain.value = .3; // here you set the volume
source.connect(gainNode);
gainNode.connect(aCtx.destination);
source.start(0);
}
var xhr = new XMLHttpRequest();
xhr.onload = function() {
aCtx.decodeAudioData(this.response, ondecoded);
};
xhr.onerror = playNormally;
xhr.responseType = 'arraybuffer';
xhr.open('get', 'https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav');
xhr.send();
}
// An ugly workaround in case of old browsers
function playNormally() {
var myAudios = [new Audio('https://dl.dropboxusercontent.com/s/knpo4d2yooe2u4h/tank_driven.wav')];
myAudios.push(new Audio(myAudios[0].src));
myAudios.forEach(function(a){
a.addEventListener('timeupdate', checkTime);
a.volume = 0.3;
});
function checkTime(){
if(this.currentTime > this.duration - 0.4){
startNext(this);
}
}
var current = 0;
function startNext(el){
current = (current + 1) % 2;
myAudios[current].play();
el.currentTime = 0;
el.pause();
}
myAudios[0].play();
}
<h3 id="out"></h3>
Be cool, just use a few lines of code:
window.onload = function() {
playSound();
};
function playSound()
{
var myAudio = new Audio('http://ithmbwp.com/feedback/SoundsTest/sounds/tank_driven.wav');
myAudio.volume = 0.3 ;
var tank_driven_sound = setInterval(function()
{
myAudio.currentTime = 0;
myAudio.play();
}, 800);
}
<h3 style="font-family:verdana;">Please listen, it's continuous.</h3>
I'm using Angular's $timeout function to call tick() every 512 ms in order to play the data in my audio queue. I'm using this to perform a live audio stream. Sometimes there are cuts in the sound, and I really need to maintain a delta of one second between emitting and receiving sound, so I want to delete the audio data in my queue corresponding to the duration of each cut.
Do you know if there is a way to listen for those cuts on audioContext.destination, something like:
audioContext.destination.oncuts = function(duration) {
  audioQueue.read(duration);
};
Here are my tick and audioQueue functions:
var tick = function() {
$scope.soundclock = Date.now();
$timeout(tick, $scope.tickInterval);
if(startStream && isFocused) {
if(isChrome === true || isOpera === true || isIE === true || isFirefox === true) {
if(audioQueue.length()>=size) {
float32 = audioQueue.read(size);
source = audioContext.createBufferSource();
audioBuffer = audioContext.createBuffer(1, size, sampleRate);
data = audioBuffer.getChannelData(0);
for(var i=0; i<size;i++) {
data[i] = float32[i];
}
source.buffer = audioBuffer;
source.connect(audioContext.destination);
source.start(0);
}
}
if(isSafari === true) {
if(audioQueue.length()>=size) {
float32 = audioQueue.read(size);
source = audioContext.createBufferSource();
audioBuffer = audioContext.createBuffer(1, size, sampleRate);
data = audioBuffer.getChannelData(0);
for(var j=0; j<size;j++) {
data[j] = float32[j];
}
source.buffer = audioBuffer;
source.connect(audioContext.destination);
source.noteOn(0);
}
}
}
};
var audioQueue = {
buffer: new Float32Array(0),
write: function(newAudio){
currentQLength = this.buffer.length;
newBuffer = new Float32Array(currentQLength+newAudio.length);
d = Date.now() - date;
console.log('Queued '+newBuffer.length+' samples. ');
date = Date.now();
newBuffer.set(this.buffer, 0);
newBuffer.set(newAudio, currentQLength);
this.buffer = newBuffer;
},
read: function(nSamples){
samplesToPlay = this.buffer.subarray(0, nSamples);
this.buffer = this.buffer.subarray(nSamples, this.buffer.length);
console.log('Queue at '+this.buffer.length+' samples. ');
return samplesToPlay;
},
length: function(){
return this.buffer.length;
}
};
You need to not rely on JavaScript timers (which are, for audio purposes, horribly inaccurate) and instead schedule your ticks ahead of time. Check out http://www.html5rocks.com/en/tutorials/audio/scheduling/, which I wrote a while ago about scheduling audio timers.
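For illustration, here is a minimal look-ahead scheduler sketch in the spirit of that article, reusing the question's audioContext, audioQueue, size and sampleRate variables; the look-ahead window and timer interval are assumptions to tune, and the Safari noteOn() branch is omitted for brevity:
var scheduleAheadTime = 0.2; // seconds of audio to keep queued ahead (tune to taste)
var nextStartTime = 0;       // audio-clock time at which the next chunk should begin
function schedulerTick() {
  // never schedule in the past (e.g. after a long GC pause or a tab switch)
  if (nextStartTime < audioContext.currentTime) {
    nextStartTime = audioContext.currentTime;
  }
  // queue as many chunks as needed to cover the look-ahead window
  while (nextStartTime < audioContext.currentTime + scheduleAheadTime &&
         audioQueue.length() >= size) {
    var audioBuffer = audioContext.createBuffer(1, size, sampleRate);
    audioBuffer.getChannelData(0).set(audioQueue.read(size));
    var source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(audioContext.destination);
    source.start(nextStartTime);            // sample-accurate start on the audio clock
    nextStartTime += audioBuffer.duration;  // chunks butt up against each other seamlessly
  }
}
// the JS timer only needs to fire roughly on time; the audio clock does the precise work
setInterval(schedulerTick, 50);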
Because one of the browsers I'm trying to support doesn't allow me to decode a specific codec using AudioContext.decodeAudioData(), I'm using Aurora.js to decode audio files.
How can I turn the decoded samples received from Aurora.js into an AudioBuffer I can actually use to play back the audio?
This is my code so far:
var AudioContext = (window.AudioContext || window.webkitAudioContext);
var context = new AudioContext();
var segmentUrls = [
'/segments/00.wav',
'/segments/05.wav',
'/segments/10.wav',
'/segments/15.wav',
'/segments/20.wav',
'/segments/25.wav',
'/segments/30.wav',
'/segments/35.wav',
'/segments/40.wav',
'/segments/45.wav',
'/segments/50.wav',
'/segments/55.wav'
];
Promise.all(segmentUrls.map(loadSound))
.then(function(buffers) {
var startAt = 0;
buffers.forEach(function(buffer) {
playSound(startAt, buffer);
startAt += buffer.duration;
});
})
.catch(function(err) {
console.error(err);
});
function playSound(offset, buffer) {
var source = context.createBufferSource();
source.buffer = buffer;
source.connect(context.destination);
source.start(offset);
}
function loadSound(url) {
return new Promise(function(resolve, reject) {
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
request.onload = function onLoad() {
resolve(decodeAudioData(request.response));
};
request.onerror = function onError() {
reject('Could not request file');
};
request.send();
});
}
function decodeAudioData(audioData) {
return new Promise(function(resolve, reject) {
var asset = AV.Asset.fromBuffer(audioData);
asset.decodeToBuffer(function(buffer) {
// Create an AudioBuffer here
});
});
}
You'll have to create an AudioBuffer of the appropriate size and number of channels, and copy the data from one Float32 buffer to the other.
Here is the MDN code snippet to put data in an AudioBuffer and then play it:
https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer
// Stereo
var channels = 2;
// Create an empty two second stereo buffer at the
// sample rate of the AudioContext
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var frameCount = audioCtx.sampleRate * 2.0;
var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);
button.onclick = function() {
// Fill the buffer with white noise;
// just random values between -1.0 and 1.0
for (var channel = 0; channel < channels; channel++) {
// This gives us the actual array that contains the data
var nowBuffering = myArrayBuffer.getChannelData(channel);
for (var i = 0; i < frameCount; i++) {
// Math.random() is in [0; 1.0]
// audio needs to be in [-1.0; 1.0]
nowBuffering[i] = Math.random() * 2 - 1;
}
}
// Get an AudioBufferSourceNode.
// This is the AudioNode to use when we want to play an AudioBuffer
var source = audioCtx.createBufferSource();
// set the buffer in the AudioBufferSourceNode
source.buffer = myArrayBuffer;
// connect the AudioBufferSourceNode to the
// destination so we can hear the sound
source.connect(audioCtx.destination);
// start the source playing
source.start();
}
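Applied to the question's decodeAudioData helper, the missing piece could look like the sketch below. It assumes Aurora.js hands the decoded samples over as one interleaved Float32Array and that asset.format exposes channelsPerFrame and sampleRate; verify both against your Aurora.js version:
function decodeAudioData(audioData) {
  return new Promise(function(resolve, reject) {
    var asset = AV.Asset.fromBuffer(audioData);
    asset.on('error', reject); // surface decoder errors instead of leaving the promise hanging
    asset.decodeToBuffer(function(interleaved) {
      var channels = asset.format.channelsPerFrame;
      var frameCount = interleaved.length / channels;
      var audioBuffer = context.createBuffer(channels, frameCount, asset.format.sampleRate);
      // De-interleave: sample n of channel c sits at interleaved[n * channels + c]
      for (var c = 0; c < channels; c++) {
        var channelData = audioBuffer.getChannelData(c);
        for (var n = 0; n < frameCount; n++) {
          channelData[n] = interleaved[n * channels + c];
        }
      }
      resolve(audioBuffer);
    });
  });
}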