I'm learning the Web Audio API and trying to recreate the example on MDN.
Here is my code:
const playBtn = document.getElementById('play');
const pauseBtn = document.getElementById('pause');
const gainBtn = document.getElementById('gain');
const myAudio = document.getElementById('myAudio');
const file = 'https://iondrimbafilho.me/second-demo.mp3';
const request = new XMLHttpRequest();
request.open('GET', file, true);
request.onload = () => {
myAudio.src = file;
myAudio.load();
}
let audioCtx;
let source;
let gainNode;
document.onload = () => {
audioCtx = new (window.AudioContext || window.webkitAudioContext)();
source = audioCtx.createMediaElementSource(myAudio);
gainNode = audioCtx.createGain();
source.connect(gainNode)
gainNode.connect(audioCtx.destination)
}
playBtn.onclick = () => {
myAudio.play()
};
gainBtn.onclick = () => {
console.log(gainNode.gain.value)
};
I connect the source node to the GainNode, which is then connected to the destination node.
I'm trying to log the audio gain.
So why is gainNode undefined when I try to read gainNode.gain?
Here is the error I get:
Uncaught TypeError: Cannot read property 'gain' of undefined
at HTMLButtonElement.gainBtn.onclick
Additional info: I'm using Chrome 78.
I am very new to JavaScript. I know some basics but have not yet completely understood the logic behind it (so far I have only worked with Python and a little bit of VBA).
For uni I have to build a browser interface to record audio and transfer it to a server where a speech-to-text application runs. I found some open-source code here (https://github.com/mdn/dom-examples/blob/main/media/web-dictaphone/scripts/app.js) which I wanted to use, but it is missing the WebSocket part, and I don't know where exactly to insert that. So far I have this:
Code of the Web Dictaphone:
// set up basic variables for app
const record = document.querySelector('.record');
const stop = document.querySelector('.stop');
const soundClips = document.querySelector('.sound-clips');
const canvas = document.querySelector('.visualizer');
const mainSection = document.querySelector('.main-controls');
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
const constraints = { audio: true };
let chunks = [];
let onSuccess = function(stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
}
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
}
mediaRecorder.onstop = function(e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt('Enter a name for your sound clip?','My unnamed clip');
const clipContainer = document.createElement('article');
const clipLabel = document.createElement('p');
const audio = document.createElement('audio');
const deleteButton = document.createElement('button');
clipContainer.classList.add('clip');
audio.setAttribute('controls', '');
deleteButton.textContent = 'Delete';
deleteButton.className = 'delete';
if(clipName === null) {
clipLabel.textContent = 'My unnamed clip';
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function(e) {
e.target.closest(".clip").remove();
}
clipLabel.onclick = function() {
const existingName = clipLabel.textContent;
const newClipName = prompt('Enter a new name for your sound clip?');
if(newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
}
}
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
}
let onError = function(err) {
console.log('The following error occurred: ' + err);
}
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log('getUserMedia not supported on your browser!');
}
WebSocket part (client side):
window.addEventListener("DOMContentLoaded", () => {
// Open the WebSocket connection and register event handlers.
console.log('DOMContentLoaded done');
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
dataToBeSent = function (data) {
ws.send(data);
};
console.log('ws is defined');
})
Right now I have just stacked both parts on top of each other, but this doesn't work since, as I found out, you can only use variables (such as ws) inside the block in which they are defined. This leads to an error saying that ws is not defined when I call the sending function inside the if statement.
I have already spent hours looking for tutorials, but none that I found covered this topic. I also tried moving the WebSocket part into the if statement, but that, unsurprisingly, did not work either, at least not in the way I tried it.
I feel like my problem lies in understanding how to define the WebSocket so that I can call it inside the if statement, or in figuring out a way to get the audio to somewhere where ws is considered defined. Unfortunately I just can't wrap my head around it, and I have already invested days, which has become really frustrating.
I appreciate any help. If you have any ideas what I could change or move in the code or maybe just know any tutorial that could help, I'd be really grateful.
Thanks in advance!
You don't need the window.addEventListener("DOMContentLoaded", () => { ... }) wrapper at all. Define ws at the top level of the script so it is in scope for the recorder handlers below:
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
const dataToBeSent = function (data) {
ws.send(data);
};
const record = document.querySelector(".record");
const stop = document.querySelector(".stop");
const soundClips = document.querySelector(".sound-clips");
const canvas = document.querySelector(".visualizer");
const mainSection = document.querySelector(".main-controls");
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log("getUserMedia supported.");
const constraints = { audio: true };
let chunks = [];
let onSuccess = function (stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function () {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
};
stop.onclick = function () {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
};
mediaRecorder.onstop = function (e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt(
"Enter a name for your sound clip?",
"My unnamed clip"
);
const clipContainer = document.createElement("article");
const clipLabel = document.createElement("p");
const audio = document.createElement("audio");
const deleteButton = document.createElement("button");
clipContainer.classList.add("clip");
audio.setAttribute("controls", "");
deleteButton.textContent = "Delete";
deleteButton.className = "delete";
if (clipName === null) {
clipLabel.textContent = "My unnamed clip";
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function (e) {
e.target.closest(".clip").remove();
};
clipLabel.onclick = function () {
const existingName = clipLabel.textContent;
const newClipName = prompt("Enter a new name for your sound clip?");
if (newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
};
};
mediaRecorder.ondataavailable = function (e) {
chunks.push(e.data);
};
};
let onError = function (err) {
console.log("The following error occured: " + err);
};
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log("getUserMedia not supported on your browser!");
}
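With ws defined at the top level like this, it is in scope inside the recorder callbacks, so the recorded audio can be sent from there. As a minimal sketch (assuming the server at ws://localhost:8001/ accepts binary messages), you could add this at the end of mediaRecorder.onstop, right after the blob is created:
if (ws.readyState === WebSocket.OPEN) {
  ws.send(blob); // WebSocket.send() accepts a Blob directly
} else {
  console.log('WebSocket is not open, readyState: ' + ws.readyState);
}
Alternatively, send each chunk as it arrives by calling ws.send(e.data) (or dataToBeSent(e.data)) inside mediaRecorder.ondataavailable.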
I have a web application of my own, based on the peerjs library (it is a video conference app).
I'm trying to make a recording with MediaRecorder, but I'm running into a very unpleasant case.
The code for capturing my desktop stream is the following:
let chooseScreen = document.querySelector('.chooseScreenBtn')
chooseScreen.onclick = async () => {
let desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: true });
}
I then successfully attach the received desktopStream to a video element in the DOM:
const videoElement = document.querySelector('.videoElement')
videoElement.srcObject = desktopStream
videoElement.muted = false;
videoElement.onloadedmetadata = ()=>{videoElement.play();}
For example, I capture desktopStream on a page with an active conference where everyone can hear and see each other.
To check the video and audio in desktopStream, I play a video in a player on my desktop.
In the recording I can hear the audio from my desktop, but the audio from the participants cannot be heard.
Of course, when I feed desktopStream into MediaRecorder, I get a video file with no sound from anyone except my desktop. Any ideas on how to solve this?
Chrome's MediaRecorder API can only output one audio track, so the desktop audio and the microphone audio have to be mixed into a single track first.
createMediaStreamSource can take the desktop and the microphone streams as inputs; by connecting both of them to a single destination created with createMediaStreamDestination, you get one merged stream that you can pipe into the MediaRecorder API.
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
// Create a couple of sources
const source1 = context.createMediaStreamSource(desktopStream);
const source2 = context.createMediaStreamSource(voiceStream);
const destination = context.createMediaStreamDestination();
const desktopGain = context.createGain();
const voiceGain = context.createGain();
desktopGain.gain.value = 0.7;
voiceGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
// Connect source2
source2.connect(voiceGain).connect(destination);
return destination.stream.getAudioTracks();
};
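To actually record the merged result, the returned audio tracks are combined with the captured video track into a new MediaStream that is handed to MediaRecorder. A short sketch of that wiring (the full example below does the same thing with feature checks):
const tracks = [
  ...desktopStream.getVideoTracks(),
  ...mergeAudioStreams(desktopStream, voiceStream)
];
const recorder = new MediaRecorder(new MediaStream(tracks), { mimeType: 'video/webm; codecs=vp8,opus' });
recorder.start();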
It is also possible to use two or more audio inputs + video input.
window.onload = () => {
const warningEl = document.getElementById('warning');
const videoElement = document.getElementById('videoElement');
const captureBtn = document.getElementById('captureBtn');
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const download = document.getElementById('download');
const audioToggle = document.getElementById('audioToggle');
const micAudioToggle = document.getElementById('micAudioToggle');
if('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';
let blobs;
let blob;
let rec;
let stream;
let voiceStream;
let desktopStream;
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
const destination = context.createMediaStreamDestination();
let hasDesktop = false;
let hasVoice = false;
if (desktopStream && desktopStream.getAudioTracks().length > 0) {
// If you don't want to share Audio from the desktop it should still work with just the voice.
const source1 = context.createMediaStreamSource(desktopStream);
const desktopGain = context.createGain();
desktopGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
hasDesktop = true;
}
if (voiceStream && voiceStream.getAudioTracks().length > 0) {
const source2 = context.createMediaStreamSource(voiceStream);
const voiceGain = context.createGain();
voiceGain.gain.value = 0.7;
source2.connect(voiceGain).connect(destination);
hasVoice = true;
}
return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
};
captureBtn.onclick = async () => {
download.style.display = 'none';
const audio = audioToggle.checked || false;
const mic = micAudioToggle.checked || false;
desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: audio });
if (mic === true) {
voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
}
const tracks = [
...desktopStream.getVideoTracks(),
...mergeAudioStreams(desktopStream, voiceStream)
];
console.log('Tracks to add to stream', tracks);
stream = new MediaStream(tracks);
console.log('Stream', stream)
videoElement.srcObject = stream;
videoElement.muted = true;
blobs = [];
rec = new MediaRecorder(stream, {mimeType: 'video/webm; codecs=vp8,opus'});
rec.ondataavailable = (e) => blobs.push(e.data);
rec.onstop = async () => {
blob = new Blob(blobs, {type: 'video/webm'});
let url = window.URL.createObjectURL(blob);
download.href = url;
download.download = 'test.webm';
download.style.display = 'block';
};
startBtn.disabled = false;
captureBtn.disabled = true;
audioToggle.disabled = true;
micAudioToggle.disabled = true;
};
startBtn.onclick = () => {
startBtn.disabled = true;
stopBtn.disabled = false;
rec.start();
};
stopBtn.onclick = () => {
captureBtn.disabled = false;
audioToggle.disabled = false;
micAudioToggle.disabled = false;
startBtn.disabled = true;
stopBtn.disabled = true;
rec.stop();
stream.getTracks().forEach(s=>s.stop())
videoElement.srcObject = null
stream = null;
};
};
Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.
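Because of that you can't rely on a desktop audio track being present; a small runtime check (just a sketch) makes the situation visible instead of silently recording without it:
if (desktopStream.getAudioTracks().length === 0) {
  console.warn('No desktop audio track was captured on this platform/selection.');
}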
I have an audio element
var audioSrc = 'https://mfbx9da4.github.io/assets/audio/dope-drum-loop_C_major.wav'
var audio = document.createElement('audio')
audio.src = audioSrc
I need the AudioBuffer to do beat detection, so I tried accessing the buffer when the audio is loaded, like so:
audio.oncanplaythrough = () => {
console.info('loaded');
var source = context.createMediaElementSource(audio);
source.connect(context.destination);
console.info('source.buffer', source.buffer);
source.start()
}
However, the above code snippet logs
> loaded
> source.buffer undefined
A MediaElementAudioSourceNode has no buffer property, so it seems the best way to do this is to avoid the <audio> tag altogether and load the audio via XHR, then decode it into an AudioBuffer:
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
const audioSrc =
"https://davidalbertoadler.com/assets/audio/dope-drum-loop_C_major.wav";
const audioData = await fetchAudio(audioSrc);
audioContext.decodeAudioData(audioData, onDecoded, onDecodeError);
function fetchAudio(url) {
return new Promise((resolve, reject) => {
const request = new XMLHttpRequest();
request.open("GET", url, true);
request.responseType = "arraybuffer";
request.onload = () => resolve(request.response);
request.onerror = (e) => reject(e);
request.send();
});
}
function onDecoded(buffer) {
// Play the song
console.log("Got the decoded buffer now play the song", buffer);
const source = audioContext.createBufferSource();
source.buffer = buffer;
source.connect(audioContext.destination);
source.loop = true;
source.start();
}
function onDecodeError(e) {
console.log("Error decoding buffer: " + e.message);
console.log(e);
}
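Once onDecoded fires you have a real AudioBuffer, so beat detection can work on the raw PCM samples. As a very rough sketch (findPeaks is a hypothetical helper, not part of the code above), you could scan one channel for samples above a threshold:
function findPeaks(buffer, threshold = 0.9) {
  const data = buffer.getChannelData(0);      // Float32Array of samples for channel 0
  const peaks = [];
  for (let i = 0; i < data.length; i++) {
    if (data[i] > threshold) {
      peaks.push(i / buffer.sampleRate);      // peak time in seconds
      i += Math.floor(buffer.sampleRate / 4); // skip ~250 ms so one hit isn't counted twice
    }
  }
  return peaks;
}
Calling findPeaks(buffer) inside onDecoded gives you approximate beat positions in seconds.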
I'm playing audio with the Web Audio API and want to record it at half speed, then later speed it back up to normal on the back end. The output I get is pitched down.
var mr = null;
var source = null;
var stop = function(){
mr.stop();
source.stop();
}
var save = function(blob, filename){
var link = document.createElement('a');
link.href = URL.createObjectURL(blob);
link.download = filename || 'data.json';
link.click();
}
var context = new ( window.AudioContext || window.webkitAudioContext )();
var dest = context.createMediaStreamDestination();
function loadSound(url) {
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
// Decode asynchronously
request.onload = function() {
context.decodeAudioData(request.response, function(buffer) {
source = context.createBufferSource();
source.buffer = buffer;
source.playbackRate.value = 0.5;
source.connect(dest);
mr = new MediaRecorder(dest.stream);
mr.start();
source.start(0);
var chunks = [];
mr.ondataavailable = function(e){
chunks.push(e.data);
}
mr.onstop = function(e) {
var blob = new Blob(chunks, { 'type' : 'video/webm' });
save(blob,"a.webm");
}
setTimeout(stop, 10000);
}, null);
}
request.send();
}
loadSound("https://storage.googleapis.com/frulix-dev.appspot.com/words");
I have created a fiddle for this case: https://jsfiddle.net/thenectorgod/v3ogtwvp/4/.
This does not happen for a normal recording; that audio comes out clear after speeding it up to 2x.
Can anyone suggest how to get proper audio in this case?
I am using the Web Audio API's OfflineAudioContext to render an audio file at a different playback speed. When I do this, the altered audio is held in renderedBuffer.
How can I download the audio held in renderedBuffer?
Here is my code:
// define online and offline audio context
var audioCtx = new AudioContext();
var offlineCtx = new OfflineAudioContext(2,44100*40,44100);
source = offlineCtx.createBufferSource();
// define variables
var pre = document.querySelector('pre');
var myScript = document.querySelector('script');
var play = document.querySelector('.play');
var stop = document.querySelector('.stop');
// use XHR to load an audio track, and
// decodeAudioData to decode it and stick it in a buffer.
// Then we put the buffer into the source
function getData() {
request = new XMLHttpRequest();
request.open('GET', 'sound.mp4', true);
request.responseType = 'arraybuffer';
request.onload = function() {
var audioData = request.response;
audioCtx.decodeAudioData(audioData, function(buffer) {
myBuffer = buffer;
source.buffer = myBuffer;
source.playbackRate.value = 0.50;
source.connect(offlineCtx.destination);
source.start();
//source.loop = true;
offlineCtx.startRendering().then(function(renderedBuffer) {
console.log('Rendering completed successfully');
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var song = audioCtx.createBufferSource();
song.buffer = renderedBuffer;
song.connect(audioCtx.destination);
play.onclick = function() {
song.start();
}
}).catch(function(err) {
console.log('Rendering failed: ' + err);
// Note: The promise should reject when startRendering is called a second time on an OfflineAudioContext
});
});
}
request.send();
}
// Run getData to start the process off
getData();
// dump script to pre element
pre.innerHTML = myScript.innerHTML;