I'm trying to adjust the microphone volume in a WebRTC chat app that uses two videos for streaming.
Is it possible to modify the gain of the microphone? If so, how can I do it with the following streams I'm using?
/*********************** video call ***************************/
var localStream;
var localVideo = document.getElementById("localVideo");
var remoteVideo = document.getElementById("remoteVideo");
var callButton = document.getElementById("callButton");
var inputLevelSelector = document.getElementById('mic-volume');
var outputLevelSelector = document.getElementById('speaker-volume');
inputLevelSelector.addEventListener('change', changeMicrophoneLevel);
outputLevelSelector.addEventListener('change', changeSpeakerLevel);
callButton.disabled = true;
callButton.onclick = call;
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
navigator.getUserMedia({
audio: true,
video: true
}, gotStream, //note that we are adding both audio and video
function (error) {
console.log(error);
});
var RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
var SessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription || window.webkitRTCSessionDescription;
var pc = new RTCPeerConnection({
"iceServers": []
});
function gotStream(stream) {
// localVideo.src = window.URL.createObjectURL(stream); // DEPRECATED
localVideo.srcObject = stream; // UPDATED
localStream = stream;
callButton.disabled = false;
pc.addStream(stream);
}
pc.onicecandidate = function (event) {
console.log(event);
if (!event || !event.candidate) {
return;
} else {
socket.emit("video call", {
type: "iceCandidate",
"candidate": event.candidate
});
}
};
var remoteStream;
pc.onaddstream = function (event) {
remoteStream = event.stream;
var remoteVideo = document.getElementById("remoteVideo");
// remoteVideo.src = window.URL.createObjectURL(event.stream); // DEPRECATED
remoteVideo.srcObject = event.stream; // UPDATED
remoteVideo.play();
};
Please note that I'm a newbie, so take it easy! :)
Below is an implementation of the post I shared in the comments. It's the same setup, but with your code included.
You first create your nodes from the Web Audio API. Because you want to manipulate the volume of a stream, you'll need a MediaStreamAudioSourceNode, a MediaStreamAudioDestinationNode and a GainNode. The MediaStreamAudioSourceNode is the entry point for the stream; by feeding the stream in here we can route it through the gain. The stream passes through the GainNode, where the volume is controlled, and then on to the MediaStreamAudioDestinationNode, where you can use the stream again in your RTC client.
From there, use the stream you get from the MediaStreamAudioDestinationNode.stream property.
Edit:
It turns out that MediaStreamAudioDestinationNode.stream is a MediaStream object with only an audio track. So the video has been removed from the stream and has to be rejoined.
So when you have access to the stream, get the video tracks from it and store them in a variable. Then, after passing the stream through the Web Audio API, join those video tracks back with the audio track that went through the GainNode.
var inputLevelSelector = document.getElementById('mic-volume');
// Renamed the variable after your comment.
var peerConnection = new RTCPeerConnection({
"iceServers": []
});
function gotStream(stream) {
// Get the videoTracks from the stream.
const videoTracks = stream.getVideoTracks();
/**
* Create a new audio context and build a stream source,
* stream destination and a gain node. Pass the stream into
* the mediaStreamSource so we can use it in the Web Audio API.
*/
const context = new AudioContext();
const mediaStreamSource = context.createMediaStreamSource(stream);
const mediaStreamDestination = context.createMediaStreamDestination();
const gainNode = context.createGain();
/**
* Connect the stream to the gainNode so that all audio
* passes through the gain and can be controlled by it.
* Then pass the stream from the gain to the mediaStreamDestination
* which can pass it back to the RTC client.
*/
mediaStreamSource.connect(gainNode);
gainNode.connect(mediaStreamDestination);
/**
* Change the gain levels on the input selector.
*/
inputLevelSelector.addEventListener('input', event => {
gainNode.gain.value = event.target.value;
});
/**
* The mediaStreamDestination.stream outputs a MediaStream object
* containing a single AudioMediaStreamTrack. Add the video track
* to the new stream to rejoin the video with the controlled audio.
*/
const controlledStream = mediaStreamDestination.stream;
for (const videoTrack of videoTracks) {
controlledStream.addTrack(videoTrack);
}
/**
* Use the stream that went through the gainNode. This
* is the same stream but with altered input volume levels.
*/
localVideo.srcObject = controlledStream;
localStream = controlledStream;
peerConnection.addStream(controlledStream);
callButton.disabled = false;
}
Related
I want to encode a video (from a canvas) and add procedural audio to it.
The encoding can be accomplished with MediaRecorder that takes a MediaStream.
For the stream, I want to obtain the video part from a canvas, using the canvas.captureStream() call.
I want to add an audio track to the stream. But instead of microphone input, I want to generate the samples on the fly; for simplicity's sake, let's assume it writes out a sine wave.
How can I create a MediaStreamTrack that generates procedural audio?
The Web Audio API has a createMediaStreamDestination() method, which returns a MediaStreamAudioDestinationNode object. You can connect your audio graph to it, and it gives you access to a MediaStream instance fed by the audio context's output.
document.querySelector("button").onclick = (evt) => {
const duration = 5;
evt.target.remove();
const audioContext = new AudioContext();
const osc = audioContext.createOscillator();
const destNode = audioContext.createMediaStreamDestination();
const { stream } = destNode;
osc.connect(destNode);
osc.connect(audioContext.destination);
osc.start(0);
osc.frequency.value = 80;
osc.frequency.exponentialRampToValueAtTime(440, audioContext.currentTime+10);
osc.stop(duration);
// stream.addTrack(canvasStream.getVideoTracks()[0]);
const recorder = new MediaRecorder(stream);
const chunks = [];
recorder.ondataavailable = ({data}) => chunks.push(data);
recorder.onstop = (evt) => {
const el = new Audio();
const [{ type }] = chunks; // for Safari
el.src = URL.createObjectURL(new Blob(chunks, { type }));
el.controls = true;
document.body.append(el);
};
recorder.start();
setTimeout(() => recorder.stop(), duration * 1000);
console.log(`Started recording, please wait ${duration}s`);
};
<button>begin</button>
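The snippet above records the procedural audio on its own. To pair it with the canvas video the question asks about, one approach (a sketch, assuming a canvas element that is being drawn to) is to build a new MediaStream from the canvas capture track and the destination node's audio track, and record that instead:
// Sketch: combine canvas video with the procedural audio (the canvas is assumed to exist).
const canvasStream = canvas.captureStream(30); // capture the canvas at ~30 fps
const combined = new MediaStream([
  ...canvasStream.getVideoTracks(),
  ...destNode.stream.getAudioTracks(),
]);
const recorder = new MediaRecorder(combined); // then record as in the snippet above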
I am writing code to detect the frequency being played using the Web Audio API FFT, targeting high frequencies above 18kHz. It works perfectly in Chrome, Firefox and Edge, but in Safari it is very inaccurate and doesn't register frequencies above 19kHz at all. Is there a known issue with Safari struggling to detect the highest frequencies?
My code is as follows:
async connectMicAudio() {
try {
let stream;
const constraints = {
echoCancellation: false,
noiseSuppression: false,
autoGainControl: false,
mozAutoGainControl: false,
mozNoiseSuppression: false,
};
if (navigator.mediaDevices.getUserMedia) {
stream = await navigator.mediaDevices.getUserMedia({
audio: true,
});
} else {
navigator.getUserMedia = // account for different browsers
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
stream = await navigator.getUserMedia({
audio: true,
});
}
stream.getAudioTracks()[0].applyConstraints(constraints);
this.audioStream = stream;
} catch (error) {
console.log('Error: ', error);
}
let stream = this.audioStream;
const AudioContext = window.AudioContext || window.webkitAudioContext;
const audioContext = new AudioContext();
if (!this.sampleRate) {
this.sampleRate = audioContext.sampleRate;
}
const analyser = audioContext.createAnalyser();
analyser.fftSize = audioParams.FFT_SIZE;
analyser.smoothingTimeConstant = 0;
try {
const microphone = audioContext.createMediaStreamSource(stream);
microphone.connect(analyser);
} catch {
alert('Please allow microphone access.');
}
const processor = audioContext.createScriptProcessor(
audioParams.BUFFER_SIZE,
1,
1
); // single channel audio (mono)
processor.connect(audioContext.destination);
this.analyser = analyser;
this.processor = processor;
}
Then for the FFT I use:
this.processor.onaudioprocess = () => {
let fftData = new Float32Array(analyser.frequencyBinCount);
analyser.getFloatFrequencyData(fftData);
let highPassed = highPass(fftData, this.sampleRate);
let loudest = findLoudestFreqBin(highPassed, this.sampleRate);
console.log('loudest: ', loudest);
};
This extracts the frequency data for each buffer; highPass just zeroes anything under 18kHz, and findLoudestFreqBin returns the centre frequency of the bin with the highest amplitude. When I play anything above 19kHz on Safari, the highest amplitude is always in a lower bin, in the 18-19kHz range.
As I said, this code works perfectly in the other browsers, so I'm assuming it's an issue with Safari's implementation of the Web Audio API. If anyone has had a similar issue and knows a workaround, that would be a huge help.
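For reference, the two helpers described above aren't shown in the question; a rough sketch of what they might look like (not the asker's actual code, and assuming fftData holds the dB values from getFloatFrequencyData) is:
// Floor everything below the cutoff to -Infinity (the data is in dB, so zeroing would make those bins loudest).
function highPass(fftData, sampleRate, cutoffHz = 18000) {
  // frequencyBinCount = fftSize / 2, so each bin spans sampleRate / (2 * length) Hz.
  const binWidth = sampleRate / (2 * fftData.length);
  return fftData.map((value, i) => (i * binWidth < cutoffHz ? -Infinity : value));
}

// Return the centre frequency of the bin with the highest amplitude.
function findLoudestFreqBin(fftData, sampleRate) {
  const binWidth = sampleRate / (2 * fftData.length);
  let loudest = 0;
  for (let i = 1; i < fftData.length; i++) {
    if (fftData[i] > fftData[loudest]) loudest = i;
  }
  return (loudest + 0.5) * binWidth;
}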
I am pretty sure I did everything correctly, but when I try to play or download the file, nothing plays. I am using the Web Audio API to record audio from the microphone into WAV format, and this library to create the .wav file. It seems like nothing is being encoded.
navigator.mediaDevices.getUserMedia({
audio: true,video:false
})
.then((stream) => {
var data
context = new AudioContext()
var source = context.createMediaStreamSource(stream)
var scriptNode = context.createScriptProcessor(8192, 1, 1)
source.connect(scriptNode)
scriptNode.connect(context.destination)
encoder = new WavAudioEncoder(16000,1)
scriptNode.onaudioprocess = function(e){
data = e.inputBuffer.getChannelData('0')
console.log(data)
encoder.encode(data)
}
$('#stop').click(()=>{
source.disconnect()
scriptNode.disconnect()
blob = encoder.finish()
console.log(blob)
url = window.URL.createObjectURL(blob)
// audio source
$('#player').attr('src',url)
// audio control
$("#pw")[0].load()
})
})
I figured it out! To help anyone who needs to do the same thing: it uses the Web Audio API and this JavaScript library.
navigator.mediaDevices.getUserMedia({
audio: true,video:false
})
.then((stream) => {
context = new AudioContext()
var source = context.createMediaStreamSource(stream)
var rec = new Recorder(source)
rec.record()
$('#stop').click(()=>{
rec.stop()
blob = rec.exportWAV(somefunction) // exportWAV() returns your file
})
})
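somefunction isn't shown above; a minimal sketch of what that callback might do, reusing the #player and #pw elements from the question, could be:
// Hypothetical exportWAV callback: turn the WAV Blob into a playable URL.
function somefunction(blob) {
  var url = window.URL.createObjectURL(blob)
  $('#player').attr('src', url) // same <audio id="player"> as in the question
  $('#pw')[0].load()
}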
Use RecordRTC for recording video and audio. I used it in my project and it works well. Here is the code to record audio using recordrtc.org:
startRecording(event) { // call this to start recording the Audio( or video or Both)
this.recording = true;
let mediaConstraints = {
audio: true
};
// Older browsers might not implement mediaDevices at all, so we set an empty object first
if (navigator.mediaDevices === undefined) {
navigator.mediaDevices = {};
}
// Some browsers partially implement mediaDevices. We can't just assign an object
// with getUserMedia as it would overwrite existing properties.
// Here, we will just add the getUserMedia property if it's missing.
if (navigator.mediaDevices.getUserMedia === undefined) {
navigator.mediaDevices.getUserMedia = function(constraints) {
// First get ahold of the legacy getUserMedia, if present
var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
// Some browsers just don't implement it - return a rejected promise with an error
// to keep a consistent interface
if (!getUserMedia) {
return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
}
// Otherwise, wrap the call to the old navigator.getUserMedia with a Promise
return new Promise(function(resolve, reject) {
getUserMedia.call(navigator, constraints, resolve, reject);
});
}
}
navigator.mediaDevices.getUserMedia(mediaConstraints)
.then(successCallback.bind(this), errorCallback.bind(this));
}
successCallback(stream: MediaStream) {
var options = {
type: 'audio'
};
this.stream = stream;
this.recordRTC = RecordRTC(stream, options);
this.recordRTC.startRecording();
}
errorCallback(stream: MediaStream) {
console.log(stream);
}
stopRecording() { // call this to stop recording
this.recording = false;
this.converting = true;
let recordRTC = this.recordRTC;
if(!recordRTC) return;
recordRTC.stopRecording(this.processAudio.bind(this));
this.stream.getAudioTracks().forEach(track => track.stop());
}
processAudio(audioVideoWebMURL) {
let recordRTC = this.recordRTC;
var recordedBlob = recordRTC.getBlob(); // you can save the recorded media data in various formats, refer the link below.
console.log(recordedBlob)
this.recordRTC.save('audiorecording.wav');
let base64Data = '';
this.recordRTC.getDataURL((dataURL) => {
base64Data = dataURL.split('base64,')[1];
console.log(RecordRTC.getFromDisk('audio', function(dataURL,type) {
type == 'audio'
}));
console.log(dataURL);
})
}
Note that you cannot record audio/video from a live site in Google Chrome unless your site is served over HTTPS.
I want to know how to set the volume in WebRTC.
I'm attaching the audio element like this:
audio = document.createElement('audio');
audio.controls = true;
audio.autoplay = true;
audio.src = window.URL.createObjectURL(stream);
div.appendChild(audio);
I want to make my own custom audio UI, so I will use an HTML slider:
<input type="range">
But I don't know how to set the volume of a WebRTC stream. How can I set it?
For output (speaker) audio volume, you can use the volume property of the audio/video element:
var audio = document.getElementById('audioId');
audio.volume = 0.9; // 0.0(Silent) -> 1 (Loudest)
You can change audio.volume based on your slider position, as in the sketch below.
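For example, a minimal sketch wiring a range input to the element's volume (the element id and step value here are just assumptions):
var speakerSlider = document.getElementById('speaker-volume'); // hypothetical <input type="range" min="0" max="1" step="0.01">
speakerSlider.addEventListener('input', function (event) {
  audio.volume = parseFloat(event.target.value); // 0.0 (silent) -> 1 (loudest)
});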
To change the input (microphone) volume, there is no direct method available on a WebRTC AudioTrack/MediaStream.
We can use the Web Audio API to control the volume at the stream/track level, and connect the Web Audio output to the PeerConnection as follows:
var audioContext = new AudioContext()
var gainNode = audioContext.createGain();
navigator.mediaDevices.getUserMedia({audio:true})
.then((stream) => {
console.log('got stream', stream);
window.originalStream = stream;
return stream;
})
.then((stream) => {
var audioSource = audioContext.createMediaStreamSource(stream),
audioDestination = audioContext.createMediaStreamDestination();
audioSource.connect(gainNode);
gainNode.connect(audioDestination);
gainNode.gain.value = 1;
window.localStream = audioDestination.stream;
//audioElement.srcObject = window.localStream; //for playback
//you can add this stream to pc object
// pc.addStream(window.localStream);
})
.catch((err) => {
console.error('Something wrong in capture stream', err);
})
Now we can easily control the microphone volume with the function below:
function changeMicrophoneLevel(value) {
if(value && value >= 0 && value <= 2) {
gainNode.gain.value = value;
}
}
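For example, assuming a range input like the one in the question, with min 0 and max 2, it could be wired up like this:
var micSlider = document.getElementById('mic-volume'); // hypothetical <input type="range" min="0" max="2" step="0.05">
micSlider.addEventListener('input', function (event) {
  changeMicrophoneLevel(parseFloat(event.target.value));
});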
For more info, have a look at my demo.
I'm trying to create an audio stream in the browser and send it to a server.
Here is the code:
let recording = false;
let localStream = null;
const session = {
audio: true,
video: false
};
function start () {
recording = true;
navigator.webkitGetUserMedia(session, initializeRecorder, onError);
}
function stop () {
recording = false;
localStream.getAudioTracks()[0].stop();
}
function initializeRecorder (stream) {
localStream = stream;
const audioContext = window.AudioContext;
const context = new audioContext();
const audioInput = context.createMediaStreamSource(localStream);
const bufferSize = 2048;
// create a javascript node
const recorder = context.createScriptProcessor(bufferSize, 1, 1);
// specify the processing function
recorder.onaudioprocess = recorderProcess;
// connect stream to our recorder
audioInput.connect(recorder);
// connect our recorder to the previous destination
recorder.connect(context.destination);
}
function onError (e) {
console.log('error:', e);
}
function recorderProcess (e) {
if (!recording) return;
const left = e.inputBuffer.getChannelData(0);
// send left to server here (socket.io can do the job). We dont need stereo.
}
When the start function is fired, the samples can be caught in recorderProcess.
When the stop function is fired, the mic icon in the browser disappears, but...
unless I put if (!recording) return at the beginning of recorderProcess, it still processes samples.
Unfortunately that's not really a solution - the samples are still received by recorderProcess, and if I fire the start function once more, it will get all the samples from the previous stream as well as from the new one.
My question is:
How can I stop/start recording without this issue?
Or, if that's not the best approach:
How can I completely remove the stream in the stop function, so that it can be safely initialized again at any time?
recorder.disconnect() should help.
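For example, a sketch of a fuller stop, assuming recorder and audioInput are stored in outer scope instead of as const locals inside initializeRecorder:
let recorder = null;
let audioInput = null; // assign these inside initializeRecorder

function stop () {
  recording = false;
  // Tear down the processing graph so recorderProcess stops firing.
  if (audioInput) audioInput.disconnect();
  if (recorder) recorder.disconnect();
  // Stop the microphone track so the browser releases the device.
  localStream.getAudioTracks()[0].stop();
}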
You might also want to consider the new MediaRecorder functionality in Chrome Canary, shown at https://webrtc.github.io/samples/src/content/getusermedia/record/ (currently video-only, I think), instead of the Web Audio API.
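In current browsers MediaRecorder handles audio as well; a minimal sketch of recording the microphone and collecting chunks that could then be sent to a server might look like this:
// Sketch: record microphone audio with MediaRecorder instead of a ScriptProcessor.
navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
  const recorder = new MediaRecorder(stream);
  const chunks = [];
  recorder.ondataavailable = (e) => chunks.push(e.data);
  recorder.onstop = () => {
    const blob = new Blob(chunks, { type: recorder.mimeType });
    // send the blob to the server, e.g. via fetch() or socket.io
  };
  recorder.start();
  // later: recorder.stop(); stream.getTracks().forEach((t) => t.stop());
});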