How to set volumes in WebRTC? - javascript

I want to know how to set the volume in WebRTC.
I'm creating the audio element like this:
var audio = document.createElement('audio');
audio.controls = true;
audio.autoplay = true;
// note: createObjectURL(MediaStream) is deprecated; modern browsers need audio.srcObject = stream
audio.src = window.URL.createObjectURL(stream);
div.appendChild(audio);
I want to build my own custom audio UI, so I will use an HTML slider:
<input type="range">
But I don't know how to set the volume of a WebRTC stream. How can I do it?

For output (speaker) volume, you can use the volume property of the audio/video element:
var audio = document.getElementById('audioId');
audio.volume = 0.9; // 0.0 (silent) -> 1.0 (loudest)
You can update audio.volume from your slider's position.
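For example, here is a minimal sketch of wiring a slider to the element (the id volume-slider and the min/max/step attributes are assumptions, not from the original markup):
// <input type="range" id="volume-slider" min="0" max="1" step="0.01">
var slider = document.getElementById('volume-slider');
slider.addEventListener('input', function () {
  audio.volume = parseFloat(slider.value); // volume accepts 0.0 - 1.0
});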
To change the input (microphone) volume, there is no direct method on a WebRTC AudioTrack/MediaStream.
We can use the Web Audio API to control the volume at the stream/track level, and connect the Web Audio output to the PeerConnection as follows:
var audioContext = new AudioContext();
var gainNode = audioContext.createGain();
var audioSource, audioDestination;

navigator.mediaDevices.getUserMedia({ audio: true })
  .then((stream) => {
    console.log('got stream', stream);
    window.originalStream = stream;
    return stream;
  })
  .then((stream) => {
    audioSource = audioContext.createMediaStreamSource(stream);
    audioDestination = audioContext.createMediaStreamDestination();
    audioSource.connect(gainNode);
    gainNode.connect(audioDestination);
    gainNode.gain.value = 1;
    window.localStream = audioDestination.stream;
    // audioElement.srcObject = window.localStream; // for playback
    // you can add this stream to the pc object
    // pc.addStream(window.localStream);
  })
  .catch((err) => {
    console.error('Something went wrong capturing the stream', err);
  });
Now we can easily control the microphone volume with the function below:
function changeMicrophoneLevel(value) {
  // note: checking `value &&` would wrongly reject 0 (mute), so test the type instead
  if (typeof value === 'number' && value >= 0 && value <= 2) {
    gainNode.gain.value = value;
  }
}
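To tie this back to the slider in the question, a minimal sketch (the element id and range attributes are assumptions):
// <input type="range" id="mic-slider" min="0" max="2" step="0.1">
document.getElementById('mic-slider').addEventListener('input', function (e) {
  changeMicrophoneLevel(parseFloat(e.target.value));
});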
For more info have a look at my demo

Related

Procedural Audio using MediaStreamTrack

I want to encode a video (from a canvas) and add procedural audio to it.
The encoding can be accomplished with MediaRecorder that takes a MediaStream.
For the stream, I want to obtain the video part from a canvas, using the canvas.captureStream() call.
I want to add an audio track to the stream. But instead of microphone input, I want to generate the samples on the fly; for simplicity's sake, let's assume it writes out a sine wave.
How can I create a MediaStreamTrack that generates procedural audio?
The Web Audio API has a createMediaStreamDestination() method, which returns a MediaStreamAudioDestinationNode. You can connect your audio graph to this node, and its stream property gives you a MediaStream fed by the audio context's output.
document.querySelector("button").onclick = (evt) => {
  const duration = 5;
  evt.target.remove();

  const audioContext = new AudioContext();
  const osc = audioContext.createOscillator();
  const destNode = audioContext.createMediaStreamDestination();
  const { stream } = destNode;

  osc.connect(destNode);                 // feed the MediaStream
  osc.connect(audioContext.destination); // also play it out loud
  osc.start(0);
  osc.frequency.value = 80;
  osc.frequency.exponentialRampToValueAtTime(440, audioContext.currentTime + 10);
  osc.stop(duration);

  // stream.addTrack(canvasStream.getVideoTracks()[0]);

  const recorder = new MediaRecorder(stream);
  const chunks = [];
  recorder.ondataavailable = ({ data }) => chunks.push(data);
  recorder.onstop = (evt) => {
    const el = new Audio();
    const [{ type }] = chunks; // for Safari
    el.src = URL.createObjectURL(new Blob(chunks, { type }));
    el.controls = true;
    document.body.append(el);
  };
  recorder.start();
  setTimeout(() => recorder.stop(), duration * 1000);
  console.log(`Started recording, please wait ${duration}s`);
};
<button>begin</button>
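To get the combined canvas + procedural-audio recording the question asks about, you can uncomment the addTrack line above; a sketch, assuming canvas refers to the canvas you are drawing to:
const canvasStream = canvas.captureStream(30); // 30 fps; adjust as needed
stream.addTrack(canvasStream.getVideoTracks()[0]);
// `stream` now carries both the oscillator audio and the canvas video,
// so new MediaRecorder(stream) records them together (pick a video mime type)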

How would I recognize a certain tone/beep/sound in Client-Side JS? (using Web Audio API)

I am trying to recognize a certain audio clip heard using the microphone on a browser. This clip could be a beep/short tone of a specific preferably high frequency that is played by another device (something similar to how Google Tone works).
I am new to the Web Audio API. I'm currently using getByteFrequencyData() on an AnalyserNode to check for a high decibel value in a particular frequency bin, and treating that as "recognized". But as you may have guessed, this is a useless hack and doesn't work half the time.
I have the original sound bite that is being played on the other device, and it is in the 10,000-12,000 Hz range so that other sounds in the environment do not interfere with the recognition. Is there any way to recognize such a beep in a browser in a decent amount of time?
Sound bite and sample code attached.
let AudioContext = window.AudioContext || window.webkitAudioContext;
let audioContext = new AudioContext();
let x = {}; // holds the analyser state once the stream is ready

navigator.mediaDevices
  .getUserMedia({ audio: true })
  .then((stream) => {
    let audioStream = audioContext.createMediaStreamSource(stream);
    let analyser = audioContext.createAnalyser();
    analyser.fftSize = 2048;
    audioStream.connect(analyser);
    x.analyser = analyser;
    x.bufferLength = analyser.frequencyBinCount;
    x.frequencyArray = new Uint8Array(x.bufferLength); // allocate once the analyser exists
    getFrequency();
  })
  .catch(
    (error) =>
      (userAlert.innerHTML =
        'Please allow access to your microphone.<br />' + error)
  );

let getFrequency = () => {
  let totalFrequency = 0;
  x.analyser.getByteFrequencyData(x.frequencyArray);
  for (let i = 0; i < 5; i++) {
    totalFrequency += Math.floor(x.frequencyArray[i + 52]);
  }
  if (totalFrequency > 500) {
    // recognized
  } else {
    // continue
  }
  // schedule the next check; calling getFrequency() synchronously here
  // would recurse forever and overflow the stack
  requestAnimationFrame(getFrequency);
};
Tone example (assume a 0.2 s sound bite)
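One way to make the hard-coded bin range above less arbitrary is to derive the bin indices from the analyser's resolution, since the tone sits in a known 10-12 kHz band. A sketch, reusing x from the snippet above; the threshold is an assumption you would tune empirically:
// each FFT bin covers sampleRate / fftSize Hz (e.g. 48000 / 2048 ≈ 23.4 Hz)
const binHz = audioContext.sampleRate / x.analyser.fftSize;
const loBin = Math.round(10000 / binHz);
const hiBin = Math.round(12000 / binHz);

function toneEnergy() {
  x.analyser.getByteFrequencyData(x.frequencyArray);
  let sum = 0;
  for (let i = loBin; i <= hiBin; i++) sum += x.frequencyArray[i];
  return sum / (hiBin - loBin + 1); // average byte magnitude (0-255) in the band
}
// e.g. treat toneEnergy() above some tuned threshold, sustained for a few
// consecutive frames, as "recognized"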

WebRTC - How to adjust microphone volume on a video stream?

I'm trying to adjust the microphone volume in a WebRTC chat app that uses two video elements for streaming.
Is it possible to modify the gain of the microphone? If yes, how can I do it with the following streams I'm using?
/*********************** video call ***************************/
var localStream;
var localVideo = document.getElementById("localVideo");
var remoteVideo = document.getElementById("remoteVideo");
var callButton = document.getElementById("callButton");
var inputLevelSelector = document.getElementById('mic-volume');
var outputLevelSelector = document.getElementById('speaker-volume');

inputLevelSelector.addEventListener('change', changeMicrophoneLevel);
outputLevelSelector.addEventListener('change', changeSpeakerLevel);

callButton.disabled = true;
callButton.onclick = call;

navigator.getUserMedia = navigator.getUserMedia ||
  navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

navigator.getUserMedia({
  audio: true,
  video: true
}, gotStream, // note that we are adding both audio and video
function (error) {
  console.log(error);
});

var RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
var SessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription || window.webkitRTCSessionDescription;

var pc = new RTCPeerConnection({
  "iceServers": []
});

function gotStream(stream) {
  // localVideo.src = window.URL.createObjectURL(stream); // DEPRECATED
  localVideo.srcObject = stream; // UPDATED
  localStream = stream;
  callButton.disabled = false;
  pc.addStream(stream);
}

pc.onicecandidate = function (event) {
  console.log(event);
  if (!event || !event.candidate) {
    return;
  } else {
    socket.emit("video call", {
      type: "iceCandidate",
      "candidate": event.candidate
    });
  }
};

var remoteStream;
pc.onaddstream = function (event) {
  remoteStream = event.stream;
  var remoteVideo = document.getElementById("remoteVideo");
  // remoteVideo.src = window.URL.createObjectURL(event.stream); // DEPRECATED
  remoteVideo.srcObject = event.stream; // UPDATED
  remoteVideo.play();
};
Please note that I'm a newbie, so take it easy! :)
Below is an implementation of the post I shared in the comments. It's the same setup, but with your code included.
You first create your nodes from the Web Audio API. Because you want to manipulate the volume of a stream, you'll need a MediaStreamAudioSourceNode, a MediaStreamAudioDestinationNode and a GainNode. The MediaStreamAudioSourceNode is the entry point for the stream; by feeding the stream in here, we can route it through the gain. The stream passes through the GainNode, where the volume is controlled, and then on to the MediaStreamAudioDestinationNode, where you can use it again in your RTC client.
From there, use the stream you get from the MediaStreamAudioDestinationNode.stream property.
Edit:
It turns out that MediaStreamAudioDestinationNode.stream is a MediaStream object with only an audio track. So the video has been stripped from the stream and has to be rejoined.
So when you have access to the stream, get its video tracks and store them in a variable. Then, after passing the stream through the Web Audio API, join the video tracks back with the audio track that went through the GainNode.
var inputLevelSelector = document.getElementById('mic-volume');

// Renamed the variable after your comment.
var peerConnection = new RTCPeerConnection({
  "iceServers": []
});

function gotStream(stream) {
  // Get the videoTracks from the stream.
  const videoTracks = stream.getVideoTracks();

  /**
   * Create a new audio context and build a stream source,
   * stream destination and a gain node. Pass the stream into
   * the mediaStreamSource so we can use it in the Web Audio API.
   */
  const context = new AudioContext();
  const mediaStreamSource = context.createMediaStreamSource(stream);
  const mediaStreamDestination = context.createMediaStreamDestination();
  const gainNode = context.createGain();

  /**
   * Connect the stream to the gainNode so that all audio
   * passes through the gain and can be controlled by it.
   * Then pass the stream from the gain to the mediaStreamDestination
   * which can pass it back to the RTC client.
   */
  mediaStreamSource.connect(gainNode);
  gainNode.connect(mediaStreamDestination);

  /**
   * Change the gain levels on the input selector.
   */
  inputLevelSelector.addEventListener('input', event => {
    gainNode.gain.value = event.target.value;
  });

  /**
   * The mediaStreamDestination.stream outputs a MediaStream object
   * containing a single AudioMediaStreamTrack. Add the video track
   * to the new stream to rejoin the video with the controlled audio.
   */
  const controlledStream = mediaStreamDestination.stream;
  for (const videoTrack of videoTracks) {
    controlledStream.addTrack(videoTrack);
  }

  /**
   * Use the stream that went through the gainNode. This
   * is the same stream but with altered input volume levels.
   */
  localVideo.srcObject = controlledStream;
  localStream = controlledStream;
  peerConnection.addStream(controlledStream);
  callButton.disabled = false;
}
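One note on the code above: addStream() is deprecated in the current WebRTC spec. Where supported, the modern equivalent, using the same controlledStream, would be:
// add each track individually instead of the deprecated addStream()
controlledStream.getTracks().forEach(function (track) {
  peerConnection.addTrack(track, controlledStream);
});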

How to use Web Audio API to create .wav file?

I am pretty sure I did everything correctly, but when I try to play or download the file, nothing plays. I am using the Web Audio API to record audio from the microphone into WAV format, and this library to create the .wav file. It seems like nothing is being encoded.
navigator.mediaDevices.getUserMedia({
  audio: true, video: false
})
.then((stream) => {
  var data;
  var context = new AudioContext();
  var source = context.createMediaStreamSource(stream);
  var scriptNode = context.createScriptProcessor(8192, 1, 1);
  source.connect(scriptNode);
  scriptNode.connect(context.destination);
  // the sample rate passed to the encoder should match context.sampleRate
  // (typically 44100 or 48000); a hard-coded 16000 makes the WAV header
  // disagree with the captured samples
  var encoder = new WavAudioEncoder(context.sampleRate, 1);
  scriptNode.onaudioprocess = function (e) {
    data = e.inputBuffer.getChannelData(0); // channel index is a number
    console.log(data);
    // depending on the encoder library, encode() may expect an array of
    // per-channel buffers, i.e. encoder.encode([data])
    encoder.encode(data);
  };
  $('#stop').click(() => {
    source.disconnect();
    scriptNode.disconnect();
    var blob = encoder.finish();
    console.log(blob);
    var url = window.URL.createObjectURL(blob);
    // audio source
    $('#player').attr('src', url);
    // audio control
    $("#pw")[0].load();
  });
});
I figured it out! To help anyone who needs to do the same thing: it uses the Web Audio API and this JavaScript library.
navigator.mediaDevices.getUserMedia({
  audio: true, video: false
})
.then((stream) => {
  var context = new AudioContext();
  var source = context.createMediaStreamSource(stream);
  var rec = new Recorder(source);
  rec.record();
  $('#stop').click(() => {
    rec.stop();
    // exportWAV() doesn't return the file; it passes the encoded WAV blob to your callback
    rec.exportWAV(function (blob) {
      $('#player').attr('src', window.URL.createObjectURL(blob));
    });
  });
});
Alternatively, use RecordRTC for recording video and audio. I used it in my project and it's working well. Here is the code to record audio using recordrtc.org:
startRecording(event) { // call this to start recording the audio (or video, or both)
  this.recording = true;
  let mediaConstraints = {
    audio: true
  };

  // Older browsers might not implement mediaDevices at all, so we set an empty object first
  if (navigator.mediaDevices === undefined) {
    navigator.mediaDevices = {};
  }

  // Some browsers partially implement mediaDevices. We can't just assign an object
  // with getUserMedia as it would overwrite existing properties.
  // Here, we will just add the getUserMedia property if it's missing.
  if (navigator.mediaDevices.getUserMedia === undefined) {
    navigator.mediaDevices.getUserMedia = function(constraints) {
      // First get ahold of the legacy getUserMedia, if present
      var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

      // Some browsers just don't implement it - return a rejected promise with an error
      // to keep a consistent interface
      if (!getUserMedia) {
        return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
      }

      // Otherwise, wrap the call to the old navigator.getUserMedia with a Promise
      return new Promise(function(resolve, reject) {
        getUserMedia.call(navigator, constraints, resolve, reject);
      });
    };
  }

  navigator.mediaDevices.getUserMedia(mediaConstraints)
    .then(this.successCallback.bind(this), this.errorCallback.bind(this)); // class methods, so reference them via `this`
}

successCallback(stream: MediaStream) {
  var options = {
    type: 'audio'
  };
  this.stream = stream;
  this.recordRTC = RecordRTC(stream, options);
  this.recordRTC.startRecording();
}

errorCallback(error) { // receives the getUserMedia error, not a stream
  console.log(error);
}

stopRecording() { // call this to stop recording
  this.recording = false;
  this.converting = true;
  let recordRTC = this.recordRTC;
  if (!recordRTC) return;
  recordRTC.stopRecording(this.processAudio.bind(this));
  this.stream.getAudioTracks().forEach(track => track.stop());
}

processAudio(audioVideoWebMURL) {
  let recordRTC = this.recordRTC;
  var recordedBlob = recordRTC.getBlob(); // you can save the recorded media data in various formats
  console.log(recordedBlob);
  this.recordRTC.save('audiorecording.wav');

  let base64Data = '';
  this.recordRTC.getDataURL((dataURL) => {
    base64Data = dataURL.split('base64,')[1];
    console.log(RecordRTC.getFromDisk('audio', function(dataURL, type) {
      type == 'audio';
    }));
    console.log(dataURL);
  });
}
Note that you cannot record audio/video from a live site in Google Chrome unless your site is served over HTTPS.

Obtain MediaStream from input device

Looking for experience working with media devices:
I'm working on recording to cache and playing back from a microphone source, in Firefox & Chrome using HTML5.
This is what I have so far:
var constraints = {audio: true, video: false};
var promise = navigator.mediaDevices.getUserMedia(constraints);
I've been checking the official MDN documentation on getUserMedia, but found nothing about storing the audio from the stream in a cache.
No such question has been asked previously on Stack Overflow; I'm wondering if it's possible.
Thank you.
You can simply use the MediaRecorder API for such a task.
In order to record only the audio from your video+audio gUM stream, you will need to create a new MediaStream from the gUM stream's audio track:
// using async for brevity
async function doit() {
  // first request both mic and camera
  const gUMStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
  // create a new MediaStream with only the audioTrack
  const audioStream = new MediaStream(gUMStream.getAudioTracks());
  // to save recorded data
  const chunks = [];
  const recorder = new MediaRecorder(audioStream);
  recorder.ondataavailable = e => chunks.push(e.data);
  recorder.start();
  // when user decides to stop
  stop_btn.onclick = e => {
    recorder.stop();
    // kill all tracks to free the devices
    gUMStream.getTracks().forEach(t => t.stop());
    audioStream.getTracks().forEach(t => t.stop());
  };
  // export all the saved data as one Blob
  recorder.onstop = e => exportMedia(new Blob(chunks));
  // play current gUM stream
  vid.srcObject = gUMStream;
  stop_btn.disabled = false;
}

function exportMedia(blob) {
  // here blob is your recorded audio file, you can do whatever you want with it
  const aud = new Audio(URL.createObjectURL(blob));
  aud.controls = true;
  document.body.appendChild(aud);
  document.body.removeChild(vid);
}

doit()
  .then(e => console.log("recording"))
  .catch(e => {
    console.error(e);
    console.log('you may want to try from jsfiddle: https://jsfiddle.net/5s2zabb2/');
  });
<video id="vid" controls autoplay></video>
<button id="stop_btn" disabled>stop</button>
And here it is as a fiddle, since Stack Snippets don't work very well with gUM...
