How to find out which microphone device user gave permission to? - javascript

I ask user the permission to use Camera and Microphone:
await navigator.mediaDevices.getUserMedia({ audio: true, video: true });
And in Firefox, I get the following prompt:
Once the user gave the permission, how can I tell which Camera and Microphone was selected? The return value of getUserMedia doesn't provide much info.

Once gUM has given you a stream object do something like this:
/**
 * Resolve the human-readable label of the microphone feeding `stream`.
 *
 * Reads the deviceId from the first audio track's settings, then matches it
 * against navigator.mediaDevices.enumerateDevices().
 *
 * @param {MediaStream} stream - a stream obtained from getUserMedia
 * @returns {Promise<string>} the device label, or 'unknown' when the stream
 *   has no audio track, no deviceId, or no matching enumerated device
 *   (labels are empty until the user has granted media permission).
 */
async function getAudioDeviceLabel(stream) {
  let audioDeviceLabel = 'unknown'
  // getAudioTracks() always returns an array; destructure the first entry.
  const [track] = stream.getAudioTracks()
  const chosenDeviceId = track?.getSettings().deviceId
  if (chosenDeviceId) {
    const devices = await navigator.mediaDevices.enumerateDevices()
    // find() replaces the previous filter(...)[0] pattern.
    const match = devices.find(device => device.deviceId === chosenDeviceId)
    if (match) audioDeviceLabel = match.label
  }
  return audioDeviceLabel
}
This gets the deviceId of the audio track of your stream, from its settings. It then looks at the list of enumerated devices to retrieve the label associated with the deviceId.
It is unfortunately rather cumbersome to get this information.

Related

Cloning a MediaStream object on iOS Chrome - second stream freezes temporarily

I am trying to create a web application with some video chat functionality, but trying to get it to work on mobile (specifically, Chrome for iOS) is giving me fits.
What I would like to do is have users be able to join a game, and join a team within that game. There are two tabs on the page for players - a "Team" tab and a "Game" tab. When the player selects the game tab, they may talk to all participants in the entire game (e.g. to ask the host/moderator a question). When the team tab is selected, the player's stream to the game is muted, and only the player's team can hear them talk. As a result, I believe I need two MediaStream objects for each player - one to stream to the game, and one to stream to the player's team - this way, I can mute one while keeping the other unmuted.
There is an iOS quirk where you can only call the getUserMedia() function once, so I need to clone the stream using MediaStream.clone(). AddVideoStream is a function that just adds the video to the appropriate grid of videos, and it appears to work properly.
The problem is - when I use my iPhone 12 to connect to the game, I can see my video just fine, but when I click over to the "game" tab and look at the second stream, the stream works for a second, and then freezes. The weird thing is, if I open a new tab in Chrome, and then go back to the game tab, both videos seem to run smoothly.
Has anyone ever tried something similar, and figured out why this behavior occurs?
// Create a PeerJS client; the server assigns our peer id asynchronously.
const myPeer = new Peer(undefined);
// NOTE(review): 'open' fires asynchronously — `myUserId` may still be
// undefined when the getUserMedia .then() below runs and calls
// addVideoStream(..., myUserId, ...); confirm the intended ordering.
myPeer.on('open', (userId) => {
myUserId = userId;
console.log(`UserId: ${myUserId}`);
// Tell the signalling server which peer id belongs to this socket.
socket.emit('set-peer-id', {
id: userId,
});
});
// Local <video> elements: one for the whole-game grid, one for the team grid.
// `playsinline` keeps iOS Safari from forcing fullscreen playback; muting our
// own elements avoids echoing our microphone back to us.
const myVideo = document.createElement('video');
myVideo.setAttribute('playsinline', true);
myVideo.muted = true;
const myTeamVideo = document.createElement('video');
myTeamVideo.setAttribute('playsinline', true);
myTeamVideo.muted = true;
// NOTE(review): this fallback chain mixes an object (navigator.mediaDevices)
// with legacy *functions* (webkitGetUserMedia / mozGetUserMedia). The call
// `myStream.getUserMedia(...)` below only exists on navigator.mediaDevices,
// so the legacy branches would throw — verify whether they are still needed.
const myStream =
// (navigator.mediaDevices ? navigator.mediaDevices.getUserMedia : undefined) ||
navigator.mediaDevices ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia;
let myVideoStream;
let myTeamStream;
if (myStream) {
myStream
.getUserMedia({
video: true,
audio: true,
})
.then((stream) => {
// iOS allows only one live getUserMedia capture, so the team stream is a
// clone of the single captured stream rather than a second capture.
myVideoStream = stream;
myTeamStream = stream.clone();
addVideoStream(myTeamVideo, myTeamStream, myUserId, teamVideoGrid);
addVideoStream(myVideo, myVideoStream, myUserId, videoGrid);
// Answer incoming PeerJS calls with the original stream, and attach the
// remote stream to the team grid or game grid depending on whether the
// caller is a known teammate.
myPeer.on('call', (call) => {
call.answer(stream);
const video = document.createElement('video');
video.setAttribute('playsinline', true);
call.on('stream', (userVideoStream) => {
const teammate = teammates.find((t) => {
return t.peerId === call.peer;
});
if (teammate) {
addVideoStream(
video,
userVideoStream,
call.peer,
teamVideoGrid,
teammate.name
);
} else {
addVideoStream(video, userVideoStream, call.peer, videoGrid);
}
});
call.on('close', () => {
console.log(`Call with ${call.peer} closed`);
});
});
// When another player joins, call them with the stream matching their team
// relationship: the cloned team stream for teammates, the game stream
// otherwise.
socket.on('player-joined', (data) => {
addMessage({
name: 'System',
isHost: false,
message: `${data.name} has joined the game.`,
});
if (data.id !== myUserId) {
if (data.teamId !== teamId) {
connectToNewUser(data.peerId, myVideoStream, videoGrid, data.name);
} else {
connectToNewUser(
data.peerId,
myTeamStream,
teamVideoGrid,
data.name
);
}
}
});
});
}

Is it possible to play the same audio on two audio output devices simultaneously

I'm trying to make a script that gets the audio from an audio element and plays it on the headset and laptop speakers at the same time.
Is this possible?
This is the code that I wrote so far:
// All physical audio-output devices, skipping the virtual 'default' and
// 'communications' aliases (which would duplicate a real device).
const outputs = (await navigator.mediaDevices.enumerateDevices()).filter(({ kind, deviceId }) => kind === 'audiooutput' && deviceId !== 'default' && deviceId !== 'communications' );
// players[i] holds one element per output device for the i-th <audio> on the
// page: the original element first, then clones for every additional output.
var players = [];
document.querySelectorAll('audio').forEach((y)=>{
players.push([]); let l = players.length-1;
outputs.forEach((x)=>{
if(players[l].length===0) players[l].push(y);
else {
// Clone both the element and its MediaStream so each copy can be routed
// to a different sink independently.
let p = y.cloneNode();
p.srcObject = players[l][0].srcObject.clone();
players[l].push(p);
}
});
})
// Route each copy to its own sink and start playback.
// NOTE(review): setSinkId() returns a promise, and play() is issued before
// that sink switch resolves; the original element is also re-routed away from
// the default sink — presumably why sound is heard on only one device (the
// reported symptom). Verify by awaiting setSinkId before play().
players.forEach((a)=>{
a.forEach((o, i)=>{
o.setSinkId(outputs[i].deviceId);
o.play();
})
})
The issue with this code is that it makes the audio only play on the other speaker instead of playing it on both.
Note that the window has access to mic so I can see all the devices from navigator.mediaDevices.enumerateDevices().
The script is intended to work mainly on Edge and Chrome.
I found a solution!
Since I wasn't able to play sound on multiple devices at once in the same window I managed to get this done by creating iframes that contain audio elements.
Then I set to all of those audio elements the same source and I set them to output audio a specific device and I sync the play/stop with event listeners.
This works because iframes are separated by the main window. Actually I think they get their own process (on chromium browsers).
My code looks like this: (note I'm not using static sources, but srcObjects)
// Request mic access once so enumerateDevices() yields device labels, then
// mirror the page's audio source into one element per physical output,
// each hosted in its own iframe (separate context) with its own sinkId.
navigator.mediaDevices.getUserMedia({audio: true}).then(s=>{
s.getTracks().forEach(x=>x.stop()); //stop mic use because we need only outputs
navigator.mediaDevices.enumerateDevices().then(o=>{
// Skip the virtual 'default'/'communications' aliases so no physical
// device is driven twice.
const outputs = o.filter(({ kind, deviceId }) => kind === 'audiooutput' && deviceId !== 'default' && deviceId !== 'communications');
let audioSrc = getAudioSrc(), players = [];
audioSrc.src.pause(); // Pause source so all mirrors start in sync
// Keep every mirror element in lock-step with the source element.
audioSrc.src.addEventListener("pause", () => players.forEach(x=>x.pause()));
audioSrc.src.addEventListener("play", () => players.forEach(x=>x.play()));
outputs.forEach((x)=>{
// Fixed: previously passed `audioSrc.s` (undefined) — the source element
// lives on `audioSrc.src`, as used in the listeners above.
let ifrm = makeIFrame(), audioEl = makeAudio(ifrm.document, audioSrc.src, x.deviceId);
players.push(audioEl);
});
});
}).catch(e=>console.log(e));

JavaScript programmatically check if camera is being used

I'm writing functional tests for a video chat app.
I want to make sure that when the user leaves the meeting the camera turns off. So, I'm trying to check if the camera is being used or not.
Is there a way to do that programmatically? I couldn't find any methods on navigator.mediaDevices that say "hey, your camera is being used".
Here is how I solved it in TestCafe by "spying" on getUserMedia:
// TestCafe client function that monkey-patches getUserMedia to record every
// track handed out, so the test can later verify they were all stopped.
const overWriteGetUserMedia = ClientFunction(() => {
  // Keep a *bound* reference: calling getUserMedia detached from
  // navigator.mediaDevices throws "Illegal invocation" in Chrome because the
  // method loses its `this`.
  const realGetUserMedia = navigator.mediaDevices.getUserMedia.bind(
    navigator.mediaDevices
  );
  const allRequestedTracks = [];
  navigator.mediaDevices.getUserMedia = constraints =>
    realGetUserMedia(constraints).then(stream => {
      // Remember each track the app ever requested.
      stream.getTracks().forEach(track => {
        allRequestedTracks.push(track);
      });
      return stream;
    });
  return allRequestedTracks;
});
test('leaving a meeting should end streams', async t => {
  const allRequestedTracks = await overWriteGetUserMedia();
  await t.wait(5000); // wait for streams to start;
  await t.click(screen.getByLabelText(/leave/i));
  await t.click(screen.getByLabelText(/yes, leave the meeting/i));
  await t.wait(1000); // wait for navigation;
  // MediaStreamTrack has no `ended` property (that lives on media elements);
  // a stopped track reports readyState === 'ended'. The previous
  // `!track.ended` check was always truthy, making the test vacuous.
  const actual = allRequestedTracks.every(track => track.readyState === 'ended');
  const expected = true;
  await t.expect(actual).eql(expected);
});
You can use the navigator.mediaDevices.getUserMedia method to get access to the user's camera, and then check the stream's active value to see whether the camera is currently in use.
If the user has blocked permission to the camera you will receive an error.
Hope this works for you.

Audio doesn't play on mobile safari even after user interaction

I am trying to write a small library for convenient manipulations with audio. I know about the autoplay policy for media elements, and I play audio after a user interaction:
// Safari (and older Chrome) exposes only the webkit-prefixed constructor.
const contextClass = window.AudioContext || window.webkitAudioContext;
const context = this.audioContext = new contextClass();
// Autoplay policy: a context created outside a user gesture starts
// 'suspended', so defer playback until the first touch interaction.
if (context.state === 'suspended') {
const clickCb = () => {
// NOTE(review): presumably playSoundsAfterInteraction() resumes the
// AudioContext — iOS Safari requires context.resume() and each
// audio.play() to run synchronously inside the gesture handler; verify
// nothing in that method defers the work (a deferred play() is a likely
// cause of the NotAllowedError described below).
this.playSoundsAfterInteraction();
// One-shot handler: unregister itself after the first touchend.
window.removeEventListener('touchend', clickCb);
this.usingAudios.forEach((audio) => {
if (audio.playAfterInteraction) {
// play() returns a promise in modern browsers but undefined in older
// ones, hence the guard before attaching handlers.
const promise = audio.play();
if (promise !== undefined) {
promise.then(_ => {
}).catch(error => {
// If playing isn't allowed
console.log(error);
});
}
}
});
};
window.addEventListener('touchend', clickCb);
}
On Android Chrome and on desktop browsers everything works fine. But on mobile Safari I get this error from the promise:
the request is not allowed by the user agent or the platform in the current context safari
I have tried to create audios after an interaction, change their "src" property. In every case, I am getting this error.
I just create audio in js:
const audio = new Audio(base64);
add it to array and try to play. But nothing...
Tried to create and play after a few seconds after interaction - nothing.

WebRTC how to set constraints to use audio when available

Typically you would set constraints as follow
{ audio: true, video: true }
but I'd like to be more permissive then that. I'd like to only get the audio, if it is able to get it i.e. if a microphone is available, otherwise it will throw me a NotFoundError error.
Any ideas if that's possible?
Yes.
You can use the MediaDevices.enumerateDevices() method to get the list of all audio and video devices.
This will return an Array of MediaDeviceInfo objects which will have a kind property letting you know which constraint you can use.
So you could do something like
// Pipeline: enumerate devices -> reduce to available input kinds
// ('audio'/'video') -> request only those kinds from getUserMedia, so a
// missing microphone or camera no longer causes a NotFoundError.
navigator.mediaDevices.enumerateDevices()
.then(getDevicesTypes)
.then(getUserMedia)
.then(console.log)
.catch(console.error);
// Collapse a MediaDeviceInfo list into the set of available *input* kinds,
// stripped of the 'input' suffix (e.g. 'audioinput' -> 'audio'), ready to be
// turned into getUserMedia constraints.
function getDevicesTypes(list){
  const inputKinds = new Set();
  for (const device of list) {
    if (device.kind.includes('input')) {
      inputKinds.add(device.kind.replace('input', ''));
    }
  }
  return inputKinds;
}
// Build a constraints object with `true` for each detected input kind, log
// it for debugging, and request the matching media stream.
function getUserMedia(deviceKinds){
  const constraint = {};
  for (const kind of deviceKinds) {
    constraint[kind] = true;
  }
  console.log(constraint);
  return navigator.mediaDevices.getUserMedia(constraint);
}

Categories

Resources