How to get both audio and video sent over WebRTC tracks? - javascript

I'm making a WebRTC site and working on a one-to-many video connection right now. After finding that addStream() is deprecated, I switched to addTrack(). However, no matter which one I use, only the audio is being sent over, not the video. Originally I thought it was because I was on localhost without https, but even when I run it on my node server the same thing occurs. A solution would be appreciated.
Hosting code (host.js)
// host.js bootstrap: runs once the DOM is ready.
// NOTE(review): uuid, localVideo, serverConnection, createUUID,
// gotMessageFromServer, errorHandler, peerConnectionConfig and start appear
// to be declared elsewhere in the file -- confirm they exist, otherwise
// these assignments create implicit globals.
document.addEventListener("DOMContentLoaded", () => {
  uuid = createUUID();
  localVideo = document.getElementById('localVideo');
  // Signaling channel; assumes the WebSocket server listens on :443 (wss).
  serverConnection = new WebSocket('wss://' + window.location.hostname + ':443');
  console.log("Opened WS on :443")
  serverConnection.onmessage = gotMessageFromServer;
  // Host captures both camera and microphone.
  var constraints = {
    video: true,
    audio: true,
  };
  if (navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia(constraints).then(getUserMediaSuccess).catch(errorHandler);
  } else {
    alert('Your browser does not support getUserMedia API');
  }
  // Each click starts a new peer connection keyed by this host's uuid.
  document.getElementById("start").addEventListener("click", (e) => {
    start(uuid)
  });
});
// Stores the captured host stream and previews it in the local <video>
// element.
function getUserMediaSuccess(stream) {
  localStream = stream;
  localVideo.srcObject = stream;
}

// Creates a peer connection for viewer `uid` and adds every local track
// (audio AND video) to it.
// NOTE(review): assumes localStream was already populated by
// getUserMediaSuccess -- if start() runs before getUserMedia resolves,
// localStream is undefined and the for...of throws.
function start(uid) {
  peerConnections[uid] = new RTCPeerConnection(peerConnectionConfig);
  peerConnections[uid].onicecandidate = gotIceCandidate;
  for (const track of localStream.getTracks()) {
    peerConnections[uid].addTrack(track, localStream);
  }
}
Viewer code (client.js)
// client.js (viewer) bootstrap.
function pageReady() {
  uuid = createUUID();
  remoteVideo = document.getElementById('remoteVideo');
  // NOTE(review): remoteStream is referenced here before any track has
  // arrived -- presumably undefined at this point, so this assignment and
  // play() do nothing useful; the real stream is attached later in
  // gotRemoteStream.
  remoteVideo.srcObject = remoteStream;
  remoteVideo.play();
  serverConnection = new WebSocket('wss://' + window.location.hostname + ':443');
  serverConnection.onmessage = gotMessageFromServer;
  // Viewer captures audio only (the video direction is one-way).
  var constraints = {
    video: false,
    audio: true,
  };
  if (navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia(constraints).then(getUserMediaSuccess).catch(errorHandler);
  } else {
    alert('Your browser does not support getUserMedia API');
  }
}
// Keeps a reference to the viewer's (audio-only) local stream.
function getUserMediaSuccess(stream) {
  localStream = stream;
}

// Viewer-side call setup: creates the RTCPeerConnection and sends the offer.
// NOTE(review): only the FIRST local track is added -- the microphone, since
// the constraints above request audio only -- so the generated offer
// contains no video m-line and the host's video is never negotiated. The
// accepted fix below adds peerConnection.addTransceiver("video") before
// createOffer().
function start(isCaller) {
  console.log("pressed Start")
  peerConnection = new RTCPeerConnection(peerConnectionConfig);
  console.log("new RTCconnection")
  peerConnection.onicecandidate = gotIceCandidate;
  peerConnection.ontrack = gotRemoteStream;
  peerConnection.addTrack(localStream.getTracks()[0]);
  peerConnection.createOffer().then((desc) => {
    createdDescription(desc);
  }).catch(errorHandler);
}
// Attaches incoming remote media to the viewer's <video> element. When the
// track event carries an associated stream, use it directly; otherwise
// accumulate bare tracks into a locally-created MediaStream.
function gotRemoteStream(e) {
  console.log('got remote stream');
  const stream = e.streams && e.streams[0];
  if (stream) {
    remoteVideo.srcObject = stream;
    return;
  }
  if (!inboundStream) {
    inboundStream = new MediaStream();
    remoteVideo.srcObject = inboundStream;
  }
  inboundStream.addTrack(e.track);
}
P.S. I'm only sending audio from the viewer side because it's a one-way call, but the viewer has to initiate the call. My problem is getting both audio and video from the host side onto the viewer's side.
P.P.S. You probably want more code so you can run it yourselves, so the repo is here.
Open one client on /host and another on /class. Make sure you go to https://localhost or it won't work.

Add this line in the client.js file
peerConnection.addTransceiver("video"); after the addTrack() call.
// The accepted fix: after adding the (audio-only) local track, explicitly
// add a video transceiver so the offer negotiates a video m-line that the
// host can send on.
function start(isCaller) {
  console.log("pressed Start")
  peerConnection = new RTCPeerConnection(peerConnectionConfig);
  console.log("new RTCconnection")
  peerConnection.onicecandidate = gotIceCandidate;
  peerConnection.ontrack = gotRemoteStream;
  peerConnection.addTrack(localStream.getTracks()[0]);
  peerConnection.addTransceiver("video"); // The line to be added
  peerConnection.createOffer().then((desc) => {
    createdDescription(desc);
  }).catch(errorHandler);
}

Related

WEBRTC, establish connection if user A to user B. With different Video and Audio constraints

This code handles what happens when a new user connects.
User Class
// Represents one remote participant and owns the RTCPeerConnection to them.
// NOTE(review): servers, socket, LOG, LOG_obj, MediaRules and VideoManager
// are defined elsewhere in the file.
class User{
  constructor(friendID){
    this.friendID = friendID;
    this.userName = null;
    this.myPeer = new RTCPeerConnection(servers);
    this.videoManager = new VideoManager();
    this.setup_events();
  }
  get name(){
    return this.userName;
  }
  get id(){
    return this.friendID;
  }
  // Wires up ICE-candidate forwarding (via the signaling socket) and remote
  // track rendering.
  setup_events(){
    let this_instance = this;
    this.myPeer.onicecandidate = function(event){
      if (event.candidate !== null){
        LOG("ICE CANDIDATE SEND TO " + this_instance.friendID);
        LOG_obj(event.candidate);
        socket.emit("ice-candidate", event.candidate, this_instance.friendID);
      }else{
        LOG("EMPTY CANDIDATE");
      }
    }
    // A new video element is created for every incoming track event.
    this.myPeer.addEventListener('track', async(event) => {
      const [remoteStream] = event.streams;
      this_instance.videoManager.createVideo();
      this_instance.videoManager.setStream(remoteStream);
      LOG("ADDED stream to VideoObject");
    });
  }
  add_candidate(candidate){
    this.myPeer.addIceCandidate( new RTCIceCandidate(candidate) );
    LOG("CANDIDATE ADDED TO PEER");
  }
  accept_offer(userOffer){
    this.myPeer.setRemoteDescription(new RTCSessionDescription(userOffer));
    LOG("ACCEPTED OFFER");
  }
  // Caller side: capture local media per the detected device constraints,
  // add the tracks, then create and emit the SDP offer.
  // NOTE(review): the inner promise chain is not returned, so awaiting
  // create_offer() does NOT actually wait for the offer to be sent.
  async create_offer(){
    MediaRules.get_MediaRules()
    .then( (mediaRules) => {
      navigator.mediaDevices.getUserMedia(mediaRules).then( (mediaStream) =>{
        let tracks = mediaStream.getTracks();
        tracks.forEach( track => { this.myPeer.addTrack(track, mediaStream); });
        LOG("ADDED ALL TRACKS");
      }).then( () => {
        this.myPeer.createOffer(mediaRules).then( (offerObj) => {
          this.myPeer.setLocalDescription(offerObj);
          socket.emit("user-offer", offerObj, this.friendID);
        });
      });
    });
  }
  accept_answer(userAnswer){
    this.myPeer.setRemoteDescription(new RTCSessionDescription(userAnswer));
    LOG("ACCEPTED ANSWER");
  }
  // Callee side: mirror of create_offer, producing and emitting the answer.
  // Same caveat about the unreturned promise chain applies.
  async create_answer(){
    MediaRules.get_MediaRules().then( (mediaRules) => {
      navigator.mediaDevices.getUserMedia(mediaRules).then( (mediaStream) => {
        let tracks = mediaStream.getTracks();
        tracks.forEach( track => { this.myPeer.addTrack(track, mediaStream); });
        LOG("ADDED ALL TRACKS");
      }).then( () => {
        this.myPeer.createAnswer(mediaRules).then( (answerObj) => {
          this.myPeer.setLocalDescription(answerObj);
          socket.emit("user-answer", answerObj, this.friendID);
        });
      });
    });
  }
}
User Pool
// Registry of connected User objects, keyed by their friend id. Each method
// simply routes an incoming signaling payload to the matching User.
class UsersPool{
  constructor(){
    this.UsersMap = {};
  }

  // Register a user under its id.
  addUser(userObj){
    const key = userObj.id;
    this.UsersMap[key] = userObj;
  }

  // Forward a remote ICE candidate to the right peer.
  accept_IceCandidate(candidateObject, user_id){
    const user = this.UsersMap[user_id];
    user.add_candidate(candidateObject);
  }

  // Forward a remote SDP offer to the right peer.
  accept_Offer(offerObject, user_id){
    LOG("ACCEPT OFFER FROM " + user_id);
    const user = this.UsersMap[user_id];
    user.accept_offer(offerObject);
  }

  // Forward a remote SDP answer to the right peer.
  accept_Answer(answerObject, user_id){
    const user = this.UsersMap[user_id];
    user.accept_answer(answerObject);
  }

  // Ask the given peer to create and send an offer.
  async CreateSendOffer(user_id){
    await this.UsersMap[user_id].create_offer();
  }

  // Ask the given peer to create and send an answer.
  async CreateSendAnswer(user_id){
    await this.UsersMap[user_id].create_answer();
  }
}
Media Constraints
// Builds getUserMedia constraints from the hardware actually present:
// audio/video become true only when a matching input device is enumerated.
class MediaConstraints{
  async get_MediaRules(){
    const mediaRules = { video: false, audio: false };
    const devicesEnum = await navigator.mediaDevices.enumerateDevices();
    for (const device of devicesEnum) {
      if (device.kind == "audioinput") {
        mediaRules["audio"] = true;
      } else if (device.kind == "videoinput") {
        mediaRules["video"] = true;
      }
    }
    return mediaRules;
  }
}
Video Manager (creates video element by user)
// Owns a single <video> element used to render one remote participant.
class VideoManager {
  constructor(){
    this.videoObject = null;
  }

  // Build a <div><video></video></div> wrapper, append it to the page, and
  // remember the video element for later streaming.
  createVideo(){
    const video = document.createElement("video");
    const wrapper = document.createElement("div");
    video.setAttribute('width', "600");
    video.setAttribute('height', "800");
    wrapper.appendChild(video);
    document.body.appendChild(wrapper);
    this.videoObject = video;
  }

  // Point the managed element at a MediaStream and start playback.
  setStream(stream){
    this.videoObject.srcObject = stream;
    this.videoObject.play();
  }
}
Well, the problem is here. The icecandidate event is working fine, and the signaling server is working too.
The TURN/STUN server works fine.
My main question is how to create constraints and correctly set up the Offer and Answer if User A doesn't have a web camera but User B does.
At the moment I get an error that the STUN server is broken, but this is because the peers can't finish establishing the connection between each other.
How do I make it work if I have only a microphone on my laptop, but the other laptop has both video and a microphone?
EDIT 0: Well, it looks like WebRTC doesn't like it if the constraints are different: if User A creates an offer with {video: false, audio: true} and sends it to User B, and User B creates an answer with {video: true, audio: true}, then it fails to connect because the constraints are different.
I still don't understand why this is a problem.
EDIT 1: It looks like the only way is to use addTransceiver and control the media manually.
Actually, the problem was not in the Linux Firefox version.
The problem was in the Offer-Answer exchange and in using the addTrack() function.
The problem was that if User A doesn't have a web camera but User B does, the connection can't finish normally, and as a result the ice-candidate event is not triggered.
So, I solved it using addTransceiver().
Here is an example of how to make a connection when someone doesn't have a web camera or a microphone.
Create RTCPeerConnection.
let myPeer = new RTCPeerConnection();
Make a function that looks for active devices. As a result you should get, for example,
real_devices = { video: false, audio: true } if there is no web camera.
// Detects which input devices exist and records the result in
// this.physicalRule ({ video, audio } booleans), which is also returned.
// NOTE(review): this is a method snippet -- physicalRule is presumably
// initialised to { video: false, audio: false } in the enclosing class;
// confirm, since a missing field would never be reset to false here.
async get_MediaRules(){
  let devicesEnum = await navigator.mediaDevices.enumerateDevices();
  devicesEnum.forEach( device => {
    if ( device.kind == "audioinput" ){
      this.physicalRule["audio"] = true;
    }
    else if ( device.kind == "videoinput"){
      this.physicalRule["video"] = true;
    }
  });
  return this.physicalRule;
}
Get mediaStream
let myStream = await navigator.mediaDevices.getUserMedia(real_devices);
Create Transceiver and add it to connection ( do this on Caller Side ).
async transceiverSetup(){
let videoTracks = myStream.getVideoTracks();
let audioTracks = myStream.getAudioTracks();
if (videoTracks.length > 0){
this.clientPeer.addTransceiver(videoTracks[0]);
}
else{
let video = this.myStream.addTransceiver("video");
video.direction = "recvonly";
}
if (audioTracks.length > 0){
this.myStream.addTransceiver(audioTracks[0]);
}
else{
let audio = this.myStream.addTransceiver("audio");
audio.direction = "recvonly";
}
}
After that you call
myPeer.createOffer()...
And on the other side, after you receive the Offer, you call the remoteTrackSetup() function to set up the transceivers on your side.
async configure_transceiver(mediaTracks, transceivers){
transceivers.forEach( async(any) => {
if (any.receiver.track){
if (mediaTracks.length > 0){
any.direction = "sendrecv";
await any.sender.replaceTrack(mediaTracks[0]);
}
else{
any.direction = "recvonly";
await any.sender.replaceTrack(null);
}
}else{
if (mediaTracks.length > 0){
any.direction = "sendonly";
await any.sender.replaceTrack(mediaTracks[0]);
}else{
any.direction = "inactive";
await mediaTracks.sender.replaceTrack(null);
}
}
});
}
// Callee side: after receiving the offer, partition our transceivers by
// media kind and configure each group against the local tracks we have.
// NOTE(review): GlobalState.Media and this.clientPeer are defined elsewhere.
async remoteTrackSetup(){
  let mediaStream = GlobalState.Media;
  let audioTracks = mediaStream.getAudioTracks();
  let videoTracks = mediaStream.getVideoTracks();
  let transceivers = this.clientPeer.getTransceivers();
  let audioTransceivers = transceivers.filter(function(tr){
    return tr.receiver.track.kind == "audio";
  });
  let videoTransceivers = transceivers.filter(function(tr){
    return tr.receiver.track.kind == "video";
  });
  await this.configure_transceiver(audioTracks, audioTransceivers);
  await this.configure_transceiver(videoTracks, videoTransceivers);
}
After those functions you call
myPeer.createAnswer()...
And connection is fully established.
Here is code for ontrack event.
// ontrack helper: start a fresh MediaStream for the first track (or when
// the current stream already holds two tracks, i.e. audio + video),
// otherwise add the new track to the stream already attached to the element.
setTransceiver(transceiver){
  if(!this.videoObject.srcObject || this.videoObject.srcObject.getTracks().length == 2){
    this.videoObject.srcObject = new MediaStream([transceiver.receiver.track]);
  }else{
    this.videoObject.srcObject.addTrack(transceiver.receiver.track);
  }
}

WebRTC three-ways communication is creating this error: Failed to set remote answer sdp: Called in wrong state: stable

I have been trying to create three-ways communication similar to this:
User1 sends an invitation to user2 and user3
User2 receives the invitation from user1 and answers. At the same time, User2 creates another invitation, and sends it to user3
user3 answers both User1's and User2's offers.
To achieve this, I duplicated the RTCPeerConnection twice. However, the connection between user 1 and user 2 is established correctly, but user 3 is unable to join the call. I keep getting this error.
Error InvalidStateError: Failed to execute 'setRemoteDescription' on 'RTCPeerConnection': Failed to set remote answer sdp: Called in wrong state: stable
This is my code:
"use strict";
// Get our hostname
var myHostname = window.location.hostname;
console.log("Hostname: " + myHostname);
// WebSocket chat/signaling channel variables.
var connection = null;
var clientID = 0;
// getUserMedia constraints. NOTE: the original comment claimed an audio
// track was wanted, but audio is explicitly disabled here -- video only.
var mediaConstraints = {
  audio: false, // No audio track requested
  video: true // ...we only want a video track
};
var myUsername = null;
var targetUsername = null; // To store username of other peer
var targetUsername2 = 'User3'; // Second callee is hard-coded to "User3"
var myPeerConnection = null; // RTCPeerConnection to targetUsername
var myPeerConnection2 = null; // RTCPeerConnection to targetUsername2
// To work both with and without addTrack() we need to note
// if it's available
var hasAddTrack = false;
var hasAddTrack2 = false;
// Writes an error line to the console, prefixed with the current local time.
function log_error(text) {
  const time = new Date();
  console.error(`[${time.toLocaleTimeString()}] ${text}`);
}

// Serializes a message object as JSON and pushes it down the signaling
// WebSocket connection.
function sendToServer(msg) {
  const msgJSON = JSON.stringify(msg);
  console.log(`Sending '${msg.type}' message: ${msgJSON}`);
  connection.send(msgJSON);
}
// Reads the username input field and announces the chosen name (together
// with our client id) to the signaling server.
function setUsername() {
  myUsername = document.getElementById("name").value;
  const announcement = {
    name: myUsername,
    date: Date.now(),
    id: clientID,
    type: "username",
  };
  sendToServer(announcement);
}
// Open and configure the connection to the WebSocket signaling server and
// install the dispatcher for all incoming signaling messages.
function connect() {
  var serverUrl;
  var scheme = "ws";
  // Use wss:// when the page itself was served over HTTPS.
  if (document.location.protocol === "https:") {
    scheme += "s";
  }
  serverUrl = scheme + "://" + myHostname + ":443";
  connection = new WebSocket(serverUrl, "json");
  connection.onopen = function(evt) {
  };
  connection.onerror = function(evt) {
    console.dir(evt);
  }
  // Dispatch each incoming message by its "type" field.
  connection.onmessage = function(evt) {
    var text = "";
    var msg = JSON.parse(evt.data);
    console.log("Message received: ");
    console.dir(msg);
    var time = new Date(msg.date);
    var timeStr = time.toLocaleTimeString();
    // NOTE(review): text and timeStr are computed but never used here.
    switch(msg.type) {
      case "id": // Server assigned us a client id; register our username.
        clientID = msg.id;
        setUsername();
        break;
      case "rejectusername": // Server substituted a different username.
        myUsername = msg.name;
        break;
      case "userlist": // Received an updated user list
        handleUserlistMsg(msg);
        break;
      // Signaling messages: these messages are used to trade WebRTC
      // signaling information during negotiations leading up to a video
      // call.
      // NOTE(review): every signaling message is fed to BOTH peer-connection
      // handlers, so handler 2 also processes offers/answers meant only for
      // peer 1 -- a likely source of the "Called in wrong state: stable"
      // error described in the question.
      case "video-offer": // Invitation and offer to chat
        handleVideoOfferMsg(msg);
        handleVideoOfferMsg2(msg);
        break;
      case "video-answer": // Callee has answered our offer
        handleVideoAnswerMsg(msg);
        handleVideoAnswerMsg2(msg);
        break;
      case "new-ice-candidate": // A new ICE candidate has been received
        handleNewICECandidateMsg(msg);
        handleNewICECandidateMsg2(msg);
        break;
      case "hang-up": // The other peer has hung up the call
        handleHangUpMsg(msg);
        break;
      // Unknown message; output to console for debugging.
      default:
        log_error("Unknown message received:");
        log_error(msg);
    }
  };
}
// Create the first RTCPeerConnection (towards targetUsername) and wire up
// its ICE/negotiation event handlers.
// NOTE(review): the ICE server entries use the legacy "url" key; the
// standard property is "urls" -- confirm this still works in the target
// browsers.
function createPeerConnection() {
  console.log("Setting up a connection (myPeerConnection)");
  // Create an RTCPeerConnection which knows to use our chosen
  // STUN server.
  myPeerConnection = new RTCPeerConnection({
    iceServers: [ // Information about ICE servers - Use your own!
      {
        url: 'stun:stun.l.google.com:19302'
      },
      {
        url: 'turn:numb.viagenie.ca',
        credential: 'muazkh',
        username: 'webrtc#live.com'
      }
    ]
  });
  // Do we have addTrack()? If not, we will use streams instead.
  hasAddTrack = (myPeerConnection.addTrack !== undefined);
  // Set up event handlers for the ICE negotiation process.
  myPeerConnection.onicecandidate = handleICECandidateEvent;
  myPeerConnection.onremovestream = handleRemoveStreamEvent;
  myPeerConnection.oniceconnectionstatechange = handleICEConnectionStateChangeEvent;
  myPeerConnection.onicegatheringstatechange = handleICEGatheringStateChangeEvent;
  myPeerConnection.onsignalingstatechange = handleSignalingStateChangeEvent;
  myPeerConnection.onnegotiationneeded = handleNegotiationNeededEvent;
  // Because the deprecation of addStream() and the addstream event is recent,
  // we need to use those if addTrack() and track aren't available.
  if (hasAddTrack) {
    myPeerConnection.ontrack = handleTrackEvent;
  } else {
    myPeerConnection.onaddstream = handleAddStreamEvent;
  }
}
//Clone
// Identical setup for the second RTCPeerConnection (towards targetUsername2),
// delegating to the "...2" handler variants.
function createPeerConnection2() {
  console.log("Setting up a connection... (myPeerConnection2)");
  // Create an RTCPeerConnection which knows to use our chosen
  // STUN server.
  myPeerConnection2 = new RTCPeerConnection({
    iceServers: [ // Information about ICE servers - Use your own!
      {
        url: 'stun:stun.l.google.com:19302'
      },
      {
        url: 'turn:numb.viagenie.ca',
        credential: 'muazkh',
        username: 'webrtc#live.com'
      }
    ]
  });
  // Do we have addTrack()? If not, we will use streams instead.
  hasAddTrack2 = (myPeerConnection2.addTrack !== undefined);
  // Set up event handlers for the ICE negotiation process.
  myPeerConnection2.onicecandidate = handleICECandidateEvent2;
  myPeerConnection2.onremovestream = handleRemoveStreamEvent2;
  myPeerConnection2.oniceconnectionstatechange = handleICEConnectionStateChangeEvent2;
  myPeerConnection2.onicegatheringstatechange = handleICEGatheringStateChangeEvent2;
  myPeerConnection2.onsignalingstatechange = handleSignalingStateChangeEvent2;
  myPeerConnection2.onnegotiationneeded = handleNegotiationNeededEvent2;
  // Because the deprecation of addStream() and the addstream event is recent,
  // we need to use those if addTrack() and track aren't available.
  if (hasAddTrack2) {
    myPeerConnection2.ontrack = handleTrackEvent2;
  } else {
    myPeerConnection2.onaddstream = handleAddStreamEvent2;
  }
}
// negotiationneeded handler for peer 1: create an SDP offer, install it as
// the local description, then send it to targetUsername over the signaling
// channel.
function handleNegotiationNeededEvent() {
  console.log("*** Negotiation needed");
  console.log("---> Creating offer For myPeerConnection1");
  myPeerConnection.createOffer().then(function(offer) {
    console.log("---> Creating new description object to send to remote peer (myPeerConnection)");
    return myPeerConnection.setLocalDescription(offer);
  })
  .then(function() {
    console.log("---> Sending offer to remote peer (myPeerConnection1)");
    sendToServer({
      name: myUsername,
      target: targetUsername,
      type: "video-offer",
      sdp: myPeerConnection.localDescription
    });
  })
  .catch(reportError);
}
//Clone
// Same flow for peer 2, addressed to targetUsername2.
function handleNegotiationNeededEvent2() {
  console.log("*** Negotiation needed");
  console.log("---> Creating offer For myPeerConnection2");
  myPeerConnection2.createOffer().then(function(offer) {
    console.log("---> Creating new description object to send to remote peer (myPeerConnection2)");
    return myPeerConnection2.setLocalDescription(offer);
  })
  .then(function() {
    console.log("---> Sending offer to remote peer (myPeerConnection2)");
    sendToServer({
      name: myUsername,
      target: targetUsername2,
      type: "video-offer",
      sdp: myPeerConnection2.localDescription
    });
  })
  .catch(reportError);
}
// Renders the first stream of an incoming track event on peer 1's remote
// video element and enables the hang-up button.
function handleTrackEvent(event) {
  console.log("*** Track event");
  const [stream] = event.streams;
  document.getElementById("received_video").srcObject = stream;
  document.getElementById("hangup-button").disabled = false;
}

// Same as handleTrackEvent, but renders on peer 2's video element.
function handleTrackEvent2(event) {
  console.log("*** Track event");
  const [stream] = event.streams;
  document.getElementById("received_video2").srcObject = stream;
  document.getElementById("hangup-button").disabled = false;
}
// Legacy addstream fallback: called by the WebRTC layer when a whole stream
// starts arriving from the remote peer (older browsers without ontrack).
// Attaches it to peer 1's video element and enables the hang-up button.
function handleAddStreamEvent(event) {
  console.log("*** Stream added");
  const video = document.getElementById("received_video");
  video.srcObject = event.stream;
  document.getElementById("hangup-button").disabled = false;
}

// Legacy addstream fallback for the second peer connection.
function handleAddStreamEvent2(event) {
  console.log("*** Stream added");
  const video = document.getElementById("received_video2");
  video.srcObject = event.stream;
  document.getElementById("hangup-button").disabled = false;
}
// Called by the WebRTC layer when peer 1's remote stream is removed; we
// simply tear the whole call down.
function handleRemoveStreamEvent(event) {
  console.log("*** Stream removed");
  closeVideoCall();
}
//Clone
// Same teardown when peer 2's remote stream is removed.
function handleRemoveStreamEvent2(event) {
  console.log("*** Stream removed");
  closeVideoCall();
}
// Forwards a locally-gathered ICE candidate for peer 1 to the signaling
// server, addressed to targetUsername. Null candidates (end of gathering)
// are ignored.
function handleICECandidateEvent(event) {
  const candidate = event.candidate;
  if (!candidate) {
    return;
  }
  console.log("Outgoing ICE candidate: " + candidate.candidate);
  sendToServer({
    type: "new-ice-candidate",
    target: targetUsername,
    candidate: candidate
  });
}

// Same as handleICECandidateEvent, but candidates belong to peer 2 and are
// addressed to targetUsername2.
function handleICECandidateEvent2(event) {
  const candidate = event.candidate;
  if (!candidate) {
    return;
  }
  console.log("handleICECandidateEvent2")
  console.log("Outgoing ICE candidate: " + candidate.candidate);
  sendToServer({
    type: "new-ice-candidate",
    target: targetUsername2,
    candidate: candidate
  });
}
// Tears the call down when peer 1's ICE connection reaches a terminal
// state (closed, failed or disconnected).
function handleICEConnectionStateChangeEvent(event) {
  const state = myPeerConnection.iceConnectionState;
  console.log("*** ICE connection state changed to " + state);
  if (state === "closed" || state === "failed" || state === "disconnected") {
    closeVideoCall();
  }
}

// Same terminal-state check for peer 2's ICE connection.
function handleICEConnectionStateChangeEvent2(event) {
  const state = myPeerConnection2.iceConnectionState;
  console.log("*** ICE connection state changed to " + state);
  if (state === "closed" || state === "failed" || state === "disconnected") {
    closeVideoCall();
  }
}
// Closes the call when peer 1's signaling state reaches "closed".
function handleSignalingStateChangeEvent(event) {
  const state = myPeerConnection.signalingState;
  console.log("*** WebRTC signaling state changed to: " + state);
  if (state === "closed") {
    closeVideoCall();
  }
}

// Same check for peer 2's signaling state.
function handleSignalingStateChangeEvent2(event) {
  const state = myPeerConnection2.signalingState;
  console.log("*** WebRTC signaling state changed to: " + state);
  if (state === "closed") {
    closeVideoCall();
  }
}
// Logs peer 1's ICE gathering progress (debug only; no side effects).
function handleICEGatheringStateChangeEvent(event) {
  console.log("*** ICE gathering state changed to: " + myPeerConnection.iceGatheringState);
}
// Logs peer 2's ICE gathering progress.
function handleICEGatheringStateChangeEvent2(event) {
  console.log("*** ICE gathering state changed to: " + myPeerConnection2.iceGatheringState);
}
// Given a message containing a list of usernames, rebuilds the user list
// box: clears any previous entries, then adds one clickable <li> per user
// which starts an invitation when clicked.
function handleUserlistMsg(msg) {
  const listElem = document.getElementById("userlistbox");
  // Remove all current children of the list box.
  while (listElem.firstChild) {
    listElem.removeChild(listElem.firstChild);
  }
  // Add member names from the received list.
  for (const username of msg.users) {
    const item = document.createElement("li");
    item.appendChild(document.createTextNode(username));
    item.addEventListener("click", invite, false);
    listElem.appendChild(item);
  }
}
// Shuts down both peer connections (if open), stops all local and remote
// media tracks, detaches media from the video elements and resets the UI.
//
// FIXES vs the original:
//  - the handler properties being cleared were misspelled
//    ("onnicecandidate", "onnotificationneeded"), so the real
//    onicecandidate / onnegotiationneeded handlers installed in
//    createPeerConnection() were never detached;
//  - the videos were cleared via `.src = null` even though they are
//    attached through `.srcObject` everywhere else in this file.
function closeVideoCall() {
  var remoteVideo = document.getElementById("received_video");
  var remoteVideo2 = document.getElementById("received_video2");
  var localVideo = document.getElementById("local_video");
  console.log("Closing the call");
  // Close the first RTCPeerConnection
  if (myPeerConnection) {
    console.log("--> Closing the peer connection");
    // Disconnect all our event listeners; we don't want stray events
    // to interfere with the hangup while it's ongoing.
    myPeerConnection.onaddstream = null; // For older implementations
    myPeerConnection.ontrack = null; // For newer ones
    myPeerConnection.onremovestream = null;
    myPeerConnection.onicecandidate = null;
    myPeerConnection.oniceconnectionstatechange = null;
    myPeerConnection.onsignalingstatechange = null;
    myPeerConnection.onicegatheringstatechange = null;
    myPeerConnection.onnegotiationneeded = null;
    // Stop the videos
    if (remoteVideo.srcObject) {
      remoteVideo.srcObject.getTracks().forEach(track => track.stop());
    }
    if (localVideo.srcObject) {
      localVideo.srcObject.getTracks().forEach(track => track.stop());
    }
    remoteVideo.srcObject = null;
    localVideo.srcObject = null;
    // Close the peer connection
    myPeerConnection.close();
    myPeerConnection = null;
  }
  // Close the second RTCPeerConnection the same way
  if (myPeerConnection2) {
    console.log("--> Closing the peer connection (myPeerConnection2)");
    myPeerConnection2.onaddstream = null; // For older implementations
    myPeerConnection2.ontrack = null; // For newer ones
    myPeerConnection2.onremovestream = null;
    myPeerConnection2.onicecandidate = null;
    myPeerConnection2.oniceconnectionstatechange = null;
    myPeerConnection2.onsignalingstatechange = null;
    myPeerConnection2.onicegatheringstatechange = null;
    myPeerConnection2.onnegotiationneeded = null;
    // Stop the videos
    if (remoteVideo2.srcObject) {
      remoteVideo2.srcObject.getTracks().forEach(track => track.stop());
    }
    if (localVideo.srcObject) {
      localVideo.srcObject.getTracks().forEach(track => track.stop());
    }
    remoteVideo2.srcObject = null;
    localVideo.srcObject = null;
    // Close the peer connection
    myPeerConnection2.close();
    myPeerConnection2 = null;
  }
  // Disable the hangup button and forget the current callee.
  document.getElementById("hangup-button").disabled = true;
  targetUsername = null;
}
// Handle the "hang-up" message, which is sent if the other peer
// has hung up the call or otherwise disconnected; tear down our end too.
function handleHangUpMsg(msg) {
  console.log("*** Received hang up notification from other peer");
  closeVideoCall();
}
// Hang up the call by closing our end of the connections, then sending a
// "hang-up" message to each remote peer (signaling runs on a separate
// WebSocket, so it survives the peer-connection teardown).
//
// FIXES vs the original:
//  - closeVideoCall() nulls targetUsername, so the original sent the first
//    hang-up message with target == null; both targets are now captured
//    before tearing down;
//  - closeVideoCall() was invoked twice; the second call was a no-op since
//    both connections were already closed and nulled.
function hangUpCall() {
  var target1 = targetUsername;
  var target2 = targetUsername2;
  closeVideoCall();
  sendToServer({
    name: myUsername,
    target: target1,
    type: "hang-up"
  });
  sendToServer({
    name: myUsername,
    target: target2,
    type: "hang-up"
  });
}
// Handle a click on an item in the user list by inviting the clicked
// user to video chat. Note that we don't actually send a message to the
// callee here -- adding media to the RTCPeerConnection fires a
// |negotiationneeded| event, and our handler for that event creates and
// sends the offer.
function invite(evt) {
  console.log("Starting to prepare an invitation");
  if (myPeerConnection) {
    alert("You can't start a call because you already have one open!");
  } else {
    var clickedUsername = evt.target.textContent;
    // Don't allow users to call themselves, because weird.
    if (clickedUsername === myUsername) {
      alert("I'm afraid I can't let you talk to yourself. That would be weird.");
      return;
    }
    // Record the username being called for future reference
    targetUsername = clickedUsername;
    console.log("Inviting user " + targetUsername);
    // Call createPeerConnection() to create the RTCPeerConnection.
    console.log("Setting up connection to invite user: " + targetUsername );
    createPeerConnection();
    console.log("Setting up connection to invite user: " + targetUsername2);
    createPeerConnection2();
    // Now configure and create the local stream, attach it to the
    // "preview" box (id "local_video"), and add it to BOTH
    // RTCPeerConnections.
    console.log("Requesting webcam access...");
    navigator.mediaDevices.getUserMedia(mediaConstraints)
    .then(function(localStream) {
      console.log("-- Local video stream obtained");
      document.getElementById("local_video").srcObject = localStream;
      if (hasAddTrack) {
        console.log("-- Adding tracks to the RTCPeerConnection");
        localStream.getTracks().forEach(track => myPeerConnection.addTrack(track, localStream));
      } else {
        console.log("-- Adding stream to the RTCPeerConnection");
        myPeerConnection.addStream(localStream);
      }
      if (hasAddTrack2) {
        console.log("-- Adding tracks to the RTCPeerConnection2");
        localStream.getTracks().forEach(track => myPeerConnection2.addTrack(track, localStream));
      } else {
        console.log("-- Adding stream to the RTCPeerConnection2");
        myPeerConnection2.addStream(localStream);
      }
    })
    .catch(handleGetUserMediaError);
  }
}
// Accept an offer to video chat. We configure our local settings,
// create our RTCPeerConnection, get and attach our local camera
// stream, then create and send an answer to the caller.
function handleVideoOfferMsg(msg) {
  var localStream = null;
  targetUsername = msg.name;
  // Call createPeerConnection() to create the RTCPeerConnection.
  console.log("Starting to accept invitation from " + targetUsername);
  createPeerConnection();
  // We need to set the remote description to the received SDP offer
  // so that our local WebRTC layer knows how to talk to the caller.
  var desc = new RTCSessionDescription(msg.sdp);
  myPeerConnection.setRemoteDescription(desc).then(function () {
    console.log("Setting up the local media stream (myPeerConnection1)");
    return navigator.mediaDevices.getUserMedia(mediaConstraints);
  })
  .then(function(stream) {
    console.log("-- Local video stream obtained");
    localStream = stream;
    document.getElementById("local_video").srcObject = localStream;
    if (hasAddTrack) {
      console.log("-- Adding tracks to the RTCPeerConnection");
      localStream.getTracks().forEach(track =>
        myPeerConnection.addTrack(track, localStream)
      );
    } else {
      console.log("-- Adding stream to the RTCPeerConnection");
      myPeerConnection.addStream(localStream);
    }
  })
  .then(function() {
    console.log("------> Creating answer");
    // Now that we've successfully set the remote description, we need to
    // start our stream up locally then create an SDP answer. This SDP
    // data describes the local end of our call, including the codec
    // information, options agreed upon, and so forth.
    return myPeerConnection.createAnswer();
  })
  .then(function(answer) {
    console.log("------> Setting local description after creating answer");
    // We now have our answer, so establish that as the local description.
    // This actually configures our end of the call to match the settings
    // specified in the SDP.
    return myPeerConnection.setLocalDescription(answer);
  })
  .then(function() {
    var msg = {
      name: myUsername,
      target: targetUsername,
      type: "video-answer",
      sdp: myPeerConnection.localDescription
    };
    // We've configured our end of the call now. Time to send our
    // answer back to the caller so they know that we want to talk
    // and how to talk to us.
    console.log("Sending answer packet back to other peer");
    sendToServer(msg);
  })
  .catch(handleGetUserMediaError);
}
//Clone
// Same flow for the second peer connection.
// NOTE(review): this runs for EVERY incoming "video-offer" (see connect()),
// including offers meant only for peer 1, and it answers to the hard-coded
// targetUsername2 rather than msg.name -- a likely cause of the
// "Called in wrong state: stable" error described in the question.
function handleVideoOfferMsg2(msg) {
  var localStream = null;
  // Call createPeerConnection() to create the RTCPeerConnection.
  console.log("Starting to accept invitation from " + targetUsername2);
  createPeerConnection2();
  // We need to set the remote description to the received SDP offer
  // so that our local WebRTC layer knows how to talk to the caller.
  var desc2 = new RTCSessionDescription(msg.sdp);
  myPeerConnection2.setRemoteDescription(desc2).then(function () {
    console.log("Setting up the local media stream... (myPeerConnection2)");
    return navigator.mediaDevices.getUserMedia(mediaConstraints);
  })
  .then(function(stream) {
    console.log("-- Local video stream obtained");
    localStream = stream;
    document.getElementById("local_video").srcObject = localStream;
    if (hasAddTrack2) {
      console.log("-- Adding tracks to the RTCPeerConnection (myPeerConnection2)");
      localStream.getTracks().forEach(track =>
        myPeerConnection2.addTrack(track, localStream)
      );
    } else {
      console.log("-- Adding stream to the RTCPeerConnection (myPeerConnection2)");
      myPeerConnection2.addStream(localStream);
    }
  })
  .then(function() {
    console.log("------> Creating answer (myPeerConnection2)");
    // Now that we've successfully set the remote description, we need to
    // start our stream up locally then create an SDP answer. This SDP
    // data describes the local end of our call, including the codec
    // information, options agreed upon, and so forth.
    return myPeerConnection2.createAnswer();
  })
  .then(function(answer) {
    console.log("------> Setting local description after creating answer (myPeerConnection2)");
    // We now have our answer, so establish that as the local description.
    // This actually configures our end of the call to match the settings
    // specified in the SDP.
    return myPeerConnection2.setLocalDescription(answer);
  })
  .then(function() {
    var msg = {
      name: myUsername,
      target: targetUsername2,
      type: "video-answer",
      sdp: myPeerConnection2.localDescription
    };
    // We've configured our end of the call now. Time to send our
    // answer back to the caller so they know that we want to talk
    // and how to talk to us.
    console.log("Sending answer packet back to other peer (myPeerConnection2)");
    sendToServer(msg);
  })
  .catch(handleGetUserMediaError);
}
// Responds to the "video-answer" message sent to the caller
// once the callee has decided to accept our request to talk.
// Responds to the "video-answer" message sent to the caller
// once the callee has decided to accept our request to talk.
function handleVideoAnswerMsg(msg) {
  console.log("Call recipient has accepted our call");
  // The SDP payload carried in the answer becomes the remote
  // description of our end of the call.
  const answerDesc = new RTCSessionDescription(msg.sdp);
  myPeerConnection.setRemoteDescription(answerDesc).catch(reportError);
}
// Same contract as handleVideoAnswerMsg, but applied to the second
// peer connection: install the received SDP answer as its remote
// description.
function handleVideoAnswerMsg2(msg) {
  console.log("Call recipient has accepted our call");
  const answer = new RTCSessionDescription(msg.sdp);
  myPeerConnection2
    .setRemoteDescription(answer)
    .catch(reportError);
}
// A new ICE candidate has been received from the other peer. Call
// RTCPeerConnection.addIceCandidate() to send it along to the
// local ICE framework.
// A new ICE candidate has been received from the other peer. Call
// RTCPeerConnection.addIceCandidate() to send it along to the
// local ICE framework.
function handleNewICECandidateMsg(msg) {
  var candidate = new RTCIceCandidate(msg.candidate);
  console.log("Adding received ICE candidate: " + JSON.stringify(candidate));
  // FIX: the returned promise was left floating (and the statement was
  // missing its semicolon). Report failures — e.g. a candidate arriving
  // before the remote description is set — exactly like the sibling
  // handleNewICECandidateMsg2 does.
  myPeerConnection.addIceCandidate(candidate).catch(reportError);
}
// ICE-candidate handler for the second peer connection: wrap the raw
// candidate payload and hand it to the local ICE layer, reporting any
// failure.
function handleNewICECandidateMsg2(msg) {
  const iceCandidate = new RTCIceCandidate(msg.candidate);
  console.log("Adding received ICE candidate: " + JSON.stringify(iceCandidate));
  myPeerConnection2
    .addIceCandidate(iceCandidate)
    .catch(reportError);
}
// Handle errors which occur when trying to access the local media
// hardware; that is, exceptions thrown by getUserMedia(). The two most
// likely scenarios are that the user has no camera and/or microphone
// or that they declined to share their equipment when prompted. If
// they simply opted not to share their media, that's not really an
// error, so we won't present a message in that situation.
// Handle errors which occur when trying to access the local media
// hardware; that is, exceptions thrown by getUserMedia(). The two most
// likely scenarios are that the user has no camera and/or microphone
// or that they declined to share their equipment when prompted. If
// they simply opted not to share their media, that's not really an
// error, so we won't present a message in that situation.
function handleGetUserMediaError(e) {
  console.log(e);
  switch (e.name) {
    case "NotFoundError":
      // FIX: the two concatenated string literals were missing a
      // separating space, producing "microphonewere found." in the alert.
      alert("Unable to open your call because no camera and/or microphone " +
        "were found.");
      break;
    case "SecurityError":
    case "PermissionDeniedError":
      // Do nothing; this is the same as the user canceling the call.
      break;
    default:
      alert("Error opening your camera and/or microphone: " + e.message);
      break;
  }
  // Make sure we shut down our end of the RTCPeerConnection so we're
  // ready to try again.
  closeVideoCall();
}
// Handles reporting errors. Currently, we just dump stuff to console but
// in a real-world application, an appropriate (and user-friendly)
// error message should be displayed.
// Handles reporting errors. Currently, we just dump stuff to console but
// in a real-world application, an appropriate (and user-friendly)
// error message should be displayed.
function reportError(errMessage) {
  log_error(`Error ${errMessage.name}: ${errMessage.message}`);
}
You can find here the log file
You need to better organize your code and everything will work better. Work with an Object (a constructor or an object factory), and send the id of the peer so you can find the correct peer connection.
Your error message seems to come from the code with the msg switch statement: when you call handleVideoAnswerMsg, you set the SDP on both peer connections, so the second time the first one throws and the second is never called.
You could add an id to choose the correct one
ex:
const id = msg.id;
case "video-offer": // Invitation and offer to chat
handleVideoOffer(id, msg.sdp);
break;
case "video-answer": // Callee has answered our offer
handleVideoAnswer(id, msg.sdp);
break;
case "new-ice-candidate": // A new ICE candidate has been received
handleNewICECandidate(id, msg.ice);
You also call createPeerConnection (1/2) in each case.

how to live stream data in webrtc

I am currently new to webrtc, I have watched videos of webrtc but the problem is it is only one to one, I want to stream a video on a specific URL let us say test.com/live and whoever visits this URL can see the stream unlike normal peer to peer
// Capture the local camera and microphone, stash the stream in state,
// and preview it in our own <video> element.
navigator.mediaDevices
  .getUserMedia({ video: true, audio: true })
  .then((currentStream) => {
    setStream(currentStream);
    myVideo.current.srcObject = currentStream;
  })
  // FIX: a rejected getUserMedia (permission denied, no device, insecure
  // origin) previously went unhandled; surface it instead of failing
  // silently with an unhandled-rejection warning.
  .catch((err) => {
    console.error("getUserMedia failed: " + err.name + ": " + err.message);
  });
This is the code to get my media data. How can I stream this data to that particular URL? I am new to WebRTC — can anybody explain?
This is a snippet from a video streamer I built, You can create a data stream and attach it.
I hope this can be useful.
Peer-to-peer communications with WebRTC
<script>
// Legacy cross-browser WebRTC adapter shim: sniffs the browser and maps
// the vendor-prefixed APIs (moz* for Firefox, webkit* for Chrome) onto a
// single set of unprefixed globals the rest of the page can use.
// NOTE(review): this predates the standardized APIs. URL.createObjectURL
// on a MediaStream and assigning element.src are long deprecated; modern
// browsers expose unprefixed RTCPeerConnection, getUserMedia and
// element.srcObject directly — verify before reusing this shim.
var RTCPeerConnection = null;
var getUserMedia = null;
var attachMediaStream = null;
var reattachMediaStream = null;
var webrtcDetectedBrowser = null;
if (navigator.mozGetUserMedia) {
console.log("This appears to be Firefox");
webrtcDetectedBrowser = "firefox";
// The RTCPeerConnection object.
RTCPeerConnection = mozRTCPeerConnection;
// The RTCSessionDescription object.
RTCSessionDescription = mozRTCSessionDescription;
// The RTCIceCandidate object.
RTCIceCandidate = mozRTCIceCandidate;
// Get UserMedia (only difference is the prefix).
// Code from Adam Barth.
getUserMedia = navigator.mozGetUserMedia.bind(navigator);
// Attach a media stream to an element.
attachMediaStream = function (element, stream) {
console.log("Attaching media stream");
element.src = URL.createObjectURL(stream);;
element.play();
};
reattachMediaStream = function (to, from) {
console.log("Reattaching media stream");
to.mozSrcObject = from.mozSrcObject;
to.play();
};
// Fake get{Video,Audio}Tracks
// NOTE(review): both stubs return empty arrays, so callers cannot
// inspect individual tracks on old Firefox — they only avoid crashes.
MediaStream.prototype.getVideoTracks = function () {
return [];
};
MediaStream.prototype.getAudioTracks = function () {
return [];
};
} else if (navigator.webkitGetUserMedia) {
console.log("This appears to be Chrome");
webrtcDetectedBrowser = "chrome";
// The RTCPeerConnection object.
RTCPeerConnection = webkitRTCPeerConnection;
// Get UserMedia (only difference is the prefix).
// Code from Adam Barth.
getUserMedia = navigator.webkitGetUserMedia.bind(navigator);
// Attach a media stream to an element.
attachMediaStream = function (element, stream) {
element.src = webkitURL.createObjectURL(stream);
};
reattachMediaStream = function (to, from) {
to.src = from.src;
};
// The representation of tracks in a stream is changed in M26.
// Unify them for earlier Chrome versions in the coexisting period.
if (!webkitMediaStream.prototype.getVideoTracks) {
webkitMediaStream.prototype.getVideoTracks = function () {
return this.videoTracks;
};
webkitMediaStream.prototype.getAudioTracks = function () {
return this.audioTracks;
};
}
// New syntax of getXXXStreams method in M26.
if (!webkitRTCPeerConnection.prototype.getLocalStreams) {
webkitRTCPeerConnection.prototype.getLocalStreams = function () {
return this.localStreams;
};
webkitRTCPeerConnection.prototype.getRemoteStreams = function () {
return this.remoteStreams;
};
}
} else {
// Neither prefixed API exists: leave the globals null.
console.log("Browser does not appear to be WebRTC-capable");
}
</script>

How to Switch Video Cameras Using WebRTC

I am currently working on WebRTC multipeer connection. I want to be able to switch the camera that is being used in the middle of a call, without having to change the selected camera in Settings.
I followed along with the code from this RTC example, and it works, but only client side.
devices.js
'use strict';
// DOM handles: local preview video plus the three device pickers.
const videoElement = document.querySelector('#local');
const audioInputSelect = document.querySelector('select#audioSource');
const audioOutputSelect = document.querySelector('select#audioOutput');
const videoSelect = document.querySelector('select#videoSource');
// Kept in this order so indices line up across gotDevices() calls.
const selectors = [audioInputSelect, audioOutputSelect, videoSelect];
// Output-device routing (setSinkId) is not supported in every browser;
// disable the picker when the API is absent.
audioOutputSelect.disabled = !('sinkId' in HTMLMediaElement.prototype);
// Populate the three device <select>s from an enumerateDevices() result.
// Handles being called several times to update labels: the current
// selections are preserved when their device is still present.
function gotDevices(deviceInfos) {
  const previousValues = selectors.map((select) => select.value);
  // Clear every picker before repopulating it.
  selectors.forEach((select) => {
    while (select.firstChild) {
      select.removeChild(select.firstChild);
    }
  });
  for (const deviceInfo of deviceInfos) {
    const option = document.createElement('option');
    option.value = deviceInfo.deviceId;
    switch (deviceInfo.kind) {
      case 'audioinput':
        option.text = deviceInfo.label || `microphone ${audioInputSelect.length + 1}`;
        audioInputSelect.appendChild(option);
        break;
      case 'audiooutput':
        option.text = deviceInfo.label || `speaker ${audioOutputSelect.length + 1}`;
        audioOutputSelect.appendChild(option);
        break;
      case 'videoinput':
        option.text = deviceInfo.label || `camera ${videoSelect.length + 1}`;
        videoSelect.appendChild(option);
        break;
      default:
        console.log('Some other kind of source/device: ', deviceInfo);
    }
  }
  // Restore each previous selection if that device is still listed.
  selectors.forEach((select, selectorIndex) => {
    const wanted = previousValues[selectorIndex];
    const stillPresent = Array.prototype.slice
      .call(select.childNodes)
      .some((n) => n.value === wanted);
    if (stillPresent) {
      select.value = wanted;
    }
  });
}
navigator.mediaDevices.enumerateDevices().then(gotDevices).catch(handleError);
// Attach audio output device to video element using device/sink ID.
// Attach audio output device to video element using device/sink ID.
// Falls back to the first (default) output device if routing fails.
function attachSinkId(element, sinkId) {
  // Guard clause: bail out early on browsers without setSinkId support.
  if (typeof element.sinkId === 'undefined') {
    console.warn('Browser does not support output device selection.');
    return;
  }
  element
    .setSinkId(sinkId)
    .then(() => {
      console.log(`Success, audio output device attached: ${sinkId}`);
    })
    .catch((error) => {
      const errorMessage =
        error.name === 'SecurityError'
          ? `You need to use HTTPS for selecting audio output device: ${error}`
          : error;
      console.error(errorMessage);
      // Jump back to first output device in the list as it's the default.
      audioOutputSelect.selectedIndex = 0;
    });
}
// Route the preview element's audio to the output device currently
// chosen in the output picker.
function changeAudioDestination() {
  attachSinkId(videoElement, audioOutputSelect.value);
}
// Show the freshly acquired stream in the preview element and return a
// promise for a fresh device list (labels become available once the
// user has granted permission).
function gotStream(mediaStream) {
  // Keep a global handle so the stream can be inspected from the console.
  window.stream = mediaStream;
  videoElement.srcObject = mediaStream;
  return navigator.mediaDevices.enumerateDevices();
}
// Log getUserMedia/enumerateDevices failures without interrupting the UI.
function handleError(error) {
  const { message, name } = error;
  console.log('navigator.MediaDevices.getUserMedia error: ', message, name);
}
// (Re)acquire media using the devices currently chosen in the pickers.
// Any previously open stream is stopped first so the camera is released
// before the new request is made.
function start() {
  if (window.stream) {
    for (const track of window.stream.getTracks()) {
      track.stop();
    }
  }
  const audioSource = audioInputSelect.value;
  const videoSource = videoSelect.value;
  // Empty picker values mean "no preference": pass undefined instead of
  // an exact-match constraint.
  const constraints = {
    audio: { deviceId: audioSource ? { exact: audioSource } : undefined },
    video: { deviceId: videoSource ? { exact: videoSource } : undefined },
  };
  navigator.mediaDevices
    .getUserMedia(constraints)
    .then(gotStream)
    .then(gotDevices)
    .catch(handleError);
}
// Changing an input device requires a new getUserMedia stream; changing
// the output device only needs setSinkId on the existing element.
audioInputSelect.onchange = start;
audioOutputSelect.onchange = changeAudioDestination;
videoSelect.onchange = start;
// Kick off the initial capture with the default devices.
start();
Is there an easy way to do this? I think it would have something to do with tracks, not really sure as I just started working with WebRTC.
If you want to view the full code for the repository, click here
Thanks!
To switch cameras, you must release the first camera's MediaStream by stopping all its tracks, then you must use getUserMedia() to get another MediaStream for the other camera. The browser won't prompt your user for permission again in this case; the camera will just switch. As you stop the tracks, call .removeTrack() on your rtcPeerConnection. Then, with the new stream's tracks, call .addTrack().
You may already know this, but enumerateDevices() returns much more useful information if you have an open MediaStream. That's because the user has granted permission.
If you want to replace the video sent to the remote end, you need to call RTCPeerConnection.replaceTrack. As usual, mdn has a good example

SourceBuffer.appendBuffer() fails when client joins in the middle of live-stream

I'm making a simple video live-streaming site. My basic implementation is to record fragments of webcam video using MediaRecorder, and send them to the server using socket.io. Then, the server broadcasts the fragments back to all the other clients, where they are reconstructed using MediaSource and SourceBuffer and passed into the video.
This works when the client is already connected to the server when the stream begins, but if the client joins in the middle of streaming (meaning they only begin receiving fragments from the middle of the video) the appendBuffer() fails and MediaSource closes.
I've tried finding a solution with no luck. I think it had to do with the encoding of the passed fragments, and you can't just start with a fragment from the middle of the recording, but I can't find a workaround to this issue. Would really appreciate the assistance. The relevant parts of my code are here:
// Captures the local webcam/microphone, previews it in the page, and
// records it in timed chunks that are pushed to the server over the
// socket as "stream-frag" events. `cam` (1 or 2) selects the preview
// <video> element and tags outgoing chunks; endStream() stops capture.
function Stream(cam) {
  //record video in chunks, send over websocket
  this.cameraStream = null;
  navigator.mediaDevices.getUserMedia({ video: true, audio: true }).then((stream) => {
    this.cameraStream = stream;
    switch (cam) {
      case 1:
        video1.srcObject = stream;
        video1.play();
        break;
      case 2:
        video2.srcObject = stream;
        video2.play();
        break;
      default:
        break;
    }
    // `record` is a hoisted var, so it is assigned by the time this
    // async callback runs.
    if (cam !== null) record(stream, delayMS);
  });
  // Record `stream` into webm (opus+vp8) chunks every `ms` milliseconds
  // and ship each chunk to the server as an ArrayBuffer.
  var record = (stream, ms) => {
    var rec = new MediaRecorder(stream, {
      mimeType: 'video/webm; codecs="opus,vp8"',
    });
    rec.start(ms);
    rec.ondataavailable = (e) => {
      var fileReader = new FileReader();
      fileReader.onload = () => {
        socket.emit('stream-frag', {
          stream: fileReader.result,
          // NOTE(review): split('/')[0] of a pathname like "/room" is
          // always "", so this always falls back to '/'; [1] was probably
          // intended — confirm against the server's room keys.
          room: window.location.pathname.split('/')[0] || '/',
          cam: cam,
        });
      };
      fileReader.readAsArrayBuffer(e.data);
    };
  };
  // Stop recording and release the camera/microphone.
  this.endStream = function () {
    cam = null;
    // FIX: the original referenced the bare identifier `cameraStream`,
    // which is undeclared here (ReferenceError at call time); the stream
    // lives on `this.cameraStream`. Also guard against ending before
    // getUserMedia has resolved.
    if (this.cameraStream) {
      this.cameraStream.getTracks().forEach((track) => track.stop());
    }
  };
}
//recieve video chunks from server
function getStream() {
  socket.on('stream-frag', (data) => {
    // Append an incoming chunk to the matching camera's SourceBuffer,
    // but only while the MediaSource is open and the buffer is idle.
    const appendChunk = (mediaSource, sourceBuffer, video) => {
      if (
        mediaSource.readyState === 'open' &&
        sourceBuffer &&
        sourceBuffer.updating === false
      ) {
        sourceBuffer.appendBuffer(data.stream);
        if (sourceBuffer.buffered.length > 0) video.play();
      }
    };
    if (data.cam === 1) {
      appendChunk(mediaSource1, sourceBuffer1, video1);
    } else if (data.cam === 2) {
      appendChunk(mediaSource2, sourceBuffer2, video2);
    }
  });
}
You need to stop the recording. You can set a timeout to create chunks of video that you then concatenate on the server using ffmpeg.

Categories

Resources