I am new to WebRTC. I have watched WebRTC videos, but the problem is that they only cover one-to-one connections. I want to stream a video on a specific URL, say test.com/live, so that whoever visits that URL can watch the stream, unlike a normal peer-to-peer call.
navigator.mediaDevices
.getUserMedia({ video: true, audio: true })
.then((currentStream) => {
setStream(currentStream);
myVideo.current.srcObject = currentStream;
});
This is the code I use to get my media data. How can I stream this data to that particular URL? I am new to WebRTC, so can anybody explain?
This is a snippet from a video streamer I built. You can create a stream and attach it to an element.
I hope it is useful.
Peer-to-peer communications with WebRTC
<script>
var RTCPeerConnection = null;
var getUserMedia = null;
var attachMediaStream = null;
var reattachMediaStream = null;
var webrtcDetectedBrowser = null;
if (navigator.mozGetUserMedia) {
console.log("This appears to be Firefox");
webrtcDetectedBrowser = "firefox";
// The RTCPeerConnection object.
RTCPeerConnection = mozRTCPeerConnection;
// The RTCSessionDescription object.
RTCSessionDescription = mozRTCSessionDescription;
// The RTCIceCandidate object.
RTCIceCandidate = mozRTCIceCandidate;
// Get UserMedia (only difference is the prefix).
// Code from Adam Barth.
getUserMedia = navigator.mozGetUserMedia.bind(navigator);
// Attach a media stream to an element.
attachMediaStream = function (element, stream) {
console.log("Attaching media stream");
element.src = URL.createObjectURL(stream);
element.play();
};
reattachMediaStream = function (to, from) {
console.log("Reattaching media stream");
to.mozSrcObject = from.mozSrcObject;
to.play();
};
// Fake get{Video,Audio}Tracks
MediaStream.prototype.getVideoTracks = function () {
return [];
};
MediaStream.prototype.getAudioTracks = function () {
return [];
};
} else if (navigator.webkitGetUserMedia) {
console.log("This appears to be Chrome");
webrtcDetectedBrowser = "chrome";
// The RTCPeerConnection object.
RTCPeerConnection = webkitRTCPeerConnection;
// Get UserMedia (only difference is the prefix).
// Code from Adam Barth.
getUserMedia = navigator.webkitGetUserMedia.bind(navigator);
// Attach a media stream to an element.
attachMediaStream = function (element, stream) {
element.src = webkitURL.createObjectURL(stream);
};
reattachMediaStream = function (to, from) {
to.src = from.src;
};
// The representation of tracks in a stream is changed in M26.
// Unify them for earlier Chrome versions in the coexisting period.
if (!webkitMediaStream.prototype.getVideoTracks) {
webkitMediaStream.prototype.getVideoTracks = function () {
return this.videoTracks;
};
webkitMediaStream.prototype.getAudioTracks = function () {
return this.audioTracks;
};
}
// New syntax of getXXXStreams method in M26.
if (!webkitRTCPeerConnection.prototype.getLocalStreams) {
webkitRTCPeerConnection.prototype.getLocalStreams = function () {
return this.localStreams;
};
webkitRTCPeerConnection.prototype.getRemoteStreams = function () {
return this.remoteStreams;
};
}
} else {
console.log("Browser does not appear to be WebRTC-capable");
}
</script>
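For instance, with the shims above in place, attaching a camera stream might look like this (a sketch; the element lookup is illustrative, and the old callback-style getUserMedia signature that these shims wrap is assumed):

getUserMedia(
  { video: true, audio: true },
  function (stream) {
    // attachMediaStream papers over the per-browser differences handled above.
    attachMediaStream(document.getElementById('myVideo'), stream);
  },
  function (err) {
    console.error('getUserMedia failed:', err);
  }
);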
This code handles a newly connected user.
User Class
class User{
constructor(friendID){
this.friendID = friendID;
this.userName = null;
this.myPeer = new RTCPeerConnection(servers);
this.videoManager = new VideoManager();
this.setup_events();
}
get name(){
return this.userName;
}
get id(){
return this.friendID;
}
setup_events(){
let this_instance = this;
this.myPeer.onicecandidate = function(event){
if (event.candidate !== null){
LOG("ICE CANDIDATE SEND TO " + this_instance.friendID);
LOG_obj(event.candidate);
socket.emit("ice-candidate", event.candidate, this_instance.friendID);
}else{
LOG("EMPTY CANDIDATE");
}
}
this.myPeer.addEventListener('track', async(event) => {
const [remoteStream] = event.streams;
this_instance.videoManager.createVideo();
this_instance.videoManager.setStream(remoteStream);
LOG("ADDED stream to VideoObject");
});
}
add_candidate(candidate){
this.myPeer.addIceCandidate( new RTCIceCandidate(candidate) );
LOG("CANDIDATE ADDED TO PEER");
}
accept_offer(userOffer){
this.myPeer.setRemoteDescription(new RTCSessionDescription(userOffer));
LOG("ACCEPTED OFFER");
}
async create_offer(){
MediaRules.get_MediaRules()
.then( (mediaRules) => {
navigator.mediaDevices.getUserMedia(mediaRules).then( (mediaStream) =>{
let tracks = mediaStream.getTracks();
tracks.forEach( track => { this.myPeer.addTrack(track, mediaStream); });
LOG("ADDED ALL TRACKS");
}).then( () => {
this.myPeer.createOffer(mediaRules).then( (offerObj) => {
this.myPeer.setLocalDescription(offerObj);
socket.emit("user-offer", offerObj, this.friendID);
});
});
});
}
accept_answer(userAnswer){
this.myPeer.setRemoteDescription(new RTCSessionDescription(userAnswer));
LOG("ACCEPTED ANSWER");
}
async create_answer(){
MediaRules.get_MediaRules().then( (mediaRules) => {
navigator.mediaDevices.getUserMedia(mediaRules).then( (mediaStream) => {
let tracks = mediaStream.getTracks();
tracks.forEach( track => { this.myPeer.addTrack(track, mediaStream); });
LOG("ADDED ALL TRACKS");
}).then( () => {
this.myPeer.createAnswer(mediaRules).then( (answerObj) => {
this.myPeer.setLocalDescription(answerObj);
socket.emit("user-answer", answerObj, this.friendID);
});
});
});
}
}
User Pool
class UsersPool{
constructor(){
this.UsersMap = {};
}
addUser(userObj){
this.UsersMap[userObj.id] = userObj;
}
accept_IceCandidate(candidateObject, user_id){
this.UsersMap[user_id].add_candidate(candidateObject);
}
accept_Offer(offerObject, user_id){
LOG("ACCEPT OFFER FROM " + user_id);
this.UsersMap[user_id].accept_offer(offerObject);
}
accept_Answer(answerObject, user_id){
this.UsersMap[user_id].accept_answer(answerObject);
}
async CreateSendOffer(user_id){
await this.UsersMap[user_id].create_offer();
}
async CreateSendAnswer(user_id){
await this.UsersMap[user_id].create_answer();
}
}
Media Constraints
class MediaConstraints{
async get_MediaRules(){
let mediaRules = { video: false, audio: false };
let devicesEnum = await navigator.mediaDevices.enumerateDevices();
devicesEnum.forEach( device => {
if ( device.kind == "audioinput" ){
mediaRules["audio"] = true;
}
else if ( device.kind == "videoinput"){
mediaRules["video"] = true;
}
});
return mediaRules;
}
}
Video Manager (creates video element by user)
class VideoManager {
constructor(){
this.videoObject = null;
}
createVideo(){
let videoObject = document.createElement("video");
let divVideo = document.createElement("div");
videoObject.setAttribute('width', "600");
videoObject.setAttribute('height', "800");
divVideo.appendChild(videoObject);
document.body.appendChild(divVideo);
this.videoObject = videoObject;
}
setStream(stream){
this.videoObject.srcObject = stream;
this.videoObject.play();
}
}
Well, the problem is here: icecandidate is working fine, and the signaling server is working too.
The TURN/STUN server works fine.
My main question is how to create constraints and set up the Offer and Answer correctly if User A doesn't have a webcam but User B does.
At the moment I get an error that the STUN server is broken, but that is only because the peers can't finish establishing a connection with each other.
How do I make it work if I have only a microphone on my laptop, but the other laptop has both a camera and a microphone?
EDIT 0: It looks like WebRTC doesn't like it when the constraints differ: if User A creates an offer with {video: false, audio: true} and sends it to User B, and User B creates an answer with {video: true, audio: true}, then it fails to connect because the constraints are different.
I still don't understand why this is a problem.
EDIT 1: It looks like the only way is to use addTransceiver() and control the media manually.
Actually, the problem was not in the Linux Firefox version.
The problem was in the Offer-Answer exchange and the use of the addTrack() function.
The problem was that if User A doesn't have a webcam but User B does, the connection can't complete normally, and as a result icecandidate is never triggered.
So I solved it using addTransceiver().
Here is an example of how to establish a connection when one side doesn't have a webcam or microphone.
Create RTCPeerConnection.
let myPeer = new RTCPeerConnection();
Make a function that looks for active devices, so that you end up with, for example,
real_devices = { video: false, audio: true } if there is no webcam.
async get_MediaRules(){
    // Start from "nothing available" and flip the flags for devices that exist.
    this.physicalRule = { video: false, audio: false };
    let devicesEnum = await navigator.mediaDevices.enumerateDevices();
    devicesEnum.forEach( device => {
        if ( device.kind == "audioinput" ){
            this.physicalRule["audio"] = true;
        }
        else if ( device.kind == "videoinput"){
            this.physicalRule["video"] = true;
        }
    });
    return this.physicalRule;
}
Get mediaStream
let myStream = await navigator.mediaDevices.getUserMedia(real_devices);
Create a transceiver for each media kind and add it to the connection (do this on the caller side). Note that addTransceiver() is a method of the RTCPeerConnection, not of the MediaStream.
async transceiverSetup(){
    let videoTracks = this.myStream.getVideoTracks();
    let audioTracks = this.myStream.getAudioTracks();
    if (videoTracks.length > 0){
        this.clientPeer.addTransceiver(videoTracks[0]);
    }
    else{
        // No camera: still negotiate a video section, receive-only.
        let video = this.clientPeer.addTransceiver("video");
        video.direction = "recvonly";
    }
    if (audioTracks.length > 0){
        this.clientPeer.addTransceiver(audioTracks[0]);
    }
    else{
        // No microphone: negotiate a receive-only audio section.
        let audio = this.clientPeer.addTransceiver("audio");
        audio.direction = "recvonly";
    }
}
After that you call
myPeer.createOffer()...
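Spelled out, that is the standard offer exchange (a sketch; socket and friendID are the names used in the question's code):

const offer = await myPeer.createOffer();
await myPeer.setLocalDescription(offer);
// Send the offer to the other peer over the signaling channel.
socket.emit("user-offer", offer, friendID);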
On the other side, after you receive the offer, you call the remoteTrackSetup() function to set up the transceivers on your side.
async configure_transceiver(mediaTracks, transceivers){
    transceivers.forEach( async(any) => {
        if (any.receiver.track){
            if (mediaTracks.length > 0){
                any.direction = "sendrecv";
                await any.sender.replaceTrack(mediaTracks[0]);
            }
            else{
                any.direction = "recvonly";
                await any.sender.replaceTrack(null);
            }
        }else{
            if (mediaTracks.length > 0){
                any.direction = "sendonly";
                await any.sender.replaceTrack(mediaTracks[0]);
            }else{
                any.direction = "inactive";
                await any.sender.replaceTrack(null);
            }
        }
    });
}
async remoteTrackSetup(){
let mediaStream = GlobalState.Media;
let audioTracks = mediaStream.getAudioTracks();
let videoTracks = mediaStream.getVideoTracks();
let transceivers = this.clientPeer.getTransceivers();
let audioTransceivers = transceivers.filter(function(tr){
return tr.receiver.track.kind == "audio";
});
let videoTransceivers = transceivers.filter(function(tr){
return tr.receiver.track.kind == "video";
});
await this.configure_transceiver(audioTracks, audioTransceivers);
await this.configure_transceiver(videoTracks, videoTransceivers);
}
After those functions you call
myPeer.createAnswer()...
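Spelled out, the answering side mirrors the offer exchange (again a sketch using the question's socket and friendID names):

const answer = await myPeer.createAnswer();
await myPeer.setLocalDescription(answer);
socket.emit("user-answer", answer, friendID);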
And the connection is fully established.
Here is the code for the ontrack event.
setTransceiver(transceiver){
if(!this.videoObject.srcObject || this.videoObject.srcObject.getTracks().length == 2){
this.videoObject.srcObject = new MediaStream([transceiver.receiver.track]);
}else{
this.videoObject.srcObject.addTrack(transceiver.receiver.track);
}
}
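For completeness, a sketch of how setTransceiver might be hooked up to the connection's track event (videoManager here stands for whatever object owns setTransceiver; the name is illustrative):

myPeer.ontrack = (event) => {
    // Hand each negotiated transceiver to the video-element manager.
    videoManager.setTransceiver(event.transceiver);
};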
I am currently working on WebRTC multipeer connection. I want to be able to switch the camera that is being used in the middle of a call, without having to change the selected camera in Settings.
I followed along with the code from this RTC example, and it works, but only client side.
devices.js
'use strict';
const videoElement = document.querySelector('#local');
const audioInputSelect = document.querySelector('select#audioSource');
const audioOutputSelect = document.querySelector('select#audioOutput');
const videoSelect = document.querySelector('select#videoSource');
const selectors = [audioInputSelect, audioOutputSelect, videoSelect];
audioOutputSelect.disabled = !('sinkId' in HTMLMediaElement.prototype);
function gotDevices(deviceInfos) {
// Handles being called several times to update labels. Preserve values.
const values = selectors.map(select => select.value);
selectors.forEach(select => {
while (select.firstChild) {
select.removeChild(select.firstChild);
}
});
for (let i = 0; i !== deviceInfos.length; ++i) {
const deviceInfo = deviceInfos[i];
const option = document.createElement('option');
option.value = deviceInfo.deviceId;
if (deviceInfo.kind === 'audioinput') {
option.text = deviceInfo.label || `microphone ${audioInputSelect.length + 1}`;
audioInputSelect.appendChild(option);
} else if (deviceInfo.kind === 'audiooutput') {
option.text = deviceInfo.label || `speaker ${audioOutputSelect.length + 1}`;
audioOutputSelect.appendChild(option);
} else if (deviceInfo.kind === 'videoinput') {
option.text = deviceInfo.label || `camera ${videoSelect.length + 1}`;
videoSelect.appendChild(option);
} else {
console.log('Some other kind of source/device: ', deviceInfo);
}
}
selectors.forEach((select, selectorIndex) => {
if (Array.prototype.slice.call(select.childNodes).some(n => n.value === values[selectorIndex])) {
select.value = values[selectorIndex];
}
});
}
navigator.mediaDevices.enumerateDevices().then(gotDevices).catch(handleError);
// Attach audio output device to video element using device/sink ID.
function attachSinkId(element, sinkId) {
if (typeof element.sinkId !== 'undefined') {
element.setSinkId(sinkId)
.then(() => {
console.log(`Success, audio output device attached: ${sinkId}`);
})
.catch(error => {
let errorMessage = error;
if (error.name === 'SecurityError') {
errorMessage = `You need to use HTTPS for selecting audio output device: ${error}`;
}
console.error(errorMessage);
// Jump back to first output device in the list as it's the default.
audioOutputSelect.selectedIndex = 0;
});
} else {
console.warn('Browser does not support output device selection.');
}
}
function changeAudioDestination() {
const audioDestination = audioOutputSelect.value;
attachSinkId(videoElement, audioDestination);
}
function gotStream(stream) {
window.stream = stream; // make stream available to console
videoElement.srcObject = stream;
// Refresh button list in case labels have become available
return navigator.mediaDevices.enumerateDevices();
}
function handleError(error) {
console.log('navigator.MediaDevices.getUserMedia error: ', error.message, error.name);
}
function start() {
if (window.stream) {
window.stream.getTracks().forEach(track => {
track.stop();
});
}
const audioSource = audioInputSelect.value;
const videoSource = videoSelect.value;
const constraints = {
audio: {deviceId: audioSource ? {exact: audioSource} : undefined},
video: {deviceId: videoSource ? {exact: videoSource} : undefined}
};
navigator.mediaDevices.getUserMedia(constraints).then(gotStream).then(gotDevices).catch(handleError);
}
audioInputSelect.onchange = start;
audioOutputSelect.onchange = changeAudioDestination;
videoSelect.onchange = start;
start();
Is there an easy way to do this? I think it would have something to do with tracks, not really sure as I just started working with WebRTC.
If you want to view the full code for the repository, click here
Thanks!
To switch cameras, you must release the first camera's MediaStream by stopping all its tracks, then you must use getUserMedia() to get another MediaStream for the other camera. The browser won't prompt your user for permission again in this case; the camera will just switch. As you stop the tracks, call .removeTrack() on your rtcPeerConnection. Then, with the new stream's tracks, call .addTrack().
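A rough sketch of that sequence, under the assumption that rtcPeerConnection is your existing connection and localVideo shows the local preview (both names are illustrative):

async function switchCamera(rtcPeerConnection, localVideo, newDeviceId) {
  // Release the current camera: remove its senders, then stop the tracks.
  const oldStream = localVideo.srcObject;
  if (oldStream) {
    for (const track of oldStream.getTracks()) {
      const sender = rtcPeerConnection.getSenders().find((s) => s.track === track);
      if (sender) rtcPeerConnection.removeTrack(sender);
      track.stop();
    }
  }
  // Open the other camera; the browser switches without prompting again.
  const newStream = await navigator.mediaDevices.getUserMedia({
    video: { deviceId: { exact: newDeviceId } },
    audio: true,
  });
  for (const track of newStream.getTracks()) {
    rtcPeerConnection.addTrack(track, newStream);
  }
  localVideo.srcObject = newStream;
  // removeTrack/addTrack changes the SDP, so expect a renegotiation cycle.
}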
You may already know this, but enumerateDevices() returns much more useful information if you have an open MediaStream. That's because the user has granted permission.
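To illustrate: open a stream first, then enumerate, and the label fields will be populated instead of empty strings (a small sketch):

async function listDevicesWithLabels() {
  // Opening a stream grants permission, which unlocks device labels.
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true, video: true });
  const devices = await navigator.mediaDevices.enumerateDevices();
  devices.forEach((d) => console.log(d.kind, d.label, d.deviceId));
  // Release the devices once the list has been read.
  stream.getTracks().forEach((t) => t.stop());
}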
If you want to replace the video sent to the remote end, you need to call replaceTrack() on the relevant RTCRtpSender. As usual, MDN has a good example.
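A minimal sketch of that approach, assuming peerConnection is the RTCPeerConnection and newDeviceId is the camera chosen from the select element:

async function replaceOutgoingVideo(peerConnection, newDeviceId) {
  const newStream = await navigator.mediaDevices.getUserMedia({
    video: { deviceId: { exact: newDeviceId } },
  });
  const newTrack = newStream.getVideoTracks()[0];
  // Find the sender currently carrying video and swap its track in place.
  const sender = peerConnection.getSenders().find((s) => s.track && s.track.kind === 'video');
  if (sender) {
    await sender.replaceTrack(newTrack); // no renegotiation required
  }
}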
I am in the process of replacing RecordRTC with the built-in MediaRecorder for recording audio in Chrome. The recorded audio is then played in the program with the Audio API. I am having trouble getting the audio.duration property to work. It says:
If the video (audio) is streamed and has no predefined length, "Inf" (Infinity) is returned.
With RecordRTC, I had to use ffmpeg_asm.js to convert the audio from WAV to Ogg. My guess is that somewhere in that process RecordRTC sets the predefined audio length. Is there any way to set the predefined length using MediaRecorder?
This is a Chrome bug.
Firefox does expose the duration of the recorded media, and if you set the currentTime of the recorded media to more than its actual duration, then the property becomes available in Chrome too...
var recorder,
chunks = [],
ctx = new AudioContext(),
aud = document.getElementById('aud');
function exportAudio() {
var blob = new Blob(chunks);
aud.src = URL.createObjectURL(new Blob(chunks));
aud.onloadedmetadata = function() {
// it should already be available here
log.textContent = ' duration: ' + aud.duration;
// handle chrome's bug
if (aud.duration === Infinity) {
// set it to bigger than the actual duration
aud.currentTime = 1e101;
aud.ontimeupdate = function() {
this.ontimeupdate = () => {
return;
}
log.textContent += ' after workaround: ' + aud.duration;
aud.currentTime = 0;
}
}
}
}
function getData() {
var request = new XMLHttpRequest();
request.open('GET', 'https://upload.wikimedia.org/wikipedia/commons/4/4b/011229beowulf_grendel.ogg', true);
request.responseType = 'arraybuffer';
request.onload = decodeAudio;
request.send();
}
function decodeAudio(evt) {
var audioData = this.response;
ctx.decodeAudioData(audioData, startRecording);
}
function startRecording(buffer) {
var source = ctx.createBufferSource();
source.buffer = buffer;
var dest = ctx.createMediaStreamDestination();
source.connect(dest);
recorder = new MediaRecorder(dest.stream);
recorder.ondataavailable = saveChunks;
recorder.onstop = exportAudio;
source.start(0);
recorder.start();
log.innerHTML = 'recording...'
// record only 5 seconds
setTimeout(function() {
recorder.stop();
}, 5000);
}
function saveChunks(evt) {
if (evt.data.size > 0) {
chunks.push(evt.data);
}
}
// we need user-activation
document.getElementById('button').onclick = function(evt){
getData();
this.remove();
}
<button id="button">start</button>
<audio id="aud" controls></audio><span id="log"></span>
So the advice here would be to star the bug report so that the Chromium team takes some time to fix it, even though this workaround does the trick...
Thanks to @Kaiido for identifying the bug and offering the working fix.
I prepared an npm package called get-blob-duration that you can install to get a nice Promise-wrapped function to do the dirty work.
Usage is as follows:
// Returns Promise<Number>
getBlobDuration(blob).then(function(duration) {
console.log(duration + ' seconds');
});
Or ECMAScript 6:
// yada yada async
const duration = await getBlobDuration(blob)
console.log(duration + ' seconds')
A bug in Chrome, detected in 2016, but still open today (March 2019), is the root cause behind this behavior. Under certain scenarios audioElement.duration will return Infinity.
Chrome Bug information here and here
The following code provides a workaround to avoid the bug.
Usage: create your audio element and call this function a single time, passing a reference to your audio element. When the returned promise resolves, the audioElement.duration property should contain the right value. (It also fixes the same problem with video elements.)
/**
* calculateMediaDuration()
* Force media element duration calculation.
* Returns a promise, that resolves when duration is calculated
**/
function calculateMediaDuration(media){
return new Promise( (resolve,reject)=>{
media.onloadedmetadata = function(){
// set the mediaElement.currentTime to a high value beyond its real duration
media.currentTime = Number.MAX_SAFE_INTEGER;
// listen to time position change
media.ontimeupdate = function(){
media.ontimeupdate = function(){};
// setting player currentTime back to 0 can be buggy too, set it first to .1 sec
media.currentTime = 0.1;
media.currentTime = 0;
// media.duration should now have its correct value, return it...
resolve(media.duration);
}
}
});
}
// USAGE EXAMPLE :
calculateMediaDuration( yourAudioElement ).then( ()=>{
console.log( yourAudioElement.duration )
});
Thanks @colxi for the actual solution. I've added some validation steps, as the solution was working fine but had problems with long audio files.
It took me about 4 hours to get it to work with long audio files; it turns out validation was the fix.
function fixInfinity(media) {
return new Promise((resolve, reject) => {
//Wait for media to load metadata
media.onloadedmetadata = () => {
//Changes the current time to update ontimeupdate
media.currentTime = Number.MAX_SAFE_INTEGER;
      //Check whether the duration is infinite, NaN or undefined
      if (ifNull(media)) {
        media.ontimeupdate = () => {
          //If the duration is now valid, resolve the promise with it
          if (!ifNull(media)) {
            resolve(media.duration);
          }
          //The second ontimeupdate is a fallback in case the first one fails
          media.ontimeupdate = () => {
            if (!ifNull(media)) {
              resolve(media.duration);
            }
          };
        };
} else {
//If media duration was never infinity return it
resolve(media.duration);
}
};
});
}
//Check whether the duration is infinite, NaN or undefined
function ifNull(media) {
  //Note: media.duration === NaN is always false, so use Number.isNaN instead
  return media.duration === Infinity || Number.isNaN(media.duration) || media.duration === undefined;
}
//USAGE EXAMPLE
//Get audio player on html
const AudioPlayer = document.getElementById('audio');
const getInfinity = async () => {
//Await for promise
await fixInfinity(AudioPlayer).then(val => {
//Reset audio current time
AudioPlayer.currentTime = 0;
//Log duration
console.log(val)
})
}
I wrapped the webm-duration-fix package to solve the WebM duration problem. It can be used in Node.js and in web browsers, and it supports video files over 2GB without using too much memory.
Usage is as follows:
import fixWebmDuration from 'webm-duration-fix';
// import fs from 'fs'; // needed for the Node.js file-writing step below

const mimeType = 'video/webm;codecs=vp9';
let blobSlice: BlobPart[] = [];
mediaRecorder = new MediaRecorder(stream, {
mimeType
});
mediaRecorder.ondataavailable = (event: BlobEvent) => {
blobSlice.push(event.data);
}
mediaRecorder.onstop = async () => {
// fix blob, support fix webm file larger than 2GB
const fixBlob = await fixWebmDuration(new Blob([...blobSlice], { type: mimeType }));
// to write locally, it is recommended to use fs.createWriteStream to reduce memory usage
const fileWriteStream = fs.createWriteStream(inputPath);
const blobReadstream = fixBlob.stream();
const blobReader = blobReadstream.getReader();
while (true) {
let { done, value } = await blobReader.read();
if (done) {
console.log('write done.');
fileWriteStream.close();
break;
}
fileWriteStream.write(value);
value = null;
}
blobSlice = [];
};
If you want to fix the video file itself, you can use the webmFixDuration package; the other methods here are applied only at the display level, on the video tag, whereas this method modifies the complete video file.
See the webmFixDuration GitHub example:
mediaRecorder.onstop = async () => {
const duration = Date.now() - startTime;
const buggyBlob = new Blob(mediaParts, { type: 'video/webm' });
const fixedBlob = await webmFixDuration(buggyBlob, duration);
displayResult(fixedBlob);
};
I'm making a simple video live-streaming site. My basic implementation is to record fragments of webcam video using MediaRecorder, and send them to the server using socket.io. Then, the server broadcasts the fragments back to all the other clients, where they are reconstructed using MediaSource and SourceBuffer and passed into the video.
This works when the client is already connected to the server when the stream begins, but if the client joins in the middle of streaming (meaning they only begin receiving fragments from the middle of the video), appendBuffer() fails and the MediaSource closes.
I've tried to find a solution with no luck. I think it has to do with the encoding of the passed fragments: you can't just start playback from a fragment taken from the middle of the recording, but I can't find a workaround for this issue. I would really appreciate some assistance. The relevant parts of my code are here:
function Stream(cam) {
//record video in chunks, send over websocket
this.cameraStream = null;
navigator.mediaDevices.getUserMedia({ video: true, audio: true }).then((stream) => {
this.cameraStream = stream;
switch (cam) {
case 1:
video1.srcObject = stream;
video1.play();
break;
case 2:
video2.srcObject = stream;
video2.play();
break;
default:
break;
}
if (cam !== null) record(stream, delayMS);
});
var record = (stream, ms) => {
var rec = new MediaRecorder(stream, {
mimeType: 'video/webm; codecs="opus,vp8"',
});
rec.start(ms);
rec.ondataavailable = (e) => {
var fileReader = new FileReader();
fileReader.onload = () => {
socket.emit('stream-frag', {
stream: fileReader.result,
room: window.location.pathname.split('/')[0] || '/',
cam: cam,
});
};
fileReader.readAsArrayBuffer(e.data);
};
};
this.endStream = function () {
    cam = null;
    // Stop every camera/microphone track so the devices are released.
    this.cameraStream.getTracks().forEach((track) => track.stop());
};
}
function getStream() {
//recieve video chunks from server
socket.on('stream-frag', (data) => {
//console.log(stream);
switch (data.cam) {
case 1:
if (
mediaSource1.readyState === 'open' &&
sourceBuffer1 &&
sourceBuffer1.updating === false
) {
sourceBuffer1.appendBuffer(data.stream);
if (sourceBuffer1.buffered.length > 0) video1.play();
}
break;
case 2:
if (
mediaSource2.readyState === 'open' &&
sourceBuffer2 &&
sourceBuffer2.updating === false
) {
sourceBuffer2.appendBuffer(data.stream);
if (sourceBuffer2.buffered.length > 0) video2.play();
}
break;
}
});
}
You need to stop the recording. You can set a timeout to create self-contained chunks of video, which you can then concatenate on the server using FFmpeg.
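A minimal sketch of that idea, assuming stream is the camera MediaStream and socket is the Socket.IO connection from the question; stopping and recreating the recorder makes every emitted blob a self-contained WebM file that a late joiner can start from:

function recordInChunks(stream, ms) {
  const rec = new MediaRecorder(stream, {
    mimeType: 'video/webm; codecs="opus,vp8"',
  });
  rec.ondataavailable = (e) => {
    if (e.data.size > 0) {
      // Each blob is a complete file with headers, not a continuation fragment.
      e.data.arrayBuffer().then((buf) => socket.emit('stream-chunk', buf));
    }
  };
  rec.start();
  // Stop after `ms` so the chunk is finalized, then start a fresh recorder.
  setTimeout(() => {
    rec.stop();
    recordInChunks(stream, ms);
  }, ms);
}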
I'm making a WebRTC site and working on a one-to-many video connection right now. After finding that addStream() is deprecated, I switched to addTrack(). However, no matter which one I use, only the audio is being sent over, not the video. Originally I thought it was because I was on localhost without https, but even when I run it on my node server the same thing occurs. A solution would be appreciated.
Hosting code (host.js)
document.addEventListener("DOMContentLoaded", () => {
uuid = createUUID();
localVideo = document.getElementById('localVideo');
serverConnection = new WebSocket('wss://' + window.location.hostname + ':443');
console.log("Opened WS on :443")
serverConnection.onmessage = gotMessageFromServer;
var constraints = {
video: true,
audio: true,
};
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia(constraints).then(getUserMediaSuccess).catch(errorHandler);
} else {
alert('Your browser does not support getUserMedia API');
}
document.getElementById("start").addEventListener("click", (e) => {
start(uuid)
});
});
function getUserMediaSuccess(stream) {
localStream = stream;
localVideo.srcObject = stream;
}
function start(uid) {
peerConnections[uid] = new RTCPeerConnection(peerConnectionConfig);
peerConnections[uid].onicecandidate = gotIceCandidate;
for (const track of localStream.getTracks()) {
peerConnections[uid].addTrack(track, localStream);
}
}
Viewer code (client.js)
function pageReady() {
uuid = createUUID();
remoteVideo = document.getElementById('remoteVideo');
remoteVideo.srcObject = remoteStream;
remoteVideo.play();
serverConnection = new WebSocket('wss://' + window.location.hostname + ':443');
serverConnection.onmessage = gotMessageFromServer;
var constraints = {
video: false,
audio: true,
};
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia(constraints).then(getUserMediaSuccess).catch(errorHandler);
} else {
alert('Your browser does not support getUserMedia API');
}
}
function getUserMediaSuccess(stream) {
localStream = stream;
}
function start(isCaller) {
console.log("pressed Start")
peerConnection = new RTCPeerConnection(peerConnectionConfig);
console.log("new RTCconnection")
peerConnection.onicecandidate = gotIceCandidate;
peerConnection.ontrack = gotRemoteStream;
peerConnection.addTrack(localStream.getTracks()[0]);
peerConnection.createOffer().then((desc) => {
createdDescription(desc);
}).catch(errorHandler);
}
function gotRemoteStream(e) {
console.log('got remote stream');
if (e.streams && e.streams[0]) {
remoteVideo.srcObject = e.streams[0];
} else {
if (!inboundStream) {
inboundStream = new MediaStream();
remoteVideo.srcObject = inboundStream;
}
inboundStream.addTrack(e.track);
}
}
P.S. I'm only sending audio from the viewer side because it's a one-way call, but the viewer has to initiate the call. My problem is getting both audio and video from the host side onto the viewer's side.
P.P.S. You probably want more code so you can run it yourselves, so the repo is here.
Open one client on /host and another on /class. Make sure you go to https://localhost or it won't work.
Add this line in the client.js file, after the addTrack() call:
peerConnection.addTransceiver("video");
The viewer creates the offer but only adds an audio track, so the offer negotiates no video section and the host's video has nowhere to go; addTransceiver("video") adds a receive-capable video m-line to the offer, which lets the host's video track come through.
function start(isCaller) {
console.log("pressed Start")
peerConnection = new RTCPeerConnection(peerConnectionConfig);
console.log("new RTCconnection")
peerConnection.onicecandidate = gotIceCandidate;
peerConnection.ontrack = gotRemoteStream;
peerConnection.addTrack(localStream.getTracks()[0]);
peerConnection.addTransceiver("video"); // The line to be added
peerConnection.createOffer().then((desc) => {
createdDescription(desc);
}).catch(errorHandler);
}