What is the best approach for setting up an RTC connection that will succeed on the first attempt?
The following code sometimes works, sometimes doesn't. I think it's a problem with addIceCandidate being called either before or after createAnswer, and I don't know which is preferable, or if that is even the issue. (To see why it's not working all the time, just paste it into a browser and try it a few times — you should see that the "call" button doesn't work every time.)
<body>
<style>
video {
width: 300px
}
</style>
<button id="s">start</button>
<button id="c">Call</button><br>
<video id="L" autoplay muted></video>
<video id="R" autoplay></video>
<video id="R2" autoplay></video>
<video id="R3" autoplay></video>
<script>
var ls, p, p2, bpl, bpr, con = {
  // sdpSemantics: "default"
}, deets = {
  offerToReceiveAudio: 1,
  offerToReceiveVideo: 1
}
// Capture the screen and preview it (muted) in the local <video>.
function g() {
  navigator.mediaDevices.getDisplayMedia().then(s => {
    ls = L.srcObject = s;
  }).catch(err => console.error("getDisplayMedia failed:", err));
}
s.onclick = e => {
  g()
};
// Build a loopback RTCPeerConnection pair and negotiate it.
//
// FIX: setLocalDescription()/setRemoteDescription() are asynchronous.
// The original code fired createAnswer() while setRemoteDescription(offer)
// was still pending, and ICE candidates could be delivered to a peer that
// had no remote description yet — addIceCandidate() rejects in that state,
// which is why the call only connected some of the time. Awaiting each
// signaling step in order makes the handshake deterministic.
async function wow(data = {}) {
  let local = new RTCPeerConnection(con),
      remote = new RTCPeerConnection(con);
  local .addEventListener("icecandidate", e => oic(remote, e));
  remote.addEventListener("icecandidate", e => oic(local , e));
  remote.addEventListener("track", e => grs(data.video, e));
  data.localStream.getTracks().forEach(t => {
    local.addTrack(t, data.localStream);
  });
  try {
    const offer = await local.createOffer(deets);
    await local.setLocalDescription(offer);
    await remote.setRemoteDescription(offer);
    const answer = await remote.createAnswer();
    await remote.setLocalDescription(answer);
    await local.setRemoteDescription(answer);
  } catch (err) {
    console.error("negotiation failed:", err);
  }
}
c.onclick = e => {
  let localStream = ls;
  wow({ video: R, localStream });
  wow({ video: R2, localStream });
  wow({ video: R3, localStream });
};
// Attach the remote stream to its <video> once; the "track" event fires
// per track, so guard against re-assigning the same stream.
function grs(vid, e) {
  if (vid.srcObject !== e.streams[0]) {
    vid.srcObject = e.streams[0];
  }
}
// Relay an ICE candidate to the other peer. A null candidate marks
// end-of-gathering and must not be forwarded.
function oic(pc, e) {
  let other = pc;
  if (e.candidate)
    other.addIceCandidate(e.candidate)
      .catch(err => console.error("addIceCandidate failed:", err));
}
</script>
</body>
Notice how sometimes the video streams come in later and empty.
I ran into the same problem when I used the PeerConnection API before; the problem is the Unified Plan SDP format — it may be worth reading about it.
https://webrtc.org/getting-started/unified-plan-transition-guide
https://docs.google.com/document/d/1-ZfikoUtoJa9k-GZG1daN0BU3IjIanQ_JSscHxQesvU/edit#
Please look again at the documentation. First, you are using async functions, so a promise may not have resolved by the time you use its result (for example, the user hasn't answered the permission prompt yet, or the browser declined it outright). Second, you're not handling errors: add a catch() block to your code and the browser will answer your question itself.
Related
Currently I am writing a block of code.
I want to start the webcam when the video starts playing and stop it when the video is paused, but before starting the camera I want to check whether all my models are loaded. I want to load the models only once per page load; a page refresh is a separate case where they will load again. Right now, clicking play in the video starts my camera. How can I configure it so that pausing only disables the camera and pressing play only re-enables it (without loading my models again)?
<html>
<head>
<title>Video detecting and emotion storing in database</title>
<!-- FIX: stylesheets must be loaded with <link rel="stylesheet">; a <script>
     tag with type="text/css" downloads the file but never applies it.
     Also, the jsDelivr version separator is "@", not "#". -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@4.6.2/dist/css/bootstrap.min.css">
<link rel="stylesheet" href="style.css">
<script defer type="text/javascript" src="script/face-api.min.js"></script>
<script defer type="text/javascript" src="script/script.js"></script>
<style>
#livecam{
height: 400px;
width: 600px;
float: right;
}
</style>
</head>
<body>
<div class="container">
<video id="myvideo" src="laravel.mp4" controls height="400" width="600" >
</video>
<video id="livecam" autoplay muted height="400" width="600" ></video>
</div>
</body>
</html>
..................................... Index.php
now my script.js
var video = document.getElementById('myvideo');
const liveview = document.getElementById('livecam');
const height = 400;
const width = 600;
// FIX: the original Promise.all result was never used (floating promise),
// so the camera could start before the models finished loading. Gate the
// webcam on this flag instead; the models are fetched exactly once per
// page load.
let modelsLoaded = false;
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models')
]).then(function () {
  modelsLoaded = true;
}).catch(function (err) {
  // Without a catch, a failed model download was silently swallowed.
  console.error('model loading failed:', err);
});
video.onplaying = function () {
  // Only enable the camera once the models are ready; pressing play again
  // later reuses the already-loaded models.
  if (modelsLoaded) {
    enablewebcam();
    alert("Video is playing");
  }
};
video.onpause = function () {
  disablecam();
};
function enablewebcam() {
  navigator.mediaDevices.getUserMedia({ video: { width: 600, height: 400 }, audio: false })
    .then(function (stream) {
      liveview.srcObject = stream;
    })
    .catch(function (err) {
      console.error('webcam unavailable:', err);
    });
}
function disablecam() {
  // FIX: clearing srcObject alone only blanks the preview; stop the tracks
  // so the camera (and its indicator light) is actually released.
  var stream = liveview.srcObject;
  if (stream) {
    stream.getTracks().forEach(function (track) { track.stop(); });
  }
  liveview.srcObject = null;
}
To check if your models have already been loaded before enabling the webcam, you can use a flag variable that is set to true after the models are loaded. Then, in your enablewebcam function, you can check the value of this flag before enabling the webcam:
// Flag flipped once every face-api model has finished downloading.
let modelsLoaded = false;
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models')
]).then(() => {
  modelsLoaded = true;
}).catch((err) => {
  // FIX: without a catch, a failed download was silently swallowed and
  // modelsLoaded simply stayed false forever with no diagnostic.
  console.error('model loading failed:', err);
});
// Open the camera only after the models are available.
function enablewebcam() {
  if (modelsLoaded) {
    navigator.mediaDevices.getUserMedia({ video: { width: 600, height: 400 }, audio: false })
      .then((stream) => {
        liveview.srcObject = stream;
      })
      .catch((err) => {
        console.error('webcam unavailable:', err);
      });
  }
}
Alternatively, you can use the faceapi.nets.tinyFaceDetector.isLoaded() function to check if the tinyFaceDetector model has been loaded. If it has, you can assume that all the models have been loaded, since they are all being loaded in the same Promise.all block.
// Alternative: ask face-api directly instead of keeping a flag.
// NOTE(review): some face-api.js versions expose isLoaded as a boolean
// property rather than a method — confirm against the bundled version.
function enablewebcam() {
if (faceapi.nets.tinyFaceDetector.isLoaded()) {
navigator.mediaDevices.getUserMedia({ video: { width: 600, height: 400 }, audio: false }).then((stream) => {
liveview.srcObject = stream;
});
}
}
Either way, you can use the video.onpause event to stop the webcam when the video is paused, as you have already done in your code.
To make sure the models have loaded when the user clicks play, delay adding the controls attribute until after the models have been fetched.
If you do this in a then handler of the Promise.all call, you can add event listeners to the video element to start and stop the video in the same place — the user can then start and stop the video at will. You may want to get rid of the alert message, however.
The snippet has been cut down for testing but unfortunately does not work on Stack overflow:
"use strict";
var video = document.getElementById('myvideo');
const liveview = document.getElementById('livecam');
const height = 400;
const width = 600;
Promise.all([
// production
// faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
// faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
// faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
// faceapi.nets.faceExpressionNet.loadFromUri('/models')
// testing
Promise.resolve(true),
Promise.resolve(true),
Promise.resolve(true),
Promise.resolve(true)
])
.then( ()=> {
video.onplaying = function(){
enablewebcam();
alert("Video is playing");
};
video.onpause = function(){
disablecam();
}
// show video controls:
video.setAttribute("controls", "");
function enablewebcam(){
navigator.mediaDevices.getUserMedia({video:{width:600, height:400},
audio:false})
.then((stream) => {
liveview.srcObject = stream;
})
.catch( err => {
console.log("no cam, ", err.message)
});
};
function disablecam(){
liveview.srcObject = null;
}
})
.catch(err => {
console.log(err);
alert("Something went wrong");
});
video { border: thin solid blue}
<div class="container">
<video id="myvideo"
src="https://i.imgur.com/LTc02xw_lq.mp4"
height="400" width="600">
</video>
<video id="livecam" autoplay muted height="400" width="600" ></video>
</div>
You can create an array of promises because the Promise class has a static all method that receives an array of promises and resolves once they are all fulfilled. If you think that one of the promises could fail, use allSettled instead, because all rejects as soon as any one of the input promises rejects.
The allSettled method returns a promise that fulfills with an array of records, each having a status field ('fulfilled' or 'rejected') and either a value or a reason field with that promise's result.
An example of this would be:
// Your array of promises
// Your array of promises (concrete examples so the snippet actually runs;
// the original used a bare "..." placeholder, which is a syntax error).
const promises = [
  Promise.resolve(1),
  Promise.resolve(2)
];

// 'all' method — FIX: the global is "Promise" (singular), not "Promises".
// Promise.all rejects as soon as any input promise rejects.
Promise.all(promises)
  .then(data => console.log(data));

// 'allSettled' method — always fulfills, with one {status, value|reason}
// record per input promise.
Promise.allSettled(promises)
  .then(data => console.log(data));
So I found the answer, as hinted by Ritik: set a variable to true after the models load successfully, and check that variable every time before enabling the camera.
// FIX: declare the flag (the original assigned an implicit global) and
// start it as false so the check before enabling the camera is defined.
let modeloadedstatus = false;
Promise.all([
  // FIX: the original first URI was '/mmodels' (typo) while every other
  // model loads from '/models'.
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models')
]).then(function () {
  modeloadedstatus = true;
}).catch(function (err) {
  console.error('model loading failed:', err);
});
checking modeloadedstatus before everytime enabling camera
// Refuse to start the camera until the models have finished loading.
video.onplaying = function () {
  if (!modeloadedstatus) {
    alert("Error in model loading");
    return;
  }
  enablewebcam();
  console.log(modeloadedstatus);
  alert("Video is playing");
};
I tried to make a metronome using HTML and JS. I know the audio can't autoplay due to restrictions (I don't want it to anyway), so I placed an <audio ...> element with a <source ...> inside, with all (I thought?) appropriate attributes; I controlled playback using JS triggered by a button click. This works on my laptop and even in the XBox Edge browser, but on iOS browsers (both Safari and Firefox) the sound does not play. The HTML looks like this:
<button id="metronome-button">Start</button>
<audio id="metronome-audio" autoplay="0" autostart="0" loop="0">
<source src="tick.mp3" type="audio/mpeg">
</audio>
And the JS looks like this:
const metronomeAudio = document.getElementById('metronome-audio');
const metronomeButton = document.getElementById('metronome-button');
let metronomeInterval = null;
metronomeButton.addEventListener('click', () => {
  // FIX: the original started a NEW interval on every click and
  // overwrote the handle, leaking the old intervals forever.
  if (metronomeInterval !== null) {
    clearInterval(metronomeInterval);
  }
  metronomeInterval = setInterval(() => {
    metronomeAudio.loop = false;
    // Rewind before replaying so each tick restarts cleanly even if the
    // previous one has not finished.
    metronomeAudio.pause();
    metronomeAudio.currentTime = 0;
    metronomeAudio.play();
  }, 500);
});
Since this didn't work, I did more looking and found this solution in another StackOverflow thread, which uses JS and no HTML at all (other than being triggered by a button's click event):
// Starts a 500 ms metronome. FIX: returns the interval id so the caller
// can stop it with clearInterval(id); the original discarded the id and
// the metronome could never be stopped. (Backward-compatible: the
// original returned undefined, which no caller could have used.)
function startMetronome () {
  return setInterval(() => {
    const audio = new Audio('tick.mp3');
    audio.loop = false;
    audio.play();
    audio.onended = () => {
      audio.remove();
    };
  }, 500);
}
Again, this works on PC in various browsers, why does this fail specifically on iOS? (Have not tested on Android, don't have device.)
I am not sure if this is documented anywhere (I didn't stumble across it), but after some additional testing I discovered that iOS prohibits use of the .play() function on an existing HTML <audio> element or JS Audio() element, unless called directly and synchronously as the result of a user UI event. In other words, this will work:
// Works on iOS: play() runs synchronously inside the user's click event,
// so the browser treats it as gesture-initiated.
const button = document.getElementById('my-button')
const audio = document.getElementById('my-audio')
button.addEventListener('click', () => {
audio.play()
})
But this will not work:
// Fails on iOS: by the time the timeout fires, the call is no longer tied
// to the user gesture, so play() on a pre-existing element is blocked.
const button = document.getElementById('my-button')
const audio = document.getElementById('my-audio')
button.addEventListener('click', () => {
setTimeout(() => { audio.play() }, 1000)
})
Thus, having the .play() call in an async callback breaks playback on iOS.
However, as mentioned in a comment on this answer here: https://stackoverflow.com/a/54432573/983173, if you instantiate your audio element within a synchronous user interaction event handler, you can re-use and re-play (e.g. .play()) an Audio() element as much as you like. For example:
// Workaround: construct the Audio element synchronously inside the click
// handler; iOS then allows later (even async) play() calls on it.
const button = document.getElementById('my-button')
let audio = null
button.addEventListener('click', () => {
audio = new Audio('myAudio.mp3')
// Works because `audio` itself was created synchronously during user event handler
setTimeout(() => { audio.play() }, 1000)
})
I'm using Firefox 62.0.3 on my tablet, it works on one of the tablets but not the other.
On one tablet I am getting an error that says
DOMException: "The fetching process for the media resource was aborted by the user agent at the user's request."
I am not at all sure what this is telling me, does my video need to be within a clickhandler? I thought if autoplay is true and muted is true, i am good to go?
Again it works on the other tablet with the same firefox version.
<video id="asd" class="asd" oncreate={(el) => stream.start(el)}> Sorry, your browser doesn't support embedded videos.</video>
I then do
// Attach the first remote stream to the playback element and start it.
this.peerConnection.ontrack = (event) => {
  const mediaStream = event.streams[0];
  // NOTE(review): the markup above gives the video id="asd" — confirm an
  // element with id="stream" actually exists, otherwise this is null.
  let remoteVideoElement = document.getElementById('stream');
  // FIX: autoplay/defaultMuted/muted are boolean IDL attributes — assign
  // booleans, not strings ("muted" happened to be truthy, but it is wrong).
  remoteVideoElement.autoplay = true;
  remoteVideoElement.defaultMuted = true;
  remoteVideoElement.muted = true;
  // FIX: the JS property is camelCased "playsInline"; setting "playsinline"
  // only created an unrelated expando and the attribute was never applied —
  // on iOS that alone can stop inline playback.
  remoteVideoElement.playsInline = true;
  if ('srcObject' in remoteVideoElement) {
    remoteVideoElement.srcObject = mediaStream;
  } else {
    // Avoid using this in new browsers, as it is going away.
    remoteVideoElement.src = URL.createObjectURL(mediaStream);
  }
  let playPromise = remoteVideoElement.play();
  if (playPromise !== undefined) {
    playPromise.then((_) => {
      // will play video
    }).catch((err) => {
      console.error("playPromise error: ", err);
    });
  } else {
    console.error("playPromise is undefined: ", playPromise);
    remoteVideoElement.load();
  }
};
It works but when i try to resolve playPromise it gives the DOMException error, and load() does not work either on the tablet that is giving issues.
I'm watching a series of videos on a website organised in a playlist. Each video is about 2 minutes long.
The website uses HTML 5 video player and it supports auto-play. That is each time a video ends, the next video is loaded and automatically played, which is great.
However, with Fullscreen, even if I fullscreened a video previously, when the next video loads in the playlist, the screen goes back to normal, and I have to click the fullscreen button again....
I've tried writing a simple javascript extension with Tampermonkey to load the video fullscreen automatically.
$(document).ready(function() {
// Request fullscreen using whichever vendor-prefixed API the browser offers.
function makefull() {
var vid = $('video')[0]
if (vid.requestFullscreen) {
vid.requestFullscreen();
} else if (vid.mozRequestFullScreen) {
vid.mozRequestFullScreen();
} else if (vid.webkitRequestFullscreen) {
vid.webkitRequestFullscreen();
}
//var vid = $('button.vjs-fullscreen-control').click();
}
// NOTE(review): requestFullscreen() must be triggered by a user gesture;
// calling it straight from DOM-ready (as here) is rejected by the browser,
// which is exactly the error quoted below. Also, as posted the ready(...)
// callback is never closed — a final "})" is missing after this line.
makefull()
But I'm getting this error:
Failed to execute 'requestFullscreen' on 'Element': API can only be initiated by a user gesture.
It's extremely annoying to have to manually click fullscreen after each 2 min video. Is there a way I can achieve this in my own browser? I'm using Chrome.
If you can get the list of URL's then you can create your own playlist. The code cannot be accurately tested within a cross-origin <iframe>, for example at plnkr.co. The code can be tested at console at this very document. To test the code, you can use the variable urls at MediaFragmentRecorder and substitute "pause" event for "ended" event at .addEventListener().
If you have no control over the HTML or JavaScript used at the site not sure how to provide any code that will be able to solve the inquiry.
const video = document.createElement("video");
video.controls = true;
video.autoplay = true;
const urls = [
{
src: "/path/to/video/"
}, {
src: "/path/to/video/"
}
];
(async() => {
try {
video.requestFullscreen = video.requestFullscreen
|| video.mozRequestFullscreen
|| video.webkitRequestFullscreen;
let fullScreen = await video.requestFullscreen().catch(e => {throw e});
console.log(fullScreen);
} catch (e) {
console.error(e.message)
}
for (const {src} of urls) {
await new Promise(resolve => {
video.addEventListener("canplay", e => {
video.load();
video.play();
}, {
once: true
});
video.addEventListener("ended", resolve, {
once: true
});
video.src = src;
});
}
})();
I opened a webcam by using the following JavaScript code:
const stream = await navigator.mediaDevices.getUserMedia({ /* ... */ });
Is there any JavaScript code to stop or close the webcam?
Since this answer has been originally posted the browser API has changed.
.stop() is no longer available on the stream that gets passed to the callback.
The developer will have to access the tracks that make up the stream (audio or video) and stop each of them individually.
More info here: https://developers.google.com/web/updates/2015/07/mediastream-deprecations?hl=en#stop-ended-and-active
Example (from the link above):
// Stop every track (audio and video) — this is what actually releases the
// camera/microphone and turns off the indicator light.
stream.getTracks().forEach(function(track) {
track.stop();
});
Browser support may differ.
Previously, navigator.getUserMedia provided you with a stream in the success callback, you could call .stop() on that stream to stop the recording (at least in Chrome, seems FF doesn't like it)
Use any of these functions:
// stop both mic and camera
function stopBothVideoAndAudio(stream) {
stream.getTracks().forEach(function(track) {
if (track.readyState == 'live') {
track.stop();
}
});
}
// stop only camera
function stopVideoOnly(stream) {
stream.getTracks().forEach(function(track) {
if (track.readyState == 'live' && track.kind === 'video') {
track.stop();
}
});
}
// stop only mic
function stopAudioOnly(stream) {
stream.getTracks().forEach(function(track) {
if (track.readyState == 'live' && track.kind === 'audio') {
track.stop();
}
});
}
Don't use stream.stop(), it's deprecated
MediaStream Deprecations
Use stream.getTracks().forEach(track => track.stop())
FF, Chrome and Opera has started exposing getUserMedia via navigator.mediaDevices as standard now (Might change :)
online demo
// Modern promise-based getUserMedia; the stream is stashed on window so
// the stop snippets below can reach it later.
navigator.mediaDevices.getUserMedia({audio:true,video:true})
.then(stream => {
window.localStream = stream;
})
.catch( (err) =>{
console.log(err);
});
// later you can do below
// NOTE(review): "later" matters — localStream only exists after the
// promise above has resolved.
// stop both video and audio
localStream.getTracks().forEach( (track) => {
track.stop();
});
// stop only audio
localStream.getAudioTracks()[0].stop();
// stop only video
localStream.getVideoTracks()[0].stop();
Suppose we have streaming in video tag and id is video - <video id="video"></video> then we should have following code -
var videoEl = document.getElementById('video');
// FIX: "stream" and "tracks" were assigned without var/let/const,
// creating implicit globals (a ReferenceError in strict mode).
// now get the stream feeding the element
var stream = videoEl.srcObject;
// now get all tracks
var tracks = stream.getTracks();
// stop each track; this is what releases the camera hardware
tracks.forEach(function(track) {
// stopping every track
track.stop();
});
// detach the (now dead) stream from the element
videoEl.srcObject = null;
Starting Webcam Video with different browsers
For Opera 12
// Legacy (circa 2012) callback-style API; createObjectURL(stream) is
// removed from modern browsers — use srcObject instead.
window.navigator.getUserMedia(param, function(stream) {
video.src =window.URL.createObjectURL(stream);
}, videoError );
For Firefox Nightly 18.0
// Legacy Firefox-prefixed API; mozSrcObject was Gecko's precursor to the
// standard srcObject property.
window.navigator.mozGetUserMedia(param, function(stream) {
video.mozSrcObject = stream;
}, videoError );
For Chrome 22
// Legacy Chrome-prefixed API with webkit-prefixed createObjectURL; both
// prefixes are gone from modern Chrome.
window.navigator.webkitGetUserMedia(param, function(stream) {
video.src =window.webkitURL.createObjectURL(stream);
}, videoError );
Stopping Webcam Video with different browsers
For Opera 12
// Legacy stop: clearing src only detaches the element; on modern browsers
// it does NOT release the capture hardware (stop the tracks instead).
video.pause();
video.src=null;
For Firefox Nightly 18.0
// Legacy Firefox stop via the prefixed mozSrcObject property.
video.pause();
video.mozSrcObject=null;
For Chrome 22
// Legacy Chrome stop: emptying src detaches the element only; modern code
// should stop the MediaStream tracks to turn the webcam light off.
video.pause();
video.src="";
With this the Webcam light go down everytime...
Try method below:
// Re-attaches a stop() shim to the stream, emulating the removed
// MediaStream.stop(). NOTE(review): re-adding a deprecated method is
// discouraged (see the comment further down this page) — prefer a plain
// helper that stops the tracks.
var mediaStream = null;
navigator.getUserMedia(
{
audio: true,
video: true
},
function (stream) {
mediaStream = stream;
mediaStream.stop = function () {
this.getAudioTracks().forEach(function (track) {
track.stop();
});
this.getVideoTracks().forEach(function (track) { //in case... :)
track.stop();
});
};
/*
* Rest of your code.....
* */
});
/*
* somewhere inside your code you call
* (NOTE: only after the success callback has run — until then
* mediaStream is still null and this call would throw)
* */
mediaStream.stop();
You can end the stream directly using the stream object returned in the success handler to getUserMedia. e.g.
localMediaStream.stop()
video.src="" or null would just remove the source from video tag. It wont release the hardware.
Since you need the tracks to close the streaming, and you need the stream boject to get to the tracks, the code I have used with the help of the Muaz Khan's answer above is as follows:
// Wire a "close" button that stops the stream's video tracks.
// NOTE(review): as posted this snippet is truncated — the closing braces
// of stopStream() and of the outer if are missing. Also, assigning a
// MediaStream to videoEl.src is the legacy pattern; modern browsers need
// videoEl.srcObject = stream.
if (navigator.getUserMedia) {
navigator.getUserMedia(constraints, function (stream) {
videoEl.src = stream;
videoEl.play();
document.getElementById('close').addEventListener('click', function () {
stopStream(stream);
});
}, errBack);
function stopStream(stream) {
console.log('stop called');
stream.getVideoTracks().forEach(function (track) {
track.stop();
});
Of course this will close all the active video tracks. If you have multiple, you should select accordingly.
If .stop() is deprecated then I don't think we should re-add it like @MuazKhan does. There's a reason why things get deprecated and they should not be used anymore. Just create a helper function instead... Here is a more ES6 version:
// Stop every track of the given MediaStream, releasing any capture devices.
function stopStream (stream) {
  stream.getTracks().forEach((track) => track.stop())
}
You need to stop all tracks (from webcam, microphone):
localStream.getTracks().forEach(track => track.stop());
Start and Stop Web Camera,(Update 2020 React es6 )
Start Web Camera
// NOTE(review): despite its name this handler STARTS the camera, and the
// arrow function body is missing its opening "{" after "=>" — as posted
// this does not parse. Presumably meant: startWebCamera = () => { ... }.
stopWebCamera =()=>
//Start Web Came
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
//use WebCam
navigator.mediaDevices.getUserMedia({ video: true }).then(stream => {
this.localStream = stream;
this.video.srcObject = stream;
this.video.play();
});
}
}
Stop Web Camera or Video playback in general
// Stop playback AND release the camera: pausing and clearing the element
// alone is not enough — the stashed localStream's tracks must be stopped.
stopVideo =()=>
{
this.video.pause();
this.video.src = "";
this.video.srcObject = null;
// As per new API stop all streams
if (this.localStream)
this.localStream.getTracks().forEach(track => track.stop());
}
Stop Web Camera function works even with video streams:
// The same element can be reused for ordinary file playback afterwards.
this.video.src = this.state.videoToTest;
this.video.play();
Using .stop() on the stream works on chrome when connected via http. It does not work when using ssl (https).
Please check this: https://jsfiddle.net/wazb1jks/3/
// Legacy callback API: stash the stream globally so stopStream() below
// can reach and release it later.
navigator.getUserMedia(mediaConstraints, function(stream) {
window.streamReference = stream;
}, onMediaError);
Stop Recording
// Stop and release the globally stashed stream, if there is one.
function stopStream() {
  const stream = window.streamReference;
  if (!stream) return;
  const halt = function (track) {
    track.stop();
  };
  stream.getAudioTracks().forEach(halt);
  stream.getVideoTracks().forEach(halt);
  // Drop the reference so a second call is a no-op.
  window.streamReference = null;
}
The following code worked for me:
// Stop every track feeding the video element so the camera is released.
public vidOff() {
  let stream = this.video.nativeElement.srcObject;
  let tracks = stream.getTracks();
  tracks.forEach(function (track) {
    track.stop();
  });
  this.video.nativeElement.srcObject = null;
  // FIX: HTMLVideoElement has no stop() method — the original call to
  // this.video.nativeElement.stop() threw a TypeError; pause() is the
  // supported way to halt playback.
  this.video.nativeElement.pause();
}
Have a reference of stream form successHandle
// Keep a reference to the stream from the success handler.
var streamRef;
var handleVideo = function (stream) {
streamRef = stream;
}
// This stops both the video and audio tracks.
// NOTE(review): streamRef is only assigned once handleVideo has run —
// executing the lines below synchronously right after registering the
// handler would throw, because streamRef is still undefined.
streamRef.getTracks().map(function (val) {
val.stop();
});