How to switch between the front camera and rear camera in JavaScript?

let constraints;
function handleVideo() {
  const constraints = {
    video: {
      facingMode: {
        exact: 'user'
      }
    }
  };
  var video;
  navigator.mediaDevices.getUserMedia(constraints)
    .then((stream) => {
      video = document.createElement("video");
      video.srcObject = stream;
      video.play();
      video.onloadeddata = () => {
        ctx.height = video.videoHeight;
      };
    });
}
I know that changing exact from 'user' to 'environment' switches between the front and back cameras, but I don't know how to do this on click.

Something like this:
function handleVideo(cameraFacing) {
  const constraints = {
    video: {
      facingMode: {
        exact: cameraFacing
      }
    }
  };
  return constraints;
}
function turnVideo(constraints) {
  let video;
  navigator.mediaDevices.getUserMedia(constraints)
    .then((stream) => {
      video = document.createElement("video");
      video.srcObject = stream;
      video.play();
      video.onloadeddata = () => {
        ctx.height = video.videoHeight;
      };
    });
}
document.querySelector(".frontCamera").addEventListener("click", () => {
  turnVideo(handleVideo("user"));
});
document.querySelector(".backCamera").addEventListener("click", () => {
  turnVideo(handleVideo("environment"));
});
<div class="frontCamera">front</div>
<div class="backCamera">back</div>

Related

Can I use addEventListener('play') inside a function? The function runs, but the addEventListener('play') handler does not

const faceDetection = () => {
  const video = document.getElementById(`video`);
  console.log(video);
  const displaySize = { width: video.width, height: video.height };
  video.addEventListener('click', () => {
    console.log(`run`);
  });
  video.addEventListener('play', () => {
    console.log(`run`);
    const canvas = faceapi.createCanvasFromMedia(video);
    camera.append(canvas);
    faceapi.matchDimensions(canvas, displaySize);
    // interval
    setInterval(async () => {
      console.log(`this run`);
      const detections = await faceapi
        .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
        .withFaceLandmarks();
      const resizedDetections = faceapi.resizeResults(detections, displaySize);
      canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);
      faceapi.draw.drawDetections(canvas, resizedDetections);
      faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
    }, 100);
  });
};
The video.addEventListener('click') handler does run, but the video.addEventListener('play') handler does not. Can someone please give me an idea of why it isn't working?
My mistake: I was calling vid.play() inside the navigator.mediaDevices.getUserMedia() handler (now commented out below):
navigator.mediaDevices
  .getUserMedia({ video: true })
  .then((stream) => {
    vid.srcObject = stream;
    // vid.play();
    if (backend === 'webgl') return faceDetection(100);
    if (backend === 'cpu') return faceDetection(1000);
    track = stream.getTracks();
    resetMessages();
  })
  .catch((e) => {
    console.log(e);
  })
  .finally(() => {
    preloader.style.display = 'none';
  });
Hi, maybe your component is not loaded yet when you call the function. Can you try adding a timeout?
Like in this issue:
Stack Overflow - How can I make a waitFor(delay)
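Such a helper is simple to write yourself. A minimal sketch, assuming a plain Promise-based delay is enough; the waitFor name and the 500 ms value are illustrative:

// Hypothetical waitFor helper: resolves after `delay` milliseconds.
const waitFor = (delay) => new Promise((resolve) => setTimeout(resolve, delay));

// Usage: give the component some time to mount before wiring up detection.
waitFor(500).then(() => faceDetection());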

Using the camera flashlight prevents changing facingMode - navigator.mediaDevices

I'm trying to build a web app that takes photos using a webcam or a mobile camera, depending on the device. I have already made a button that changes constraints.facingMode so the user can use both cameras ("environment", "user") if the device supports it. The problem appears when I also enable flashlight support, by creating a button and setting it up as the flashlight toggler like this:
const SUPPORTS_MEDIA_DEVICES = 'mediaDevices' in navigator;
if (SUPPORTS_MEDIA_DEVICES) {
  const track = stream.getVideoTracks()[0];
  const imageCapture = new ImageCapture(track);
  const photoCapabilities = imageCapture.getPhotoCapabilities().then(() => {
    const btn = document.querySelector('.toggleCameraTorch');
    btn.style.visibility = 'visible';
    btn.addEventListener('click', function () {
      try {
        track.applyConstraints({
          advanced: [{ torch: !wheelsfs.videoConstraint.torchState }]
        });
        wheelsfs.videoConstraint.torchState = !wheelsfs.videoConstraint.torchState;
      }
      catch (e) {
        alert(e.message);
      }
    });
  });
}
After that, the flashlight works perfectly, but I no longer have the option to swap the camera (facingMode). When I try to change the camera I get the error "could not start video source", as if the camera were already in use by something else.
This is how I'm changing the camera's facingMode:
wheelsfs.videoConstraint.facingMode.exact = wheelsfs.videoConstraint.facingMode.exact == "environment" ? "user" : "environment";
var cameraInput = wheelsfs.videoConstraint.facingMode.exact;
wheelsfs.videoTrue.srcObject && wheelsfs.videoTrue.srcObject.getTracks().forEach(t => t.stop());
wheelsfs.videoConstraint = {
  video: {
    width: { ideal: trueWidth },
    height: { ideal: trueHeight },
    facingMode: { ideal: "environment" }
  },
  facingMode: { exact: cameraInput }
};
navigator.mediaDevices.getUserMedia({ video: wheelsfs.videoConstraint }).then(function (stream) {
  wheelsfs.videoTrue.srcObject = stream;
  wheelsfs.videoTrue.play();
  const SUPPORTS_MEDIA_DEVICES = 'mediaDevices' in navigator;
  if (SUPPORTS_MEDIA_DEVICES) {
    const track = stream.getVideoTracks()[0];
    const imageCapture = new ImageCapture(track);
    const photoCapabilities = imageCapture.getPhotoCapabilities().then(() => {
      const btn = document.querySelector('.toggleCameraTorch');
      btn.style.visibility = 'visible';
      btn.addEventListener('click', function () {
        try {
          track.applyConstraints({
            advanced: [{ torch: !wheelsfs.videoConstraint.torchState }]
          });
          wheelsfs.videoConstraint.torchState = !wheelsfs.videoConstraint.torchState;
        }
        catch (e) {
          alert(e.message);
        }
      });
    });
  }
}).catch((e) => { console.log(e.message); });
Solved it by storing stream.getVideoTracks()[0] in a variable and then calling stop() on it before changing the camera (facingMode).
So when I do:
if (SUPPORTS_MEDIA_DEVICES) {
  wheelsfs.track = stream.getVideoTracks()[0];
  const imageCapture = new ImageCapture(wheelsfs.track);
  const photoCapabilities = imageCapture.getPhotoCapabilities().then(() => {
    const btn = document.querySelector('.toggleCameraTorch');
    btn.style.visibility = 'visible';
    btn.addEventListener('click', function () {
      try {
        wheelsfs.track.applyConstraints({
          advanced: [{ torch: !wheelsfs.videoConstraint.torchState }]
        });
        wheelsfs.videoConstraint.torchState = !wheelsfs.videoConstraint.torchState;
      }
      catch (e) {
        alert(e.message);
      }
    });
  });
}
On the 2nd line I save the track in a public variable; then, when the camera-switching function is called, I make sure to run "wheelsfs.track.stop();" just before the navigator.mediaDevices.getUserMedia call.

Camera not popping up when deployed on BuildFire

I am developing a QR code scanner with https://rawgit.com/sitepoint-editors/jsqrcode/master/src/qr_packed.js on BuildFire. It works fine in development, but when deployed to the marketplace the camera does not pop up.
This is my code:
//const qrcode = window.qrcode;
const video = document.createElement("video");
const canvasElement = document.getElementById("qr-canvas");
const canvas = canvasElement.getContext("2d");
const qrResult = document.getElementById("qr-result");
const outputData = document.getElementById("outputData");
const btnScanQR = document.getElementById("btn-scan-qr");
let scanning = false;
qrcode.callback = res => {
  if (res) {
    outputData.innerText = res;
    scanning = false;
    video.srcObject.getTracks().forEach(track => {
      track.stop();
    });
    qrResult.hidden = false;
    canvasElement.hidden = true;
    btnScanQR.hidden = false;
  }
};
buildfire.services.camera.barcodeScanner.scan(
  {
    preferFrontCamera: true,
    showFlipCameraButton: true,
    formats: "QR_CODE,PDF_417",
  },
  btnScanQR.onclick = () => {
    navigator.mediaDevices
      .getUserMedia({ video: { facingMode: "environment" } })
      .then(function (stream) {
        scanning = true;
        qrResult.hidden = true;
        btnScanQR.hidden = true;
        canvasElement.hidden = false;
        video.setAttribute("playsinline", true); // required to tell iOS Safari we don't want fullscreen
        video.srcObject = stream;
        video.play();
        tick();
        scan();
      });
  }
);
function tick() {
  canvasElement.height = video.videoHeight;
  canvasElement.width = video.videoWidth;
  canvas.drawImage(video, 0, 0, canvasElement.width, canvasElement.height);
  scanning && requestAnimationFrame(tick);
}
function scan() {
  try {
    qrcode.decode();
  } catch (e) {
    setTimeout(scan, 300);
  }
}
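As a side note, the scan flow above can be exercised on its own; a minimal sketch without the BuildFire wrapper, assuming the same DOM elements and the jsqrcode build from the question:

// Standalone click handler: open the rear camera and start the decode loop.
btnScanQR.onclick = async () => {
  const stream = await navigator.mediaDevices.getUserMedia({
    video: { facingMode: "environment" },
  });
  scanning = true;
  qrResult.hidden = true;
  btnScanQR.hidden = true;
  canvasElement.hidden = false;
  video.setAttribute("playsinline", true); // avoid fullscreen on iOS Safari
  video.srcObject = stream;
  await video.play();
  tick(); // keeps copying video frames into the canvas that the decoder reads
  scan();
};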

getDisplayMedia does not record all audio output from my PC

I'm having a hard time recording my computer screen plus all of its audio (system + mic) with getDisplayMedia. I'm using a React component that relies on this code:
const defaultDisplayMediaOptions = {
  video: {
    cursor: "never",
  },
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    sampleRate: 44100,
  },
};
export default function useScreenRecording({
  displayMediaOptions = defaultDisplayMediaOptions,
  onEnd = () => {},
  onError = () => {},
  onStart = () => {},
} = {}) {
  const [captureStream, setCaptureStream] = React.useState(null);
  const [error, setError] = React.useState(null);
  const [mediaRecorder, setMediaRecorder] = React.useState(null);
  const [isRecording, setIsRecording] = React.useState(false);
  const [recording, setRecording] = React.useState(null);
  const stopRecording = () => {
    try {
      setIsRecording(false);
      mediaRecorder.stop();
      captureStream.getTracks().forEach(track => track.stop());
    } catch (e) {
      onError(e);
      setError(e);
    }
  };
  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getDisplayMedia(
        displayMediaOptions,
      );
      setIsRecording(true);
      stream.getTracks().forEach(track => {
        track.onended = stopRecording;
      });
      setCaptureStream(stream);
      const recorder = new MediaRecorder(stream);
      recorder.ondataavailable = event => {
        onEnd(event);
        setRecording(event.data);
      };
      recorder.start();
      setMediaRecorder(recorder);
      onStart({ stream, recorder });
    } catch (e) {
      setIsRecording(false);
      onError(e);
      setError(e);
    }
  };
So far the recording only captures my screen and the system's audio. Could anyone help me with this? Is it possible to record my PC screen plus all the audio outputs? I tested recording with getUserMedia and its audio capture works 100% as I expect, but getDisplayMedia doesn't do the same. Thank you in advance for your help.
You can merge audio tracks from getUserMedia into the getDisplayMedia stream to capture the audio from the user's mic.
const defaultDisplayMediaOptions = {
  video: {
    cursor: "never",
  },
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    sampleRate: 44100,
  },
};
export default function useScreenRecording({
  displayMediaOptions = defaultDisplayMediaOptions,
  onEnd = () => {},
  onError = () => {},
  onStart = () => {},
} = {}) {
  const [captureStream, setCaptureStream] = React.useState(null);
  const [error, setError] = React.useState(null);
  const [mediaRecorder, setMediaRecorder] = React.useState(null);
  const [isRecording, setIsRecording] = React.useState(false);
  const [recording, setRecording] = React.useState(null);
  const stopRecording = () => {
    try {
      setIsRecording(false);
      mediaRecorder.stop();
      captureStream.getTracks().forEach(track => track.stop());
    } catch (e) {
      onError(e);
      setError(e);
    }
  };
  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getDisplayMedia(
        displayMediaOptions,
      );
      // Grab the microphone and add its audio track to the display stream.
      const audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
      setIsRecording(true);
      stream.getTracks().forEach(track => {
        track.onended = stopRecording;
      });
      audioStream.getAudioTracks().forEach(track => {
        stream.addTrack(track);
      });
      setCaptureStream(stream);
      const recorder = new MediaRecorder(stream);
      recorder.ondataavailable = event => {
        onEnd(event);
        setRecording(event.data);
      };
      recorder.start();
      setMediaRecorder(recorder);
      onStart({ stream, recorder });
    } catch (e) {
      setIsRecording(false);
      onError(e);
      setError(e);
    }
  };
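One caveat: browsers commonly record only one audio track per stream, so simply adding a second track may not capture both sources. An alternative sketch, mixing the two through the Web Audio API before recording; it assumes stream and audioStream were obtained as in the hook above:

// Mix display audio and mic audio into a single track with the Web Audio API.
const audioCtx = new AudioContext();
const destination = audioCtx.createMediaStreamDestination();
if (stream.getAudioTracks().length > 0) {
  audioCtx.createMediaStreamSource(stream).connect(destination); // system/tab audio
}
audioCtx.createMediaStreamSource(audioStream).connect(destination); // mic audio
const mixedStream = new MediaStream([
  ...stream.getVideoTracks(),
  ...destination.stream.getAudioTracks(),
]);
const recorder = new MediaRecorder(mixedStream); // record the combined stream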

Using the device camera to capture an image in ReactJS

Can I access the device camera and take a photo in ReactJS? The goal is to create a component that takes a picture with the click of a button. From what I've studied, I should use mediaDevices, but I am looking for sample code in ReactJS. Please provide me with a sample, or if you have experience implementing this, please guide me.
I have prepared sample code that can be used as a component. This snippet also works on devices that have two cameras. If you want to record video instead of taking a photo, you can enable audio in the constraints as well.
import React from "react";
class App extends React.Component {
  constructor() {
    super();
    this.cameraNumber = 0;
    this.state = {
      imageDataURL: null,
    };
  }
  initializeMedia = async () => {
    this.setState({ imageDataURL: null });
    if (!("mediaDevices" in navigator)) {
      navigator.mediaDevices = {};
    }
    if (!("getUserMedia" in navigator.mediaDevices)) {
      navigator.mediaDevices.getUserMedia = function (constraints) {
        var getUserMedia =
          navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
        if (!getUserMedia) {
          return Promise.reject(new Error("getUserMedia Not Implemented"));
        }
        return new Promise((resolve, reject) => {
          getUserMedia.call(navigator, constraints, resolve, reject);
        });
      };
    }
    // Get the details of the device's video inputs
    const videoInputs = await this.getListOfVideoInputs();
    // The device has a camera
    if (videoInputs.length) {
      navigator.mediaDevices
        .getUserMedia({
          video: {
            deviceId: {
              exact: videoInputs[this.cameraNumber].deviceId,
            },
          },
        })
        .then((stream) => {
          this.player.srcObject = stream;
        })
        .catch((error) => {
          console.error(error);
        });
    } else {
      alert("The device does not have a camera");
    }
  };
  capturePicture = () => {
    var canvas = document.createElement("canvas");
    canvas.width = this.player.videoWidth;
    canvas.height = this.player.videoHeight;
    var context = canvas.getContext("2d");
    context.drawImage(this.player, 0, 0, canvas.width, canvas.height);
    this.player.srcObject.getVideoTracks().forEach((track) => {
      track.stop();
    });
    console.log(canvas.toDataURL());
    this.setState({ imageDataURL: canvas.toDataURL() });
  };
  switchCamera = async () => {
    const listOfVideoInputs = await this.getListOfVideoInputs();
    // The device has more than one camera
    if (listOfVideoInputs.length > 1) {
      if (this.player.srcObject) {
        this.player.srcObject.getVideoTracks().forEach((track) => {
          track.stop();
        });
      }
      // Switch to the second camera
      if (this.cameraNumber === 0) {
        this.cameraNumber = 1;
      }
      // Switch to the first camera
      else if (this.cameraNumber === 1) {
        this.cameraNumber = 0;
      }
      // Restart based on the selected camera input
      this.initializeMedia();
    } else if (listOfVideoInputs.length === 1) {
      alert("The device has only one camera");
    } else {
      alert("The device does not have a camera");
    }
  };
  getListOfVideoInputs = async () => {
    // Enumerate all media devices attached to this machine
    const enumerateDevices = await navigator.mediaDevices.enumerateDevices();
    // Keep only the video inputs (for devices with multiple cameras)
    return enumerateDevices.filter((device) => device.kind === "videoinput");
  };
  render() {
    const playerORImage = Boolean(this.state.imageDataURL) ? (
      <img src={this.state.imageDataURL} alt="cameraPic" />
    ) : (
      <video
        ref={(reference) => {
          this.player = reference;
        }}
        autoPlay
      ></video>
    );
    return (
      <div className="App">
        {playerORImage}
        <button onClick={this.initializeMedia}>Take Photo</button>
        <button onClick={this.capturePicture}>Capture</button>
        <button onClick={this.switchCamera}>Switch</button>
      </div>
    );
  }
}
export default App;
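For completeness, a minimal entry point for mounting the component above; the root element id and the React 18 react-dom/client API are assumptions about the host project:

import React from "react";
import { createRoot } from "react-dom/client";
import App from "./App";

// Mount the camera component; assumes an element with id="root" exists in the page.
createRoot(document.getElementById("root")).render(<App />);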
