using device camera for capturing image in reactjs - javascript

Can I access the device camera and take a photo in ReactJS? The goal is to create a component that takes a picture with the camera at the click of a button. From what I have studied, I should use mediaDevices, but I am looking for sample code in ReactJS. Please provide me with sample code, or if you have experience implementing this, please guide me.

I have prepared sample code that can be used as a component. This snippet also works on devices that have two cameras. If you want to record video instead of taking a photo, you can additionally enable audio in the constraints (see the note after the code).
import React from "react";

class App extends React.Component {
  constructor() {
    super();
    this.cameraNumber = 0;
    this.state = {
      imageDataURL: null,
    };
  }

  initializeMedia = async () => {
    this.setState({ imageDataURL: null });

    if (!("mediaDevices" in navigator)) {
      navigator.mediaDevices = {};
    }

    if (!("getUserMedia" in navigator.mediaDevices)) {
      // Polyfill getUserMedia for older browsers that only expose
      // the prefixed, callback-based variants
      navigator.mediaDevices.getUserMedia = function (constraints) {
        const getUserMedia =
          navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

        if (!getUserMedia) {
          return Promise.reject(new Error("getUserMedia Not Implemented"));
        }

        return new Promise((resolve, reject) => {
          getUserMedia.call(navigator, constraints, resolve, reject);
        });
      };
    }

    // Get the details of the device's video inputs
    const videoInputs = await this.getListOfVideoInputs();

    if (videoInputs.length) {
      // The device has at least one camera; open the currently selected one
      navigator.mediaDevices
        .getUserMedia({
          video: {
            deviceId: {
              exact: videoInputs[this.cameraNumber].deviceId,
            },
          },
        })
        .then((stream) => {
          this.player.srcObject = stream;
        })
        .catch((error) => {
          console.error(error);
        });
    } else {
      alert("The device does not have a camera");
    }
  };

  capturePicture = () => {
    // Draw the current video frame onto an off-screen canvas,
    // then read it back as a data URL
    const canvas = document.createElement("canvas");
    canvas.width = this.player.videoWidth;
    canvas.height = this.player.videoHeight;
    const context = canvas.getContext("2d");
    context.drawImage(this.player, 0, 0, canvas.width, canvas.height);

    // Stop the stream so the camera is released
    this.player.srcObject.getVideoTracks().forEach((track) => {
      track.stop();
    });

    this.setState({ imageDataURL: canvas.toDataURL() });
  };

  switchCamera = async () => {
    const listOfVideoInputs = await this.getListOfVideoInputs();

    if (listOfVideoInputs.length > 1) {
      // The device has more than one camera; stop the current stream first
      if (this.player.srcObject) {
        this.player.srcObject.getVideoTracks().forEach((track) => {
          track.stop();
        });
      }

      // Toggle between the first and second camera
      this.cameraNumber = this.cameraNumber === 0 ? 1 : 0;

      // Restart the stream with the new camera input
      this.initializeMedia();
    } else if (listOfVideoInputs.length === 1) {
      alert("The device has only one camera");
    } else {
      alert("The device does not have a camera");
    }
  };

  getListOfVideoInputs = async () => {
    // Enumerate all media devices, then keep only the video inputs
    // (relevant for devices with multiple cameras)
    const enumerateDevices = await navigator.mediaDevices.enumerateDevices();
    return enumerateDevices.filter((device) => device.kind === "videoinput");
  };

  render() {
    const playerORImage = this.state.imageDataURL ? (
      <img src={this.state.imageDataURL} alt="cameraPic" />
    ) : (
      <video
        ref={(reference) => {
          this.player = reference;
        }}
        autoPlay
      ></video>
    );

    return (
      <div className="App">
        {playerORImage}
        <button onClick={this.initializeMedia}>Take Photo</button>
        <button onClick={this.capturePicture}>Capture</button>
        <button onClick={this.switchCamera}>Switch</button>
      </div>
    );
  }
}

export default App;
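As noted above, if you want to record video with sound instead of capturing stills, the only change needed is in the constraints passed to getUserMedia. A minimal sketch of that variation (the rest of the component stays the same):

navigator.mediaDevices
  .getUserMedia({
    audio: true, // enable the microphone alongside the camera
    video: {
      deviceId: {
        exact: videoInputs[this.cameraNumber].deviceId,
      },
    },
  })
  .then((stream) => {
    this.player.srcObject = stream;
  });

Recording the resulting stream (for example with MediaRecorder) is a separate step; the snippet only shows the constraint change.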

Related

Audio Worklet - How to capture microphone streams

I am trying to capture the live microphone stream from my desktop. When converting the 32-bit floats to an Int16Array, all values come out as 0. I'm not sure what I have done wrong here. How can I capture the live microphone stream in an AudioWorklet?
Below is the JavaScript code:
try {
  navigator.getUserMedia =
    navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia;
  microphone = navigator.getUserMedia(
    {
      audio: true,
      video: false,
    },
    onMicrophoneGranted,
    onMicrophoneDenied
  );
} catch (e) {
  alert(e);
}

async function onMicrophoneGranted(stream) {
  console.log(stream);
  context = new AudioContext();
  source = context.createMediaStreamSource(stream);
  await context.audioWorklet.addModule('/assets/js/buffer-detector.js');
  // Create our custom node.
  bufferDetectorNode = new AudioWorkletNode(context, 'buffer-detector');
  bufferDetectorNode.port.onmessage = (event) => {};
  source.connect(bufferDetectorNode);
  bufferDetectorNode.connect(context.destination);
  //source.connect(context.destination);
}

function onMicrophoneDenied() {
  console.log('denied');
}
Below is the AudioWorklet:
class BufferProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    this.initBuffer();
  }

  process(inputs) {
    console.log(inputs);
    inputs.forEach((floatArray) => {
      floatArray.forEach((elem) => {
        const intArray = Int16Array.from(elem);
        console.log(intArray);
      });
    });
    //const input = inputs[0];
    //this.append(input[0])
    return true;
  }

  static get parameterDescriptors() {
    return [
      {
        name: 'Buffer Detector',
      },
    ];
  }
}

registerProcessor('buffer-detector', BufferProcessor);
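The zeros are expected: Web Audio samples are Float32 values in the range [-1, 1], and Int16Array.from truncates each value toward zero, so almost every sample becomes 0. You have to scale (and ideally clamp) before converting. A minimal sketch of that conversion inside process(), leaving out the asker's buffering logic:

class BufferProcessor extends AudioWorkletProcessor {
  process(inputs) {
    const channel = inputs[0][0]; // first input, first channel (Float32Array)
    if (channel) {
      const int16 = new Int16Array(channel.length);
      for (let i = 0; i < channel.length; i++) {
        // Clamp to [-1, 1], then scale to the Int16 range
        const s = Math.max(-1, Math.min(1, channel[i]));
        int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
      }
      // Hand the converted block to the main thread
      this.port.postMessage(int16, [int16.buffer]);
    }
    return true;
  }
}

registerProcessor('buffer-detector', BufferProcessor);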

How to switch between front camera and rear camera in javascript?

let constraints;

function handleVideo() {
  const constraints = {
    video: {
      facingMode: {
        exact: 'user',
      },
    },
  };
  var video;
  navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
    video = document.createElement("video");
    video.srcObject = stream;
    video.play();
    video.onloadeddata = () => {
      ctx.height = video.videoHeight;
    };
  });
}
I know that changing exact from 'user' to 'environment' switches between the front and back camera, but I don't know how to do this on click.
Something like this:
function handleVideo(cameraFacing) {
  const constraints = {
    video: {
      facingMode: {
        exact: cameraFacing,
      },
    },
  };
  return constraints;
}

function turnVideo(constraints) {
  let video;
  navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
    video = document.createElement("video");
    video.srcObject = stream;
    video.play();
    video.onloadeddata = () => {
      ctx.height = video.videoHeight;
    };
  });
}

document.querySelector(".frontCamera").addEventListener("click", () => {
  turnVideo(handleVideo("user"));
});
document.querySelector(".backCamera").addEventListener("click", () => {
  // "environment" must be spelled exactly, or the constraint is rejected
  turnVideo(handleVideo("environment"));
});

<div class="frontCamera">front</div>
<div class="backCamera">back</div>

Using camera flashlight not allowing to change facingmode - Navigator.mediaDevices

I'm trying to build a web app that takes photos using the webcam or mobile camera, depending on the device. I have already made a button that changes constraints.facingMode so the user can use both cameras ("environment", "user") if the device supports it. The problem is what happens when I enable flashlight support as well, by creating a button and setting it up as the flashlight toggler like this:
const SUPPORTS_MEDIA_DEVICES = 'mediaDevices' in navigator;
if (SUPPORTS_MEDIA_DEVICES) {
  const track = stream.getVideoTracks()[0];
  const imageCapture = new ImageCapture(track);
  const photoCapabilities = imageCapture.getPhotoCapabilities().then(() => {
    const btn = document.querySelector('.toggleCameraTorch');
    btn.style.visibility = 'visible';
    btn.addEventListener('click', function () {
      try {
        track.applyConstraints({
          advanced: [{ torch: !wheelsfs.videoConstraint.torchState }],
        });
        wheelsfs.videoConstraint.torchState = !wheelsfs.videoConstraint.torchState;
      } catch (e) {
        alert(e.message);
      }
    });
  });
}
After that, the flashlight works perfectly, but I no longer have the option to swap cameras (facingMode). When I try to change the camera I get the error "could not start video source", as if the camera were already in use by something.
This is how I change the camera's facingMode:
wheelsfs.videoConstraint.facingMode.exact =
  wheelsfs.videoConstraint.facingMode.exact == "environment" ? "user" : "environment";
var cameraInput = wheelsfs.videoConstraint.facingMode.exact;

wheelsfs.videoTrue.srcObject &&
  wheelsfs.videoTrue.srcObject.getTracks().forEach((t) => t.stop());

wheelsfs.videoConstraint = {
  video: {
    width: { ideal: trueWidth },
    height: { ideal: trueHeight },
    facingMode: { ideal: "environment" },
  },
  facingMode: { exact: cameraInput },
};

navigator.mediaDevices.getUserMedia({ video: wheelsfs.videoConstraint }).then(function (stream) {
  wheelsfs.videoTrue.srcObject = stream;
  wheelsfs.videoTrue.play();

  const SUPPORTS_MEDIA_DEVICES = 'mediaDevices' in navigator;
  if (SUPPORTS_MEDIA_DEVICES) {
    const track = stream.getVideoTracks()[0];
    const imageCapture = new ImageCapture(track);
    const photoCapabilities = imageCapture.getPhotoCapabilities().then(() => {
      const btn = document.querySelector('.toggleCameraTorch');
      btn.style.visibility = 'visible';
      btn.addEventListener('click', function () {
        try {
          track.applyConstraints({
            advanced: [{ torch: !wheelsfs.videoConstraint.torchState }],
          });
          wheelsfs.videoConstraint.torchState = !wheelsfs.videoConstraint.torchState;
        } catch (e) {
          alert(e.message);
        }
      });
    });
  }
}).catch((e) => { console.log(e.message); });
Solved it by storing stream.getVideoTracks()[0] in a variable and then calling stop() on it before changing the camera (facingMode).
So now I do:
if (SUPPORTS_MEDIA_DEVICES) {
  // Keep the track in a shared variable so it can be stopped
  // before the camera is switched
  wheelsfs.track = stream.getVideoTracks()[0];
  const imageCapture = new ImageCapture(wheelsfs.track);
  const photoCapabilities = imageCapture.getPhotoCapabilities().then(() => {
    const btn = document.querySelector('.toggleCameraTorch');
    btn.style.visibility = 'visible';
    btn.addEventListener('click', function () {
      try {
        wheelsfs.track.applyConstraints({
          advanced: [{ torch: !wheelsfs.videoConstraint.torchState }],
        });
        wheelsfs.videoConstraint.torchState = !wheelsfs.videoConstraint.torchState;
      } catch (e) {
        alert(e.message);
      }
    });
  });
}
On the second line I save the track in a public variable, and then, when the function that switches the active camera is called, I make sure to run wheelsfs.track.stop() just before the navigator.mediaDevices.getUserMedia call.

Remote video not showing the stream

I'm not sure what is wrong. The caller and callee both receive the same stream, which is set as the local video, but it won't set the remote video.
I have seen it work before, when I clicked the call button twice, but I changed the code and don't know how to recreate that.
Any help is appreciated. Thank you.
video.js
//Starts by calling the user
const callUser = (socketId, mySocket) => {
  navigator.getUserMedia({ video: true, audio: true }, (stream) => {
    console.log("My Stream: ", stream);
    const localVideo = document.getElementById("local-video");
    localVideo.srcObject = stream;
    localStream = stream;
    createAndSendOffer(socketId, mySocket);
  });
};

//Create Offer
const createAndSendOffer = async (socketId, mySocket) => {
  peerConnection = new RTCPeerConnection();
  peerConnection.ontrack = function (event) {
    console.log("Remote Stream", event.streams[0]);
    const remoteVideo = document.getElementById("remoteVideo");
    if (remoteVideo.srcObject !== event.stream[0]) {
      remoteVideo.srcObject = event.streams[0];
    }
  };
  localStream.getTracks().forEach((track) => peerConnection.addTrack(track, localStream));
  await peerConnection.createOffer((offer) => {
    peerConnection.setLocalDescription(new RTCSessionDescription(offer), function () {
      mySocket.emit("callUser", { offer, to: socketId });
    });
  });
};

//Callee is getting called
const answerCall = (data, mySocket, inCall) => {
  navigator.getUserMedia({ video: true, audio: true }, (stream) => {
    console.log("My Stream: ", stream);
    const localVideo = document.getElementById("local-video");
    localVideo.srcObject = stream;
    localStream = stream;
    createAndSendAnswer(data, mySocket, inCall);
  });
};

//Create Answer
const createAndSendAnswer = async (data, mySocket, inCall) => {
  peerConnection = new RTCPeerConnection();
  peerConnection.ontrack = function (event) {
    console.log("Remote Stream", event.streams[0]);
    const remoteVideo = document.getElementById("remoteVideo");
    if (remoteVideo.srcObject !== event.stream[0]) {
      console.log(event.streams[0]);
      remoteVideo.srcObject = event.streams[0];
    }
    inCall(true);
  };
  localStream.getTracks().forEach((track) => peerConnection.addTrack(track, localStream));
  await peerConnection.setRemoteDescription(new RTCSessionDescription(data.offer));
  await peerConnection.createAnswer((answer) => {
    peerConnection.setLocalDescription(new RTCSessionDescription(answer), function () {
      mySocket.emit("makeAnswer", { answer, to: data.socket });
    });
  });
};

const startSockets = (socket, inCall) => {
  socket = socket;
  socket.on("gettingCalled", (data) => {
    console.log("You are getting called with socketId", data.socket);
    answerCall(data, socket, inCall);
  });
  socket.on("answerMade", async (data) => {
    await peerConnection.setRemoteDescription(new RTCSessionDescription(data.answer));
  });
};

class Video extends Component {
  constructor(props) {
    super(props);
    this.state = {
      friends: [],
      mySocket: [],
      inCall: false,
    };
  }

  componentDidMount() {
    if (this.props.user) {
      startSockets(this.props.user.socket, this.inCallChange);
    }
  }

  inCallChange = (boolean) => this.setState({ inCall: boolean });

  callUser = (socketId) => callUser(socketId, this.state.mySocket, this.state.inCall);

  render() {
    if (!this.props.user) {
      return <div>No User</div>;
    } else {
      return (
        <div>
          <div className="video-chat-container">
            <div className="video-container">
              <video autoPlay className="remote-video" id="remoteVideo"></video>
              <video autoPlay muted className="local-video" id="local-video"></video>
            </div>
          </div>
        </div>
      );
    }
  }
}

export default withRouter(connect(mapStateToProps)(Video))
Update
I'm not sure why this is the case, but I have to send just the offer and receive the answer again. For now, I resend an offer with createAndSendOffer(), starting the process again. I don't think this is best practice; if you find a better solution that would be great :)
First, check whether your pc.ontrack() is firing or not. The ontrack event fires only after pc.addTrack() has been called on the other side and the remote description is set. Also make sure tracks are added before creating the answer/offer.
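Two other things stand out in the code above: both ontrack handlers compare against event.stream[0] (note the missing "s") instead of event.streams[0], and createOffer/createAnswer are awaited while also being passed a success callback, mixing the legacy callback API with the promise API. A minimal sketch of the promise-based offer flow the answer describes (localStream comes from getUserMedia as in the question; the socket event names are the question's own):

const peerConnection = new RTCPeerConnection();

peerConnection.ontrack = (event) => {
  // Fires once the remote description is set and the
  // remote side's tracks arrive
  document.getElementById("remoteVideo").srcObject = event.streams[0];
};

// Add local tracks BEFORE creating the offer
localStream.getTracks().forEach((track) =>
  peerConnection.addTrack(track, localStream)
);

async function sendOffer(mySocket, socketId) {
  const offer = await peerConnection.createOffer();
  await peerConnection.setLocalDescription(offer);
  mySocket.emit("callUser", { offer, to: socketId });
}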

audio is shown disabled in twilio video chat

I am trying to create a video chat with Twilio. I can turn on the webcam and run the video, but I could not make the audio work. When I select the control, I can enlarge the video and use picture-in-picture mode, but not control the audio.
This is how it appears (screenshot).
Here is the code:
function App() {
  let localMediaRef = React.useRef(null);
  const [data, setIdentity] = React.useState({
    identity: null,
    token: null
  });
  const [room, setRoom] = React.useState({
    activeRoom: null,
    localMediaAvailable: null,
    hasJoinedRoom: null
  });

  async function fetchToken() {
    try {
      const response = await fetch("/token");
      const jsonResponse = await response.json();
      const { identity, token } = jsonResponse;
      setIdentity({
        identity,
        token
      });
    } catch (e) {
      console.error("e", e);
    }
  }

  React.useEffect(() => {
    fetchToken();
  }, []);

  // Attaches tracks to a specified DOM container
  const attachTracks = (tracks, container) => {
    tracks.forEach(track => {
      container.appendChild(track.attach());
    });
  };

  // Attaches all of a participant's tracks to a DOM container
  const attachParticipantTracks = (participant, container) => {
    const tracks = Array.from(participant.tracks.values());
    attachTracks(tracks, container);
  };

  // Called when a participant joins a room
  const roomJoined = room => {
    console.log("Joined as '" + data.identity + "'");
    setRoom({
      activeRoom: room,
      localMediaAvailable: true,
      hasJoinedRoom: true
    });
    // Attach LocalParticipant's tracks, if not already attached.
    const previewContainer = localMediaRef.current;
    if (!previewContainer.querySelector("video")) {
      attachParticipantTracks(room.localParticipant, previewContainer);
    }
  };

  const joinRoom = () => {
    let connectOptions = {
      name: "Interview Testing"
    };
    let settings = {
      audio: true
    };
    console.log('data', data, data.token);
    Video.connect(
      data.token,
      connectOptions,
      settings
    ).then(roomJoined, error => {
      alert("Could not connect to Twilio: " + error.message);
    });
  };

  return (
    <div className="App">
      <FeatureGrid>
        <span onClick={joinRoom}>Webcam</span>
      </FeatureGrid>
      <PanelGrid>
        {room.localMediaAvailable ? (
          <VideoPanels>
            <VideoPanel ref={localMediaRef} />
          </VideoPanels>
        ) : (
          ""
        )}
      </PanelGrid>
    </div>
  );
}
export default App;
How do I enable audio too? Also, the video controls are shown only after a right click; can't we show them by default?
UPDATE
It's a LocalAudioTrack (screenshot), and this is the RemoteAudioTrack (screenshot).
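One likely cause, assuming the standard twilio-video API: Video.connect(token, options) takes two arguments, so the separate settings object with audio: true is silently ignored and no audio track is ever published. A minimal sketch that folds the audio flag into connectOptions:

const joinRoom = () => {
  const connectOptions = {
    name: "Interview Testing",
    audio: true, // publish the local microphone
    video: true
  };
  Video.connect(data.token, connectOptions).then(roomJoined, error => {
    alert("Could not connect to Twilio: " + error.message);
  });
};

track.attach() on an audio track returns an audio element, so the existing attachTracks helper should render sound once the track is actually published. Note that in twilio-video 2.x, participant.tracks holds publications rather than tracks, so you may need publication.track.attach() instead.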
