Turn off camera after photo - javascript

I am reverse-engineering a project and am running into some perplexing problems. The project is in Meteor, which I like, but it doesn't seem to follow Meteor's conventions.
This is essentially a JavaScript file that lets users take a selfie with the laptop's camera. However, after taking the photo, the camera does not turn off.
Having tried a number of suggestions online, I am putting the question here: how does one turn off the camera?
Thank you for your help!
Template.newSelfie.rendered = function(){
    // Grab elements, create settings, etc.
    var canvas = document.getElementById("canvas"),
        context = canvas.getContext("2d"),
        video = document.getElementById("video"),
        videoObj = { "video": true },
        errBack = function(error) {
            console.log("Video capture error: ", error.code);
        };

    // Put video listeners into place
    if(navigator.getUserMedia) { // Standard
        navigator.getUserMedia(videoObj, function(stream) {
            video.src = stream;
            video.play();
        }, errBack);
    } else if(navigator.webkitGetUserMedia) { // WebKit-prefixed
        navigator.webkitGetUserMedia(videoObj, function(stream){
            video.src = window.webkitURL.createObjectURL(stream);
            video.play();
        }, errBack);
    } else if(navigator.mozGetUserMedia) { // Firefox-prefixed
        navigator.mozGetUserMedia(videoObj, function(stream){
            video.src = window.URL.createObjectURL(stream);
            video.play();
        }, errBack);
    }

    // Converts canvas to an image
    function convertCanvasToImage(canvas) {
        var image = new Image();
        image.src = canvas.toDataURL("image/png");
        return image.src;
    }

    $('#take-selfie').click(function() {
        context.drawImage(video, 0, 0, 450, 350);
        var selfieImg = convertCanvasToImage(canvas);
        Posts.insert({
            ownerId: Meteor.userId(),
            userWallId: Meteor.userId(),
            content: '<img src="'+selfieImg+'">',
            postedOn: new Date()
        }, function(err, res) {
            console.log(err || res);
        });
        Selfies.insert({
            ownerId: Meteor.userId(),
            image: selfieImg,
            postedOn: moment().format('MM/DD/YYYY hh:mm a'),
            createdAt: moment().format('YYYY-MM-DD')
        }, function(err, res) {
            console.log(err || res);
            if(err){
                console.log(err);
            } else {
                Router.go('profileSelfies');
            }
        });
    });
};

const video = document.querySelector('video');

// A video's MediaStream object is available through its srcObject attribute
const mediaStream = video.srcObject;

// Through the MediaStream, you can get the MediaStreamTracks with getTracks():
const tracks = mediaStream.getTracks();

// Tracks are returned as an array, so if you know you only have one, you can stop it with:
tracks[0].stop();

// Or stop all of them like so:
tracks.forEach(track => track.stop());

https://dev.to/morinoko/stopping-a-webcam-with-javascript-4297
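
Note that the question's code assigns the raw stream to video.src via the old prefixed APIs, so video.srcObject will be empty there. Here is a minimal sketch of the same fix wired into the selfie flow, assuming the modern promise-based navigator.mediaDevices.getUserMedia() and keeping a reference to the stream:

var cameraStream = null;

navigator.mediaDevices.getUserMedia({ video: true })
    .then(function(stream) {
        cameraStream = stream;    // keep a reference so the camera can be stopped later
        video.srcObject = stream; // modern replacement for video.src = stream
        video.play();
    })
    .catch(errBack);

$('#take-selfie').click(function() {
    context.drawImage(video, 0, 0, 450, 350);
    // ... insert the Posts/Selfies documents as before, then release the camera:
    if (cameraStream) {
        cameraStream.getTracks().forEach(function(track) { track.stop(); });
        cameraStream = null;
    }
    video.srcObject = null; // detach the stopped stream from the element
});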

Related

No barcode detected using QuaggaJS

I have been trying to decode barcodes using QuaggaJS for a few days now. I am trying to decode UPC barcodes, and I have a high-quality camera, so that is not the issue.
When I run my code, I only get the "onProcessed" event, never the "onDetected".
Here is my JS code for the camera:
const width = 640; // We will scale the photo width to this
let height = 0;    // This will be computed based on the input stream
let streaming = false;
let video = null;
let lastQr = null;

function startup() {
    video = document.getElementById("video");
    navigator.mediaDevices
        .getUserMedia({ video: { facingMode: "environment" }, audio: false })
        .then((stream) => {
            video.srcObject = stream;
            video.play();
        })
        .catch((err) => {
            console.error(`An error occurred: ${err}`);
        });
    video.addEventListener(
        "canplay",
        (ev) => {
            if (!streaming) {
                height = video.videoHeight / (video.videoWidth / width);
                // Firefox currently has a bug where the height can't be read from
                // the video, so we will make assumptions if this happens.
                if (isNaN(height)) {
                    height = width / (4 / 3);
                }
                video.setAttribute("width", width);
                video.setAttribute("height", height);
                streaming = true;
            }
        },
        false
    );
    Quagga.init({
        inputStream: {
            name: "Live",
            type: "LiveStream",
            target: document.querySelector('#camera') // Or '#yourElement' (optional)
        },
        decoder: {
            readers: ["upc_reader"]
        }
    }, function(err) {
        if (err) {
            console.log(err);
            return;
        }
        console.log("Initialization finished. Ready to start");
        Quagga.start();
        Quagga.onProcessed(function(pr) {
            if (pr != undefined) {
                console.log(pr);
            }
        });
        Quagga.onDetected(function(result) {
            console.log('DETECTED');
            var code = result.codeResult.code;
            console.log(code);
        });
    });
    document.getElementById('camera').style.display = 'block';
    document.getElementById('startup-button').style.display = 'none';
}
Here is the HTML for the video stream:
<div id="camera" style="display: none;">
    <video id="video">Video stream not available.</video>
</div>
<x-button id="startup-button" class="mb-4" onclick="startup()">{{ __('track.open_camera') }}</x-button>

Chrome extension video recording blob not able to convert into video file

I am creating a Chrome extension to record the screen and am facing an issue converting the recording blob into a video file: in the background script the video is recorded correctly, but in content.js I am not able to convert the video blob into a video file.
function startRecording() {
    var constraints = {
        audio: true,
        video: true,
        maxframeRate: fps,
    };
    navigator.mediaDevices.getDisplayMedia(constraints).then(function (stream) {
        let output = new MediaStream();
        if (output.getAudioTracks().length == 0) {
            // Get microphone audio (system audio is unreliable & doesn't work on Mac)
            if (micable) {
                micsource.connect(destination);
                output.addTrack(destination.stream.getAudioTracks()[0]);
            }
        } else {
            syssource = audioCtx.createMediaStreamSource(stream);
            if (micable) {
                micsource.connect(destination);
            }
            syssource.connect(destination);
            output.addTrack(destination.stream.getAudioTracks()[0]);
        }
        output.addTrack(stream.getVideoTracks()[0]);
        mediaConstraints = {
            audio: true,
            video: true,
            mimeType: "video/webm;codecs=vp8,opus",
        };
        mediaRecorder = new MediaRecorder(stream, mediaConstraints);
        mediaRecorder.start(1000);
        var recordedBlobs = [];
        let writer = "";
        mediaRecorder.ondataavailable = (event) => {
            if (event.data && event.data.size > 0) {
                recordedBlobs.push(event.data);
            }
            console.log("recordedBlobs", recordedBlobs);
        };
        mediaRecorder.onstop = () => {
            chrome.tabs.getSelected(null, (tab) => {
                chrome.tabs.sendMessage(tab.id, {
                    message: "download-video",
                    obj: {
                        blobs: recordedBlobs,
                    },
                    // camerasize: camerasize
                });
            });
            endRecording(stream, writer, recordedBlobs);
        };
        stream.getVideoTracks()[0].onended = function () {
            cancel = false;
            mediaRecorder.stop();
        };
    });
}
content.js
function convertVideoBlobToVideo(obj) {
    let chunk = obj.blobs;
    // mediaRecorder.onstop = () => {
    var superBuffer;
    superBuffer = new Blob(chunks, {
        type: "video/webm",
    });
    chunks = [];
    // Create a video or audio element
    // that stores the recorded media
    const recordedMedia = document.createElement("video");
    recordedMedia.controls = true;
    const recordedMediaURL = URL.createObjectURL(superBuffer);
    recordedMedia.src = recordedMediaURL;
    const downloadButton = document.createElement("a");
    downloadButton.download = "Recorded-Media";
    downloadButton.href = recordedMediaURL;
    downloadButton.innerText = "Download it!";
    downloadButton.onclick = () => {
        URL.revokeObjectURL(recordedMedia);
    };
    document.body.appendChild(recordedMedia, downloadButton);
    // };
}
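
A likely cause (a sketch, not a confirmed fix): Chrome extension messages are JSON-serialized, so a Blob sent through chrome.tabs.sendMessage arrives in content.js as an empty object. The content script also reads an undefined chunks variable where obj.blobs (assigned to chunk) was presumably intended. The sketch below converts the recording to a base64 data URL, a plain string that survives serialization; very long recordings may exceed the message size limit. Note also that document.body.appendChild takes a single node, so append is used instead.

// background.js — send a data URL string instead of Blob objects
mediaRecorder.onstop = () => {
    const recording = new Blob(recordedBlobs, { type: "video/webm" });
    const reader = new FileReader();
    reader.onloadend = () => {
        chrome.tabs.getSelected(null, (tab) => {  // kept from the question; deprecated in newer APIs
            chrome.tabs.sendMessage(tab.id, {
                message: "download-video",
                obj: { dataUrl: reader.result },  // a string survives JSON serialization
            });
        });
    };
    reader.readAsDataURL(recording);
};

// content.js — the data URL can feed the <video> and the download link directly
function convertVideoBlobToVideo(obj) {
    const recordedMedia = document.createElement("video");
    recordedMedia.controls = true;
    recordedMedia.src = obj.dataUrl;
    const downloadButton = document.createElement("a");
    downloadButton.download = "Recorded-Media";
    downloadButton.href = obj.dataUrl;
    downloadButton.innerText = "Download it!";
    document.body.append(recordedMedia, downloadButton); // append accepts multiple nodes
}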

How to include mic audio in RecordRTC Screen Recording?

I am doing screen recording using RecordRTC.
How do I include my mic audio when recording?
My code below, using Angular:
async startRecording() {
    let mediaConstraints = {
        video: {
        },
        audio: true
    };
    await this.mediaDevices.getDisplayMedia(mediaConstraints).then(this.successCallback.bind(this), this.errorCallback.bind(this));
}

successCallback(stream: MediaStream) {
    this.recording = true;
    var options = {
        mimeType: 'video/webm', // or video/webm\;codecs=h264 or video/webm\;codecs=vp9
        audioBitsPerSecond: 128000,
        videoBitsPerSecond: 128000,
        bitsPerSecond: 128000 // if this line is provided, skip above two
    };
    this.stream = stream;
    this.recordRTC = RecordRTC(stream, options);
    this.recordRTC.startRecording();
    let video: HTMLVideoElement = this.rtcvideo.nativeElement;
    video.src = window.URL.createObjectURL(stream);
    this.toggleControls();
}
You need to attach an audio track to the stream:
successCallback(stream) {
    // your other code here
    // ...
    navigator.mediaDevices.getUserMedia({ audio: true }).then(function(mic) {
        stream.addTrack(mic.getTracks()[0]);
        // start recording only after the mic track is attached; getUserMedia is
        // asynchronous, so starting outside this callback risks recording without audio
        this.recordRTC = RecordRTC(stream, options);
        this.recordRTC.startRecording();
    }.bind(this));
}
This should be helpful: https://www.webrtc-experiment.com/RecordRTC/
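The same fix reads a little more clearly with async/await (a sketch, assuming successCallback can be made async):

async successCallback(stream) {
    // wait for the mic before wiring up the recorder
    const mic = await navigator.mediaDevices.getUserMedia({ audio: true });
    stream.addTrack(mic.getAudioTracks()[0]); // attach the mic track before recording starts
    this.recordRTC = RecordRTC(stream, options);
    this.recordRTC.startRecording();
}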

How to record audio and video cross-platform with Electron

So I was making an Electron project that records your screen together with your desktop's or a selected app's audio using desktopCapturer.
I got screen recording working, and at one point even got the mic to work, but no matter what I tried I could not record desktop audio or any app's audio. After some research I found that you cannot record desktop or app audio with Chromium on Linux.
So what could be the solution, or what other ways are there to record desktop audio? Maybe there is a way to record desktop audio with a different library and then combine the video with the audio somehow.
Any suggestions would be appreciated.
Code for the screen recorder itself:
videoSelectBtn.onclick = getVideoSources;

async function getVideoSources() {
    const inputSources = await desktopCapturer.getSources({
        types: ["window", "screen", "audio"],
    });
    inputSources.forEach((source) => {
        if (source.name === "Screen 1") {
            selectSource(source);
        } else {
            console.log(source);
        }
    });
}

async function selectSource(source) {
    videoSelectBtn.innerText = source.name;
    const constraints = {
        audio: {
            mandatory: {
                chromeMediaSource: "desktop",
            },
        },
        video: {
            mandatory: {
                chromeMediaSource: "desktop",
            },
        },
    };
    const stream = await navigator.mediaDevices.getUserMedia(constraints);
    videoElement.srcObject = stream;
    videoElement.play();
    const options = {
        mimeType: "video/webm; codecs=vp9",
    };
    mediaRecorder = new MediaRecorder(stream, options);
    mediaRecorder.ondataavailable = handleDataAvailable;
    mediaRecorder.onstop = handleStop;
}

function handleDataAvailable(e) {
    console.log("video data available");
    recordedChunks.push(e.data);
}

async function handleStop(e) {
    const blob = new Blob(recordedChunks, {
        type: "video/webm; codecs=vp9",
    });
    const buffer = Buffer.from(await blob.arrayBuffer());
    const { filePath } = await dialog.showSaveDialog({
        buttonLabel: "Save video",
        defaultPath: `vid-${Date.now()}.webm`,
    });
    if (filePath) {
        writeFile(filePath, buffer, () => console.log("video saved successfully!"));
    }
}
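
Following the question's own idea of capturing the audio with another tool and combining afterwards, here is a minimal sketch that muxes an audio file into the saved webm by spawning ffmpeg from Electron's main process. It assumes ffmpeg is on the PATH and that audioPath was produced by some separate desktop-audio capture (for example a PulseAudio recorder on Linux); the function name and paths are illustrative.

// Sketch: combine separately captured audio with the recorded video.
const { execFile } = require("child_process");

function muxAudioIntoVideo(videoPath, audioPath, outPath, callback) {
    execFile("ffmpeg", [
        "-i", videoPath,   // the webm saved by handleStop()
        "-i", audioPath,   // the separately captured desktop audio
        "-map", "0:v:0",   // take the video from the recording
        "-map", "1:a:0",   // take the audio from the capture
        "-c:v", "copy",    // don't re-encode the video
        "-c:a", "libopus", // webm containers need opus/vorbis audio
        outPath,
    ], (err) => callback(err));
}

// usage (paths are placeholders):
// muxAudioIntoVideo("vid-123.webm", "desktop-audio.wav", "vid-123-final.webm",
//     (err) => console.log(err || "muxed successfully"));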

getUserMedia() can't release the camera

There is a very silly mistake somewhere in my code that I can't find. Basically, I'm using two separate buttons to start and stop recording a stream obtained from WebRTC's getUserMedia() (I'm using RecordRTC for the recording). My stop function stops the recording but does not release the camera.
<script type="text/javascript">
$(document).ready(function () {
var recorder;
var video = document.getElementById("video");
var videoConstraints = {
video: {
mandatory: {
minWidth: 1280,
minHeight: 720,
maxWidth: 1920,
maxHeight: 1080,
minFrameRate: 29.97,
maxFrameRate: 60,
minAspectRatio: 1.77
}
},
audio: true
};
function captureCamera(callback) {
navigator.mediaDevices.getUserMedia(videoConstraints).then(function (camera) {
callback(camera);
}).catch(function (error) {
alert('Unable to capture your camera. Please check console logs.');
console.error(error);
});
}
function stopRecordingCallback() {
video.src = video.srcObject = null;
video.src = URL.createObjectURL(recorder.getBlob());
video.play();
//recorder.camera.stop(); //its the deprecated way
recorder.camera.getTracks().forEach(track => track.stop()); //modern way as per documentation
recorder.destroy();
recorder = null;
}
hasGetUserMedia() {
return (navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
}
$('#startRecord').on("click", function () {
if (hasGetUserMedia()) {
/*----------------recording process start----------------*/
this.disabled = true;
captureCamera(function (camera) {
setSrcObject(camera, video);
video.play();
var options = {
recorderType: MediaStreamRecorder,
mimeType: 'video/webm\;codecs=h264',
audioBitsPerSecond: 128000,
videoBitsPerSecond: 2097152, // 2 mbps
};
recorder = RecordRTC(camera, options);
recorder.startRecording();
// release camera on stopRecording
recorder.camera = camera;
document.getElementById('stopRecord').disabled = false;
});
/*----------------recording process end----------------*/
}
else {
alert('getUserMedia() is not supported by your browser');
}
});
$('#stopRecord').on("click", function () {
this.disabled = true;
document.getElementById('startRecord').disabled = false;
recorder.stopRecording(stopRecordingCallback);
});
});
</script>
So I can't find the reason why the camera isn't released when the $('#stopRecord').on("click", function (){}) is called. Any help?
You can stop your stream's tracks, like this:
navigator.getUserMedia({ audio: false, video: true },
    function (stream) {
        // can also use getAudioTracks() or getVideoTracks()
        var track = stream.getTracks()[0]; // if only one media track
        // ...
        track.stop();
    },
    function (error) {
        console.log('getUserMedia() error', error);
    });
So, in your case, I believe you can do something like this:
var track = recorder.camera.getTracks()[0]; // if only one media track
// ...
track.stop();
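One caveat: the question's videoConstraints request both audio and video, so recorder.camera carries two tracks, and stopping only the first leaves the other device live. The loop already present in stopRecordingCallback is the safer form:

// stop every track (video and audio) so both devices are released
recorder.camera.getTracks().forEach(function (track) { track.stop(); });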
