Using a browser stream on phones with more than one camera - javascript

I currently have a React app that uses QuaggaJS to create a barcode scanner component. The scanner works fine on phones that only have one camera. On newer phones with multiple cameras it does not work: there is no way to pin the stream to one lens, so the browser continuously switches between all of the cameras and the image never focuses.
import React, { useEffect } from "react";
import { useNavigate } from "react-router-dom";
import Quagga from "@ericblade/quagga2";
import adapter from "webrtc-adapter";
import "./BarcodeScanner.css";

const BarcodeScanner = (props) => {
  const navigate = useNavigate();

  useEffect(() => {
    startQuagga();
  }, []);

  // Bail out early if the browser does not support getUserMedia.
  if (
    !navigator.mediaDevices ||
    typeof navigator.mediaDevices.getUserMedia !== "function"
  ) {
    console.log("getUserMedia function is not available in this browser.");
    props.onError("getUserMedia function is not available in this browser");
    return;
  }

  function startQuagga() {
    try {
      Quagga.init(
        {
          inputStream: {
            name: "Live",
            type: "LiveStream",
            target: document.querySelector("#interactive"),
            constraints: {
              width: 640,
              height: 480,
              facingMode: "environment",
            },
          },
          locate: true,
          decoder: {
            readers: ["upc_reader", "code_128_reader"],
          },
        },
        function (err) {
          if (err != null) {
            console.log(err);
            props.onError(err);
            stopScanner();
            return;
          }
          console.log("Initialization finished. Ready to start");
          Quagga.start();
        }
      );
    } catch {
      props.onError("Failed to open camera");
    }
  }

  // Only report a detection when the accumulated decode error is low enough.
  Quagga.onDetected((data) => {
    let countDecodedCodes = 0;
    let err = 0;
    for (let id in data.codeResult.decodedCodes) {
      let error = data.codeResult.decodedCodes[id];
      if (error.error != undefined) {
        countDecodedCodes++;
        err += parseFloat(error.error);
      }
    }
    if (err / countDecodedCodes < 0.9) {
      props.onDetected(data.codeResult.code);
      Quagga.stop();
    }
  });

  const stopScanner = () => {
    console.log("stopping Quagga");
    Quagga.stop();
  };

  useEffect(() => {
    if (props.showBottomSheet === "false") {
      stopScanner();
    }
  }, [props.showBottomSheet]);

  return <div className="barcode-scanner viewport" id="interactive"></div>;
};

export default BarcodeScanner;

I found that by raising the resolution to 1281 (changing the height in the constraints), Quagga automatically selects the high-resolution camera. Combining this with a box overlay to guide the user seems to have fixed the issue.
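For reference, the only change is in the constraints object passed to Quagga.init in the snippet above (1281 is the value that happened to work here; treat it as device-dependent rather than a documented threshold):

constraints: {
  width: 640,
  height: 1281, // forces selection of the high-resolution main camera on multi-camera phones
  facingMode: "environment",
},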

Related

How to use Azure Custom Commands API with react native (expo)

I am trying to get Azure Speech Services Custom Commands to work with React Native. I am unable to find JavaScript examples that use Custom Commands. Has anyone created an application that I can refer to? I found the following resources, but they do not show how to use Custom Commands specifically.
https://github.com/microsoft/cognitive-services-sdk-react-native-example/tree/main/ExampleTSProject
This one has JS code but does not show how to use the microphone to send audio to the API.
https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/how-to-custom-commands-setup-speech-sdk
This one has an example in C#, for which I wrote equivalent JS code that does not work.
This is my code right now: I am able to record audio using expo-av, but I don't know how to convert the recording to an audio stream and pass it to the SDK.
import 'react-native-get-random-values';
import { useState, useEffect } from 'react';
import { Audio } from 'expo-av';
import { StyleSheet, Button, View } from 'react-native';
import * as SpeechSDK from 'microsoft-cognitiveservices-speech-sdk';

const APPLICATION_ID = '6e3b5880-918e-11ed-a04e-cf28b0773cf7';
const SPEECH_KEY = '51ecddfd91564074b3cb2cee2f9e4f52';
const SPEECH_RESOURCE_REGION = 'centralindia';

export default function App() {
  const [recording, setRecording] = useState();
  const [dialogueServiceConnector, setDialogueServiceConnector] = useState();
  const [lastRecordedSound, setLastRecordedSound] = useState();

  function initializeDialogueServiceConnector() {
    // fromSubscription is a static factory, so it is called without `new`.
    const commandsConfig = SpeechSDK.CustomCommandsConfig.fromSubscription(
      APPLICATION_ID,
      SPEECH_KEY,
      SPEECH_RESOURCE_REGION
    );
    const connector = new SpeechSDK.DialogServiceConnector(commandsConfig);

    connector.activityReceived = (sender, arg) => {
      console.log(`Activity received, activity=${arg.activity}`);
      let buff = [];
      arg.audioStream.read(buff);
      console.log(`Buffer: ${buff}`);
    };
    connector.canceled = (sender, arg) => {
      console.log(`Cancelled, reason=${arg.reason}`);
      if (arg.reason === SpeechSDK.CancellationReason.Error) {
        console.log(`Error: code=${arg.errorCode}, details=${arg.errorDetails}`);
      }
    };
    connector.recognizing = (sender, arg) => {
      console.log(`Recognizing! in-progress text=${arg.result.text}`);
    };
    connector.recognized = (sender, arg) => {
      console.log(`Final speech-to-text result: ${arg.result.text}`);
    };
    connector.sessionStarted = (sender, arg) => {
      console.log(`Now listening! session started, id=${arg.sessionId}`);
    };
    connector.sessionStopped = (sender, arg) => {
      console.log(`Listening complete. session ended, id=${arg.sessionId}`);
    };

    setDialogueServiceConnector(connector);
    console.log('initialized');
  }

  useEffect(() => {
    initializeDialogueServiceConnector();
  }, []);

  async function playSound() {
    if (lastRecordedSound) {
      await lastRecordedSound.playAsync();
    }
  }

  async function startRecording() {
    /*
    console.log(dialogueServiceConnector)
    dialogueServiceConnector.listenOnceAsync(() => {
      console.log("Recording complete")
    });
    setRecording(true);
    */
    try {
      await Audio.requestPermissionsAsync();
      await Audio.setAudioModeAsync({
        allowsRecordingIOS: true,
        playsInSilentModeIOS: true,
      });
      const { recording } = await Audio.Recording.createAsync(
        Audio.RecordingOptionsPresets.HIGH_QUALITY
      );
      setRecording(recording);
    } catch (err) {
      console.error('Failed to start recording', err);
    }
  }

  async function stopRecording() {
    /*
    setRecording(false);
    */
    await recording.stopAndUnloadAsync();
    await Audio.setAudioModeAsync({
      allowsRecordingIOS: false,
      playsInSilentModeIOS: true,
    });
    const { sound } = await recording.createNewLoadedSoundAsync();
    setLastRecordedSound(sound);
  }

  return (
    <View style={styles.container}>
      <Button
        title={recording ? 'Stop Recording' : 'Start Recording'}
        onPress={recording ? stopRecording : startRecording}
      />
      <Button title='Play recording' onPress={playSound} />
    </View>
  );
}

const styles = StyleSheet.create({
  container: {
    flex: 1,
    backgroundColor: '#fff',
    alignItems: 'center',
    justifyContent: 'center',
  },
});
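One way to bridge the gap is to read the finished recording file and feed it to the connector through a push stream. This is an untested sketch, not a verified solution: sendRecordingToCustomCommands is a hypothetical helper name, expo-file-system and the buffer package are assumed extra dependencies, and the SDK's default push stream expects 16 kHz 16-bit mono PCM (the expo-av presets produce compressed formats, so a WAV/PCM recording preset would be needed):

import * as FileSystem from 'expo-file-system';
import { Buffer } from 'buffer';

async function sendRecordingToCustomCommands(recordingUri) {
  // Read the recorded file as base64 and convert it to raw bytes.
  const base64Audio = await FileSystem.readAsStringAsync(recordingUri, {
    encoding: FileSystem.EncodingType.Base64,
  });
  const audioData = Buffer.from(base64Audio, 'base64');

  // Push the bytes into a stream the Speech SDK can consume.
  const pushStream = SpeechSDK.AudioInputStream.createPushStream();
  pushStream.write(
    audioData.buffer.slice(audioData.byteOffset, audioData.byteOffset + audioData.byteLength)
  );
  pushStream.close();

  // Use the push stream as the connector's audio source instead of a microphone.
  const commandsConfig = SpeechSDK.CustomCommandsConfig.fromSubscription(
    APPLICATION_ID, SPEECH_KEY, SPEECH_RESOURCE_REGION
  );
  const audioConfig = SpeechSDK.AudioConfig.fromStreamInput(pushStream);
  const connector = new SpeechSDK.DialogServiceConnector(commandsConfig, audioConfig);

  connector.connect();
  connector.listenOnceAsync(
    (result) => console.log(`Recognized: ${result.text}`),
    (err) => console.log(`Error: ${err}`)
  );
}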

React Barcode Scanner returning wrong results

I am creating a web app that scans barcodes to pull up the inventory of an item. I am using the QuaggaJS API to build this functionality. The problem is that every time I scan the same item, a different result is returned. I have tried fixing the lighting in my area, so I don't think that is the issue.
import { useEffect } from "react";
import { useNavigate } from "react-router-dom";
import Quagga from "quagga";

const BarcodeScanner = () => {
  const navigate = useNavigate();

  const startScanner = () => {
    Quagga.init(
      {
        inputStream: {
          name: "Live",
          type: "LiveStream",
          constraints: {
            facingMode: "environment",
          },
        },
        // `locate` belongs at the top level of the config, not inside inputStream.
        locate: true,
        decoder: {
          readers: ["ean_reader"], //4171287788700
        },
        debug: {
          drawBoundingBox: true,
          showFrequency: true,
          drawScanline: true,
          showPattern: true,
        },
      },
      function (err) {
        if (err) {
          console.log(err);
          return;
        }
        console.log("Initialization finished. Ready to start");
        Quagga.start();
      }
    );

    Quagga.onDetected((data) => {
      console.log(data.codeResult.code);
      Quagga.stop();
      navigate('/home');
    });
  };

  const stopScanner = () => {
    Quagga.stop();
  };

  useEffect(() => {
    startScanner();
    return stopScanner;
  }, []);

  return <div id="interactive" className="viewport"></div>;
};

export default BarcodeScanner;
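A common cause of unstable results is that Quagga fires onDetected for every read, including low-confidence ones. One mitigation, along the lines of the first snippet on this page, is to inspect the per-character decode errors and ignore detections whose average error is too high. A sketch of a stricter handler to replace the one inside startScanner above (the 0.1 threshold is an assumption to tune, not a documented value):

Quagga.onDetected((data) => {
  // Average the decode error across all decoded characters.
  const errors = data.codeResult.decodedCodes
    .filter((code) => code.error !== undefined)
    .map((code) => code.error);
  const avgError = errors.reduce((sum, e) => sum + e, 0) / errors.length;

  // Only accept high-confidence reads.
  if (avgError < 0.1) {
    console.log(data.codeResult.code);
    Quagga.stop();
    navigate('/home');
  }
});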

Electron send data to react with preload.js

I am trying to achieve something where I can send data directly from Electron to React, and whenever React receives it, it does something.
So my electron.js is in below format.
// ./public/electron.js
const path = require("path");
const { app, BrowserWindow, ipcMain } = require("electron");

const isDev = false; //require("electron-is-dev"); //false
let splash = null;
let win = null;
let etmf_obj = null;

function createWindow() {
  // Create the browser window.
  win = new BrowserWindow({
    width: 1920,
    height: 1080,
    webPreferences: {
      nodeIntegration: true,
      contextIsolation: true,
      enableRemoteModule: true,
      preload: path.join(__dirname, "./preloadDist.js"),
    },
  });

  // win.loadFile("index.html");
  win.loadURL(
    isDev
      ? "http://localhost:3000"
      : `file://${path.join(__dirname, "../build/index.html")}`
  );

  // Open the DevTools.
  if (!isDev) {
    win.webContents.openDevTools({ mode: "undocked" });
  }
}

app.whenReady().then(createWindow);

app.on("window-all-closed", () => {
  if (process.platform !== "darwin") {
    app.quit();
  }
});

app.on("activate", () => {
  if (BrowserWindow.getAllWindows().length === 0) {
    createWindow();
  }
});

function restartApp() {
  console.log("restarting app..");
  app.exit();
  app.relaunch();
}

//IPC SECTION
ipcMain.handle("notify", (event, args) => {
  console.log("from react I got" + args);
  console.log("hello from electron via react"); //this one works as expected
});

ipcMain.on("splashDone", function () {
  console.log("splash done");
});

ipcMain.on("relaunchApp", function () {
  restartApp();
});

ipcMain.on("closeAll", function () {
  app.quit();
});

ipcMain.on("callAnim", function (args) {
  // trying to send data directly to react here,
  // but don't know whether it's the right way or not
  win.webContents.send("showAnimation", args);
});
And my preload file preloadDist.js is in the below format:
const { ipcRenderer, contextBridge } = require("electron");

contextBridge.exposeInMainWorld("electron", {
  notificationApi: {
    sendNotification(message) {
      ipcRenderer.invoke("notify", message);
    },
  },
  batteryApi: {},
  filesApi: {},
  splashStatus: {
    splashDone() {
      // main listens with ipcMain.on, so send (not invoke) is the matching call
      ipcRenderer.send("splashDone");
    },
  },
});
And in React, to call a function or send data, I do this (for example, to send notification data):
<button
  className="speak_border"
  onMouseEnter={() => setHovermic(true)}
  onMouseLeave={() => setHovermic(false)}
  onClick={() => {
    soundwave();
    window.electron.notificationApi.sendNotification("From react Hi!");
  }}
>
To receive data, I am not able to figure it out. Since in the main process I am doing win.webContents.send("showAnimation", args), I don't understand how this will be received on the React end. What I tried is:
useEffect(() => {
  try {
    window.electron.on("showAnimation", function (event, data) {
      if (data) {
        setAnim(true);
      }
      if (!data) {
        setAnim(false);
      }
    });
  } catch (e) {
    console.log("issue with on getting data");
  }
});
But this way I am getting an error and am not able to figure out how to receive the data, even though sending data from React to Electron works perfectly fine! Please guide me on how to achieve this with the above preload.js and electron.js format.
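The error comes from the preload script never exposing an on method, so window.electron.on is undefined in the renderer. A minimal sketch of one way to expose a listener through the contextBridge (the onShowAnimation name is illustrative, not from the original code):

// preloadDist.js — expose a subscription helper instead of raw ipcRenderer
const { ipcRenderer, contextBridge } = require("electron");

contextBridge.exposeInMainWorld("electron", {
  // ...existing notificationApi, splashStatus, etc.
  onShowAnimation(callback) {
    // Wrap the listener so the renderer never touches the ipcRenderer event object.
    ipcRenderer.on("showAnimation", (event, data) => callback(data));
  },
});

// In the React component
useEffect(() => {
  window.electron.onShowAnimation((data) => {
    setAnim(Boolean(data));
  });
}, []);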

Electron JS - Get path of chosen directory

I'm fairly new to the programming world. I'm making an app where it should be possible to choose a directory in which to save some generated files.
I'm working with IPC, and it seems like some of the code works, but it looks like I can't get the main process to send the path back to the renderer.
I hope the hive can help, thanks in advance!
Renderer:
const electron = require("electron");
const ipc = require("electron").ipcRenderer;

createBtn.addEventListener("click", (event) => {
  ipc.send("path:get");
});

ipc.on("path:selected", function (path) {
  console.log("Full path: ", path);
});
Main:
const ipc = require("electron").ipcMain;
const os = require("os");
const { dialog } = require("electron");

ipc.on("path:get", function (event) {
  if (os.platform() === "linux" || os.platform() === "win32") {
    dialog.showOpenDialog(
      {
        properties: ["openFile"],
      },
      function (files) {
        if (files) win.webContents.send("path:selected", files[0]);
        console.log("SENT");
      }
    );
  } else {
    dialog.showOpenDialog(
      {
        properties: ["openFile", "openDirectory"],
      },
      function (files) {
        if (files) win.webContents.send("path:selected", files[0]);
        console.log("SENT");
      }
    );
  }
});
Edit: Adding the setup
Setup
const { app, BrowserWindow } = require("electron");
const ipc = require("electron").ipcMain;
const os = require("os");
const { dialog } = require("electron");

try {
  require("electron-reloader")(module);
} catch (_) {}

let win;

function createWindow() {
  win = new BrowserWindow({
    width: 800,
    height: 600,
    webPreferences: {
      nodeIntegration: true,
    },
  });
  win.loadFile("./src/index.html");
}

app.whenReady().then(createWindow);

app.on("window-all-closed", () => {
  if (process.platform !== "darwin") {
    app.quit();
  }
});

app.on("activate", () => {
  if (BrowserWindow.getAllWindows().length === 0) {
    createWindow();
  }
});
I figured it out with some kind help.
So if anyone needs the same procedure, I'll try to explain what I got to.
In the main process, I had to add a .then(), because showOpenDialog returns a promise:
ipc.on("path:get", function (event) {
  if (os.platform() === "linux" || os.platform() === "win32") {
    dialog
      .showOpenDialog({
        properties: ["openFile", "openDirectory"],
      })
      .then((result) => {
        // result.canceled is true when the user dismisses the dialog
        if (!result.canceled) win.webContents.send("path:selected", result.filePaths);
      })
      .catch((err) => {
        console.log(err);
      });
  } else {
    dialog
      .showOpenDialog({
        properties: ["openFile", "openDirectory"],
      })
      .then((result) => {
        console.log(result.filePaths);
        if (!result.canceled) win.webContents.send("path:selected", result.filePaths);
      })
      .catch((err) => {
        console.log(err);
      });
  }
});
This sends back an array with the path at [0].
In the renderer, I had forgotten to add the event as a parameter:
ipc.on("path:selected", (event, path) => {
chosenPath = path;
console.log("Full path: ", chosenPath[0]);
});
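As a side note, if the goal is strictly a directory picker (as the original question describes), the dialog can be limited to directories. This is a small variation on the code above, under the assumption that mixing "openFile" and "openDirectory" is not needed:

dialog
  .showOpenDialog({
    // only allow choosing a directory, not individual files
    properties: ["openDirectory"],
  })
  .then((result) => {
    if (!result.canceled) {
      // result.filePaths[0] is the chosen directory's absolute path
      win.webContents.send("path:selected", result.filePaths);
    }
  })
  .catch((err) => console.log(err));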

using device camera for capturing image in reactjs

Can I access the device camera and take a photo in ReactJS? The goal is to create a component that takes pictures with the click of a button. From what I have read, I should use mediaDevices, but I am looking for sample code in ReactJS. Please provide me with sample code, or if you have experience implementing this, please guide me.
I have prepared sample code that can be used as a component. This snippet also works on devices that have two cameras. If you want to capture video instead of a photo, you can additionally enable audio in the constraints.
import React from "react";

class App extends React.Component {
  constructor() {
    super();
    this.cameraNumber = 0;
    this.state = {
      imageDataURL: null,
    };
  }

  initializeMedia = async () => {
    this.setState({ imageDataURL: null });

    if (!("mediaDevices" in navigator)) {
      navigator.mediaDevices = {};
    }

    // Polyfill getUserMedia for older browsers with prefixed implementations.
    if (!("getUserMedia" in navigator.mediaDevices)) {
      navigator.mediaDevices.getUserMedia = function (constraints) {
        var getUserMedia =
          navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
        if (!getUserMedia) {
          return Promise.reject(new Error("getUserMedia Not Implemented"));
        }
        return new Promise((resolve, reject) => {
          getUserMedia.call(navigator, constraints, resolve, reject);
        });
      };
    }

    // Get the details of the video inputs of the device
    const videoInputs = await this.getListOfVideoInputs();

    // The device has a camera
    if (videoInputs.length) {
      navigator.mediaDevices
        .getUserMedia({
          video: {
            deviceId: {
              exact: videoInputs[this.cameraNumber].deviceId,
            },
          },
        })
        .then((stream) => {
          this.player.srcObject = stream;
        })
        .catch((error) => {
          console.error(error);
        });
    } else {
      alert("The device does not have a camera");
    }
  };

  capturePicture = () => {
    var canvas = document.createElement("canvas");
    canvas.width = this.player.videoWidth;
    canvas.height = this.player.videoHeight;
    var context = canvas.getContext("2d");
    context.drawImage(this.player, 0, 0, canvas.width, canvas.height);

    // Stop the stream once the frame has been captured.
    this.player.srcObject.getVideoTracks().forEach((track) => {
      track.stop();
    });

    console.log(canvas.toDataURL());
    this.setState({ imageDataURL: canvas.toDataURL() });
  };

  switchCamera = async () => {
    const listOfVideoInputs = await this.getListOfVideoInputs();

    // The device has more than one camera
    if (listOfVideoInputs.length > 1) {
      if (this.player.srcObject) {
        this.player.srcObject.getVideoTracks().forEach((track) => {
          track.stop();
        });
      }

      // Toggle between the first and second camera
      if (this.cameraNumber === 0) {
        this.cameraNumber = 1;
      } else if (this.cameraNumber === 1) {
        this.cameraNumber = 0;
      }

      // Restart based on the new camera input
      this.initializeMedia();
    } else if (listOfVideoInputs.length === 1) {
      alert("The device has only one camera");
    } else {
      alert("The device does not have a camera");
    }
  };

  getListOfVideoInputs = async () => {
    // Get the details of the audio and video devices
    const enumerateDevices = await navigator.mediaDevices.enumerateDevices();
    // Filter video inputs (for devices with multiple cameras)
    return enumerateDevices.filter((device) => device.kind === "videoinput");
  };

  render() {
    const playerORImage = this.state.imageDataURL ? (
      <img src={this.state.imageDataURL} alt="cameraPic" />
    ) : (
      <video
        ref={(reference) => {
          this.player = reference;
        }}
        autoPlay
      ></video>
    );

    return (
      <div className="App">
        {playerORImage}
        <button onClick={this.initializeMedia}>Take Photo</button>
        <button onClick={this.capturePicture}>Capture</button>
        <button onClick={this.switchCamera}>Switch</button>
      </div>
    );
  }
}

export default App;
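As an aside, on phones you can often skip enumerating devices and simply request the rear camera via the standard facingMode constraint. A sketch of that approach (videoElement stands in for whatever video element you render; it is not part of the component above):

// Prefer the rear ("environment") camera; the browser falls back if it can't satisfy it.
navigator.mediaDevices
  .getUserMedia({ video: { facingMode: { ideal: "environment" } } })
  .then((stream) => {
    videoElement.srcObject = stream;
  })
  .catch((error) => console.error(error));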
