Audio Worklet - How to capture microphone streams - javascript

I am trying to capture the live microphone stream on my desktop. When converting the Float32 samples to an Int16Array, all values come out as 0. I am not sure what I have done wrong here. How can I capture the live microphone stream in an AudioWorklet?
Below is the JavaScript code:
try {
    navigator.getUserMedia = navigator.getUserMedia
        || navigator.webkitGetUserMedia
        || navigator.mozGetUserMedia;
    microphone = navigator.getUserMedia({
        audio: true,
        video: false
    }, onMicrophoneGranted, onMicrophoneDenied);
} catch (e) {
    alert(e);
}
async function onMicrophoneGranted(stream) {
    console.log(stream);
    context = new AudioContext();
    source = context.createMediaStreamSource(stream);

    await context.audioWorklet.addModule('/assets/js/buffer-detector.js');
    // Create our custom node.
    bufferDetectorNode = new AudioWorkletNode(context, 'buffer-detector');
    bufferDetectorNode.port.onmessage = (event) => {
    };

    source.connect(bufferDetectorNode);
    bufferDetectorNode.connect(context.destination);
    //source.connect(context.destination);
}

function onMicrophoneDenied() {
    console.log('denied');
}
Below is the AudioWorklet processor:
class BufferProcessor extends AudioWorkletProcessor {
    constructor() {
        super();
        this.initBuffer();
    }

    static get parameterDescriptors() {
        return [{
            name: 'Buffer Detector',
        }];
    }

    process(inputs) {
        console.log(inputs);
        inputs.forEach(floatArray => {
            floatArray.forEach(elem => {
                const intArray = Int16Array.from(elem);
                console.log(intArray);
            });
        });
        //const input = inputs[0];
        //this.append(input[0])
        return true;
    }
}

registerProcessor('buffer-detector', BufferProcessor);
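
The zeros are expected: the samples arrive as Float32 values in the range [-1, 1], and Int16Array.from() truncates the fractional part, so nearly every sample becomes 0. A minimal sketch of a process() method that scales the samples into the 16-bit range instead; posting the result back over the node's port is just one way to hand the data off:

// Sketch, not the final code: scale each Float32 sample to a 16-bit
// integer instead of letting Int16Array.from() truncate it to 0.
process(inputs) {
    const channel = inputs[0][0]; // first input, first channel (a Float32Array)
    if (channel) {
        const intArray = new Int16Array(channel.length);
        for (let i = 0; i < channel.length; i++) {
            // Clamp to [-1, 1], then scale to the signed 16-bit range.
            const s = Math.max(-1, Math.min(1, channel[i]));
            intArray[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
        }
        // Transfer the buffer to the main thread via the node's port.
        this.port.postMessage(intArray, [intArray.buffer]);
    }
    return true;
}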

Related

track.stop is not turning the camera off anymore

I have a webpage where I want the user to take a picture with their laptop/phone camera.
Once they click a button, a modal is shown and the following JS starts the camera stream to take the picture:
function startStreaming() {
    if (null != cameraStream) {
        var track = cameraStream.getTracks()[0];
        track.stop();
        stream.load();
        cameraStream = null;
    }

    //const audioSource = audioInputSelect.value;
    const videoSource = videoSelect.value;
    const constraints = {
        //audio: {deviceId: audioSource ? {exact: audioSource} : undefined},
        video: {
            deviceId: videoSource ? {
                exact: videoSource
            } : undefined
        }
    };
    navigator.mediaDevices.getUserMedia(constraints).then(gotStream).then(gotDevices).catch(handleError);

    var mediaSupport = 'mediaDevices' in navigator;
    if (mediaSupport && null == cameraStream) {
        const videoSource = videoSelect.value;
        const constraints = {
            video: {
                deviceId: videoSource ? {
                    exact: videoSource
                } : undefined
            }
        };
        navigator.mediaDevices.getUserMedia(constraints)
            .then(function (mediaStream) {
                cameraStream = mediaStream;
                stream.srcObject = mediaStream;
                stream.play();
            })
            .catch(handleError);
    } else {
        alert('Your browser does not support media devices.');
        return;
    }
}
This is triggered by:
$('#photoStudio').on('show.bs.modal', function (event) {
    navigator.mediaDevices.enumerateDevices().then(gotDevices).catch(handleError);
    startStreaming();
});
Then when I close the modal I want to stop the streaming, but the LED indicator next to my camera is still on:
$('#photoStudio').on('hide.bs.modal', function (event) {
    stopStreaming();
});
where stopStreaming() is:
function stopStreaming() {
    if (null != cameraStream) {
        var track = cameraStream.getTracks()[0];
        track.stop();
        stream.load();
        cameraStream = null;
    }
}
I don't get any kind of error and I cannot find a way to debug why the camera is still running. Am I missing anything in the stopStreaming function?
If any track has not been stopped, your camera will still be active. In your stopStreaming function you only stop the first track in the returned array.
If you instead iterate through all the tracks, you will catch the ones you aren't currently stopping:
function stopStreaming() {
    if (null != cameraStream) {
        var tracks = cameraStream.getTracks();
        // stop all the tracks, not just the first
        tracks.forEach((track) => {
            track.stop();
        });
        stream.load();
        cameraStream = null;
    }
}
You can also disable each track after stopping it:
this.camera_stream.getTracks().forEach((track) => {
    console.log(track);
    track.stop();
    track.enabled = false;
});
video.load();
this.camera_stream = null;
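
Note also that startStreaming in the question calls getUserMedia twice: once before the mediaSupport check and once inside it. The first call hands its stream to gotStream (not shown), and if that stream is never stored in cameraStream, its tracks can never be stopped and the camera LED stays on. A sketch that requests the stream only once, assuming gotDevices and handleError keep their roles from the question:

function startStreaming() {
    stopStreaming(); // release any previous stream before requesting a new one
    const videoSource = videoSelect.value;
    const constraints = {
        video: { deviceId: videoSource ? { exact: videoSource } : undefined }
    };
    navigator.mediaDevices.getUserMedia(constraints)
        .then((mediaStream) => {
            cameraStream = mediaStream; // keep the reference so stopStreaming can stop it
            stream.srcObject = mediaStream;
            stream.play();
            return navigator.mediaDevices.enumerateDevices();
        })
        .then(gotDevices)
        .catch(handleError);
}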

using device camera for capturing image in reactjs

Can I access the device camera and take a photo in ReactJS? The goal is to create a component that lets the user take a picture with the click of a button. From what I have read I should use mediaDevices, but I am looking for sample code in ReactJS. Please provide sample code, or if you have experience implementing this, please guide me.
I have prepared sample code that can be used as a component. The snippet also handles devices that have two cameras. If you want to record video instead of taking a photo, you can also enable audio in the constraints.
import React from "react";

class App extends React.Component {
    constructor() {
        super();
        this.cameraNumber = 0;
        this.state = {
            imageDataURL: null,
        };
    }

    initializeMedia = async () => {
        this.setState({ imageDataURL: null });

        if (!("mediaDevices" in navigator)) {
            navigator.mediaDevices = {};
        }

        if (!("getUserMedia" in navigator.mediaDevices)) {
            navigator.mediaDevices.getUserMedia = function (constraints) {
                var getUserMedia =
                    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
                if (!getUserMedia) {
                    return Promise.reject(new Error("getUserMedia Not Implemented"));
                }
                return new Promise((resolve, reject) => {
                    getUserMedia.call(navigator, constraints, resolve, reject);
                });
            };
        }

        // Get the details of the video inputs of the device
        const videoInputs = await this.getListOfVideoInputs();

        // The device has a camera
        if (videoInputs.length) {
            navigator.mediaDevices
                .getUserMedia({
                    video: {
                        deviceId: {
                            exact: videoInputs[this.cameraNumber].deviceId,
                        },
                    },
                })
                .then((stream) => {
                    this.player.srcObject = stream;
                })
                .catch((error) => {
                    console.error(error);
                });
        } else {
            alert("The device does not have a camera");
        }
    };

    capturePicture = () => {
        var canvas = document.createElement("canvas");
        canvas.width = this.player.videoWidth;
        canvas.height = this.player.videoHeight;
        var context = canvas.getContext("2d");
        context.drawImage(this.player, 0, 0, canvas.width, canvas.height);
        this.player.srcObject.getVideoTracks().forEach((track) => {
            track.stop();
        });
        console.log(canvas.toDataURL());
        this.setState({ imageDataURL: canvas.toDataURL() });
    };

    switchCamera = async () => {
        const listOfVideoInputs = await this.getListOfVideoInputs();

        // The device has more than one camera
        if (listOfVideoInputs.length > 1) {
            if (this.player.srcObject) {
                this.player.srcObject.getVideoTracks().forEach((track) => {
                    track.stop();
                });
            }

            // Switch to the second camera
            if (this.cameraNumber === 0) {
                this.cameraNumber = 1;
            }
            // Switch to the first camera
            else if (this.cameraNumber === 1) {
                this.cameraNumber = 0;
            }

            // Restart based on the camera input
            this.initializeMedia();
        } else if (listOfVideoInputs.length === 1) {
            alert("The device has only one camera");
        } else {
            alert("The device does not have a camera");
        }
    };

    getListOfVideoInputs = async () => {
        // Get the details of the audio and video inputs of the device
        const enumerateDevices = await navigator.mediaDevices.enumerateDevices();

        // Filter video inputs (for devices with multiple cameras)
        return enumerateDevices.filter((device) => device.kind === "videoinput");
    };

    render() {
        const playerORImage = this.state.imageDataURL ? (
            <img src={this.state.imageDataURL} alt="cameraPic" />
        ) : (
            <video
                ref={(reference) => {
                    this.player = reference;
                }}
                autoPlay
            ></video>
        );

        return (
            <div className="App">
                {playerORImage}
                <button onClick={this.initializeMedia}>Take Photo</button>
                <button onClick={this.capturePicture}>Capture</button>
                <button onClick={this.switchCamera}>Switch</button>
            </div>
        );
    }
}

export default App;
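
A minimal entry point for mounting the component above, assuming a standard create-react-app style page with a root element (the file layout is illustrative):

import React from "react";
import ReactDOM from "react-dom";
import App from "./App";

// Render the camera component into the page's root node.
ReactDOM.render(<App />, document.getElementById("root"));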

javascript AudioNodes can't change sample-rate

I can't figure out where this error comes from, or what I missed.
Here is my code:
function mediaDeviceInit(deviceId) {
    // shorthand for older browsers; see the W3C spec for audio
    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
    console.log('IpCodec : Get user permissions for Media Access.');

    let audioConstraints = {};
    // check for default value
    if (deviceId) {
        audioConstraints = {
            audio: { deviceId: deviceId, echoCancellation: false, sampleRate: defaultSampleRate }, video: false
        };
    } else {
        audioConstraints = { audio: { echoCancellation: false, sampleRate: defaultSampleRate }, video: false };
    }

    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia(audioConstraints)
            .then(function (stream) {
                //console.log(navigator.mediaDevices.getSupportedConstraints());
                userMediaSuccess(stream);
            })
            .catch(function (error) {
                userMediaError(error);
            });
    } else {
        console.log('IpCodec : Browser unsupported for getUserMedia.');
    }

    // enumerate all audio input devices
    function enumAudioInput() {
        // some code
    }

    // callback on success
    function userMediaSuccess(stream) {
        let audioSrc = audioMixer.audioContext.createMediaStreamSource(stream); // --> error here
        // some init code
        console.log('IpCodec : Media permission granted by user.');
        if (!deviceId) {
            enumAudioInput();
        }
    }

    // callback on error
    function userMediaError(error) {
        console.log('IpCodec' + error);
    }
}
with an error like:
Connecting AudioNodes from AudioContexts with different sample-rate is currently not supported.
And this is the AudioMixer class that owns the AudioContext:
class AudioMixer {
    constructor(type, sRate, latency) {
        this.audioContext;
        // handle browser compatibility prefixes
        window.AudioContext = window.AudioContext || window.webkitAudioContext || window.mozAudioContext;
        console.log('IpCodec : Initialize audio mixer success.');

        if (window.AudioContext) {
            this.audioContext = new window.AudioContext({ sampleRate: sRate, latencyHint: latency });
            //console.log(this.audioContext);
        } else {}
    }
}
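
A likely cause, hedged since the surrounding code isn't shown in full: browsers treat the sampleRate constraint in getUserMedia() as a hint, so the microphone stream usually stays at the hardware rate (for example 44100 or 48000 Hz). If the AudioMixer's context was forced to a different rate, the browser refuses to connect the MediaStreamAudioSourceNode to it. A minimal sketch that builds the context at the stream's actual rate; getSettings().sampleRate is not reported by every browser, hence the fallback:

async function initAtStreamRate() {
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    // Ask the track what rate it is actually running at.
    const settings = stream.getAudioTracks()[0].getSettings();
    const ctx = settings.sampleRate
        ? new AudioContext({ sampleRate: settings.sampleRate })
        : new AudioContext(); // fall back to the default (hardware) rate
    const source = ctx.createMediaStreamSource(stream); // rates now match
    return { ctx, source };
}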

Record audio one at a time and stop to save in ReactJS

I am using the getUserMedia() API to record audio. When the mic is off I call the startRecord method on its click:
{this.props.events.map((event) => (
    <div key={event.event_name}>
        <div>
            {this.state.micStates[event.event_name] ?
                <span onClick={() => this.stopRecord(event.event_name)}> <MicOn /> </span>
                : <span onClick={() => this.startRecord(event.event_name)}> <MicOff /> </span>}
        </div>
        <li style={{ color: 'pink' }}>{event.date_time}</li>
    </div>
))}
Two methods: startRecord and stopRecord.
startRecord: starts the recording and stores the data in chunks.
stopRecord: stops the mediaRecorder and saves the file in formData.
constructor(props) {
    super(props);
    this.state = {
        mediaRecorder: [],
        audioURL: [],
        micStates: {},
    };
    this.startRecord = this.startRecord.bind(this);
    this.stopRecord = this.stopRecord.bind(this);
}

startRecord = event => {
    this.setState(current => ({
        micStates: { ...current.micStates, [event]: !current.micStates[event] }
    }));
    let constraints = { audio: true };
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function (stream) {
            audioContext = new AudioContext();
            gumStream = stream;
            input = audioContext.createMediaStreamSource(stream);
            this.mediaRecorder = new MediaRecorder(input, { numChannels: 1 });
            this.chunks = [];
            // listen for data from the media recorder
            rec.ondataavailable = e => {
                if (e.data && e.data.size > 0) {
                    this.chunks.push(e.data);
                }
            };
            this.mediaRecorder.start(10);
            console.log("Recording started");
        }).catch(function (err) {
            // enable the record button if getUserMedia() fails
        });
}

stopRecord = event => {
    this.setState(current => ({
        micStates: { ...current.micStates, [event]: !current.micStates[event] }
    }));
    this.mediaRecorder.stop();
    var blob = new Blob(this.state.chunks, { type: 'audio/*' });
    this.setState({ audioURL: window.URL.createObjectURL(blob) });
}
First problem: TypeError: Cannot read property 'stop' of undefined at the line this.mediaRecorder.stop();
Second problem: I can switch on multiple recordings, meaning that without calling stopRecord I can call the startRecord method of another event.
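
Both problems trace back to startRecord's .then(function (stream) { ... }) callback: a plain function gets its own this, so this.mediaRecorder is never set on the component and is undefined when stopRecord runs (rec is likewise undefined). A sketch of one way to fix both issues, assuming a single active recording at a time; note that MediaRecorder takes the MediaStream itself, not a MediaStreamSource node:

startRecord = (event) => {
    // Refuse to start a second recording until the current one stops.
    if (this.mediaRecorder && this.mediaRecorder.state === 'recording') {
        return;
    }

    this.setState(current => ({
        micStates: { ...current.micStates, [event]: true }
    }));

    navigator.mediaDevices.getUserMedia({ audio: true })
        .then((stream) => { // arrow function keeps `this` bound to the component
            this.gumStream = stream;
            this.chunks = [];
            // MediaRecorder takes the MediaStream itself, not an AudioNode.
            this.mediaRecorder = new MediaRecorder(stream);
            this.mediaRecorder.ondataavailable = (e) => {
                if (e.data && e.data.size > 0) {
                    this.chunks.push(e.data);
                }
            };
            this.mediaRecorder.start(10);
            console.log("Recording started");
        })
        .catch((err) => console.error(err));
};

stopRecord = (event) => {
    this.setState(current => ({
        micStates: { ...current.micStates, [event]: false }
    }));
    if (!this.mediaRecorder) {
        return; // guards against the TypeError from the question
    }
    this.mediaRecorder.onstop = () => {
        const blob = new Blob(this.chunks, { type: 'audio/*' });
        this.setState({ audioURL: window.URL.createObjectURL(blob) });
        // Release the microphone.
        this.gumStream.getTracks().forEach((track) => track.stop());
    };
    this.mediaRecorder.stop();
};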

Using Google NodeJS speech-to-text with audio from client microphone

I am trying to use Google's speech-to-text Node.js library (https://github.com/googleapis/nodejs-speech) and stream audio from the client's microphone input with navigator.mediaDevices.getUserMedia.
I was able to stream the microphone with SoX to nodejs-speech's streamingRecognize and it was working.
I was also able to stream audio from the client side and pipe it to the speaker on the server side. However, when I try to pipe it to streamingRecognize it doesn't recognize any words.
Server-side
var io = require("socket.io")(server);
const speech = require('@google-cloud/speech');
const speechClient = new speech.SpeechClient(); // not shown in the original snippet

const request = {
    config: {
        encoding: encoding,
        sampleRateHertz: sampleRateHertz,
        languageCode: languageCode,
    },
    interimResults: true,
    //singleUtterance: false
};

let recognizeStream = speechClient
    .streamingRecognize(request)
    .on('data', data => {
        console.log(data);
        process.stdout.write(
            data.results[0] && data.results[0].alternatives[0] ?
                `Transcription: ${data.results[0].alternatives[0].transcript}\n` :
                `\n\nReached transcription time limit, press Ctrl+C\n`
        );
    });

io.on("connection", function (client) {
    client.on("userSpeaking", function (data) {
        if (recognizeStream !== null) {
            recognizeStream.write(new Uint8Array(data));
        }
    });
});
Client-side
function convertFloat32ToInt16(buffer) {
    let l = buffer.length;
    let buf = new Int16Array(l);
    while (l--) {
        buf[l] = Math.min(1, buffer[l]) * 0x7FFF;
    }
    return buf.buffer;
}

AudioContext = window.AudioContext || window.webkitAudioContext;
context = new AudioContext();
processor = context.createScriptProcessor(bufferSize, 1, 1);
processor.connect(context.destination);
context.resume();

function microphoneProcess(e) {
    var left = e.inputBuffer.getChannelData(0);
    var left16 = convertFloat32ToInt16(left);
    socket.emit('userSpeaking', left16);
}

navigator.mediaDevices
    .getUserMedia({
        video: false,
        audio: true
    }, {
        type: 'audio',
        sampleRate: 16000,
        desiredSampleRate: 16000,
        audioBitsPerSecond: 16000,
        mimeType: 'audio/webm;codecs=opus'
    })
    .then((stream) => {
        globalStream = stream;
        input = context.createMediaStreamSource(stream);
        input.connect(processor);
        processor.onaudioprocess = function (e) {
            microphoneProcess(e);
        };
    })
    .catch(console.error);
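
One likely mismatch, hedged since the config values aren't shown: navigator.mediaDevices.getUserMedia() takes a single constraints argument, so the second object with sampleRate: 16000 is ignored and the AudioContext runs at the hardware rate (typically 44100 or 48000 Hz), while the server-side request presumably promises 16000 to the API. A sketch that reports the real rate to the server instead; the 'audioConfig' event name and 'en-US' are illustrative:

// Client: tell the server the context's actual capture rate before streaming.
socket.emit('audioConfig', { sampleRateHertz: context.sampleRate });

// Server: build the streaming request from what the client reported.
client.on('audioConfig', ({ sampleRateHertz }) => {
    recognizeStream = speechClient.streamingRecognize({
        config: {
            encoding: 'LINEAR16', // matches the raw Int16 samples being sent
            sampleRateHertz: sampleRateHertz,
            languageCode: 'en-US',
        },
        interimResults: true,
    });
});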
