Get base64 of partial live recording - JavaScript

I am trying to get a base64 string of the currently recorded voice so it can be sent to a server for further processing.
My approach is to push base64 strings into recordedChunks, queue them, and then send them to the server.
const player = document.getElementById('player'); // <audio> element for monitoring playback
const recordedChunks = [];

const handler = function(stream) {
  if (window.URL) {
    player.srcObject = stream;
  } else {
    // player.src = stream; // legacy fallback
  }
  const context = new AudioContext();
  const source = context.createMediaStreamSource(stream);
  const bufferSize = 1024;
  const processor = context.createScriptProcessor(bufferSize, 1, 1);
  source.connect(processor);
  processor.connect(context.destination);
  processor.onaudioprocess = function(e) {
    // e.inputBuffer holds this block's raw PCM data.
    // Do something with the data, e.g. convert it to mp3.
    // How to get base64 of what has been recorded without stopping the recorder?
    // recordedChunks.push('somePrerecordedBase64String');
  };
};

navigator.mediaDevices.getUserMedia({ audio: true, video: false })
  .then(handler);
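One way to get base64 chunks while the recording is still running is MediaRecorder with a timeslice: ondataavailable then fires periodically, and each Blob can be converted to base64 with a FileReader. A minimal sketch of that alternative approach (it replaces the ScriptProcessorNode handler above and is not from the original post):

const handler = function(stream) {
  const recorder = new MediaRecorder(stream);
  recorder.ondataavailable = function(e) {
    const reader = new FileReader();
    reader.onloadend = function() {
      // reader.result is a data URL: "data:audio/webm;codecs=opus;base64,..."
      const base64 = reader.result.split(',')[1];
      recordedChunks.push(base64); // queue for sending to the server
    };
    reader.readAsDataURL(e.data);
  };
  recorder.start(1000); // timeslice in ms: request a chunk roughly every second
};

Note that only the first chunk contains the container header, so the server generally has to concatenate the chunks in order to obtain a playable file.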

Related

Append blobs to url and show

What I am trying to achieve is to have a "stream" of blobs from my socket and append them to a video.
Currently, I have something like:
const socket = io();
const videoGrid = document.getElementById("video-grid");
const video = document.createElement("video");
video.muted = true;
videoGrid.append(video);

let blobArray = [];
socket.on("view-stream-10", (data) => {
  blobArray.push(
    new Blob([new Uint8Array(data)], {
      type: "video/x-matroska;codecs=avc1,opus",
    })
  );
  let currentTime = video.currentTime;
  let blob = new Blob(blobArray, { type: "video/x-matroska;codecs=avc1,opus" });
  video.src = window.URL.createObjectURL(blob);
  video.currentTime = currentTime;
  video.play();
});
It works, but the video pauses for a few milliseconds each time a new blob is created and the URL is swapped, and the stutter is quite visible.
Is there a better way?
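One approach that avoids rebuilding the blob URL is the Media Source Extensions API: append each incoming chunk to a SourceBuffer and let the same video element keep playing. A minimal sketch, assuming the video and socket objects above and that the chunks are sequential output from a single MediaRecorder (the MIME type passed to addSourceBuffer must match the recorder's; vp8/opus is an assumption here):

const mediaSource = new MediaSource();
video.src = URL.createObjectURL(mediaSource);
const queue = [];

mediaSource.addEventListener("sourceopen", () => {
  const sourceBuffer = mediaSource.addSourceBuffer('video/webm; codecs="vp8,opus"');
  sourceBuffer.mode = "sequence"; // append chunks back to back
  // Only one appendBuffer may be in flight at a time, so drain a queue.
  sourceBuffer.addEventListener("updateend", () => {
    if (queue.length > 0) sourceBuffer.appendBuffer(queue.shift());
  });
  socket.on("view-stream-10", (data) => {
    const chunk = new Uint8Array(data);
    if (sourceBuffer.updating || queue.length > 0) {
      queue.push(chunk);
    } else {
      sourceBuffer.appendBuffer(chunk);
    }
    video.play();
  });
});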

WebRTC, getDisplayMedia() does not capture sound from the remote stream

I have a web application of my own, based on the peerjs library (it is a video conference).
I'm trying to make a recording with MediaRecorder, but I'm facing a very unpleasant case.
The code for capturing my desktop stream is the following:
let chooseScreen = document.querySelector('.chooseScreenBtn');
chooseScreen.onclick = async () => {
  let desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
};
I then successfully apply my received desktopStream to videoElement in DOM:
const videoElement = document.querySelector('.videoElement');
videoElement.srcObject = desktopStream;
videoElement.muted = false;
videoElement.onloadedmetadata = () => { videoElement.play(); };
For example, I get desktopStream on a page with an active conference where everyone hears and sees each other.
To check the video and audio in desktopStream, I play a video in a player on the desktop.
I can hear the audio from my desktop, but the audio from the other participants cannot be heard.
Of course, when I put the desktopStream into MediaRecorder, I get a video file with no sound from anyone except my desktop. Any ideas on how to solve it?
Chrome's MediaRecorder API can only record one audio track.
createMediaStreamSource can take the desktop and microphone streams; by connecting both to a single node created by createMediaStreamDestination, you get one merged audio stream that can be piped into the MediaRecorder API.
const mergeAudioStreams = (desktopStream, voiceStream) => {
  const context = new AudioContext();
  // Create a source for each input stream
  const source1 = context.createMediaStreamSource(desktopStream);
  const source2 = context.createMediaStreamSource(voiceStream);
  const destination = context.createMediaStreamDestination();

  const desktopGain = context.createGain();
  const voiceGain = context.createGain();
  desktopGain.gain.value = 0.7;
  voiceGain.gain.value = 0.7;

  source1.connect(desktopGain).connect(destination);
  source2.connect(voiceGain).connect(destination);

  return destination.stream.getAudioTracks();
};
It is also possible to use two or more audio inputs + video input.
window.onload = () => {
  const warningEl = document.getElementById('warning');
  const videoElement = document.getElementById('videoElement');
  const captureBtn = document.getElementById('captureBtn');
  const startBtn = document.getElementById('startBtn');
  const stopBtn = document.getElementById('stopBtn');
  const download = document.getElementById('download');
  const audioToggle = document.getElementById('audioToggle');
  const micAudioToggle = document.getElementById('micAudioToggle');

  if ('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';

  let blobs;
  let blob;
  let rec;
  let stream;
  let voiceStream;
  let desktopStream;

  const mergeAudioStreams = (desktopStream, voiceStream) => {
    const context = new AudioContext();
    const destination = context.createMediaStreamDestination();
    let hasDesktop = false;
    let hasVoice = false;

    if (desktopStream && desktopStream.getAudioTracks().length > 0) {
      // If you don't want to share audio from the desktop, it still works with just the voice.
      const source1 = context.createMediaStreamSource(desktopStream);
      const desktopGain = context.createGain();
      desktopGain.gain.value = 0.7;
      source1.connect(desktopGain).connect(destination);
      hasDesktop = true;
    }

    if (voiceStream && voiceStream.getAudioTracks().length > 0) {
      const source2 = context.createMediaStreamSource(voiceStream);
      const voiceGain = context.createGain();
      voiceGain.gain.value = 0.7;
      source2.connect(voiceGain).connect(destination);
      hasVoice = true;
    }

    return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
  };

  captureBtn.onclick = async () => {
    download.style.display = 'none';
    const audio = audioToggle.checked || false;
    const mic = micAudioToggle.checked || false;

    desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: audio });
    if (mic === true) {
      voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
    }

    const tracks = [
      ...desktopStream.getVideoTracks(),
      ...mergeAudioStreams(desktopStream, voiceStream)
    ];
    console.log('Tracks to add to stream', tracks);

    stream = new MediaStream(tracks);
    console.log('Stream', stream);
    videoElement.srcObject = stream;
    videoElement.muted = true;

    blobs = [];
    rec = new MediaRecorder(stream, { mimeType: 'video/webm; codecs=vp8,opus' });
    rec.ondataavailable = (e) => blobs.push(e.data);
    rec.onstop = async () => {
      blob = new Blob(blobs, { type: 'video/webm' });
      let url = window.URL.createObjectURL(blob);
      download.href = url;
      download.download = 'test.webm';
      download.style.display = 'block';
    };

    startBtn.disabled = false;
    captureBtn.disabled = true;
    audioToggle.disabled = true;
    micAudioToggle.disabled = true;
  };

  startBtn.onclick = () => {
    startBtn.disabled = true;
    stopBtn.disabled = false;
    rec.start();
  };

  stopBtn.onclick = () => {
    captureBtn.disabled = false;
    audioToggle.disabled = false;
    micAudioToggle.disabled = false;
    startBtn.disabled = true;
    stopBtn.disabled = true;
    rec.stop();
    stream.getTracks().forEach(s => s.stop());
    videoElement.srcObject = null;
    stream = null;
  };
};
Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.
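Given these platform differences, it is worth checking at runtime whether the captured stream actually contains an audio track before relying on it. A minimal sketch (the fallback behaviour is up to the application):

async function captureWithAudioCheck() {
  const desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
  if (desktopStream.getAudioTracks().length === 0) {
    // The platform did not capture system audio (or the user declined to share it):
    // fall back to microphone-only audio, or warn the user.
    console.warn('No desktop audio track captured.');
  }
  return desktopStream;
}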

Get webcam video in HTML and send to flask server via socketio

I'm trying to capture webcam video on the client side and send the frames to the server to process them. I'm a newbie in JS and I'm having some problems.
I tried to use OpenCV.js to get the data, but I didn't understand how. In Python we can do:
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
and frame is a 2D array with the image, but how can I get each frame (as a 2D array) to send using OpenCV.js?
I have this code on the client side:
<script type="text/javascript">
  function onOpenCvReady() {
    cv['onRuntimeInitialized'] = () => {
      var socket = io('http://localhost:5000');
      socket.on('connect', function () {
        console.log("Connected...!", socket.connected);
      });
      const video = document.querySelector("#videoElement");
      video.width = 500;
      video.height = 400;
      if (navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia({ video: true })
          .then(function (stream) {
            video.srcObject = stream;
            video.play();
          })
          .catch(function (error) {
            console.log(error);
            console.log("Something went wrong!");
          });
      }
      let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
      let cap = new cv.VideoCapture(video);
      const FPS = 15;
      function processVideo() {
        let begin = Date.now();
        cap.read(src);
        handle_socket(src['data']);
        // schedule the next frame
        let delay = 1000 / FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
      }
      // schedule the first frame
      setTimeout(processVideo, 0);
      function handle_socket(src) {
        socket.emit('event', { info: 'I\'m connected!', data: src });
      }
    };
  }
</script>
My solution was:
// Select the HTML video element where the webcam data is
const video = document.querySelector("#videoElement");

// Returns a frame encoded in base64
const getFrame = () => {
  const canvas = document.createElement('canvas');
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  canvas.getContext('2d').drawImage(video, 0, 0);
  const data = canvas.toDataURL('image/jpeg');
  return data;
};

// Send data over the socket
function handle_socket(data, dst) {
  socket.emit('event', data, function (res) {
    if (res !== 0) {
      results = res;
    }
  });
}

const frame64 = getFrame();
handle_socket(frame64);
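To stream frames continuously rather than capture a single one, the same pair of functions can be driven by a timer. A minimal sketch, assuming the video element and socket above and an arbitrary 10 FPS target:

const FPS = 10; // assumed target rate; tune to taste and bandwidth
setInterval(() => {
  // Skip ticks until the webcam has delivered its first frame
  if (video.videoWidth === 0) return;
  handle_socket(getFrame());
}, 1000 / FPS);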

Transmit recorded audio in real time using RTP over WebSocket - JavaScript and WebRTC

I am new to WebRTC. I want to record audio from the browser and then send the raw audio as RTP over a WebSocket to another machine on my local network.
How do I form these RTP packets?
Is it possible to stream the packets to a remote machine/server while recording?
I am using RecordRTC.
Here is my Code :
const player = document.getElementById('player');

const handleSuccess = function(stream) {
  const context = new AudioContext();
  var channels = 2;
  const source = context.createMediaStreamSource(stream);
  const processor = context.createScriptProcessor(1024, 2, 1);
  var buffer = context.createBuffer(2, 22050, 44100);
  var frameCount = context.sampleRate * 2.0;
  var myArrayBuffer = context.createBuffer(2, frameCount, context.sampleRate);
  console.log("My Array buffer", myArrayBuffer);

  source.connect(processor);
  processor.connect(context.destination);
  processor.onaudioprocess = function(e) {
    // Do something with the data, e.g. convert it to WAV
    console.log(e.inputBuffer);
  };

  if (window.URL) {
    player.srcObject = stream;
    console.log("Stream is", stream.toString());
  } else {
    player.src = stream;
    console.log("Stream is", stream.toString());
  }
};

navigator.mediaDevices.getUserMedia({ audio: true, video: false })
  .then(handleSuccess);

navigator.mediaDevices.enumerateDevices().then((devices) => {
  devices = devices.filter((d) => d.kind === 'audioinput');
  console.log("Devices are", devices);
});
The code allows me to stream audio; I just want to send the recorded stream in raw format using the RTP protocol.
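Browsers do not expose raw RTP, so the packets have to be assembled by hand, and a custom receiver has to unwrap them from the WebSocket messages; this is not interoperable with standard UDP-based RTP endpoints. A minimal sketch of the idea, assuming a plain WebSocket receiver at a hypothetical local address and 16-bit mono PCM taken from the onaudioprocess callback above; the 12-byte header layout follows RFC 3550, and payload type 96 is an arbitrary dynamic choice:

const ws = new WebSocket('ws://192.168.0.10:8080'); // hypothetical local receiver
ws.binaryType = 'arraybuffer';

let seq = 0;
let timestamp = 0;
const ssrc = 0x12345678; // arbitrary stream identifier

// Wrap one block of 16-bit PCM samples in a minimal RTP packet (RFC 3550).
function rtpPacket(pcm /* Int16Array */) {
  const packet = new DataView(new ArrayBuffer(12 + pcm.byteLength));
  packet.setUint8(0, 0x80);            // V=2, no padding, no extension, no CSRC
  packet.setUint8(1, 96);              // marker = 0, dynamic payload type 96
  packet.setUint16(2, seq++ & 0xffff); // sequence number
  packet.setUint32(4, timestamp);      // timestamp in samples
  packet.setUint32(8, ssrc);
  for (let i = 0; i < pcm.length; i++) {
    packet.setInt16(12 + i * 2, pcm[i]); // big-endian, as L16 audio expects
  }
  timestamp += pcm.length;
  return packet.buffer;
}

// In onaudioprocess: convert the Float32 samples to Int16 and send.
processor.onaudioprocess = function(e) {
  const float32 = e.inputBuffer.getChannelData(0);
  const pcm = new Int16Array(float32.length);
  for (let i = 0; i < float32.length; i++) {
    pcm[i] = Math.max(-1, Math.min(1, float32[i])) * 0x7fff;
  }
  if (ws.readyState === WebSocket.OPEN) ws.send(rtpPacket(pcm));
};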

Make WAV file from raw data

I use this example to capture data from the device microphone, but I can't figure out how to convert it to a WAV file to send to my server.
<script>
  var handleSuccess = function(stream) {
    var context = new AudioContext();
    var source = context.createMediaStreamSource(stream);
    var processor = context.createScriptProcessor(1024, 1, 1);
    source.connect(processor);
    processor.connect(context.destination);
    processor.onaudioprocess = function(e) {
      // Convert this to WAV and send to server
      console.log(e.inputBuffer);
    };
  };
  navigator.mediaDevices.getUserMedia({ audio: true, video: false })
    .then(handleSuccess);
</script>
Disclosure: synth-js is written by me.
The following script will create a valid WAV file as a Blob, containing the first 5 seconds of audio:
<script src="https://unpkg.com/synth-js/dst/synth.min.js"></script>
<script>
  var handleSuccess = function(stream) {
    var context = new AudioContext();
    var source = context.createMediaStreamSource(stream);
    var processor = context.createScriptProcessor(1024, 1, 1);
    var data = [];

    source.connect(processor);
    processor.connect(context.destination);

    processor.onaudioprocess = function(e) {
      data.push.apply(data, e.inputBuffer.getChannelData(0));
      // cut off after 5 seconds
      if (data.length >= context.sampleRate * 5) {
        context.close();
        var track = stream.getAudioTracks()[0];
        track.stop();
        // convert this to WAV
        var wav = new synth.WAV(1, context.sampleRate, 16, true, data);
        var blob = wav.toBlob();
        // do something with the blob
        var src = URL.createObjectURL(blob);
        var audio = new Audio();
        audio.controls = true;
        document.body.appendChild(audio);
        // play back the audio
        audio.addEventListener('canplaythrough', function() { audio.play(); });
        audio.src = src;
      }
    };
  };
  navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(handleSuccess);
</script>
You can try this on JSFiddle since Stack Snippets do not allow access to the microphone.
The line var wav = new synth.WAV(1, context.sampleRate, 16, true, data); creates a new WAV object with 1 channel, a sample rate that matches the input, 16 bits per sample in the WAV binary, in little endian format (required), with the PCM data collected by the onaudioprocess events.
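Once the Blob exists, sending it to the server is straightforward. A minimal sketch, assuming a hypothetical /upload endpoint on the same origin:

function uploadWav(blob) {
  const form = new FormData();
  form.append('audio', blob, 'recording.wav');
  return fetch('/upload', { method: 'POST', body: form }); // hypothetical endpoint
}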
