TLDR
Imagine I have one video and one image. I want to create another video that overlays the image (e.g. a watermark) at the center for the first 2 seconds of the video, and export that as the final video. I need to do this on the client side only. Is it possible with MediaRecorder + canvas, or should I resort to using ffmpeg.js?
Context
I am making a browser-based video editor where the user can upload videos and images and combine them. So far, I implemented this by embedding the video and images inside a canvas element appropriately. The data representation looks somewhat like this:
video: {
  url: 'https://archive.com/video.mp4',
  duration: 34,
},
images: [{
  url: 'https://archive.com/img1.jpg',
  start_time: 0,
  end_time: 2,
  top: 30,
  left: 20,
  width: 50,
  height: 50,
}]
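The compositing itself is roughly this (a simplified sketch; ctx, videoEl, and imgEls are placeholder names for the 2D context, the video element, and preloaded Image elements keyed by URL):

// Sketch of the canvas compositing loop (placeholder names).
function drawFrame() {
  ctx.drawImage(videoEl, 0, 0, canvas.width, canvas.height);
  const t = videoEl.currentTime;
  for (const img of images) {
    // show each image only between its start_time and end_time
    if (t >= img.start_time && t <= img.end_time) {
      ctx.drawImage(imgEls[img.url], img.left, img.top, img.width, img.height);
    }
  }
  requestAnimationFrame(drawFrame);
}
requestAnimationFrame(drawFrame);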
Attempts
I play the video and show/hide the images on the canvas. Then I can use MediaRecorder to capture the canvas's stream and export it as a data blob at the end. The final output is as expected, but the problem with this approach is that I have to play the video from beginning to end to capture the stream from the canvas. If the video is 60 seconds, exporting it also takes 60 seconds.
function record(canvas) {
  return new Promise(function (res, rej) {
    const stream = canvas.captureStream();
    const mediaRecorder = new MediaRecorder(stream);
    const recordedData = [];

    // Register recorder events
    mediaRecorder.ondataavailable = function (event) {
      recordedData.push(event.data);
    };
    mediaRecorder.onstop = function (event) {
      const blob = new Blob(recordedData, {
        type: "video/webm",
      });
      const url = URL.createObjectURL(blob);
      res(url);
    };

    // Start the video and start recording
    videoRef.current.currentTime = 0;
    videoRef.current.addEventListener("play", (e) => {
      mediaRecorder.start();
    }, { once: true });
    videoRef.current.addEventListener("ended", (e) => {
      mediaRecorder.stop();
    }, { once: true });
    videoRef.current.play();
  });
}
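Hypothetical usage (canvasRef is a ref to the editor canvas, analogous to videoRef above):

// Hypothetical usage: export the composited canvas and download the result.
record(canvasRef.current).then((url) => {
  const a = document.createElement("a");
  a.href = url;
  a.download = "export.webm";
  a.click();
});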
I could instead use ffmpeg.js to encode the video. I haven't tried this method yet, as I would have to convert my image representation into ffmpeg arguments (I wonder how much work that is).
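For reference, the mapping might look something like this with ffmpeg.wasm (the @ffmpeg/ffmpeg 0.x API; ffmpeg.js takes a similar argument list). This is an untested sketch built from the data representation above:

// Untested sketch: overlay img1.jpg (scaled to 50x50) at (left=20, top=30)
// for t = 0..2s, copying the audio through unchanged.
import { createFFmpeg, fetchFile } from "@ffmpeg/ffmpeg";

const ffmpeg = createFFmpeg({ log: true });
await ffmpeg.load();
ffmpeg.FS("writeFile", "video.mp4", await fetchFile(video.url));
ffmpeg.FS("writeFile", "img1.jpg", await fetchFile(images[0].url));
await ffmpeg.run(
  "-i", "video.mp4",
  "-i", "img1.jpg",
  "-filter_complex",
  "[1:v]scale=50:50[wm];[0:v][wm]overlay=20:30:enable='between(t,0,2)'",
  "-c:a", "copy",
  "out.mp4"
);
const data = ffmpeg.FS("readFile", "out.mp4");
const outUrl = URL.createObjectURL(new Blob([data.buffer], { type: "video/mp4" }));

Since ffmpeg.wasm encodes as fast as the CPU allows rather than in real time, this would avoid the 60-seconds-to-export-60-seconds problem above.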
Related
I am a student studying programming.
English is not my first language, so I wrote this using a translator.
I'm studying MediaPipe.
https://google.github.io/mediapipe/solutions/face_mesh
Do you know how to use a local video instead of the webcam?
let videoElement = document.querySelector(".input_video");
// Camera comes from "@mediapipe/camera_utils/camera_utils.js"
const camera = new Camera(videoElement, {
  onFrame: async () => {
    await holistic.send({ image: videoElement });
  },
  width: 640,
  height: 480,
});
camera.start();
This is the code that captures the webcam.
I think I need to change this code, but I don't know how to make it work.
I tried to find information about '@mediapipe/camera_utils/camera_utils.js', but I couldn't find anything.
I also found a CodePen demo that uses a local video:
https://codepen.io/mediapipe/details/KKgVaPJ
But I don't know which part of its code to use.
Please show me the way.
Rather than creating a new Camera, you need to send the frames yourself using requestAnimationFrame(). However, since send() has to be awaited inside an async function, the requestAnimationFrame call needs to be wrapped in a Promise.
You keep the standard MediaPipe setup:
let videoElement = document.querySelector(".input_video");
const config = {
  locateFile: (file) => {
    return 'https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh@' +
      `${mpFaceMesh.VERSION}/${file}`;
  }
};
const solutionOptions = {
  selfieMode: false,
  enableFaceGeometry: false,
  maxNumFaces: 1,
  refineLandmarks: true,
  minDetectionConfidence: 0.5,
  minTrackingConfidence: 0.5
};
const faceMesh = new mpFaceMesh.FaceMesh(config);
faceMesh.setOptions(solutionOptions);
faceMesh.onResults(onResults);
but rather than new Camera() or a SourcePicker(), you need an animation-frame loop:
async function onFrame() {
  if (!videoElement.paused && !videoElement.ended) {
    await faceMesh.send({ image: videoElement });
    // https://stackoverflow.com/questions/65144038/how-to-use-requestanimationframe-with-promise
    await new Promise(requestAnimationFrame);
    onFrame();
  } else {
    // video not playing yet (or finished): check again shortly
    setTimeout(onFrame, 500);
  }
}
Then load the video:
// must be same domain, otherwise it will taint the canvas!
videoElement.src = "./mylocalvideo.mp4";
videoElement.onloadeddata = (evt) => {
  let video = evt.target;
  canvasElement.width = video.videoWidth;
  canvasElement.height = video.videoHeight;
  videoElement.play();
  onFrame();
};
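For completeness, the onResults callback wired up above can be a minimal sketch like this (following the standard MediaPipe demos; canvasElement, canvasCtx, and the commented drawing helper are assumptions, not part of the code above):

const canvasElement = document.querySelector(".output_canvas");
const canvasCtx = canvasElement.getContext("2d");

// Minimal onResults sketch: draw the processed frame, then any overlays.
function onResults(results) {
  canvasCtx.save();
  canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
  canvasCtx.drawImage(results.image, 0, 0, canvasElement.width, canvasElement.height);
  // e.g. drawConnectors(canvasCtx, results.multiFaceLandmarks[0],
  //      mpFaceMesh.FACEMESH_TESSELATION, { color: "#C0C0C070" })
  //      from @mediapipe/drawing_utils
  canvasCtx.restore();
}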
UPDATE
I found an example of a highpass filter using Web Audio here, and implemented it like this in my code:
function modifyGain(audioTrack, gainValue) {
  var ctx = new AudioContext();
  // note: createMediaStreamTrackSource is Firefox-only at the time of writing
  var src = ctx.createMediaStreamTrackSource(audioTrack);
  var dst = ctx.createMediaStreamDestination();

  // create a filter node
  // cutoff frequency: for highpass, audio is attenuated below this frequency
  var filterNode = ctx.createBiquadFilter();
  filterNode.type = 'highpass';
  filterNode.frequency.value = 0;

  var gainNode = ctx.createGain();
  gainNode.gain.value = gainValue;

  src.connect(filterNode);
  filterNode.connect(gainNode);
  gainNode.connect(dst);
  return dst.stream.getTracks()[0];
}
try {
  webcamStream = await navigator.mediaDevices.getUserMedia(mediaConstraints);
  document.getElementById("local_video").srcObject = webcamStream;
} catch (err) {
  handleGetUserMediaError(err);
  return;
}

// Add the tracks from the stream to the RTCPeerConnection
try {
  webcamStream.getTracks().forEach(function (track) {
    if (track.kind === 'audio') {
      track = modifyGain(track, 0.5); // process only audio tracks
    }
    myPeerConnection.addTrack(track, webcamStream);
  });
  showLocalVideoContainer();
} catch (err) {
  handleGetUserMediaError(err);
}
Before I can actually test whether low sounds are silenced by the highpass filter, I am facing an issue: using modifyGain mutes the audio completely after a few seconds. I tried frequency values of 0, 1500, etc., and it still goes silent after a few seconds.
Original POST
I am using the below constraints to try to suppress noise.
var mediaConstraints = {
  audio: {
    advanced: [
      { echoCancellation: { exact: true } },
      { autoGainControl: { exact: true } },
      { noiseSuppression: { exact: true } },
      { highpassFilter: { exact: true } }
    ]
  },
  video: {
    facingMode: "user",
    width: { min: 160, ideal: 320, max: 640 },
    height: { min: 120, ideal: 240, max: 480 },
  }
};
But I want to silence some higher frequencies too. Even if I slowly move my phone across a surface, the mic catches the noise and sends it to the other peer. It even catches my breathing and sends that to the other side when I hold the phone near my cheek (like a phone call). I want some control over the frequencies, so I can select a threshold below which sounds will not be picked up by the mic.
I have tried searching, but I am not sure what exactly will work for my case or how I should do it. I think the following are my choices, but I may be wrong:
Change the SDP (codec params?).
Use Web Audio and process the mic input before passing it on to the other peer.
Use Web Audio and process the audio received from the other peer before directing it to the output device (speaker).
Any help will be appreciated.
Thanks
You can process the audio from your microphone by piping it through Web Audio and using a BiquadFilterNode:
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const ctx = new AudioContext();
const src = ctx.createMediaStreamSource(stream);
const dst = ctx.createMediaStreamDestination();
const biquad = ctx.createBiquadFilter();
// chain: src -> biquad -> dst
[src, biquad, dst].reduce((a, b) => a && a.connect(b));
audio.srcObject = dst.stream; // an <audio> element for monitoring
biquad.type = "highpass";
// (the filter's gain AudioParam is ignored by the "highpass" type)
biquad.frequency.value = 1000; // cutoff: attenuate below 1 kHz
rangeButton.oninput = () => biquad.frequency.value = rangeButton.value;
Here's a working fiddle.
It goes silent after a few seconds.
Could be a garbage collection issue. If you're experiencing this, try assigning your stream to a global JS var.
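For example, a minimal sketch applied to the modifyGain function from the question (audioGraphs is an illustrative name, not from the original code):

// Keep the context and nodes reachable from a long-lived scope so the
// browser can't garbage collect them mid-call.
const audioGraphs = [];

function modifyGain(audioTrack, gainValue) {
  const ctx = new AudioContext();
  const src = ctx.createMediaStreamTrackSource(audioTrack);
  const dst = ctx.createMediaStreamDestination();
  const filterNode = ctx.createBiquadFilter();
  filterNode.type = 'highpass';
  const gainNode = ctx.createGain();
  gainNode.gain.value = gainValue;
  src.connect(filterNode);
  filterNode.connect(gainNode);
  gainNode.connect(dst);
  audioGraphs.push({ ctx, src, dst, filterNode, gainNode }); // prevent GC
  return dst.stream.getTracks()[0];
}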
I am currently working on a project and need to be able to record my screen and save the recording locally to my computer.
The recording is saved as a webm, but every one of them has a really bad framerate, usually around 10-15 fps. Is there a way to increase the framerate of the recording?
I am able to increase the quality of the recording by playing around with the MediaRecorder options and codecs, but this doesn't seem to affect the framerate at all.
Here is the code I am using to make my recording:
const options = {
  mimeType: 'video/webm; codecs="vp9.00.41.8.00.01"',
  videoBitsPerSecond: 800 * Mbps,
  videoMaximizeFrameRate: true,
};
mediaRecorder = new MediaRecorder(stream, options);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.onstop = handleStop;

startBtn.onclick = e => {
  mediaRecorder.start();
  startBtn.innerHTML = 'Recording';
};

stopBtn.onclick = e => {
  mediaRecorder.stop();
  startBtn.innerHTML = 'Start';
};
function handleDataAvailable(e) {
  recordedChunks.push(e.data);
}

async function handleStop() {
  const blob = new Blob(recordedChunks, {
    type: 'video/webm' // the Blob option is `type`, not `mimeType`
  });
  const buffer = Buffer.from(await blob.arrayBuffer());
  const { filePath } = await dialog.showSaveDialog({
    buttonLabel: 'Save video',
    defaultPath: `vid-${Date.now()}.webm`
  });
  console.log(filePath);
  if (filePath) {
    writeFile(filePath, buffer, () => console.log('video saved successfully'));
  }
}
I have looked through the MDN documentation and haven't found anything about it. I also tried using different codecs with different parameters, but the results are always the same.
The framerate you're getting is typical for standard screen capture.
The only way to go faster is to use the GPU's dedicated capability to capture and encode, and that is out of scope for the web APIs.
Here's my code to capture an image from a Canvas playing video:
let drawImage = function (time) {
  prevCtx.drawImage(videoPlayer, 0, 0, w, h);
  requestAnimationFrame(drawImage);
};
requestAnimationFrame(drawImage);
let currIndex = 0;
setInterval(function () {
  if (currIndex === 30) {
    currIndex = 0;
    console.log("Finishing video...");
    videoWorker.postMessage({ action: "finish" });
  } else {
    console.log("Adding frame...");
    // w/o `toDataURL` this loop runs at 30 cycles / second,
    // so this is the hot-spot that needs optimization:
    const base64img = preview.toDataURL(mimeType, 0.9);
    videoWorker.postMessage({ action: "addFrame", data: base64img });
    currIndex++;
  }
}, 1000 / 30);
The goal is that every 30 frames (which should be one second of video) it triggers transcoding of the frames added so far.
The problem is that preview.toDataURL(mimeType, 0.9) adds at least one second per batch; without it, the log shows currIndex === 30 being reached every second, so this call is the bottleneck. What would be the best approach to capture at roughly 30 FPS? What is the fastest way to capture an image from an HTML canvas so that it isn't the bottleneck of a real-time video transcoding process?
You should probably revise your project, because saving the whole video as still images will blow out the memory of most devices in no time. Instead, have a look at the MediaStream and MediaRecorder APIs, which are able to do the transcoding and compression in real time. You can request a MediaStream from a canvas through its captureStream() method.
The fastest is probably to send an ImageBitmap to your Worker thread. These are really fast to generate from a canvas (a simple copy of the pixel buffer) and can be transferred to your worker script, from where you should be able to draw it onto an OffscreenCanvas.
Main drawback: it's currently only supported in the latest Chrome and Firefox (through WebGL), and it can't be polyfilled...
main.js
else {
  console.log("Adding frame...");
  // createImageBitmap returns a Promise, so the enclosing function must be async
  const bitmap = await createImageBitmap(preview);
  // list the bitmap as a transferable so it is moved, not copied
  videoWorker.postMessage({ action: "addFrame", data: bitmap }, [bitmap]);
  currIndex++;
}
worker.js
const canvas = new OffscreenCanvas(width, height);
const ctx = canvas.getContext('2d'); // 2D OffscreenCanvas context: Chrome only

onmessage = async (evt) => {
  // ...
  ctx.drawImage(evt.data.data, 0, 0);
  const image = await canvas.convertToBlob();
  storeImage(image);
};
Another option is to transfer ImageData. Not as fast as an ImageBitmap, but it still has the advantage of not blocking your main thread with the compression part, and since it can be transferred, the message to the Worker isn't computation-heavy either.
If you go this road, you may want to compress the data in your Worker thread using something like pako (which uses the compression algorithm behind PNG images).
main.js
else {
  console.log("Adding frame...");
  const img_data = prevCtx.getImageData(0, 0, width, height);
  // transfer the underlying ArrayBuffer (img_data.data.buffer), not the typed array
  videoWorker.postMessage({ action: "addFrame", data: img_data }, [img_data.data.buffer]);
  currIndex++;
}
worker.js
onmessage = (evt) => {
  // ...
  // evt.data.data is the ImageData; its .data property holds the RGBA bytes
  const image = pako.deflate(evt.data.data.data); // compress to store
  storeImage(image);
};
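Later, to get the pixels back, the reverse is an inflate plus an ImageData rebuild (a sketch; width and height must match the capture size, and ctx is whatever 2D context you draw into):

// Sketch: inflate the stored bytes and rebuild an ImageData.
const raw = pako.inflate(image); // Uint8Array of RGBA bytes
const imgData = new ImageData(
  new Uint8ClampedArray(raw.buffer, raw.byteOffset, raw.length),
  width, height
);
ctx.putImageData(imgData, 0, 0);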
So I'm trying to record some audio in Google Chrome through an extension.
I'm doing it this way since I can't get the audio stream by other means, and Windows doesn't let me record an individual process without a virtual audio cable.
I noticed that all the recordings have micro-stutters that align with my computer's CPU usage.
The stutters only exist when capturing the audio is enabled; they don't happen when it's playing back normally on the web page.
To exacerbate the micro-stutters into big, noticeable stutters, I maxed out my CPU with some while-loop threads doing float calculations. The moment I do that, you can hear the stutters.
You can hear the issue clearly here:
https://cdn.discordapp.com/attachments/462271295774654484/692816020011745320/Ruggero_Leoncavallo_-_Pagliacci_Act_1_Vesti_la_giubba_-_Live.mp3
Any attempt to set the audio latency constraint to an exact value besides 0.01 fails in the promise.
I tried using an AudioContext, hoping it could either backlog the buffer or let me decrease the latency further. Neither worked.
Increasing the latency just makes it stutter even without maxing out my CPU.
How can I prevent this stuttering issue while recording?
Are there any alternatives?
chrome.tabCapture.capture(
  {
    audio: true,
    audioConstraints: {
      mandatory: {
        chromeMediaSource: 'tab'
      }
    }
  },
  (stream) => {
    console.log("Started recording, stream: ", stream);

    // Latency doesn't go lower than 0.01;
    // higher latency makes stronger stutters
    var ctx = new AudioContext({ latencyHint: 0.0000 });
    var source = ctx.createMediaStreamSource(stream);
    var dest = ctx.createMediaStreamDestination();
    console.log("Con", ctx, source, dest);

    mediaRecorder = new MediaRecorder(dest.stream, {
      mimeType: "audio/webm"
    });
    source.connect(dest);
    console.log("Recorder:", mediaRecorder);

    var chunks = [];
    mediaRecorder.ondataavailable = function (event) {
      // the callback receives a BlobEvent; the chunk is event.data
      console.log("data available", event);
      chunks.push(event.data);
    };
    mediaRecorder.onstop = function (e) {
      console.log("Stopped recording");
      var blob = new Blob(chunks, { type: 'audio/webm' });
      console.log("My blob is: ", blob);
      var audioURL = window.URL.createObjectURL(blob);
      chrome.downloads.download(
        { url: audioURL, filename: "test.webm", saveAs: false },
        function (downloadId) {
          console.log("finished downloading it");
        }
      );
    };

    // Playing it back so I can hear the stutters while it's recording
    let audio = new Audio();
    audio.srcObject = dest.stream;
    audio.play();

    // changing the batch duration doesn't matter
    mediaRecorder.start(1000);
  }
);