video.captureStream becomes pitch green when sending through WebRTC - javascript

My product has a tool that allows you to share a video via WebRTC. When we first deployed it, we tried using code like the following:
this.videoEl = document.createElement("video");
this.videoEl.src = url;
this.videoEl.oncanplay = () => {
  this.videoEl.oncanplay = undefined;
  this.mediaStream = this.videoEl.captureStream();
};
The issue is that when sending this mediaStream, the result is a pitch green video, but with working audio.
The solution we came up with is to create a canvas and draw the video contents onto it, something like this:
this.canvas = document.createElement("canvas");
this.videoEl = document.createElement("video");
this.ctx = this.canvas.getContext("2d");
this.videoEl.src = url;
this.videoEl.oncanplay = () => {
  this.videoEl.oncanplay = undefined;
  // Some code (stripping a lot of unnecessary stuff)
  // Canvas drawing loop
  this.canvas.width = this.videoEl.videoWidth;
  this.canvas.height = this.videoEl.videoHeight;
  this.ctx.drawImage(this.videoEl, 0, 0, this.videoEl.videoWidth, this.videoEl.videoHeight);
  // Loop ends and more code
  // Media stream element
  this.mediaStream = this.canvas.captureStream(25);
  // Attach the audio track to the MediaStream
  try {
    var audioContext = new AudioContext();
    this.gainNode = audioContext.createGain();
    var audioSource = audioContext.createMediaStreamSource(this.videoEl.captureStream());
    var audioDestination = audioContext.createMediaStreamDestination();
    audioSource.connect(this.gainNode);
    this.gainNode.connect(audioDestination);
    this.gainNode.gain.value = 1;
    this.mediaStream.addTrack(audioDestination.stream.getAudioTracks()[0]);
  } catch (e) {
    // No audio tracks found
    this.noAudio = true;
  }
};
The solution works; however, it consumes a lot of CPU, and it would be great to avoid having to write all of that code. We also have customers complaining that the audio sometimes gets out of sync (which is understandable, since the audio comes from the video element's captureStream while the video comes from the canvas).
At first I thought it was green because the MediaStream was tainted, but that's not the case, since I can draw the video to a canvas and capture a MediaStream from it just fine.
PS: We are using a URL.createObjectURL(file) call to get the video URL.
Do you know why the video is green?
Thanks.

It turns out it's a Google Chrome bug.
Thanks to Philipp Hancke.
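For reference, the direct approach the question is after (which should work once the bug is fixed) is roughly the following sketch; pc stands for an already-created RTCPeerConnection, which is an assumption here and not part of the original code:
// minimal sketch: send the captured video element's tracks over an existing RTCPeerConnection
const stream = this.videoEl.captureStream();
stream.getTracks().forEach((track) => {
  pc.addTrack(track, stream); // pc is assumed to be an RTCPeerConnection set up elsewhere
});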

Related

Can a browser plugin "mitm" the local webcam?

I want to create a browser extension that would allow users to add effects to their video/audio streams, without special plugins, on any site that uses the JavaScript web APIs.
Google searching has not been particularly helpful so I'm starting to wonder if this is even possible.
I have 2 primary questions here:
Is this possible with JavaScript + Chrome?
Any links to additional resources are greatly appreciated.
I am not really into web extensions, so there may even be a simpler API available, and I won't go into implementation details, but theoretically you can indeed do it.
All it takes is to override the methods you'd get your MediaStream from, draw the original MediaStream to an HTML canvas where you can apply your filter, and then return a new MediaStream made of the video track from the canvas element's captureStream(), plus any other tracks from the original MediaStream.
A very basic proof of concept implementation for gUM could look like:
// overrides getUserMedia so it applies an invert filter on the videoTrack
{
  const mediaDevices = navigator.mediaDevices;
  const original_gUM = mediaDevices.getUserMedia.bind(mediaDevices);
  mediaDevices.getUserMedia = async (...args) => {
    const original_stream = await original_gUM(...args);
    // no video track, no filter
    if (!original_stream.getVideoTracks().length) {
      return original_stream;
    }
    // prepare our DOM elements
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');
    const video = document.createElement('video');
    // a flag to know if we should keep drawing on the canvas or not
    let should_draw = true;
    // no need for audio there
    video.muted = true;
    // gUM video tracks can change size
    video.onresize = (evt) => {
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
    };
    // in case the user blocks the camera
    video.onpause = (evt) => {
      should_draw = false;
    };
    video.onplaying = (evt) => {
      should_draw = true;
      drawVideoToCanvas();
    };
    video.srcObject = original_stream;
    await video.play();
    const canvas_track = canvas.captureStream().getVideoTracks()[0];
    const originalStop = canvas_track.stop.bind(canvas_track);
    // override the #stop method so we can revoke the camera stream
    canvas_track.stop = () => {
      originalStop();
      should_draw = false;
      original_stream.getVideoTracks()[0].stop();
    };
    // merge with audio tracks
    return new MediaStream(original_stream.getAudioTracks().concat(canvas_track));

    // the drawing loop
    function drawVideoToCanvas() {
      if (!should_draw) {
        return;
      }
      ctx.filter = "none";
      ctx.clearRect(0, 0, canvas.width, canvas.height);
      ctx.filter = "invert(100%)";
      ctx.drawImage(video, 0, 0);
      requestAnimationFrame(drawVideoToCanvas);
    }
  };
}
And then every script that calls this method will receive a filtered videoTrack.
Outsourced example, since gUM doesn't play well with StackSnippets.
Now, I'm not sure how to override methods from web extensions, you'll have to learn that by yourself, and beware: this script is really just a proof of concept and not ready for production. I didn't put any thought into handling anything other than the demo case.

HTML5 Audio gets louder after pausing then playing again

I'm making an audio player with JavaScript; everything works fine until I add a sound visualizer. When I pause the song and then play it again, the sound gets louder every time I do it, until it becomes distorted.
I'm a newbie with the HTML5 Audio API; I've tried setting the volume to a fixed value, but it doesn't work.
The code of the visualizer is:
function visualizer(audio) {
  let context = new AudioContext();
  const gainNode = context.createGain();
  gainNode.gain.value = 1; // setting it to 100%
  gainNode.connect(context.destination);
  let src = context.createMediaElementSource(audio);
  let analyser = context.createAnalyser();
  let canvas = document.getElementById("canvas");
  canvas.width = window.innerWidth;
  canvas.height = window.innerHeight;
  let ctx = canvas.getContext("2d");
  src.connect(analyser);
  analyser.connect(context.destination);
  analyser.fftSize = 2048;
  let bufferLength = analyser.frequencyBinCount;
  let dataArray = new Uint8Array(bufferLength);
  let WIDTH = ctx.canvas.width;
  let HEIGHT = ctx.canvas.height;
  let barWidth = (WIDTH / bufferLength) * 1.5;
  let barHeight;
  let x = 0;
  let color = randomColor();
  function renderFrame() {
    requestAnimationFrame(renderFrame);
    x = 0;
    analyser.getByteFrequencyData(dataArray);
    ctx.clearRect(0, 0, WIDTH, HEIGHT);
    for (let i = 0; i < bufferLength; i++) {
      barHeight = dataArray[i];
      ctx.fillStyle = color;
      ctx.fillRect(x, HEIGHT - barHeight, barWidth, barHeight);
      x += barWidth + 1;
    }
  }
  musicPlay();
  renderFrame();
}
And:
function musicPlay() {
  status = 'playing';
  audio.play();
}
So, I don't know if I'm doing something wrong in the audio analyzer. I've tried making the context global so that new AudioContext() isn't called every time I enter the function, and I've also tried to specify a fixed volume with:
audio.volume = 1;
or with the GainNode, as you can see in the function, but it's not working.
Where is my mistake, and why does the sound get louder?
Regards!
--- Update 1 ---
The audio is loaded from a URL:
function loadAudioElement(url) {
  return new Promise(function (resolve, reject) {
    let audio = new Audio();
    audio.addEventListener('canplay', function () {
      /* Resolve the promise, passing through the element. */
      resolve(audio);
    });
    /* Reject the promise on an error. */
    audio.addEventListener('error', reject);
    audio.src = url;
  });
}
And on my player I have:
let playButtonFunction = function () {
  if (playstatus === 'pause') {
    loadAudioElement(audio.src).then(
      visualizer(audio)
    );
  } else if (playstatus === 'playing') {
    musicPause();
  }
};
I had a similar issue; did you try setting the audio context as a global object?
This is what I found here:
https://developer.mozilla.org/en-US/docs/Web/API/AudioContext
It's recommended to create one AudioContext and reuse it instead of initializing a new one each time
The AudioContext interface represents an audio-processing graph built from audio modules linked together, each represented by an AudioNode.
An audio context controls both the creation of the nodes it contains and the execution of the audio processing, or decoding. You need to create an AudioContext before you do anything else, as everything happens inside a context. It's recommended to create one AudioContext and reuse it instead of initializing a new one each time, and it's OK to use a single AudioContext for several different audio sources and pipeline concurrently.
Well, as Get Off My Lawn pointed out, I was adding multiple audio elements by mistake.
The solution was to move the song-loading code outside playButtonFunction and only do:
let playButtonFunction = function () {
  if (playstatus === 'pause') {
    musicPlay();
  } else if (playstatus === 'playing') {
    musicPause();
  }
};
But I still had one problem with the next/previous functions. In those cases I do need to call loadAudioElement, because the song is changing (when you press play/pause it's the same song, so I don't), but with that I have the same problem again.
Well, after a bit of digging, I found that if you want to play a playlist and visualize the music all the time, YOU HAVE TO RELEASE THE OLD CONTEXT BEFORE LOADING THE NEW SONG. Not only to avoid the increase in volume: CPU and memory use also grow after 3 - 4 songs, and the browser will start to run slowly depending on the machine. So:
1 - I made a global variable called closeAudioContext = false;
2 - On my next/previous functions I added this code:
if (closeAudioContext) { // MANDATORY: release the previous resources to avoid object overlapping and CPU/memory use
  context.close();
  context = new AudioContext();
}
loadAudioElement(audio.src).then(
  visualizer(audio)
);
3 - On my visualizer(audio) function I changed:
let context = new AudioContext();
to
closeAudioContext = true; // MANDATORY: release the previous resources to avoid object overlapping and CPU/memory use
The value is initialized to false because the first time there is no song playing, and after playing a song you will always need to release the old resources, so the variable will always be set to true. Now you can skip songs as often as you want without worrying about memory or overlapping issues.
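Putting the pieces together, a rough sketch of the next/previous flow could look like this (the closeAudioContext and loadAudioElement names follow the code above; the nextSong wrapper itself is just an illustration):
let context = new AudioContext();
let closeAudioContext = false;

function nextSong(url) {
  if (closeAudioContext) {
    // release the previous graph before loading the new song
    context.close();
    context = new AudioContext();
  }
  loadAudioElement(url).then(function (audio) {
    closeAudioContext = true; // from now on, always release on song change
    visualizer(audio);
  });
}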
Hope this helps someone else trying to achieve the same thing! Regards!

<video> tag doesn't set volume of remote MediaStream from javascript

I receive a remote MediaStream and set it on a video element:
this.Video = document.createElement("video");
this.Video.autoplay = 1;
this.Video.style.visibility = "hidden";
this.Video.style.float = "left";
this.Video.width = CAMERA_VIDEO_SIZES.SMALL;
this.Video.height = CAMERA_VIDEO_SIZES.SMALL;
this.Video.src = window.URL.createObjectURL(stream);
this.Video.volume = 0;
But if I set a REMOTE MediaStream, the volume isn't controlled (volume = 0, but I still hear sound)!
And if I set a LOCAL MediaStream, the volume is controlled!
How can that be? And what must I do to control the remote MediaStream's volume?
Alright, guys! There was a bug in my code... I had made a Frankenstein out of another program, with some parts from this one, and now it works fine!

Exporting a video in p5.js

I am creating a simple animation program in p5.js. When a user clicks the save button, I want to download a video of the animation.
I have an object called frames where each key is labelled frame_1, frame_2 and so on. The value associated with each key is an array of line segments that makes up that frame.
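For illustration, a frames object of that shape might look like this (the exact fields of a line segment are an assumption):
// hypothetical shape of the frames object described above
const frames = {
  frame_1: [{ x1: 10, y1: 20, x2: 40, y2: 60 }, { x1: 40, y1: 60, x2: 80, y2: 30 }],
  frame_2: [{ x1: 12, y1: 22, x2: 42, y2: 58 }]
};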
I am trying to think of an approach to take this data and create an mp4 video. p5.js has a built-in save function that I thought might be helpful, but it is not a full solution on its own. I could save each frame as an individual image and then somehow stitch those images together on the client side, but I have yet to find a solution for that.
Any other approaches would be great as well. The only requirement is that it is done client side.
Since p5.js is built on the Canvas API, in modern browsers, you can use a MediaRecorder to do this job.
const btn = document.querySelector('button'),
  chunks = [];

function record() {
  chunks.length = 0;
  let stream = document.querySelector('canvas').captureStream(30),
    recorder = new MediaRecorder(stream);
  recorder.ondataavailable = e => {
    if (e.data.size) {
      chunks.push(e.data);
    }
  };
  recorder.onstop = exportVideo;
  btn.onclick = e => {
    recorder.stop();
    btn.textContent = 'start recording';
    btn.onclick = record;
  };
  recorder.start();
  btn.textContent = 'stop recording';
}

function exportVideo(e) {
  var blob = new Blob(chunks);
  var vid = document.createElement('video');
  vid.id = 'recorded';
  vid.controls = true;
  vid.src = URL.createObjectURL(blob);
  document.body.appendChild(vid);
  vid.play();
}
btn.onclick = record;

// taken from the p5.js docs
var x, y;

function setup() {
  createCanvas(300, 200);
  // Starts in the middle
  x = width / 2;
  y = height;
}

function draw() {
  background(200);
  // Draw a circle
  stroke(50);
  fill(100);
  ellipse(x, y, 24, 24);
  // Jiggling randomly on the horizontal axis
  x = x + random(-1, 1);
  // Moving up at a constant speed
  y = y - 1;
  // Reset to the bottom
  if (y < 0) {
    y = height;
  }
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.5.7/p5.min.js"></script>
<button>start recording</button><br>
CCapture.js works well with p5.js to achieve the goal of recording what's displayed on a canvas.
Here is a demo of CCapture working with p5.js; the source code comes with the demo.
This method won't output laggy videos, because it doesn't record what you see on the screen (which can lag). Instead, it writes every frame into the video and tells the video to play at a fixed frame rate. So even if it takes seconds to compute a single frame, the output video plays smoothly, without any visible delay between frames.
There is one caveat, though: this method only works in Chrome.
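A minimal sketch of wiring CCapture into a p5 sketch might look like the following; the format and framerate values are just examples, and the demo's source shows the full setup:
// record the p5 canvas frame by frame with CCapture, then save the result
let capturer = new CCapture({ format: 'webm', framerate: 30 });
let cnv;
let recording = false;

function setup() {
  cnv = createCanvas(300, 200);
}

function draw() {
  background(200);
  ellipse(width / 2, frameCount % height, 24, 24);
  if (recording) {
    capturer.capture(cnv.elt); // add the frame that was just drawn
  }
}

function mousePressed() {
  recording = !recording;
  if (recording) {
    capturer.start();
  } else {
    capturer.stop();
    capturer.save(); // prompts a download of the recorded video
  }
}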
Since you specified in the comments that a GIF would also work, here is a solution:
Below is a sample p5 sketch that records canvas animation and turns it into a gif, using gif.js.
Works in browsers supporting: Web Workers, File API and Typed Arrays.
I've provided this code so you can get an idea of how to use this library, because not much documentation is provided for it and I had a hard time figuring it out myself.
var cnv;
var gif, recording = false;

function setup() {
  cnv = createCanvas(400, 400);
  var start_rec = createButton("Start Recording");
  start_rec.mousePressed(saveVid);
  var stop_rec = createButton("Stop Recording");
  stop_rec.mousePressed(saveVid);
  start_rec.position(500, 500);
  stop_rec.position(650, 500);
  setupGIF();
}

function saveVid() {
  recording = !recording;
  if (!recording) {
    gif.render();
  }
}

var x = 0;
var y = 0;

function draw() {
  background(51);
  fill(255);
  ellipse(x, y, 20, 20);
  x++;
  y++;
  if (recording) {
    gif.addFrame(cnv.elt, {
      delay: 1,
      copy: true
    });
  }
}

function setupGIF() {
  gif = new GIF({
    workers: 5,
    quality: 20
  });
  gif.on('finished', function(blob) {
    window.open(URL.createObjectURL(blob));
  });
}
More info:
This sketch starts recording frames when you click start_rec and stops when you hit stop_rec. In your own sketch you might want to control things differently, but keep in mind that addFrame adds only one frame to the GIF, so you need to call it in the draw function to add multiple frames. You can pass in an ImageElement, a CanvasElement, or a CanvasContext, along with other optional parameters.
In the gif.on function, you can specify a callback to do whatever you like with the finished GIF.
If you want to fine-tune the GIF's settings, like quality, repeat, or background, you can read more here. Hope this helps!
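For instance, a GIF instance with a few of those options tweaked could be created like this (the values are arbitrary examples):
gif = new GIF({
  workers: 2,
  quality: 10,        // pixel sample interval; lower means better quality
  repeat: 0,          // 0 = loop forever, -1 = play once
  background: '#fff'  // background color for transparent pixels
});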

Web audio api, stop sound gracefully

The Web Audio API furnishes the .stop() method to stop a sound.
I want my sound to decrease in volume before stopping. To do so, I used a gain node. However, I'm facing weird issues where some sounds just don't play, and I can't figure out why.
Here is a dumbed down version of what I do:
https://jsfiddle.net/01p1t09n/1/
You'll hear that if you remove the line with setTimeout(), every sound plays. With setTimeout in place, not every sound plays. What really confuses me is that I push and shift accordingly to find the correct source of the sound, yet it seems like a different one stops playing. The only way I can see this happening is if AudioContext.decodeAudioData isn't synchronous. Just try the jsfiddle to get a better understanding, and put your headset on, obviously.
Here is the code of the jsfiddle:
let url = "https://raw.githubusercontent.com/gleitz/midi-js-soundfonts/gh-pages/MusyngKite/acoustic_guitar_steel-mp3/A4.mp3";
let soundContainer = {};
let notesMap = {"A4": [] };
let _AudioContext_ = AudioContext || webkitAudioContext;
let audioContext = new _AudioContext_();

var oReq = new XMLHttpRequest();
oReq.open("GET", url, true);
oReq.responseType = "arraybuffer";
oReq.onload = function (oEvent) {
  var arrayBuffer = oReq.response;
  makeLoop(arrayBuffer);
};
oReq.send(null);

function makeLoop(arrayBuffer) {
  soundContainer["A4"] = arrayBuffer;
  let currentTime = audioContext.currentTime;
  for (let i = 0; i < 10; i++) {
    // playing at same intervals
    play("A4", currentTime + i * 0.5);
    setTimeout(() => stop("A4"), 500 + i * 500); // remove this line and you will hear all the sounds
  }
}

function play(notePlayed, start) {
  audioContext.decodeAudioData(soundContainer[notePlayed], (buffer) => {
    let source;
    let gainNode;
    source = audioContext.createBufferSource();
    gainNode = audioContext.createGain();
    // pushing notes in note map
    notesMap[notePlayed].push({ source, gainNode });
    source.buffer = buffer;
    source.connect(gainNode);
    gainNode.connect(audioContext.destination);
    gainNode.gain.value = 1;
    source.start(start);
  });
}

function stop(notePlayed) {
  let note = notesMap[notePlayed].shift();
  note.source.stop();
}
The following is just to explain why I do it like this; you can skip it. It's only there to explain why I don't use stop() directly.
The reason I'm doing all this is because I want to stop the sound gracefully, so if there is a possibility to do so without using setTimeout I'd gladly take it.
Basically I have a map at the top containing my sounds (notes like A1, A#1, B1,...).
soundMap = {"A": [], "lot": [], "of": [], "sounds": []};
and a play() function where I populate the arrays when I play the sounds:
play(sound) {
  // sound is just { soundName, velocity, start }
  let source;
  let gainNode;
  // sound container is just a map from soundName to the sound data
  this.audioContext.decodeAudioData(this.soundContainer[sound.soundName], (buffer) => {
    source = this.audioContext.createBufferSource();
    gainNode = this.audioContext.createGain();
    gainNode.gain.value = sound.velocity;
    // pushing sound in sound map
    this.soundMap[sound.soundName].push({ source, gainNode });
    source.buffer = buffer;
    source.connect(gainNode);
    gainNode.connect(this.audioContext.destination);
    source.start(sound.start);
  });
}
And now the part that stops the sounds :
stop(sound) {
  // remember above, soundMap is a map from "soundName" to { gainNode, source }
  let dasound = this.soundMap[sound.soundName].shift();
  let gain = dasound.gainNode.gain.value - 0.1;
  // we lower the gain via incremental values so the sound doesn't stop abruptly
  let i = 0;
  for (; gain > 0; i++, gain -= 0.1) { // watch out, funky syntax
    ((gain, i) => {
      setTimeout(() => dasound.gainNode.gain.value = gain, 50 * i);
    })(gain, i);
  }
  // we stop the source after the gain has reached 0
  setTimeout(() => dasound.source.stop(), i * 50);
}
Aaah, yes, yes, yes! I finally found a lot of things by eventually bothering to read "everything" in the docs (diagonally). And let me tell you, this API is a diamond in the rough. Anyway, they actually have what I wanted with AudioParam:
The AudioParam interface represents an audio-related parameter, usually a parameter of an AudioNode (such as GainNode.gain). An AudioParam can be set to a specific value or a change in value, and can be scheduled to happen at a specific time and following a specific pattern.
It has a function linearRampToValueAtTime().
And they even have an example of exactly what I asked for!
// create audio context
var AudioContext = window.AudioContext || window.webkitAudioContext;
var audioCtx = new AudioContext();

// set basic variables for example
var myAudio = document.querySelector('audio');
var pre = document.querySelector('pre');
var myScript = document.querySelector('script');
pre.innerHTML = myScript.innerHTML;
var linearRampPlus = document.querySelector('.linear-ramp-plus');
var linearRampMinus = document.querySelector('.linear-ramp-minus');

// Create a MediaElementAudioSourceNode
// Feed the HTMLMediaElement into it
var source = audioCtx.createMediaElementSource(myAudio);

// Create a gain node and set its gain value to 0
// connect the source node to the gainNode
// and the gainNode to the destination
var gainNode = audioCtx.createGain();
gainNode.gain.setValueAtTime(0, audioCtx.currentTime);
source.connect(gainNode);
gainNode.connect(audioCtx.destination);

// set buttons to do something onclick
linearRampPlus.onclick = function() {
  gainNode.gain.linearRampToValueAtTime(1.0, audioCtx.currentTime + 2);
};
linearRampMinus.onclick = function() {
  gainNode.gain.linearRampToValueAtTime(0, audioCtx.currentTime + 2);
};
Working example here
They also have different types of timing, like an exponential ramp instead of a linear one, which I guess would fit this scenario better.
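Tying this back to the original stop(), a rough sketch of a graceful stop using a ramp could look like this (stopGracefully and the 0.3 s fade time are my own choices; notesMap and audioContext come from the code above):
// fade the gain down over ~0.3 s, then stop the source
function stopGracefully(notePlayed) {
  let note = notesMap[notePlayed].shift();
  let now = audioContext.currentTime;
  let fadeTime = 0.3;
  // anchor the current value so the ramp starts from it
  note.gainNode.gain.setValueAtTime(note.gainNode.gain.value, now);
  note.gainNode.gain.linearRampToValueAtTime(0, now + fadeTime);
  // an exponential ramp to a small value like 0.0001 would sound even more natural
  note.source.stop(now + fadeTime);
}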
