I am creating a simple animation program in p5.js. When a user clicks the save button, I want to download a video of the animation.
I have an object called frames where each key is labelled frame_1, frame_2 and so on. The value associated with each key is an array of line segments that makes up that frame.
I am trying to think of an approach to take this data and create an mp4 video. p5.js has a built in save function that I thought might be helpful but it is not a full solution on its own. I could save each frame as an individual image and then somehow stitch those images together on the client side but I have yet to find a solution to this.
Any other approaches would be great as well. The only requirement is that it is done client side.
Since p5.js is built on the Canvas API, in modern browsers, you can use a MediaRecorder to do this job.
// The record-toggle button and the storage for recorded media chunks.
const btn = document.querySelector('button');
const chunks = [];
function record() {
  // Throw away chunks from any previous take.
  chunks.length = 0;
  // Capture the p5 canvas at 30fps and feed it into a MediaRecorder.
  const canvasStream = document.querySelector('canvas').captureStream(30);
  const rec = new MediaRecorder(canvasStream);
  rec.ondataavailable = (e) => {
    // Ignore empty chunks.
    if (e.data.size) {
      chunks.push(e.data);
    }
  };
  rec.onstop = exportVideo;
  // While recording, the same button becomes the "stop" control;
  // stopping restores it as the "start" control.
  btn.onclick = (e) => {
    rec.stop();
    btn.textContent = 'start recording';
    btn.onclick = record;
  };
  rec.start();
  btn.textContent = 'stop recording';
}
// Builds a playable <video> element out of the recorded chunks and
// appends it to the page.
function exportVideo(e) {
  // FIX: give the Blob its MIME type — MediaRecorder produces webm by
  // default, and an untyped Blob may refuse to play in some browsers.
  var blob = new Blob(chunks, { type: 'video/webm' });
  var vid = document.createElement('video');
  vid.id = 'recorded';
  vid.controls = true;
  // NOTE: the object URL is never revoked; call URL.revokeObjectURL
  // when the element is discarded to release the memory.
  vid.src = URL.createObjectURL(blob);
  document.body.appendChild(vid);
  vid.play();
}
btn.onclick = record;
// taken from the p5.js docs
// Ball position, updated every frame in draw().
var x, y;
function setup() {
createCanvas(300, 200);
// Starts horizontally centered, at the bottom edge
x = width / 2;
y = height;
}
function draw() {
  background(200);
  // Grey circle.
  stroke(50);
  fill(100);
  ellipse(x, y, 24, 24);
  // Random horizontal jiggle plus a constant upward drift.
  x += random(-1, 1);
  y -= 1;
  // Wrap back to the bottom edge once the ball leaves the top.
  if (y < 0) {
    y = height;
  }
}
<!-- p5.js library plus the record-toggle button -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.5.7/p5.min.js"></script>
<button>start recording</button><br>
ccapture works well with p5.js to achieve the goal of recording what's displaying on a canvas.
Here is a demo of ccapture working with p5.js. The source code comes with the demo.
This method won't output laggy videos because it is not recording what you see on the screen, which can be laggy. Instead, it writes every frame into the video and tells the videos to play at a fixed frame rate. So even if it takes seconds to calculate just one frame, the output video will play smoothly without showing any delay between frames.
There is one caveat, though: this method only works in Chrome.
As you specified in the comments that a gif would also work, here is a solution:
Below is a sample p5 sketch that records canvas animation and turns it into a gif, using gif.js.
Works in browsers supporting: Web Workers, File API and Typed Arrays.
I've provided this code so you can get an idea of how to use this library because not much documentation is provided for it and I had a hard time myself figuring it out.
// p5 canvas handle, the gif.js encoder, and the recording flag.
var cnv;
var gif, recording = false;
function setup() {
cnv = createCanvas(400, 400);
// Both buttons share saveVid, which toggles recording and renders the
// gif when recording stops.
var start_rec = createButton("Start Recording");
start_rec.mousePressed(saveVid);
var stop_rec = createButton("Stop Recording");
stop_rec.mousePressed(saveVid);
start_rec.position(500, 500);
stop_rec.position(650, 500);
setupGIF();
}
function saveVid() {
  // Toggle the recording flag; when recording switches OFF, kick off
  // gif.js rendering of the collected frames.
  recording = !recording;
  if (recording) return;
  gif.render();
}
// Current position of the moving dot.
var x = 0;
var y = 0;
function draw() {
  background(51);
  fill(255);
  ellipse(x, y, 20, 20);
  x += 1;
  y += 1;
  // While recording, snapshot the canvas element into the gif encoder.
  if (recording) {
    gif.addFrame(cnv.elt, { delay: 1, copy: true });
  }
}
function setupGIF() {
  // gif.js encoder: 5 web workers, quality 20 (see the gif.js docs
  // for the quality parameter's meaning).
  gif = new GIF({ workers: 5, quality: 20 });
  // When rendering completes, open the finished gif in a new tab.
  gif.on('finished', function (blob) {
    window.open(URL.createObjectURL(blob));
  });
}
More Info :
This sketch starts recording frames when you click start_rec and stops when you hit stop_rec, in your sketch you might want to control things differently, but keep in mind that addFrame only adds one frame to the gif so you need to call it in the draw function to add multiple frames, you can pass in an ImageElement, a CanvasElement or a CanvasContext along with other optional parameters.
In the gif.on function, you can specify a callback function to do whatever you like with the gif.
If you want to fine tune settings of the gif, like quality, repeat, background, you can read more here. Hope this helps!
Related
My product has a tool that allows you to share a video via WebRTC. When we first deployed it, we tried using a code like the following:
this.videoEl = document.createElement("video");
this.videoEl.src = url;
// NOTE(review): inside this non-arrow handler `this` is the <video>
// element itself, not the outer object — so `this.videoEl` here would
// be undefined and `this.mediaStream` lands on the element. Presumably
// the real code binds `this` or uses an arrow function; verify against
// the caller.
this.videoEl.oncanplay = function() {
this.oncanplay = undefined;
this.mediaStream = this.videoEl.captureStream();
};
The issue is that when sending this mediaStream, the result is a pitch green video, but with working audio:
The solution we came up with is to create a canvas and draw to our canvas the video contents, something like this:
this.canvas = document.createElement("canvas");
this.videoEl = document.createElement("video");
this.ctx = this.canvas.getContext("2d");
this.videoEl.src = url;
// NOTE(review): as in the first snippet, `this` inside this non-arrow
// handler is the <video> element, not the outer object — presumably the
// real (stripped) code binds `this`; verify against the caller.
this.videoEl.oncanplay = function() {
this.oncanplay = undefined;
// Some code (stripping a lot of unnecessary stuff)
// Canvas drawing loop
this.canvas.width = this.videoEl.videoWidth;
this.canvas.height = this.videoEl.videoHeight;
this.ctx.drawImage(this.videoEl, 0, 0, this.videoEl.videoWidth, this.videoEl.videoHeight);
// Loop ends and more code
// Media stream element
// Capture the canvas contents (video track only) at 25 fps.
this.mediaStream = this.canvas.captureStream(25);
// Attached audio track to Media Stream
// Route the <video>'s own audio through a gain node into a stream
// destination, then graft that audio track onto the canvas stream.
try {
var audioContext = new AudioContext();
this.gainNode = audioContext.createGain();
audioSource = audioContext.createMediaStreamSource(this.videoEl.captureStream(25));
audioDestination = audioContext.createMediaStreamDestination();
audioSource.connect(this.gainNode);
this.gainNode.connect(audioDestination);
this.gainNode.gain.value = 1;
this.mediaStream.addTrack(audioDestination.stream.getAudioTracks()[0]);
} catch (e) {
// No audio tracks found
this.noAudio = true;
}
};
The solution works, however it consumes a lot of CPU and it would be great to avoid having to write all of that code. We also have customers complaining that the audio gets out of sync sometimes (which is understandable, since I'm using a captureStream for audio and not for video).
At first I thought it was green because it was tainting the MediaStream, but that's not the case since I can normally draw the video to a canvas and capturing a MediaStream from it. PS: We are using a URL.createObjectURL(file) call to get the video url.
Do you know why the video is green?
Thanks.
It turns out it's a Google Chrome Bug.
Thanks to Philipp Hancke.
When using CanvasCaptureMediaStream and MediaRecorder, is there a way to get an event on each frame?
What I need is not unlike requestAnimationFrame(), but I need it for the CanvasCaptureMediaStream (and/or the MediaRecorder) and not the window. The MediaRecorder could be running at a different frame rate than the window (possibly at a not regularly divisible rate, such as 25 FPS vs 60 FPS), so I want to update the canvas at its frame rate rather than the window's.
This example currently only fully works on FireFox, since chrome simply stops the canvas stream when the tab is blurred... (probably related to this bug, but well, my timer seems to be working but not the recording...)
[Edit]: it actually now works only in chrome, since they have fixed this bug, but not anymore in FF because of this one (caused by e10s).
There doesn't seem to be any event on MediaStream letting you know when a frame has been rendered to it, neither on the MediaRecorder.
Even the currentTime property of the MediaStream (currently only available in FF) doesn't seem to be changing accordingly with the fps argument passed in the captureStream() method.
But what you seem to want is a reliable timer that won't lose its frequency when, e.g., the current tab is not focused (which happens for rAF).
Fortunately, the WebAudio API does also have an high precision timer, based on hardware clock, rather than on screen refresh rate.
So we can come with an alternative timed loop, able to keep its frequency even when the tab is blurred.
/*
An alternative timing loop, based on AudioContext's clock

#arg callback : a callback function,
called with the audioContext's currentTime as unique argument
#arg frequency : float in ms;
#returns : a stop function
*/
function audioTimerLoop(callback, frequency) {
  // AudioContext time parameters are in seconds
  var freq = frequency / 1000;
  var aCtx = new AudioContext();
  // Chrome needs our oscillator node to be attached to the destination,
  // so we route it through a silent gain node.
  var silence = aCtx.createGain();
  silence.gain.value = 0;
  silence.connect(aCtx.destination);
  var stopped = false;
  // Each oscillator fires `onended` after `freq` seconds, re-arming the
  // loop and invoking the callback.
  // FIX: `osc` was an undeclared (implicit global) variable in the
  // original; in strict mode / ES modules that throws a ReferenceError.
  function onOSCend() {
    if (stopped) {
      return; // loop has been cancelled — do not schedule another tick
    }
    var osc = aCtx.createOscillator();
    osc.onended = onOSCend;
    osc.connect(silence);
    osc.start(0);
    osc.stop(aCtx.currentTime + freq);
    callback(aCtx.currentTime);
  }
  onOSCend();
  // return a function to stop our loop
  return function () {
    stopped = true;
  };
}
function start() {
  // Drive the animation at 25fps from the AudioContext-based timer
  // (keeps ticking even when the tab is blurred, unlike rAF).
  var stopAnim = audioTimerLoop(anim, 1000 / 25);
  // FIX: `cStream` was an implicit global (throws in strict mode /
  // modules); declare it locally. Maximum stream rate set as 25 fps.
  var cStream = canvas.captureStream(25);
  let chunks = [];
  var recorder = new MediaRecorder(cStream);
  recorder.ondataavailable = (e) => chunks.push(e.data);
  recorder.onstop = (e) => {
    // recording is done: stop the animation loop and show the result
    stopAnim();
    var url = URL.createObjectURL(new Blob(chunks));
    var v = document.createElement('video');
    v.src = url;
    v.controls = true;
    document.body.appendChild(v);
  };
  recorder.start();
  // stops the recorder in 20s; try changing tab during this time
  setTimeout(function () {
    recorder.stop();
  }, 20000);
}
// make something move on the canvas
// `ctx` is the demo canvas' 2d context; `x` is the square's x offset.
var ctx = canvas.getContext('2d');
var x = 0;
function anim() {
  // Advance the square two pixels, wrapping 100px past the right edge.
  x = (x + 2) % (canvas.width + 100);
  const { width, height } = canvas;
  // Clear to ivory, then draw the 50x50 red square.
  ctx.fillStyle = 'ivory';
  ctx.fillRect(0, 0, width, height);
  ctx.fillStyle = 'red';
  ctx.fillRect(x - 50, 20, 50, 50);
}
btn.onclick = start;
<!-- record trigger and the demo canvas -->
<button id="btn">begin</button>
<canvas id="canvas" width="500" height="200"></canvas>
Nota Bene :
In this example, I set the frequency to 25fps, but we can set it to 60fps and it seems to work correctly even on my old notebook, at least with such a simple animation.
I'm trying to write a program using Javascript and the p5.js library to trigger a random image from an array whenever a peak in an audio file is detected. p5's sound library can detect the audio peak for me and then trigger a function upon that audio peak. However, I don't have much experience in Javascript so I'm not sure where to go from here. I've created an array of images and am planning on creating a function using Math.random() to grab one of these images. Can I then call that function within my triggerBeat function?
Also, I've set the image as the background so that it's not within p5's draw function, so I'm trying to change the bg variable. I've preloaded the background image, and I've also got code within the preload function to allow the user to upload an audio file.
Sorry if this doesn't make a ton of sense. I'm pretty new to Javascript and I've spent most of today trying to wrap my head around it.
EDIT: updated code
var cnv, song, fft, peakDetect, img, bg;
var imageset = new Array("1.png","2.png","3.png");
function preload(){
img = loadImage("1.png");
// The file input lazily loads whichever audio file the user picks,
// replacing any previously loaded song first.
var loader = document.querySelector(".loader");
document.getElementById("audiofile").onchange = function(event) {
if(event.target.files[0]) {
if(typeof song != "undefined") {
song.disconnect();
song.stop();
}
song = loadSound(URL.createObjectURL(event.target.files[0]));
loader.classList.add("loading");
}
}
}
function setup() {
cnv = createCanvas(900,900);
// NOTE(review): this passes imageset[0] (a single filename string)
// where drawImage expects an array of filenames.
drawImage(imageset[0]);
fft = new p5.FFT();
peakDetect = new p5.PeakDetect();
setupSound();
// NOTE(review): this CALLS drawImage immediately and registers its
// return value (undefined) as the peak callback; to run it on each
// peak it should be passed as a function, e.g.
// peakDetect.onPeak(function(){ drawImage(imageset); }).
peakDetect.onPeak(drawImage(imageset));
}
function draw() {
// NOTE(review): called with no argument, so `arr` is undefined inside
// drawImage; this also re-triggers an async loadImage every frame.
drawImage();
}
function drawImage(arr) {
// loadImage is asynchronous — the image is unlikely to be ready by the
// time background(bg) runs in the same frame.
var bg = loadImage(random(arr));
background(bg);
fill(0);
text('play', width/2, height/2);
fft.analyze();
peakDetect.update(fft);
}
function setupSound() {
  // Clicking the canvas toggles playback of the loaded song.
  cnv.mouseClicked(() => {
    if (song.isPlaying()) {
      song.stop();
    } else {
      song.play();
    }
  });
}
p5 has math functions, one of which is random.
If one argument is given and it is an array, returns a random element from that array.
EDIT
As the result was more messy after answering the initial question, I updated the whole code.
var cnv, song, fft, peakDetect, img, bg;
var imageset = new Array("pic1.png","pic2.png","pic3.png", "pic4.png");
var imagesArr = [];
// The next line makes p5 global. Otherwise the p5 functions would be
// accessible from the p5 structure functions only.
new p5();
/*******************************************************************
* PRELOAD
* we use it for loading images/audio only
********************************************************************/
function preload(){
//load all images from 'imageset' into 'imagesArr'
for(var i=0; i<imageset.length; i++){
loadImage('../img/'+imageset[i], function(img) {
imagesArr.push(img);
});
}
// next lets load soundfile(s).
//song = loadSound("../snd/test.mp3");
// I used testfile, didn't touch nor tested your code here,
// BUT, again:
// you should only (pre)load your sounds here; setting the event should
// go to the setup()
var loader = document.querySelector(".loader");
// The file input lazily loads whichever audio file the user picks,
// replacing any previously loaded song first.
document.getElementById("audiofile").onchange = function(event) {
if(event.target.files[0]) {
if(typeof song != "undefined") {
song.disconnect();
song.stop();
}
song = loadSound(URL.createObjectURL(event.target.files[0]));
loader.classList.add("loading");
}
}
}
/*******************************************************************
* SETUP
* runs once; use for initialisation.
********************************************************************/
function setup() {
//create canvas, draw initial background and text
cnv = createCanvas(900,900);
drawBackground();
text('play', width/2, height/2);
//initiate fft, peakdetect. Set event (onpeak)
fft = new p5.FFT();
peakDetect = new p5.PeakDetect();
setupSound();
// drawBackground is passed as a callback (not invoked), so it runs on
// every detected peak.
peakDetect.onPeak(drawBackground);
}
/*******************************************************************
* DRAW
* endless loop. Here happens all the action.
* But you cannot draw your background here, as it is done by event.
********************************************************************/
function draw(){
// Keep the analyser fed; peakDetect fires the onPeak callback
// (drawBackground) whenever it detects a beat.
fft.analyze();
peakDetect.update(fft);
}
function drawBackground() {
  // Flash white, then paint a randomly picked preloaded image.
  background(255);
  const pick = random(imagesArr);
  background(pick);
}
function setupSound() {
  // A click on the canvas toggles the song: stop if playing, play if not.
  cnv.mouseClicked(() => {
    if (song.isPlaying()) {
      song.stop();
      return;
    }
    song.play();
  });
}
Have yourArray[Math.floor(Math.random() * yourArray.length)] to get a random img by calling it in your triggerBeat function
I'm making an audio player with JavaScript, everything works fine until I add a sound visualizer. When I pause the song and then play it again, the sound gets more louder every time I do it, until it gets distorsionated.
I'm a newbie with the HTML5 Audio API; I've tried to set the volume to a fixed value, but it does not work.
The code of the visualizer it's:
// Builds the frequency-bar visualiser for the given <audio> element and
// starts both playback and the render loop.
// NOTE(review): every call creates a NEW AudioContext and a new
// MediaElementSource/analyser chain — MDN recommends creating one
// AudioContext and reusing it. Re-running this per play press is what
// the accepted answer below identifies as the cause of the volume
// build-up (close the old context before loading a new song).
function visualizer(audio) {
let context = new AudioContext();
const gainNode = context.createGain();
gainNode.gain.value = 1; // setting it to 100%
gainNode.connect(context.destination);
// NOTE(review): the audio path is src -> analyser -> destination; the
// gain node above is connected to the destination but nothing routes
// INTO it, so it cannot cap the volume.
let src = context.createMediaElementSource(audio);
let analyser = context.createAnalyser();
let canvas = document.getElementById("canvas");
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
let ctx = canvas.getContext("2d");
src.connect(analyser);
analyser.connect(context.destination);
analyser.fftSize = 2048;
let bufferLength = analyser.frequencyBinCount;
let dataArray = new Uint8Array(bufferLength);
let WIDTH = ctx.canvas.width;
let HEIGHT = ctx.canvas.height;
let barWidth = (WIDTH / bufferLength) * 1.5;
let barHeight;
let x = 0;
let color = randomColor();
// Draw one frame of frequency bars, then schedule the next frame.
function renderFrame() {
requestAnimationFrame(renderFrame);
x = 0;
analyser.getByteFrequencyData(dataArray);
ctx.clearRect(0, 0, WIDTH, HEIGHT);
for (let i = 0; i < bufferLength; i++) {
barHeight = dataArray[i];
ctx.fillStyle = color;
ctx.fillRect(x, HEIGHT - barHeight, barWidth, barHeight);
x += barWidth + 1;
}
}
musicPlay();
renderFrame();
}
And:
// Flag the global playback state as 'playing', then start (or resume)
// the <audio> element. `status` and `audio` are globals defined
// elsewhere in the player.
function musicPlay() {
status = 'playing';
audio.play();
}
So, I don't know if I'm doing something wrong on the audio analyzer, I've tried to make a global context and don't do the new AudioContext(); every time I enter on the function, also I've tried to specify a fixed volume with:
audio.volume = 1;
or with the GainNode as you can see on the function, but it's not working.
Where is my mistake and why the sound gets louder?
Regards!
--- Update 1 ---
The audio it's loaded from an URL:
// Resolves with an <audio> element once it has buffered enough of `url`
// to start playing; rejects on a load error.
function loadAudioElement(url) {
  return new Promise(function (resolve, reject) {
    const el = new Audio();
    /* Resolve the promise, passing through the element. */
    el.addEventListener('canplay', () => resolve(el));
    /* Reject the promise on an error. */
    el.addEventListener('error', reject);
    el.src = url;
  });
}
And on my player I have:
let playButtonFunction = function () {
if (playstatus === 'pause') {
// NOTE(review): visualizer(audio) is INVOKED here — .then() receives
// its return value (undefined) rather than a callback. Each press also
// re-runs loadAudioElement, creating a fresh <audio> element; the
// accepted answer below identifies this as the cause of the volume
// build-up.
loadAudioElement(audio.src).then(
visualizer(audio)
);
} else if (playstatus === 'playing') {
musicPause();
}
};
I had a similar issue, did you try to set the audio context to a global object?
This is what I found here:
https://developer.mozilla.org/en-US/docs/Web/API/AudioContext
It's recommended to create one AudioContext and reuse it instead of initializing a new one each time
The AudioContext interface represents an audio-processing graph built from audio modules linked together, each represented by an AudioNode.
An audio context controls both the creation of the nodes it contains and the execution of the audio processing, or decoding. You need to create an AudioContext before you do anything else, as everything happens inside a context. It's recommended to create one AudioContext and reuse it instead of initializing a new one each time, and it's OK to use a single AudioContext for several different audio sources and pipeline concurrently.
Well, as Get Off My Lawn pointed, I was adding by mistake multiple audio elements.
The solution was taking the code of load the song outside the playButtonFunction and only do:
// Toggle playback based on the current global playstatus.
let playButtonFunction = function () {
  switch (playstatus) {
    case 'pause':
      musicPlay();
      break;
    case 'playing':
      musicPause();
      break;
  }
};
But I still had one problem, with the next/previous functions. In these cases I need call the loadAudioElement function because the song is changing (when you press play/pause no, it's the same song) but with this I have the same problem again.
Well, after a bit of digging, I found that if you want to play a playlist and visualize the music all the time, YOU HAVE TO RELEASE THE OLD CONTEXT BEFORE LOADING THE NEW SONG. This is not only to avoid the increase in volume: CPU and memory use will also grow after 3 - 4 songs and the browser will start to run slowly, depending on the machine. So:
1 - I made a global variable called closeAudioContext = false;
2 - On my next/previous functions I added this code:
// Tear down the previous audio graph before starting the next song.
if (closeAudioContext) { //MANDATORY RELEASE THE PREVIOUS RESOURCES TO AVOID OBJECT OVERLAPPING AND CPU-MEMORY USE
context.close();
context = new AudioContext();
}
// NOTE(review): visualizer(audio) is still invoked immediately here
// rather than being passed as a callback to .then().
loadAudioElement(audio.src).then(
visualizer(audio)
);
3 - On my visualizer(audio) function I changed:
let context = new AudioContext();
to
closeAudioContext = true; //MANDATORY RELEASE THE PREVIOUS RESOURCES TO AVOID OBJECT OVERLAPPING AND CPU-MEMORY USE
The value is initialized to false because the first time there is no song playing; after playing a song you will always need to release the old resources, so the variable will always be set to true. Now you can skip songs as many times as you want without worrying about the memory and overlapping issues.
Hope this helps someone else trying to achieve the same thing! Regards!
I've been working on this way for people to draw stuff on canvas and upload it to the server.
It works fine except for people being able to insert their own images (my friend tested it)
Here's what it does:
Person clicks "submit" their canvas is saved into base64 and is sent using $.post()
php file in the $.post() runs and saves the file to a file in the server
Is there any way from preventing the user to be able to submit their own images, I already check image dimensions and so on but they'd just resize it and submit it. (I don't think php's ability to draw image rects would work due to my small servers)
Is there any way from preventing the user to be able to submit their own images
Nope.
Anyone can upload whatever they want by completely bypassing your client-side code. There's nothing you can do about this, outside of hacky heuristics. (Have they been on the page awhile? Were mouse movements or screen touches detected? Did they actually draw something?) These sorts of things can also be faked, it's just a hair more hassle to hack around. Don't bother with this, you'll create more problems for yourself than you will solve.
If, as I understand it, your app is a simple drawing tool, then one easy way would be to only send a JSON containing all the user gestures, instead of saving the image per se.
This way, even if one could still bypass your app, and produce images out of you control (i.e programmatically), they wouldn't be able to save e.g pr0n images on your server.
This would also allow you to perform a sanity check of the data structure server side, before saving it, and to implement a cancel feature in your app.
One counter-side effect however, would be a consequent increase of data to save on large drawings (but a decrease on smaller ones).
// 2d context of the drawing surface.
const ctx = canvas.getContext('2d');
// drawing: is a stroke in progress; rect: cached canvas bounds;
// paths: the recorded gesture list (what gets serialised).
let drawing = false,
rect = canvas.getBoundingClientRect(),
paths = [];
// JSON snapshot of `paths`, as last saved.
let savedData = '[]';
// Serialise the gesture list; this JSON is what would be uploaded
// instead of image pixels.
save_btn.onclick = _ => {
savedData = JSON.stringify(paths);
console.clear();
console.log(savedData);
// here send this JSON data to the server
};
// Restore the last saved JSON and repaint from it.
load_btn.onclick = _ => {
// easy to grab from server too
paths = JSON.parse(savedData);
draw();
};
// Repaint the whole canvas from the recorded path list.
function draw() {
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  // at each draw, we loop over all our paths
  for (const { strokeWidth, color, list } of paths) {
    ctx.lineWidth = strokeWidth;
    ctx.strokeStyle = color;
    ctx.beginPath();
    ctx.moveTo(list[0], list[1]);
    for (let i = 2; i < list.length; i += 2) {
      ctx.lineTo(list[i], list[i + 1]);
    }
    ctx.stroke();
  }
}
// returns a new path object
function makePath() {
  // Fresh path: random colour, empty point list, width in [1, 11).
  return {
    color: randCol(),
    list: [],
    strokeWidth: Math.random() * 10 + 1,
  };
}
// A stroke ends when the button is released or the cursor leaves.
canvas.onmouseup = canvas.onmouseleave = e => {
drawing = false;
};
// Each press starts a brand-new path object.
canvas.onmousedown = e => {
paths.push(makePath());
drawing = true;
}
canvas.onmousemove = throttle(e => {
if (!drawing) return;
// to minimize the size of our JSON data
// we fix the coordinates to precision(2)
// NOTE(review): ~~(v * 10) / 10 actually keeps ONE decimal, not two.
let x = ~~((e.clientX - rect.left) *10)/10;
let y = ~~((e.clientY - rect.top) *10)/10;
paths[paths.length - 1].list.push(x, y);
draw();
});
// Keep the cached bounding rect in sync with layout changes.
window.onresize = window.onscroll = throttle(e => rect = canvas.getBoundingClientRect());
// Trailing-edge throttle: coalesces bursts of events so `callback`
// fires at most once per animation frame, with the latest event.
function throttle(callback) {
  let scheduled = false;
  let lastEvent;
  const run = () => {
    scheduled = false;
    callback(lastEvent);
  };
  return function handleEvent(e) {
    lastEvent = e;
    if (!scheduled) {
      scheduled = true;
      requestAnimationFrame(run);
    }
  };
}
// Returns a random hex colour string like '#3FA2C4'.
function randCol() {
  const letters = '0123456789ABCDEF';
  let color = '#';
  for (let i = 0; i < 6; i++) {
    // FIX: Math.floor(x * 16) gives all 16 digits equal probability;
    // the original Math.round(x * 15) halved the odds of '0' and 'F'.
    color += letters[Math.floor(Math.random() * 16)];
  }
  return color;
}
/* outline the drawing surface */
canvas{
border: 1px solid;
}
<!-- save/load controls and the drawing surface -->
<button id="save_btn">save</button>
<button id="load_btn">load last saved</button><br>
<canvas id="canvas"></canvas>
You can check image data such as the image height ,width, possibly even image size and color depending on what is rendered in the canvas. For an example let's say the canvas is 250 x 250 pixels and it renders 2D squares only using the colors blue, red, and green. If there are more than three colors, if the colors are not only blue, red, and green, or if the canvas is not 250 x 250 you deny it. You could also check the "referrer" value in the user agent, this can be easily changed though.