I have written this function to capture each frame for the GIF, but the output is very laggy and it crashes as the amount of data increases. Any suggestions?
Code :
/**
 * Assemble an animated GIF from a list of still frames and trigger a download.
 *
 * @param {string[]} list - Frame image URLs (blob:/data: URLs) in playback order.
 * @param {number} framerate - Frames per second; converted to gifshot's per-frame interval (seconds).
 * @param {string} fileName - Suggested file name for the downloaded GIF.
 * @param {number} gifScale - Multiplier applied to the global wWidth/wHeight dimensions.
 */
function createGifFromPng(list, framerate, fileName, gifScale) {
    gifshot.createGIF({
        'images': list,
        // NOTE(review): wWidth / wHeight are globals defined elsewhere — confirm they exist here.
        'gifWidth': wWidth * gifScale,
        'gifHeight': wHeight * gifScale,
        'interval': 1 / framerate, // gifshot expects seconds between frames
    }, function(obj) {
        if (!obj.error) {
            var image = obj.image; // base64 data URI of the finished GIF
            var a = document.createElement('a');
            document.body.append(a);
            a.download = fileName;
            a.href = image;
            a.click();
            a.remove();
        } else {
            // BUGFIX: errors were previously swallowed silently; surface them
            // so encoding failures are visible instead of just "nothing happens".
            console.error('gifshot failed:', obj.error);
        }
    });
}
/////////////////////////////////////////////////////////////////////////
/**
 * Capture `framesCount` frames of a PIXI sprite at `framerate` fps as blob
 * URLs, then hand them to createGifFromPng() for GIF encoding and download.
 *
 * @param {*} renderer - PIXI renderer exposing extract.canvas().
 * @param {*} sprite - Display object to snapshot each tick.
 * @param {string} fileName - Download name for the resulting GIF.
 * @param {number} gifScale - Scale factor forwarded to the GIF encoder.
 * @param {number} framesCount - How many frames to record before encoding.
 * @param {number} framerate - Capture rate in frames per second.
 */
function getGifFromCanvas(renderer, sprite, fileName, gifScale, framesCount, framerate) {
    var listImgs = [];
    // Guards against multiple saves: several toBlob callbacks can still land
    // after the frame limit is reached (toBlob is asynchronous).
    var saving = false;
    var interval = setInterval(function() {
        renderer.extract.canvas(sprite).toBlob(function(b) {
            if (listImgs.length >= framesCount) {
                clearInterval(interval);
                if (!saving) {
                    createGifFromPng(listImgs, framerate, fileName, gifScale);
                    saving = true;
                    // TODO(review): the blob URLs in listImgs are never released
                    // with URL.revokeObjectURL(), so memory grows with every
                    // capture — likely the cause of the reported lag/crashes on
                    // larger recordings. Revoke them once gifshot has consumed
                    // the frames.
                }
            } else {
                listImgs.push(URL.createObjectURL(b));
            }
        // BUGFIX: canvas.toBlob cannot encode 'image/gif'; browsers silently
        // fall back to PNG, so request PNG explicitly (gifshot accepts PNG frames).
        }, 'image/png');
    }, 1000 / framerate);
}
In modern browsers you can use a combination of the MediaRecorder API and the HTMLCanvasElement.captureStream method.
The MediaRecorder API will be able to encode a MediaStream in a video or audio media file on the fly, resulting in far less memory needed than when you grab still images.
// 2D drawing context for the (globally available) #canvas element.
const ctx = canvas.getContext('2d');
// Current horizontal position of the animated square; updated by anim() below.
var x = 0;
// Function declarations are hoisted, so calling them before their definitions is safe.
anim();
startRecording();
/**
 * Record the canvas for 3 seconds via MediaRecorder and hand the resulting
 * webm Blob to exportVid().
 */
function startRecording() {
  // Encoded media chunks (Blobs) accumulate here until the recorder stops.
  const chunks = [];
  // A live MediaStream mirroring everything drawn on the canvas.
  const stream = canvas.captureStream();
  const rec = new MediaRecorder(stream);
  // Stash each chunk of encoded data as the recorder produces it.
  rec.ondataavailable = (e) => {
    chunks.push(e.data);
  };
  // Once recording stops, stitch all chunks into a single webm Blob.
  rec.onstop = () => {
    exportVid(new Blob(chunks, { type: 'video/webm' }));
  };
  rec.start();
  // Automatically stop recording after 3 seconds.
  setTimeout(() => rec.stop(), 3000);
}
/**
 * Attach a playable <video> element plus a download link for the recorded Blob.
 * @param {Blob} blob - The finished webm recording.
 */
function exportVid(blob) {
  const url = URL.createObjectURL(blob);
  // Inline player for immediate preview.
  const vid = document.createElement('video');
  vid.src = url;
  vid.controls = true;
  document.body.appendChild(vid);
  // Download link pointing at the same blob URL.
  const a = document.createElement('a');
  a.download = 'myvid.webm';
  a.href = url;
  a.textContent = 'download the video';
  document.body.appendChild(a);
}
/**
 * Draw one animation frame: a black 40x40 square sweeping across a white
 * background, wrapping at the canvas edge. Reschedules itself every frame.
 */
function anim() {
  // Advance the square and wrap around at the right edge.
  x = (x + 1) % canvas.width;
  // Repaint the white background.
  ctx.fillStyle = 'white';
  ctx.fillRect(0, 0, canvas.width, canvas.height);
  // Draw the square horizontally centred on x.
  ctx.fillStyle = 'black';
  ctx.fillRect(x - 20, 0, 40, 40);
  requestAnimationFrame(anim);
}
<canvas id="canvas"></canvas>
You can also use https://github.com/spite/ccapture.js/ to capture to gif or video.
Related
What I am trying to achieve is to have "stream" of blobs from my socket and append it to a video.
Currently, I have something like
// Socket.IO connection used to receive video data chunks from the server.
const socket = io();
// Container element that holds the remote video feed(s).
const videoGrid = document.getElementById("video-grid");
// The <video> element that will play the reassembled stream.
const video = document.createElement("video");
video.muted = true;
videoGrid.append(video);
// All chunks received so far; a fresh Blob is rebuilt from these on every message.
let blobArray = [];
// Tracks the object URL currently assigned to the <video>, so it can be
// revoked when a new one replaces it. BUGFIX: the original created a new
// object URL on every message and never revoked any of them, leaking memory
// for the lifetime of the page.
let currentObjectUrl = null;
socket.on("view-stream-10", (data) => {
  // Wrap the raw bytes in a Blob and append to the running chunk list.
  blobArray.push(
    new Blob([new Uint8Array(data)], {
      type: "video/x-matroska;codecs=avc1,opus",
    })
  );
  // Remember the playhead so playback resumes from the same point after the
  // src swap. NOTE(review): the swap itself still causes the brief visible
  // pause described in the question — for seamless appends use the
  // MediaSource API (SourceBuffer.appendBuffer) instead of rebuilding a Blob.
  let currentTime = video.currentTime;
  let blob = new Blob(blobArray, { type: "video/x-matroska;codecs=avc1,opus" });
  if (currentObjectUrl) {
    URL.revokeObjectURL(currentObjectUrl);
  }
  currentObjectUrl = window.URL.createObjectURL(blob);
  video.src = currentObjectUrl;
  video.currentTime = currentTime;
  video.play();
});
It works, but there is a problem: the video pauses for a few milliseconds at the point where the new Blob is created and the URL is swapped, and the pause is very noticeable.
Is there a better way to do this?
I have a webpage where the user will drop a video file; the page will upload the video and generate a thumbnail based on a timestamp that the user provides.
For the moment, I am just trying to generate the thumbnail based on the FIRST frame of the video.
Here is a quick example of my current progress:
(please use chrome as firefox will complain about the https link, and also, sorry if it autodownload an image)
https://stackblitz.com/edit/rxjs-qc8iag
import { Observable, throwError } from 'rxjs'
// Helper object for extracting a still frame from a video URL as a Blob.
const VIDEO = {
// Loads videoSrc into an off-screen <video>, draws the current frame onto a
// canvas when 'canplay' fires, and emits the frame as a PNG Blob.
// NOTE(review): `throwError(...)` below only *creates* an rxjs observable and
// is never subscribed, so the missing-context error is silently dropped —
// `obs.error(...)` is presumably what was intended.
// NOTE(review): drawing at 'canplay' can happen before a frame is actually
// decoded in some browsers, which would explain the empty image reported
// below; the accepted fix later on this page draws on 'loadeddata' instead.
imageFromFrame(videoSrc: any): Observable<any> {
return new Observable<any>((obs) => {
const canvas = document.createElement('canvas')
const video = document.createElement('video')
const context = canvas.getContext('2d')
const source = document.createElement('source');
source.setAttribute('src', videoSrc);
video.appendChild(source);
// NOTE(review): both elements are appended to the body and never removed.
document.body.appendChild(canvas)
document.body.appendChild(video)
if (!context) {
throwError(`Couldn't retrieve context 2d`)
obs.complete()
return
}
video.load()
video.addEventListener('loadedmetadata', function () {
console.log('loadedmetadata')
// Set canvas dimensions same as video dimensions
canvas.width = video.videoWidth
canvas.height = video.videoHeight
})
video.addEventListener('canplay', function () {
console.log('canplay')
canvas.style.display = 'inline'
context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
// Convert canvas image to Base64
const img = canvas.toDataURL("image/png")
// Convert Base64 image to binary
obs.next(VIDEO.dataURItoBlob(img))
obs.complete()
})
})
},
// Decodes a data: URI string into a Blob carrying the URI's declared MIME type.
dataURItoBlob(dataURI: string): Blob {
// convert base64/URLEncoded data component to raw binary data held in a string
var byteString
if (dataURI.split(',')[0].indexOf('base64') >= 0) byteString = atob(dataURI.split(',')[1])
else byteString = unescape(dataURI.split(',')[1])
// separate out the mime component
var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0]
// write the bytes of the string to a typed array
var ia = new Uint8Array(byteString.length)
for (var i = 0; i < byteString.length; i++) {
ia[i] = byteString.charCodeAt(i)
}
return new Blob([ia], { type: mimeString })
},
}
// Extract a frame from the sample video and download it via a temporary anchor.
VIDEO.imageFromFrame('https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4?_=1').subscribe((r) => {
  var a = document.createElement('a')
  document.body.appendChild(a)
  const url = window.URL.createObjectURL(r)
  a.href = url
  a.download = 'sdf'
  a.click()
  // BUGFIX: defer the cleanup — revoking the URL synchronously right after
  // click() can cancel the download in some browsers, and the anchor element
  // was never removed from the body (a small DOM leak in the original).
  setTimeout(() => {
    window.URL.revokeObjectURL(url)
    a.remove()
  }, 0)
})
The problem is that the downloaded image is empty and does not represent the first frame of the video, even though the video should have been loaded and drawn onto the canvas.
I am trying to solve it, but if someone could help me figure out the issue, thanks.
For anyone looking, I made this version, which works correctly (it needs improvement, but the idea is there). I use an Observable because the flow of my app uses Observables, but you can change it to a Promise or whatever you prefer:
// Extracts a single frame from a local video File as an image File.
// Creates hidden <video>/<canvas> elements, seeks to frameTimeInSeconds,
// draws the frame once 'loadeddata' fires, and emits it as a PNG by default.
// NOTE(review): currentTime is assigned *before* load(); browsers appear to
// honour the pending seek once data is available, but moving the assignment
// into the 'loadedmetadata' handler would be more robust — confirm across browsers.
imageFromFrame(
videoFile: File,
options: { frameTimeInSeconds: number; filename?: string; fileType?: string } = {
frameTimeInSeconds: 0.1,
}
): Observable<File> {
return new Observable<any>((obs) => {
const canvas = document.createElement('canvas')
const video = document.createElement('video')
const source = document.createElement('source')
const context = canvas.getContext('2d')
// Temporary object URL for the dropped file; revoked in the toBlob callback below.
const urlRef = URL.createObjectURL(videoFile)
video.style.display = 'none'
canvas.style.display = 'none'
source.setAttribute('src', urlRef)
video.setAttribute('crossorigin', 'anonymous')
video.appendChild(source)
document.body.appendChild(canvas)
document.body.appendChild(video)
if (!context) {
// NOTE(review): throwError() creates an unsubscribed observable and is a
// no-op here; obs.error(...) would actually notify the subscriber.
throwError(`Couldn't retrieve context 2d`)
obs.complete()
return
}
video.currentTime = options.frameTimeInSeconds
video.load()
// Size the canvas to the intrinsic video dimensions as soon as they are known.
video.addEventListener('loadedmetadata', function () {
canvas.width = video.videoWidth
canvas.height = video.videoHeight
})
// 'loadeddata' guarantees the frame at the current position is decoded,
// unlike 'canplay', which may fire before anything is drawable.
video.addEventListener('loadeddata', function () {
context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
canvas.toBlob((blob) => {
if (!blob) {
// NOTE(review): on encoding failure the observable never emits nor
// completes, so subscribers hang — consider obs.error(...) here.
return
}
obs.next(
new File([blob], options.filename || FILES.getName(videoFile.name), {
type: options.fileType || 'image/png',
})
)
obs.complete()
// Clean up the temporary URL and hidden DOM nodes.
URL.revokeObjectURL(urlRef)
video.remove()
canvas.remove()
}, options.fileType || 'image/png')
})
})
},
I'm trying to capture webcam video on the client side and send frames to the server to process them. I'm a newbie in JS and I'm having some problems.
I tried to use OpenCV.js to get the data but I didn't understand how to get it, in Python we can make
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
and the frame is an 2d-array with the image, but how can I get each frame (as 2d array) to send using OpenCV.js?
I have this code on the client side:
<script type="text/javascript">
/**
 * Entry point invoked once opencv.js has loaded: waits for the WASM runtime,
 * attaches the webcam stream to #videoElement, and streams raw frame pixels
 * to the server over Socket.IO at ~15 fps.
 */
function onOpenCvReady() {
  // Wait until the OpenCV.js runtime is fully initialised before touching cv.*
  cv['onRuntimeInitialized'] = () => {
    var socket = io('http://localhost:5000');
    socket.on('connect', function () {
      console.log("Connected...!", socket.connected)
    });
    const video = document.querySelector("#videoElement");
    video.width = 500;
    video.height = 400;
    // Reusable RGBA matrix that each captured frame is written into.
    let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
    let cap = new cv.VideoCapture(video);
    const FPS = 15;
    function processVideo() {
      let begin = Date.now();
      cap.read(src);              // copy the current video frame into src
      handle_socket(src['data']); // raw pixel bytes of the frame
      // Space frames ~1000/FPS ms apart, accounting for how long this read
      // took (setTimeout clamps negative delays to 0).
      let delay = 1000 / FPS - (Date.now() - begin);
      setTimeout(processVideo, delay);
    }
    function handle_socket(src) {
      socket.emit('event', { info: 'I\'m connected!', data: src });
    }
    if (navigator.mediaDevices.getUserMedia) {
      navigator.mediaDevices.getUserMedia({ video: true })
        .then(function (stream) {
          video.srcObject = stream;
          video.play();
          // BUGFIX: only start grabbing frames once the webcam stream is
          // attached; the original scheduled processVideo unconditionally,
          // reading from a video element that had no data yet.
          setTimeout(processVideo, 0);
        })
        .catch(function (err) {
          console.log(err)
          console.log("Something went wrong!");
        });
    }
  }
}
</script>
My solution was:
// Select HTML video element where the webcam data is
// Select HTML video element where the webcam data is
const video = document.querySelector("#videoElement");
// Last processing result returned by the server.
// BUGFIX: declared explicitly — `results` was an implicit global in the original.
let results;
/**
 * Snapshot the current video frame as a base64-encoded JPEG data URL.
 * @returns {string} "data:image/jpeg;base64,..." string for the current frame.
 */
const getFrame = () => {
    const canvas = document.createElement('canvas');
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    canvas.getContext('2d').drawImage(video, 0, 0);
    const data = canvas.toDataURL('image/jpeg');
    return data;
}
/**
 * Send one encoded frame to the server and remember its processing result.
 * @param {string} data - Base64 frame produced by getFrame().
 * @param {*} dst - Unused; kept for call-site compatibility.
 */
function handle_socket(data, dst) {
    socket.emit('event', data, function (res) {
        if (res !== 0) {
            results = res;
        }
    });
}
// BUGFIX: declared with const — `frame64` was an implicit global in the original.
const frame64 = getFrame();
handle_socket(frame64);
I want to mix two audio sources into a single source by putting one song behind the other as a background track.
For example, I have this input:
<input id="files" type="file" name="files[]" multiple onchange="handleFilesSelect(event)"/>
And script to decode this files:
// Fall back to the prefixed constructor for older WebKit browsers.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new window.AudioContext();
// Decoded AudioBufferSourceNodes, one per selected file (filled by readFiles).
var sources = [];
// FileList captured from the <input type="file"> change event.
var files = [];
// Placeholder for the eventual mixed result.
var mixed = {};
/**
 * change-handler for the file input: requires at least two files, stores the
 * FileList, then decodes every file and mixes them once decoding finishes.
 * @param {Event} event - change event from <input type="file" multiple>.
 * @returns {boolean|undefined} false when fewer than two files were chosen.
 */
function handleFilesSelect(event){
    if(event.target.files.length <= 1)
        return false;
    files = event.target.files;
    // BUGFIX: readFiles(index, callback) expects the start index first; the
    // original passed the callback as `index`, so `i` became a function,
    // `files[i]` was undefined and decoding crashed before it started.
    readFiles(0, mixAudioSources);
}
/**
 * Recursively decode files[i] into sources[i] via FileReader + decodeAudioData,
 * then invoke callback once every file has been decoded.
 *
 * @param {number|Function} index - Start index. For backward compatibility, a
 *        function in this position is treated as the callback with index 0
 *        (the original call site passed the callback first).
 * @param {Function} [callback] - Invoked after the last file is decoded.
 */
function readFiles(index, callback){
    // BUGFIX: tolerate the legacy call shape readFiles(callback).
    if (typeof index === "function") {
        callback = index;
        index = 0;
    }
    var freader = new FileReader();
    var i = index ? index : 0;
    freader.onload = function (e) {
        context.decodeAudioData(e.target.result, function (buf) {
            sources[i] = context.createBufferSource();
            sources[i].connect(context.destination);
            sources[i].buffer = buf;
            // Decode the next file, or fire the completion callback.
            if(files.length > i+1){
                readFiles(i + 1, callback);
            } else {
                if(callback){
                    callback();
                }
            }
        });
    };
    freader.readAsArrayBuffer(files[i]);
}
// Stub: by the time this runs every file has been decoded into `sources`;
// the actual layering/mixing (e.g. via OfflineAudioContext or a channel
// merger) is exactly what the question asks how to implement.
function mixAudioSources(){
//So on our scenario we have here two decoded audio sources in "sources" array.
//How we can mix that "sources" into "mixed" variable by putting "sources[0]" as background of "sources[1]"
}
So how can I mix these sources into one? For example, given two files, how can I put one source in the background of the other and combine the mix into a single source?
Another scenario: if I read an input stream from a microphone, is it possible to lay that input over a background song (a kind of karaoke) on the client with HTML5 support? What about performance? Or is it better to mix these audio sources on the server side?
If it possible, so what the possible implementation of mixAudioSources function?
Thanks.
Two approach originally posted at Is it possible to mix multiple audio files on top of each other preferably with javascript, adjusted to process File objects at change event of <input type="file"> element.
The first approach utilizes OfflineAudioContext(), AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination(), Promise constructor, Promise.all(), MediaRecorder() to mix audio tracks, then offer mixed audio file for download.
var div = document.querySelector("div");
// First approach: decode every selected file, render them together with an
// OfflineAudioContext, then play and record the rendered mix with
// MediaRecorder and offer it for download.
function handleFilesSelect(input) {
div.innerHTML = "loading audio tracks.. please wait";
var files = Array.from(input.files);
// Hard stop for playback/recording, in milliseconds.
var duration = 60000;
var chunks = [];
var audio = new AudioContext();
// Stream destination bridging Web Audio output to MediaRecorder.
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
var context;
var recorder;
// Concatenation of the (sanitised) input file names; used as the download name.
var description = "";
player.controls = "controls";
// Read one File into an ArrayBuffer.
// NOTE(review): reader.onerror is not handled, so a failed read leaves the
// promise pending forever.
function get(file) {
description += file.name.replace(/\..*|\s+/g, "");
return new Promise(function(resolve, reject) {
var reader = new FileReader;
reader.readAsArrayBuffer(file);
reader.onload = function() {
resolve(reader.result)
}
})
}
// Stop every passed node (sources / recorder) after `duration` ms.
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(files.map(get)).then(function(data) {
// NOTE(review): `len` is the largest *encoded* byte length used as the
// offline context's frame count; it is not the decoded sample count, so the
// rendered duration only approximates the real track length — confirm this
// is acceptable for the inputs used.
var len = Math.max.apply(Math, data.map(function(buffer) {
return buffer.byteLength
}));
context = new OfflineAudioContext(2, len, 44100);
return Promise.all(data.map(function(buffer) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
// Overlap all tracks by connecting each to the same offline destination.
var source = context.createBufferSource();
source.buffer = bufferSource;
source.connect(context.destination);
return source.start()
})
}))
.then(function() {
// Render every connected source into a single AudioBuffer.
return context.startRendering()
})
.then(function(renderedBuffer) {
return new Promise(function(resolve) {
// Play the rendered mix audibly and into the recorder's stream at once.
var mix = audio.createBufferSource();
mix.buffer = renderedBuffer;
mix.connect(audio.destination);
mix.connect(mixedAudio);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
mix.start(0);
div.innerHTML = "playing and recording tracks..";
// stop playback and recorder in 60 seconds
stopMix(duration, mix, recorder)
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
console.log("recording complete");
resolve(blob)
};
})
})
.then(function(blob) {
console.log(blob);
div.innerHTML = "mixed audio tracks ready for download..";
// Build the download link and inline player for the finished mix.
var audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
document.body.appendChild(a);
a.insertAdjacentHTML("afterend", "<br>");
player.src = audioDownload;
document.body.appendChild(player);
})
})
.catch(function(e) {
console.log(e)
});
}
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<input id="files"
type="file"
name="files[]"
accept="audio/*"
multiple
onchange="handleFilesSelect(this)" />
<div></div>
</body>
</html>
The second approach uses AudioContext.createChannelMerger(), AudioContext.createChannelSplitter()
var div = document.querySelector("div");
// Second approach: route each decoded track through a channel
// splitter/merger pair so they play simultaneously, recording the merged
// stream with MediaRecorder.
function handleFilesSelect(input) {
div.innerHTML = "loading audio tracks.. please wait";
var files = Array.from(input.files);
var chunks = [];
// [splitter output channel -> merger input channel] routing per track.
var channels = [
[0, 1],
[1, 0]
];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
// Stream destination bridging the merged graph to MediaRecorder.
var mixedAudio = audio.createMediaStreamDestination();
// Hard stop for playback/recording, in milliseconds.
var duration = 60000;
var context;
var recorder;
var audioDownload;
// Concatenation of the (sanitised) input file names; used as the download name.
var description = "";
player.controls = "controls";
// Read one File into an ArrayBuffer.
// NOTE(review): reader.onerror is not handled; a failed read never settles.
function get(file) {
description += file.name.replace(/\..*|\s+/g, "");
console.log(description);
return new Promise(function(resolve, reject) {
var reader = new FileReader;
reader.readAsArrayBuffer(file);
reader.onload = function() {
resolve(reader.result)
}
})
}
// Stop every passed node (sources / recorder) after `duration` ms.
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(files.map(get)).then(function(data) {
return Promise.all(data.map(function(buffer, index) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
// Route this track's channels into the shared merger.
var channel = channels[index];
var source = audio.createBufferSource();
source.buffer = bufferSource;
source.connect(splitter);
splitter.connect(merger, channel[0], channel[1]);
return source
})
}))
.then(function(audionodes) {
// Send the merged mix to both the recorder stream and the speakers.
merger.connect(mixedAudio);
merger.connect(audio.destination);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
// Start every track together so they stay in sync.
audionodes.forEach(function(node, index) {
node.start(0)
});
div.innerHTML = "playing and recording tracks..";
stopMix(duration, ...audionodes, recorder);
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
// Build the download link and inline player for the recorded mix.
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
};
})
})
.catch(function(e) {
console.log(e)
});
}
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<input id="files"
type="file"
name="files[]"
accept="audio/*"
multiple onchange="handleFilesSelect(this)" />
<div></div>
</body>
</html>
I just want to complement guest271314's excellent answer and post here a solution, based on that answer, for the second scenario (where the second source is the microphone input) — effectively client-side karaoke. Script:
// Fall back to the prefixed constructor for older WebKit browsers.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new window.AudioContext();
// Decoded backing track (AudioBufferSourceNode); created when a file is chosen.
var playbackTrack = null;
/**
 * change-handler for the file input: decodes the chosen audio file into
 * playbackTrack and reveals the "start karaoke" button.
 * @param {HTMLInputElement} event - The file input element itself (the markup
 *        passes `this` from its onchange attribute).
 */
function handleFileSelect(event){
    var file = event.files[0];
    var freader = new FileReader();
    freader.onload = function (e) {
        context.decodeAudioData(e.target.result, function (buf) {
            playbackTrack = context.createBufferSource();
            playbackTrack.buffer = buf;
            var karaokeButton = document.getElementById("karaoke_start");
            karaokeButton.style.display = "inline-block";
            // BUGFIX: assign onclick instead of addEventListener so choosing a
            // second file does not stack another click handler (the original
            // added a new listener on every selection, so one click could
            // trigger startKaraoke multiple times).
            karaokeButton.onclick = startKaraoke;
        });
    };
    freader.readAsArrayBuffer(file);
}
/**
 * After `duration` milliseconds, stop the recorder and shut down the shared
 * AudioContext (which also silences the backing track and microphone).
 * @param {number} duration - Delay in milliseconds before stopping.
 * @param {MediaRecorder} mediaRecorder - Recorder to stop.
 */
function stopMix(duration, mediaRecorder) {
    setTimeout(function (rec) {
        rec.stop();
        context.close();
    }, duration, mediaRecorder);
}
// Starts the karaoke session: captures the microphone, mixes it with the
// decoded backing track through a gain + splitter/merger graph, plays the mix
// and records it with MediaRecorder for `duration` ms, then offers the
// recording for playback and download.
function startKaraoke(){
navigator.mediaDevices.getUserMedia({audio: true,video: false})
.then(function(stream) {
// Destination node whose .stream feeds the MediaRecorder.
var mixedAudio = context.createMediaStreamDestination();
var merger = context.createChannelMerger(2);
var splitter = context.createChannelSplitter(2);
var duration = 5000;
var chunks = [];
// [splitter output channel -> merger input channel] routing pairs.
var channel1 = [0,1];
var channel2 = [1, 0];
// Gain node lets the backing track sit below the vocals.
var gainNode = context.createGain();
playbackTrack.connect(gainNode);
gainNode.connect(splitter);
gainNode.gain.value = 0.5; // From 0 to 1
splitter.connect(merger, channel1[0], channel1[1]);
var microphone = context.createMediaStreamSource(stream);
microphone.connect(splitter);
splitter.connect(merger, channel2[0], channel2[1]);
// Send the merged signal both to the recorder and to the speakers.
merger.connect(mixedAudio);
merger.connect(context.destination);
playbackTrack.start(0);
var mediaRecorder = new MediaRecorder(mixedAudio.stream);
mediaRecorder.start(1); // 1 ms timeslice: deliver data chunks continuously
mediaRecorder.ondataavailable = function (event) {
chunks.push(event.data);
}
mediaRecorder.onstop = function(event) {
var player = new Audio();
player.controls = "controls";
// NOTE(review): labelling the Blob "audio/mp3" does not transcode it; the
// recorder's native container (webm/ogg) is what is actually inside —
// confirm the intended type.
var blob = new Blob(chunks, {
"type": "audio/mp3"
});
// NOTE(review): audioDownload is an implicit global (missing `var`).
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = "karaokefile." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
};
stopMix(duration, mediaRecorder);
})
.catch(function(error) {
console.log('error: ' + error);
});
}
And Html:
<input id="file"
type="file"
name="file"
accept="audio/*"
onchange="handleFileSelect(this)" />
<span id="karaoke_start" style="display:none;background-color:yellow;cursor:pointer;">start karaoke</span>
Here the working plnkr example: plnkr
I want to combine audio clips, layered on top of each other, so that they play synchronously and are saved in a new audio file. Any help would be much appreciated. I've done some digging online but couldn't find a definitive answer as to whether many of the available JavaScript audio-editing libraries (Mix.js, for example) are capable of this.
Yes, it is possible using OfflineAudioContext() or AudioContext.createChannelMerger() and creating a MediaStream. See Phonegap mixing audio files , Web Audio API.
You can use fetch() or XMLHttpRequest() to retrieve audio resource as an ArrayBuffer, AudioContext.decodeAudioData() to create an AudioBufferSourceNode from response; OfflineAudioContext() to render merged audio, AudioContext, AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination() , MediaRecorder() to record stream; Promise.all(), Promise() constructor, .then() to process asynchronous requests to fetch(), AudioContext.decodeAudioData(), pass resulting mixed audio Blob at stop event of MediaRecorder.
Connect each AudioContext AudioBufferSourceNode to OfflineAudioContext.destination, call .start() on each node; call OfflineAudioContext.startRendering(); create new AudioContext node, connect renderedBuffer; call .createMediaStreamDestination() on AudioContext to create a MediaStream from merged audio buffers, pass .stream to MediaRecorder(), at stop event of MediaRecorder, create Blob URL of Blob of recorded audio mix with URL.createObjectURL(), which can be downloaded using <a> element with download attribute and href set to Blob URL.
// Remote audio tracks to be mixed together.
var sources = ["https://upload.wikimedia.org/wikipedia/commons/b/be/"
+ "Hidden_Tribe_-_Didgeridoo_1_Live.ogg"
, "https://upload.wikimedia.org/wikipedia/commons/6/6e/"
+ "Micronesia_National_Anthem.ogg"];
// Base name for the downloaded mixed file.
var description = "HiddenTribeAnthem";
// OfflineAudioContext used for rendering; assigned inside the promise chain below.
var context;
// MediaRecorder capturing the mixed stream; assigned inside the chain below.
var recorder;
var div = document.querySelector("div");
// Hard stop for playback/recording, in milliseconds.
var duration = 60000;
// Encoded chunks delivered by the recorder.
var chunks = [];
var audio = new AudioContext();
// Stream destination bridging Web Audio output to MediaRecorder.
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
player.controls = "controls";
/**
 * Fetch an audio resource and resolve with its raw bytes.
 * @param {string} src - URL of the audio file.
 * @returns {Promise<ArrayBuffer>} Rejects on network failure or non-2xx status.
 */
function get(src) {
  return fetch(src)
    .then(function(response) {
      // BUGFIX: fetch() only rejects on network errors; surface HTTP errors
      // here instead of handing an error page's bytes to decodeAudioData.
      if (!response.ok) {
        throw new Error("Request failed: " + response.status + " for " + src);
      }
      return response.arrayBuffer()
    })
}
/**
 * Stop every passed media node (buffer sources and/or a MediaRecorder)
 * after `duration` milliseconds.
 * @param {number} duration - Delay in milliseconds.
 * @param {...{stop: Function}} media - Objects exposing a stop() method.
 */
function stopMix(duration, ...media) {
  setTimeout(function (nodes) {
    for (const node of nodes) {
      node.stop();
    }
  }, duration, media);
}
// Fetch both tracks, decode them, render them together offline, then play and
// record the rendered mix for download.
Promise.all(sources.map(get)).then(function(data) {
// NOTE(review): `len` is the largest *encoded* byte length, used as the frame
// count of the OfflineAudioContext; it is not the decoded sample count, so
// the rendered length only approximates the real track length — confirm this
// is acceptable for the inputs used.
var len = Math.max.apply(Math, data.map(function(buffer) {
return buffer.byteLength
}));
context = new OfflineAudioContext(2, len, 44100);
return Promise.all(data.map(function(buffer) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
// Connect every decoded track to the same offline destination so they overlap.
var source = context.createBufferSource();
source.buffer = bufferSource;
source.connect(context.destination);
return source.start()
})
}))
.then(function() {
// Render all connected sources into a single AudioBuffer.
return context.startRendering()
})
.then(function(renderedBuffer) {
return new Promise(function(resolve) {
// Play the rendered mix audibly and into the recorder's stream at once.
var mix = audio.createBufferSource();
mix.buffer = renderedBuffer;
mix.connect(audio.destination);
mix.connect(mixedAudio);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
mix.start(0);
div.innerHTML = "playing and recording tracks..";
// stop playback and recorder in 60 seconds
stopMix(duration, mix, recorder)
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
console.log("recording complete");
resolve(blob)
};
})
})
.then(function(blob) {
console.log(blob);
div.innerHTML = "mixed audio tracks ready for download..";
// Offer the finished mix both as a download link and an inline player.
var audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
document.body.appendChild(a);
a.insertAdjacentHTML("afterend", "<br>");
player.src = audioDownload;
document.body.appendChild(player);
})
})
.catch(function(e) {
console.log(e)
});
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div>loading audio tracks.. please wait</div>
</body>
</html>
You can alternatively utilize AudioContext.createChannelMerger(), AudioContext.createChannelSplitter()
// Audio resources to mix (placeholder paths).
var sources = ["/path/to/audoi1", "/path/to/audio2"];
// Base name for the downloaded mixed file.
var description = "mix";
// Encoded chunks delivered by MediaRecorder.
var chunks = [];
// [splitter output channel -> merger input channel] routing per track.
var channels = [[0, 1], [1, 0]];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
// Stream destination bridging the merged Web Audio graph to MediaRecorder.
var mixedAudio = audio.createMediaStreamDestination();
// Hard stop for playback/recording, in milliseconds.
var duration = 60000;
var context;
var recorder;
var audioDownload;
player.controls = "controls";
/**
 * Fetch an audio resource and resolve with its raw bytes.
 * @param {string} src - URL of the audio file.
 * @returns {Promise<ArrayBuffer>} Rejects on network failure or non-2xx status.
 */
function get(src) {
  return fetch(src)
    .then(function(response) {
      // BUGFIX: fetch() only rejects on network errors; fail fast on HTTP
      // errors instead of passing an error page's bytes to decodeAudioData.
      if (!response.ok) {
        throw new Error("Request failed: " + response.status + " for " + src);
      }
      return response.arrayBuffer()
    })
}
/**
 * Invoke stop() on each supplied node after `duration` milliseconds.
 * @param {number} duration - Delay in ms before stopping playback/recording.
 * @param {...{stop: Function}} media - Buffer sources and/or a MediaRecorder.
 */
function stopMix(duration, ...media) {
  setTimeout((nodes) => {
    nodes.forEach((node) => node.stop());
  }, duration, media);
}
// Fetch and decode each track, route them through the splitter/merger pair so
// they play simultaneously, then record the merged output for download.
Promise.all(sources.map(get)).then(function(data) {
return Promise.all(data.map(function(buffer, index) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
// Route this track's channels into the shared merger.
var channel = channels[index];
var source = audio.createBufferSource();
source.buffer = bufferSource;
source.connect(splitter);
splitter.connect(merger, channel[0], channel[1]);
return source
})
}))
.then(function(audionodes) {
// Send the merged mix to both the recorder stream and the speakers.
merger.connect(mixedAudio);
merger.connect(audio.destination);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
// Start every track at the same time so they stay in sync.
audionodes.forEach(function(node) {
node.start(0)
});
stopMix(duration, ...audionodes, recorder);
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
// Build the download link and inline player for the recorded mix.
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
};
})
})
.catch(function(e) {
console.log(e)
});