Get video duration when a video file is selected via input - JavaScript

I'm doing a project with HTML and JavaScript that will run locally with local files.
I need to select a file via the input, get its information and then decide whether I'll add it to my list and play it or not. If I decide to use it, I'll put it on a queue to use later; otherwise I'll just discard it and select another file.
The problem I'm facing is that I can't find a way to get the video duration just by selecting it in the input.
I've searched a lot and didn't find any method to get the duration. In the code below I tried to use 'file.duration', but it didn't work; it just returns 'undefined'.
This is my input, normal as you can see.
<div id="input-upload-file" class="box-shadow">
<span>upload! (ღ˘⌣˘ღ)</span> <!--ignore the text face lol -->
<input type="file" class="upload" id="fileUp" name="fileUpload" onchange="setFileInfo()">
</div>
And this is the function that I'm using to get all the information.
function setFileInfo() {
  showInfo(); // change div content
  var file = document.getElementById("fileUp").files[0];
  var pid = 1;
  var Pname = file.name;
  Pname = Pname.slice(0, Pname.indexOf(".")); // get filename without extension
  var Ptype = file.type;
  var Psize = bytesToSize(file.size); // turns into KB, MB, etc...
  var Pprior = setPriority(Ptype); // returns 1, 2 or 3
  var Pdur = file.duration;
  var Pmem = getMemory(Psize); // returns size * (100 || 10 || 1)
  var Pown = 'user';
  /* a lot of stuff throwing this info to the HTML */
  console.log(Pdur);
}
Is there a way to do this? If not, what are the alternatives that could help me?

In modern browsers, you can use the URL API's URL.createObjectURL() with a non-appended video element to load the content of your file. Setting preload to 'metadata' tells the browser it only needs to fetch enough of the resource to expose properties such as duration.
var myVideos = [];

window.URL = window.URL || window.webkitURL;
document.getElementById('fileUp').onchange = setFileInfo;

function setFileInfo() {
  var files = this.files;
  myVideos.push(files[0]);
  var video = document.createElement('video');
  video.preload = 'metadata';

  video.onloadedmetadata = function() {
    window.URL.revokeObjectURL(video.src);
    var duration = video.duration;
    myVideos[myVideos.length - 1].duration = duration;
    updateInfos();
  }

  video.src = URL.createObjectURL(files[0]);
}

function updateInfos() {
  var infos = document.getElementById('infos');
  infos.textContent = "";
  for (var i = 0; i < myVideos.length; i++) {
    infos.textContent += myVideos[i].name + " duration: " + myVideos[i].duration + '\n';
  }
}
<div id="input-upload-file" class="box-shadow">
<span>upload! (ღ˘⌣˘ღ)</span>
<input type="file" class="upload" id="fileUp" name="fileUpload">
</div>
<pre id="infos"></pre>

I needed to validate a single file before continuing to execute more code; here is my method, with the help of Kaiido's answer!
The onchange event fires when a user uploads a file:
$("input[type=file]").on("change", function(e) {
var file = this.files[0]; // Get uploaded file
validateFile(file) // Validate Duration
e.target.value = ''; // Clear value to allow new uploads
})
Now validate duration:
function validateFile(file) {
  var video = document.createElement('video');
  video.preload = 'metadata';

  video.onloadedmetadata = function() {
    window.URL.revokeObjectURL(video.src);
    if (video.duration < 1) {
      console.log("Invalid Video! Video is less than 1 second");
      return;
    }
    methodToCallIfValid();
  }

  video.src = URL.createObjectURL(file);
}

Here is an async/await Promise version:
const loadVideo = file => new Promise((resolve, reject) => {
  try {
    let video = document.createElement('video')
    video.preload = 'metadata'

    video.onloadedmetadata = function () {
      resolve(this)
    }

    video.onerror = function () {
      reject("Invalid video. Please select a video file.")
    }

    video.src = window.URL.createObjectURL(file)
  } catch (e) {
    reject(e)
  }
})
Can be used as follows:
const video = await loadVideo(e.currentTarget.files[0])
console.log(video.duration)
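Note that await is only valid inside an async function (or at a module's top level), so a complete change handler might look like the sketch below; the fileUp id is borrowed from the question:

document.getElementById('fileUp').onchange = async (e) => {
  try {
    const video = await loadVideo(e.currentTarget.files[0])
    console.log(video.duration)
    URL.revokeObjectURL(video.src) // release the object URL once metadata is read
  } catch (err) {
    console.error(err)
  }
}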

It's also simple to get the video duration from a FileReader, and it's easy to manage with async/await.
const getVideoDuration = file =>
  new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => {
      const media = new Audio(reader.result);
      media.onloadedmetadata = () => resolve(media.duration);
    };
    reader.readAsDataURL(file);
    reader.onerror = error => reject(error);
  });
const duration = await getVideoDuration(file);
where file is a File object.
Live Example
const getVideoDuration = (file) =>
  new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => {
      const media = new Audio(reader.result);
      media.onloadedmetadata = () => resolve(media.duration);
    };
    reader.readAsDataURL(file);
    reader.onerror = (error) => reject(error);
  });

const handleChange = async (e) => {
  const duration = await getVideoDuration(e.target.files[0]);
  document.querySelector("#duration").innerText = `Duration: ${duration}`;
};

<div>
  <input type="file" onchange="handleChange(event)" />
  <p id="duration">Duration: </p>
</div>
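One caveat with this approach: readAsDataURL base64-encodes the entire file into memory, which gets expensive for multi-gigabyte videos. A sketch of the same promise shape built on an object URL instead (a hypothetical variant, not part of the original answer):

const getVideoDurationFromUrl = (file) =>
  new Promise((resolve, reject) => {
    const media = new Audio();
    media.preload = "metadata";
    media.onloadedmetadata = () => {
      URL.revokeObjectURL(media.src);
      resolve(media.duration);
    };
    media.onerror = () => reject(new Error("Could not load metadata"));
    media.src = URL.createObjectURL(file); // no base64 copy of the file
  });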

This is how I managed to get the video duration before pushing it to S3. I am using this code to upload video files of 4+ GB.
Note: for formats the browser cannot decode (.avi, .flv, .vob, .mpeg, etc.) the duration will not be found, so handle that case with a simple message to the user.
// the same method can be used for images/audio too, with some small changes
getVideoDuration = async (f) => {
  const fileCallbackToPromise = (fileObj) => {
    return Promise.race([
      new Promise((resolve) => {
        if (fileObj instanceof HTMLImageElement) fileObj.onload = resolve;
        else fileObj.onloadedmetadata = resolve;
      }),
      new Promise((_, reject) => {
        // give up if the metadata has not loaded within one second
        setTimeout(reject, 1000);
      }),
    ]);
  };

  const objectUrl = URL.createObjectURL(f);
  // const isVideo = type.startsWith('video/');
  const video = document.createElement("video");
  video.src = objectUrl;
  await fileCallbackToPromise(video);
  return {
    duration: video.duration,
    width: video.videoWidth,
    height: video.videoHeight,
  };
}

// call the function
// (unrelated code removed for clarity)
const meta = await this.getVideoDuration(file);
// meta.width, meta.height and meta.duration are now ready for you to use
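The Promise.race above rejects on the one-second timeout, which is also what happens for container formats the browser cannot parse, so the call site should catch that and surface a message; a minimal sketch (the message text is my own placeholder):

let meta;
try {
  meta = await this.getVideoDuration(file);
} catch (e) {
  // metadata never loaded: unsupported container or decode failure
  alert("Could not read the video's duration; the format may not be supported.");
  return;
}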

I've implemented getting the video duration in the nano-metadata package: https://github.com/kalashnikovisme/nano-metadata.
You can do something like this:
import nanoMetadata from 'nano-metadata'

const change = (e) => {
  const file = e.target.files[0]

  nanoMetadata.video.duration(file).then((duration) => {
    console.log(duration) // will show you the video duration in seconds
  })
}
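Since nanoMetadata.video.duration() returns a promise (as the .then() above shows), the same call composes with async/await, e.g.:

const change = async (e) => {
  const duration = await nanoMetadata.video.duration(e.target.files[0])
  console.log(duration) // duration in seconds
}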

Related

Selecting Multiple Images and storing them in a data array (FIGMA)

I am building a plugin for FIGMA where the user selects multiple images, which I then save in an array and send off to be interpreted.
I have two issues with my code:
img.src = bin; does not trigger img.onload, but if I set img.src = "literal string", the onload method works.
The imageData array sent at the end is undefined, I assume because of my poor understanding of async functions.
I would appreciate your help figuring this out. Thank you.
P.S. This is pure JavaScript, and you don't need to know FIGMA to follow the code.
<input type="file" id="images" accept="image/png" multiple />
<button id="button">Create image</button>
<script>
const button = document.getElementById('button');
button.onclick = () => {
const files = document.getElementById('images').files;
function readFile(index) {
console.log(1);
if (index >= files.length) {return};
console.log(2);
const file = files[index];
const imageCaption = file.name;
var reader = new FileReader();
reader.readAsArrayBuffer(file);
reader.onload = function (e) {
console.log(4);
// get file content
const bin = e.target.result;
const imageBytes = new Uint8Array(bin);
//Get Image width and height
var img = new Image();
img.src = bin;
img.onload = function () {
console.log(6);
const width = img.width;
const height = img.height;
console.log("imageCaption: " + imageCaption);
console.log("width: " + width);
console.log("height: " + height);
console.log("imageBytes: " + imageBytes);
var data = {imageBytes, imageCaption, width, height};
//Read Next File
nextData = readFile(index + 1);
if( nextData ) {
data.concat(nextData)
}
return data;
}
}
}
//Call function to Read and Send Images
const imageData = readFile(0);
//Send Data
parent.postMessage({
pluginMessage: {
type: 'send-image',
imageData,
}
}, '*');
A friend ended up helping me with it. Thank you Hiba!
const button = document.getElementById('button');
const input = document.getElementById('input');

button.addEventListener('click', async () => {
  const files = input.files ? [...input.files] : [];
  const data = await Promise.all(
    files.map(async (file) => await getImageData(file))
  );
  console.log(data);
});

async function getImageData(file) {
  // get binary data from file
  const bin = await file.arrayBuffer();
  // translate bin data into bytes
  const imageBytes = new Uint8Array(bin);
  // create data blob from bytes
  const blob = new Blob([imageBytes], { type: "image/png" });
  // create html image element and assign blob as src
  const img = new Image();
  img.src = URL.createObjectURL(blob);
  // get width and height from rendered image
  await img.decode();
  const { width, height } = img;
  const data = { image: blob, caption: file.name, width, height };
  return data;
}
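For context on the original bug: an ArrayBuffer is not a valid value for img.src, which expects a URL string, so onload never fires; the version above works because the bytes are wrapped in a Blob and exposed through URL.createObjectURL(). Since a File is itself a Blob, the arrayBuffer() round-trip can even be skipped when you only need the dimensions; a minimal sketch:

async function getImageSize(file) {
  const img = new Image();
  img.src = URL.createObjectURL(file); // a File is a Blob, so this works directly
  await img.decode();                  // resolves once the image is decoded
  URL.revokeObjectURL(img.src);
  return { width: img.width, height: img.height };
}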

high quality media recorder from canvas 30 fps at 1080p

I have a canvas app that currently captures images of the canvas and compiles a video that is sent to ffmpeg, which then outputs the video format of their choice. The problem is it's super slow! Not the video conversion, but the compiling of the actual frames: I have to pause the video and the animation and take a screenshot of the canvas. So rather than taking screenshots I was thinking about using MediaRecorder and canvas.captureStream. I am able to get video output, but the quality is really low and the video keeps dropping frames. I need the frame rate to be at least 30 fps and the quality to be high. Here's my record function:
async [RECORD]({ state }) {
  state.videoOutputURL = null;
  state.outputVideo = document.createElement("video");
  const videoStream = state.canvas.captureStream(30);
  const mediaRecorder = new MediaRecorder(videoStream);

  mediaRecorder.ondataavailable = function(e) {
    state.captures.push(e.data);
  };

  mediaRecorder.onstop = function(e) {
    const blob = new Blob(state.captures);
    state.captures = [];
    const videoURL = URL.createObjectURL(blob);
    state.outputVideo.src = videoURL;
    state.outputVideo.width = 1280;
    state.outputVideo.height = 720;
    document.body.append(state.outputVideo);
  };

  mediaRecorder.start();
  state.anim.start();
  state.video.play();
  lottie.play();

  state.video.addEventListener("ended", async () => {
    mediaRecorder.stop();
  });
}
The best way I found to do this was to actually pause the video on a canvas and use canvas.toDataURL to take screenshots. I compile the screenshots into a video with a library called Whammy and send that over to FFmpeg to render the final content. The following code should give a pretty good idea:
async [TAKE_SCREENSHOT]({ state, dispatch }) {
  let seekResolve;
  if (!state.ended && state.video) {
    state.video.addEventListener("seeked", async () => {
      if (seekResolve) seekResolve();
    });
    await new Promise(async (resolve, reject) => {
      if (state.animations.length) {
        dispatch(PAUSE_LOTTIES);
      }
      dispatch(PAUSE_VIDEO);
      await new Promise(r => (seekResolve = r));
      if (state.layer) {
        state.layer.draw();
      }
      if (state.canvas) {
        state.captures.push(state.canvas.toDataURL("image/webp"));
      }
      resolve();
      dispatch(TAKE_SCREENSHOT);
    });
  }
},

async [PAUSE_VIDEO]({ state, dispatch, commit }) {
  state.video.pause();
  const oneFrame = 1 / 30;
  if (state.video.currentTime + oneFrame < state.video.duration) {
    state.video.currentTime += oneFrame;
    const percent = `${Math.round(
      (state.video.currentTime / state.video.duration) * 100
    )}%`;
    commit(SET_MODAL_STATUS, percent);
  } else {
    commit(SET_MODAL_STATUS, "Uploading your video");
    state.video.play();
    state.ended = true;
    await dispatch(GENERATE_VIDEO);
  }
},

async [PAUSE_LOTTIES]({ state }) {
  for (let i = 0; i < state.animations.length; i++) {
    let step = 0;
    let animation = state.animations[i].lottie;
    if (animation.currentFrame <= animation.totalFrames) {
      step = animation.currentFrame + 1;
    }
    await lottie.goToAndStop(step, true, animation.name);
  }
},

async [GENERATE_VIDEO]({ state, rootState, dispatch, commit }) {
  let status;
  state.editingZoom = null;
  const username = rootState.user.currentUser.username;
  const email = rootState.user.currentUser.email || rootState.user.guestEmail;
  const name = rootState.user.currentUser.firstName || "guest";
  const s3Id = rootState.templates.currentVideo.stock_s3_id || state.s3Id;
  const type = rootState.dataClay.fileFormat || state.type;
  const vid = new Whammy.fromImageArray(state.captures, 30);
  vid.lastModifiedDate = new Date();
  vid.name = "canvasVideo.webm";
  const data = new FormData();
  const id = `${username}_${new Date().getTime()}`;
  data.append("id", id);
  data.append("upload", vid);
  let projectId,
    fileName,
    matrix = null;
  if (!state.editorMode) {
    projectId = await dispatch(INSERT_PROJECT);
    fileName = `${rootState.dataClay.projectName}.${type}`;
    matrix = rootState.dataClay.matrix[0];
  } else {
    matrix = rootState.canvasSidebarMenu.selectedDisplay;
    projectId = id;
    fileName = `${id}.${type}`;
  }
  if (projectId || state.editorMode) {
    await dispatch(UPLOAD_TEMP_FILE, data);
    const key = await dispatch(CONVERT_FILE_TYPE, {
      id,
      username,
      type,
      projectId,
      matrix,
      name,
      email,
      editorMode: state.editorMode
    });
    const role = rootState.user.currentUser.role;
    state.file = `/api/files/${key}`;
    let message;
    let title = "Your video is ready";
    status = "rendered";
    if (!key) {
      status = "failed";
      message =
        "<p class='error'>Error processing video! If error continues please contact Creative Group. We are sorry for any inconvenience.</p>";
      title = "Error!";
    } else if (!rootState.user.currentUser.id) {
      message = `<p>Your video is ready. Signup for more great content!</p> <a href="${
        state.file
      }" download="${fileName}" class="btn btn-primary btn-block">Download</a>`;
    } else if (role != "banner") {
      message = `<p>Your video is ready.</p> <a href="${
        state.file
      }" download="${fileName}" class="btn btn-primary btn-block">Download</a>`;
    } else {
      message = `<p>Your video is ready. You may download your file from your banner account</p>`;
      await dispatch(EXPORT_TO_BANNER, {
        s3Id,
        fileUrl: key,
        extension: `.${type}`,
        resolution: matrix
      });
    }
    if (state.editorMode) {
      await dispatch(SAVE_CANVAS, { status, fileId: projectId });
    }
    state.video.loop = "loop";
    state.anim.stop();
    state.video.pause();
    lottie.unfreeze();
    await dispatch(DELETE_PROJECT_IN_PROGRESS);
    commit(RESET_PROJECT_IN_PROGRESS);
    commit(RESET_CANVAS);
    if (rootState.user.currentUser.id) {
      router.push("/account/projects");
    } else {
      router.push("/pricing");
    }
    dispatch(SHOW_MODAL, {
      name: "message",
      title,
      message
    });
  } else {
    await dispatch(FETCH_ALL_PUBLISHED_TEMPLATES);
    await dispatch(DELETE_PROJECT_IN_PROGRESS);
    commit(RESET_PROJECT_IN_PROGRESS);
    commit(RESET_CANVAS);
  }
},
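As an aside, if you do stay with MediaRecorder, the low quality in the question's first attempt is often just the default bitrate: MediaRecorder accepts bitrate hints in its options. A sketch, with the numbers purely illustrative:

const videoStream = canvas.captureStream(30);
const mediaRecorder = new MediaRecorder(videoStream, {
  mimeType: "video/webm;codecs=vp9", // check MediaRecorder.isTypeSupported() first
  videoBitsPerSecond: 8000000        // ~8 Mbps; tune for 1080p at 30 fps
});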

How to use "segments" mode at SourceBuffer of MediaSource to render same result at Chomium, Chorme and Firefox?

Reference to my original question: How to use Blob URL, MediaSource or other methods to play concatenated Blobs of media fragments?
In light of the potential deprecation of the "sequence" mode for multiple tracks, which the current code is using for both Chromium and Firefox browsers, my additional questions are:
Which adjustments need to be made in my MediaSource code to render the same result in both Chromium and Firefox when SourceBuffer .mode is set to "segments" (Firefox currently renders as expected)?
Or, is there a bug in the implementation of multitrack support in Chromium browsers when SourceBuffer .mode is set to "segments"?
Background information
I have been able to record discrete media fragments using MediaRecorder, add cues to the resulting webm file using ts-ebml, and record the discrete media fragments as a single media file using MediaSource with the .mode of SourceBuffer set to "sequence", in both Chromium and Firefox browsers.
The Chromium issue at Monitor and potentially deprecate support for multitrack SourceBuffer support of 'sequence' AppendMode notes that "sequence" mode is being considered for deprecation for multitrack SourceBuffer objects. When asked in the original referenced question how to implement the code using "segments" .mode (the default AppendMode of SourceBuffer), the response was essentially that "segments" mode also supports multitrack input at SourceBuffer.
However, when trying the code with the .mode of SourceBuffer set to "segments", Chromium 60 only plays approximately one second (the first of the multiple appended buffers) of an expected ten-second playback of recorded media fragments, which have cues set in the webm file that is converted to an ArrayBuffer and passed to .appendBuffer(); Firefox renders the same result whether .mode is set to "sequence" or "segments".
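For reference, the behavioral difference comes down to the append loop; a minimal sketch of appending in "segments" mode with an explicit timestampOffset, assuming each fragment's duration is known (the full code follows below):

const sourceBuffer = mediaSource.addSourceBuffer("video/webm;codecs=vp8,opus");
sourceBuffer.mode = "segments"; // the default; buffers are placed by their own timestamps

for (const { mediaBuffer, mediaDuration } of mediaFragments) {
  await new Promise(resolve => {
    sourceBuffer.onupdateend = () => {
      // shift the next fragment past the one just appended
      sourceBuffer.timestampOffset += mediaDuration;
      resolve();
    };
    sourceBuffer.appendBuffer(mediaBuffer);
  });
}
mediaSource.endOfStream();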
Below is the code which renders the expected result in both Chromium and Firefox. Note: Firefox does not play the .mp4 files in a <video> element if multipleUrls is tried, though Firefox does support playing .mp4 at MediaSource when the proper media codec is set.
<!DOCTYPE html>
<html>
<!-- recordMediaFragments.js demo https://github.com/guest271314/recordMediaFragments/tree/master/demos 2017 guest271314 -->
<head>
  <!-- https://github.com/guest271314/recordMediaFragments/ts-ebml -->
</head>
<body>
  <video width="320" height="280" controls="true"></video>
  <script>
  (async() => {
    let request = await fetch("https://raw.githubusercontent.com/guest271314/recordMediaFragments/master/ts-ebml/ts-ebml-min.js");
    let blob = await request.blob();
    const script = document.createElement("script");
    document.head.appendChild(script);
    script.src = URL.createObjectURL(blob);
    script.onload = () => {
      const tsebml = require("ts-ebml");
      const video = document.querySelector("video");
      const videoStream = document.createElement("video");
      // `MediaSource`
      const mediaSource = new MediaSource();
      // for firefox
      // see https://bugzilla.mozilla.org/show_bug.cgi?id=1259788
      const hasCaptureStream = HTMLMediaElement.prototype.hasOwnProperty("captureStream");
      // handle firefox and chromium
      const captureStream = mediaElement =>
        !!mediaElement.mozCaptureStream
          ? mediaElement.mozCaptureStream()
          : mediaElement.captureStream();
      let currentFragmentURL, currentBlobURL, fragments;
      videoStream.width = video.width;
      videoStream.height = video.height;
      const mimeCodec = "video/webm;codecs=vp8,opus";
      // set to `.currentTime` of `videoStream` at `pause`
      // to set next media fragment starting `.currentTime`
      // if URL to be set at `.src` has same origin and pathname
      let cursor = 0;
      // https://gist.github.com/jsturgis/3b19447b304616f18657
      // https://www.w3.org/2010/05/video/mediaevents.html
      const multipleUrls = [
        "https://media.w3.org/2010/05/sintel/trailer.mp4#t=0,5",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=55,60",
        "https://raw.githubusercontent.com/w3c/web-platform-tests/master/media-source/mp4/test.mp4#t=0,5",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerBlazes.mp4#t=0,5",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerJoyrides.mp4#t=0,5",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerMeltdowns.mp4#t=0,6",
        "https://media.w3.org/2010/05/video/movie_300.mp4#t=30,36"
      ];
      const singleUrl = [
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=0,1",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=1,2",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=2,3",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=3,4",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=4,5",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=5,6",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=6,7",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=7,8",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=8,9",
        "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=9,10"
      ];
      const geckoUrl = [
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=10,11",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=11,12",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=12,13",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=13,14",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=14,15",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=15,16",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=16,17",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=17,18",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=18,19",
        "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=19,20"
      ];
      const mediaFragmentRecorder = async(urls) => {
        // `ts-ebml`
        const tsebmlTools = async() => ({
          decoder: new tsebml.Decoder(),
          encoder: new tsebml.Encoder(),
          reader: new tsebml.Reader(),
          tools: tsebml.tools
        });
        // create `ArrayBuffer` from `Blob`
        const readAsArrayBuffer = (blob) => {
          return new Promise((resolve, reject) => {
            const fr = new FileReader();
            fr.readAsArrayBuffer(blob);
            fr.onloadend = () => {
              resolve(fr.result);
            };
            fr.onerror = (ev) => {
              reject(ev.error);
            };
          });
        }
        // `urls`: string or array of URLs
        // record each media fragment
        const recordMediaFragments = async(video, mimeCodec, decoder, encoder, reader, tools, ...urls) => {
          urls = [].concat(...urls);
          const media = [];
          for (let url of urls) {
            await new Promise(async(resolve) => {
              let mediaStream, recorder;
              videoStream.onprogress = e => {
                videoStream.onprogress = null;
                console.log("loading " + url)
              }
              videoStream.oncanplay = async(e) => {
                videoStream.oncanplay = null;
                videoStream.play();
                mediaStream = captureStream(videoStream);
                console.log(mediaStream);
                recorder = new MediaRecorder(mediaStream, {
                  mimeType: mimeCodec
                });
                recorder.ondataavailable = async(e) => {
                  // set metadata of recorded media fragment `Blob`
                  const mediaBlob = await setMediaMetadata(e.data);
                  // create `ArrayBuffer` of `Blob` of recorded media fragment
                  const mediaBuffer = await readAsArrayBuffer(mediaBlob);
                  const mediaDuration = videoStream.played.end(0) - videoStream.played.start(0);
                  const mediaFragmentId = currentFragmentURL || new URL(url);
                  const mediaFileName = mediaFragmentId.pathname.split("/").pop() + mediaFragmentId.hash;
                  const mediaFragmentType = "singleMediaFragment";
                  if (currentBlobURL) {
                    URL.revokeObjectURL(currentBlobURL);
                  }
                  media.push({
                    mediaBlob,
                    mediaBuffer,
                    mediaDuration,
                    mediaFragmentType,
                    mediaFileName
                  });
                  resolve();
                }
                recorder.start();
              }
              videoStream.onpause = e => {
                videoStream.onpause = null;
                cursor = videoStream.currentTime;
                recorder.stop();
                // stop `MediaStreamTrack`s
                for (let track of mediaStream.getTracks()) {
                  track.stop();
                }
              }
              currentFragmentURL = new URL(url);
              // for firefox to load cross origin media without silence
              if (!hasCaptureStream) {
                console.log(currentFragmentURL);
                request = new Request(currentFragmentURL.href);
                blob = await fetch(request).then(response => response.blob());
                console.log(blob);
                currentBlobURL = URL.createObjectURL(blob);
                // set next media fragment URL to `.currentTime` at `pause` event
                // of previous media fragment if `url` has same `origin` and `pathname`
                if (urls.indexOf(currentFragmentURL.href) > 0
                  && new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).origin === currentFragmentURL.origin
                  && new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).pathname === currentFragmentURL.pathname) {
                  if (cursor > 0) {
                    url = currentBlobURL + currentFragmentURL.hash.replace(/=\d+/, "=" + cursor);
                    console.log(url)
                  }
                } else {
                  url = currentBlobURL + currentFragmentURL.hash;
                }
              } else {
                if (cursor > 0
                  && new URL(urls[urls.indexOf(url) - 1]).origin === currentFragmentURL.origin
                  && new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).pathname === currentFragmentURL.pathname) {
                  url = url.replace(/=\d+/, "=" + cursor);
                  console.log(url)
                }
              }
              videoStream.src = url;
            }).catch(err => err)
          }
          return media
        }
        // set metadata of media `Blob`
        // see https://github.com/legokichi/ts-ebml/issues/14#issuecomment-325200151
        const setMediaMetadata = async(blob) =>
          tsebmlTools()
            .then(async({ decoder, encoder, tools, reader }) => {
              let webM = new Blob([], { type: "video/webm" });
              webM = new Blob([webM, blob], { type: blob.type });
              const buf = await readAsArrayBuffer(blob);
              const elms = decoder.decode(buf);
              elms.forEach((elm) => {
                reader.read(elm);
              });
              reader.stop();
              const refinedMetadataBuf = tools.makeMetadataSeekable(reader.metadatas, reader.duration, reader.cues);
              const webMBuf = await readAsArrayBuffer(webM);
              const body = webMBuf.slice(reader.metadataSize);
              const refinedWebM = new Blob([refinedMetadataBuf, body], { type: webM.type });
              // close Blobs
              if (webM.close && blob.close) {
                webM.close();
                blob.close();
              }
              return refinedWebM;
            })
            .catch(err => console.error(err));
        let mediaTools = await tsebmlTools();
        const { decoder, encoder, reader, tools } = mediaTools;
        const mediaFragments = await recordMediaFragments(video, mimeCodec, decoder, encoder, reader, tools, urls);
        const recordedMedia = await new Promise((resolveAllMedia, rejectAllMedia) => {
          console.log(decoder, encoder, tools, reader, mediaFragments);
          let mediaStream, recorder;
          mediaSource.onsourceended = e => {
            console.log(video.buffered.start(0), video.buffered.end(0));
            video.currentTime = video.buffered.start(0);
            console.log(video.paused, video.readyState);
            video.ontimeupdate = e => {
              console.log(video.currentTime, mediaSource.duration);
              if (video.currentTime >= mediaSource.duration) {
                video.ontimeupdate = null;
                video.oncanplay = null;
                video.onwaiting = null;
                if (recorder.state === "recording") {
                  recorder.stop();
                }
                console.log(e, recorder);
              }
            }
          }
          video.onended = (e) => {
            video.onended = null;
            console.log(e, video.currentTime, mediaSource.duration);
          }
          video.oncanplay = e => {
            console.log(e, video.duration, video.buffered.end(0));
            video.play()
          }
          video.onwaiting = e => {
            console.log(e, video.currentTime);
          }
          // record `MediaSource` playback of recorded media fragments
          video.onplaying = async(e) => {
            console.log(e);
            video.onplaying = null;
            mediaStream = captureStream(video);
            if (!hasCaptureStream) {
              videoStream.srcObject = mediaStream;
              videoStream.play();
            }
            recorder = new MediaRecorder(mediaStream, {
              mimeType: mimeCodec
            });
            console.log(recorder);
            recorder.ondataavailable = async(e) => {
              console.log(e);
              const mediaFragmentsRecording = {};
              mediaFragmentsRecording.mediaBlob = await setMediaMetadata(e.data);
              mediaFragmentsRecording.mediaBuffer = await readAsArrayBuffer(mediaFragmentsRecording.mediaBlob);
              mediaFragmentsRecording.mediaFileName = urls.map(url => {
                const id = new URL(url);
                return id.pathname.split("/").pop() + id.hash
              }).join("-");
              mediaFragmentsRecording.mediaFragmentType = "multipleMediaFragments";
              // `<video>` to play concatenated media fragments
              // recorded from playback of `MediaSource`
              fragments = document.createElement("video");
              fragments.id = "fragments";
              fragments.width = video.width;
              fragments.height = video.height;
              fragments.controls = true;
              fragments.onloadedmetadata = () => {
                fragments.onloadedmetadata = null;
                mediaFragmentsRecording.mediaDuration = fragments.duration;
                URL.revokeObjectURL(currentBlobURL);
                // stop `MediaStreamTrack`s
                for (let track of mediaStream.getTracks()) {
                  track.stop();
                }
                resolveAllMedia([
                  ...mediaFragments, mediaFragmentsRecording
                ]);
              }
              currentBlobURL = URL.createObjectURL(mediaFragmentsRecording.mediaBlob);
              fragments.src = currentBlobURL;
              document.body.appendChild(fragments);
            }
            recorder.start();
          }
          video.src = URL.createObjectURL(mediaSource);
          mediaSource.addEventListener("sourceopen", sourceOpen);
          async function sourceOpen(e) {
            if (MediaSource.isTypeSupported(mimeCodec)) {
              const sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
              sourceBuffer.mode = "segments";
              for (let { mediaBuffer, mediaDuration } of mediaFragments) {
                await new Promise((resolveUpdatedMediaSource) => {
                  sourceBuffer.onupdateend = async(e) => {
                    sourceBuffer.onupdateend = null;
                    console.log(e, mediaDuration, mediaSource.duration,
                      video.paused, video.ended, video.currentTime,
                      "media source playing", video.readyState);
                    // https://bugzilla.mozilla.org/show_bug.cgi?id=1400587
                    // https://bugs.chromium.org/p/chromium/issues/detail?id=766002&q=label%3AMSEptsdtsCleanup
                    try {
                      sourceBuffer.timestampOffset += mediaDuration;
                      resolveUpdatedMediaSource();
                    } catch (err) {
                      console.error(err);
                      resolveUpdatedMediaSource();
                    }
                  }
                  sourceBuffer.appendBuffer(mediaBuffer);
                })
              }
              mediaSource.endOfStream()
            } else {
              console.warn(mimeCodec + " not supported");
            }
          };
        })
        return recordedMedia
      };
      mediaFragmentRecorder(geckoUrl)
        .then(recordedMediaFragments => {
          // do stuff with recorded media fragments
          console.log(recordedMediaFragments);
          const select = document.createElement("select");
          for (let { mediaFileName, mediaBlob, mediaFragmentType } of Object.values(recordedMediaFragments)) {
            const option = new Option(mediaFileName, URL.createObjectURL(mediaBlob));
            select.appendChild(option);
          }
          select.onchange = () => {
            document.getElementById("fragments").src = select.value;
          }
          video.parentNode.insertBefore(select, video);
          video.controls = true;
          video.currentTime = video.buffered.start(0);
        })
        .catch(err => console.error(err));
    }
  })()
  </script>
</body>
</html>

Mixing two audio buffers, putting one in the background of the other, using the Web Audio API

I want to mix two audio sources by putting one song as the background of another, into a single source.
For example, I have this input:
<input id="files" type="file" name="files[]" multiple onchange="handleFilesSelect(event)"/>
And the script to decode these files:
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new window.AudioContext();
var sources = [];
var files = [];
var mixed = {};

function handleFilesSelect(event) {
  if (event.target.files.length <= 1)
    return false;
  files = event.target.files;
  readFiles(0, mixAudioSources); // start at index 0
}

function readFiles(index, callback) {
  var freader = new FileReader();
  var i = index ? index : 0;
  freader.onload = function (e) {
    context.decodeAudioData(e.target.result, function (buf) {
      sources[i] = context.createBufferSource();
      sources[i].connect(context.destination);
      sources[i].buffer = buf;
      if (files.length > i + 1) {
        readFiles(i + 1, callback);
      } else {
        if (callback) {
          callback();
        }
      }
    });
  };
  freader.readAsArrayBuffer(files[i]);
}

function mixAudioSources() {
  // So in this scenario we have two decoded audio sources in the "sources" array.
  // How can we mix those "sources" into the "mixed" variable, putting "sources[0]" as the background of "sources[1]"?
}
So how can I mix these sources into one source? For example, I have two files: how can I put one source as the background of another and put this mix into a single source?
Another scenario: if I read an input stream from the microphone, for example, and I want to put this input over a background song (some kind of karaoke), is it possible to do this work on the client with HTML5 support? What about performance? Or would it be better to mix these audio sources on the server side?
If it is possible, what is a possible implementation of the mixAudioSources function?
Thanks.
Two approaches, originally posted at Is it possible to mix multiple audio files on top of each other preferably with javascript, adjusted here to process File objects at the change event of an <input type="file"> element.
The first approach utilizes OfflineAudioContext(), AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination(), the Promise constructor, Promise.all() and MediaRecorder() to mix the audio tracks, then offers the mixed audio file for download.
var div = document.querySelector("div");

function handleFilesSelect(input) {
  div.innerHTML = "loading audio tracks.. please wait";
  var files = Array.from(input.files);
  var duration = 60000;
  var chunks = [];
  var audio = new AudioContext();
  var mixedAudio = audio.createMediaStreamDestination();
  var player = new Audio();
  var context;
  var recorder;
  var description = "";

  player.controls = "controls";

  function get(file) {
    description += file.name.replace(/\..*|\s+/g, "");
    return new Promise(function(resolve, reject) {
      var reader = new FileReader;
      reader.readAsArrayBuffer(file);
      reader.onload = function() {
        resolve(reader.result)
      }
    })
  }

  function stopMix(duration, ...media) {
    setTimeout(function(media) {
      media.forEach(function(node) {
        node.stop()
      })
    }, duration, media)
  }

  Promise.all(files.map(get)).then(function(data) {
      var len = Math.max.apply(Math, data.map(function(buffer) {
        return buffer.byteLength
      }));
      context = new OfflineAudioContext(2, len, 44100);
      return Promise.all(data.map(function(buffer) {
          return audio.decodeAudioData(buffer)
            .then(function(bufferSource) {
              var source = context.createBufferSource();
              source.buffer = bufferSource;
              source.connect(context.destination);
              return source.start()
            })
        }))
        .then(function() {
          return context.startRendering()
        })
        .then(function(renderedBuffer) {
          return new Promise(function(resolve) {
            var mix = audio.createBufferSource();
            mix.buffer = renderedBuffer;
            mix.connect(audio.destination);
            mix.connect(mixedAudio);
            recorder = new MediaRecorder(mixedAudio.stream);
            recorder.start(0);
            mix.start(0);
            div.innerHTML = "playing and recording tracks..";
            // stop playback and recorder in 60 seconds
            stopMix(duration, mix, recorder)
            recorder.ondataavailable = function(event) {
              chunks.push(event.data);
            };
            recorder.onstop = function(event) {
              var blob = new Blob(chunks, {
                "type": "audio/ogg; codecs=opus"
              });
              console.log("recording complete");
              resolve(blob)
            };
          })
        })
        .then(function(blob) {
          console.log(blob);
          div.innerHTML = "mixed audio tracks ready for download..";
          var audioDownload = URL.createObjectURL(blob);
          var a = document.createElement("a");
          a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
          a.href = audioDownload;
          a.innerHTML = a.download;
          document.body.appendChild(a);
          a.insertAdjacentHTML("afterend", "<br>");
          player.src = audioDownload;
          document.body.appendChild(player);
        })
    })
    .catch(function(e) {
      console.log(e)
    });
}
<!DOCTYPE html>
<html>
<head>
</head>
<body>
  <input id="files"
         type="file"
         name="files[]"
         accept="audio/*"
         multiple
         onchange="handleFilesSelect(this)" />
  <div></div>
</body>
</html>
The second approach uses AudioContext.createChannelMerger() and AudioContext.createChannelSplitter(): each decoded source is routed through a splitter into a shared merger, whose output is both played and recorded.
var div = document.querySelector("div");

function handleFilesSelect(input) {
  div.innerHTML = "loading audio tracks.. please wait";
  var files = Array.from(input.files);
  var chunks = [];
  var channels = [
    [0, 1],
    [1, 0]
  ];
  var audio = new AudioContext();
  var player = new Audio();
  var merger = audio.createChannelMerger(2);
  var splitter = audio.createChannelSplitter(2);
  var mixedAudio = audio.createMediaStreamDestination();
  var duration = 60000;
  var context;
  var recorder;
  var audioDownload;
  var description = "";

  player.controls = "controls";

  function get(file) {
    description += file.name.replace(/\..*|\s+/g, "");
    console.log(description);
    return new Promise(function(resolve, reject) {
      var reader = new FileReader;
      reader.readAsArrayBuffer(file);
      reader.onload = function() {
        resolve(reader.result)
      }
    })
  }

  function stopMix(duration, ...media) {
    setTimeout(function(media) {
      media.forEach(function(node) {
        node.stop()
      })
    }, duration, media)
  }

  Promise.all(files.map(get)).then(function(data) {
      return Promise.all(data.map(function(buffer, index) {
          return audio.decodeAudioData(buffer)
            .then(function(bufferSource) {
              var channel = channels[index];
              var source = audio.createBufferSource();
              source.buffer = bufferSource;
              source.connect(splitter);
              splitter.connect(merger, channel[0], channel[1]);
              return source
            })
        }))
        .then(function(audionodes) {
          merger.connect(mixedAudio);
          merger.connect(audio.destination);
          recorder = new MediaRecorder(mixedAudio.stream);
          recorder.start(0);
          audionodes.forEach(function(node, index) {
            node.start(0)
          });
          div.innerHTML = "playing and recording tracks..";
          stopMix(duration, ...audionodes, recorder);
          recorder.ondataavailable = function(event) {
            chunks.push(event.data);
          };
          recorder.onstop = function(event) {
            var blob = new Blob(chunks, {
              "type": "audio/ogg; codecs=opus"
            });
            audioDownload = URL.createObjectURL(blob);
            var a = document.createElement("a");
            a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
            a.href = audioDownload;
            a.innerHTML = a.download;
            player.src = audioDownload;
            document.body.appendChild(a);
            document.body.appendChild(player);
          };
        })
    })
    .catch(function(e) {
      console.log(e)
    });
}
<!DOCTYPE html>
<html>
<head>
</head>
<body>
  <input id="files"
         type="file"
         name="files[]"
         accept="audio/*"
         multiple onchange="handleFilesSelect(this)" />
  <div></div>
</body>
</html>
I just want to complement the excellent answer of guest271314 and post here the solution, based on guest271314's answer, for the second scenario (where the second source is microphone input). It is effectively client-side karaoke. The script:
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new window.AudioContext();
var playbackTrack = null;

function handleFileSelect(event) {
  var file = event.files[0];
  var freader = new FileReader();
  freader.onload = function (e) {
    context.decodeAudioData(e.target.result, function (buf) {
      playbackTrack = context.createBufferSource();
      playbackTrack.buffer = buf;
      var karaokeButton = document.getElementById("karaoke_start");
      karaokeButton.style.display = "inline-block";
      karaokeButton.addEventListener("click", function() {
        startKaraoke();
      });
    });
  };
  freader.readAsArrayBuffer(file);
}

function stopMix(duration, mediaRecorder) {
  setTimeout(function(mediaRecorder) {
    mediaRecorder.stop();
    context.close();
  }, duration, mediaRecorder)
}

function startKaraoke() {
  navigator.mediaDevices.getUserMedia({ audio: true, video: false })
    .then(function(stream) {
      var mixedAudio = context.createMediaStreamDestination();
      var merger = context.createChannelMerger(2);
      var splitter = context.createChannelSplitter(2);
      var duration = 5000;
      var chunks = [];
      var channel1 = [0, 1];
      var channel2 = [1, 0];
      var gainNode = context.createGain();
      playbackTrack.connect(gainNode);
      gainNode.connect(splitter);
      gainNode.gain.value = 0.5; // from 0 to 1
      splitter.connect(merger, channel1[0], channel1[1]);
      var microphone = context.createMediaStreamSource(stream);
      microphone.connect(splitter);
      splitter.connect(merger, channel2[0], channel2[1]);
      merger.connect(mixedAudio);
      merger.connect(context.destination);
      playbackTrack.start(0);
      var mediaRecorder = new MediaRecorder(mixedAudio.stream);
      mediaRecorder.start(1);
      mediaRecorder.ondataavailable = function (event) {
        chunks.push(event.data);
      }
      mediaRecorder.onstop = function(event) {
        var player = new Audio();
        player.controls = "controls";
        var blob = new Blob(chunks, {
          "type": "audio/mp3"
        });
        audioDownload = URL.createObjectURL(blob);
        var a = document.createElement("a");
        a.download = "karaokefile." + blob.type.replace(/.+\/|;.+/g, "");
        a.href = audioDownload;
        a.innerHTML = a.download;
        player.src = audioDownload;
        document.body.appendChild(a);
        document.body.appendChild(player);
      };
      stopMix(duration, mediaRecorder);
    })
    .catch(function(error) {
      console.log('error: ' + error);
    });
}
And the HTML:
<input id="file"
       type="file"
       name="file"
       accept="audio/*"
       onchange="handleFileSelect(this)" />
<span id="karaoke_start" style="display:none;background-color:yellow;cursor:pointer;">start karaoke</span>
Here is the working plnkr example: plnkr

Is it possible to mix multiple audio files on top of each other preferably with javascript

I want to combine audio clips, layered on top of each other, so that they play synchronously and are saved in a new audio file. Any help would be much appreciated. I've done some digging online but couldn't find a definitive answer as to whether many of the available JavaScript audio editing libraries (Mix.js, for example) are capable of this.
Yes, it is possible using OfflineAudioContext() or AudioContext.createChannelMerger() and creating a MediaStream. See Phonegap mixing audio files and Web Audio API.
You can use fetch() or XMLHttpRequest() to retrieve each audio resource as an ArrayBuffer, and AudioContext.decodeAudioData() to create an AudioBufferSourceNode from the response; an OfflineAudioContext() to render the merged audio; then AudioContext, AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination() and MediaRecorder() to record the stream. Promise.all(), the Promise() constructor and .then() handle the asynchronous fetch() and AudioContext.decodeAudioData() calls, and the resulting mixed-audio Blob is passed on at the stop event of MediaRecorder.
Connect each AudioBufferSourceNode to OfflineAudioContext.destination and call .start() on each node; call OfflineAudioContext.startRendering(); create a new AudioContext node and connect the renderedBuffer to it; call .createMediaStreamDestination() on the AudioContext to create a MediaStream from the merged audio buffers, and pass .stream to MediaRecorder(). At the stop event of MediaRecorder, create a Blob URL of the Blob of the recorded audio mix with URL.createObjectURL(), which can be downloaded using an <a> element with the download attribute and href set to the Blob URL.
var sources = ["https://upload.wikimedia.org/wikipedia/commons/b/be/"
+ "Hidden_Tribe_-_Didgeridoo_1_Live.ogg"
, "https://upload.wikimedia.org/wikipedia/commons/6/6e/"
+ "Micronesia_National_Anthem.ogg"];
var description = "HiddenTribeAnthem";
var context;
var recorder;
var div = document.querySelector("div");
var duration = 60000;
var chunks = [];
var audio = new AudioContext();
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
player.controls = "controls";
function get(src) {
return fetch(src)
.then(function(response) {
return response.arrayBuffer()
})
}
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(sources.map(get)).then(function(data) {
var len = Math.max.apply(Math, data.map(function(buffer) {
return buffer.byteLength
}));
context = new OfflineAudioContext(2, len, 44100);
return Promise.all(data.map(function(buffer) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
var source = context.createBufferSource();
source.buffer = bufferSource;
source.connect(context.destination);
return source.start()
})
}))
.then(function() {
return context.startRendering()
})
.then(function(renderedBuffer) {
return new Promise(function(resolve) {
var mix = audio.createBufferSource();
mix.buffer = renderedBuffer;
mix.connect(audio.destination);
mix.connect(mixedAudio);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
mix.start(0);
div.innerHTML = "playing and recording tracks..";
// stop playback and recorder in 60 seconds
stopMix(duration, mix, recorder)
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
console.log("recording complete");
resolve(blob)
};
})
})
.then(function(blob) {
console.log(blob);
div.innerHTML = "mixed audio tracks ready for download..";
var audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
document.body.appendChild(a);
a.insertAdjacentHTML("afterend", "<br>");
player.src = audioDownload;
document.body.appendChild(player);
})
})
.catch(function(e) {
console.log(e)
});
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div>loading audio tracks.. please wait</div>
</body>
</html>
You can alternatively utilize AudioContext.createChannelMerger() and AudioContext.createChannelSplitter():
var sources = ["/path/to/audoi1", "/path/to/audio2"];
var description = "mix";
var chunks = [];
var channels = [[0, 1], [1, 0]];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
var mixedAudio = audio.createMediaStreamDestination();
var duration = 60000;
var context;
var recorder;
var audioDownload;
player.controls = "controls";
function get(src) {
return fetch(src)
.then(function(response) {
return response.arrayBuffer()
})
}
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(sources.map(get)).then(function(data) {
return Promise.all(data.map(function(buffer, index) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
var channel = channels[index];
var source = audio.createBufferSource();
source.buffer = bufferSource;
source.connect(splitter);
splitter.connect(merger, channel[0], channel[1]);
return source
})
}))
.then(function(audionodes) {
merger.connect(mixedAudio);
merger.connect(audio.destination);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
audionodes.forEach(function(node) {
node.start(0)
});
stopMix(duration, ...audionodes, recorder);
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
};
})
})
.catch(function(e) {
console.log(e)
});
