How to encode canvas stream to h264?

I want to encode a canvas stream and download it as an h264 file.
I tried running the following code, but the file cannot be played.
<body>
<canvas id="canvas" height="480" width="720"></canvas>
<video id="video" src="./video.mp4" controls></video>
<script type="module">
const canvas = document.getElementById("canvas")
const video = document.getElementById("video")
const ctx = canvas.getContext("2d")
video.onloadedmetadata = () => {
video.height = video.videoHeight
video.width = video.videoWidth
}
let fn = () => {
ctx.drawImage(video, 0, 0)
requestAnimationFrame(() => {
fn()
})
}
fn()
let result = new Uint8Array(0)
const encoder = new VideoEncoder({
output: (chunk, metadata) => {
let data = new Uint8Array(chunk.byteLength)
chunk.copyTo(data)
result = concatTypedArrays(result, data)
},
error: (e) => {
console.log("[error]", e)
}
})
const config = {
codec: "avc1.64001f",
width: 640,
height: 480,
bitrate: 2_000_000,
framerate: 30,
};
encoder.configure(config);
const stream = canvas.captureStream(30)
const track = stream.getVideoTracks()[0];
setTimeout(() => {
track.stop()
}, 3000)
const trackProcessor = new MediaStreamTrackProcessor(track);
const reader = trackProcessor.readable.getReader();
while (true) {
const readResult = await reader.read();
if (readResult.done) break;
const frame = readResult.value;
encoder.encode(frame, { keyframe: true });
frame.close();
}
function concatTypedArrays(a, b) {
var c = new (a.constructor)(a.length + b.length);
c.set(a, 0);
c.set(b, a.length);
return c;
}
await encoder.flush()
const blob = new Blob([result]);
const videoURL = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = videoURL;
a.download = "video.264";
a.click();
</script>
</body>
The file cannot be played, and running ffprobe video.h264 returns the error Invalid data found when processing input.
I want to encode the canvas stream to an h264 file and download it.
I am new to encoding/decoding and I am not sure what is missing.
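One known pitfall worth checking here, offered as a hedged suggestion rather than a confirmed diagnosis: by default WebCodecs emits H.264 in "avc" format, where the SPS/PPS parameter sets live in the out-of-band decoder description (metadata.decoderConfig.description) rather than in the chunks themselves, so concatenating the raw chunks does not produce a valid Annex B stream for ffprobe. Requesting "annexb" output keeps the start codes and parameter sets in-band. Note also that the configured 640x480 does not match the 720x480 canvas; matching them is safer. A minimal sketch of the adjusted config:
const config = {
codec: "avc1.64001f",
// Annex B framing: start codes plus in-band SPS/PPS, so the
// concatenated EncodedVideoChunks form a playable raw .h264 stream
avc: { format: "annexb" },
width: 720, // match the canvas dimensions
height: 480,
bitrate: 2_000_000,
framerate: 30,
};
encoder.configure(config);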

Related

WebRTC, getDisplayMedia() does not capture sound from the remote stream

I have a web application of my own, which is based on the peerjs library (it is a video conference).
I'm trying to make a recording with MediaRecorder, but I'm facing a very unpleasant case.
The code for capturing my desktop stream is the following:
let chooseScreen = document.querySelector('.chooseScreenBtn')
chooseScreen.onclick = async () => {
let desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: true });
}
I then successfully apply my received desktopStream to videoElement in DOM:
const videoElement = document.querySelector('.videoElement')
videoElement.srcObject = desktopStream
videoElement.muted = false;
videoElement.onloadedmetadata = ()=>{videoElement.play();}
For example, I get desktopStream on the page with an active conference where everyone hears and sees each other.
To check the video and audio in desktopStream I play some video on the video player on the desktop.
I can hear all audio from my desktop, but audio from the participants cannot be heard.
Of course, when I put the desktopStream in MediaRecorder I get a video file with no sound from anyone except my desktop. Any ideas on how to solve it?
Chrome's MediaRecorder API can only output one track.
createMediaStreamSource can take the streams from the desktop audio and the microphone; by connecting both to a single node created by createMediaStreamDestination, you get one merged stream that you can pipe into the MediaRecorder API.
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
// Create a couple of sources
const source1 = context.createMediaStreamSource(desktopStream);
const source2 = context.createMediaStreamSource(voiceStream);
const destination = context.createMediaStreamDestination();
const desktopGain = context.createGain();
const voiceGain = context.createGain();
desktopGain.gain.value = 0.7;
voiceGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
// Connect source2
source2.connect(voiceGain).connect(destination);
return destination.stream.getAudioTracks();
};
It is also possible to use two or more audio inputs + video input.
window.onload = () => {
const warningEl = document.getElementById('warning');
const videoElement = document.getElementById('videoElement');
const captureBtn = document.getElementById('captureBtn');
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const download = document.getElementById('download');
const audioToggle = document.getElementById('audioToggle');
const micAudioToggle = document.getElementById('micAudioToggle');
if('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';
let blobs;
let blob;
let rec;
let stream;
let voiceStream;
let desktopStream;
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
const destination = context.createMediaStreamDestination();
let hasDesktop = false;
let hasVoice = false;
if (desktopStream && desktopStream.getAudioTracks().length > 0) {
// If you don't want to share Audio from the desktop it should still work with just the voice.
const source1 = context.createMediaStreamSource(desktopStream);
const desktopGain = context.createGain();
desktopGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
hasDesktop = true;
}
if (voiceStream && voiceStream.getAudioTracks().length > 0) {
const source2 = context.createMediaStreamSource(voiceStream);
const voiceGain = context.createGain();
voiceGain.gain.value = 0.7;
source2.connect(voiceGain).connect(destination);
hasVoice = true;
}
return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
};
captureBtn.onclick = async () => {
download.style.display = 'none';
const audio = audioToggle.checked || false;
const mic = micAudioToggle.checked || false;
desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: audio });
if (mic === true) {
voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
}
const tracks = [
...desktopStream.getVideoTracks(),
...mergeAudioStreams(desktopStream, voiceStream)
];
console.log('Tracks to add to stream', tracks);
stream = new MediaStream(tracks);
console.log('Stream', stream)
videoElement.srcObject = stream;
videoElement.muted = true;
blobs = [];
rec = new MediaRecorder(stream, {mimeType: 'video/webm; codecs=vp8,opus'});
rec.ondataavailable = (e) => blobs.push(e.data);
rec.onstop = async () => {
blob = new Blob(blobs, {type: 'video/webm'});
let url = window.URL.createObjectURL(blob);
download.href = url;
download.download = 'test.webm';
download.style.display = 'block';
};
startBtn.disabled = false;
captureBtn.disabled = true;
audioToggle.disabled = true;
micAudioToggle.disabled = true;
};
startBtn.onclick = () => {
startBtn.disabled = true;
stopBtn.disabled = false;
rec.start();
};
stopBtn.onclick = () => {
captureBtn.disabled = false;
audioToggle.disabled = false;
micAudioToggle.disabled = false;
startBtn.disabled = true;
stopBtn.disabled = true;
rec.stop();
stream.getTracks().forEach(s=>s.stop())
videoElement.srcObject = null
stream = null;
};
};
Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.
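Because support varies this much, a hedged feature-check sketch like the following (names illustrative) avoids assuming an audio track is present; getDisplayMedia may simply return a stream without one even when audio: true was requested:
async function captureWithAudioCheck() {
const stream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
// Inspect the tracks actually returned instead of trusting the request
if (stream.getAudioTracks().length === 0) {
console.warn('No desktop audio track was captured on this platform/source.');
}
return stream;
}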

Upload a video, and save first frame as image JS

I have a webpage where the user will drop a video file; the page will upload the video and generate a thumbnail based on a timestamp that the user provides.
For the moment, I am just trying to generate the thumbnail based on the FIRST frame of the video.
Here is a quick example of my current progress:
(please use Chrome, as Firefox will complain about the https link; also, sorry if it auto-downloads an image)
https://stackblitz.com/edit/rxjs-qc8iag
import { Observable, throwError } from 'rxjs'
const VIDEO = {
imageFromFrame(videoSrc: any): Observable<any> {
return new Observable<any>((obs) => {
const canvas = document.createElement('canvas')
const video = document.createElement('video')
const context = canvas.getContext('2d')
const source = document.createElement('source');
source.setAttribute('src', videoSrc);
video.appendChild(source);
document.body.appendChild(canvas)
document.body.appendChild(video)
if (!context) {
throwError(`Couldn't retrieve context 2d`)
obs.complete()
return
}
video.load()
video.addEventListener('loadedmetadata', function () {
console.log('loadedmetadata')
// Set canvas dimensions same as video dimensions
canvas.width = video.videoWidth
canvas.height = video.videoHeight
})
video.addEventListener('canplay', function () {
console.log('canplay')
canvas.style.display = 'inline'
context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
// Convert canvas image to Base64
const img = canvas.toDataURL("image/png")
// Convert Base64 image to binary
obs.next(VIDEO.dataURItoBlob(img))
obs.complete()
})
})
},
dataURItoBlob(dataURI: string): Blob {
// convert base64/URLEncoded data component to raw binary data held in a string
var byteString
if (dataURI.split(',')[0].indexOf('base64') >= 0) byteString = atob(dataURI.split(',')[1])
else byteString = unescape(dataURI.split(',')[1])
// separate out the mime component
var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0]
// write the bytes of the string to a typed array
var ia = new Uint8Array(byteString.length)
for (var i = 0; i < byteString.length; i++) {
ia[i] = byteString.charCodeAt(i)
}
return new Blob([ia], { type: mimeString })
},
}
VIDEO.imageFromFrame('https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4?_=1').subscribe((r) => {
var a = document.createElement('a')
document.body.appendChild(a)
const url = window.URL.createObjectURL(r)
a.href = url
a.download = 'sdf'
a.click()
window.URL.revokeObjectURL(url)
})
The problem is, the downloaded image is empty and does not represent the first frame of the video, even though the video should have been loaded and drawn into the canvas.
I am trying to solve it, but if someone could help me find out the issue, thanks.
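A plausible cause, offered as a hedged guess from the symptoms: at the canplay event the video may still be sitting at time 0 before any frame has been painted, so drawImage copies blank pixels. Seeking slightly forward and drawing on the seeked event avoids that; a minimal sketch, assuming the video, canvas, context and obs from the code above:
video.addEventListener('loadedmetadata', function () {
video.currentTime = 0.1 // nudge off time 0 so a frame gets decoded
})
video.addEventListener('seeked', function () {
context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
const img = canvas.toDataURL('image/png')
obs.next(VIDEO.dataURItoBlob(img))
obs.complete()
})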
For anyone looking, I made this, which works correctly (it needs improvement, but the idea is there). I use an Observable because the flow of my app uses Observables, but you can change it to a Promise or whatever =>
imageFromFrame(
videoFile: File,
options: { frameTimeInSeconds: number; filename?: string; fileType?: string } = {
frameTimeInSeconds: 0.1,
}
): Observable<File> {
return new Observable<any>((obs) => {
const canvas = document.createElement('canvas')
const video = document.createElement('video')
const source = document.createElement('source')
const context = canvas.getContext('2d')
const urlRef = URL.createObjectURL(videoFile)
video.style.display = 'none'
canvas.style.display = 'none'
source.setAttribute('src', urlRef)
video.setAttribute('crossorigin', 'anonymous')
video.appendChild(source)
document.body.appendChild(canvas)
document.body.appendChild(video)
if (!context) {
throwError(`Couldn't retrieve context 2d`)
obs.complete()
return
}
video.currentTime = options.frameTimeInSeconds
video.load()
video.addEventListener('loadedmetadata', function () {
canvas.width = video.videoWidth
canvas.height = video.videoHeight
})
video.addEventListener('loadeddata', function () {
context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
canvas.toBlob((blob) => {
if (!blob) {
return
}
obs.next(
new File([blob], options.filename || FILES.getName(videoFile.name), {
type: options.fileType || 'image/png',
})
)
obs.complete()
URL.revokeObjectURL(urlRef)
video.remove()
canvas.remove()
}, options.fileType || 'image/png')
})
})
},
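A possible usage sketch for the method above; hedged, since the #videoInput element and the explicit filename option are illustrative (passing filename also sidesteps the external FILES helper), and VIDEO is assumed to be the object holding imageFromFrame as in the question:
const videoInput = document.querySelector('#videoInput') // an <input type="file">
videoInput.onchange = () => {
const file = videoInput.files[0]
VIDEO.imageFromFrame(file, { frameTimeInSeconds: 2, filename: 'thumb.png' })
.subscribe((imageFile) => {
console.log('thumbnail generated:', imageFile.name, imageFile.type)
})
}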

Get webcam video in HTML and send to flask server via socketio

I'm trying to capture webcam video on the client side and send the frames to the server to process them. I'm a newbie in JS and I'm having some problems.
I tried to use OpenCV.js to get the data, but I didn't understand how to get it. In Python we can do
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
and the frame is a 2d array with the image, but how can I get each frame (as a 2d array) to send using OpenCV.js?
I have this code on the client side:
<script type="text/javascript">
function onOpenCvReady() {
cv['onRuntimeInitialized'] = () => {
var socket = io('http://localhost:5000');
socket.on('connect', function () {
console.log("Connected...!", socket.connected)
});
const video = document.querySelector("#videoElement");
video.width = 500;
video.height = 400;
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia({ video: true })
.then(function (stream) {
video.srcObject = stream;
video.play();
})
.catch(function (err0r) {
console.log(err0r)
console.log("Something went wrong!");
});
}
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let cap = new cv.VideoCapture(video);
const FPS = 15;
function processVideo() {
let begin = Date.now();
cap.read(src);
handle_socket(src['data']);
// schedule next one.
let delay = 1000 / FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
}
// schedule first one.
setTimeout(processVideo, 0);
function handle_socket(src) {
socket.emit('event', { info: 'I\'m connected!', data: src });
}
}
}
</script>
My solution was:
// Select HTML video element where the webcam data is
const video = document.querySelector("#videoElement");
// returns a frame encoded in base64
const getFrame = () => {
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
const data = canvas.toDataURL('image/jpeg');
return data;
}
// Send data over socket
function handle_socket(data, dst) {
socket.emit('event', data, function (res) {
if (res !== 0) {
results = res;
}
});
}
frame64 = getFrame()
handle_socket(frame64);
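To stream continuously rather than send a single frame, a small sketch along these lines could drive it at a fixed rate (hedged: video, getFrame and handle_socket are the ones defined above; the 10 FPS rate is illustrative):
const FPS = 10;
setInterval(() => {
// wait until the webcam has actually delivered a frame
if (video.readyState >= HTMLMediaElement.HAVE_CURRENT_DATA) {
handle_socket(getFrame());
}
}, 1000 / FPS);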

How to save canvas animation as gif or webm?

I have written this function to capture each frame for the GIF, but the output is very laggy and crashes when the data increases. Any suggestions?
Code :
function createGifFromPng(list, framerate, fileName, gifScale) {
gifshot.createGIF({
'images': list,
'gifWidth': wWidth * gifScale,
'gifHeight': wHeight * gifScale,
'interval': 1 / framerate,
}, function(obj) {
if (!obj.error) {
var image = obj.image;
var a = document.createElement('a');
document.body.append(a);
a.download = fileName;
a.href = image;
a.click();
a.remove();
}
});
}
/////////////////////////////////////////////////////////////////////////
function getGifFromCanvas(renderer, sprite, fileName, gifScale, framesCount, framerate) {
var listImgs = [];
var saving = false;
var interval = setInterval(function() {
renderer.extract.canvas(sprite).toBlob(function(b) {
if (listImgs.length >= framesCount) {
clearInterval(interval);
if (!saving) {
createGifFromPng(listImgs, framerate, fileName,gifScale);
saving = true;
}
}
else {
listImgs.push(URL.createObjectURL(b));
}
}, 'image/gif');
}, 1000 / framerate);
}
In modern browsers you can use a combination of the MediaRecorder API and the HTMLCanvasElement.captureStream method.
The MediaRecorder API can encode a MediaStream into a video or audio media file on the fly, resulting in far less memory needed than when you grab still images.
const ctx = canvas.getContext('2d');
var x = 0;
anim();
startRecording();
function startRecording() {
const chunks = []; // here we will store our recorded media chunks (Blobs)
const stream = canvas.captureStream(); // grab our canvas MediaStream
const rec = new MediaRecorder(stream); // init the recorder
// every time the recorder has new data, we will store it in our array
rec.ondataavailable = e => chunks.push(e.data);
// only when the recorder stops, we construct a complete Blob from all the chunks
rec.onstop = e => exportVid(new Blob(chunks, {type: 'video/webm'}));
rec.start();
setTimeout(()=>rec.stop(), 3000); // stop recording in 3s
}
function exportVid(blob) {
const vid = document.createElement('video');
vid.src = URL.createObjectURL(blob);
vid.controls = true;
document.body.appendChild(vid);
const a = document.createElement('a');
a.download = 'myvid.webm';
a.href = vid.src;
a.textContent = 'download the video';
document.body.appendChild(a);
}
function anim(){
x = (x + 1) % canvas.width;
ctx.fillStyle = 'white';
ctx.fillRect(0,0,canvas.width,canvas.height);
ctx.fillStyle = 'black';
ctx.fillRect(x - 20, 0, 40, 40);
requestAnimationFrame(anim);
}
<canvas id="canvas"></canvas>
You can also use https://github.com/spite/ccapture.js/ to capture to gif or video.
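Roughly, per that project's README (parameters illustrative, not verified here), CCapture hooks into a requestAnimationFrame render loop:
const capturer = new CCapture({ format: 'gif', workersPath: 'js/', framerate: 30 });
capturer.start();
function render() {
requestAnimationFrame(render);
// ... draw onto the canvas ...
capturer.capture(canvas); // grab the current canvas state as one frame
}
render();
// later: stop and save the assembled file
// capturer.stop();
// capturer.save();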

How to use "segments" mode at SourceBuffer of MediaSource to render same result at Chomium, Chorme and Firefox?

Reference to my original question: How to use Blob URL, MediaSource or other methods to play concatenated Blobs of media fragments?
In light of the potential deprecation of "sequence" mode for multiple tracks, which the current code uses for both Chromium and Firefox browsers, my additional questions are:
Which adjustments need to be made in my MediaSource code to render the same result in both Chromium and Firefox browsers? Firefox currently renders as expected using "segments" .mode.
Or, is there a bug in the implementation of multitrack support in Chromium browsers when SourceBuffer .mode is set to "segments"?
Background information
I have been able to record discrete media fragments using MediaRecorder, add cues to the resulting webm files using ts-ebml, and record the discrete media fragments as a single media file using MediaSource with the .mode of SourceBuffer set to "sequence", in both Chromium and Firefox browsers.
The Chromium issue Monitor and potentially deprecate support for multitrack SourceBuffer support of 'sequence' AppendMode discusses that "sequence" mode is being considered for deprecation for multitrack SourceBuffer objects. When asked, in the original referenced question, how to implement the code using "segments" .mode (the default AppendMode of SourceBuffer), the response was essentially that "segments" mode also supports multitrack input at SourceBuffer.
However, when trying the code with the .mode of SourceBuffer set to "segments", Chromium 60 plays only approximately one second (the first of multiple appended buffers) of an expected ten-second playback of the recorded media fragments, which have cues set in the webm file that is converted to an ArrayBuffer and passed to .appendBuffer(). Firefox renders the same result whether .mode is set to "sequence" or "segments".
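For reference, the core of the "segments"-mode approach in the code below reduces to the following sketch (hedged: mediaFragments stands in for the recorded { mediaBuffer, mediaDuration } entries). In "segments" mode each recorded fragment carries its own timestamps starting at 0, so they have to be shifted via timestampOffset before each append, whereas "sequence" mode derives timestamps from append order:
sourceBuffer.mode = "segments";
let offset = 0;
for (const { mediaBuffer, mediaDuration } of mediaFragments) {
// shift this fragment's 0-based timestamps to the end of what
// has been appended so far
sourceBuffer.timestampOffset = offset;
sourceBuffer.appendBuffer(mediaBuffer);
await new Promise(resolve =>
sourceBuffer.addEventListener("updateend", resolve, { once: true }));
offset += mediaDuration;
}
mediaSource.endOfStream();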
Code which renders the expected result at both Chromium and Firefox follows. Note that Firefox does not play .mp4 in the <video> element if multipleUrls is tried, though Firefox does support playing .mp4 at MediaSource when the proper media codec is set.
<!DOCTYPE html>
<html>
<!-- recordMediaFragments.js demo https://github.com/guest271314/recordMediaFragments/tree/master/demos 2017 guest271314 -->
<head>
<!-- https://github.com/guest271314/recordMediaFragments/ts-ebml -->
</head>
<body>
<video width="320" height="280" controls="true"></video>
<script>
(async() => {
let request = await fetch("https://raw.githubusercontent.com/guest271314/recordMediaFragments/master/ts-ebml/ts-ebml-min.js");
let blob = await request.blob();
const script = document.createElement("script");
document.head.appendChild(script);
script.src = URL.createObjectURL(blob);
script.onload = () => {
const tsebml = require("ts-ebml");
const video = document.querySelector("video");
const videoStream = document.createElement("video");
// `MediaSource`
const mediaSource = new MediaSource();
// for firefox
// see https://bugzilla.mozilla.org/show_bug.cgi?id=1259788
const hasCaptureStream = HTMLMediaElement.prototype.hasOwnProperty("captureStream");
// handle firefox and chromium
const captureStream = mediaElement =>
!!mediaElement.mozCaptureStream
? mediaElement.mozCaptureStream()
: mediaElement.captureStream();
let currentFragmentURL, currentBlobURL, fragments;
videoStream.width = video.width;
videoStream.height = video.height;
const mimeCodec = "video/webm;codecs=vp8,opus";
// set to `.currentTime` of `videoStream` at `pause`
// to set next media fragment starting `.currentTime`
// if URL to be set at `.src` has same origin and pathname
let cursor = 0;
// https://gist.github.com/jsturgis/3b19447b304616f18657
// https://www.w3.org/2010/05/video/mediaevents.html
const multipleUrls = [
"https://media.w3.org/2010/05/sintel/trailer.mp4#t=0,5",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=55,60",
"https://raw.githubusercontent.com/w3c/web-platform-tests/master/media-source/mp4/test.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerBlazes.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerJoyrides.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerMeltdowns.mp4#t=0,6",
"https://media.w3.org/2010/05/video/movie_300.mp4#t=30,36"
];
const singleUrl = [
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=0,1",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=1,2",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=2,3",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=3,4",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=4,5",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=5,6",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=6,7",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=7,8",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=8,9",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=9,10"
];
const geckoUrl = [
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=10,11",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=11,12",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=12,13",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=13,14",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=14,15",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=15,16",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=16,17",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=17,18",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=18,19",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=19,20"
];
const mediaFragmentRecorder = async(urls) => {
// `ts-ebml`
const tsebmlTools = async() => ({
decoder: new tsebml.Decoder(),
encoder: new tsebml.Encoder(),
reader: new tsebml.Reader(),
tools: tsebml.tools
});
// create `ArrayBuffer` from `Blob`
const readAsArrayBuffer = (blob) => {
return new Promise((resolve, reject) => {
const fr = new FileReader();
fr.readAsArrayBuffer(blob);
fr.onloadend = () => {
resolve(fr.result);
};
fr.onerror = (ev) => {
reject(ev.error);
};
});
}
// `urls`: string or array of URLs
// record each media fragment
const recordMediaFragments = async(video, mimeCodec, decoder, encoder, reader, tools, ...urls) => {
urls = [].concat(...urls);
const media = [];
for (let url of urls) {
await new Promise(async(resolve) => {
let mediaStream, recorder;
videoStream.onprogress = e => {
videoStream.onprogress = null;
console.log("loading " + url)
}
videoStream.oncanplay = async(e) => {
videoStream.oncanplay = null;
videoStream.play();
mediaStream = captureStream(videoStream);
console.log(mediaStream);
recorder = new MediaRecorder(mediaStream, {
mimeType: mimeCodec
});
recorder.ondataavailable = async(e) => {
// set metadata of recorded media fragment `Blob`
const mediaBlob = await setMediaMetadata(e.data);
// create `ArrayBuffer` of `Blob` of recorded media fragment
const mediaBuffer = await readAsArrayBuffer(mediaBlob);
const mediaDuration = videoStream.played.end(0) - videoStream.played.start(0);
const mediaFragmentId = currentFragmentURL || new URL(url);
const mediaFileName = mediaFragmentId.pathname.split("/").pop() + mediaFragmentId.hash;
const mediaFragmentType = "singleMediaFragment";
if (currentBlobURL) {
URL.revokeObjectURL(currentBlobURL);
}
media.push({
mediaBlob,
mediaBuffer,
mediaDuration,
mediaFragmentType,
mediaFileName
});
resolve();
}
recorder.start();
}
videoStream.onpause = e => {
videoStream.onpause = null;
cursor = videoStream.currentTime;
recorder.stop();
// stop `MediaStreamTrack`s
for (let track of mediaStream.getTracks()) {
track.stop();
}
}
currentFragmentURL = new URL(url);
// for firefox to load cross origin media without silence
if (!hasCaptureStream) {
console.log(currentFragmentURL);
request = new Request(currentFragmentURL.href);
blob = await fetch(request).then(response => response.blob());
console.log(blob);
currentBlobURL = URL.createObjectURL(blob);
// set next media fragment URL to `.currentTime` at `pause` event
// of previous media fragment if `url` has same `origin` and `pathname`
if (urls.indexOf(currentFragmentURL.href) > 0
&& new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).origin === currentFragmentURL.origin
&& new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).pathname === currentFragmentURL.pathname) {
if (cursor > 0) {
url = currentBlobURL + currentFragmentURL.hash.replace(/=\d+/, "=" + cursor);
console.log(url)
}
} else {
url = currentBlobURL + currentFragmentURL.hash;
}
} else {
if (cursor > 0
&& new URL(urls[urls.indexOf(url) - 1]).origin === currentFragmentURL.origin
&& new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).pathname === currentFragmentURL.pathname) {
url = url.replace(/=\d+/, "=" + cursor);
console.log(url)
}
}
videoStream.src = url;
}).catch(err => err)
}
return media
}
// set metadata of media `Blob`
// see https://github.com/legokichi/ts-ebml/issues/14#issuecomment-325200151
const setMediaMetadata = async(blob) =>
tsebmlTools()
.then(async({
decoder,
encoder,
tools,
reader
}) => {
let webM = new Blob([], {
type: "video/webm"
});
webM = new Blob([webM, blob], {
type: blob.type
});
const buf = await readAsArrayBuffer(blob);
const elms = decoder.decode(buf);
elms.forEach((elm) => {
reader.read(elm);
});
reader.stop();
const refinedMetadataBuf = tools.makeMetadataSeekable(reader.metadatas, reader.duration, reader.cues);
const webMBuf = await readAsArrayBuffer(webM);
const body = webMBuf.slice(reader.metadataSize);
const refinedWebM = new Blob([refinedMetadataBuf, body], {
type: webM.type
});
// close Blobs
if (webM.close && blob.close) {
webM.close();
blob.close();
}
return refinedWebM;
})
.catch(err => console.error(err));
let mediaTools = await tsebmlTools();
const {
decoder,
encoder,
reader,
tools
} = mediaTools;
const mediaFragments = await recordMediaFragments(video, mimeCodec, decoder, encoder, reader, tools, urls);
const recordedMedia = await new Promise((resolveAllMedia, rejectAllMedia) => {
console.log(decoder, encoder, tools, reader, mediaFragments);
let mediaStream, recorder;
mediaSource.onsourceended = e => {
console.log(video.buffered.start(0), video.buffered.end(0));
video.currentTime = video.buffered.start(0);
console.log(video.paused, video.readyState);
video.ontimeupdate = e => {
console.log(video.currentTime, mediaSource.duration);
if (video.currentTime >= mediaSource.duration) {
video.ontimeupdate = null;
video.oncanplay = null;
video.onwaiting = null;
if (recorder.state === "recording") {
recorder.stop();
}
console.log(e, recorder);
}
}
}
video.onended = (e) => {
video.onended = null;
console.log(e, video.currentTime,
mediaSource.duration);
}
video.oncanplay = e => {
console.log(e, video.duration, video.buffered.end(0));
video.play()
}
video.onwaiting = e => {
console.log(e, video.currentTime);
}
// record `MediaSource` playback of recorded media fragments
video.onplaying = async(e) => {
console.log(e);
video.onplaying = null;
mediaStream = captureStream(video);
if (!hasCaptureStream) {
videoStream.srcObject = mediaStream;
videoStream.play();
}
recorder = new MediaRecorder(mediaStream, {
mimeType: mimeCodec
});
console.log(recorder);
recorder.ondataavailable = async(e) => {
console.log(e);
const mediaFragmentsRecording = {};
mediaFragmentsRecording.mediaBlob = await setMediaMetadata(e.data);
mediaFragmentsRecording.mediaBuffer = await readAsArrayBuffer(mediaFragmentsRecording.mediaBlob);
mediaFragmentsRecording.mediaFileName = urls.map(url => {
const id = new URL(url);
return id.pathname.split("/").pop() + id.hash
}).join("-");
mediaFragmentsRecording.mediaFragmentType = "multipleMediaFragments";
// `<video>` to play concatened media fragments
// recorded from playback of `MediaSource`
fragments = document.createElement("video");
fragments.id = "fragments";
fragments.width = video.width;
fragments.height = video.height;
fragments.controls = true;
fragments.onloadedmetadata = () => {
fragments.onloadedmetadata = null;
mediaFragmentsRecording.mediaDuration = fragments.duration;
URL.revokeObjectURL(currentBlobURL);
// stop `MediaStreamTrack`s
for (let track of mediaStream.getTracks()) {
track.stop();
}
resolveAllMedia([
...mediaFragments, mediaFragmentsRecording
]);
}
currentBlobURL = URL.createObjectURL(mediaFragmentsRecording.mediaBlob);
fragments.src = currentBlobURL;
document.body.appendChild(fragments);
}
recorder.start();
}
video.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener("sourceopen", sourceOpen);
async function sourceOpen(e) {
if (MediaSource.isTypeSupported(mimeCodec)) {
const sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
sourceBuffer.mode = "segments";
for (let {
mediaBuffer,
mediaDuration
} of mediaFragments) {
await new Promise((resolveUpdatedMediaSource) => {
sourceBuffer.onupdateend = async(e) => {
sourceBuffer.onupdateend = null;
console.log(e, mediaDuration, mediaSource.duration
, video.paused, video.ended, video.currentTime
, "media source playing", video.readyState);
// https://bugzilla.mozilla.org/show_bug.cgi?id=1400587
// https://bugs.chromium.org/p/chromium/issues/detail?id=766002&q=label%3AMSEptsdtsCleanup
try {
sourceBuffer.timestampOffset += mediaDuration;
resolveUpdatedMediaSource();
} catch (err) {
console.error(err);
resolveUpdatedMediaSource();
}
}
sourceBuffer.appendBuffer(mediaBuffer);
})
}
mediaSource.endOfStream()
} else {
console.warn(mimeCodec + " not supported");
}
};
})
return recordedMedia
};
mediaFragmentRecorder(geckoUrl)
.then(recordedMediaFragments => {
// do stuff with recorded media fragments
console.log(recordedMediaFragments);
const select = document.createElement("select");
for (let {
mediaFileName,
mediaBlob,
mediaFragmentType
} of Object.values(recordedMediaFragments)) {
const option = new Option(mediaFileName, URL.createObjectURL(mediaBlob));
select.appendChild(option);
}
select.onchange = () => {
document.getElementById("fragments").src = select.value;
}
video.parentNode.insertBefore(select, video);
video.controls = true;
video.currentTime = video.buffered.start(0);
})
.catch(err => console.error(err));
}
})()
</script>
</body>
</html>
