WebRTC, getDisplayMedia() does not capture sound from the remote stream - javascript

I have a web application of my own, which is based on the peerjs library (It is a video conference).
I'm trying to make a recording with 'MediaRecorder', but I'm facing a very unpleasant case.
The code for capturing my desktop stream is the following:
// Button that starts the screen capture.
const chooseScreen = document.querySelector('.chooseScreenBtn');
// Declared at the outer scope so the captured stream is usable after the
// handler runs (the original `let` inside the handler made `desktopStream`
// invisible to the code that later assigns it to the <video> element).
let desktopStream;
chooseScreen.onclick = async () => {
  // audio:true captures desktop/tab audio only on supported platforms (see notes below).
  desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
};
I then successfully apply my received desktopStream to videoElement in DOM:
// Attach the captured desktop stream to the <video> element for live preview.
const videoElement = document.querySelector('.videoElement'); // was `doc.querySelector` — `doc` is undefined
videoElement.srcObject = desktopStream;
videoElement.muted = false;
// Start playback once the stream's dimensions/metadata are known.
videoElement.onloadedmetadata = () => { videoElement.play(); };
For example, I get desktopStream on the page with an active conference where everyone hears and sees each other.
To check the video and audio in desktopStream I play some video on the video player on the desktop.
I can hear any audio from my desktop but audio from any participant cannot be heard.
Of course, when I put the desktopStream in MediaRecorder I get a video file with no sound from anyone except my desktop. Any ideas on how to solve it?

Chrome's MediaRecorder API can only record a single audio track.
With createMediaStreamSource you can take the desktop audio and the microphone as inputs; by connecting both to a single node created by createMediaStreamDestination, you get one combined stream that can be piped into the MediaRecorder API.
/**
 * Mix the audio of two MediaStreams into a single set of audio tracks.
 * Each input is routed through its own gain node (both at 0.7) into one
 * MediaStreamAudioDestinationNode, whose stream's audio tracks are returned.
 *
 * @param {MediaStream} desktopStream - screen-capture stream (with audio)
 * @param {MediaStream} voiceStream - microphone stream
 * @returns {MediaStreamTrack[]} audio tracks of the mixed destination stream
 */
const mergeAudioStreams = (desktopStream, voiceStream) => {
  const audioCtx = new AudioContext();
  const mixed = audioCtx.createMediaStreamDestination();
  // Give each source its own gain stage so the two levels can be balanced.
  for (const input of [desktopStream, voiceStream]) {
    const level = audioCtx.createGain();
    level.gain.value = 0.7;
    audioCtx.createMediaStreamSource(input).connect(level).connect(mixed);
  }
  return mixed.stream.getAudioTracks();
};
It is also possible to use two or more audio inputs + video input.
window.onload = () => {
  // --- UI elements --------------------------------------------------------
  const warningEl = document.getElementById('warning');
  const videoElement = document.getElementById('videoElement');
  const captureBtn = document.getElementById('captureBtn');
  const startBtn = document.getElementById('startBtn');
  const stopBtn = document.getElementById('stopBtn');
  const download = document.getElementById('download');
  const audioToggle = document.getElementById('audioToggle');
  const micAudioToggle = document.getElementById('micAudioToggle');

  // Hide the "unsupported browser" warning when getDisplayMedia exists.
  if ('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';

  let blobs;         // chunks delivered by MediaRecorder.ondataavailable
  let blob;          // final assembled recording
  let rec;           // MediaRecorder instance
  let stream;        // merged stream fed to the recorder / preview
  let voiceStream;   // optional microphone capture
  let desktopStream; // screen capture

  /**
   * Mix the audio tracks of the desktop and microphone streams into a single
   * track list via the Web Audio API (Chrome's MediaRecorder records only one
   * audio track). Either stream may be absent or audio-less.
   * @returns {MediaStreamTrack[]} mixed audio tracks, or [] if no audio input
   */
  const mergeAudioStreams = (desktopStream, voiceStream) => {
    const context = new AudioContext();
    const destination = context.createMediaStreamDestination();
    let hasDesktop = false;
    let hasVoice = false;
    if (desktopStream && desktopStream.getAudioTracks().length > 0) {
      // If you don't want to share Audio from the desktop it should still work with just the voice.
      const source1 = context.createMediaStreamSource(desktopStream);
      const desktopGain = context.createGain();
      desktopGain.gain.value = 0.7;
      source1.connect(desktopGain).connect(destination);
      hasDesktop = true;
    }
    if (voiceStream && voiceStream.getAudioTracks().length > 0) {
      const source2 = context.createMediaStreamSource(voiceStream);
      const voiceGain = context.createGain();
      voiceGain.gain.value = 0.7;
      source2.connect(voiceGain).connect(destination);
      hasVoice = true;
    }
    return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
  };

  captureBtn.onclick = async () => {
    download.style.display = 'none';
    const audio = audioToggle.checked || false;
    const mic = micAudioToggle.checked || false;
    desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: audio });
    if (mic === true) {
      voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
    } else {
      // Fix: clear any stream captured on a previous run so a stale
      // microphone track is not merged in when the toggle is now off.
      voiceStream = undefined;
    }
    // Recorder input: the desktop video track plus the mixed audio track.
    const tracks = [
      ...desktopStream.getVideoTracks(),
      ...mergeAudioStreams(desktopStream, voiceStream)
    ];
    console.log('Tracks to add to stream', tracks);
    stream = new MediaStream(tracks);
    console.log('Stream', stream);
    videoElement.srcObject = stream;
    videoElement.muted = true; // avoid audio feedback during local preview
    blobs = [];
    rec = new MediaRecorder(stream, { mimeType: 'video/webm; codecs=vp8,opus' });
    rec.ondataavailable = (e) => blobs.push(e.data);
    rec.onstop = async () => {
      // Expose the finished recording through the download link.
      blob = new Blob(blobs, { type: 'video/webm' });
      let url = window.URL.createObjectURL(blob);
      download.href = url;
      download.download = 'test.webm';
      download.style.display = 'block';
    };
    startBtn.disabled = false;
    captureBtn.disabled = true;
    audioToggle.disabled = true;
    micAudioToggle.disabled = true;
  };

  startBtn.onclick = () => {
    startBtn.disabled = true;
    stopBtn.disabled = false;
    rec.start();
  };

  stopBtn.onclick = () => {
    captureBtn.disabled = false;
    audioToggle.disabled = false;
    micAudioToggle.disabled = false;
    startBtn.disabled = true;
    stopBtn.disabled = true;
    rec.stop();
    stream.getTracks().forEach(s => s.stop());
    // Fix: also stop the ORIGINAL capture tracks. The merged stream only
    // contains the Web Audio destination track, so without this the
    // microphone / desktop-audio sources keep running after "stop"
    // (e.g. the browser's mic indicator stays on).
    if (desktopStream) desktopStream.getTracks().forEach(t => t.stop());
    if (voiceStream) voiceStream.getTracks().forEach(t => t.stop());
    videoElement.srcObject = null;
    stream = null;
  };
};

Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.

Related

where to add my websocket code in javascript

I am very new to javaScript, I know some basics but have not yet completely understood the complete logics behind it (so far I have only worked with Python and a little bit of VBA)
For uni I have to build a browser interface to record audio and transfer it to a server where a Speech to text application runs. I found some opensource code here (https://github.com/mdn/dom-examples/blob/main/media/web-dictaphone/scripts/app.js) which I wanted to use, but is missing the websocket part. Now I don't know, where exactly to insert that. So far I have this:
code of the Webdictaphone:
// set up basic variables for app
const record = document.querySelector('.record');
const stop = document.querySelector('.stop');
const soundClips = document.querySelector('.sound-clips');
const canvas = document.querySelector('.visualizer');
const mainSection = document.querySelector('.main-controls');
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
// Audio-only capture; no video is requested.
const constraints = { audio: true };
// Data chunks delivered while recording; emptied each time a clip is built.
let chunks = [];
let onSuccess = function(stream) {
const mediaRecorder = new MediaRecorder(stream);
// NOTE(review): visualize() is defined elsewhere in the original app.js —
// confirm it is in scope wherever this snippet is used.
visualize(stream);
// Start recording and flip the button states.
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
}
// Stop recording; the clip itself is assembled in mediaRecorder.onstop below.
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
}
// Build the playable clip UI (label, audio element, delete button).
mediaRecorder.onstop = function(e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt('Enter a name for your sound clip?','My unnamed clip');
const clipContainer = document.createElement('article');
const clipLabel = document.createElement('p');
const audio = document.createElement('audio');
const deleteButton = document.createElement('button');
clipContainer.classList.add('clip');
audio.setAttribute('controls', '');
deleteButton.textContent = 'Delete';
deleteButton.className = 'delete';
// Fall back to a default label when the prompt is cancelled.
if(clipName === null) {
clipLabel.textContent = 'My unnamed clip';
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
// Concatenate recorded chunks into one blob and reset the buffer.
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
// Remove the whole clip container when its delete button is clicked.
deleteButton.onclick = function(e) {
e.target.closest(".clip").remove();
}
// Allow renaming a clip by clicking its label.
clipLabel.onclick = function() {
const existingName = clipLabel.textContent;
const newClipName = prompt('Enter a new name for your sound clip?');
if(newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
}
}
// Buffer each data chunk as it arrives from the recorder.
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
}
let onError = function(err) {
console.log('The following error occured: ' + err);
}
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log('getUserMedia not supported on your browser!');
}
websocket part (client side):
window.addEventListener("DOMContentLoaded", () => {
// Open the WebSocket connection and register event handlers.
console.log('DOMContentLoaded done');
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
// NOTE(review): `dataToBeSent` is assigned without const/let, so it becomes
// an implicit global — that is why it is reachable outside this callback,
// but it would throw in strict mode. Declare it explicitly at top level.
dataToBeSent = function (data) {
ws.send(data);
};
console.log('ws is defined');
})
Right now I just stacked both of the parts on top of each other, but this doesn't work, since, as I found out, you can only define and use variables (such as ws) within a block. This leads to an error that says that ws is not defined when I call the sending function within the if-statement.
I already tried to look for tutorials for hours but none that I found covered this topic. I also tried moving the websocket part into the if-statement, but that also did not work - unsurprisingly - at least not in the way that I tried.
I feel like my problem lays in understanding how to define the websocket so I can call it within the if statement, or figure out a way to somehow get the audio somewhere where ws is considered to be defined. Unfortunately I just don't get behind it and already invested days which has become really frustrating.
I appreciate any help. If you have any ideas what I could change or move in the code or maybe just know any tutorial that could help, I'd be really grateful.
Thanks in advance!
You don't need that window.addEventListener("DOMContentLoaded", () => { part
// Open the WebSocket at top level so `ws` is in scope for the whole script.
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
// NOTE(review): assigned without const/let — implicit global; would throw in
// strict mode. Prefer `const dataToBeSent = ...` at this top level.
dataToBeSent = function (data) {
ws.send(data);
};
// set up basic variables for app
const record = document.querySelector(".record");
const stop = document.querySelector(".stop");
const soundClips = document.querySelector(".sound-clips");
const canvas = document.querySelector(".visualizer");
const mainSection = document.querySelector(".main-controls");
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log("getUserMedia supported.");
// Audio-only capture; no video is requested.
const constraints = { audio: true };
// Data chunks delivered while recording; emptied each time a clip is built.
let chunks = [];
let onSuccess = function (stream) {
const mediaRecorder = new MediaRecorder(stream);
// NOTE(review): visualize() is defined elsewhere in the original app.js —
// confirm it is in scope wherever this snippet is used.
visualize(stream);
// Start recording and flip the button states.
record.onclick = function () {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
};
// Stop recording; the clip itself is assembled in mediaRecorder.onstop below.
stop.onclick = function () {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
};
// Build the playable clip UI (label, audio element, delete button).
mediaRecorder.onstop = function (e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt(
"Enter a name for your sound clip?",
"My unnamed clip"
);
const clipContainer = document.createElement("article");
const clipLabel = document.createElement("p");
const audio = document.createElement("audio");
const deleteButton = document.createElement("button");
clipContainer.classList.add("clip");
audio.setAttribute("controls", "");
deleteButton.textContent = "Delete";
deleteButton.className = "delete";
// Fall back to a default label when the prompt is cancelled.
if (clipName === null) {
clipLabel.textContent = "My unnamed clip";
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
// Concatenate recorded chunks into one blob and reset the buffer.
// NOTE(review): this is the point where `dataToBeSent(blob)` could be
// called to ship the finished recording over the WebSocket.
const blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
// Remove the whole clip container when its delete button is clicked.
deleteButton.onclick = function (e) {
e.target.closest(".clip").remove();
};
// Allow renaming a clip by clicking its label.
clipLabel.onclick = function () {
const existingName = clipLabel.textContent;
const newClipName = prompt("Enter a new name for your sound clip?");
if (newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
};
};
// Buffer each data chunk as it arrives from the recorder.
mediaRecorder.ondataavailable = function (e) {
chunks.push(e.data);
};
};
let onError = function (err) {
console.log("The following error occured: " + err);
};
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log("getUserMedia not supported on your browser!");
}

Append blobs to url and show

What I am trying to achieve is to have "stream" of blobs from my socket and append it to a video.
Currently, I have something like
const socket = io();
const videoGrid = document.getElementById("video-grid");
const video = document.createElement("video");
video.muted = true;
videoGrid.append(video);

// All chunks received so far; a fresh Blob is rebuilt from them per message.
let blobArray = [];
socket.on("view-stream-10", (data) => {
  blobArray.push(
    new Blob([new Uint8Array(data)], {
      type: "video/x-matroska;codecs=avc1,opus",
    })
  );
  // Preserve the playback position across the src swap.
  let currentTime = video.currentTime;
  let blob = new Blob(blobArray, { type: "video/x-matroska;codecs=avc1,opus" });
  // Fix: revoke the previous object URL before replacing it — otherwise
  // every message leaks the prior Blob for the lifetime of the page.
  if (video.src) window.URL.revokeObjectURL(video.src);
  video.src = window.URL.createObjectURL(blob);
  video.currentTime = currentTime;
  video.play();
});
It works, but there is a problem: the video stops for some milliseconds at the point where a new blob is created and the URL is changed, and it is very noticeable.
Is there any better way?

js get base64 of partial live recording

I am trying to get a base64 string of the currently recorded voice to be sent to the server for further processing.
My approach is to push the base64 string into recordedChunks to be queued, then send it to the server.
// Queue of base64 strings to be sent to the server.
const recordedChunks = [];
var context = null;
var blob = null;

// getUserMedia success handler: plays the stream and taps its audio samples.
const handler = function(stream) {
  if (window.URL) {
    player.srcObject = stream;
  } else {
    //player.src = stream;
  }
  // Fix: assign the shared `context` declared above instead of shadowing it
  // with a block-local `const` (the outer variable was never set, so any
  // later use of it would have seen null).
  context = new AudioContext();
  const source = context.createMediaStreamSource(stream);
  let bufferSize = 1024;
  // NOTE(review): createScriptProcessor is deprecated in favour of
  // AudioWorkletNode — confirm target-browser requirements before relying on it.
  const processor = context.createScriptProcessor(bufferSize, 1, 1);
  source.connect(processor);
  processor.connect(context.destination);
  processor.onaudioprocess = function(e) {
    //e.inputBuffer
    // Do something with the data, e.g. convert it to mp3
    // How to get base64 of what has been recorded without stopping the recorder
    // recordedChunks.push('somePrerecodedBase64String');
  };
};

navigator.mediaDevices.getUserMedia({ audio: true, video: false })
  .then(handler);

RecordRTC works in Mac but not Windows

I am using Angular 4 to implement an application which records audio using RecordRTC. I have implemented this on Mac and it is working fine in both Safari and Chrome.
But same code is not working in Windows 10.
Here is my code :
private stream: MediaStream;
private recordRTC: any;
#ViewChild('video') video;
ngAfterViewInit() {
  // Configure the media element's initial playback state once the view exists.
  const mediaEl: HTMLAudioElement = this.video.nativeElement;
  mediaEl.muted = false;
  mediaEl.controls = true;
  mediaEl.autoplay = false;
}
toggleControls() {
  // Invert mute/controls/autoplay together when switching between
  // live-preview and playback modes.
  const mediaEl: HTMLAudioElement = this.video.nativeElement;
  mediaEl.muted = !mediaEl.muted;
  mediaEl.controls = !mediaEl.controls;
  mediaEl.autoplay = !mediaEl.autoplay;
}
successCallback(stream: MediaStream) {
  // Recording options; bitsPerSecond overrides the codec-specific settings.
  var options = {
    mimeType: 'audio/webm', // or video/webm\;codecs=h264 or video/webm\;codecs=vp9
    bitsPerSecond: 128000 // if this line is provided, skip above two
  };
  this.stream = stream;
  this.recordRTC = RecordRTC(stream, options);
  this.recordRTC.startRecording();
  let video: HTMLAudioElement = this.video.nativeElement;
  // Fix: URL.createObjectURL(MediaStream) was removed from Chrome (71+), so
  // the original `video.src = window.URL.createObjectURL(stream)` throws on
  // current Windows Chrome builds. Assigning the stream to srcObject is the
  // supported way to preview a live MediaStream.
  video.srcObject = stream;
  this.toggleControls();
}
errorCallback() {
// NOTE(review): getUserMedia failures are silently swallowed here —
// consider logging the error or surfacing it to the user.
//handle error here
}
processVideo(audioVideoWebMURL) {
  // Show the finished recording in the player and flip the controls back.
  const mediaEl: HTMLAudioElement = this.video.nativeElement;
  const recorder = this.recordRTC;
  mediaEl.src = audioVideoWebMURL;
  this.toggleControls();
  const recordedBlob = recorder.getBlob();
  recorder.getDataURL(function (dataURL) { });
}
startRecording() {
  // Request an audio-only capture; the bound callbacks keep `this` intact.
  const mediaConstraints = { audio: true };
  navigator.mediaDevices
    .getUserMedia(mediaConstraints)
    .then(this.successCallback.bind(this), this.errorCallback.bind(this));
}
stopRecording() {
  // Finalize the recording (processVideo receives the webm URL),
  // then release the microphone tracks.
  this.recordRTC.stopRecording(this.processVideo.bind(this));
  for (const track of this.stream.getAudioTracks()) {
    track.stop();
  }
}
How to make it work in windows?

How to use "segments" mode at SourceBuffer of MediaSource to render same result at Chromium, Chrome and Firefox?

Reference to my original question: How to use Blob URL, MediaSource or other methods to play concatenated Blobs of media fragments?
In lieu of the potential for deprecation of the "sequence" mode for multiple tracks, which the current code is using for both Chromium and Firefox browsers my additional questions are:
Which adjustments need to be made in my MediaSource code to render the same result in both Chromium and Firefox browsers — Firefox currently renders as expected using "segments" .mode?
Or, is there a bug in the implementation of multitrack support using Chromium browsers when SourceBuffer .mode is set to "segments"?
Background information
I have been able to record discrete media fragments using MediaRecorder, adding cues to the resulting webm file using ts-ebml and recording the discrete media fragments as a single media file using MediaSource with .mode of SourceBuffer set to "sequence" using both Chromium and Firefox browsers.
The Chromium issue at Monitor and potentially deprecate support for multitrack SourceBuffer support of 'sequence' AppendMode discusses "sequence" mode is being considered for deprecation for multitrack SourceBuffer objects. When asked in the original references question regarding how to implement the code using "segments" .mode (default AppendMode of SourceBuffer) the response was essentially that "segments" mode also supports multitrack input at SourceBuffer.
However, when trying code with .mode of SourceBuffer set to "segments" Chromium 60 only plays approximately one second, the first buffer of multiple appended buffers, of an expected ten second playback of recorded media fragments having cues set at webm file which is converted to ArrayBuffer and passed to .appendBuffer(), while Firefox renders same result when .mode is set to either "sequence" and "segments".
Code which renders expected result at both Chromium and Firefox. Note, Firefox does not play .mp4 at <video> element if multipleUrls is tried, though Firefox does support playing .mp4 at MediaSource when proper media codec is set.
<!DOCTYPE html>
<html>
<!-- recordMediaFragments.js demo https://github.com/guest271314/recordMediaFragments/tree/master/demos 2017 guest271314 -->
<head>
<!-- https://github.com/guest271314/recordMediaFragments/ts-ebml -->
</head>
<body>
<video width="320" height="280" controls="true"></video>
<script>
(async() => {
let request = await fetch("https://raw.githubusercontent.com/guest271314/recordMediaFragments/master/ts-ebml/ts-ebml-min.js");
let blob = await request.blob();
const script = document.createElement("script");
document.head.appendChild(script);
script.src = URL.createObjectURL(blob);
script.onload = () => {
const tsebml = require("ts-ebml");
const video = document.querySelector("video");
const videoStream = document.createElement("video");
// `MediaSource`
const mediaSource = new MediaSource();
// for firefox
// see https://bugzilla.mozilla.org/show_bug.cgi?id=1259788
const hasCaptureStream = HTMLMediaElement.prototype.hasOwnProperty("captureStream");
// handle firefox and chromium
const captureStream = mediaElement =>
!!mediaElement.mozCaptureStream
? mediaElement.mozCaptureStream()
: mediaElement.captureStream();
let currentFragmentURL, currentBlobURL, fragments;
videoStream.width = video.width;
videoStream.height = video.height;
const mimeCodec = "video/webm;codecs=vp8,opus";
// set to `.currentTime` of `videoStream` at `pause`
// to set next media fragment starting `.currentTime`
// if URL to be set at `.src` has same origin and pathname
let cursor = 0;
// https://gist.github.com/jsturgis/3b19447b304616f18657
// https://www.w3.org/2010/05/video/mediaevents.html
const multipleUrls = [
"https://media.w3.org/2010/05/sintel/trailer.mp4#t=0,5",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=55,60",
"https://raw.githubusercontent.com/w3c/web-platform-tests/master/media-source/mp4/test.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerBlazes.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerJoyrides.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerMeltdowns.mp4#t=0,6",
"https://media.w3.org/2010/05/video/movie_300.mp4#t=30,36"
];
const singleUrl = [
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=0,1",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=1,2",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=2,3",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=3,4",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=4,5",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=5,6",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=6,7",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=7,8",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=8,9",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=9,10"
];
const geckoUrl = [
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=10,11",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=11,12",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=12,13",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=13,14",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=14,15",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=15,16",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=16,17",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=17,18",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=18,19",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=19,20"
];
const mediaFragmentRecorder = async(urls) => {
// `ts-ebml`
const tsebmlTools = async() => ({
decoder: new tsebml.Decoder(),
encoder: new tsebml.Encoder(),
reader: new tsebml.Reader(),
tools: tsebml.tools
});
// create `ArrayBuffer` from `Blob`
// create `ArrayBuffer` from `Blob`
const readAsArrayBuffer = (blob) => {
  return new Promise((resolve, reject) => {
    const fr = new FileReader();
    fr.readAsArrayBuffer(blob);
    fr.onloadend = () => {
      resolve(fr.result);
    };
    // Fix: the failure reason lives on the reader itself (`fr.error`);
    // the error event has no `.error` property, so the original
    // `reject(ev.error)` always rejected with `undefined`.
    fr.onerror = () => {
      reject(fr.error);
    };
  });
}
// `urls`: string or array of URLs
// record each media fragment
const recordMediaFragments = async(video, mimeCodec, decoder, encoder, reader, tools, ...urls) => {
urls = [].concat(...urls);
const media = [];
for (let url of urls) {
await new Promise(async(resolve) => {
let mediaStream, recorder;
videoStream.onprogress = e => {
videoStream.onprogress = null;
console.log("loading " + url)
}
videoStream.oncanplay = async(e) => {
videoStream.oncanplay = null;
videoStream.play();
mediaStream = captureStream(videoStream);
console.log(mediaStream);
recorder = new MediaRecorder(mediaStream, {
mimeType: mimeCodec
});
recorder.ondataavailable = async(e) => {
// set metadata of recorded media fragment `Blob`
const mediaBlob = await setMediaMetadata(e.data);
// create `ArrayBuffer` of `Blob` of recorded media fragment
const mediaBuffer = await readAsArrayBuffer(mediaBlob);
const mediaDuration = videoStream.played.end(0) - videoStream.played.start(0);
const mediaFragmentId = currentFragmentURL || new URL(url);
const mediaFileName = mediaFragmentId.pathname.split("/").pop() + mediaFragmentId.hash;
const mediaFragmentType = "singleMediaFragment";
if (currentBlobURL) {
URL.revokeObjectURL(currentBlobURL);
}
media.push({
mediaBlob,
mediaBuffer,
mediaDuration,
mediaFragmentType,
mediaFileName
});
resolve();
}
recorder.start();
}
videoStream.onpause = e => {
videoStream.onpause = null;
cursor = videoStream.currentTime;
recorder.stop();
// stop `MediaStreamTrack`s
for (let track of mediaStream.getTracks()) {
track.stop();
}
}
currentFragmentURL = new URL(url);
// for firefox to load cross origin media without silence
if (!hasCaptureStream) {
console.log(currentFragmentURL);
request = new Request(currentFragmentURL.href);
blob = await fetch(request).then(response => response.blob());
console.log(blob);
currentBlobURL = URL.createObjectURL(blob);
// set next media fragment URL to `.currentTime` at `pause` event
// of previous media fragment if `url` has same `origin` and `pathname`
if (urls.indexOf(currentFragmentURL.href) > 0
&& new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).origin === currentFragmentURL.origin
&& new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).pathname === currentFragmentURL.pathname) {
if (cursor > 0) {
url = url = currentBlobURL + currentFragmentURL.hash.replace(/=\d+/, "=" + cursor);
console.log(url)
}
} else {
url = currentBlobURL + currentFragmentURL.hash;
}
} else {
if (cursor > 0
&& new URL(urls[urls.indexOf(url) - 1]).origin === currentFragmentURL.origin
&& new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).pathname === currentFragmentURL.pathname) {
url = url.replace(/=\d+/, "=" + cursor);
console.log(url)
}
}
videoStream.src = url;
}).catch(err => err)
}
return media
}
// set metadata of media `Blob`
// see https://github.com/legokichi/ts-ebml/issues/14#issuecomment-325200151
const setMediaMetadata = async(blob) =>
tsebmlTools()
.then(async({
decoder,
encoder,
tools,
reader
}) => {
let webM = new Blob([], {
type: "video/webm"
});
webM = new Blob([webM, blob], {
type: blob.type
});
const buf = await readAsArrayBuffer(blob);
const elms = decoder.decode(buf);
elms.forEach((elm) => {
reader.read(elm);
});
reader.stop();
const refinedMetadataBuf = tools.makeMetadataSeekable(reader.metadatas, reader.duration, reader.cues);
const webMBuf = await readAsArrayBuffer(webM);
const body = webMBuf.slice(reader.metadataSize);
const refinedWebM = new Blob([refinedMetadataBuf, body], {
type: webM.type
});
// close Blobs
if (webM.close && blob.close) {
webM.close();
blob.close();
}
return refinedWebM;
})
.catch(err => console.error(err));
let mediaTools = await tsebmlTools();
const {
decoder,
encoder,
reader,
tools
} = mediaTools;
const mediaFragments = await recordMediaFragments(video, mimeCodec, decoder, encoder, reader, tools, urls);
const recordedMedia = await new Promise((resolveAllMedia, rejectAllMedia) => {
console.log(decoder, encoder, tools, reader, mediaFragments);
let mediaStream, recorder;
mediaSource.onsourceended = e => {
console.log(video.buffered.start(0), video.buffered.end(0));
video.currentTime = video.buffered.start(0);
console.log(video.paused, video.readyState);
video.ontimeupdate = e => {
console.log(video.currentTime, mediaSource.duration);
if (video.currentTime >= mediaSource.duration) {
video.ontimeupdate = null;
video.oncanplay = null;
video.onwaiting = null;
if (recorder.state === "recording") {
recorder.stop();
}
console.log(e, recorder);
}
}
}
video.onended = (e) => {
video.onended = null;
console.log(e, video.currentTime,
mediaSource.duration);
}
video.oncanplay = e => {
console.log(e, video.duration, video.buffered.end(0));
video.play()
}
video.onwaiting = e => {
console.log(e, video.currentTime);
}
// record `MediaSource` playback of recorded media fragments
video.onplaying = async(e) => {
console.log(e);
video.onplaying = null;
mediaStream = captureStream(video);
if (!hasCaptureStream) {
videoStream.srcObject = mediaStream;
videoStream.play();
}
recorder = new MediaRecorder(mediaStream, {
mimeType: mimeCodec
});
console.log(recorder);
recorder.ondataavailable = async(e) => {
console.log(e);
const mediaFragmentsRecording = {};
mediaFragmentsRecording.mediaBlob = await setMediaMetadata(e.data);
mediaFragmentsRecording.mediaBuffer = await readAsArrayBuffer(mediaFragmentsRecording.mediaBlob);
mediaFragmentsRecording.mediaFileName = urls.map(url => {
const id = new URL(url);
return id.pathname.split("/").pop() + id.hash
}).join("-");
mediaFragmentsRecording.mediaFragmentType = "multipleMediaFragments";
// `<video>` to play concatened media fragments
// recorded from playback of `MediaSource`
fragments = document.createElement("video");
fragments.id = "fragments";
fragments.width = video.width;
fragments.height = video.height;
fragments.controls = true;
fragments.onloadedmetadata = () => {
fragments.onloadedmetadata = null;
mediaFragmentsRecording.mediaDuration = fragments.duration;
URL.revokeObjectURL(currentBlobURL);
// stop `MediaStreamTrack`s
for (let track of mediaStream.getTracks()) {
track.stop();
}
resolveAllMedia([
...mediaFragments, mediaFragmentsRecording
]);
}
currentBlobURL = URL.createObjectURL(mediaFragmentsRecording.mediaBlob);
fragments.src = currentBlobURL;
document.body.appendChild(fragments);
}
recorder.start();
}
video.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener("sourceopen", sourceOpen);
// Append all recorded media fragments to the MediaSource once it opens.
async function sourceOpen(e) {
if (MediaSource.isTypeSupported(mimeCodec)) {
const sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
// "segments" is the default AppendMode; this is the mode whose
// Chromium-vs-Firefox rendering difference the question is about.
sourceBuffer.mode = "segments";
// Append each fragment sequentially, advancing timestampOffset by the
// fragment's duration so the next buffer starts where this one ended.
for (let {
mediaBuffer,
mediaDuration
} of mediaFragments) {
await new Promise((resolveUpdatedMediaSource) => {
sourceBuffer.onupdateend = async(e) => {
sourceBuffer.onupdateend = null;
console.log(e, mediaDuration, mediaSource.duration
, video.paused, video.ended, video.currentTime
, "media source playing", video.readyState);
// https://bugzilla.mozilla.org/show_bug.cgi?id=1400587
// https://bugs.chromium.org/p/chromium/issues/detail?id=766002&q=label%3AMSEptsdtsCleanup
// Adjusting timestampOffset can throw (see bug links above);
// resolve either way so the append loop always makes progress.
try {
sourceBuffer.timestampOffset += mediaDuration;
resolveUpdatedMediaSource();
} catch (err) {
console.error(err);
resolveUpdatedMediaSource();
}
}
sourceBuffer.appendBuffer(mediaBuffer);
})
}
mediaSource.endOfStream()
} else {
console.warn(mimeCodec + " not supported");
}
};
})
return recordedMedia
};
mediaFragmentRecorder(geckoUrl)
.then(recordedMediaFragments => {
// do stuff with recorded media fragments
console.log(recordedMediaFragments);
const select = document.createElement("select");
for (let {
mediaFileName,
mediaBlob,
mediaFragmentType
} of Object.values(recordedMediaFragments)) {
const option = new Option(mediaFileName, URL.createObjectURL(mediaBlob));
select.appendChild(option);
}
select.onchange = () => {
document.getElementById("fragments").src = select.value;
}
video.parentNode.insertBefore(select, video);
video.controls = true;
video.currentTime = video.buffered.start(0);
})
.catch(err => console.error(err));
}
})()
</script>
</body>
</html>

Categories

Resources