WebRTC mixing two audio streams with AudioContext - javascript

I'm using mediasoup and webrtc to create media streams.
async consume(transport) {
const { rtpCapabilities } = this.device;
const data = await this.socket.request('consume', { rtpCapabilities });
const {
producerId,
id,
kind,
rtpParameters,
} = data;
//Get playerID
let playerID = await this.socket.request('getPlayerID', id);
playerID = parseInt(playerID, 10);
let producerIDS = await this.socket.request('getProducerIDS', id);
let consumerIDS = await this.socket.request('getConsumerIDS', id);
console.log("producerIDS", producerIDS);
console.log("consumerIDS", consumerIDS);
let codecOptions = {};
const consumer = await transport.consume({
id: id,
producerId: producerId,
kind: kind,
rtpParameters: rtpParameters,
codecOptions: codecOptions,
});
console.log("First producer ", producerIDS[0]);
console.log("Current producer ", producerId);
console.log("Consumer ID", id)
console.log("Player ID:", playerID);
console.log("Consumer1", consumer, consumer.track);
//stream.addTrack(consumer.track);
let audioTracks = [];
audioTracks.push(consumer.track);
if (playerID === 2 && producerIDS.length === 2) {
const data1 = await this.socket.request('consume2', { rtpCapabilities });
console.log("Data: ", data);
console.log("producerId: ", data.producerId);
const consumer2 = await transport.consume({
id: data1.id,
producerId: data1.producerId,
kind: data1.kind,
rtpParameters: data1.rtpParameters,
codecOptions: codecOptions,
});
console.log("Second producer", data1.producerId);
console.log("Consumer2", consumer2, consumer2.track);
audioTracks.push(consumer2.track);
//stream.addTrack(consumer2.track);
}
console.log("Audio Tracks: ", audioTracks);
//const sources = audioTracks.map(t => ac.createMediaStreamSource(new MediaStream([t])));
//var dest = ac.createMediaStreamDestination();
//var aud1 = ac.createMediaStreamSource(s1);
//aud1.connect(dest);
//sources.forEach(s => s.connect(dest));
//console.log(dest.stream.getAudioTracks());
//stream.addTrack(dest.stream.getAudioTracks()[0]);
//stream = dest.stream;
/*
let stream = new MediaStream();
if (audioTracks.length <= 1) {
stream.addTrack(audioTracks[0]);
}
else {
stream.addTrack(audioTracks[1]);
}
*/
let stream = new MediaStream();
if (audioTracks.length <= 1) {
stream.addTrack(audioTracks[0]);
}
else {
const ac = new AudioContext();
const dest = ac.createMediaStreamDestination();
let aud1stream = new MediaStream();
aud1stream.addTrack(audioTracks[0]);
let aud2stream = new MediaStream();
aud2stream.addTrack(audioTracks[1]);
const aud1 = ac.createMediaStreamSource(aud1stream);
const aud2 = ac.createMediaStreamSource(aud2stream);
var gain = ac.createGain();
gain.gain.value = 10;
gain.connect(dest);
aud1.connect(dest);
aud2.connect(dest);
stream = dest.stream;
}
console.log("Stream tracks: ", stream.getAudioTracks());
return stream;
}
I'm attempting to mix two streams together here.
// NOTE(review): this fragment duplicates the mixing branch of consume() and is
// quoted for discussion only — it is not standalone code (no matching `if`).
else {
const ac = new AudioContext();
const dest = ac.createMediaStreamDestination();
// Each remote track is wrapped in its own MediaStream so it can serve as a
// MediaStreamAudioSourceNode input.
let aud1stream = new MediaStream();
aud1stream.addTrack(audioTracks[0]);
let aud2stream = new MediaStream();
aud2stream.addTrack(audioTracks[1]);
const aud1 = ac.createMediaStreamSource(aud1stream);
const aud2 = ac.createMediaStreamSource(aud2stream);
// NOTE(review): this gain node is dangling — it connects to `dest`, but no
// source is routed through it, so `gain.gain.value = 10` never affects the
// mix; both sources bypass it and connect straight to `dest` below.
var gain = ac.createGain();
gain.gain.value = 10;
gain.connect(dest);
aud1.connect(dest);
aud2.connect(dest);
// The destination node's stream becomes the mixed output.
stream = dest.stream;
}
However, the stream indicates that it is streaming, but no audio can be heard. I'd appreciate any help — all I want is to mix the two streams. If AudioContext doesn't work, does anyone have advice on using GStreamer to mix the audio, or some other approach? Thanks.

Related

where to add my websocket code in javascript

I am very new to javaScript, I know some basics but have not yet completely understood the complete logics behind it (so far I have only worked with Python and a little bit of VBA)
For uni I have to build a browser interface to record audio and transfer it to a server where a Speech to text application runs. I found some opensource code here (https://github.com/mdn/dom-examples/blob/main/media/web-dictaphone/scripts/app.js) which I wanted to use, but is missing the websocket part. Now I don't know, where exactly to insert that. So far I have this:
code of the Webdictaphone:
// --- Web Dictaphone (MDN demo): record microphone audio via MediaRecorder
// and append each finished recording as a playable, deletable clip. ---
// NOTE(review): `visualize()` is defined elsewhere in the original demo and is
// not visible here.
// set up basic variables for app
const record = document.querySelector('.record');
const stop = document.querySelector('.stop');
const soundClips = document.querySelector('.sound-clips');
const canvas = document.querySelector('.visualizer');
const mainSection = document.querySelector('.main-controls');
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
const constraints = { audio: true };
// Chunks of recorded data accumulated between start and stop.
let chunks = [];
// Wire up the recorder and button handlers once the mic stream is granted.
let onSuccess = function(stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
}
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
}
// Build the clip UI (label, <audio>, delete button) from the recorded chunks.
mediaRecorder.onstop = function(e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt('Enter a name for your sound clip?','My unnamed clip');
const clipContainer = document.createElement('article');
const clipLabel = document.createElement('p');
const audio = document.createElement('audio');
const deleteButton = document.createElement('button');
clipContainer.classList.add('clip');
audio.setAttribute('controls', '');
deleteButton.textContent = 'Delete';
deleteButton.className = 'delete';
if(clipName === null) {
clipLabel.textContent = 'My unnamed clip';
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
// Combine the recorded chunks into one playable blob and reset the buffer.
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function(e) {
e.target.closest(".clip").remove();
}
// Clicking the label prompts for a rename; cancel keeps the old name.
clipLabel.onclick = function() {
const existingName = clipLabel.textContent;
const newClipName = prompt('Enter a new name for your sound clip?');
if(newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
}
}
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
}
let onError = function(err) {
console.log('The following error occured: ' + err);
}
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log('getUserMedia not supported on your browser!');
}
websocket part (client side):
// Open the WebSocket once the DOM is ready and expose a send helper that
// other scripts (e.g. the recorder's ondataavailable handler) can call.
window.addEventListener("DOMContentLoaded", () => {
  console.log('DOMContentLoaded done');
  const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
  // BUG FIX: `dataToBeSent = ...` created an implicit global, which throws a
  // ReferenceError in strict mode / ES modules. Assigning to `window` keeps
  // the helper globally reachable but makes the global deliberate.
  window.dataToBeSent = function (data) {
    ws.send(data);
  };
  console.log('ws is defined');
})
Right now I just stacked both of the parts on top of each other, but this doesn't work, since, as I found out, you can only define and use variables (such as ws) within a block. This leads to an error that says that ws is not defined when I call the sending function within the if-statement.
I already tried looking for tutorials for hours, but none that I found covered this topic. I also tried moving the WebSocket part into the if-statement, but that also did not — unsurprisingly — work, at least not in the way that I tried.
I feel like my problem lays in understanding how to define the websocket so I can call it within the if statement, or figure out a way to somehow get the audio somewhere where ws is considered to be defined. Unfortunately I just don't get behind it and already invested days which has become really frustrating.
I appreciate any help. If you have any ideas what I could change or move in the code or maybe just know any tutorial that could help, I'd be really grateful.
Thanks in advance!
You don't need that window.addEventListener("DOMContentLoaded", () => { part
// --- Answer code: WebSocket opened at top level, followed by the dictaphone
// recorder, so `ws`/`dataToBeSent` are in scope for the recorder handlers. ---
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
// NOTE(review): `dataToBeSent` is assigned without const/let — an implicit
// global. It works in sloppy-mode scripts but throws in strict mode/modules.
dataToBeSent = function (data) {
ws.send(data);
};
const record = document.querySelector(".record");
const stop = document.querySelector(".stop");
const soundClips = document.querySelector(".sound-clips");
const canvas = document.querySelector(".visualizer");
const mainSection = document.querySelector(".main-controls");
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log("getUserMedia supported.");
const constraints = { audio: true };
// Recorded data chunks accumulated between start and stop.
let chunks = [];
// NOTE(review): `visualize()` comes from the original MDN demo and is not
// visible in this excerpt.
let onSuccess = function (stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function () {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
};
stop.onclick = function () {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
};
// Build the clip UI from the accumulated chunks once recording stops.
mediaRecorder.onstop = function (e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt(
"Enter a name for your sound clip?",
"My unnamed clip"
);
const clipContainer = document.createElement("article");
const clipLabel = document.createElement("p");
const audio = document.createElement("audio");
const deleteButton = document.createElement("button");
clipContainer.classList.add("clip");
audio.setAttribute("controls", "");
deleteButton.textContent = "Delete";
deleteButton.className = "delete";
if (clipName === null) {
clipLabel.textContent = "My unnamed clip";
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function (e) {
e.target.closest(".clip").remove();
};
clipLabel.onclick = function () {
const existingName = clipLabel.textContent;
const newClipName = prompt("Enter a new name for your sound clip?");
if (newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
};
};
mediaRecorder.ondataavailable = function (e) {
chunks.push(e.data);
};
};
let onError = function (err) {
console.log("The following error occured: " + err);
};
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log("getUserMedia not supported on your browser!");
}

How to make oscillator loudness match the loudness of a sample?

I want a tone from an oscillator to play under an audio clip of a song. How do I make the tone's loudness match the loudness of the sample? I want the tone to be quieter when the song is quieter, and the tone to be louder at the louder parts. I have used createAnalyser to detect the volume of the sample, but the code does not work.
// Play a sample and a sine tone through a shared master gain, with the tone's
// level following the sample's measured loudness.
const audioContext = new AudioContext();
const audiodiv = document.querySelector('.audiodiv');

// One second of white noise (kept from the original; currently unused).
const buffer = audioContext.createBuffer(
  1,
  audioContext.sampleRate * 1,
  audioContext.sampleRate
);
const channelData = buffer.getChannelData(0);
for (let i = 0; i < buffer.length; i++) {
  channelData[i] = Math.random() * 2 - 1;
}

// Master output: everything routes through this gain to the speakers.
const primaryGainControl = audioContext.createGain();
primaryGainControl.gain.setValueAtTime(0.005, 0);
primaryGainControl.connect(audioContext.destination);

// BUG FIX: the analyser was created inside the sample click handler and never
// connected to anything, and `volume` was set to `frequencyBinCount` (a
// constant bin count, not a level). Create one shared analyser and actually
// route the sample through it so it observes the signal.
const analyzer = audioContext.createAnalyser();
analyzer.fftSize = 2048;

const samplebutton = document.createElement('button');
samplebutton.innerText = 'sample';
samplebutton.addEventListener('click', async () => {
  const response = await fetch('testsong.wav');
  const soundBuffer = await response.arrayBuffer();
  const sampleBuffer = await audioContext.decodeAudioData(soundBuffer);
  const sampleSource = audioContext.createBufferSource();
  sampleSource.buffer = sampleBuffer;
  sampleSource.playbackRate.setValueAtTime(1, 0);
  // sample -> analyser -> master gain, so loudness can be measured.
  sampleSource.connect(analyzer);
  analyzer.connect(primaryGainControl);
  sampleSource.start();
});
audiodiv.appendChild(samplebutton);

// Estimate the sample's current loudness as the RMS of the analyser's
// time-domain data (roughly 0..1).
function currentLevel() {
  const samples = new Float32Array(analyzer.fftSize);
  analyzer.getFloatTimeDomainData(samples);
  let sumOfSquares = 0;
  for (const s of samples) sumOfSquares += s * s;
  return Math.sqrt(sumOfSquares / samples.length);
}

const toneButton = document.createElement('button');
toneButton.innerText = "tone";
toneButton.addEventListener("click", () => {
  // BUG FIX: `oscGain` was referenced before its `const` declaration, which
  // throws a TDZ ReferenceError; create it before connecting the oscillator.
  const oscGain = audioContext.createGain();
  oscGain.gain.value = 0;
  oscGain.connect(primaryGainControl);
  const toneOscillator = audioContext.createOscillator();
  toneOscillator.frequency.setValueAtTime(150, 0);
  toneOscillator.type = "sine";
  toneOscillator.connect(oscGain);
  toneOscillator.start();
  toneOscillator.stop(audioContext.currentTime + 5);
  // While the tone plays, keep nudging its gain toward the sample's level so
  // the tone is quiet when the song is quiet and louder at the loud parts.
  const follower = setInterval(() => {
    oscGain.gain.setTargetAtTime(currentLevel(), audioContext.currentTime, 0.05);
  }, 50);
  toneOscillator.onended = () => clearInterval(follower);
});
audiodiv.appendChild(toneButton);

WebRTC, getDisplayMedia() does not capture sound from the remote stream

I have a web application of my own, which is based on the peerjs library (It is a video conference).
I'm trying to make a recording with 'MediaRecorder', but I'm facing a very unpleasant case.
The code for capturing my desktop stream is the following:
let chooseScreen = document.querySelector('.chooseScreenBtn')
chooseScreen.onclick = async () => {
let desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: true });
}
I then successfully apply my received desktopStream to videoElement in DOM:
const videoElement = doc.querySelector('.videoElement')
videoElement.srcObject = desktopStream
videoElement.muted = false;
videoElement.onloadedmetadata = ()=>{videoElement.play();}
For example, I get desktopStream on the page with an active conference where everyone hears and sees each other.
To check the video and audio in desktopStream I play some video on the video player on the desktop.
I can hear any audio from my desktop but audio from any participant cannot be heard.
Of course, when I put the desktopStream in MediaRecorder I get a video file with no sound from anyone except my desktop. Any ideas on how to solve it?
Chrome's MediaRecorder API can only output one track.
The createMediaStreamSource can take streams from desktop audio and microphone, by connecting both together into one object created by createMediaStreamDestination it gives you the ability to pipe this one stream into the MediaRecorder API.
// Mix the audio of two MediaStreams into a single set of audio tracks.
// Each input is attenuated to 0.7 before being summed at the destination,
// so the combined mix can be fed to a single-track consumer (MediaRecorder).
const mergeAudioStreams = (desktopStream, voiceStream) => {
  const ctx = new AudioContext();
  const mixed = ctx.createMediaStreamDestination();

  // Route each input through its own gain stage into the shared destination.
  for (const inputStream of [desktopStream, voiceStream]) {
    const attenuation = ctx.createGain();
    attenuation.gain.value = 0.7;
    ctx.createMediaStreamSource(inputStream).connect(attenuation).connect(mixed);
  }

  return mixed.stream.getAudioTracks();
};
It is also possible to use two or more audio inputs + video input.
// Screen + mic recorder demo: capture the display, optionally merge desktop
// and microphone audio into one track, record with MediaRecorder, and offer
// the result as a downloadable webm file.
window.onload = () => {
const warningEl = document.getElementById('warning');
const videoElement = document.getElementById('videoElement');
const captureBtn = document.getElementById('captureBtn');
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const download = document.getElementById('download');
const audioToggle = document.getElementById('audioToggle');
const micAudioToggle = document.getElementById('micAudioToggle');
if('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';
// Recorder state shared across the button handlers below.
let blobs;
let blob;
let rec;
let stream;
let voiceStream;
let desktopStream;
// Mix desktop + voice audio into one track set; either input is optional.
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
const destination = context.createMediaStreamDestination();
let hasDesktop = false;
let hasVoice = false;
if (desktopStream && desktopStream.getAudioTracks().length > 0) {
// If you don't want to share Audio from the desktop it should still work with just the voice.
const source1 = context.createMediaStreamSource(desktopStream);
const desktopGain = context.createGain();
desktopGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
hasDesktop = true;
}
if (voiceStream && voiceStream.getAudioTracks().length > 0) {
const source2 = context.createMediaStreamSource(voiceStream);
const voiceGain = context.createGain();
voiceGain.gain.value = 0.7;
source2.connect(voiceGain).connect(destination);
hasVoice = true;
}
// Return no tracks at all if neither input carried audio.
return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
};
// Prompt for screen (and optionally mic) capture and build the combined
// stream: desktop video + merged audio.
captureBtn.onclick = async () => {
download.style.display = 'none';
const audio = audioToggle.checked || false;
const mic = micAudioToggle.checked || false;
desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: audio });
if (mic === true) {
voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
}
const tracks = [
...desktopStream.getVideoTracks(),
...mergeAudioStreams(desktopStream, voiceStream)
];
console.log('Tracks to add to stream', tracks);
stream = new MediaStream(tracks);
console.log('Stream', stream)
videoElement.srcObject = stream;
// Muted preview to avoid feedback while recording.
videoElement.muted = true;
blobs = [];
rec = new MediaRecorder(stream, {mimeType: 'video/webm; codecs=vp8,opus'});
rec.ondataavailable = (e) => blobs.push(e.data);
// When recording stops, expose the result as a download link.
rec.onstop = async () => {
blob = new Blob(blobs, {type: 'video/webm'});
let url = window.URL.createObjectURL(blob);
download.href = url;
download.download = 'test.webm';
download.style.display = 'block';
};
startBtn.disabled = false;
captureBtn.disabled = true;
audioToggle.disabled = true;
micAudioToggle.disabled = true;
};
startBtn.onclick = () => {
startBtn.disabled = true;
stopBtn.disabled = false;
rec.start();
};
// Stop recording, release all tracks, and reset the UI for a new capture.
stopBtn.onclick = () => {
captureBtn.disabled = false;
audioToggle.disabled = false;
micAudioToggle.disabled = false;
startBtn.disabled = true;
stopBtn.disabled = true;
rec.stop();
stream.getTracks().forEach(s=>s.stop())
videoElement.srcObject = null
stream = null;
};
};
Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.

high quality media recorder from canvas 30 fps at 1080p

I have a canvas app that currently captures images of the canvas and compiles a video that is sent to ffmpeg, which then outputs the video format of their choice. The problem is it's super slow! Not on the video conversion, but on the compiling of the actual frames: you see, I have to pause the video and the animation and take a screenshot of the canvas. So rather than taking screenshots, I was thinking about using MediaRecorder and canvas.captureStream. I am able to get video output, but the quality is really low and the video keeps dropping frames. I need the frame rate to be at least 30 fps or higher and the quality to be high. Here's my record function:
// Record the canvas at 30 fps with MediaRecorder while the animation, the
// source video, and the lottie layers play; on stop, append the resulting
// video to the document for preview.
// NOTE(review): assumes state.canvas, state.anim, state.video and the global
// `lottie` are initialized elsewhere — confirm against the store setup.
async [RECORD] ({state}) {
state.videoOutputURL = null;
state.outputVideo = document.createElement("video");
const videoStream = state.canvas.captureStream(30);
const mediaRecorder = new MediaRecorder(videoStream);
mediaRecorder.ondataavailable = function(e) {
state.captures.push(e.data);
};
// Assemble the captured chunks into a playable blob once recording stops.
mediaRecorder.onstop = function(e) {
const blob = new Blob(state.captures);
state.captures = [];
const videoURL = URL.createObjectURL(blob);
state.outputVideo.src = videoURL;
state.outputVideo.width = 1280;
state.outputVideo.height = 720;
document.body.append(state.outputVideo);
};
mediaRecorder.start();
state.anim.start();
state.video.play();
lottie.play();
// Recording length is driven by the source video's duration.
state.video.addEventListener("ended", async () => {
mediaRecorder.stop();
});
}
The best way I found to do this was to actually pause the video on a canvas and use canvas.toDataURL to take screenshots. I compile the screenshots into a video with a library called Whammy and send that over to FFmpeg to rip the final content. The following code should give a pretty good idea
async [TAKE_SCREENSHOT]({ state, dispatch }) {
let seekResolve;
if (!state.ended && state.video) {
state.video.addEventListener("seeked", async () => {
if (seekResolve) seekResolve();
});
await new Promise(async (resolve, reject) => {
if (state.animations.length) {
dispatch(PAUSE_LOTTIES);
}
dispatch(PAUSE_VIDEO);
await new Promise(r => (seekResolve = r));
if (state.layer) {
state.layer.draw();
}
if (state.canvas) {
state.captures.push(state.canvas.toDataURL("image/webp"));
}
resolve();
dispatch(TAKE_SCREENSHOT);
});
}
},
async [PAUSE_VIDEO]({ state, dispatch, commit }) {
state.video.pause();
const oneFrame = 1 / 30;
if (state.video.currentTime + oneFrame < state.video.duration) {
state.video.currentTime += oneFrame;
const percent = `${Math.round(
(state.video.currentTime / state.video.duration) * 100
)}%`;
commit(SET_MODAL_STATUS, percent);
} else {
commit(SET_MODAL_STATUS, "Uploading your video");
state.video.play();
state.ended = true;
await dispatch(GENERATE_VIDEO);
}
},
async [PAUSE_LOTTIES]({ state }) {
for (let i = 0; i < state.animations.length; i++) {
let step = 0;
let animation = state.animations[i].lottie;
if (animation.currentFrame <= animation.totalFrames) {
step = animation.currentFrame + 1;
}
await lottie.goToAndStop(step, true, animation.name);
}
},
// Compile the captured frames into a webm with Whammy, upload it, request
// server-side conversion to the user's chosen format, then route the user and
// show a status modal. Heavily coupled to the Vuex root state and several
// sibling actions (UPLOAD_TEMP_FILE, CONVERT_FILE_TYPE, etc.).
async [GENERATE_VIDEO]({ state, rootState, dispatch, commit }) {
let status;
state.editingZoom = null;
const username =
rootState.user.currentUser.username;
const email = rootState.user.currentUser.email || rootState.user.guestEmail;
const name = rootState.user.currentUser.firstName || "guest";
const s3Id = rootState.templates.currentVideo.stock_s3_id || state.s3Id;
const type = rootState.dataClay.fileFormat || state.type;
// Build a 30 fps webm from the captured data-URL frames.
const vid = new Whammy.fromImageArray(state.captures, 30);
vid.lastModifiedDate = new Date();
vid.name = "canvasVideo.webm";
const data = new FormData();
// Upload id is unique per user and timestamp.
const id = `${username}_${new Date().getTime()}`;
data.append("id", id);
data.append("upload", vid);
let projectId,
fileName,
matrix = null;
// Editor mode reuses the upload id as the project id; otherwise a new
// project record is created first.
if (!state.editorMode) {
projectId = await dispatch(INSERT_PROJECT);
fileName = `${rootState.dataClay.projectName}.${type}`;
matrix = rootState.dataClay.matrix[0];
} else {
matrix = rootState.canvasSidebarMenu.selectedDisplay;
projectId = id;
fileName = `${id}.${type}`;
}
if (projectId || state.editorMode) {
await dispatch(UPLOAD_TEMP_FILE, data);
// Ask the server to convert the uploaded webm to the target format; `key`
// identifies the converted file (falsy on failure).
const key = await dispatch(CONVERT_FILE_TYPE, {
id,
username,
type,
projectId,
matrix,
name,
email,
editorMode: state.editorMode
});
const role = rootState.user.currentUser.role;
state.file = `/api/files/${key}`;
let message;
let title = "Your video is ready";
status = "rendered";
// Choose the modal message by outcome and user role.
if (!key) {
status = "failed";
message =
"<p class='error'>Error processing video! If error continues please contact Creative Group. We are sorry for any inconvenience.</p>";
title = "Error!";
} else if (!rootState.user.currentUser.id) {
message = `<p>Your video is ready. Signup for more great content!</p> <a href="${
state.file
}" download="${fileName}" class="btn btn-primary btn-block">Download</a>`;
} else if (role != "banner") {
message = `<p>Your video is ready.</p> <a href="${
state.file
}" download="${fileName}" class="btn btn-primary btn-block">Download</a>`;
} else {
// Banner users get the file pushed to their banner account instead.
message = `<p>Your video is ready. You may download your file from your banner account</p>`;
await dispatch(EXPORT_TO_BANNER, {
s3Id,
fileUrl: key,
extension: `.${type}`,
resolution: matrix
});
}
if (state.editorMode) {
await dispatch(SAVE_CANVAS, { status, fileId: projectId });
}
// Reset playback/recording state and clean up the in-progress project.
state.video.loop = "loop";
state.anim.stop();
state.video.pause();
lottie.unfreeze();
await dispatch(DELETE_PROJECT_IN_PROGRESS);
commit(RESET_PROJECT_IN_PROGRESS);
commit(RESET_CANVAS);
// Signed-in users land on their projects; guests are sent to pricing.
if (rootState.user.currentUser.id) {
router.push("/account/projects");
} else {
router.push("/pricing");
}
dispatch(SHOW_MODAL, {
name: "message",
title,
message
});
} else {
// No project could be created: refresh templates and clean up.
await dispatch(FETCH_ALL_PUBLISHED_TEMPLATES);
await dispatch(DELETE_PROJECT_IN_PROGRESS);
commit(RESET_PROJECT_IN_PROGRESS);
commit(RESET_CANVAS);
}
},

How to use "segments" mode at SourceBuffer of MediaSource to render same result at Chomium, Chorme and Firefox?

Reference to my original question: How to use Blob URL, MediaSource or other methods to play concatenated Blobs of media fragments?
In lieu of the potential for deprecation of the "sequence" mode for multiple tracks, which the current code is using for both Chromium and Firefox browsers my additional questions are:
Which adjustments need to be made in my MediaSource code to render the same result using both Chromium which Firefox browsers - currently renders as expected using "segments" .mode?
Or, is there a bug in the implementation of multitrack support using Chromium browsers when SourceBuffer .mode is set to "segments"?
Background information
I have been able to record discrete media fragments using MediaRecorder, adding cues to the resulting webm file using ts-ebml and recording the discrete media fragments as a single media file using MediaSource with .mode of SourceBuffer set to "sequence" using both Chromium and Firefox browsers.
The Chromium issue at Monitor and potentially deprecate support for multitrack SourceBuffer support of 'sequence' AppendMode discusses "sequence" mode is being considered for deprecation for multitrack SourceBuffer objects. When asked in the original references question regarding how to implement the code using "segments" .mode (default AppendMode of SourceBuffer) the response was essentially that "segments" mode also supports multitrack input at SourceBuffer.
However, when trying code with .mode of SourceBuffer set to "segments" Chromium 60 only plays approximately one second, the first buffer of multiple appended buffers, of an expected ten second playback of recorded media fragments having cues set at webm file which is converted to ArrayBuffer and passed to .appendBuffer(), while Firefox renders same result when .mode is set to either "sequence" and "segments".
Code which renders expected result at both Chromium and Firefox. Note, Firefox does not play .mp4 at <video> element if multipleUrls is tried, though Firefox does support playing .mp4 at MediaSource when proper media codec is set.
<!DOCTYPE html>
<html>
<!-- recordMediaFragments.js demo https://github.com/guest271314/recordMediaFragments/tree/master/demos 2017 guest271314 -->
<head>
<!-- https://github.com/guest271314/recordMediaFragments/ts-ebml -->
</head>
<body>
<video width="320" height="280" controls="true"></video>
<script>
(async() => {
let request = await fetch("https://raw.githubusercontent.com/guest271314/recordMediaFragments/master/ts-ebml/ts-ebml-min.js");
let blob = await request.blob();
const script = document.createElement("script");
document.head.appendChild(script);
script.src = URL.createObjectURL(blob);
script.onload = () => {
const tsebml = require("ts-ebml");
const video = document.querySelector("video");
const videoStream = document.createElement("video");
// `MediaSource`
const mediaSource = new MediaSource();
// for firefox
// see https://bugzilla.mozilla.org/show_bug.cgi?id=1259788
const hasCaptureStream = HTMLMediaElement.prototype.hasOwnProperty("captureStream");
// handle firefox and chromium
const captureStream = mediaElement =>
!!mediaElement.mozCaptureStream
? mediaElement.mozCaptureStream()
: mediaElement.captureStream();
let currentFragmentURL, currentBlobURL, fragments;
videoStream.width = video.width;
videoStream.height = video.height;
const mimeCodec = "video/webm;codecs=vp8,opus";
// set to `.currentTime` of `videoStream` at `pause`
// to set next media fragment starting `.currentTime`
// if URL to be set at `.src` has same origin and pathname
let cursor = 0;
// https://gist.github.com/jsturgis/3b19447b304616f18657
// https://www.w3.org/2010/05/video/mediaevents.html
const multipleUrls = [
"https://media.w3.org/2010/05/sintel/trailer.mp4#t=0,5",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=55,60",
"https://raw.githubusercontent.com/w3c/web-platform-tests/master/media-source/mp4/test.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerBlazes.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerJoyrides.mp4#t=0,5",
"https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerMeltdowns.mp4#t=0,6",
"https://media.w3.org/2010/05/video/movie_300.mp4#t=30,36"
];
const singleUrl = [
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=0,1",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=1,2",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=2,3",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=3,4",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=4,5",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=5,6",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=6,7",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=7,8",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=8,9",
"https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4#t=9,10"
];
const geckoUrl = [
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=10,11",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=11,12",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=12,13",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=13,14",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=14,15",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=15,16",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=16,17",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=17,18",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=18,19",
"https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=19,20"
];
const mediaFragmentRecorder = async(urls) => {
// `ts-ebml`
const tsebmlTools = async() => ({
decoder: new tsebml.Decoder(),
encoder: new tsebml.Encoder(),
reader: new tsebml.Reader(),
tools: tsebml.tools
});
// create `ArrayBuffer` from `Blob`
const readAsArrayBuffer = (blob) => {
return new Promise((resolve, reject) => {
const fr = new FileReader();
fr.readAsArrayBuffer(blob);
fr.onloadend = () => {
resolve(fr.result);
};
fr.onerror = (ev) => {
reject(ev.error);
};
});
}
// `urls`: string or array of URLs
// record each media fragment
// Record each media-fragment URL in turn by loading it into the outer-scope
// `videoStream` element, playing it, and capturing the playback with a
// MediaRecorder.
//
// @param {HTMLVideoElement} video - accepted for interface compatibility; unused here.
// @param {string} mimeCodec - MIME type/codec string for MediaRecorder.
// @param decoder,encoder,reader,tools - ts-ebml helpers; accepted but unused
//   in this function (kept so existing call sites do not change).
// @param {...(string|string[])} urls - fragment URLs, flat or nested.
// @returns {Promise<Array>} one entry per fragment:
//   { mediaBlob, mediaBuffer, mediaDuration, mediaFragmentType, mediaFileName }.
//
// NOTE(review): relies on outer-scope mutable state — `videoStream`,
// `captureStream`, `hasCaptureStream`, `cursor`, `currentFragmentURL`,
// `currentBlobURL` — and assigns undeclared `request`/`blob` (presumably
// declared earlier in the file; verify they are not implicit globals).
const recordMediaFragments = async (video, mimeCodec, decoder, encoder, reader, tools, ...urls) => {
  // Flatten: accepts either a single array argument or varargs.
  urls = [].concat(...urls);
  const media = [];
  for (let url of urls) {
    // NOTE(review): async promise executor — an exception thrown inside it
    // would not reject this promise; behavior preserved from the original.
    await new Promise(async (resolve) => {
      let mediaStream, recorder;
      // One-shot progress handler, logging only.
      videoStream.onprogress = e => {
        videoStream.onprogress = null;
        console.log("loading " + url);
      };
      // Start playback and recording once the fragment can play.
      videoStream.oncanplay = async (e) => {
        videoStream.oncanplay = null;
        videoStream.play();
        mediaStream = captureStream(videoStream);
        console.log(mediaStream);
        recorder = new MediaRecorder(mediaStream, {
          mimeType: mimeCodec
        });
        recorder.ondataavailable = async (e) => {
          // set metadata of recorded media fragment `Blob`
          const mediaBlob = await setMediaMetadata(e.data);
          // create `ArrayBuffer` of `Blob` of recorded media fragment
          const mediaBuffer = await readAsArrayBuffer(mediaBlob);
          // Wall-clock span actually played for this fragment.
          const mediaDuration = videoStream.played.end(0) - videoStream.played.start(0);
          const mediaFragmentId = currentFragmentURL || new URL(url);
          // e.g. "ScienceCommonsJesseDylan_240p.webm#t=10,11"
          const mediaFileName = mediaFragmentId.pathname.split("/").pop() + mediaFragmentId.hash;
          const mediaFragmentType = "singleMediaFragment";
          if (currentBlobURL) {
            URL.revokeObjectURL(currentBlobURL);
          }
          media.push({
            mediaBlob,
            mediaBuffer,
            mediaDuration,
            mediaFragmentType,
            mediaFileName
          });
          resolve();
        };
        recorder.start();
      };
      // A media fragment (#t=a,b) pauses the element at `b`; stop the
      // recorder there and release the captured tracks.
      videoStream.onpause = e => {
        videoStream.onpause = null;
        cursor = videoStream.currentTime;
        recorder.stop();
        // stop `MediaStreamTrack`s
        for (let track of mediaStream.getTracks()) {
          track.stop();
        }
      };
      currentFragmentURL = new URL(url);
      // for firefox to load cross origin media without silence
      if (!hasCaptureStream) {
        console.log(currentFragmentURL);
        request = new Request(currentFragmentURL.href);
        blob = await fetch(request).then(response => response.blob());
        console.log(blob);
        currentBlobURL = URL.createObjectURL(blob);
        // set next media fragment URL to `.currentTime` at `pause` event
        // of previous media fragment if `url` has same `origin` and `pathname`
        if (urls.indexOf(currentFragmentURL.href) > 0
          && new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).origin === currentFragmentURL.origin
          && new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).pathname === currentFragmentURL.pathname) {
          if (cursor > 0) {
            // (fixed: original had a duplicated `url = url = ...` assignment)
            url = currentBlobURL + currentFragmentURL.hash.replace(/=\d+/, "=" + cursor);
            console.log(url);
          }
        } else {
          url = currentBlobURL + currentFragmentURL.hash;
        }
      } else {
        if (cursor > 0
          && new URL(urls[urls.indexOf(url) - 1]).origin === currentFragmentURL.origin
          && new URL(urls[urls.indexOf(currentFragmentURL.href) - 1]).pathname === currentFragmentURL.pathname) {
          // Resume the same file from where the previous fragment paused.
          url = url.replace(/=\d+/, "=" + cursor);
          console.log(url);
        }
      }
      videoStream.src = url;
      // NOTE(review): `.catch(err => err)` swallows any rejection and makes the
      // awaited value the error object itself; preserved from the original.
    }).catch(err => err);
  }
  return media;
};
// set metadata of media `Blob`
// see https://github.com/legokichi/ts-ebml/issues/14#issuecomment-325200151
// Rebuild a recorded WebM Blob with seekable metadata (duration + cues),
// which raw MediaRecorder output lacks.
// see https://github.com/legokichi/ts-ebml/issues/14#issuecomment-325200151
// @param {Blob} blob - raw MediaRecorder output.
// @returns {Promise<Blob|undefined>} refined WebM Blob, or undefined when
//   metadata rewriting fails (the error is logged, not rethrown — callers
//   treat this as best-effort, matching the original `.catch`).
const setMediaMetadata = async (blob) => {
  try {
    // (the original also destructured `encoder`, which was never used)
    const { decoder, tools, reader } = await tsebmlTools();
    // Wrap the recording in a "video/webm" container Blob before slicing.
    let webM = new Blob([], {
      type: "video/webm"
    });
    webM = new Blob([webM, blob], {
      type: blob.type
    });
    const buf = await readAsArrayBuffer(blob);
    // Feed every decoded EBML element to the reader so it can accumulate
    // duration and cue information.
    for (const elm of decoder.decode(buf)) {
      reader.read(elm);
    }
    reader.stop();
    // Swap the original (non-seekable) metadata for a seekable version.
    const refinedMetadataBuf = tools.makeMetadataSeekable(reader.metadatas, reader.duration, reader.cues);
    const webMBuf = await readAsArrayBuffer(webM);
    const body = webMBuf.slice(reader.metadataSize);
    const refinedWebM = new Blob([refinedMetadataBuf, body], {
      type: webM.type
    });
    // close Blobs — Blob.close() is non-standard/deprecated, hence the guard.
    if (webM.close && blob.close) {
      webM.close();
      blob.close();
    }
    return refinedWebM;
  } catch (err) {
    // NOTE(review): swallowing here yields `undefined` downstream; preserved
    // from the original control flow.
    console.error(err);
  }
};
// One shared set of ts-ebml helpers for the recording passes below.
let mediaTools = await tsebmlTools();
const {
decoder,
encoder,
reader,
tools
} = mediaTools;
// First pass: record each media fragment individually.
const mediaFragments = await recordMediaFragments(video, mimeCodec, decoder, encoder, reader, tools, urls);
// Second pass: append every recorded fragment to the (outer-scope)
// `mediaSource`, play it back in `video`, capture that playback with a
// MediaRecorder, and resolve with the individual fragments plus the
// concatenated recording.
// NOTE(review): `rejectAllMedia` is never called — a failure in any handler
// below leaves this promise pending; confirm that is acceptable.
const recordedMedia = await new Promise((resolveAllMedia, rejectAllMedia) => {
console.log(decoder, encoder, tools, reader, mediaFragments);
let mediaStream, recorder;
// After endOfStream(): seek back to the start of the buffered range and
// stop the recorder once playback reaches the full MediaSource duration.
mediaSource.onsourceended = e => {
console.log(video.buffered.start(0), video.buffered.end(0));
video.currentTime = video.buffered.start(0);
console.log(video.paused, video.readyState);
video.ontimeupdate = e => {
console.log(video.currentTime, mediaSource.duration);
if (video.currentTime >= mediaSource.duration) {
// Playback complete: detach handlers and finish the recording.
video.ontimeupdate = null;
video.oncanplay = null;
video.onwaiting = null;
if (recorder.state === "recording") {
recorder.stop();
}
console.log(e, recorder);
}
}
}
video.onended = (e) => {
video.onended = null;
console.log(e, video.currentTime,
mediaSource.duration);
}
video.oncanplay = e => {
console.log(e, video.duration, video.buffered.end(0));
video.play()
}
video.onwaiting = e => {
console.log(e, video.currentTime);
}
// record `MediaSource` playback of recorded media fragments
video.onplaying = async(e) => {
console.log(e);
video.onplaying = null;
mediaStream = captureStream(video);
// Fallback path when captureStream is unavailable natively: mirror the
// captured stream into the outer-scope `videoStream` element.
if (!hasCaptureStream) {
videoStream.srcObject = mediaStream;
videoStream.play();
}
recorder = new MediaRecorder(mediaStream, {
mimeType: mimeCodec
});
console.log(recorder);
// Fires once when the recorder stops; builds the "concatenated" result.
recorder.ondataavailable = async(e) => {
console.log(e);
const mediaFragmentsRecording = {};
mediaFragmentsRecording.mediaBlob = await setMediaMetadata(e.data);
mediaFragmentsRecording.mediaBuffer = await readAsArrayBuffer(mediaFragmentsRecording.mediaBlob);
// NOTE(review): assumes `urls` is an array here; a plain-string `urls`
// argument would have no `.map` — TODO confirm callers always pass arrays.
mediaFragmentsRecording.mediaFileName = urls.map(url => {
const id = new URL(url);
return id.pathname.split("/").pop() + id.hash
}).join("-");
mediaFragmentsRecording.mediaFragmentType = "multipleMediaFragments";
// `<video>` to play concatened media fragments
// recorded from playback of `MediaSource`
// NOTE(review): `fragments` is assigned without declaration — presumably
// declared earlier in the file; verify it is not an implicit global.
fragments = document.createElement("video");
fragments.id = "fragments";
fragments.width = video.width;
fragments.height = video.height;
fragments.controls = true;
// Resolve only after metadata loads so `duration` is known.
fragments.onloadedmetadata = () => {
fragments.onloadedmetadata = null;
mediaFragmentsRecording.mediaDuration = fragments.duration;
URL.revokeObjectURL(currentBlobURL);
// stop `MediaStreamTrack`s
for (let track of mediaStream.getTracks()) {
track.stop();
}
resolveAllMedia([
...mediaFragments, mediaFragmentsRecording
]);
}
currentBlobURL = URL.createObjectURL(mediaFragmentsRecording.mediaBlob);
fragments.src = currentBlobURL;
document.body.appendChild(fragments);
}
recorder.start();
}
video.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener("sourceopen", sourceOpen);
// Append each recorded fragment buffer sequentially, bumping
// timestampOffset by the fragment's duration so segments concatenate.
async function sourceOpen(e) {
if (MediaSource.isTypeSupported(mimeCodec)) {
const sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
sourceBuffer.mode = "segments";
for (let {
mediaBuffer,
mediaDuration
} of mediaFragments) {
await new Promise((resolveUpdatedMediaSource) => {
sourceBuffer.onupdateend = async(e) => {
sourceBuffer.onupdateend = null;
console.log(e, mediaDuration, mediaSource.duration
, video.paused, video.ended, video.currentTime
, "media source playing", video.readyState);
// https://bugzilla.mozilla.org/show_bug.cgi?id=1400587
// https://bugs.chromium.org/p/chromium/issues/detail?id=766002&q=label%3AMSEptsdtsCleanup
// try/catch because adjusting timestampOffset can throw on some
// engines (see bug links above); resolve either way to keep going.
try {
sourceBuffer.timestampOffset += mediaDuration;
resolveUpdatedMediaSource();
} catch (err) {
console.error(err);
resolveUpdatedMediaSource();
}
}
sourceBuffer.appendBuffer(mediaBuffer);
})
}
mediaSource.endOfStream()
} else {
console.warn(mimeCodec + " not supported");
}
};
})
return recordedMedia
};
// Kick off the recorder, then build a <select> of object URLs so each
// recorded fragment (and the concatenated recording) can be loaded into
// the #fragments <video> created during recording.
mediaFragmentRecorder(geckoUrl)
  .then(recordedMediaFragments => {
    // do stuff with recorded media fragments
    console.log(recordedMediaFragments);
    const select = document.createElement("select");
    // `recordedMediaFragments` is already an array — iterate it directly
    // (the original wrapped it in Object.values() and destructured an
    // unused `mediaFragmentType`).
    for (const { mediaFileName, mediaBlob } of recordedMediaFragments) {
      // NOTE(review): these object URLs are never revoked; they persist
      // until page unload.
      const option = new Option(mediaFileName, URL.createObjectURL(mediaBlob));
      select.appendChild(option);
    }
    // Selecting an entry loads it into the #fragments playback element.
    select.onchange = () => {
      document.getElementById("fragments").src = select.value;
    };
    video.parentNode.insertBefore(select, video);
    video.controls = true;
    video.currentTime = video.buffered.start(0);
  })
  .catch(err => console.error(err));
}
})()
</script>
</body>
</html>

Categories

Resources