Change playout delay in WebRTC stream - javascript

I'm trying to cast a live MediaStream (eventually from the camera) from peerA to peerB, and I want peerB to receive the live stream in real time and then replay it with an added delay. Unfortunately it isn't possible to simply pause the stream and resume with play, since it jumps forward to the live moment.
So I have figured out that I can use MediaRecorder + SourceBuffer to rewatch the live stream: record the stream, append the buffers to MSE (SourceBuffer) and play it 5 seconds later.
This works great on the local device (stream). But when I try to use MediaRecorder on the receiver's MediaStream (from pc.onaddstream), it looks like it gets some data and is able to append the buffer to the SourceBuffer; however, it does not replay. Sometimes I get just one frame.
const [pc1, pc2] = localPeerConnectionLoop()
const canvasStream = canvas.captureStream(200)

videoA.srcObject = canvasStream
videoA.play()

// Note: using two MediaRecorders at the same time seems problematic
// But this one works
// stream2mediaSorce(canvasStream, videoB)
// setTimeout(videoB.play.bind(videoB), 5000)

pc1.addTransceiver(canvasStream.getTracks()[0], {
  streams: [ canvasStream ]
})

pc2.onaddstream = (evt) => {
  videoC.srcObject = evt.stream
  videoC.play()

  // Note: using two MediaRecorders at the same time seems problematic
  // THIS DOES NOT WORK
  stream2mediaSorce(evt.stream, videoD)
  setTimeout(() => videoD.play(), 2000)
}

/**
 * Turn a MediaStream into a SourceBuffer
 *
 * @param {MediaStream} stream Live stream to record
 * @param {HTMLVideoElement} videoElm Video element to play the recorded video in
 * @return {undefined}
 */
function stream2mediaSorce (stream, videoElm) {
  const RECORDER_MIME_TYPE = 'video/webm;codecs=vp9'
  const recorder = new MediaRecorder(stream, { mimeType: RECORDER_MIME_TYPE })

  const mediaSource = new MediaSource()
  videoElm.src = URL.createObjectURL(mediaSource)
  mediaSource.onsourceopen = (e) => {
    const sourceBuffer = mediaSource.addSourceBuffer(RECORDER_MIME_TYPE)

    const fr = new FileReader()
    fr.onerror = console.log
    fr.onload = ({ target }) => {
      console.log(target.result)
      sourceBuffer.appendBuffer(target.result)
    }
    recorder.ondataavailable = ({ data }) => {
      console.log(data)
      fr.readAsArrayBuffer(data)
    }
    setInterval(recorder.requestData.bind(recorder), 1000)
  }

  console.log('Recorder created')
  recorder.start()
}
Do you know why it won't play the video?
I have created a fiddle with all the necessary code to try it out; the JavaScript tab is the same code as above (the HTML is mostly irrelevant and does not need to be changed).
Some people try to reduce the latency, but I actually want to increase it to ~10 seconds, to rewatch something you did wrong in a golf swing for example, and if possible avoid MediaRecorder altogether.
EDIT:
I found something called "playout-delay" in some RTC extension
that allows the sender to control the minimum and maximum latency from capture to render time
https://webrtc.org/experiments/rtp-hdrext/playout-delay/
How can I use it?
Will it be of any help to me?
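From what I can tell the extension is negotiated in the SDP, so I can at least check whether my browser offers it. A rough sketch of that check (assuming Chrome, which seems to advertise this extension by default for video):
const pc = new RTCPeerConnection()
pc.addTransceiver('video')
pc.createOffer().then(offer => {
  // the relevant line looks something like:
  // a=extmap:<id> http://www.webrtc.org/experiments/rtp-hdrext/playout-delay
  console.log('playout-delay offered:', offer.sdp.includes('playout-delay'))
})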

Update: there is a new feature that will enable this, called playoutDelayHint.
We want to provide means for JavaScript applications to set their preferences on how fast they want to render audio or video data. As fast as possible might be beneficial for applications which concentrate on real-time experience. For others, additional data buffering may provide a smoother experience in case of network issues.
Refs:
https://discourse.wicg.io/t/hint-attribute-in-webrtc-to-influence-underlying-audio-video-buffering/4038
https://bugs.chromium.org/p/webrtc/issues/detail?id=10287
Demo: https://jsfiddle.net/rvekxns5/
Though I was only able to set a max of 10s in my browser, it's more up to the UA vendor to do the best it can with the resources available.
import('https://jimmy.warting.se/packages/dummycontent/canvas-clock.js')
.then(({AnalogClock}) => {
  const {canvas} = new AnalogClock(100)
  document.querySelector('canvas').replaceWith(canvas)

  const [pc1, pc2] = localPeerConnectionLoop()
  const canvasStream = canvas.captureStream(200)

  videoA.srcObject = canvasStream
  videoA.play()

  pc1.addTransceiver(canvasStream.getTracks()[0], {
    streams: [ canvasStream ]
  })

  pc2.onaddstream = (evt) => {
    videoC.srcObject = evt.stream
    videoC.play()
  }

  $dur.onchange = () => {
    pc2.getReceivers()[0].playoutDelayHint = $dur.valueAsNumber
  }
})
<!-- all the irrelevant part, that you don't need to know anything about -->
<h3 style="border-bottom: 1px solid">Original canvas</h3>
<canvas id="canvas" width="100" height="100"></canvas>
<script>
function localPeerConnectionLoop(cfg = {sdpSemantics: 'unified-plan'}) {
  const setD = (d, a, b) => Promise.all([a.setLocalDescription(d), b.setRemoteDescription(d)]);
  return [0, 1].map(() => new RTCPeerConnection(cfg)).map((pc, i, pcs) => Object.assign(pc, {
    onicecandidate: e => e.candidate && pcs[i ^ 1].addIceCandidate(e.candidate),
    onnegotiationneeded: async e => {
      try {
        await setD(await pc.createOffer(), pc, pcs[i ^ 1]);
        await setD(await pcs[i ^ 1].createAnswer(), pcs[i ^ 1], pc);
      } catch (e) {
        console.log(e);
      }
    }
  }));
}
</script>
<h3 style="border-bottom: 1px solid">Local peer (PC1)</h3>
<video id="videoA" muted width="100" height="100"></video>
<h3 style="border-bottom: 1px solid">Remote peer (PC2)</h3>
<video id="videoC" muted width="100" height="100"></video>
<label> Change playoutDelayHint
<input type="number" value="1" id="$dur">
</label>

Related

JS: play multiple audio sources simultaneously when loaded

I have a web project (vanilla HTML/CSS/JS only) with three audio sources. The idea is for all three to play simultaneously, but I noticed on mobile that the files were playing out of sync (i.e. one source would start, then a few ms later the second would start, then the third). I believe this is because each file plays as soon as it has loaded, so I would like to wait until all files have loaded and then call play() on all three at the same time.
What would be the best way to achieve this using vanilla JS?
Example: https://jacksorjacksor.xyz/soundblocks/
Repo: https://github.com/jacksorjacksor/jacksorjacksor/tree/master/soundblocks
TIA!
Rich
MediaElements are meant for normal playback of media and aren't optimized enough to get low latency. The best is to use the Web Audio API, and AudioBuffers.
You will first fetch each file's data in memory, then decode the audio data from these, and once all the audio data has been decoded, you'll be able to schedule playing all at the same precise moment:
(async() => {
  const urls = [ "layer1_big.mp3", "layer2_big.mp3", "layer3_big.mp3" ]
    .map( (url) => "https://cdn.jsdelivr.net/gh/jacksorjacksor/jacksorjacksor/soundblocks/audio/" + url );

  // first, fetch each file's data
  const data_buffers = await Promise.all(
    urls.map( (url) => fetch( url ).then( (res) => res.arrayBuffer() ) )
  );

  // get our AudioContext
  const context = new (window.AudioContext || window.webkitAudioContext)();

  // decode the data
  const audio_buffers = await Promise.all(
    data_buffers.map( (buf) => context.decodeAudioData( buf ) )
  );

  // to enable the AudioContext we need to handle a user gesture
  const btn = document.querySelector( "button" );
  btn.onclick = (evt) => {
    const current_time = context.currentTime;
    audio_buffers.forEach( (buf) => {
      // a buffer source is a really small object
      // don't be afraid of creating and throwing it
      const source = context.createBufferSource();
      // we only connect the decoded data, it's not copied
      source.buffer = buf;
      // in order to make some noise
      source.connect( context.destination );
      // make it loop?
      //source.loop = true;
      // start them all 0.5s after we began, so we're sure they're in sync
      source.start( current_time + 0.5 );
    } );
  };
  btn.disabled = false;
})();
<button disabled>play</button>

MediaRecorder API not able to record at higher framerate

I am currently working on a project and need to be able to make a recording of my screen and save it locally to my computer.
The recording is being saved as a webm, but every one of them has a really bad framerate, usually around 10-15 fps. Is there a way to increase the framerate for recording?
I am able to increase the quality of the recording by playing around with the MediaRecorder options and codecs, but this doesn't seem to affect the framerate I am getting at all.
Here is the code I am using to make my recording:
const options = {
  mimeType: 'video/webm; codecs="vp9.00.41.8.00.01"',
  videoBitsPerSecond: 800 * Mbps,
  videoMaximizeFrameRate: true,
};

mediaRecorder = new MediaRecorder(stream, options);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.onstop = handleStop;

startBtn.onclick = e => {
  mediaRecorder.start();
  startBtn.innerHTML = 'Recording';
}

stopBtn.onclick = e => {
  mediaRecorder.stop();
  startBtn.innerHTML = 'Start';
}

function handleDataAvailable(e) {
  recordedChunks.push(e.data);
}

async function handleStop() {
  const blob = new Blob(recordedChunks, {
    mimeType: 'video/webm'
  });

  const buffer = Buffer.from(await blob.arrayBuffer());

  const { filePath } = await dialog.showSaveDialog({
    buttonLabel: 'Save video',
    defaultPath: `vid-${Date.now()}.webm`
  });

  console.log(filePath);

  if (filePath) {
    writeFile(filePath, buffer, () => console.log('video saved successfully'));
  }
}
I have looked through the MDN documentation and haven't found anything about it. I also tried using different codecs with different parameters, but the results are always the same.
The framerate you're getting is typical for any standard screen capture.
The only way to go faster is to utilize the GPU's specific capability to capture and encode. This is out of scope for the web APIs.
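If you want to at least ask the capturer for a higher rate, you can pass a frameRate constraint when obtaining the stream. This is only a hint, and the browser still won't exceed what the capture pipeline can actually deliver; a minimal sketch, assuming the stream comes from getDisplayMedia:
async function startCapture() {
  // ask for up to 60 fps; the actual rate is capped by the capturer
  const stream = await navigator.mediaDevices.getDisplayMedia({
    video: { frameRate: { ideal: 60, max: 60 } },
    audio: false
  });
  // check what the browser actually settled on
  console.log('capture frame rate:', stream.getVideoTracks()[0].getSettings().frameRate);
  return stream;
}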

Playing Audio Base64s With No Delay

I have an object with a couple of base64s (audio) inside. The base64s will start to play with a keydown event. In some situations (when the base64 size is a little high), a delay occurs before playing. Is there any way to remove this delay, or at least reduce it?
The app is written in JavaScript and running on Electron.
// audio base64s object
var audio = {A: new Audio('base64[1]'), B: new Audio('base64[2]'), C: new Audio('base64[3]')};

// audio will start playing with key down
function keydown(ev) {
  if (audio[String.fromCharCode(ev.keyCode)].classList.contains('holding') == false) {
    audio[String.fromCharCode(ev.keyCode)].classList.add('holding');
    if (audio[String.fromCharCode(ev.keyCode)].paused) {
      playPromise = audio[String.fromCharCode(ev.keyCode)].play();
      if (playPromise) {
        playPromise.then(function() {
          setTimeout(function() {
            // Follow up operation
          }, audio.duration * 1000);
        }).catch(function() {
          // Audio loading failure
        });
      } else {
        audio[String.fromCharCode(ev.keyCode)].currentTime = 0;
      }
    }
  }
}
I wrote up a complete example for you, annotated below.
Some key takeaways:
If you need any sort of expediency or control over timing, you need to use the Web Audio API. Without it, you have no control over the buffering or other behavior of audio playback.
Don't use base64 for this. You don't need it. Base64 encoding is a method for encoding binary data into a text format. There is no text format here... therefore it isn't necessary. When you use base64 encoding, you add 33% overhead to the storage, you use CPU, memory, etc. There is no reason for it here.
Do use the appropriate file APIs to get what you need. To decode an audio sample, we need an array buffer. Therefore, we can use the .arrayBuffer() method on the file itself to get that. This retains the content in binary the entire time and allows the browser to memory-map if it wants to.
The code:
const audioContext = new AudioContext();
let buffer;

document.addEventListener('DOMContentLoaded', (e) => {
  document.querySelector('input[type="file"]').addEventListener('change', async (e) => {
    // Start the AudioContext, now that we have user interaction
    audioContext.resume();

    // Ensure we actually have at least one file before continuing
    if ( !(e.currentTarget.files && e.currentTarget.files[0]) ) {
      return;
    }

    // Read the file and decode the audio
    buffer = await audioContext.decodeAudioData(
      await e.currentTarget.files[0].arrayBuffer()
    );
  });
});

document.addEventListener('keydown', (e) => {
  // Ensure we've loaded audio
  if (!buffer) {
    return;
  }

  // Create the node that will play our previously decoded buffer
  const bufferSourceNode = audioContext.createBufferSource();
  bufferSourceNode.buffer = buffer;

  // Hook up the buffer source to our output node (speakers, headphones, etc.)
  bufferSourceNode.connect(audioContext.destination);

  // Adjust pitch based on the key we pressed, just for fun
  bufferSourceNode.detune.value = (e.keyCode - 65) * 100;

  // Start playing... right now
  bufferSourceNode.start();
});
JSFiddle: https://jsfiddle.net/bradisbell/sc9jpxvn/1/

How to play multiple audio sequentially with ionic Media plugin

I am trying to play multiple audio files with the Ionic media plugin: https://ionicframework.com/docs/native/media, but I am having a hard time making it work as a playlist without using a timeout function.
Here is what I have tried out:
playOne(track: AudioFile): Promise<any> {
  return new Promise(async resolve => {
    const AudFile = await this.media.create(this.file.externalDataDirectory + track.trackUrl);
    await resolve(AudFile.play())
  });
}
Then to play all, I have this:
async playAll(tracks: AudioFile[]): Promise<any> {
  let player = (acc, track: AudioFile) => acc.then(() =>
    this.playOne(track)
  );
  tracks.reduce(player, Promise.resolve());
}
This way they are all playing at the same time.
But if the playOne method is wrapped in a timeout function, the milliseconds set on the timeout create a gap between the items in the playlist; however, one file does not necessarily finish before the next starts, and sometimes it waits a long time before the subsequent file is played.
The timeout implementation looks like this:
playOne(track: AudioFile): Promise<any> {
  return new Promise(async resolve => {
    setTimeout(async () => {
      const AudFile = await this.media.create(this.file.externalDataDirectory + track.trackUrl);
      await resolve(AudFile.play())
    }, 3000)
  });
}
Digging into the Ionic wrapper of the plugin, the create method looks like this:
/**
 * Open a media file
 * @param src {string} A URI containing the audio content.
 * @return {MediaObject}
 */
Media.prototype.create = function (src) {
  var instance;
  if (checkAvailability(Media.getPluginRef(), null, Media.getPluginName()) === true) {
    // Creates a new media object
    instance = new (Media.getPlugin())(src);
  }
  return new MediaObject(instance);
};
Media.pluginName = "Media";
Media.repo = "https://github.com/apache/cordova-plugin-media";
Media.plugin = "cordova-plugin-media";
Media.pluginRef = "Media";
Media.platforms = ["Android", "Browser", "iOS", "Windows"];
Media = __decorate([
  Injectable()
], Media);
return Media;
}(IonicNativePlugin));
Any suggestion would be appreciated
You may get it working by looping over your tracks and awaiting playOne on each track.
async playAll(tracks: AudioFile[]): Promise<any> {
  for (const track of tracks) {
    await this.playOne(track);
  }
}
If I'm not mistaken, the play function doesn't block until playing the audio file has finished, and it doesn't return a promise either. A workaround would be to use a setTimeout for the duration of the track:
playOne(track: AudioFile): Promise<any> {
  return new Promise(async (resolve, reject) => {
    const audFile = await this.media.create(this.file.externalDataDirectory + track.trackUrl);
    const duration = audFile.getDuration(); // duration in seconds
    audFile.play();
    setTimeout(() => {
        resolve();
      },
      duration * 1000 // setTimeout expects milliseconds
    );
  });
}
I eventually got it to work with a recursive function. This works as expected.
PlayAllList(i, tracks: AudioFile[]) {
  var self = this;
  this.Audiofile = this.media.create(this.file.externalDataDirectory + tracks[i].trackUrl);
  this.Audiofile.play()

  this.Audiofile.onSuccess.subscribe(() => {
    if ((i + 1) == tracks.length) {
      // do nothing
    } else {
      self.PlayAllList(i + 1, tracks)
    }
  })
}
Then
this.PlayAllList(0,tracks)
If there is any improvement on this, I will appreciate it.
I think you will be better off with the Web Audio API. I have used it before, and the possibilities are endless.
Apparently it can be used in Ionic without issues:
https://www.airpair.com/ionic-framework/posts/using-web-audio-api-for-precision-audio-in-ionic
I have used it on http://chordoracle.com to play multiple audio samples at the same time (up to 6 simultaneous samples for each string of the guitar). In this case I also alter their pitch to get different notes.
In order to play multiple samples, you just need to create multiple bufferSources:
https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBufferSource
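A minimal sketch of that idea (the file names and helper names here are placeholders, not from the project above): decode each file once, then create a cheap, one-shot buffer source per playback, either started together or chained on the ended event to get sequential playback:
const ctx = new (window.AudioContext || window.webkitAudioContext)();

// decode each file once; trackUrls is a placeholder list of your audio files
async function loadBuffers(trackUrls) {
  const responses = await Promise.all(trackUrls.map(url => fetch(url)));
  const arrayBuffers = await Promise.all(responses.map(res => res.arrayBuffer()));
  return Promise.all(arrayBuffers.map(buf => ctx.decodeAudioData(buf)));
}

// play the decoded buffers one after another by chaining the "ended" event
function playSequentially(buffers, index = 0) {
  if (index >= buffers.length) return;
  const source = ctx.createBufferSource(); // throw-away node, cheap to create
  source.buffer = buffers[index];
  source.connect(ctx.destination);
  source.onended = () => playSequentially(buffers, index + 1);
  source.start();
}

// usage, after a user gesture so the AudioContext is allowed to run:
// loadBuffers(['track1.mp3', 'track2.mp3']).then(buffers => playSequentially(buffers));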
Some links to get you started:
https://www.w3.org/TR/webaudio/
https://medium.com/better-programming/all-you-need-to-know-about-the-web-audio-api-3df170559378

Obtain MediaStream from input device

Looking for experience working with media devices:
I'm working on recording to cache and playing back from a microphone source, in Firefox & Chrome using HTML5.
This is what I have so far:
var constraints = {audio: true, video: false};
var promise = navigator.mediaDevices.getUserMedia(constraints);
I've been checking the official documentation from MDN on getUserMedia,
but found nothing related to storing the audio from the stream to cache.
No such question has been asked previously on Stack Overflow; I'm wondering if it's possible.
Thank you.
You can simply use the MediaRecorder API for such a task.
In order to record only the audio from your video+audio gUM stream, you will need to create a new MediaStream from the gUM stream's audio track:
// using async for brevity
async function doit() {
  // first request both mic and camera
  const gUMStream = await navigator.mediaDevices.getUserMedia({video: true, audio: true});

  // create a new MediaStream with only the audioTrack
  const audioStream = new MediaStream(gUMStream.getAudioTracks());

  // to save recorded data
  const chunks = [];
  const recorder = new MediaRecorder(audioStream);
  recorder.ondataavailable = e => chunks.push(e.data);
  recorder.start();

  // when user decides to stop
  stop_btn.onclick = e => {
    recorder.stop();
    // kill all tracks to free the devices
    gUMStream.getTracks().forEach(t => t.stop());
    audioStream.getTracks().forEach(t => t.stop());
  };

  // export all the saved data as one Blob
  recorder.onstop = e => exportMedia(new Blob(chunks));

  // play current gUM stream
  vid.srcObject = gUMStream;
  stop_btn.disabled = false;
}

function exportMedia(blob) {
  // here blob is your recorded audio file, you can do whatever you want with it
  const aud = new Audio(URL.createObjectURL(blob));
  aud.controls = true;
  document.body.appendChild(aud);
  document.body.removeChild(vid);
}

doit()
  .then(e => console.log("recording"))
  .catch(e => {
    console.error(e);
    console.log('you may want to try from jsfiddle: https://jsfiddle.net/5s2zabb2/');
  });
<video id="vid" controls autoplay></video>
<button id="stop_btn" disabled>stop</button>
And as a fiddle, since Stack Snippets don't work very well with gUM...
