Here is a summary of the code:
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(e);
var bufferSize = 2048;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
//(...)
}
//(....)
client.on('open', function() {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
}
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false;
//window.Stream.end();
window.Stream.pause();
}
store_data.onaudioprocess = function(e){ //<---line of the error
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
}
//(..events generated from server..)
In Chrome my code works just fine. In Firefox I always get the error "store_data is undefined". Any idea why? I am declaring store_data as a global, and its value is set when getUserMedia succeeds.
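(For reference: the convertoFloat32ToInt16 helper the code calls is not shown in the question. A typical implementation of such a Float32-to-Int16 conversion might look like the sketch below; the body is a guess, not the asker's actual code.)
// Hypothetical body for the helper the question calls but does not show:
// clamp Float32 samples in [-1, 1] and scale them to 16-bit signed PCM.
function convertoFloat32ToInt16(buffer) {
  var l = buffer.length;
  var out = new Int16Array(l);
  while (l--) {
    var s = Math.max(-1, Math.min(1, buffer[l])); // clamp to [-1, 1]
    out[l] = s < 0 ? s * 0x8000 : s * 0x7FFF;     // scale to Int16 range
  }
  return out.buffer; // ArrayBuffer, suitable for Stream.write()
}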
Without knowing what calls the success function, it's difficult to say exactly, but I am fairly sure you want your client.on('open') listener to be contingent on the success function running.
I don't know how it will affect the rest of the omitted code, but I would only connect the BinaryClient when the success function has run and you are sure you have store_data defined.
function success() {
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
// do the original success code here
// now create that listener.
client.on('open', function() {
// do original code here
});
}
// you probably have a line of code that looks like this
navigator.getUserMedia({audio:true}, success);
Moving all of your code into the success function may work, but it won't be elegant. Once you've got the flow working, I would suggest refactoring the code, by splitting each logical bit up into its own function.
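For instance, the flow could be split like this (a sketch; setupAudio and connectClient are illustrative names, not from the question):
// Sketch of the suggested refactor: each logical step in its own function,
// and the client only connects once store_data exists.
function setupAudio(stream) {
  var audioContext = window.AudioContext || window.webkitAudioContext;
  var context = new audioContext();
  var audioInput = context.createMediaStreamSource(stream);
  var store_data = context.createScriptProcessor(2048, 1, 1);
  connectClient(store_data); // safe: store_data is defined here
}
function connectClient(store_data) {
  var client = new BinaryClient('ws://localhost:9001');
  client.on('open', function() {
    // original client.on('open') code here, using store_data safely
  });
}
navigator.getUserMedia({audio: true}, setupAudio, function(e) {
  alert('Error capturing audio.');
});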
Yes, it's a race: your code must wait until getUserMedia succeeds and open has fired.
Promises are a great way to solve this:
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
(Use the above polyfill to access modern getUserMedia in all supported browsers.)
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
var haveStoreData = navigator.mediaDevices.getUserMedia({audio:true})
.then(function(stream) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 2048;
return context.createScriptProcessor(bufferSize, 1, 1);
});
//(....)
client.on('open', function() {
console.log("opened");
haveStoreData.then(function(store_data) {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
};
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false;
//window.Stream.end();
window.Stream.pause();
};
store_data.onaudioprocess = function(e){
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
};
//(..events generated from server..)
})
.catch(function(e) { console.error(e); });
});
This will give users time to choose "Allow" in the mic permission prompt (unlike Chrome, Firefox asks the user for permission every time, unless they choose "Always Allow").
var client = new BinaryClient('ws://193.136.94.233:9001');
var context = null;
var ganho = null;
var store_data = null;
//(.....)
navigator.mediaDevices.getUserMedia({audio:true}).then(function(stream){
context = new AudioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 4096;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
biquadFilter = context.createBiquadFilter();
biquadFilter.type = "lowpass";
biquadFilter.frequency.value = 11500;
biquadFilter.Q.value = 3;
ganho = context.createGain();
ganho.gain.value=0.5;
//audioInput.connect(ganho);//compresso
//ganho.connect(recorder);
//recorder.connect(context.destination);
audioInput.connect(biquadFilter);
biquadFilter.connect(ganho);
ganho.connect(store_data);
store_data.connect(context.destination);
store_data.onaudioprocess = function(e){
if(!recording){
//console.log("nada faz nada desta vida")
return;
}
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
Stream.write(convertoFloat32ToInt16(left));
}
//audioInput.connect(store_data);
}).catch(function(e){ console.log(e); });
//(...)
client.on('open', function() {
console.log("opened connection");
//haveStoreData.then(function(store_data) {
Stream = client.createStream(command_list);
//recording = false;
//(........)
});
//Other function
This is the solution for streaming with BinaryJS in both Chrome and Firefox. Thanks to @jib and @Kaiido.
I am very new to JavaScript; I know some basics but have not yet completely understood the logic behind it (so far I have only worked with Python and a little bit of VBA).
For uni I have to build a browser interface to record audio and transfer it to a server where a speech-to-text application runs. I found some open-source code here (https://github.com/mdn/dom-examples/blob/main/media/web-dictaphone/scripts/app.js) which I wanted to use, but it is missing the WebSocket part, and I don't know where exactly to insert that. So far I have this:
code of the Webdictaphone:
// set up basic variables for app
const record = document.querySelector('.record');
const stop = document.querySelector('.stop');
const soundClips = document.querySelector('.sound-clips');
const canvas = document.querySelector('.visualizer');
const mainSection = document.querySelector('.main-controls');
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
const constraints = { audio: true };
let chunks = [];
let onSuccess = function(stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
}
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
}
mediaRecorder.onstop = function(e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt('Enter a name for your sound clip?','My unnamed clip');
const clipContainer = document.createElement('article');
const clipLabel = document.createElement('p');
const audio = document.createElement('audio');
const deleteButton = document.createElement('button');
clipContainer.classList.add('clip');
audio.setAttribute('controls', '');
deleteButton.textContent = 'Delete';
deleteButton.className = 'delete';
if(clipName === null) {
clipLabel.textContent = 'My unnamed clip';
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function(e) {
e.target.closest(".clip").remove();
}
clipLabel.onclick = function() {
const existingName = clipLabel.textContent;
const newClipName = prompt('Enter a new name for your sound clip?');
if(newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
}
}
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
}
let onError = function(err) {
console.log('The following error occured: ' + err);
}
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log('getUserMedia not supported on your browser!');
}
websocket part (client side):
window.addEventListener("DOMContentLoaded", () => {
// Open the WebSocket connection and register event handlers.
console.log('DOMContentLoaded done');
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
dataToBeSent = function (data) {
ws.send(data);
};
console.log('ws is defined');
})
Right now I have just stacked both parts on top of each other, but this doesn't work: as I found out, a variable such as ws is only visible within the block (here, the event-listener callback) in which it is defined. This leads to an error saying that ws is not defined when I call the sending function within the if statement.
I have already spent hours looking for tutorials, but none of those I found covered this topic. I also tried moving the WebSocket part into the if statement, but that, unsurprisingly, did not work either, at least not the way I tried it.
I feel like my problem lies in understanding how to define the WebSocket so that I can call it within the if statement, or in figuring out a way to get the audio to somewhere where ws is defined. Unfortunately I just can't get my head around it and have already invested days, which has become really frustrating.
I appreciate any help. If you have any ideas what I could change or move in the code or maybe just know any tutorial that could help, I'd be really grateful.
Thanks in advance!
You don't need that window.addEventListener("DOMContentLoaded", () => { ... }) part; just declare ws at the top level:
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
dataToBeSent = function (data) {
ws.send(data);
};
const record = document.querySelector(".record");
const stop = document.querySelector(".stop");
const soundClips = document.querySelector(".sound-clips");
const canvas = document.querySelector(".visualizer");
const mainSection = document.querySelector(".main-controls");
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log("getUserMedia supported.");
const constraints = { audio: true };
let chunks = [];
let onSuccess = function (stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function () {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
};
stop.onclick = function () {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
};
mediaRecorder.onstop = function (e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt(
"Enter a name for your sound clip?",
"My unnamed clip"
);
const clipContainer = document.createElement("article");
const clipLabel = document.createElement("p");
const audio = document.createElement("audio");
const deleteButton = document.createElement("button");
clipContainer.classList.add("clip");
audio.setAttribute("controls", "");
deleteButton.textContent = "Delete";
deleteButton.className = "delete";
if (clipName === null) {
clipLabel.textContent = "My unnamed clip";
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function (e) {
e.target.closest(".clip").remove();
};
clipLabel.onclick = function () {
const existingName = clipLabel.textContent;
const newClipName = prompt("Enter a new name for your sound clip?");
if (newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
};
};
mediaRecorder.ondataavailable = function (e) {
chunks.push(e.data);
};
};
let onError = function (err) {
console.log("The following error occured: " + err);
};
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log("getUserMedia not supported on your browser!");
}
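Note that dataToBeSent is defined but never called. One plausible way to actually send the audio over the socket (an assumption on my part, not something the original code does) is to forward each chunk as it arrives, by replacing the ondataavailable handler inside onSuccess with:
// Sketch (assumption): ship each recorded chunk to the server as it
// becomes available, while keeping the local playback behavior.
mediaRecorder.ondataavailable = function (e) {
  chunks.push(e.data);                      // keep local clip playback
  if (ws.readyState === WebSocket.OPEN) {
    ws.send(e.data);                        // a Blob is sent as a binary frame
  }
};
If the server should receive data while recording is still in progress, you can also start the recorder with a timeslice, e.g. mediaRecorder.start(250), so ondataavailable fires every 250 ms instead of only on stop.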
I am looking for a solution to allow a website to access and process a stream of audio from the user's microphone. It's unfamiliar territory for me. I've been working with WebRTC examples, but so far have only got it working on:
Firefox and Chrome on a 2011 MacBook Air running Sierra.
Firefox on Windows 10.
My script throws errors on some browser/OS combinations, and on others it throws none, but regardless it doesn't function.
Is there a better solution?
// De-minified for readability: a Web Audio API shim that unprefixes the
// context, aliases legacy method names, and unlocks audio on iOS.
(function (global) {
"use strict";
global.AudioContext = global.AudioContext || global.webkitAudioContext;
global.OfflineAudioContext = global.OfflineAudioContext || global.webkitOfflineAudioContext;
var proto = AudioContext.prototype;
var ctx = new AudioContext();
var needsAlias = function (modern, legacy) {
return modern === undefined && legacy !== undefined;
};
// Alias legacy noteOn/noteOff to start/stop on buffer sources.
var bufProto = ctx.createBufferSource().constructor.prototype;
if (needsAlias(bufProto.start, bufProto.noteOn) || needsAlias(bufProto.stop, bufProto.noteOff)) {
var origCreateBufferSource = proto.createBufferSource;
proto.createBufferSource = function () {
var node = origCreateBufferSource.call(this);
node.start = node.start || node.noteOn;
node.stop = node.stop || node.noteOff;
return node;
};
}
// Same for oscillators, where supported.
if (typeof ctx.createOscillator === "function") {
var oscProto = ctx.createOscillator().constructor.prototype;
if (needsAlias(oscProto.start, oscProto.noteOn) || needsAlias(oscProto.stop, oscProto.noteOff)) {
var origCreateOscillator = proto.createOscillator;
proto.createOscillator = function () {
var node = origCreateOscillator.call(this);
node.start = node.start || node.noteOn;
node.stop = node.stop || node.noteOff;
return node;
};
}
}
// Alias the legacy factory names.
if (proto.createGain === undefined && proto.createGainNode !== undefined)
proto.createGain = proto.createGainNode;
if (proto.createDelay === undefined && proto.createDelayNode !== undefined)
proto.createDelay = proto.createDelayNode;
if (proto.createScriptProcessor === undefined && proto.createJavaScriptNode !== undefined)
proto.createScriptProcessor = proto.createJavaScriptNode;
// iOS: audio must be unlocked by a user gesture, so play a silent
// source on the first touch, then remove the listener.
if (navigator.userAgent.indexOf("like Mac OS X") !== -1) {
var OrigAudioContext = AudioContext;
global.AudioContext = function () {
var context = new OrigAudioContext();
var body = document.body;
var source = context.createBufferSource();
var processor = context.createScriptProcessor(256, 1, 1);
function unlock() {
source.start(0);
source.connect(processor);
processor.connect(context.destination);
}
body.addEventListener("touchstart", unlock, false);
processor.onaudioprocess = function () {
source.disconnect();
processor.disconnect();
body.removeEventListener("touchstart", unlock, false);
processor.onaudioprocess = null;
};
return context;
};
}
})(window);
var context, analyser, gUM, dataArray, bufferLength, connect_source;
if (AudioContext){
context = new AudioContext();
analyser = context.createAnalyser();
function success(stream){
// Create a new volume meter and connect it.
var source = context.createMediaStreamSource(stream);
compressor = context.createDynamicsCompressor();
compressor.threshold.value = -50;
compressor.knee.value = 40;
compressor.ratio.value = 12;
compressor.reduction.value = -20;
compressor.attack.value = 0;
compressor.release.value = 0.25;
filter = context.createBiquadFilter();
filter.Q.value = 8.30;
filter.frequency.value = 355;
filter.gain.value = 3.0;
filter.type = 'bandpass';
filter.connect(compressor);
source.connect( filter );
source.connect(analyser);
analyser.fftSize = 512;
bufferLength = analyser.frequencyBinCount; // half the FFT value
dataArray = new Uint8Array(bufferLength); // create an array to store the data
};
function fail(e){
if(e){}
console.log(e);
aizuchi.error();
};
var select = document.getElementById("AudioSourceSelect");
function generateSelector(devices){
while(select.firstChild) select.removeChild(select.firstChild);
var opt;
for(var l = devices.length; l--;){
console.log(devices[l]);
if(devices[l].kind == "audioinput"){
opt = document.createElement("option")
opt.text = devices[l].label
opt.value = devices[l].deviceId
if(devices[l].deviceId == "default") opt.setAttribute("selected","")
select.appendChild( opt );
}
}
select.onchange = function(){
connect_source(this.value);
}
select.onchange();
}
try {
navigator.mediaDevices.enumerateDevices().then(generateSelector)
} catch(e){
fail(e);
}
connect_source = function(audioSource){
try {
if(Modernizr.getusermedia){
gUM = Modernizr.prefixed('getUserMedia', navigator);
gUM({video:false, audio : {deviceId: audioSource ? {exact: audioSource} : undefined}},success,fail);
} else {
navigator.mediaDevices.getUserMedia({video:false, audio : {deviceId: audioSource ? {exact: audioSource} : undefined}}).then(success,fail);
}
} catch(e){
fail(e);
}
}
}
Try
var AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
It doesn't look like the browsers have unified on the syntax of this one yet.
Source: MDN
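Putting that together with the asker's existing success/fail handlers (a sketch, assuming the modern promise-based API):
// Sketch: unified constructor plus promise-based getUserMedia.
var AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
navigator.mediaDevices.getUserMedia({ audio: true })
  .then(success)  // the asker's success(stream) handler
  .catch(fail);   // the asker's fail(e) handler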
I am currently trying to implement audio auto-recording: the user speaks, and after they stop, the audio should be submitted to the backend.
I already have a sample script that submits audio with start and stop click functions.
I'm trying to get some type of value, such as amplitude or volume, to compare against a threshold, but I'm not sure whether MediaRecorder supports this or whether I need to look at the Web Audio API or other solutions.
Can I achieve this with MediaRecorder?
Regarding analysis of the mic input, the following example shows how to take the audio captured by the mic, create an analyser with the createAnalyser method of the (webkit)AudioContext, connect the stream to the analyser, and compute an FFT of the specified size, in order to calculate pitch and display the output waveform.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext = null;
var isPlaying = false;
var sourceNode = null;
var analyser = null;
var theBuffer = null;
var audioCtx = null;
var mediaStreamSource = null;
var rafID = null;
var j = 0;
var waveCanvas = null;
var canvasCtx, buf, buflen, prex, prey, x, previousImage; // previously implicit globals
window.onload = function() {
audioContext = new AudioContext();
audioCtx = document.getElementById( "waveform" );
canvasCtx = audioCtx.getContext("2d");
};
function getUserMedia(dictionary, callback) {
try {
navigator.getUserMedia =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia;
navigator.getUserMedia(dictionary, callback, error);
} catch (e) {
alert('getUserMedia threw exception :' + e);
}
}
function gotStream(stream) {
// Create an AudioNode from the stream.
mediaStreamSource = audioContext.createMediaStreamSource(stream);
// Connect it to the destination.
analyser = audioContext.createAnalyser();
analyser.fftSize = 1024;
mediaStreamSource.connect( analyser );
updatePitch();
}
function toggleLiveInput()
{
canvasCtx.clearRect(0, 0, audioCtx.width, audioCtx.height);
canvasCtx.beginPath();
j = 0;
buflen = 1024;
buf = new Float32Array( buflen );
document.getElementById('toggleLiveInput').disabled = true;
document.getElementById('toggleLiveInputStop').disabled = false;
if (isPlaying) {
//stop playing and return
sourceNode.stop( 0 );
sourceNode = null;
//analyser = null;
isPlaying = false;
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
window.cancelAnimationFrame( rafID );
}
getUserMedia(
{
"audio": {
"mandatory": {
"googEchoCancellation": "false",
"googAutoGainControl": "false",
"googNoiseSuppression": "false",
"googHighpassFilter": "false"
},
"optional": []
},
}, gotStream);
}
function stop()
{
document.getElementById('toggleLiveInput').disabled = false;
document.getElementById('toggleLiveInputStop').disabled = true;
//waveCanvas.closePath();
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
window.cancelAnimationFrame( rafID );
return "start";
}
function updatePitch()
{
analyser.fftSize = 1024;
analyser.getFloatTimeDomainData(buf);
canvasCtx.strokeStyle = "red";
for (var i=0;i<2;i+=2)
{
x = j*5;
if(audioCtx.width < x)
{
x = audioCtx.width - 5;
previousImage = canvasCtx.getImageData(5, 0, audioCtx.width, audioCtx.height);
canvasCtx.putImageData(previousImage, 0, 0);
canvasCtx.beginPath();
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = "red";
prex = prex - 5;
canvasCtx.lineTo(prex,prey);
prex = x;
prey = 128+(buf[i]*128);
canvasCtx.lineTo(x,128+(buf[i]*128));
canvasCtx.stroke();
}
else
{
prex = x;
prey = 128+(buf[i]*128);
canvasCtx.lineWidth = 2;
canvasCtx.lineTo(x,128+(buf[i]*128));
canvasCtx.stroke();
}
j++;
}
if (!window.requestAnimationFrame)
window.requestAnimationFrame = window.webkitRequestAnimationFrame;
rafID = window.requestAnimationFrame( updatePitch );
}
function error() {
console.error(new Error('error while generating audio'));
}
Try the demo here.
Example adapted from pitch-liveinput.
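To get the amplitude/threshold value the question actually asks about (rather than pitch), one minimal sketch is to read time-domain data from the same analyser set up in gotStream() and compute an RMS level; the 0.01 threshold and 500 ms silence window below are arbitrary assumptions you would tune:
// Minimal silence-detection sketch using the analyser from gotStream().
var silenceStart = null;
function checkSilence() {
  var data = new Float32Array(analyser.fftSize);
  analyser.getFloatTimeDomainData(data);
  var sum = 0;
  for (var i = 0; i < data.length; i++) sum += data[i] * data[i];
  var rms = Math.sqrt(sum / data.length);   // rough volume estimate
  if (rms < 0.01) {                         // quieter than the chosen threshold
    if (silenceStart === null) silenceStart = performance.now();
    else if (performance.now() - silenceStart > 500) {
      console.log('silence detected - stop the recorder / submit here');
      silenceStart = null;
    }
  } else {
    silenceStart = null;                    // sound resumed, reset the timer
  }
  window.requestAnimationFrame(checkSilence);
}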
I'm having an issue getting a captured blob from the MediaRecorder API to play back in Chrome (it works in Firefox). Not sure if it's a bug in Chrome.
The error it reports:
undefined:1 Uncaught (in promise) DOMException: Unable to decode audio data
window.AudioContext = window.AudioContext || window.webkitAudioContext;
navigator.getUserMedia = (navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
var context = new AudioContext();
var record = document.querySelector('#record');
var stop = document.querySelector('#stop');
if (navigator.getUserMedia) {
console.log('getUserMedia supported.');
var constraints = {
audio: true
};
var chunks = [];
var onSuccess = function(stream) {
var mediaRecorder = new MediaRecorder(stream);
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
}
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
stop.disabled = true;
record.disabled = false;
}
mediaRecorder.onstop = function(e) {
console.log("onstop() called.", e);
var blob = new Blob(chunks, {
'type': 'audio/wav'
});
chunks = [];
var reader = new FileReader();
reader.addEventListener("loadend", function() {
context.decodeAudioData(reader.result, function(buffer) {
playsound(buffer);
},
function(e) {
console.log("error ", e)
});
});
reader.readAsArrayBuffer(blob);
}
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
}
var onError = function(err) {
console.log('The following error occured: ' + err);
}
navigator.getUserMedia(constraints, onSuccess, onError);
} else {
console.log('getUserMedia not supported on your browser!');
}
function playsound(thisbuffer) {
var source = context.createBufferSource();
source.buffer = thisbuffer;
source.connect(context.destination);
source.start(0);
}
<button id="record">record</button>
<button id="stop">stop</button>
I have used your code exactly the way it is, and everything works fine in the Chrome browser.
This issue was fixed when bug https://codereview.chromium.org/1579693006/ was closed and added to the Chrome pipeline.
This is no longer an issue.
To close the loop on this, I suspect this was due to the Chrome bug documented in a comment above. It appears this bug was fixed several years ago and should no longer be a problem as WebAudio now uses ffmpeg for decoding.
In my web application, I have a requirement to play part of mp3 file. This is a local web app, so I don't care about downloads etc, everything is stored locally.
My use case is as follows:
determine file to play
determine start and stop of the sound
load the file [I use BufferLoader]
play
Quite simple.
Right now I just grab the mp3 file, decode it in memory for use with WebAudio API, and play it.
Unfortunately, because the mp3 files can get quite long [30 minutes of audio, for example], the decoded file in memory can take up to 900 MB. That's a bit too much to handle.
Is there any option, where I could decode only part of the file? How could I detect where to start and how far to go?
I cannot anticipate the bitrate, it can be constant, but I would expect variable as well.
Here's an example of what I did:
http://tinyurl.com/z9vjy34
The code [I've tried to make it as compact as possible]:
var MediaPlayerAudioContext = window.AudioContext || window.webkitAudioContext;
var MediaPlayer = function () {
this.mediaPlayerAudioContext = new MediaPlayerAudioContext();
this.currentTextItem = 0;
this.playing = false;
this.active = false;
this.currentPage = null;
this.currentAudioTrack = 0;
};
MediaPlayer.prototype.setPageNumber = function (page_number) {
this.pageTotalNumber = page_number
};
MediaPlayer.prototype.generateAudioTracks = function () {
var audioTracks = [];
var currentBegin;
var currentEnd;
var currentPath;
audioTracks[0] = {
begin: 4.300,
end: 10.000,
path: "example.mp3"
};
this.currentPageAudioTracks = audioTracks;
};
MediaPlayer.prototype.show = function () {
this.mediaPlayerAudioContext = new MediaPlayerAudioContext();
};
MediaPlayer.prototype.hide = function () {
if (this.playing) {
this.stop();
}
this.mediaPlayerAudioContext = null;
this.active = false;
};
MediaPlayer.prototype.play = function () {
this.stopped = false;
console.trace();
this.playMediaPlayer();
};
MediaPlayer.prototype.playbackStarted = function() {
this.playing = true;
};
MediaPlayer.prototype.playMediaPlayer = function () {
var instance = this;
var audioTrack = this.currentPageAudioTracks[this.currentAudioTrack];
var newBufferPath = audioTrack.path;
if (this.mediaPlayerBufferPath && this.mediaPlayerBufferPath === newBufferPath) {
this.currentBufferSource = this.mediaPlayerAudioContext.createBufferSource();
this.currentBufferSource.buffer = this.mediaPlayerBuffer;
this.currentBufferSource.connect(this.mediaPlayerAudioContext.destination);
this.currentBufferSource.onended = function () {
instance.currentBufferSource.disconnect(0);
instance.audioTrackFinishedPlaying()
};
this.playing = true;
this.currentBufferSource.start(0, audioTrack.begin, audioTrack.end - audioTrack.begin);
this.currentAudioStartTimeInAudioContext = this.mediaPlayerAudioContext.currentTime;
this.currentAudioStartTimeOffset = audioTrack.begin;
this.currentTrackStartTime = this.mediaPlayerAudioContext.currentTime - (this.currentTrackResumeOffset || 0);
this.currentTrackResumeOffset = null;
}
else {
function finishedLoading(bufferList) {
instance.mediaPlayerBuffer = bufferList[0];
instance.playMediaPlayer();
}
if (this.currentBufferSource){
this.currentBufferSource.disconnect(0);
this.currentBufferSource.stop(0);
this.currentBufferSource = null;
}
this.mediaPlayerBuffer = null;
this.mediaPlayerBufferPath = newBufferPath;
this.bufferLoader = new BufferLoader(this.mediaPlayerAudioContext, [this.mediaPlayerBufferPath], finishedLoading);
this.bufferLoader.load();
}
};
MediaPlayer.prototype.stop = function () {
this.stopped = true;
if (this.currentBufferSource) {
this.currentBufferSource.onended = null;
this.currentBufferSource.disconnect(0);
this.currentBufferSource.stop(0);
this.currentBufferSource = null;
}
this.bufferLoader = null;
this.mediaPlayerBuffer = null;
this.mediaPlayerBufferPath = null;
this.currentTrackStartTime = null;
this.currentTrackResumeOffset = null;
this.currentAudioTrack = 0;
if (this.currentTextTimeout) {
clearTimeout(this.currentTextTimeout);
this.textHighlightFinished();
this.currentTextTimeout = null;
this.currentTextItem = null;
}
this.playing = false;
};
MediaPlayer.prototype.getNumberOfPages = function () {
return this.pageTotalNumber;
};
MediaPlayer.prototype.playbackFinished = function () {
this.currentAudioTrack = 0;
this.playing = false;
};
MediaPlayer.prototype.audioTrackFinishedPlaying = function () {
this.currentAudioTrack++;
if (this.currentAudioTrack >= this.currentPageAudioTracks.length) {
this.playbackFinished();
} else {
this.playMediaPlayer();
}
};
//
//
// Buffered Loader
//
// Class used to get the sound files
//
function BufferLoader(context, urlList, callback) {
this.context = context;
this.urlList = urlList;
this.onload = callback;
this.bufferList = [];
this.loadCount = 0;
}
// this allows us to handle media files with embedded artwork/id3 tags
function syncStream(node) { // should be done by api itself. and hopefully will.
var buf8 = new Uint8Array(node.buf);
buf8.indexOf = Array.prototype.indexOf;
var i = node.sync, b = buf8;
while (1) {
node.retry++;
i = b.indexOf(0xFF, i);
if (i == -1 || ((b[i + 1] & 0xE0) == 0xE0)) break; // parenthesize: & binds looser than ==
i++;
}
if (i != -1) {
var tmp = node.buf.slice(i); // careful: slice() returns a copy
delete(node.buf);
node.buf = null;
node.buf = tmp;
node.sync = i;
return true;
}
return false;
}
BufferLoader.prototype.loadBuffer = function (url, index) {
// Load buffer asynchronously
var request = new XMLHttpRequest();
request.open("GET", url, true);
request.responseType = "arraybuffer";
var loader = this;
function decode(sound) {
loader.context.decodeAudioData(
sound.buf,
function (buffer) {
if (!buffer) {
alert('error decoding file data');
return
}
loader.bufferList[index] = buffer;
if (++loader.loadCount == loader.urlList.length)
loader.onload(loader.bufferList);
},
function (error) {
if (syncStream(sound)) {
decode(sound);
} else {
console.error('decodeAudioData error', error);
}
}
);
}
request.onload = function () {
// Asynchronously decode the audio file data in request.response
var sound = {};
sound.buf = request.response;
sound.sync = 0;
sound.retry = 0;
decode(sound);
};
request.onerror = function () {
alert('BufferLoader: XHR error');
};
request.send();
};
BufferLoader.prototype.load = function () {
for (var i = 0; i < this.urlList.length; ++i)
this.loadBuffer(this.urlList[i], i);
};
There is no way of streaming with decodeAudioData(); it cannot decode just part of a file. You need to use a media element with createMediaElementSource and run your processing from there. @zre00ne And the mp3 will be decoded big. Very big!
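A minimal sketch of that media-element approach, reusing the example.mp3 path and the 4.3 s to 10 s segment from the question's audioTracks entry (the element decodes incrementally while playing, so memory stays bounded):
// Sketch: play only part of an mp3 without decoding it all into memory.
var audioEl = new Audio('example.mp3');
var ctx = new (window.AudioContext || window.webkitAudioContext)();
var source = ctx.createMediaElementSource(audioEl); // decoded as it plays
source.connect(ctx.destination);
audioEl.currentTime = 4.3;   // track begin
audioEl.play();
audioEl.ontimeupdate = function () {
  if (audioEl.currentTime >= 10.0) audioEl.pause();  // track end
};
Note that modern browsers require a user gesture before play() will succeed, so in practice this would run from a click handler.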