Because one of the browsers I'm trying to support doesn't allow me to decode a specific codec using AudioContext.decodeAudioData(), I'm using Aurora.js to decode audio files.
How can I turn the decoded samples received from Aurora.js into an AudioBuffer that I can actually use to play back the audio?
This is my code so far:
var AudioContext = (window.AudioContext || window.webkitAudioContext);
var context = new AudioContext();

var segmentUrls = [
  '/segments/00.wav',
  '/segments/05.wav',
  '/segments/10.wav',
  '/segments/15.wav',
  '/segments/20.wav',
  '/segments/25.wav',
  '/segments/30.wav',
  '/segments/35.wav',
  '/segments/40.wav',
  '/segments/45.wav',
  '/segments/50.wav',
  '/segments/55.wav'
];

Promise.all(segmentUrls.map(loadSound))
  .then(function(buffers) {
    var startAt = 0;
    buffers.forEach(function(buffer) {
      playSound(startAt, buffer);
      startAt += buffer.duration;
    });
  })
  .catch(function(err) {
    console.error(err);
  });

function playSound(offset, buffer) {
  var source = context.createBufferSource();
  source.buffer = buffer;
  source.connect(context.destination);
  source.start(offset);
}

function loadSound(url) {
  return new Promise(function(resolve, reject) {
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    request.onload = function onLoad() {
      resolve(decodeAudioData(request.response));
    };
    request.onerror = function onError() {
      reject('Could not request file');
    };
    request.send();
  });
}

function decodeAudioData(audioData) {
  return new Promise(function(resolve, reject) {
    var asset = AV.Asset.fromBuffer(audioData);
    asset.decodeToBuffer(function(buffer) {
      // Create an AudioBuffer here
    });
  });
}
You'll have to create an AudioBuffer of the appropriate size and number of channels, and copy the data from one Float32 buffer to another.
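For the Aurora.js case specifically, that copy can be done inside the question's decodeAudioData() helper, reusing the question's context variable. This is a sketch of one way to do it, assuming (per Aurora's docs) that asset.format exposes channelsPerFrame and sampleRate, and that decodeToBuffer() delivers a single interleaved Float32Array:

function decodeAudioData(audioData) {
  return new Promise(function(resolve, reject) {
    var asset = AV.Asset.fromBuffer(audioData);
    asset.on('error', reject);
    asset.decodeToBuffer(function(interleaved) {
      var channels = asset.format.channelsPerFrame;
      var frameCount = interleaved.length / channels;
      // Create an AudioBuffer of the right shape, then de-interleave:
      // sample n of channel c lives at interleaved[n * channels + c]
      var audioBuffer = context.createBuffer(channels, frameCount, asset.format.sampleRate);
      for (var c = 0; c < channels; c++) {
        var channelData = audioBuffer.getChannelData(c);
        for (var n = 0; n < frameCount; n++) {
          channelData[n] = interleaved[n * channels + c];
        }
      }
      resolve(audioBuffer);
    });
  });
}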
Here is the MDN code snippet to put data in an AudioBuffer and then play it:
https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer
// Stereo
var channels = 2;

// Create an empty two-second stereo buffer at the
// sample rate of the AudioContext
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var frameCount = audioCtx.sampleRate * 2.0;
var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);

button.onclick = function() {
  // Fill the buffer with white noise;
  // just random values between -1.0 and 1.0
  for (var channel = 0; channel < channels; channel++) {
    // This gives us the actual array that contains the data
    var nowBuffering = myArrayBuffer.getChannelData(channel);
    for (var i = 0; i < frameCount; i++) {
      // Math.random() is in [0; 1.0]
      // audio needs to be in [-1.0; 1.0]
      nowBuffering[i] = Math.random() * 2 - 1;
    }
  }

  // Get an AudioBufferSourceNode.
  // This is the AudioNode to use when we want to play an AudioBuffer
  var source = audioCtx.createBufferSource();
  // set the buffer in the AudioBufferSourceNode
  source.buffer = myArrayBuffer;
  // connect the AudioBufferSourceNode to the
  // destination so we can hear the sound
  source.connect(audioCtx.destination);
  // start the source playing
  source.start();
}
Related
I have an audio element:
var audioSrc = 'https://mfbx9da4.github.io/assets/audio/dope-drum-loop_C_major.wav'
var audio = document.createElement('audio')
audio.src = audioSrc
I need the AudioBuffer to do beat detection, so I tried accessing the buffer when the audio is loaded, like so:
audio.oncanplaythrough = () => {
  console.info('loaded');
  var source = context.createMediaElementSource(audio);
  source.connect(context.destination);
  console.info('source.buffer', source.buffer);
  source.start()
}
However, the above code snippet logs
> loaded
> source.buffer undefined
A MediaElementAudioSourceNode streams straight from the media element and never exposes an AudioBuffer, so source.buffer will always be undefined (and the node has no start() method). It seems the best way to do this is to avoid <audio> tags and load the audio via XHR:
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
const audioSrc =
  "https://davidalbertoadler.com/assets/audio/dope-drum-loop_C_major.wav";

// `await` needs an ES module or an async wrapper; an async IIFE works anywhere
(async () => {
  const audioData = await fetchAudio(audioSrc);
  audioContext.decodeAudioData(audioData, onDecoded, onDecodeError);
})();

function fetchAudio(url) {
  return new Promise((resolve, reject) => {
    const request = new XMLHttpRequest();
    request.open("GET", url, true);
    request.responseType = "arraybuffer";
    request.onload = () => resolve(request.response);
    request.onerror = (e) => reject(e);
    request.send();
  });
}

function onDecoded(buffer) {
  // Play the song
  console.log("Got the decoded buffer, now play the song", buffer);
  const source = audioContext.createBufferSource();
  source.buffer = buffer;
  source.connect(audioContext.destination);
  source.loop = true;
  source.start();
}

function onDecodeError(e) {
  console.log("Error decoding buffer: " + e.message);
  console.log(e);
}
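Once onDecoded fires you also have what the question was really after: raw PCM samples for beat detection, via buffer.getChannelData(). As a rough illustration (the 1024-frame window size is an arbitrary choice, not something from the question), here is a per-window RMS energy envelope, a common starting point for beat detection:

// Sketch: compute the RMS energy of successive windows of the first channel
function energyEnvelope(buffer, windowSize = 1024) {
  const data = buffer.getChannelData(0); // Float32Array of samples
  const envelope = [];
  for (let start = 0; start + windowSize <= data.length; start += windowSize) {
    let sum = 0;
    for (let i = start; i < start + windowSize; i++) {
      sum += data[i] * data[i];
    }
    envelope.push(Math.sqrt(sum / windowSize)); // RMS of this window
  }
  return envelope;
}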
I'm playing audio with the Web Audio API and want to record it at half speed, then speed it back up to normal on the back end. The output I get is pitched down.
var mr = null;
var source = null;

var stop = function() {
  mr.stop();
  source.stop();
}

var save = function(blob, filename) {
  var link = document.createElement('a');
  link.href = URL.createObjectURL(blob);
  link.download = filename || 'data.json';
  link.click();
}

var context = new (window.AudioContext || window.webkitAudioContext)();
var dest = context.createMediaStreamDestination();

function loadSound(url) {
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  // Decode asynchronously
  request.onload = function() {
    context.decodeAudioData(request.response, function(buffer) {
      source = context.createBufferSource();
      source.buffer = buffer;
      source.playbackRate.value = 0.5;
      source.connect(dest);
      mr = new MediaRecorder(dest.stream);
      mr.start();
      source.start(0);
      var chunks = [];
      mr.ondataavailable = function(e) {
        chunks.push(e.data);
      }
      mr.onstop = function(e) {
        var blob = new Blob(chunks, { 'type': 'video/webm' });
        save(blob, "a.webm");
      }
      setTimeout(stop, 10000);
    }, null);
  }
  request.send();
}
loadSound("https://storage.googleapis.com/frulix-dev.appspot.com/words");
I have created a fiddle for this case: https://jsfiddle.net/thenectorgod/v3ogtwvp/4/.
The problem does not occur with a normal-speed recording; that audio comes out clear after speeding it up to 2x.
Can anyone suggest how to get proper audio in this case?
I want to combine audio clips layered on top of each other, so that they play synchronously and are saved as a new audio file. Any help would be much appreciated. I've done some digging online but couldn't find a definitive answer as to whether many of the available JavaScript audio-editing libraries (Mix.js, for example) are capable of this.
Yes, it is possible using OfflineAudioContext() or AudioContext.createChannelMerger() and creating a MediaStream. See Phonegap mixing audio files and Web Audio API.
You can use fetch() or XMLHttpRequest() to retrieve each audio resource as an ArrayBuffer, and AudioContext.decodeAudioData() to decode each response into an AudioBuffer. An OfflineAudioContext() renders the merged audio; an AudioContext with createBufferSource(), createMediaStreamDestination() and MediaRecorder() then records the result as a stream. Promise.all() and the Promise() constructor chain the asynchronous fetch() and decodeAudioData() calls, and the mixed-audio Blob is assembled at the MediaRecorder's stop event.
Concretely: connect each AudioBufferSourceNode to OfflineAudioContext.destination, call .start() on each node, then call OfflineAudioContext.startRendering(). Because an OfflineAudioContext renders faster than real time, the rendered result is replayed through a normal AudioContext: create a buffer source node, assign it the renderedBuffer, and connect it both to the destination and to a node created by .createMediaStreamDestination(), whose .stream is passed to MediaRecorder(). At the recorder's stop event, create a Blob URL of the recorded mix with URL.createObjectURL(), which can be downloaded using an <a> element with its download attribute and href set to the Blob URL.
var sources = ["https://upload.wikimedia.org/wikipedia/commons/b/be/"
+ "Hidden_Tribe_-_Didgeridoo_1_Live.ogg"
, "https://upload.wikimedia.org/wikipedia/commons/6/6e/"
+ "Micronesia_National_Anthem.ogg"];
var description = "HiddenTribeAnthem";
var context;
var recorder;
var div = document.querySelector("div");
var duration = 60000;
var chunks = [];
var audio = new AudioContext();
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
player.controls = "controls";
function get(src) {
return fetch(src)
.then(function(response) {
return response.arrayBuffer()
})
}
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(sources.map(get)).then(function(data) {
    // decode every response first, so the OfflineAudioContext can be sized
    // from the decoded frame counts rather than the compressed byte lengths
    return Promise.all(data.map(function(buffer) {
      return audio.decodeAudioData(buffer)
    }))
  })
  .then(function(decoded) {
    var len = Math.max.apply(Math, decoded.map(function(buffer) {
      return buffer.length
    }));
    context = new OfflineAudioContext(2, len, 44100);
    decoded.forEach(function(bufferSource) {
      var source = context.createBufferSource();
      source.buffer = bufferSource;
      source.connect(context.destination);
      source.start()
    });
    return context.startRendering()
  })
  .then(function(renderedBuffer) {
    return new Promise(function(resolve) {
      var mix = audio.createBufferSource();
      mix.buffer = renderedBuffer;
      mix.connect(audio.destination);
      mix.connect(mixedAudio);
      recorder = new MediaRecorder(mixedAudio.stream);
      recorder.start(0);
      mix.start(0);
      div.innerHTML = "playing and recording tracks..";
      // stop playback and recorder in 60 seconds
      stopMix(duration, mix, recorder);
      recorder.ondataavailable = function(event) {
        chunks.push(event.data);
      };
      recorder.onstop = function(event) {
        var blob = new Blob(chunks, {
          "type": "audio/ogg; codecs=opus"
        });
        console.log("recording complete");
        resolve(blob)
      };
    })
  })
  .then(function(blob) {
    console.log(blob);
    div.innerHTML = "mixed audio tracks ready for download..";
    var audioDownload = URL.createObjectURL(blob);
    var a = document.createElement("a");
    a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
    a.href = audioDownload;
    a.innerHTML = a.download;
    document.body.appendChild(a);
    a.insertAdjacentHTML("afterend", "<br>");
    player.src = audioDownload;
    document.body.appendChild(player);
  })
  .catch(function(e) {
    console.log(e)
  });
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div>loading audio tracks.. please wait</div>
</body>
</html>
You can alternatively utilize AudioContext.createChannelMerger() and AudioContext.createChannelSplitter():
var sources = ["/path/to/audoi1", "/path/to/audio2"];
var description = "mix";
var chunks = [];
var channels = [[0, 1], [1, 0]];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
var mixedAudio = audio.createMediaStreamDestination();
var duration = 60000;
var context;
var recorder;
var audioDownload;
player.controls = "controls";
function get(src) {
return fetch(src)
.then(function(response) {
return response.arrayBuffer()
})
}
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(sources.map(get)).then(function(data) {
return Promise.all(data.map(function(buffer, index) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
var channel = channels[index];
var source = audio.createBufferSource();
source.buffer = bufferSource;
source.connect(splitter);
splitter.connect(merger, channel[0], channel[1]);
return source
})
}))
.then(function(audionodes) {
merger.connect(mixedAudio);
merger.connect(audio.destination);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
audionodes.forEach(function(node) {
node.start(0)
});
stopMix(duration, ...audionodes, recorder);
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
};
})
})
.catch(function(e) {
console.log(e)
});
I am using the Web Audio API's OfflineAudioContext to render an audio file at a different playback speed. When I do this, the altered audio is held in renderedBuffer.
How can I download the audio held in renderedBuffer?
Here is my code:
// define online and offline audio context
var audioCtx = new AudioContext();
var offlineCtx = new OfflineAudioContext(2, 44100 * 40, 44100);
var source = offlineCtx.createBufferSource();

// define variables
var pre = document.querySelector('pre');
var myScript = document.querySelector('script');
var play = document.querySelector('.play');
var stop = document.querySelector('.stop');

// use XHR to load an audio track, and
// decodeAudioData to decode it and stick it in a buffer.
// Then we put the buffer into the source
function getData() {
  var request = new XMLHttpRequest();
  request.open('GET', 'sound.mp4', true);
  request.responseType = 'arraybuffer';
  request.onload = function() {
    var audioData = request.response;
    audioCtx.decodeAudioData(audioData, function(buffer) {
      var myBuffer = buffer;
      source.buffer = myBuffer;
      source.playbackRate.value = 0.50;
      source.connect(offlineCtx.destination);
      source.start();
      //source.loop = true;
      offlineCtx.startRendering().then(function(renderedBuffer) {
        console.log('Rendering completed successfully');
        var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
        var song = audioCtx.createBufferSource();
        song.buffer = renderedBuffer;
        song.connect(audioCtx.destination);
        play.onclick = function() {
          song.start();
        }
      }).catch(function(err) {
        console.log('Rendering failed: ' + err);
        // Note: The promise should reject when startRendering is called a second time on an OfflineAudioContext
      });
    });
  }
  request.send();
}

// Run getData to start the process off
getData();

// dump script to pre element
pre.innerHTML = myScript.innerHTML;
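One way to get a downloadable file out of renderedBuffer is to reuse the createMediaStreamDestination() + MediaRecorder pattern from the mixing answer above: play the rendered buffer into a stream destination, record it, and save the Blob via an <a download> link. This is a sketch under those assumptions (the function name is my own, and the recording necessarily happens in real time):

// Sketch: record the rendered AudioBuffer and trigger a download when playback ends
function downloadRenderedBuffer(renderedBuffer, filename) {
  var ctx = new (window.AudioContext || window.webkitAudioContext)();
  var dest = ctx.createMediaStreamDestination(); // not wired to the speakers, so it records silently
  var src = ctx.createBufferSource();
  src.buffer = renderedBuffer;
  src.connect(dest);
  var recorder = new MediaRecorder(dest.stream);
  var chunks = [];
  recorder.ondataavailable = function(e) { chunks.push(e.data); };
  recorder.onstop = function() {
    var blob = new Blob(chunks, { type: recorder.mimeType || 'audio/webm' });
    var link = document.createElement('a');
    link.href = URL.createObjectURL(blob);
    link.download = filename || 'rendered.webm';
    link.click();
  };
  src.onended = function() { recorder.stop(); };
  recorder.start();
  src.start();
}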
I am trying to load and play an audio file in Chrome. It loads and plays fine, but I can't play it backwards:
audio = new Audio('http://mathweirdo.com/bingo/audio/buzzer.mp3');
audio.playbackRate = -1;
audio.currentTime = audio.duration; // I have tried omitting this line
audio.play()
This produces no sound, and only a single timeupdate event fires.
Using negative playbackRate values is currently not supported, so you will have to load and reverse the buffers manually.
Note that this requires a CORS-enabled audio source (the one in the example isn't, so I couldn't set up a live demo). Here is one way of doing it:
Load the data via AJAX (this requires CORS enabled for the audio file)
Let the browser parse the buffer into an audio buffer
Get the channel buffer(s) (references)
Reverse the buffer(s)
Initialize the audio buffer and play
This will of course limit you somewhat, as you can no longer use the Audio element; you will have to support the features you want by adding controls and code for them manually.
// load audio as a raw array buffer:
fetch("http://mathweirdo.com/bingo/audio/buzzer.mp3", process);

// then process the buffer using the decoder
function process(file) {
  var actx = new (window.AudioContext || window.webkitAudioContext);
  actx.decodeAudioData(file, function(buffer) {
      var src = actx.createBufferSource(),    // enable using loaded data as source
          channel, tmp, i, t = 0, len, len2;

      // reverse channels
      while (t < buffer.numberOfChannels) {   // iterate each channel
        channel = buffer.getChannelData(t++); // get reference to a channel
        len = channel.length - 1;             // end of buffer
        len2 = len >>> 1;                     // center of buffer (integer)
        for (i = 0; i < len2; i++) {          // loop to center
          tmp = channel[len - i];             // from end -> tmp
          channel[len - i] = channel[i];      // end = from beginning
          channel[i] = tmp;                   // tmp -> beginning
        }
      }

      // play
      src.buffer = buffer;
      src.connect(actx.destination);
      if (!src.start) src.start = src.noteOn;
      src.start(0);
    },
    function() { alert("Could not decode audio!") }
  )
}

// ajax loader; note this local fetch() shadows the native window.fetch
function fetch(url, callback) {
  var xhr = new XMLHttpRequest();
  try {
    xhr.open("GET", url);
    xhr.responseType = "arraybuffer";
    xhr.onerror = function() { alert("Network error") };
    xhr.onload = function() {
      if (xhr.status === 200) callback(xhr.response);
      else alert(xhr.statusText);
    };
    xhr.send();
  } catch (err) { alert(err.message) }
}
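As a side note, engines with ES2015 typed arrays expose TypedArray.prototype.reverse(), which reverses in place, so the hand-rolled swap loop above can be collapsed to one line per channel. A sketch, assuming ES2015 support:

// reverse every channel of a decoded AudioBuffer in place
function reverseBuffer(buffer) {
  for (var c = 0; c < buffer.numberOfChannels; c++) {
    buffer.getChannelData(c).reverse(); // TypedArray.prototype.reverse()
  }
  return buffer;
}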