I am using the Web Audio API's OfflineAudioContext to render an audio file at a different playback speed. When I do this, the altered audio is held in the renderedBuffer.
I am wondering how I can download the audio held in the renderedBuffer?
Here is my code:
// define online and offline audio context
var audioCtx = new AudioContext();
var offlineCtx = new OfflineAudioContext(2, 44100 * 40, 44100);
// declared with `var` — the original created an implicit global, which
// breaks under strict mode / inside modules
var source = offlineCtx.createBufferSource();
// define variables
var pre = document.querySelector('pre');
var myScript = document.querySelector('script');
var play = document.querySelector('.play');
var stop = document.querySelector('.stop');
// use XHR to load an audio track, and
// decodeAudioData to decode it and stick it in a buffer.
// Then we put the buffer into the source
// Fetch sound.mp4, decode it, render it at half speed through the offline
// context, then wire the rendered result to the Play button.
function getData() {
  // `var` added: `request` and `myBuffer` were implicit globals.
  var request = new XMLHttpRequest();
  request.open('GET', 'sound.mp4', true);
  request.responseType = 'arraybuffer';
  request.onload = function () {
    var audioData = request.response;
    audioCtx.decodeAudioData(audioData, function (buffer) {
      var myBuffer = buffer;
      source.buffer = myBuffer;
      source.playbackRate.value = 0.50;
      source.connect(offlineCtx.destination);
      source.start();
      //source.loop = true;
      offlineCtx.startRendering().then(function (renderedBuffer) {
        console.log('Rendering completed successfully');
        // Play the offline result back through a fresh online context.
        var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
        var song = audioCtx.createBufferSource();
        song.buffer = renderedBuffer;
        song.connect(audioCtx.destination);
        play.onclick = function () {
          song.start();
        };
      }).catch(function (err) {
        console.log('Rendering failed: ' + err);
        // Note: The promise should reject when startRendering is called a second time on an OfflineAudioContext
      });
    }, function (err) {
      // Surface decode failures — the original had no error callback.
      console.log('Decoding failed: ' + err);
    });
  };
  request.send();
}
// Run getData to start the process off
getData();
// Dump this script's own text into the <pre> element so the demo page shows its source.
pre.innerHTML = myScript.innerHTML;
Related
I have an audio element
var audioSrc = 'https://mfbx9da4.github.io/assets/audio/dope-drum-loop_C_major.wav'
var audio = document.createElement('audio')
audio.src = audioSrc
I need the AudioBuffer
to do beat detection so I tried accessing the buffer when the audio is loaded as so:
audio.oncanplaythrough = () => {
console.info('loaded');
// NOTE(review): createMediaElementSource returns a MediaElementAudioSourceNode,
// which streams live from the <audio> element — it has NO `buffer` property,
// so the log below always prints undefined (this is the bug being asked about).
var source = context.createMediaElementSource(audio);
source.connect(context.destination);
console.info('source.buffer', source.buffer);
// NOTE(review): MediaElementAudioSourceNode also has no start() method; this
// call throws. Playback is driven by audio.play() on the element instead.
source.start()
}
However, the above code snippet logs
> loaded
> source.buffer undefined
It seems the best way to do this is to avoid <audio> tags and load the audio via XHR:
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
const audioSrc =
"https://davidalbertoadler.com/assets/audio/dope-drum-loop_C_major.wav";
// NOTE(review): top-level `await` only works inside an ES module
// (<script type="module">); in a classic script wrap this in an async function.
const audioData = await fetchAudio(audioSrc);
// Callback-style decodeAudioData for maximum browser compatibility.
audioContext.decodeAudioData(audioData, onDecoded, onDecodeError);
// Promise wrapper around XHR: resolves with the ArrayBuffer of `url`,
// rejects with the error event on network failure.
function fetchAudio(url) {
  return new Promise((resolve, reject) => {
    const xhr = new XMLHttpRequest();
    xhr.open("GET", url, true);
    xhr.responseType = "arraybuffer";
    xhr.addEventListener("load", () => resolve(xhr.response));
    xhr.addEventListener("error", reject);
    xhr.send();
  });
}
// Success callback for decodeAudioData: loop the decoded buffer
// through the page's AudioContext.
function onDecoded(buffer) {
  console.log("Got the decoded buffer now play the song", buffer);
  const node = audioContext.createBufferSource();
  node.buffer = buffer;
  node.loop = true;
  node.connect(audioContext.destination);
  node.start();
}
// Failure callback for decodeAudioData: log a readable message,
// then the raw error object for debugging.
function onDecodeError(e) {
  console.log(`Error decoding buffer: ${e.message}`);
  console.log(e);
}
I am playing audio using the Web Audio API and want to record it at half speed, then later speed it back up to normal at the back-end. The output I get is pitched down.
// Handles for the MediaRecorder and the playing source, assigned in loadSound().
var mr = null;
var source = null;
// Halt the recorder (fires its onstop -> save) and the source node together.
var stop = function () {
  mr.stop();
  source.stop();
};
// Trigger a browser download of `blob` under `filename`.
// The anchor is appended to the DOM before click() — Firefox ignores clicks
// on detached anchors — and the object URL is revoked afterwards so the
// blob's memory can be reclaimed.
var save = function (blob, filename) {
  var link = document.createElement('a');
  link.href = URL.createObjectURL(blob);
  link.download = filename || 'data.json';
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
  URL.revokeObjectURL(link.href);
};
// Online audio context plus a MediaStream destination whose .stream is fed to MediaRecorder.
var context = new ( window.AudioContext || window.webkitAudioContext )();
var dest = context.createMediaStreamDestination();
// Download `url`, decode it, play it at half speed into the recorder
// destination, and record ~10 seconds of the result to a downloadable file.
function loadSound(url) {
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  // Decode asynchronously
  request.onload = function () {
    context.decodeAudioData(request.response, function (buffer) {
      source = context.createBufferSource();
      source.buffer = buffer;
      // Half speed. NOTE: playbackRate also halves the pitch — keeping pitch
      // constant needs a time-stretching algorithm, not a rate change.
      source.playbackRate.value = 0.5;
      source.connect(dest);
      mr = new MediaRecorder(dest.stream);
      var chunks = [];
      // Register handlers BEFORE start() so no data events can be missed.
      mr.ondataavailable = function (e) {
        chunks.push(e.data);
      };
      mr.onstop = function () {
        var blob = new Blob(chunks, { 'type' : 'video/webm' });
        save(blob, "a.webm");
      };
      mr.start();
      source.start(0);
      setTimeout(stop, 10000);
    }, function (err) {
      // The original passed null here, silently swallowing decode failures.
      console.log('decodeAudioData failed:', err);
    });
  };
  request.send();
}
loadSound("https://storage.googleapis.com/frulix-dev.appspot.com/words");
I have created a fiddle for the case https://jsfiddle.net/thenectorgod/v3ogtwvp/4/.
This does not happen for a normal recording of the audio: that recording comes out clear after speeding it up to 2x.
Can anyone suggest how to get proper audio in this case?
I am developing a web application in which the application downloads encrypted chunks of data; I then have to decrypt them and play the video. But I cannot make the user wait for all of the decryption to finish, so I am using the Media Source API. It is working, but I get this error after decrypting the last chunk.
"Uncaught DOMException: Failed to execute 'addSourceBuffer' on 'MediaSource': This MediaSource has reached the limit of SourceBuffer objects it can handle. No additional SourceBuffer objects may be added.(…)"
<script type="text/javascript">
//////////
// Chunk bookkeeping: total chunk count / how many have been decrypted so far.
var no_of_files = 0;
var no_of_dlfiles = 0;
var FilesURL = [];
var files_str = 'video/vid_1.webm, video/vid_2.webm, video/vid_3.webm, video/vid_4.webm, video/vid_5.webm';
var file_counter = 0;
var mimeCodec = 'video/webm; codecs="vorbis,vp8"';
var passkey = "014bcbc0e15c4fc68b098f9b16f62bb7shahbaz.hansinfotech#gmail.com";
// NOTE(review): split(',') leaves a leading space on every URL after the
// first (' video/vid_2.webm', ...) — confirm the server tolerates this,
// or split on ', ' instead.
FilesURL = files_str.split(',');
no_of_files = FilesURL.length;
var player = document.getElementById('videoplayer');
if ('MediaSource' in window && MediaSource.isTypeSupported(mimeCodec)) {
// Attach the MediaSource to the <video> element; sourceOpen fires once it is ready.
var mediaSource = new MediaSource;
//console.log(mediaSource.readyState); // closed
player.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener('sourceopen', sourceOpen);
} else {
console.error('Unsupported MIME type or codec: ', mimeCodec);
}
//////////
// Fired when the MediaSource attached to the <video> opens.
// Creates ONE SourceBuffer and streams the decrypted chunks into it in order.
function sourceOpen (_) {
  console.log("start");
  var mediaSource = this;
  var sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
  sourceBuffer.mode = "sequence";
  // Register the updateend handler ONCE. The original added a new listener
  // for every chunk, so after N chunks each append fired N callbacks —
  // advancing file_counter several times per append and ending the stream
  // early, which is why later SourceBuffer operations failed.
  sourceBuffer.addEventListener('updateend', function () {
    file_counter++;
    if (player.paused) {
      player.play();
    }
    if (file_counter === FilesURL.length) {
      // End the stream only after the LAST chunk has been appended
      // (the original compared against length - 1 and ended one chunk early).
      mediaSource.endOfStream();
      return;
    }
    WriteDatatoTemp();
  });
  // Download, decrypt, and append the chunk at FilesURL[file_counter].
  function WriteDatatoTemp() {
    if (file_counter < FilesURL.length) {
      console.log(file_counter);
      no_of_dlfiles++;
      $("#decryptionRatio").text(no_of_dlfiles + " of " + no_of_files);
      $("#decryption_status").show();
      getFileObject(FilesURL[file_counter], function (fileObject) {
        var reader = new FileReader();
        reader.onloadend = function (e) {
          // AES-decrypt the downloaded text, then unwrap the base64 payload.
          var decrypted_data = JSON.parse(CryptoJS.AES.decrypt(e.target.result, passkey, {format: CryptoJSAesJson}).toString(CryptoJS.enc.Utf8));
          var byteArray = Base64Binary.decodeArrayBuffer(decrypted_data);
          try {
            // A single append per chunk; the updateend handler drives the
            // next one. (The original's `while (!sourceBuffer.updating)`
            // loop only ever appended once anyway, because appendBuffer
            // sets `updating` synchronously.)
            sourceBuffer.appendBuffer(byteArray);
          } catch (err) {
            console.log(err);
          }
        };
        reader.readAsText(fileObject);
      });
    }
  }
  WriteDatatoTemp();
}
///
// Download `url` as a Blob and hand it to `cb`.
var getFileBlob = function (url, cb) {
  var request = new XMLHttpRequest();
  request.open("GET", url);
  request.responseType = "blob";
  request.onload = function () {
    cb(request.response);
  };
  request.send();
};
// Decorate a Blob with File-like metadata (name, lastModifiedDate)
// and hand it back so downstream code can treat it as a File.
var blobToFile = function (blob, name) {
  Object.assign(blob, {
    lastModifiedDate: new Date(),
    name: name,
  });
  return blob;
};
// Fetch `filePathOrUrl` and pass `cb` a File-like Blob named 'vid.webm'.
var getFileObject = function (filePathOrUrl, cb) {
  getFileBlob(filePathOrUrl, function (blob) {
    var file = blobToFile(blob, 'vid.webm');
    cb(file);
  });
};
</script>
I want to combine audio clips, layered on top of each other so that they play synchronously and are saved in a new audio file. Any help would be much appreciated. I've done some digging online, but couldn't find a definitive answer as to whether many of the available JavaScript audio-editing libraries (Mix.js, for example) are capable of this.
Yes, it is possible using OfflineAudioContext() or AudioContext.createChannelMerger() and creating a MediaStream. See Phonegap mixing audio files , Web Audio API.
You can use fetch() or XMLHttpRequest() to retrieve audio resource as an ArrayBuffer, AudioContext.decodeAudioData() to create an AudioBufferSourceNode from response; OfflineAudioContext() to render merged audio, AudioContext, AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination() , MediaRecorder() to record stream; Promise.all(), Promise() constructor, .then() to process asynchronous requests to fetch(), AudioContext.decodeAudioData(), pass resulting mixed audio Blob at stop event of MediaRecorder.
Connect each AudioContext AudioBufferSourceNode to OfflineAudioContext.destination, call .start() on each node; call OfflineAudioContext.startRendering(); create new AudioContext node, connect renderedBuffer; call .createMediaStreamDestination() on AudioContext to create a MediaStream from merged audio buffers, pass .stream to MediaRecorder(), at stop event of MediaRecorder, create Blob URL of Blob of recorded audio mix with URL.createObjectURL(), which can be downloaded using <a> element with download attribute and href set to Blob URL.
// The two source tracks to mix; fetched as ArrayBuffers below.
var sources = ["https://upload.wikimedia.org/wikipedia/commons/b/be/"
+ "Hidden_Tribe_-_Didgeridoo_1_Live.ogg"
, "https://upload.wikimedia.org/wikipedia/commons/6/6e/"
+ "Micronesia_National_Anthem.ogg"];
// Base name for the downloadable mixed file.
var description = "HiddenTribeAnthem";
// OfflineAudioContext; assigned once the input lengths are known.
var context;
var recorder;
var div = document.querySelector("div");
// Record for 60 seconds.
var duration = 60000;
var chunks = [];
// Online context: used for decoding and for monitoring playback.
var audio = new AudioContext();
// MediaStream destination — MediaRecorder captures its .stream.
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
player.controls = "controls";
// Fetch `src` and resolve with its raw bytes as an ArrayBuffer.
function get(src) {
  return fetch(src).then((response) => response.arrayBuffer());
}
// After `duration` milliseconds, call .stop() on every node/recorder passed in.
function stopMix(duration, ...media) {
  setTimeout(function () {
    for (const node of media) {
      node.stop();
    }
  }, duration);
}
// Fetch both tracks in parallel, mix them offline, then record the playback.
Promise.all(sources.map(get)).then(function(data) {
var len = Math.max.apply(Math, data.map(function(buffer) {
return buffer.byteLength
}));
// NOTE(review): byteLength is in BYTES, not sample frames — the offline
// context is therefore longer than the longest track; rendering still
// works, the tail is just silence. Confirm if exact length matters.
context = new OfflineAudioContext(2, len, 44100);
return Promise.all(data.map(function(buffer) {
// Decode on the online context; schedule each track into the offline destination.
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
var source = context.createBufferSource();
source.buffer = bufferSource;
source.connect(context.destination);
return source.start()
})
}))
.then(function() {
// All sources scheduled; render the mix faster than real time.
return context.startRendering()
})
.then(function(renderedBuffer) {
return new Promise(function(resolve) {
// Play the rendered mix through the online context while recording it.
var mix = audio.createBufferSource();
mix.buffer = renderedBuffer;
mix.connect(audio.destination);
mix.connect(mixedAudio);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
mix.start(0);
div.innerHTML = "playing and recording tracks..";
// stop playback and recorder in 60 seconds
stopMix(duration, mix, recorder)
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
console.log("recording complete");
resolve(blob)
};
})
})
.then(function(blob) {
console.log(blob);
div.innerHTML = "mixed audio tracks ready for download..";
// Expose the recording as a download link and as an <audio> player.
var audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
document.body.appendChild(a);
a.insertAdjacentHTML("afterend", "<br>");
player.src = audioDownload;
document.body.appendChild(player);
})
})
.catch(function(e) {
console.log(e)
});
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<!-- Status text; the script replaces it and appends the download link and player. -->
<div>loading audio tracks.. please wait</div>
</body>
</html>
You can alternatively utilize AudioContext.createChannelMerger(), AudioContext.createChannelSplitter()
// Audio sources to mix (placeholder paths).
var sources = ["/path/to/audoi1", "/path/to/audio2"];
var description = "mix";
var chunks = [];
// Per-track [splitter output index, merger input index] routing, used below.
var channels = [[0, 1], [1, 0]];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
// MediaStream destination — MediaRecorder captures its .stream.
var mixedAudio = audio.createMediaStreamDestination();
// Record for 60 seconds.
var duration = 60000;
var context;
var recorder;
var audioDownload;
player.controls = "controls";
// Retrieve the resource at `src`; the returned promise yields an ArrayBuffer.
function get(src) {
  return fetch(src).then((res) => res.arrayBuffer());
}
// Stop every passed node/recorder once `duration` milliseconds have elapsed.
function stopMix(duration, ...media) {
  setTimeout(() => {
    media.forEach((node) => node.stop());
  }, duration);
}
// Decode each track, then route it through the splitter/merger pair.
Promise.all(sources.map(get)).then(function(data) {
return Promise.all(data.map(function(buffer, index) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
// channel = [output index on the splitter, input index on the merger]
// for this track — see the `channels` table above.
var channel = channels[index];
var source = audio.createBufferSource();
source.buffer = bufferSource;
source.connect(splitter);
splitter.connect(merger, channel[0], channel[1]);
return source
})
}))
.then(function(audionodes) {
// Monitor through the speakers and capture the same mix for recording.
merger.connect(mixedAudio);
merger.connect(audio.destination);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
audionodes.forEach(function(node) {
node.start(0)
});
// Stop all sources and the recorder after `duration` ms.
stopMix(duration, ...audionodes, recorder);
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
// Package the captured chunks and expose them as a download + player.
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
};
})
})
.catch(function(e) {
console.log(e)
});
Because one of the browsers I'm trying to support doesn't allow me to decode a specific codec using AudioContext.decodeAudioData(), I'm using Aurora.js to decode audio files.
How can I change the decoded samples received from Aurora.js into an AudioBuffer I can actually use to playback the audio?
This is my code so far:
// Prefixed constructor fallback for older WebKit browsers.
var AudioContext = (window.AudioContext || window.webkitAudioContext);
var context = new AudioContext();
// Twelve 5-second WAV segments, scheduled back to back below.
var segmentUrls = [
'/segments/00.wav',
'/segments/05.wav',
'/segments/10.wav',
'/segments/15.wav',
'/segments/20.wav',
'/segments/25.wav',
'/segments/30.wav',
'/segments/35.wav',
'/segments/40.wav',
'/segments/45.wav',
'/segments/50.wav',
'/segments/55.wav'
];
// Load and decode every segment in parallel, then schedule them sequentially.
Promise.all(segmentUrls.map(loadSound))
.then(function(buffers) {
var startAt = 0;
buffers.forEach(function(buffer) {
// NOTE(review): start(when) takes an absolute time on the context
// timeline; this works because scheduling happens near currentTime = 0.
playSound(startAt, buffer);
startAt += buffer.duration;
});
})
.catch(function(err) {
console.error(err);
});
// Schedule `buffer` to begin at time `offset` on the context timeline.
function playSound(offset, buffer) {
  var node = context.createBufferSource();
  node.buffer = buffer;
  node.connect(context.destination);
  node.start(offset);
}
// Download `url` as an ArrayBuffer and resolve with the decoded AudioBuffer
// (decoding is delegated to decodeAudioData below).
function loadSound(url) {
  return new Promise(function (resolve, reject) {
    var xhr = new XMLHttpRequest();
    xhr.open('GET', url, true);
    xhr.responseType = 'arraybuffer';
    xhr.onload = function () {
      resolve(decodeAudioData(xhr.response));
    };
    xhr.onerror = function () {
      reject('Could not request file');
    };
    xhr.send();
  });
}
// Decode `audioData` with Aurora.js (AV) and convert the interleaved
// Float32 samples into a Web Audio AudioBuffer so it can be played with
// createBufferSource(). The original promise never settled — the stub
// comment was never replaced with the conversion.
function decodeAudioData(audioData) {
  return new Promise(function(resolve, reject) {
    var asset = AV.Asset.fromBuffer(audioData);
    // Propagate Aurora decode errors instead of hanging forever.
    asset.on('error', reject);
    asset.decodeToBuffer(function(samples) {
      // `samples` is an interleaved Float32Array: [L, R, L, R, ...].
      var channels = asset.format.channelsPerFrame;
      var frameCount = samples.length / channels;
      var audioBuffer = context.createBuffer(
        channels, frameCount, asset.format.sampleRate);
      // De-interleave into the per-channel Float32 arrays of the AudioBuffer.
      for (var ch = 0; ch < channels; ch++) {
        var channelData = audioBuffer.getChannelData(ch);
        for (var i = 0; i < frameCount; i++) {
          channelData[i] = samples[i * channels + ch];
        }
      }
      resolve(audioBuffer);
    });
  });
}
You'll have to create an AudioBuffer of the appropriate size and # of channels, and copy the data from one Float32 buffer to another.
Here is the MDN code snippet to put data in an AudioBuffer and then play it:
https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer
// MDN example: synthesize two seconds of white noise into an AudioBuffer
// and play it on each button click.
// Stereo
var channels = 2;
// Create an empty two second stereo buffer at the
// sample rate of the AudioContext
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var frameCount = audioCtx.sampleRate * 2.0;
var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);
// NOTE(review): `button` must be defined elsewhere on the page.
button.onclick = function() {
// Fill the buffer with white noise;
// just random values between -1.0 and 1.0
for (var channel = 0; channel < channels; channel++) {
// This gives us the actual array that contains the data
var nowBuffering = myArrayBuffer.getChannelData(channel);
for (var i = 0; i < frameCount; i++) {
// Math.random() is in [0; 1.0]
// audio needs to be in [-1.0; 1.0]
nowBuffering[i] = Math.random() * 2 - 1;
}
}
// Get an AudioBufferSourceNode.
// This is the AudioNode to use when we want to play an AudioBuffer
// Note: source nodes are one-shot, so a fresh node is created per click.
var source = audioCtx.createBufferSource();
// set the buffer in the AudioBufferSourceNode
source.buffer = myArrayBuffer;
// connect the AudioBufferSourceNode to the
// destination so we can hear the sound
source.connect(audioCtx.destination);
// start the source playing
source.start();
}