Prevent pitch correction of Web Audio API - javascript

I am playing audio with the Web Audio API and want to record it at half speed, then speed it back up to normal on the back-end. The output I get is pitched down.
var mr = null;
var source = null;

var stop = function() {
  mr.stop();
  source.stop();
};

var save = function(blob, filename) {
  var link = document.createElement('a');
  link.href = URL.createObjectURL(blob);
  link.download = filename || 'data.json';
  link.click();
};
var context = new (window.AudioContext || window.webkitAudioContext)();
var dest = context.createMediaStreamDestination();

function loadSound(url) {
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  // Decode asynchronously
  request.onload = function() {
    context.decodeAudioData(request.response, function(buffer) {
      source = context.createBufferSource();
      source.buffer = buffer;
      source.playbackRate.value = 0.5;
      source.connect(dest);
      mr = new MediaRecorder(dest.stream);
      mr.start();
      source.start(0);
      var chunks = [];
      mr.ondataavailable = function(e) {
        chunks.push(e.data);
      };
      mr.onstop = function(e) {
        var blob = new Blob(chunks, { 'type' : 'video/webm' });
        save(blob, "a.webm");
      };
      setTimeout(stop, 10000);
    }, null);
  };
  request.send();
}
loadSound("https://storage.googleapis.com/frulix-dev.appspot.com/words");
I have created a fiddle for this case: https://jsfiddle.net/thenectorgod/v3ogtwvp/4/.
This does not happen for a normal-speed recording; that audio comes out clear after speeding it up to 2x.
Can anyone suggest how to get proper audio in this case?

Related

using web-dictaphone need to save audio to server

I have the web-dictaphone working: https://github.com/mdn/web-dictaphone/
My goal is to have it work just like it does by default, but I want to add a save button that will save the recorded file onto the wordpress server.
Anyone have any suggestions on how to do this? Here is the code that executes after recording.
I imagine I need to do something with the audioURL and use something like file_put_contents() in PHP. I'm not sure if I need to convert to base64 or anything. Any help is appreciated, thanks.
mediaRecorder.onstop = function(e) {
  console.log("data available after MediaRecorder.stop() called.");
  const clipName = prompt('Enter a name for your sound clip?', 'My unnamed clip');
  const clipContainer = document.createElement('article');
  const clipLabel = document.createElement('p');
  const audio = document.createElement('audio');
  const deleteButton = document.createElement('button');
  clipContainer.classList.add('clip');
  audio.setAttribute('controls', '');
  deleteButton.textContent = 'Delete';
  deleteButton.className = 'delete';
  if (clipName === null) {
    clipLabel.textContent = 'My unnamed clip';
  } else {
    clipLabel.textContent = clipName;
  }
  clipContainer.appendChild(audio);
  clipContainer.appendChild(clipLabel);
  clipContainer.appendChild(deleteButton);
  soundClips.appendChild(clipContainer);
  audio.controls = true;
  const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
  chunks = [];
  const audioURL = window.URL.createObjectURL(blob);
  audio.src = audioURL;
  console.log("recorder stopped");
  deleteButton.onclick = function(e) {
    let evtTgt = e.target;
    evtTgt.parentNode.parentNode.removeChild(evtTgt.parentNode);
  };
  clipLabel.onclick = function() {
    const existingName = clipLabel.textContent;
    const newClipName = prompt('Enter a new name for your sound clip?');
    if (newClipName === null) {
      clipLabel.textContent = existingName;
    } else {
      clipLabel.textContent = newClipName;
    }
  };
}
There is no need to convert to base64; just append the blob to an instance of the FormData class and send it:
var formData = new FormData();
formData.append('attachment', blob);
var request = new XMLHttpRequest();
request.open('POST', 'URL', true); // edit URL
request.onload = function() { console.log("Status: %s", request.responseText) };
request.send(formData);
On the backend side, receiving can be achieved with $_FILES and move_uploaded_file(), like a normal upload procedure; the file will appear under the field name given to formData.append(), i.e. $_FILES['attachment'] here.

Web audio API get AudioBuffer of <audio> element

I have an audio element
var audioSrc = 'https://mfbx9da4.github.io/assets/audio/dope-drum-loop_C_major.wav'
var audio = document.createElement('audio')
audio.src = audioSrc
I need the AudioBuffer to do beat detection, so I tried accessing the buffer when the audio is loaded, like so:
audio.oncanplaythrough = () => {
  console.info('loaded');
  var source = context.createMediaElementSource(audio);
  source.connect(context.destination);
  console.info('source.buffer', source.buffer);
  source.start()
}
However, the above code snippet logs
> loaded
> source.buffer undefined
A MediaElementAudioSourceNode has no buffer property, so the log above will always show undefined. It seems the best way to do this is to avoid <audio> tags and load the audio via XHR:
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
const audioSrc =
  "https://davidalbertoadler.com/assets/audio/dope-drum-loop_C_major.wav";

// await is only valid inside an async function (or a module),
// so kick things off from an async IIFE
(async () => {
  const audioData = await fetchAudio(audioSrc);
  audioContext.decodeAudioData(audioData, onDecoded, onDecodeError);
})();

function fetchAudio(url) {
  return new Promise((resolve, reject) => {
    const request = new XMLHttpRequest();
    request.open("GET", url, true);
    request.responseType = "arraybuffer";
    request.onload = () => resolve(request.response);
    request.onerror = (e) => reject(e);
    request.send();
  });
}

function onDecoded(buffer) {
  // Play the song
  console.log("Got the decoded buffer, now play the song", buffer);
  const source = audioContext.createBufferSource();
  source.buffer = buffer;
  source.connect(audioContext.destination);
  source.loop = true;
  source.start();
}

function onDecodeError(e) {
  console.log("Error decoding buffer: " + e.message);
  console.log(e);
}
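For what it's worth, decodeAudioData() also returns a promise in current browsers, so the XHR helper can be replaced with fetch(); a minimal sketch reusing the onDecoded/onDecodeError callbacks above:
async function loadAudioBuffer(url) {
  // fetch the file as an ArrayBuffer, then use the promise form of decodeAudioData
  const response = await fetch(url);
  const arrayBuffer = await response.arrayBuffer();
  return audioContext.decodeAudioData(arrayBuffer);
}

loadAudioBuffer(audioSrc).then(onDecoded).catch(onDecodeError);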

Is it possible to mix multiple audio files on top of each other preferably with javascript

I want to combine audio clips, layered on top of each other so that they play synchronously, and save them in a new audio file. Any help would be much appreciated. I've done some digging online, but couldn't find a definitive answer as to whether many of the JavaScript audio-editing libraries available (Mix.js, for example) are capable of this.
Yes, it is possible using OfflineAudioContext() or AudioContext.createChannelMerger() and creating a MediaStream. See Phonegap mixing audio files and Web Audio API.
In outline: use fetch() or XMLHttpRequest() to retrieve each audio resource as an ArrayBuffer and AudioContext.decodeAudioData() to decode each response; use an OfflineAudioContext() to render the merged audio, and AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination() and MediaRecorder() to record the resulting stream; use Promise.all(), the Promise() constructor and .then() to sequence the asynchronous fetch() and decodeAudioData() calls, with the mixed audio Blob delivered at the stop event of MediaRecorder.
Concretely: connect each AudioBufferSourceNode to OfflineAudioContext.destination and call .start() on each node, then call OfflineAudioContext.startRendering(). When rendering completes, create a buffer source on a regular AudioContext, assign it the renderedBuffer, and connect it to the node returned by .createMediaStreamDestination() to obtain a MediaStream of the merged audio. Pass that node's .stream to MediaRecorder(); at the stop event of MediaRecorder, create a Blob URL of the recorded audio mix with URL.createObjectURL(), which can be downloaded using an <a> element with a download attribute and href set to the Blob URL.
var sources = ["https://upload.wikimedia.org/wikipedia/commons/b/be/"
+ "Hidden_Tribe_-_Didgeridoo_1_Live.ogg"
, "https://upload.wikimedia.org/wikipedia/commons/6/6e/"
+ "Micronesia_National_Anthem.ogg"];
var description = "HiddenTribeAnthem";
var context;
var recorder;
var div = document.querySelector("div");
var duration = 60000;
var chunks = [];
var audio = new AudioContext();
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
player.controls = "controls";
function get(src) {
return fetch(src)
.then(function(response) {
return response.arrayBuffer()
})
}
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(sources.map(get)).then(function(data) {
var len = Math.max.apply(Math, data.map(function(buffer) {
return buffer.byteLength
}));
context = new OfflineAudioContext(2, len, 44100);
return Promise.all(data.map(function(buffer) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
var source = context.createBufferSource();
source.buffer = bufferSource;
source.connect(context.destination);
return source.start()
})
}))
.then(function() {
return context.startRendering()
})
.then(function(renderedBuffer) {
return new Promise(function(resolve) {
var mix = audio.createBufferSource();
mix.buffer = renderedBuffer;
mix.connect(audio.destination);
mix.connect(mixedAudio);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
mix.start(0);
div.innerHTML = "playing and recording tracks..";
// stop playback and recorder in 60 seconds
stopMix(duration, mix, recorder)
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
console.log("recording complete");
resolve(blob)
};
})
})
.then(function(blob) {
console.log(blob);
div.innerHTML = "mixed audio tracks ready for download..";
var audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
document.body.appendChild(a);
a.insertAdjacentHTML("afterend", "<br>");
player.src = audioDownload;
document.body.appendChild(player);
})
})
.catch(function(e) {
console.log(e)
});
<!DOCTYPE html>
<html>
<head>
</head>
<body>
  <div>loading audio tracks.. please wait</div>
</body>
</html>
You can alternatively utilize AudioContext.createChannelMerger() and AudioContext.createChannelSplitter():
var sources = ["/path/to/audoi1", "/path/to/audio2"];
var description = "mix";
var chunks = [];
var channels = [[0, 1], [1, 0]];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
var mixedAudio = audio.createMediaStreamDestination();
var duration = 60000;
var context;
var recorder;
var audioDownload;
player.controls = "controls";
function get(src) {
return fetch(src)
.then(function(response) {
return response.arrayBuffer()
})
}
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(sources.map(get)).then(function(data) {
return Promise.all(data.map(function(buffer, index) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
var channel = channels[index];
var source = audio.createBufferSource();
source.buffer = bufferSource;
source.connect(splitter);
splitter.connect(merger, channel[0], channel[1]);
return source
})
}))
.then(function(audionodes) {
merger.connect(mixedAudio);
merger.connect(audio.destination);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
audionodes.forEach(function(node) {
node.start(0)
});
stopMix(duration, ...audionodes, recorder);
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
};
})
})
.catch(function(e) {
console.log(e)
});
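Note the design difference between the two variants: the splitter/merger version skips the OfflineAudioContext rendering pass entirely. The decoded tracks are summed through the splitter and re-merged (with the channel order given by channels) into a live stereo stream, and MediaRecorder captures the mix in real time.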

How to save renderedBuffer as an audio file

I am using the Web Audio API's OfflineAudioContext to render an audio file at a different playback speed. When I do this, the altered audio is held in renderedBuffer.
How can I download the audio held in renderedBuffer?
Here is my code:
// define online and offline audio context
var audioCtx = new AudioContext();
var offlineCtx = new OfflineAudioContext(2, 44100 * 40, 44100);
var source = offlineCtx.createBufferSource();

// define variables
var pre = document.querySelector('pre');
var myScript = document.querySelector('script');
var play = document.querySelector('.play');
var stop = document.querySelector('.stop');

// use XHR to load an audio track, and
// decodeAudioData to decode it and stick it in a buffer.
// Then we put the buffer into the source
function getData() {
  var request = new XMLHttpRequest();
  request.open('GET', 'sound.mp4', true);
  request.responseType = 'arraybuffer';
  request.onload = function() {
    var audioData = request.response;
    audioCtx.decodeAudioData(audioData, function(buffer) {
      var myBuffer = buffer;
      source.buffer = myBuffer;
      source.playbackRate.value = 0.50;
      source.connect(offlineCtx.destination);
      source.start();
      //source.loop = true;
      offlineCtx.startRendering().then(function(renderedBuffer) {
        console.log('Rendering completed successfully');
        var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
        var song = audioCtx.createBufferSource();
        song.buffer = renderedBuffer;
        song.connect(audioCtx.destination);
        play.onclick = function() {
          song.start();
        }
      }).catch(function(err) {
        console.log('Rendering failed: ' + err);
        // Note: the promise should reject when startRendering is called a second time on an OfflineAudioContext
      });
    });
  }
  request.send();
}

// Run getData to start the process off
getData();

// dump script to pre element
pre.innerHTML = myScript.innerHTML;
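One way to save the rendered audio, sketched here by reusing the MediaStreamDestination plus MediaRecorder approach from the mixing answer above (the blob type and filename are assumptions): play renderedBuffer into a MediaStreamAudioDestinationNode, record the stream, and download the resulting Blob. Note that this plays back in real time for the duration of the buffer.
// Sketch: capture the rendered buffer with MediaRecorder and save it
// via a temporary <a download> element
function downloadRenderedBuffer(renderedBuffer) {
  var ctx = new (window.AudioContext || window.webkitAudioContext)();
  var dest = ctx.createMediaStreamDestination();
  var src = ctx.createBufferSource();
  src.buffer = renderedBuffer;
  src.connect(dest);
  var recorder = new MediaRecorder(dest.stream);
  var chunks = [];
  recorder.ondataavailable = function(e) { chunks.push(e.data); };
  recorder.onstop = function() {
    var blob = new Blob(chunks, { type: 'audio/ogg; codecs=opus' });
    var a = document.createElement('a');
    a.href = URL.createObjectURL(blob);
    a.download = 'rendered.ogg'; // hypothetical filename
    a.click();
  };
  recorder.start();
  src.start();
  // stop recording once the buffer has finished playing
  src.onended = function() { recorder.stop(); };
}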

Converting a Float32Array of decoded samples to an AudioBuffer

Because one of the browsers I'm trying to support doesn't allow me to decode a specific codec using AudioContext.decodeAudioData(), I'm using Aurora.js to decode audio files.
How can I change the decoded samples received from Aurora.js into an AudioBuffer I can actually use to playback the audio?
This is my code so far:
var AudioContext = (window.AudioContext || window.webkitAudioContext);
var context = new AudioContext();
var segmentUrls = [
  '/segments/00.wav',
  '/segments/05.wav',
  '/segments/10.wav',
  '/segments/15.wav',
  '/segments/20.wav',
  '/segments/25.wav',
  '/segments/30.wav',
  '/segments/35.wav',
  '/segments/40.wav',
  '/segments/45.wav',
  '/segments/50.wav',
  '/segments/55.wav'
];

Promise.all(segmentUrls.map(loadSound))
  .then(function(buffers) {
    var startAt = 0;
    buffers.forEach(function(buffer) {
      playSound(startAt, buffer);
      startAt += buffer.duration;
    });
  })
  .catch(function(err) {
    console.error(err);
  });

function playSound(offset, buffer) {
  var source = context.createBufferSource();
  source.buffer = buffer;
  source.connect(context.destination);
  source.start(offset);
}

function loadSound(url) {
  return new Promise(function(resolve, reject) {
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    request.onload = function onLoad() {
      resolve(decodeAudioData(request.response));
    };
    request.onerror = function onError() {
      reject('Could not request file');
    };
    request.send();
  });
}

function decodeAudioData(audioData) {
  return new Promise(function(resolve, reject) {
    var asset = AV.Asset.fromBuffer(audioData);
    asset.decodeToBuffer(function(buffer) {
      // Create an AudioBuffer here
    });
  });
}
You'll have to create an AudioBuffer of the appropriate size and number of channels, and copy the data from one Float32Array to another.
Here is the MDN code snippet to put data in an AudioBuffer and then play it:
https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer
// Stereo
var channels = 2;
// Create an empty two second stereo buffer at the
// sample rate of the AudioContext
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var frameCount = audioCtx.sampleRate * 2.0;
var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);

button.onclick = function() {
  // Fill the buffer with white noise;
  // just random values between -1.0 and 1.0
  for (var channel = 0; channel < channels; channel++) {
    // This gives us the actual array that contains the data
    var nowBuffering = myArrayBuffer.getChannelData(channel);
    for (var i = 0; i < frameCount; i++) {
      // Math.random() is in [0; 1.0]
      // audio needs to be in [-1.0; 1.0]
      nowBuffering[i] = Math.random() * 2 - 1;
    }
  }
  // Get an AudioBufferSourceNode.
  // This is the AudioNode to use when we want to play an AudioBuffer
  var source = audioCtx.createBufferSource();
  // set the buffer in the AudioBufferSourceNode
  source.buffer = myArrayBuffer;
  // connect the AudioBufferSourceNode to the
  // destination so we can hear the sound
  source.connect(audioCtx.destination);
  // start the source playing
  source.start();
}
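Applied to the question's decodeAudioData() stub, a minimal sketch of that copy step, assuming Aurora's decodeToBuffer callback delivers an interleaved Float32Array and that asset.format exposes channelsPerFrame and sampleRate:
function decodeAudioData(audioData) {
  return new Promise(function(resolve, reject) {
    var asset = AV.Asset.fromBuffer(audioData);
    asset.on('error', reject);
    asset.decodeToBuffer(function(interleaved) {
      var channelCount = asset.format.channelsPerFrame;
      var frameCount = interleaved.length / channelCount;
      var audioBuffer = context.createBuffer(
        channelCount, frameCount, asset.format.sampleRate);
      // De-interleave: frame i of channel c sits at interleaved[i * channelCount + c]
      for (var c = 0; c < channelCount; c++) {
        var channelData = audioBuffer.getChannelData(c);
        for (var i = 0; i < frameCount; i++) {
          channelData[i] = interleaved[i * channelCount + c];
        }
      }
      resolve(audioBuffer);
    });
  });
}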
