I have recorder.js, which records audio and takes buffer inputs. I want to downsample the audio buffers; I have written the function, but I am confused about where to call it. Please check my function and, if possible, suggest where to call it.
import InlineWorker from 'inline-worker';
export class Recorder {
config = {
bufferLen: 4096,
numChannels: 2,
mimeType: 'audio/mp3'
};
recording = false;
callbacks = {
getBuffer: [],
exportWAV: []
};
constructor(source, cfg) {
Object.assign(this.config, cfg);
this.context = source.context;
this.node = (this.context.createScriptProcessor ||
this.context.createJavaScriptNode).call(this.context,
this.config.bufferLen, this.config.numChannels, this.config.numChannels);
this.node.onaudioprocess = (e) => {
if (!this.recording) return;
var buffer = [];
for (var channel = 0; channel < this.config.numChannels; channel++) {
buffer.push(e.inputBuffer.getChannelData(channel));
}
this.worker.postMessage({
command: 'record',
buffer: buffer
});
};
source.connect(this.node);
this.node.connect(this.context.destination); //this should not be necessary
let self = {};
this.worker = new InlineWorker(function () {
let recLength = 0,
recBuffers = [],
sampleRate,
numChannels;
this.onmessage = function (e) {
switch (e.data.command) {
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config) {
sampleRate = config.sampleRate;
numChannels = config.numChannels;
initBuffers();
}
function record(inputBuffer) {
for (var channel = 0; channel < numChannels; channel++) {
recBuffers[channel].push(inputBuffer[channel]);
}
recLength += inputBuffer[0].length;
}
function exportWAV(type) {
let buffers = [];
for (let channel = 0; channel < numChannels; channel++) {
buffers.push(mergeBuffers(recBuffers[channel], recLength));
}
let interleaved;
if (numChannels === 2) {
interleaved = interleave(downsampleBuffer(buffers[0]), downsampleBuffer(buffers[1]));
} else {
interleaved = buffers[0];
}
let dataview = encodeWAV(interleaved);
let audioBlob = new Blob([dataview], {type: type});
this.postMessage({command: 'exportWAV', data: audioBlob});
}
function downsampleBuffer(buffer) {
if (16000 === sampleRate) {
return buffer;
}
var sampleRateRatio = sampleRate / 16000;
var newLength = Math.round(buffer.length / sampleRateRatio);
var result = new Float32Array(newLength);
var offsetResult = 0;
var offsetBuffer = 0;
while (offsetResult < result.length) {
var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
var accum = 0,
count = 0;
for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
accum += buffer[i];
count++;
}
result[offsetResult] = accum / count;
offsetResult++;
offsetBuffer = nextOffsetBuffer;
}
return result;
}
function getBuffer() {
let buffers = [];
for (let channel = 0; channel < numChannels; channel++) {
buffers.push(mergeBuffers(recBuffers[channel], recLength));
}
this.postMessage({command: 'getBuffer', data: buffers});
}
function clear() {
recLength = 0;
recBuffers = [];
initBuffers();
}
function initBuffers() {
for (let channel = 0; channel < numChannels; channel++) {
recBuffers[channel] = [];
}
}
function mergeBuffers(recBuffers, recLength) {
let result = new Float32Array(recLength);
let offset = 0;
for (let i = 0; i < recBuffers.length; i++) {
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR) {
let length = inputL.length + inputR.length;
let result = new Float32Array(length);
let index = 0,
inputIndex = 0;
while (index < length) {
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input) {
for (let i = 0; i < input.length; i++, offset += 2) {
let s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string) {
for (let i = 0; i < string.length; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples) {
let buffer = new ArrayBuffer(44 + samples.length * 2);
let view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* RIFF chunk length */
view.setUint32(4, 36 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (raw) */
view.setUint16(20, 1, true);
/* channel count */
view.setUint16(22, numChannels, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, numChannels * 2, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
}, self);
this.worker.postMessage({
command: 'init',
config: {
sampleRate: this.context.sampleRate,
numChannels: this.config.numChannels
}
});
this.worker.onmessage = (e) => {
let cb = this.callbacks[e.data.command].pop();
if (typeof cb == 'function') {
cb(e.data.data);
}
};
}
record() {
this.recording = true;
}
stop() {
this.recording = false;
}
clear() {
this.worker.postMessage({command: 'clear'});
}
getBuffer(cb) {
cb = cb || this.config.callback;
if (!cb) throw new Error('Callback not set');
this.callbacks.getBuffer.push(cb);
this.worker.postMessage({command: 'getBuffer'});
}
exportWAV(cb, mimeType) {
mimeType = mimeType || this.config.mimeType;
cb = cb || this.config.callback;
if (!cb) throw new Error('Callback not set');
this.callbacks.exportWAV.push(cb);
this.worker.postMessage({
command: 'exportWAV',
type: mimeType
});
}
static forceDownload(blob, filename) {
let url = (window.URL || window.webkitURL).createObjectURL(blob);
let link = window.document.createElement('a');
link.href = url;
link.download = filename || 'output.wav';
let click = document.createEvent("Event");
click.initEvent("click", true, true);
link.dispatchEvent(click);
}
}
export default Recorder;
This is code I took from a GitHub repository, but the sample rate is 48000.
The file produced after downsampling the left and right buffers is uploaded here.
Calling it from my component class:
recorder && recorder.exportWAV(function(blob) {
var formData=new FormData();
formData.append("event_name",fileName);
formData.append("file",new File([blob], fileName, {type: 'audio/mpeg;', lastModified: Date.now()}));
formData.append("fileExtension", "mp3");
fetch('http://localhost:6020/uploadFile', {
method: "POST", // *GET, POST, PUT, DELETE, etc.
cache: "no-cache", // *default, no-cache, reload, force-cache, only-if-cached
redirect: "follow", // manual, *follow, error
referrer: "no-referrer", // no-referrer, *client
body: formData, // body data type must match "Content-Type" header
})
.then(response => response.json())
.then(function(data){ console.log( JSON.stringify( data ) ) });
},"audio/mp3");
I downsampled my audio clips (WAV files) successfully using the recorder.js library, following the solution given by ilikerei in this thread.
Add this method to recorder.js to resample the buffer to a different sample rate, before calling the encodeWAV() method.
function downsampleBuffer(buffer, rate) {
if (rate == sampleRate) {
return buffer;
}
if (rate > sampleRate) {
throw "downsampling rate should be smaller than original sample rate";
}
var sampleRateRatio = sampleRate / rate;
var newLength = Math.round(buffer.length / sampleRateRatio);
var result = new Float32Array(newLength);
var offsetResult = 0;
var offsetBuffer = 0;
while (offsetResult < result.length) {
var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
// Use average value of skipped samples
var accum = 0, count = 0;
for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
accum += buffer[i];
count++;
}
result[offsetResult] = accum / count;
// Or you can simply get rid of the skipped samples:
// result[offsetResult] = buffer[nextOffsetBuffer];
offsetResult++;
offsetBuffer = nextOffsetBuffer;
}
return result;
}
Then call encodeWAV() with the new, resampled buffer.
var downsampledBuffer = downsampleBuffer(interleaved, targetRate);
var dataview = encodeWAV(downsampledBuffer);
Now use the new sample rate for encoding.
/* sample rate */
view.setUint32(24, newRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, newRate * 4, true);
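Putting both pieces together, here is a minimal sketch of how the worker's exportWAV() could wire this in. It is a sketch under assumptions, not the library's code: targetRate is a hypothetical constant, encodeWAV() is assumed to accept the rate as an extra parameter, and each channel is downsampled before interleaving, since running the averaging loop above over an already-interleaved stereo buffer would smear the two channels together.
function exportWAV(type) {
var targetRate = 16000; // hypothetical target rate (assumption)
var buffers = [];
for (var channel = 0; channel < numChannels; channel++) {
// Downsample each channel separately so stereo samples stay aligned.
var merged = mergeBuffers(recBuffers[channel], recLength);
buffers.push(downsampleBuffer(merged, targetRate));
}
var interleaved = numChannels === 2 ? interleave(buffers[0], buffers[1]) : buffers[0];
// The header must carry targetRate, not the capture rate (48000 here),
// so encodeWAV is assumed to take it as a second argument.
var dataview = encodeWAV(interleaved, targetRate);
var audioBlob = new Blob([dataview], { type: type });
this.postMessage({ command: 'exportWAV', data: audioBlob });
}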
My problem:
I'm trying to merge multiple audio blobs into a single blob and download it on the page.
What I tried:
I tried to concatenate the audio blobs in the following ways:
Method - 1:
const url = window.URL.createObjectURL(new Blob(fullBlobArray, {
type: 'audio/*'
}));
const a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = "testing.wav";
a.click();
URL.revokeObjectURL(url);
a.remove();
Method - 2 (Using - ConcatenateBlobs.js plugin - ConcatenateJS)
ConcatenateBlobs(fullBlobArray, 'audio/wav', function (fullBlob) {
const url = window.URL.createObjectURL(fullBlob);
const a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = "testing.wav";
a.click();
URL.revokeObjectURL(url);
a.remove();
//Close the window once it has downloaded.
window.close();
});
Output is explained below:
If you have the following audio blobs:
[audio1, audio2, audio3]
Then, after downloading via the above code, only the audio from the first file (i.e. audio1) plays, yet the file size of the full blob is the total size of audio1 + audio2 + audio3.
I couldn't figure out where I went wrong. Kindly help me get rid of this problem.
Finally, found a solution!
Thanks to this StackOverflow article; the effort put into it is highly appreciated.
Thanks also to @Bergi, @Zac, and @Peter Krebs for pointing out in the comments that the blob needs to be formatted according to the WAV format.
Below is the code for merging multiple WAV files into a single file:
wav_merger.js
var _index;
function readFileAsync(blob) {
return new Promise((resolve, reject) => {
let reader = new FileReader();
reader.addEventListener("loadend", function () {
resolve(reader.result);
});
reader.onerror = reject;
reader.readAsArrayBuffer(blob);
})
}
function getBufferFromBlobs(blobArray) {
return new Promise((resolve, reject) => {
var _arrBytes = [];
var _promises = [];
if (blobArray.length > 0) {
$.each(blobArray, function (index, blob) {
_index = index;
var dfd = $.Deferred();
readFileAsync(blob).then(function (byteArray) {
_arrBytes.push(byteArray);
dfd.resolve();
});
_promises.push(dfd);
});
$.when.apply($, _promises).done(function () {
var _blob = combineWavsBuffers(_arrBytes);
resolve(_blob);
});
}
});
}
function loadWav(blobArray) {
return getBufferFromBlobs(blobArray);
}
function combineWavsBuffers(bufferArray) {
if (bufferArray.length > 0) {
var _bufferLengths = bufferArray.map(buffer => buffer.byteLength);
// Getting sum of numbers
var _totalBufferLength = _bufferLengths.reduce(function (a, b) {
return a + b;
}, 0);
var tmp = new Uint8Array(_totalBufferLength);
//Get buffer1 audio data to create the new combined wav
var audioData = getAudioData.WavHeader.readHeader(new DataView(bufferArray[0]));
var _bufferLength = 0;
$.each(bufferArray, function (index, buffer) {
//Combine array bytes of original wavs buffers.
tmp.set(new Uint8Array(buffer), _bufferLength);
_bufferLength+= buffer.byteLength;
});
//Send combined buffer and send audio data to create the audio data of combined
var arrBytesFinal = getWavBytes(tmp, {
isFloat: false, // floating point or 16-bit integer
numChannels: audioData.channels,
sampleRate: audioData.sampleRate,
});
//Create a Blob as Base64 Raw data with audio/wav type
return new Blob([arrBytesFinal], { type: 'audio/wav; codecs=MS_PCM' });
}
return null;
}
//Combine two audio .wav buffers.
function combineWavsBuffers1(buffer1, buffer2) {
//Combine array bytes of original wavs buffers
var tmp = new Uint8Array(buffer1.byteLength + buffer2.byteLength);
tmp.set(new Uint8Array(buffer1), 0);
tmp.set(new Uint8Array(buffer2), buffer1.byteLength);
//Get buffer1 audio data to create the new combined wav
var audioData = getAudioData.WavHeader.readHeader(new DataView(buffer1));
console.log('Audio Data: ', audioData);
//Send combined buffer and send audio data to create the audio data of combined
var arrBytesFinal = getWavBytes(tmp, {
isFloat: false, // floating point or 16-bit integer
numChannels: audioData.channels,
sampleRate: audioData.sampleRate,
});
//Create a Blob as Base64 Raw data with audio/wav type
return new Blob([arrBytesFinal], { type: 'audio/wav; codecs=MS_PCM' });
}
//Other functions //////////////////////////////////////////////////////////////
// Returns Uint8Array of WAV bytes
function getWavBytes(buffer, options) {
const type = options.isFloat ? Float32Array : Uint16Array
const numFrames = buffer.byteLength / type.BYTES_PER_ELEMENT
const headerBytes = getWavHeader(Object.assign({}, options, { numFrames }))
const wavBytes = new Uint8Array(headerBytes.length + buffer.byteLength);
// prepend header, then add pcmBytes
wavBytes.set(headerBytes, 0)
wavBytes.set(new Uint8Array(buffer), headerBytes.length)
return wavBytes
}
// adapted from https://gist.github.com/also/900023
// returns Uint8Array of WAV header bytes
function getWavHeader(options) {
const numFrames = options.numFrames
const numChannels = options.numChannels || 2
const sampleRate = options.sampleRate || 44100
const bytesPerSample = options.isFloat ? 4 : 2
const format = options.isFloat ? 3 : 1
const blockAlign = numChannels * bytesPerSample
const byteRate = sampleRate * blockAlign
const dataSize = numFrames * blockAlign
const buffer = new ArrayBuffer(44)
const dv = new DataView(buffer)
let p = 0
function writeString(s) {
for (let i = 0; i < s.length; i++) {
dv.setUint8(p + i, s.charCodeAt(i))
}
p += s.length
}
function writeUint32(d) {
dv.setUint32(p, d, true)
p += 4
}
function writeUint16(d) {
dv.setUint16(p, d, true)
p += 2
}
writeString('RIFF') // ChunkID
writeUint32(dataSize + 36) // ChunkSize
writeString('WAVE') // Format
writeString('fmt ') // Subchunk1ID
writeUint32(16) // Subchunk1Size
writeUint16(format) // AudioFormat
writeUint16(numChannels) // NumChannels
writeUint32(sampleRate) // SampleRate
writeUint32(byteRate) // ByteRate
writeUint16(blockAlign) // BlockAlign
writeUint16(bytesPerSample * 8) // BitsPerSample
writeString('data') // Subchunk2ID
writeUint32(dataSize) // Subchunk2Size
return new Uint8Array(buffer)
}
function getAudioData() {
function WavHeader() {
this.dataOffset = 0;
this.dataLen = 0;
this.channels = 0;
this.sampleRate = 0;
}
function fourccToInt(fourcc) {
return fourcc.charCodeAt(0) << 24 | fourcc.charCodeAt(1) << 16 | fourcc.charCodeAt(2) << 8 | fourcc.charCodeAt(3);
}
WavHeader.RIFF = fourccToInt("RIFF");
WavHeader.WAVE = fourccToInt("WAVE");
WavHeader.fmt_ = fourccToInt("fmt ");
WavHeader.data = fourccToInt("data");
WavHeader.readHeader = function (dataView) {
var w = new WavHeader();
var header = dataView.getUint32(0, false);
if (WavHeader.RIFF != header) {
return;
}
var fileLen = dataView.getUint32(4, true);
if (WavHeader.WAVE != dataView.getUint32(8, false)) {
return;
}
if (WavHeader.fmt_ != dataView.getUint32(12, false)) {
return;
}
var fmtLen = dataView.getUint32(16, true);
var pos = 16 + 4;
switch (fmtLen) {
case 16:
case 18:
w.channels = dataView.getUint16(pos + 2, true);
w.sampleRate = dataView.getUint32(pos + 4, true);
break;
default:
throw 'extended fmt chunk not implemented';
}
pos += fmtLen;
var data = WavHeader.data;
var len = 0;
while (data != header) {
header = dataView.getUint32(pos, false);
len = dataView.getUint32(pos + 4, true);
if (data == header) {
break;
}
pos += (len + 8);
}
w.dataLen = len;
w.dataOffset = pos + 8;
return w;
};
getAudioData.WavHeader = WavHeader;
}
getAudioData();
custom_script.js
getBufferFromBlobs(fullBlobArray).then(function (singleBlob) {
const url = window.URL.createObjectURL(singleBlob);
const a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = "testing.wav";
a.click();
URL.revokeObjectURL(url);
a.remove();
});
I have the same problem; thanks @Vikash for bringing it up here. I'm using ConcatenateBlobs.js to concatenate WAV blobs, and it seems to work only in Chrome. Your solution is great, but the source is a bit long, so I tried to fix ConcatenateBlobs.js based on the fact that the file length in the header needs to be fixed. Luckily, it works:
function ConcatenateBlobs(blobs, type, callback) {
var buffers = [];
var index = 0;
function readAsArrayBuffer() {
if (!blobs[index]) {
return concatenateBuffers();
}
var reader = new FileReader();
reader.onload = function(event) {
buffers.push(event.target.result);
index++;
readAsArrayBuffer();
};
reader.readAsArrayBuffer(blobs[index]);
}
readAsArrayBuffer();
function audioLengthTo32Bit(n) {
n = Math.floor(n);
var b1 = n & 255;
var b2 = (n >> 8) & 255;
var b3 = (n >> 16) & 255;
var b4 = (n >> 24) & 255;
return [b1, b2, b3, b4];
}
function concatenateBuffers() {
var byteLength = 0;
buffers.forEach(function(buffer) {
byteLength += buffer.byteLength;
});
var tmp = new Uint8Array(byteLength);
var lastOffset = 0;
var newData;
buffers.forEach(function(buffer) {
if (type=='audio/wav' && lastOffset > 0) newData = new Uint8Array(buffer, 44);
else newData = new Uint8Array(buffer);
tmp.set(newData, lastOffset);
lastOffset += newData.length;
});
if (type=='audio/wav') {
tmp.set(audioLengthTo32Bit(lastOffset - 8), 4);
tmp.set(audioLengthTo32Bit(lastOffset - 44), 40); // update audio length in the header
}
var blob = new Blob([tmp.buffer], {
type: type
});
callback(blob);
}
}
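For completeness, a minimal usage sketch of the fixed function; recordedBlobs is a hypothetical array of WAV blobs that share one format (same sample rate and channel count, since only the lengths in the first header get rewritten):
// recordedBlobs is a hypothetical array of same-format WAV blobs.
ConcatenateBlobs(recordedBlobs, 'audio/wav', function (mergedBlob) {
var url = URL.createObjectURL(mergedBlob);
var audio = new Audio(url);
audio.play();
// Call URL.revokeObjectURL(url) once the blob is no longer needed.
});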
How could I merge or combine two audio WAVs loaded from URLs into a new WAV, play it in the web browser, and download it?
The HTML and JS code:
NOTE: To avoid cross-origin issues, load the WAVs from your local machine or your own server, or allow them in your .htaccess.
<!DOCTYPE html>
<html>
<head>
<meta content="text/html;" http-equiv="content-type" charset="utf-8">
<script>
window.onload = function() {
//Original Wav
//https://www.wavsource.com/snds_2020-10-01_3728627494378403/video_games/pacman/pacman_intro.wav
var audioFileUrl1 = 'pacman_intro.wav'; //Attention to cross-origin!!
var audioFileUrl2 = 'pacman_intro.wav'; //I love PacMan intro :P
loadWav(audioFileUrl1, audioFileUrl2); //Load wavs from url
function loadWav(url1,url2){
var arrBytesWav1, arrBytesWav2;
fetch(url1)
.then(function(response) { return response.blob(); })
.then(function(blob) {
var reader = new FileReader();
reader.readAsArrayBuffer(blob);
reader.addEventListener("loadend", function() {
arrBytesWav1 = reader.result;
});
return fetch(url2); //Return the second url as a Promise.
})
//Second load
.then(function(response) { return response.blob(); })
.then(function(blob) {
var reader = new FileReader();
reader.readAsArrayBuffer(blob);
reader.addEventListener("loadend", function() {
arrBytesWav2 = reader.result;
combineWavsBuffers( arrBytesWav1, arrBytesWav2 ); //Combine original wav buffer and play
});
})
.catch( function(error) {
alert('Error wav loading: ' + error.message);
});
}
//Combine two audio .wav buffers and assign to audio control and play it.
function combineWavsBuffers(buffer1, buffer2) {
//Combine array bytes of original wavs buffers
var tmp = new Uint8Array(buffer1.byteLength + buffer2.byteLength);
tmp.set( new Uint8Array(buffer1), 0 );
tmp.set( new Uint8Array(buffer2), buffer1.byteLength );
//Get buffer1 audio data to create the new combined wav
var audioData = getAudioData.WavHeader.readHeader(new DataView(buffer1));
console.log('Audio Data: ', audioData);
//Send combined buffer and send audio data to create the audio data of combined
var arrBytesFinal = getWavBytes( tmp, {
isFloat: false, // floating point or 16-bit integer
numChannels: audioData.channels,
sampleRate: audioData.sampleRate,
})
//Create a Blob as Base64 Raw data with audio/wav type
var myBlob = new Blob( [arrBytesFinal] , { type : 'audio/wav; codecs=MS_PCM' });
var combineBase64Wav;
var readerBlob = new FileReader();
readerBlob.addEventListener("loadend", function() {
combineBase64Wav = readerBlob.result.toString();
//Assign to audiocontrol to play the new combined wav.
var audioControl = document.getElementById('audio');
audioControl.src = combineBase64Wav;
audioControl.play();
});
readerBlob.readAsDataURL(myBlob);
console.log( "Buffer1 Size: " + buffer1.byteLength );
console.log( "Buffer2 Size: " + buffer2.byteLength );
console.log( "Combined Size: " + arrBytesFinal.byteLength );
return combineBase64Wav;
}
//Other functions //////////////////////////////////////////////////////////////
// Returns Uint8Array of WAV bytes
function getWavBytes(buffer, options) {
const type = options.isFloat ? Float32Array : Uint16Array
const numFrames = buffer.byteLength / type.BYTES_PER_ELEMENT
const headerBytes = getWavHeader(Object.assign({}, options, { numFrames }))
const wavBytes = new Uint8Array(headerBytes.length + buffer.byteLength);
// prepend header, then add pcmBytes
wavBytes.set(headerBytes, 0)
wavBytes.set(new Uint8Array(buffer), headerBytes.length)
return wavBytes
}
// adapted from https://gist.github.com/also/900023
// returns Uint8Array of WAV header bytes
function getWavHeader(options) {
const numFrames = options.numFrames
const numChannels = options.numChannels || 2
const sampleRate = options.sampleRate || 44100
const bytesPerSample = options.isFloat? 4 : 2
const format = options.isFloat? 3 : 1
const blockAlign = numChannels * bytesPerSample
const byteRate = sampleRate * blockAlign
const dataSize = numFrames * blockAlign
const buffer = new ArrayBuffer(44)
const dv = new DataView(buffer)
let p = 0
function writeString(s) {
for (let i = 0; i < s.length; i++) {
dv.setUint8(p + i, s.charCodeAt(i))
}
p += s.length
}
function writeUint32(d) {
dv.setUint32(p, d, true)
p += 4
}
function writeUint16(d) {
dv.setUint16(p, d, true)
p += 2
}
writeString('RIFF') // ChunkID
writeUint32(dataSize + 36) // ChunkSize
writeString('WAVE') // Format
writeString('fmt ') // Subchunk1ID
writeUint32(16) // Subchunk1Size
writeUint16(format) // AudioFormat
writeUint16(numChannels) // NumChannels
writeUint32(sampleRate) // SampleRate
writeUint32(byteRate) // ByteRate
writeUint16(blockAlign) // BlockAlign
writeUint16(bytesPerSample * 8) // BitsPerSample
writeString('data') // Subchunk2ID
writeUint32(dataSize) // Subchunk2Size
return new Uint8Array(buffer)
}
function getAudioData(){
function WavHeader() {
this.dataOffset = 0;
this.dataLen = 0;
this.channels = 0;
this.sampleRate = 0;
}
function fourccToInt(fourcc) {
return fourcc.charCodeAt(0) << 24 | fourcc.charCodeAt(1) << 16 | fourcc.charCodeAt(2) << 8 | fourcc.charCodeAt(3);
}
WavHeader.RIFF = fourccToInt("RIFF");
WavHeader.WAVE = fourccToInt("WAVE");
WavHeader.fmt_ = fourccToInt("fmt ");
WavHeader.data = fourccToInt("data");
WavHeader.readHeader = function (dataView) {
var w = new WavHeader();
var header = dataView.getUint32(0, false);
if (WavHeader.RIFF != header) {
return;
}
var fileLen = dataView.getUint32(4, true);
if (WavHeader.WAVE != dataView.getUint32(8, false)) {
return;
}
if (WavHeader.fmt_ != dataView.getUint32(12, false)) {
return;
}
var fmtLen = dataView.getUint32(16, true);
var pos = 16 + 4;
switch (fmtLen) {
case 16:
case 18:
w.channels = dataView.getUint16(pos + 2, true);
w.sampleRate = dataView.getUint32(pos + 4, true);
break;
default:
throw 'extended fmt chunk not implemented';
}
pos += fmtLen;
var data = WavHeader.data;
var len = 0;
while (data != header) {
header = dataView.getUint32(pos, false);
len = dataView.getUint32(pos + 4, true);
if (data == header) {
break;
}
pos += (len + 8);
}
w.dataLen = len;
w.dataOffset = pos + 8;
return w;
};
getAudioData.WavHeader = WavHeader;
}
getAudioData();
};//Window onLoad
</script>
</head>
<body>
<!--The audio control HTML element -->
<audio id="audio" src="" controls></audio>
</body>
</html>
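As a side note on the playback step above: a blob object URL is a lighter alternative to the base64 data URL produced by readAsDataURL(), and avoids holding the whole file in a string. A sketch using the same myBlob built in combineWavsBuffers():
// Point the <audio> element at a blob URL instead of a data URL.
var audioControl = document.getElementById('audio');
audioControl.src = URL.createObjectURL(myBlob);
audioControl.play();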
I am using MediaRecorder in ReactJS to record audio from the microphone, storing it in a blob with MIME type "audio/mp3". I want to convert this blob to MP3 and upload it to an S3 bucket.
I am able to convert it into WAV using the audioContext, decodeAudioData, and audioBufferToWav functions, but the resulting WAV file is very large. Since an MP3 file is comparatively small, I want to convert my blob to MP3 instead. Any help?
My code for recording and converting to wav:
getUserMedia({ audio: true })
.then(stream => {
this.stream = stream;
const mimeType = 'audio/mp3';
this.mediaRecorder = new MediaRecorder(stream);
this.mediaRecorder.start();
const audioChunks = [];
this.mediaRecorder.addEventListener('dataavailable', event => {
audioChunks.push(event.data);
});
this.mediaRecorder.addEventListener('stop', () => {
const audioBlob = new Blob(audioChunks, {
type: mimeType});
});
}).catch(error => { });
Converting above created blob to WAV:
const reader = new window.FileReader();
reader.readAsDataURL(audioBlob);
reader.onloadend = () => {
let base64 = reader.result + '';
base64 = base64.split(',')[1];
const ab = new ArrayBuffer(base64.length);
const buff = new Buffer.from(base64, 'base64');
const view = new Uint8Array(ab);
for (let i = 0; i < buff.length; ++i) {
view[i] = buff[i];
}
const context = new AudioContext();
context.decodeAudioData(ab, (buffer) => {
const wavFile = toWav(buffer);
});
};
I am storing wavFile in S3, but I want MP3. Please help.
I'm not using the ReactJS MediaRecorder, nor do I exactly follow what's going on in your specific example, but I have a solution for converting an AudioBuffer to an MP3, by way of WAV.
The first function is based on russellgood.com/how-to-convert-audiobuffer-to-audio-file. The second is based on lamejs.
First, convert the AudioBuffer to WAV data (this version hands the result straight to wavToMp3 below):
function audioBufferToWav(aBuffer) {
let numOfChan = aBuffer.numberOfChannels,
btwLength = aBuffer.length * numOfChan * 2 + 44,
btwArrBuff = new ArrayBuffer(btwLength),
btwView = new DataView(btwArrBuff),
btwChnls = [],
btwIndex,
btwSample,
btwOffset = 0,
btwPos = 0;
setUint32(0x46464952); // "RIFF"
setUint32(btwLength - 8); // file length - 8
setUint32(0x45564157); // "WAVE"
setUint32(0x20746d66); // "fmt " chunk
setUint32(16); // length = 16
setUint16(1); // PCM (uncompressed)
setUint16(numOfChan);
setUint32(aBuffer.sampleRate);
setUint32(aBuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
setUint16(numOfChan * 2); // block-align
setUint16(16); // 16-bit
setUint32(0x61746164); // "data" - chunk
setUint32(btwLength - btwPos - 4); // chunk length
for (btwIndex = 0; btwIndex < aBuffer.numberOfChannels; btwIndex++)
btwChnls.push(aBuffer.getChannelData(btwIndex));
while (btwPos < btwLength) {
for (btwIndex = 0; btwIndex < numOfChan; btwIndex++) {
// interleave btwChnls
btwSample = Math.max(-1, Math.min(1, btwChnls[btwIndex][btwOffset])); // clamp
btwSample = (0.5 + btwSample < 0 ? btwSample * 32768 : btwSample * 32767) | 0; // scale to 16-bit signed int
btwView.setInt16(btwPos, btwSample, true); // write 16-bit sample
btwPos += 2;
}
btwOffset++; // next source sample
}
let wavHdr = lamejs.WavHeader.readHeader(new DataView(btwArrBuff));
let wavSamples = new Int16Array(btwArrBuff, wavHdr.dataOffset, wavHdr.dataLen / 2);
wavToMp3(wavHdr.channels, wavHdr.sampleRate, wavSamples);
function setUint16(data) {
btwView.setUint16(btwPos, data, true);
btwPos += 2;
}
function setUint32(data) {
btwView.setUint32(btwPos, data, true);
btwPos += 4;
}
}
Second, convert the wave to an mp3
function wavToMp3(channels, sampleRate, samples) {
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = samples.length;
var samplesPerFrame = 1152;
for (var i = 0; remaining >= samplesPerFrame; i += samplesPerFrame) {
var mono = samples.subarray(i, i + samplesPerFrame);
var mp3buf = mp3enc.encodeBuffer(mono);
if (mp3buf.length > 0) {
buffer.push(new Int8Array(mp3buf));
}
remaining -= samplesPerFrame;
}
var d = mp3enc.flush();
if(d.length > 0){
buffer.push(new Int8Array(d));
}
var mp3Blob = new Blob(buffer, {type: 'audio/mp3'});
var bUrl = window.URL.createObjectURL(mp3Blob);
// send the download link to the console
console.log('mp3 download:', bUrl);
}
Hope this helps!
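For reference, a minimal sketch of driving this pipeline end to end; recordedBlob is a hypothetical blob from your recorder, and lamejs is assumed to be loaded globally:
// recordedBlob is a hypothetical audio blob; lamejs must already be loaded.
var audioContext = new AudioContext();
recordedBlob.arrayBuffer()
.then(function (arrayBuffer) { return audioContext.decodeAudioData(arrayBuffer); })
.then(function (audioBuffer) {
// Logs the mp3 object URL to the console via wavToMp3().
audioBufferToWav(audioBuffer);
});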
This is how I combined MediaRecorder and the format converter, based on CuriousChad's great answer. I just needed to treat the MP3 encoder as stereo for it to work.
First, set AudioFormat equal to 'WEBM' (Chrome), 'MP3', or 'WAV':
this.mediaRecorder.onstop = (e) => {
if (AudioFormat === "MP3" || AudioFormat === "WAV") {
var data = this.chunks[0];
var blob = new Blob(this.chunks, { type: "video/webm" });
const audioContext = new AudioContext();
const fileReader = new FileReader();
// Set up file reader on loaded end event
fileReader.onloadend = () => {
const arrayBuffer = fileReader.result; // as ArrayBuffer;
// Convert array buffer into audio buffer
audioContext.decodeAudioData(arrayBuffer, (audioBuffer) => {
// Do something with audioBuffer
console.log(audioBuffer);
var MP3Blob = audioBufferToWav(audioBuffer);
onStop(MP3Blob, audioBuffer);
});
};
//Load blob
fileReader.readAsArrayBuffer(blob);
} else {
var data = this.chunks[0];
var blob = new Blob(this.chunks, { type: "audio/mpeg" });
onStop(blob, data);
}
this.chunks = [];
};
Second, convert Buffer to Wav:
function audioBufferToWav(aBuffer) {
let numOfChan = aBuffer.numberOfChannels,
btwLength = aBuffer.length * numOfChan * 2 + 44,
btwArrBuff = new ArrayBuffer(btwLength),
btwView = new DataView(btwArrBuff),
btwChnls = [],
btwIndex,
btwSample,
btwOffset = 0,
btwPos = 0;
setUint32(0x46464952); // "RIFF"
setUint32(btwLength - 8); // file length - 8
setUint32(0x45564157); // "WAVE"
setUint32(0x20746d66); // "fmt " chunk
setUint32(16); // length = 16
setUint16(1); // PCM (uncompressed)
setUint16(numOfChan);
setUint32(aBuffer.sampleRate);
setUint32(aBuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
setUint16(numOfChan * 2); // block-align
setUint16(16); // 16-bit
setUint32(0x61746164); // "data" - chunk
setUint32(btwLength - btwPos - 4); // chunk length
for (btwIndex = 0; btwIndex < aBuffer.numberOfChannels; btwIndex++)
btwChnls.push(aBuffer.getChannelData(btwIndex));
while (btwPos < btwLength) {
for (btwIndex = 0; btwIndex < numOfChan; btwIndex++) {
// interleave btwChnls
btwSample = Math.max(-1, Math.min(1, btwChnls[btwIndex][btwOffset])); // clamp
btwSample =
(0.5 + btwSample < 0 ? btwSample * 32768 : btwSample * 32767) | 0; // scale to 16-bit signed int
btwView.setInt16(btwPos, btwSample, true); // write 16-bit sample
btwPos += 2;
}
btwOffset++; // next source sample
}
let wavHdr = lamejs.WavHeader.readHeader(new DataView(btwArrBuff));
//Stereo
let data = new Int16Array(btwArrBuff, wavHdr.dataOffset, wavHdr.dataLen / 2);
let leftData = [];
let rightData = [];
for (let i = 0; i < data.length; i += 2) {
leftData.push(data[i]);
rightData.push(data[i + 1]);
}
var left = new Int16Array(leftData);
var right = new Int16Array(rightData);
if (AudioFormat === "MP3") {
//STEREO
if (wavHdr.channels === 2)
return wavToMp3Stereo(
wavHdr.channels,
wavHdr.sampleRate,
left,
right,
);
//MONO
else if (wavHdr.channels === 1)
return wavToMp3(wavHdr.channels, wavHdr.sampleRate, data);
} else return new Blob([btwArrBuff], { type: "audio/wav" });
function setUint16(data) {
btwView.setUint16(btwPos, data, true);
btwPos += 2;
}
function setUint32(data) {
btwView.setUint32(btwPos, data, true);
btwPos += 4;
}
}
Third, convert WAV to MP3:
I had to switch to stereo, since the MP3 encoder takes two channels, left and right.
function wavToMp3(channels, sampleRate, left, right = null) {
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = left.length;
var samplesPerFrame = 1152;
for (var i = 0; remaining >= samplesPerFrame; i += samplesPerFrame) {
if (!right) {
var mono = left.subarray(i, i + samplesPerFrame);
var mp3buf = mp3enc.encodeBuffer(mono);
} else {
var leftChunk = left.subarray(i, i + samplesPerFrame);
var rightChunk = right.subarray(i, i + samplesPerFrame);
var mp3buf = mp3enc.encodeBuffer(leftChunk, rightChunk);
}
if (mp3buf.length > 0) {
buffer.push(mp3buf); //new Int8Array(mp3buf));
}
remaining -= samplesPerFrame;
}
var d = mp3enc.flush();
if (d.length > 0) {
buffer.push(new Int8Array(d));
}
var mp3Blob = new Blob(buffer, { type: "audio/mp3" });
//var bUrl = window.URL.createObjectURL(mp3Blob);
// send the download link to the console
//console.log('mp3 download:', bUrl);
return mp3Blob;
}
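For reference, a minimal sketch of calling the converter directly; left and right are hypothetical Int16Array channel buffers at 44100 Hz, and lamejs is assumed to be loaded:
// left and right are hypothetical Int16Array channel buffers.
var stereoBlob = wavToMp3(2, 44100, left, right); // stereo: both channels
var monoBlob = wavToMp3(1, 44100, left); // mono: right defaults to null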
I am developing a web app in which I want to use the recorder.js file to implement audio recording. I have this code, but I am unable to use it in the web2py framework. Can you give me detailed info on how to use it?
There is a .bower.json file which gets hidden when uploading to web2py. How can we use this and link all the files so that recording can be done?
This is my .bower.json script:
{
"name": "Recorderjs",
"homepage": "https://github.com/mattdiamond/Recorderjs",
"_release": "f814ac7b3f",
"_resolution": {
"type": "branch",
"branch": "master",
"commit": "f814ac7b3f4ed4f62729860d5a02720f167480b3"
},
"_source": "git://github.com/mattdiamond/Recorderjs.git",
"_target": "*",
"_originalSource": "Recorderjs",
"_direct": true
}
This is my html code:
<body>
<script src="bower_components/Recorderjs/recorder.js">
</script>
<script src="app.js"></script>
<button onclick="record()">Record</button>
<button onclick="stop()">Stop</button>
These are my javascripts: /app.js
var navigator = window.navigator;
var Context = window.AudioContext || window.webkitAudioContext;
var context = new Context();
// audio
var mediaStream;
var rec;
navigator.getUserMedia = (
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia
);
function record() {
navigator.getUserMedia({audio: true}, function(localMediaStream){
mediaStream = localMediaStream;
var mediaStreamSource = context.createMediaStreamSource(localMediaStream);
rec = new Recorder(mediaStreamSource, {
workerPath: '/bower_components/Recorderjs/recorderWorker.js'
});
rec.record();
}, function(err){
console.log('Not supported');
});
}
function stop() {
mediaStream.stop();
rec.stop();
rec.exportWAV(function(e){
rec.clear();
Recorder.forceDownload(e, "test.wav");
});
}
/recorder.js
(function(window){
var WORKER_PATH = 'recorderWorker.js';
var Recorder = function(source, cfg){
var config = cfg || {};
var bufferLen = config.bufferLen || 4096;
this.context = source.context;
this.node = (this.context.createScriptProcessor ||
this.context.createJavaScriptNode).call(this.context,
bufferLen, 2, 2);
var worker = new Worker(config.workerPath || WORKER_PATH);
worker.postMessage({
command: 'init',
config: {
sampleRate: this.context.sampleRate
}
});
var recording = false,
currCallback;
this.node.onaudioprocess = function(e){
if (!recording) return;
worker.postMessage({
command: 'record',
buffer: [
e.inputBuffer.getChannelData(0),
e.inputBuffer.getChannelData(1)
]
});
}
this.configure = function(cfg){
for (var prop in cfg){
if (cfg.hasOwnProperty(prop)){
config[prop] = cfg[prop];
}
}
}
this.record = function(){
recording = true;
}
this.stop = function(){
recording = false;
}
this.clear = function(){
worker.postMessage({ command: 'clear' });
}
this.getBuffer = function(cb) {
currCallback = cb || config.callback;
worker.postMessage({ command: 'getBuffer' })
}
recorderWorker.js:
var recLength = 0,
recBuffersL = [],
recBuffersR = [],
sampleRate;
this.onmessage = function(e){
switch(e.data.command){
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config){
sampleRate = config.sampleRate;
}
function record(inputBuffer){
recBuffersL.push(inputBuffer[0]);
recBuffersR.push(inputBuffer[1]);
recLength += inputBuffer[0].length;
}
function exportWAV(type){
var bufferL = mergeBuffers(recBuffersL, recLength);
var bufferR = mergeBuffers(recBuffersR, recLength);
var interleaved = interleave(bufferL, bufferR);
var dataview = encodeWAV(interleaved);
var audioBlob = new Blob([dataview], { type: type });
this.postMessage(audioBlob);
}
function getBuffer() {
var buffers = [];
buffers.push( mergeBuffers(recBuffersL, recLength) );
buffers.push( mergeBuffers(recBuffersR, recLength) );
this.postMessage(buffers);
}
function clear(){
recLength = 0;
recBuffersL = [];
recBuffersR = [];
}
function mergeBuffers(recBuffers, recLength){
var result = new Float32Array(recLength);
var offset = 0;
for (var i = 0; i < recBuffers.length; i++){
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR){
var length = inputL.length + inputR.length;
var result = new Float32Array(length);
var index = 0,
inputIndex = 0;
while (index < length){
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input){
for (var i = 0; i < input.length; i++, offset+=2){
var s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string){
for (var i = 0; i < string.length; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples){
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* file length */
view.setUint32(4, 36 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (raw) */
view.setUint16(20, 1, true);
/* channel count */
view.setUint16(22, 2, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, 4, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
I don't know if this is the only problem, but the following script source references are relative URLs:
<script src="bower_components/Recorderjs/recorder.js"></script>
<script src="app.js"></script>
Because the URLs are not preceded with a /, they will be appended to the URL of the current page. So, if you are at /myapp/default/index, the first URL will reference /myapp/default/index/bower_components/Recorderjs/recorder.js, which will not be correct.
You should keep your Javascript files in the /web2py/applications/myapp/static folder. In that case, it is best to use the web2py URL() function to generate proper URLs:
<script src="{{=URL('static', 'js/bower_components/Recorderjs/recorder.js')}}"></script>
<script src="{{=URL('static', 'js/app.js')}}"></script>
Regarding internal URLs, assuming you move everything into the /myapp/static/js folder, a URL like:
/bower_components/Recorderjs/recorderWorker.js
would change to:
/myapp/static/js/bower_components/Recorderjs/recorderWorker.js
Relative internal URLs should continue to work without alteration.
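Concretely, the workerPath in app.js would then point at the static folder. A sketch assuming the app is named myapp and the files were moved under /myapp/static/js as above:
rec = new Recorder(mediaStreamSource, {
// Absolute path under the app's static folder:
workerPath: '/myapp/static/js/bower_components/Recorderjs/recorderWorker.js'
});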
I have a feature that records audio annotations for a user. It uses HTML5 with a Flash fallback. I am able to get the audio data from the HTML5 version via getUserMedia(), but the Flash fallback provides the data as an array of floats.
I need this data as a WAV file, and I can't figure out how to do it. Any help much appreciated!
var recLength = 0,
recBuffersL = [],
recBuffersR = [],
sampleRate;
this.onmessage = function (e) {
switch (e.data.command) {
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config) {
sampleRate = config.sampleRate;
}
function record(inputBuffer) {
recBuffersL.push(inputBuffer[0]);
recBuffersR.push(inputBuffer[1]);
recLength += inputBuffer[0].length;
}
function exportWAV(type) {
var bufferL = mergeBuffers(recBuffersL, recLength);
var bufferR = mergeBuffers(recBuffersR, recLength);
var interleaved = interleave(bufferL, bufferR);
var dataview = encodeWAV(interleaved);
var audioBlob = new Blob([dataview], {
type: type
});
this.postMessage(audioBlob);
}
function getBuffer() {
var buffers = [];
buffers.push(mergeBuffers(recBuffersL, recLength));
buffers.push(mergeBuffers(recBuffersR, recLength));
this.postMessage(buffers);
}
function clear() {
recLength = 0;
recBuffersL = [];
recBuffersR = [];
}
function mergeBuffers(recBuffers, recLength) {
var result = new Float32Array(recLength);
var offset = 0;
for (var i = 0; i < recBuffers.length; i++) {
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR) {
var length = inputL.length + inputR.length;
var result = new Float32Array(length);
var index = 0,
inputIndex = 0;
while (index < length) {
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input) {
for (var i = 0; i < input.length; i++, offset += 2) {
var s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string) {
for (var i = 0; i < string.length; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples) {
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
writeString(view, 0, 'RIFF');
view.setUint32(4, 36 + samples.length * 2, true);
writeString(view, 8, 'WAVE');
writeString(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
writeString(view, 36, 'data');
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
usage:
var AudioContext = win.webkitAudioContext,
recorder, audioContext;
function recordAudio() {
if (!config.stream) {
alert('No audio.');
return;
}
initAudioRecorder(config.audioWorkerPath);
audioContext = new AudioContext;
var mediaStreamSource = audioContext.createMediaStreamSource(config.stream);
mediaStreamSource.connect(audioContext.destination);
recorder = new window.Recorder(mediaStreamSource);
recorder && recorder.record();
}
function stopAudioRecording() {
console.warn('Audio recording stopped');
recorder && recorder.stop();
recorder && recorder.exportWAV(function (blob) {
fileType = 'wav';
setBlob(blob);
});
recorder && recorder.clear();
}
var writer;
function setBlob(blob) {
blobURL = blob;
var config = {
blob: blobURL,
type: 'audio/wav',
fileName: (Math.random() * 1000 << 1000) + '.' + fileType,
size: blobURL.length
};
writer = RecordRTCFileWriter(config);
var reader = new win.FileReader();
reader.readAsDataURL(blobURL);
reader.onload = function (event) {
blobURL2 = event.target.result;
};
}
return {
stopAudio: stopAudioRecording,
stopVideo: stopVideoRecording,
recordVideo: recordVideo,
recordAudio: recordAudio,
save: saveToDisk,
getBlob: function () {
return blobURL2;
},
toURL: function () {
return writer.toURL();
}
};