I'm trying to convert an AudioBuffer into a WAV file that I can download.
I tried two methods.
For the first one, I record all the sound going through a MediaRecorder and do this:
App.model.mediaRecorder.ondataavailable = function(evt) {
    // push each chunk (a Blob) into an array
    App.model.chunks.push(evt.data);
};
App.model.mediaRecorder.onstop = function(evt) {
    // Make one blob out of our blobs, and open it.
    var blob = new Blob(App.model.chunks, { 'type' : 'audio/wav; codecs=opus' });
    createDownloadLink(blob);
};
I push each chunk (a Blob) into an array and then create a new Blob from these chunks. Then, in the createDownloadLink() function, I create an audio element and a download link:
function createDownloadLink(blob) {
    var url = URL.createObjectURL(blob);
    var li = document.createElement('li');
    var au = document.createElement('audio');
    li.className = "recordedElement";
    var hf = document.createElement('a');
    li.style.textDecoration = "none";
    au.controls = true;
    au.src = url;
    hf.href = url;
    hf.download = 'myrecording' + App.model.countRecordings + ".wav";
    hf.innerHTML = hf.download;
    li.appendChild(au);
    li.appendChild(hf);
    recordingslist.appendChild(li);
}
The audio element is created and I can listen to the sound I recorded, so everything seems to work. But when I download the file, it can't be read by any player. I think that's because it isn't actually encoded as WAV, so it isn't understood.
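(For reference: MediaRecorder doesn't actually produce WAV; in most browsers it encodes Opus into a WebM or Ogg container, and the type string passed to the Blob constructor is only a label, not a transcode. A hypothetical probe of what the browser really records:)
// The Blob 'type' option is metadata only; it does not re-encode the bytes.
console.log(MediaRecorder.isTypeSupported('audio/wav')); // usually false
console.log(MediaRecorder.isTypeSupported('audio/webm;codecs=opus')); // usually true in Chrome
console.log(App.model.mediaRecorder.mimeType); // the container/codec actually in use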
The second method is the same as above, except for the createDownloadLink() function:
function createDownloadLink(blob) {
    var reader = new FileReader();
    reader.readAsArrayBuffer(blob);
    App.model.sourceBuffer = App.model.audioCtx.createBufferSource();
    reader.onloadend = function() {
        App.model.recordBuffer = reader.result;
        App.model.audioCtx.decodeAudioData(App.model.recordBuffer, function(decodedData) {
            App.model.sourceBuffer.buffer = decodedData;
        });
    };
}
Here I get an AudioBuffer of the sounds I recorded, but I couldn't find out how to convert it into a WAV file...
Can you use a variation of this?
https://gist.github.com/asanoboy/3979747
Maybe something like this?
var wav = createWavFromBuffer(convertBlock(decodedData), 44100);
// Then call wav.getBuffer or wav.getWavInt16Array() for the WAV-RIFF formatted data
Here are the other functions:
class Wav {
constructor(opt_params) {
this._sampleRate = opt_params && opt_params.sampleRate ? opt_params.sampleRate : 44100;
this._channels = opt_params && opt_params.channels ? opt_params.channels : 2;
this._eof = true;
this._bufferNeedle = 0;
this._buffer;
}
setBuffer(buffer) {
this._buffer = this.getWavInt16Array(buffer);
this._bufferNeedle = 0;
this._internalBuffer = '';
this._hasOutputHeader = false;
this._eof = false;
}
getBuffer(len) {
var rt;
if( this._bufferNeedle + len >= this._buffer.length ){
rt = new Int16Array(this._buffer.length - this._bufferNeedle);
this._eof = true;
}
else {
rt = new Int16Array(len);
}
for(var i=0; i<rt.length; i++){
rt[i] = this._buffer[i+this._bufferNeedle];
}
this._bufferNeedle += rt.length;
return rt.buffer;
}
eof() {
return this._eof;
}
getWavInt16Array(buffer) {
var intBuffer = new Int16Array(buffer.length + 23), tmp;
intBuffer[0] = 0x4952; // "RI"
intBuffer[1] = 0x4646; // "FF"
intBuffer[2] = (2*buffer.length + 38) & 0x0000ffff; // RIFF size = file size - 8
intBuffer[3] = ((2*buffer.length + 38) & 0xffff0000) >> 16; // RIFF size
intBuffer[4] = 0x4157; // "WA"
intBuffer[5] = 0x4556; // "VE"
intBuffer[6] = 0x6d66; // "fm"
intBuffer[7] = 0x2074; // "t "
intBuffer[8] = 0x0012; // fmt chunksize: 18
intBuffer[9] = 0x0000; //
intBuffer[10] = 0x0001; // format tag : 1
intBuffer[11] = this._channels; // channels: 2
intBuffer[12] = this._sampleRate & 0x0000ffff; // sample per sec
intBuffer[13] = (this._sampleRate & 0xffff0000) >> 16; // sample per sec
intBuffer[14] = (2*this._channels*this._sampleRate) & 0x0000ffff; // byte per sec
intBuffer[15] = ((2*this._channels*this._sampleRate) & 0xffff0000) >> 16; // byte per sec
intBuffer[16] = 2*this._channels; // block align = channels * bytes per sample
intBuffer[17] = 0x0010; // bit per sample
intBuffer[18] = 0x0000; // cb size
intBuffer[19] = 0x6164; // "da"
intBuffer[20] = 0x6174; // "ta"
intBuffer[21] = (2*buffer.length) & 0x0000ffff; // data size[byte]
intBuffer[22] = ((2*buffer.length) & 0xffff0000) >> 16; // data size[byte]
for (var i = 0; i < buffer.length; i++) {
tmp = buffer[i];
if (tmp >= 1) {
intBuffer[i+23] = (1 << 15) - 1;
}
else if (tmp <= -1) {
intBuffer[i+23] = -(1 << 15);
}
else {
intBuffer[i+23] = Math.round(tmp * (1 << 15));
}
}
return intBuffer;
}
}
// factory
function createWavFromBuffer(buffer, sampleRate) {
var wav = new Wav({
sampleRate: sampleRate,
channels: 1
});
wav.setBuffer(buffer);
return wav;
}
// ArrayBuffer -> Float32Array
var convertBlock = function(buffer) {
var incomingData = new Uint8Array(buffer);
var i, l = incomingData.length;
var outputData = new Float32Array(incomingData.length);
for (i = 0; i < l; i++) {
outputData[i] = (incomingData[i] - 128) / 128.0;
}
return outputData;
}
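A hedged note before reusing this: convertBlock treats the ArrayBuffer as raw unsigned 8-bit PCM, which is not what FileReader returns for a compressed recording. If you already have the decodedData AudioBuffer from the question, its channel data is already Float32, so glue like this (hypothetical, mono only) seems closer:
// Hypothetical: feed decoded Float32 samples straight into the Wav helper.
var samples = decodedData.getChannelData(0); // Float32Array in [-1, 1]
var wav = createWavFromBuffer(samples, decodedData.sampleRate);
var bytes = wav.getBuffer(samples.length + 23); // drains header + all samples in one call
var blob = new Blob([bytes], { type: 'audio/wav' });
createDownloadLink(blob);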
My problem:
I'm trying to merge multiple blob audio files to a single blob and download it on the page.
What I tried:
I tried to concatenate the Audio blobs in the following ways:
Method - 1:
const url = window.URL.createObjectURL(new Blob(fullBlobArray), {
type: 'audio/*'
});
const a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = "testing.wav";
a.click();
URL.revokeObjectURL(url);
a.remove();
Method - 2 (using the ConcatenateBlobs.js plugin):
ConcatenateBlobs(fullBlobArray, 'audio/wav', function (fullBlob) {
const url = window.URL.createObjectURL(fullBlob);
const a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = "testing.wav";
a.click();
URL.revokeObjectURL(url);
a.remove();
//Close the window if it downloaded.
window.close();
});
The output is explained below.
If you have the following audio blobs:
[audio1, audio2, audio3]
then after downloading from the above code, only the audio from the first file (i.e. audio1) gets played, yet the file size of the full blob is the total size of audio1 + audio2 + audio3.
I couldn't figure out where I went wrong. Kindly help me get rid of this problem.
Finally, found a solution!!!
Thanks to this StackOverflow article; the effort behind it is highly appreciated.
Thanks to @Bergi, @Zac, and @Peter Krebs for pointing out in the comments that we need to format the blob according to the WAV format.
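In other words, each source blob begins with its own RIFF header whose size fields describe only that clip, so a player stops at the end of the first data chunk. Purely as illustration:
// Naive concatenation of three WAV blobs:
// [RIFF header A][data A][RIFF header B][data B][RIFF header C][data C]
// A player parses header A, whose sizes cover only data A, and stops there,
// which is why only audio1 plays even though the blob contains all three files.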
Below is the code for merging multiple WAV files into a single file:
wav_merger.js
var _index;
function readFileAsync(blob) {
return new Promise((resolve, reject) => {
let reader = new FileReader();
reader.addEventListener("loadend", function () {
resolve(reader.result);
});
reader.onerror = reject;
reader.readAsArrayBuffer(blob);
})
}
function getBufferFromBlobs(blobArray) {
return new Promise((resolve, reject) => {
var _arrBytes = [];
var _promises = [];
if (blobArray.length > 0) {
$.each(blobArray, function (index, blob) {
_index = index;
var dfd = $.Deferred();
readFileAsync(blob).then(function (byteArray) {
_arrBytes.push(byteArray);
dfd.resolve();
});
_promises.push(dfd);
});
$.when.apply($, _promises).done(function () {
var _blob = combineWavsBuffers(_arrBytes);
resolve(_blob);
});
}
});
}
function loadWav(blobArray) {
    return getBufferFromBlobs(blobArray);
}
function combineWavsBuffers(bufferArray) {
if (bufferArray.length > 0) {
var _bufferLengths = bufferArray.map(buffer => buffer.byteLength);
// Getting sum of numbers
var _totalBufferLength = _bufferLengths.reduce(function (a, b) {
return a + b;
}, 0);
var tmp = new Uint8Array(_totalBufferLength);
//Get buffer1 audio data to create the new combined wav
var audioData = getAudioData.WavHeader.readHeader(new DataView(bufferArray[0]));
var _bufferLength = 0;
$.each(bufferArray, function (index, buffer) {
//Combine array bytes of original wavs buffers.
tmp.set(new Uint8Array(buffer), _bufferLength);
_bufferLength+= buffer.byteLength;
});
//Send combined buffer and send audio data to create the audio data of combined
var arrBytesFinal = getWavBytes(tmp, {
isFloat: false, // floating point or 16-bit integer
numChannels: audioData.channels,
sampleRate: audioData.sampleRate,
});
//Create a Blob as Base64 Raw data with audio/wav type
return new Blob([arrBytesFinal], { type: 'audio/wav; codecs=MS_PCM' });
}
return null;
}
//Combine two audio .wav buffers.
function combineWavsBuffers1(buffer1, buffer2) {
//Combine array bytes of original wavs buffers
var tmp = new Uint8Array(buffer1.byteLength + buffer2.byteLength);
tmp.set(new Uint8Array(buffer1), 0);
tmp.set(new Uint8Array(buffer2), buffer1.byteLength);
//Get buffer1 audio data to create the new combined wav
var audioData = getAudioData.WavHeader.readHeader(new DataView(buffer1));
console.log('Audio Data: ', audioData);
//Send combined buffer and send audio data to create the audio data of combined
var arrBytesFinal = getWavBytes(tmp, {
isFloat: false, // floating point or 16-bit integer
numChannels: audioData.channels,
sampleRate: audioData.sampleRate,
});
//Create a Blob as Base64 Raw data with audio/wav type
return new Blob([arrBytesFinal], { type: 'audio/wav; codecs=MS_PCM' });
}
//Other functions //////////////////////////////////////////////////////////////
// Returns Uint8Array of WAV bytes
function getWavBytes(buffer, options) {
const type = options.isFloat ? Float32Array : Uint16Array
const numFrames = buffer.byteLength / type.BYTES_PER_ELEMENT
const headerBytes = getWavHeader(Object.assign({}, options, { numFrames }))
const wavBytes = new Uint8Array(headerBytes.length + buffer.byteLength);
// prepend header, then add pcmBytes
wavBytes.set(headerBytes, 0)
wavBytes.set(new Uint8Array(buffer), headerBytes.length)
return wavBytes
}
// adapted from https://gist.github.com/also/900023
// returns Uint8Array of WAV header bytes
function getWavHeader(options) {
const numFrames = options.numFrames
const numChannels = options.numChannels || 2
const sampleRate = options.sampleRate || 44100
const bytesPerSample = options.isFloat ? 4 : 2
const format = options.isFloat ? 3 : 1
const blockAlign = numChannels * bytesPerSample
const byteRate = sampleRate * blockAlign
const dataSize = numFrames * blockAlign
const buffer = new ArrayBuffer(44)
const dv = new DataView(buffer)
let p = 0
function writeString(s) {
for (let i = 0; i < s.length; i++) {
dv.setUint8(p + i, s.charCodeAt(i))
}
p += s.length
}
function writeUint32(d) {
dv.setUint32(p, d, true)
p += 4
}
function writeUint16(d) {
dv.setUint16(p, d, true)
p += 2
}
writeString('RIFF') // ChunkID
writeUint32(dataSize + 36) // ChunkSize
writeString('WAVE') // Format
writeString('fmt ') // Subchunk1ID
writeUint32(16) // Subchunk1Size
writeUint16(format) // AudioFormat
writeUint16(numChannels) // NumChannels
writeUint32(sampleRate) // SampleRate
writeUint32(byteRate) // ByteRate
writeUint16(blockAlign) // BlockAlign
writeUint16(bytesPerSample * 8) // BitsPerSample
writeString('data') // Subchunk2ID
writeUint32(dataSize) // Subchunk2Size
return new Uint8Array(buffer)
}
function getAudioData() {
function WavHeader() {
this.dataOffset = 0;
this.dataLen = 0;
this.channels = 0;
this.sampleRate = 0;
}
function fourccToInt(fourcc) {
return fourcc.charCodeAt(0) << 24 | fourcc.charCodeAt(1) << 16 | fourcc.charCodeAt(2) << 8 | fourcc.charCodeAt(3);
}
WavHeader.RIFF = fourccToInt("RIFF");
WavHeader.WAVE = fourccToInt("WAVE");
WavHeader.fmt_ = fourccToInt("fmt ");
WavHeader.data = fourccToInt("data");
WavHeader.readHeader = function (dataView) {
var w = new WavHeader();
var header = dataView.getUint32(0, false);
if (WavHeader.RIFF != header) {
return;
}
var fileLen = dataView.getUint32(4, true);
if (WavHeader.WAVE != dataView.getUint32(8, false)) {
return;
}
if (WavHeader.fmt_ != dataView.getUint32(12, false)) {
return;
}
var fmtLen = dataView.getUint32(16, true);
var pos = 16 + 4;
switch (fmtLen) {
case 16:
case 18:
w.channels = dataView.getUint16(pos + 2, true);
w.sampleRate = dataView.getUint32(pos + 4, true);
break;
default:
throw 'extended fmt chunk not implemented';
}
pos += fmtLen;
var data = WavHeader.data;
var len = 0;
while (data != header) {
header = dataView.getUint32(pos, false);
len = dataView.getUint32(pos + 4, true);
if (data == header) {
break;
}
pos += (len + 8);
}
w.dataLen = len;
w.dataOffset = pos + 8;
return w;
};
getAudioData.WavHeader = WavHeader;
}
getAudioData();
custom_script.js
getBufferFromBlobs(fullBlobArray).then(function (singleBlob) {
const url = window.URL.createObjectURL(singleBlob);
const a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = "testing.wav";
a.click();
URL.revokeObjectURL(url);
a.remove();
});
I had the same problem; thanks @Vikash for bringing it here. I'm using ConcatenateBlobs.js to concatenate WAV blobs, and it seems to work only in Chrome. Your solution is great, but the source is a bit long, so I tried to fix ConcatenateBlobs.js based on the fact that the file lengths in the header need to be corrected. Luckily, it works:
function ConcatenateBlobs(blobs, type, callback) {
var buffers = [];
var index = 0;
function readAsArrayBuffer() {
if (!blobs[index]) {
return concatenateBuffers();
}
var reader = new FileReader();
reader.onload = function(event) {
buffers.push(event.target.result);
index++;
readAsArrayBuffer();
};
reader.readAsArrayBuffer(blobs[index]);
}
readAsArrayBuffer();
function audioLengthTo32Bit(n) {
n = Math.floor(n);
var b1 = n & 255;
var b2 = (n >> 8) & 255;
var b3 = (n >> 16) & 255;
var b4 = (n >> 24) & 255;
return [b1, b2, b3, b4];
}
function concatenateBuffers() {
var byteLength = 0;
buffers.forEach(function(buffer) {
byteLength += buffer.byteLength;
});
var tmp = new Uint8Array(byteLength);
var lastOffset = 0;
var newData;
buffers.forEach(function(buffer) {
if (type=='audio/wav' && lastOffset > 0) newData = new Uint8Array(buffer, 44);
else newData = new Uint8Array(buffer);
tmp.set(newData, lastOffset);
lastOffset += newData.length;
});
if (type=='audio/wav') {
tmp.set(audioLengthTo32Bit(lastOffset - 8), 4);
tmp.set(audioLengthTo32Bit(lastOffset - 44), 40); // update audio length in the header
}
var blob = new Blob([tmp.buffer], {
type: type
});
callback(blob);
}
}
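A hypothetical call, mirroring Method - 2 from the question (note this assumes every blob shares the sample rate and channel layout of the first one, since only the first header is kept):
ConcatenateBlobs(fullBlobArray, 'audio/wav', function (fullBlob) {
    var url = URL.createObjectURL(fullBlob);
    var a = document.createElement('a');
    a.href = url;
    a.download = 'testing.wav';
    document.body.appendChild(a);
    a.click();
    URL.revokeObjectURL(url);
    a.remove();
});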
How could I merge or combine two audio .wav files loaded from URLs into a new .wav, play it in the web browser, and download it?
The HTML and JS code:
NOTE: To avoid cross-origin issues, load the WAVs from your local machine or your own server, or allow cross-origin requests in your .htaccess.
<!DOCTYPE html>
<html>
<meta content="text/html;" http-equiv="content-type" charset="utf-8">
<head>
<script>
window.onload = function() {
//Original Wav
//https://www.wavsource.com/snds_2020-10-01_3728627494378403/video_games/pacman/pacman_intro.wav
var audioFileUrl1 = 'pacman_intro.wav'; //Attention to cross-origin !!!
var audioFileUrl2 = 'pacman_intro.wav'; //I love PacMan intro :P
loadWav(audioFileUrl1, audioFileUrl2); //Load wavs from url
function loadWav(url1,url2){
var arrBytesWav1, arrBytesWav2;
fetch(url1)
.then(function(response) { return response.blob(); })
.then(function(blob) {
var reader = new FileReader();
reader.readAsArrayBuffer(blob);
reader.addEventListener("loadend", function() {
arrBytesWav1 = reader.result;
});
return fetch(url2); //Return the second url as a Promise.
})
//Second load
.then(function(response) { return response.blob(); })
.then(function(blob) {
var reader = new FileReader();
reader.readAsArrayBuffer(blob);
reader.addEventListener("loadend", function() {
arrBytesWav2 = reader.result;
combineWavsBuffers( arrBytesWav1, arrBytesWav2 ); //Combine original wav buffer and play
});
})
.catch( function(error) {
alert('Error wav loading: ' + error.message);
});
}
//Combine two audio .wav buffers and assign to audio control and play it.
function combineWavsBuffers(buffer1, buffer2) {
//Combine array bytes of original wavs buffers
var tmp = new Uint8Array(buffer1.byteLength + buffer2.byteLength);
tmp.set( new Uint8Array(buffer1), 0 );
tmp.set( new Uint8Array(buffer2), buffer1.byteLength );
//Get buffer1 audio data to create the new combined wav
var audioData = getAudioData.WavHeader.readHeader(new DataView(buffer1));
console.log('Audio Data: ', audioData);
//Send combined buffer and send audio data to create the audio data of combined
var arrBytesFinal = getWavBytes( tmp, {
isFloat: false, // floating point or 16-bit integer
numChannels: audioData.channels,
sampleRate: audioData.sampleRate,
})
//Create a Blob as Base64 Raw data with audio/wav type
var myBlob = new Blob( [arrBytesFinal] , { type : 'audio/wav; codecs=MS_PCM' });
var combineBase64Wav;
var readerBlob = new FileReader();
readerBlob.addEventListener("loadend", function() {
combineBase64Wav = readerBlob.result.toString();
//Assign to audiocontrol to play the new combined wav.
var audioControl = document.getElementById('audio');
audioControl.src = combineBase64Wav;
audioControl.play();
});
readerBlob.readAsDataURL(myBlob);
console.log( "Buffer1 Size: " + buffer1.byteLength );
console.log( "Buffer2 Size: " + buffer1.byteLength );
console.log( "Combined Size: " + arrBytesFinal.byteLength );
return combineBase64Wav;
}
//Other functions //////////////////////////////////////////////////////////////
// Returns Uint8Array of WAV bytes
function getWavBytes(buffer, options) {
const type = options.isFloat ? Float32Array : Uint16Array
const numFrames = buffer.byteLength / type.BYTES_PER_ELEMENT
const headerBytes = getWavHeader(Object.assign({}, options, { numFrames }))
const wavBytes = new Uint8Array(headerBytes.length + buffer.byteLength);
// prepend header, then add pcmBytes
wavBytes.set(headerBytes, 0)
wavBytes.set(new Uint8Array(buffer), headerBytes.length)
return wavBytes
}
// adapted from https://gist.github.com/also/900023
// returns Uint8Array of WAV header bytes
function getWavHeader(options) {
const numFrames = options.numFrames
const numChannels = options.numChannels || 2
const sampleRate = options.sampleRate || 44100
const bytesPerSample = options.isFloat? 4 : 2
const format = options.isFloat? 3 : 1
const blockAlign = numChannels * bytesPerSample
const byteRate = sampleRate * blockAlign
const dataSize = numFrames * blockAlign
const buffer = new ArrayBuffer(44)
const dv = new DataView(buffer)
let p = 0
function writeString(s) {
for (let i = 0; i < s.length; i++) {
dv.setUint8(p + i, s.charCodeAt(i))
}
p += s.length
}
function writeUint32(d) {
dv.setUint32(p, d, true)
p += 4
}
function writeUint16(d) {
dv.setUint16(p, d, true)
p += 2
}
writeString('RIFF') // ChunkID
writeUint32(dataSize + 36) // ChunkSize
writeString('WAVE') // Format
writeString('fmt ') // Subchunk1ID
writeUint32(16) // Subchunk1Size
writeUint16(format) // AudioFormat
writeUint16(numChannels) // NumChannels
writeUint32(sampleRate) // SampleRate
writeUint32(byteRate) // ByteRate
writeUint16(blockAlign) // BlockAlign
writeUint16(bytesPerSample * 8) // BitsPerSample
writeString('data') // Subchunk2ID
writeUint32(dataSize) // Subchunk2Size
return new Uint8Array(buffer)
}
function getAudioData(){
function WavHeader() {
this.dataOffset = 0;
this.dataLen = 0;
this.channels = 0;
this.sampleRate = 0;
}
function fourccToInt(fourcc) {
return fourcc.charCodeAt(0) << 24 | fourcc.charCodeAt(1) << 16 | fourcc.charCodeAt(2) << 8 | fourcc.charCodeAt(3);
}
WavHeader.RIFF = fourccToInt("RIFF");
WavHeader.WAVE = fourccToInt("WAVE");
WavHeader.fmt_ = fourccToInt("fmt ");
WavHeader.data = fourccToInt("data");
WavHeader.readHeader = function (dataView) {
var w = new WavHeader();
var header = dataView.getUint32(0, false);
if (WavHeader.RIFF != header) {
return;
}
var fileLen = dataView.getUint32(4, true);
if (WavHeader.WAVE != dataView.getUint32(8, false)) {
return;
}
if (WavHeader.fmt_ != dataView.getUint32(12, false)) {
return;
}
var fmtLen = dataView.getUint32(16, true);
var pos = 16 + 4;
switch (fmtLen) {
case 16:
case 18:
w.channels = dataView.getUint16(pos + 2, true);
w.sampleRate = dataView.getUint32(pos + 4, true);
break;
default:
throw 'extended fmt chunk not implemented';
}
pos += fmtLen;
var data = WavHeader.data;
var len = 0;
while (data != header) {
header = dataView.getUint32(pos, false);
len = dataView.getUint32(pos + 4, true);
if (data == header) {
break;
}
pos += (len + 8);
}
w.dataLen = len;
w.dataOffset = pos + 8;
return w;
};
getAudioData.WavHeader = WavHeader;
}
getAudioData();
};//Window onLoad
</script>
</head>
<body>
<!--The audio control HTML element -->
<audio id="audio" src="" controls></audio>
</body>
</html>
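One caveat worth noting: tmp above still contains buffer2's original 44-byte header in the middle of the audio data (and buffer1's header sits under the new one), so they are played back as brief blips of noise. A hypothetical refinement keeps only the PCM payloads, reusing the header reader already defined above:
// Hypothetical: strip both original headers before prepending the new one.
var hdr1 = getAudioData.WavHeader.readHeader(new DataView(buffer1));
var hdr2 = getAudioData.WavHeader.readHeader(new DataView(buffer2));
var body1 = new Uint8Array(buffer1, hdr1.dataOffset, hdr1.dataLen);
var body2 = new Uint8Array(buffer2, hdr2.dataOffset, hdr2.dataLen);
var tmp = new Uint8Array(body1.byteLength + body2.byteLength);
tmp.set(body1, 0);
tmp.set(body2, body1.byteLength);
// ...then pass tmp to getWavBytes exactly as in combineWavsBuffers.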
I use the sdk.connection methods to capture audio from the speech-to-text recognizer. This produces PCM audio that I want to convert into MP3.
This is how connection is initialised:
const con = SpeechSDK.Connection.fromRecognizer(this.recognizer);
con.messageSent = args => {
// Only record outbound audio messages that have data in them.
if (
args.message.path === "audio" &&
args.message.isBinaryMessage &&
args.message.binaryMessage !== null
) {
this.wavFragments[this.wavFragmentCount++] =
args.message.binaryMessage;
}
};
and this is the wav file build:
let byteCount = 0;
for (let i = 0; i < this.wavFragmentCount; i++) {
byteCount += this.wavFragments[i].byteLength;
}
// Output array.
const sentAudio = new Uint8Array(byteCount);
byteCount = 0;
for (let i = 0; i < this.wavFragmentCount; i++) {
sentAudio.set(new Uint8Array(this.wavFragments[i]), byteCount);
byteCount += this.wavFragments[i].byteLength;
} // Write the audio back to disk.
// Set the file size in the wave header:
const view = new DataView(sentAudio.buffer);
view.setUint32(4, byteCount, true);
view.setUint32(40, byteCount, true);
I tried using lamejs to convert 'sentAudio' into MP3.
import {lamejs} from "../../modules/lame.min.js";
const wavBlob = new Blob([sentAudio]);
const reader = new FileReader();
reader.onload = evt => {
const audioData = evt.target.result;
const wav = lamejs.WavHeader.readHeader(new DataView(audioData));
const mp3enc = new lamejs.Mp3Encoder(1, wav.sampleRate, 128);
const samples = new Int8Array(audioData, wav.dataOffset, wav.dataLen / 2);
let mp3Tmp = mp3enc.encodeBuffer(samples); // encode mp3
// Push encode buffer to mp3Data variable
const mp3Data = [];
mp3Data.push(mp3Tmp);
// Get end part of mp3
mp3Tmp = mp3enc.flush();
// Write last data to the output data, too
// mp3Data contains now the complete mp3Data
mp3Data.push(mp3Tmp);
const blob = new Blob(mp3Data, { type: "audio/mp3" });
this.createDownloadLink(blob, "mp3");
};
reader.readAsArrayBuffer(wavBlob);
The MP3 blob is empty or contains inaudible noise.
I have also tried using the encodeMP3 method described in this example, but it gives the same output.
Are there any existing solutions to support this MP3 conversion?
Regarding the issue, please refer to the following code.
let byteCount = 0;
for (let i= 0; i < wavFragmentCount; i++) {
byteCount += wavFragments[i].byteLength;
}
// Output array.
const sentAudio: Uint8Array = new Uint8Array(byteCount);
byteCount = 0;
for (let i: number = 0; i < wavFragmentCount; i++) {
sentAudio.set(new Uint8Array(wavFragments[i]), byteCount);
byteCount += wavFragments[i].byteLength;
}
// create wav file blob
const view = new DataView(sentAudio.buffer);
view.setUint32(4, byteCount, true);
view.setUint32(40, byteCount, true);
let wav = new Blob([view], { type: 'audio/wav' });
// read wave file as base64
var reader = new FileReader();
reader.readAsDataURL(wav);
reader.onload = () => {
var base64String = reader.result.toString();
base64String = base64String.split(',')[1];
// convert to buffer
var binary_string = window.atob(base64String);
var len = binary_string.length;
var bytes = new Uint8Array(len);
for (var i = 0; i < len; i++) {
bytes[i] = binary_string.charCodeAt(i);
}
// convert to mp3 with lamejs
var wavHdr = lamejs.WavHeader.readHeader(
new DataView(bytes.buffer)
);
console.log(wavHdr);
var wavSamples = new Int16Array(
bytes.buffer,
wavHdr.dataOffset, // skip the 44-byte header so it isn't decoded as audio
wavHdr.dataLen / 2
);
let mp3 = this.wavToMp3(
wavHdr.channels,
wavHdr.sampleRate,
wavSamples
);
reader.readAsDataURL(mp3);
reader.onload = () => {
var base64String = reader.result;
console.log(base64String);
};
};
function wavToMp3(channels, sampleRate, samples) {
console.log(channels);
console.log(sampleRate);
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = samples.length;
var maxSamples = 1152;
for (var i = 0; remaining >= maxSamples; i += maxSamples) {
var mono = samples.subarray(i, i + maxSamples);
var mp3buf = mp3enc.encodeBuffer(mono);
if (mp3buf.length > 0) {
buffer.push(new Int8Array(mp3buf));
}
remaining -= maxSamples;
}
var d = mp3enc.flush();
if (d.length > 0) {
buffer.push(new Int8Array(d));
}
console.log('done encoding, size=', buffer.length);
var blob = new Blob(buffer, { type: 'audio/mp3' });
var bUrl = window.URL.createObjectURL(blob);
console.log('Blob created, URL:', bUrl);
return blob;
}
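If you want a downloadable file rather than a base64 string, a hypothetical final step for the blob named mp3 above:
// Hypothetical: offer the encoded MP3 for download.
var a = document.createElement('a');
a.href = window.URL.createObjectURL(mp3);
a.download = 'recording.mp3';
document.body.appendChild(a);
a.click();
a.remove();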
I am using MediaRecorder in ReactJS to record audio from the microphone and storing it in a blob with MIME type "audio/mp3". I want to convert this blob to MP3 and upload it to an S3 bucket.
I am able to convert it into WAV by using the audioContext, decodeAudioData and audioBufferToWav functions, but the WAV file is very large. Since an MP3 file is relatively small, I want to convert my blob to MP3 instead. Any help?
My code for recording and converting to wav:
getUserMedia({ audio: true })
.then(stream => {
this.stream = stream;
const mimeType = 'audio/mp3';
this.mediaRecorder = new MediaRecorder(stream);
this.mediaRecorder.start();
const audioChunks = [];
this.mediaRecorder.addEventListener('dataavailable', event => {
audioChunks.push(event.data);
});
this.mediaRecorder.addEventListener('stop', () => {
const audioBlob = new Blob(audioChunks, {
type: mimeType});
});
}).catch(error => { });
Converting above created blob to WAV:
const reader = new window.FileReader();
reader.readAsDataURL(audioBlob);
reader.onloadend = () => {
let base64 = reader.result + '';
base64 = base64.split(',')[1];
const ab = new ArrayBuffer(base64.length);
const buff = new Buffer.from(base64, 'base64');
const view = new Uint8Array(ab);
for (let i = 0; i < buff.length; ++i) {
view[i] = buff[i];
}
const context = new AudioContext();
context.decodeAudioData(ab, (buffer) => {
const wavFile = toWav(buffer);
});
};
I am storing wavFile in S3, but I want MP3. Please help!
I'm not using the ReactJS MediaRecorder, nor do I exactly follow what's going on in your specific example, but I have a solution for converting an AudioBuffer to an MP3, by way of WAV.
The first function is based on russellgood.com/how-to-convert-audiobuffer-to-audio-file. The second is based on lamejs.
First, convert the AudioBuffer to a WAV blob:
function audioBufferToWav(aBuffer) {
let numOfChan = aBuffer.numberOfChannels,
btwLength = aBuffer.length * numOfChan * 2 + 44,
btwArrBuff = new ArrayBuffer(btwLength),
btwView = new DataView(btwArrBuff),
btwChnls = [],
btwIndex,
btwSample,
btwOffset = 0,
btwPos = 0;
setUint32(0x46464952); // "RIFF"
setUint32(btwLength - 8); // file length - 8
setUint32(0x45564157); // "WAVE"
setUint32(0x20746d66); // "fmt " chunk
setUint32(16); // length = 16
setUint16(1); // PCM (uncompressed)
setUint16(numOfChan);
setUint32(aBuffer.sampleRate);
setUint32(aBuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
setUint16(numOfChan * 2); // block-align
setUint16(16); // 16-bit
setUint32(0x61746164); // "data" - chunk
setUint32(btwLength - btwPos - 4); // chunk length
for (btwIndex = 0; btwIndex < aBuffer.numberOfChannels; btwIndex++)
btwChnls.push(aBuffer.getChannelData(btwIndex));
while (btwPos < btwLength) {
for (btwIndex = 0; btwIndex < numOfChan; btwIndex++) {
// interleave btwChnls
btwSample = Math.max(-1, Math.min(1, btwChnls[btwIndex][btwOffset])); // clamp
btwSample = (0.5 + btwSample < 0 ? btwSample * 32768 : btwSample * 32767) | 0; // scale to 16-bit signed int
btwView.setInt16(btwPos, btwSample, true); // write 16-bit sample
btwPos += 2;
}
btwOffset++; // next source sample
}
let wavHdr = lamejs.WavHeader.readHeader(new DataView(btwArrBuff));
let wavSamples = new Int16Array(btwArrBuff, wavHdr.dataOffset, wavHdr.dataLen / 2);
wavToMp3(wavHdr.channels, wavHdr.sampleRate, wavSamples);
function setUint16(data) {
btwView.setUint16(btwPos, data, true);
btwPos += 2;
}
function setUint32(data) {
btwView.setUint32(btwPos, data, true);
btwPos += 4;
}
}
Second, convert the WAV to an MP3:
function wavToMp3(channels, sampleRate, samples) {
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = samples.length;
var samplesPerFrame = 1152;
for (var i = 0; remaining >= samplesPerFrame; i += samplesPerFrame) {
var mono = samples.subarray(i, i + samplesPerFrame);
var mp3buf = mp3enc.encodeBuffer(mono);
if (mp3buf.length > 0) {
buffer.push(new Int8Array(mp3buf));
}
remaining -= samplesPerFrame;
}
var d = mp3enc.flush();
if(d.length > 0){
buffer.push(new Int8Array(d));
}
var mp3Blob = new Blob(buffer, {type: 'audio/mp3'});
var bUrl = window.URL.createObjectURL(mp3Blob);
// send the download link to the console
console.log('mp3 download:', bUrl);
}
Hope this helps!
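If it helps to see the glue, a minimal hypothetical call, assuming blob holds the recorded audio and lamejs is loaded:
// Hypothetical wiring: decode a recorded blob and hand it to the converter above.
const ctx = new AudioContext();
blob.arrayBuffer()
    .then((buf) => ctx.decodeAudioData(buf))
    .then(audioBufferToWav); // wavToMp3 then logs an mp3 object URL to the console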
This is how I combined MediaRecorder and the format converter, based on CuriousChad's great answer. I just needed to treat the MP3 encoder as stereo for it to work.
First, set AudioFormat to 'WEBM' (Chrome), 'MP3', or 'WAV':
this.mediaRecorder.onstop = (e) => {
if (AudioFormat === "MP3" || AudioFormat === "WAV") {
var data = this.chunks[0];
var blob = new Blob(this.chunks, { type: "video/webm" });
const audioContext = new AudioContext();
const fileReader = new FileReader();
// Set up file reader on loaded end event
fileReader.onloadend = () => {
const arrayBuffer = fileReader.result; // as ArrayBuffer;
// Convert array buffer into audio buffer
audioContext.decodeAudioData(arrayBuffer, (audioBuffer) => {
// Do something with audioBuffer
console.log(audioBuffer);
var MP3Blob = audioBufferToWav(audioBuffer);
onStop(MP3Blob, audioBuffer);
});
};
//Load blob
fileReader.readAsArrayBuffer(blob);
} else {
var data = this.chunks[0];
var blob = new Blob(this.chunks, { type: "audio/mpeg" });
onStop(blob, data);
}
this.chunks = [];
};
Second, convert Buffer to Wav:
function audioBufferToWav(aBuffer) {
let numOfChan = aBuffer.numberOfChannels,
btwLength = aBuffer.length * numOfChan * 2 + 44,
btwArrBuff = new ArrayBuffer(btwLength),
btwView = new DataView(btwArrBuff),
btwChnls = [],
btwIndex,
btwSample,
btwOffset = 0,
btwPos = 0;
setUint32(0x46464952); // "RIFF"
setUint32(btwLength - 8); // file length - 8
setUint32(0x45564157); // "WAVE"
setUint32(0x20746d66); // "fmt " chunk
setUint32(16); // length = 16
setUint16(1); // PCM (uncompressed)
setUint16(numOfChan);
setUint32(aBuffer.sampleRate);
setUint32(aBuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
setUint16(numOfChan * 2); // block-align
setUint16(16); // 16-bit
setUint32(0x61746164); // "data" - chunk
setUint32(btwLength - btwPos - 4); // chunk length
for (btwIndex = 0; btwIndex < aBuffer.numberOfChannels; btwIndex++)
btwChnls.push(aBuffer.getChannelData(btwIndex));
while (btwPos < btwLength) {
for (btwIndex = 0; btwIndex < numOfChan; btwIndex++) {
// interleave btwChnls
btwSample = Math.max(-1, Math.min(1, btwChnls[btwIndex][btwOffset])); // clamp
btwSample =
(0.5 + btwSample < 0 ? btwSample * 32768 : btwSample * 32767) | 0; // scale to 16-bit signed int
btwView.setInt16(btwPos, btwSample, true); // write 16-bit sample
btwPos += 2;
}
btwOffset++; // next source sample
}
let wavHdr = lamejs.WavHeader.readHeader(new DataView(btwArrBuff));
//Stereo
let data = new Int16Array(btwArrBuff, wavHdr.dataOffset, wavHdr.dataLen / 2);
let leftData = [];
let rightData = [];
for (let i = 0; i < data.length; i += 2) {
leftData.push(data[i]);
rightData.push(data[i + 1]);
}
var left = new Int16Array(leftData);
var right = new Int16Array(rightData);
if (AudioFormat === "MP3") {
//STEREO
if (wavHdr.channels === 2)
return wavToMp3(
wavHdr.channels,
wavHdr.sampleRate,
left,
right,
);
//MONO
else if (wavHdr.channels === 1)
return wavToMp3(wavHdr.channels, wavHdr.sampleRate, data);
} else return new Blob([btwArrBuff], { type: "audio/wav" });
function setUint16(data) {
btwView.setUint16(btwPos, data, true);
btwPos += 2;
}
function setUint32(data) {
btwView.setUint32(btwPos, data, true);
btwPos += 4;
}
}
Third, convert WAV to MP3:
I had to switch to stereo, as I have two channels (left and right) for the Mp3Encoder.
function wavToMp3(channels, sampleRate, left, right = null) {
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = left.length;
var samplesPerFrame = 1152;
for (var i = 0; remaining >= samplesPerFrame; i += samplesPerFrame) {
if (!right) {
var mono = left.subarray(i, i + samplesPerFrame);
var mp3buf = mp3enc.encodeBuffer(mono);
} else {
var leftChunk = left.subarray(i, i + samplesPerFrame);
var rightChunk = right.subarray(i, i + samplesPerFrame);
var mp3buf = mp3enc.encodeBuffer(leftChunk, rightChunk);
}
if (mp3buf.length > 0) {
buffer.push(mp3buf); //new Int8Array(mp3buf));
}
remaining -= samplesPerFrame;
}
var d = mp3enc.flush();
if (d.length > 0) {
buffer.push(new Int8Array(d));
}
var mp3Blob = new Blob(buffer, { type: "audio/mp3" });
//var bUrl = window.URL.createObjectURL(mp3Blob);
// send the download link to the console
//console.log('mp3 download:', bUrl);
return mp3Blob;
}
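One hedged caveat about the loop above: any final chunk shorter than samplesPerFrame (1152 samples, the MPEG-1 Layer III frame size) is never passed to the encoder, so up to roughly 26 ms of audio at 44.1 kHz can be dropped. A hypothetical tail fix, inserted just before mp3enc.flush():
// Hypothetical: encode the leftover samples (fewer than 1152) before flushing.
if (remaining > 0) {
    var tailStart = left.length - remaining;
    var tailBuf = right
        ? mp3enc.encodeBuffer(left.subarray(tailStart), right.subarray(tailStart))
        : mp3enc.encodeBuffer(left.subarray(tailStart));
    if (tailBuf.length > 0) buffer.push(new Int8Array(tailBuf));
}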
I'm building HTML5 voice recording software with a visualizer. While recording, and after the file has been uploaded as WAV in a blob (server-side), the user should be able to select the audio format of that file using ffmpeg. What I have achieved so far is uploading the file as WAV. What I still want to do is (see the sketch after this list):
On the server side, pick your preferred web programming framework
The web programming framework accepts the upload and stores the file on the server
The web programming framework runs ffmpeg (command line), which processes the file
The user can download the processed file
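A minimal sketch of those server-side steps, assuming (my choice, not part of the question) Node.js with Express and multer and an ffmpeg binary on the PATH; the route and field names are hypothetical:

// Hypothetical Node.js/Express upload-and-transcode endpoint.
const express = require('express');
const multer = require('multer');
const { execFile } = require('child_process');

const upload = multer({ dest: 'uploads/' }); // stores the uploaded WAV on the server
const app = express();

app.post('/upload', upload.single('audio'), (req, res) => {
    // Whitelist the target format so user input never reaches the shell directly.
    const format = ['mp3', 'ogg', 'flac'].includes(req.body.format) ? req.body.format : 'mp3';
    const outPath = req.file.path + '.' + format;
    execFile('ffmpeg', ['-i', req.file.path, outPath], (err) => {
        if (err) return res.status(500).send('ffmpeg failed');
        res.download(outPath); // the user downloads the processed file
    });
});

app.listen(3000);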
Here is my code so far:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({
audio: true
}, success, function (e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function getVal(value) {
// if R is pressed, we start recording
if (value == "record") {
recording = true;
// reset the buffers for the new recording
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
document.getElementById('output').innerHTML = "Recording now...";
// if S is pressed, we stop the recording and package the WAV file
} else if (value == "stop") {
// we stop recording
recording = false;
document.getElementById('output').innerHTML = "Building wav file...";
// we flat the left and right channels down
var leftBuffer = mergeBuffers(leftchannel, recordingLength);
var rightBuffer = mergeBuffers(rightchannel, recordingLength);
// we interleave both channels together
var interleaved = interleave(leftBuffer, rightBuffer);
var buffer = new ArrayBuffer(44 + interleaved.length * 2);
var view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 36 + interleaved.length * 2, true); // RIFF chunk size = file size - 8
writeUTFBytes(view, 8, 'WAVE');
// FMT sub-chunk
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
// stereo (2 channels)
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
// data sub-chunk
writeUTFBytes(view, 36, 'data');
view.setUint32(40, interleaved.length * 2, true);
var lng = interleaved.length;
var index = 44;
var volume = 1;
for (var i = 0; i < lng; i++) {
view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
index += 2;
}
var blob = new Blob([view], {
type: 'audio/wav'
});
// let's save it locally
document.getElementById('output').innerHTML = 'Handing off the file now...';
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var li = document.createElement('li');
var au = document.createElement('audio');
var hf = document.createElement('a');
au.controls = true;
au.src = url;
hf.href = url;
hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
hf.innerHTML = hf.download;
li.appendChild(au);
li.appendChild(hf);
recordingList.appendChild(li);
}
}
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
volume = context.createGain();
// creates an audio node from the microphone incoming stream(source)
source = context.createMediaStreamSource(e);
// connect the stream(source) to the gain node
source.connect(volume);
var bufferSize = 2048;
recorder = context.createScriptProcessor(bufferSize, 2, 2);
//node for the visualizer
analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;
splitter = context.createChannelSplitter();
//when recording happens
recorder.onaudioprocess = function (e) {
if (!recording) return;
var left = e.inputBuffer.getChannelData(0);
var right = e.inputBuffer.getChannelData(1);
leftchannel.push(new Float32Array(left));
rightchannel.push(new Float32Array(right));
recordingLength += bufferSize;
// get the average for the first channel
var array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
// clear the current state
ctx.clearRect(0, 0, 1000, 325);
var gradient = ctx.createLinearGradient(0, 0, 0, 300);
gradient.addColorStop(1, '#000000');
gradient.addColorStop(0.75, '#ff0000');
gradient.addColorStop(0.25, '#ffff00');
gradient.addColorStop(0, '#ffffff');
// set the fill style
ctx.fillStyle = gradient;
drawSpectrum(array);
function drawSpectrum(array) {
for (var i = 0; i < (array.length); i++) {
var value = array[i];
ctx.fillRect(i * 5, 325 - value, 3, 325);
}
}
}
function getAverageVolume(array) {
var values = 0;
var average;
var length = array.length;
// get all the frequency amplitudes
for (var i = 0; i < length; i++) {
values += array[i];
}
average = values / length;
return average;
}
// we connect the recorder(node to destination(speakers))
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
analyser.connect(recorder);
recorder.connect(context.destination);
}
function mergeBuffers(channelBuffer, recordingLength) {
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++) {
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel) {
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length;) {
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string) {
var lng = string.length;
for (var i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
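To hand the blob to a server like the one sketched earlier instead of only saving it locally, a hypothetical upload step (endpoint and field names assumed, not part of the original code):
// Hypothetical: POST the recorded WAV blob to the server for ffmpeg processing.
var form = new FormData();
form.append('audio', blob, 'recording.wav');
form.append('format', 'mp3');
fetch('/upload', { method: 'POST', body: form })
    .then(function (res) { return res.blob(); })
    .then(function (converted) {
        // Offer the converted file for download, same as the WAV link above.
        var dl = document.createElement('a');
        dl.href = (window.URL || window.webkitURL).createObjectURL(converted);
        dl.download = 'audio_recording_converted.mp3';
        dl.click();
    });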