I am developing a web app in which I want to use Recorder.js to implement audio recording. I have the code below, but I am unable to get it working in the web2py framework. Can you give me detailed information on how to use it?
There is a .bower.json file that gets hidden when uploaded to web2py. How can we use it and link all the files so that recording can be done?
This is my .bower.json:
{
    "name": "Recorderjs",
    "homepage": "https://github.com/mattdiamond/Recorderjs",
    "_release": "f814ac7b3f",
    "_resolution": {
        "type": "branch",
        "branch": "master",
        "commit": "f814ac7b3f4ed4f62729860d5a02720f167480b3"
    },
    "_source": "git://github.com/mattdiamond/Recorderjs.git",
    "_target": "*",
    "_originalSource": "Recorderjs",
    "_direct": true
}
This is my html code:
<body>
    <script src="bower_components/Recorderjs/recorder.js"></script>
    <script src="app.js"></script>
    <button onclick="record()">Record</button>
    <button onclick="stop()">Stop</button>
</body>
These are my JavaScript files:
/app.js
var navigator = window.navigator;
var Context = window.AudioContext || window.webkitAudioContext;
var context = new Context();

// audio
var mediaStream;
var rec;

navigator.getUserMedia = (
    navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia ||
    navigator.msGetUserMedia
);

function record() {
    navigator.getUserMedia({audio: true}, function(localMediaStream){
        mediaStream = localMediaStream;
        var mediaStreamSource = context.createMediaStreamSource(localMediaStream);
        rec = new Recorder(mediaStreamSource, {
            workerPath: '/bower_components/Recorderjs/recorderWorker.js'
        });
        rec.record();
    }, function(err){
        console.log('Not supported');
    });
}

function stop() {
    mediaStream.stop();
    rec.stop();
    rec.exportWAV(function(e){
        rec.clear();
        Recorder.forceDownload(e, "test.wav");
    });
}
/recorder.js
(function(window){

    var WORKER_PATH = 'recorderWorker.js';

    var Recorder = function(source, cfg){
        var config = cfg || {};
        var bufferLen = config.bufferLen || 4096;
        this.context = source.context;
        this.node = (this.context.createScriptProcessor ||
                     this.context.createJavaScriptNode).call(this.context,
                     bufferLen, 2, 2);
        var worker = new Worker(config.workerPath || WORKER_PATH);
        worker.postMessage({
            command: 'init',
            config: {
                sampleRate: this.context.sampleRate
            }
        });
        var recording = false,
            currCallback;

        this.node.onaudioprocess = function(e){
            if (!recording) return;
            worker.postMessage({
                command: 'record',
                buffer: [
                    e.inputBuffer.getChannelData(0),
                    e.inputBuffer.getChannelData(1)
                ]
            });
        }

        this.configure = function(cfg){
            for (var prop in cfg){
                if (cfg.hasOwnProperty(prop)){
                    config[prop] = cfg[prop];
                }
            }
        }

        this.record = function(){
            recording = true;
        }

        this.stop = function(){
            recording = false;
        }

        this.clear = function(){
            worker.postMessage({ command: 'clear' });
        }

        this.getBuffer = function(cb) {
            currCallback = cb || config.callback;
            worker.postMessage({ command: 'getBuffer' })
        }

        this.exportWAV = function(cb, type){
            currCallback = cb || config.callback;
            type = type || config.type || 'audio/wav';
            if (!currCallback) throw new Error('Callback not set');
            worker.postMessage({
                command: 'exportWAV',
                type: type
            });
        }

        worker.onmessage = function(e){
            var blob = e.data;
            currCallback(blob);
        }

        source.connect(this.node);
        this.node.connect(this.context.destination); //this should not be necessary
    };

    Recorder.forceDownload = function(blob, filename){
        var url = (window.URL || window.webkitURL).createObjectURL(blob);
        var link = window.document.createElement('a');
        link.href = url;
        link.download = filename || 'output.wav';
        var click = document.createEvent("Event");
        click.initEvent("click", true, true);
        link.dispatchEvent(click);
    }

    window.Recorder = Recorder;

})(window);
recorderWorker.js:
var recLength = 0,
    recBuffersL = [],
    recBuffersR = [],
    sampleRate;

this.onmessage = function(e){
    switch(e.data.command){
        case 'init':
            init(e.data.config);
            break;
        case 'record':
            record(e.data.buffer);
            break;
        case 'exportWAV':
            exportWAV(e.data.type);
            break;
        case 'getBuffer':
            getBuffer();
            break;
        case 'clear':
            clear();
            break;
    }
};

function init(config){
    sampleRate = config.sampleRate;
}

function record(inputBuffer){
    recBuffersL.push(inputBuffer[0]);
    recBuffersR.push(inputBuffer[1]);
    recLength += inputBuffer[0].length;
}

function exportWAV(type){
    var bufferL = mergeBuffers(recBuffersL, recLength);
    var bufferR = mergeBuffers(recBuffersR, recLength);
    var interleaved = interleave(bufferL, bufferR);
    var dataview = encodeWAV(interleaved);
    var audioBlob = new Blob([dataview], { type: type });
    this.postMessage(audioBlob);
}

function getBuffer() {
    var buffers = [];
    buffers.push( mergeBuffers(recBuffersL, recLength) );
    buffers.push( mergeBuffers(recBuffersR, recLength) );
    this.postMessage(buffers);
}

function clear(){
    recLength = 0;
    recBuffersL = [];
    recBuffersR = [];
}

function mergeBuffers(recBuffers, recLength){
    var result = new Float32Array(recLength);
    var offset = 0;
    for (var i = 0; i < recBuffers.length; i++){
        result.set(recBuffers[i], offset);
        offset += recBuffers[i].length;
    }
    return result;
}

function interleave(inputL, inputR){
    var length = inputL.length + inputR.length;
    var result = new Float32Array(length);
    var index = 0,
        inputIndex = 0;
    while (index < length){
        result[index++] = inputL[inputIndex];
        result[index++] = inputR[inputIndex];
        inputIndex++;
    }
    return result;
}

function floatTo16BitPCM(output, offset, input){
    for (var i = 0; i < input.length; i++, offset+=2){
        var s = Math.max(-1, Math.min(1, input[i]));
        output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }
}

function writeString(view, offset, string){
    for (var i = 0; i < string.length; i++){
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}

function encodeWAV(samples){
    var buffer = new ArrayBuffer(44 + samples.length * 2);
    var view = new DataView(buffer);
    /* RIFF identifier */
    writeString(view, 0, 'RIFF');
    /* RIFF chunk length (file length minus 8) */
    view.setUint32(4, 36 + samples.length * 2, true);
    /* RIFF type */
    writeString(view, 8, 'WAVE');
    /* format chunk identifier */
    writeString(view, 12, 'fmt ');
    /* format chunk length */
    view.setUint32(16, 16, true);
    /* sample format (raw) */
    view.setUint16(20, 1, true);
    /* channel count */
    view.setUint16(22, 2, true);
    /* sample rate */
    view.setUint32(24, sampleRate, true);
    /* byte rate (sample rate * block align) */
    view.setUint32(28, sampleRate * 4, true);
    /* block align (channel count * bytes per sample) */
    view.setUint16(32, 4, true);
    /* bits per sample */
    view.setUint16(34, 16, true);
    /* data chunk identifier */
    writeString(view, 36, 'data');
    /* data chunk length */
    view.setUint32(40, samples.length * 2, true);
    floatTo16BitPCM(view, 44, samples);
    return view;
}
I don't know if this is the only problem, but the following script source references are relative URLs:
<script src="bower_components/Recorderjs/recorder.js"></script>
<script src="app.js"></script>
Because the URLs are not preceded with a /, they will be appended to the URL of the current page. So, if you are at /myapp/default/index, the first URL will reference /myapp/default/index/bower_components/Recorderjs/recorder.js, which will not be correct.
You should keep your JavaScript files in the /web2py/applications/myapp/static folder. In that case, it is best to use the web2py URL() function to generate proper URLs:
<script src="{{=URL('static', 'js/bower_components/Recorderjs/recorder.js')}}"></script>
<script src="{{=URL('static', 'js/app.js')}}"></script>
Regarding internal URLs, assuming you move everything into the /myapp/static/js folder, a URL like:
/bower_components/Recorderjs/recorderWorker.js
would change to:
/myapp/static/js/bower_components/Recorderjs/recorderWorker.js
Relative internal URLs should continue to work without alteration.
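For example, assuming the application is named myapp and the Bower tree has been moved under applications/myapp/static/js, the Recorder constructor call in app.js would change accordingly (a sketch; adjust the path to your actual layout):

// app.js — hypothetical path, assuming the app is named "myapp" and the
// Bower files now live under applications/myapp/static/js
rec = new Recorder(mediaStreamSource, {
    workerPath: '/myapp/static/js/bower_components/Recorderjs/recorderWorker.js'
});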
Related
I have recorder.js, which records audio and takes buffer inputs, but I want to downsample the audio buffers. I have written the function, but I am confused about where to call it. Please check my function and, if possible, suggest where to call it.
import InlineWorker from 'inline-worker';
export class Recorder {
config = {
bufferLen: 4096,
numChannels: 2,
mimeType: 'audio/mp3'
};
recording = false;
callbacks = {
getBuffer: [],
exportWAV: []
};
constructor(source, cfg) {
Object.assign(this.config, cfg);
this.context = source.context;
this.node = (this.context.createScriptProcessor ||
this.context.createJavaScriptNode).call(this.context,
this.config.bufferLen, this.config.numChannels, this.config.numChannels);
this.node.onaudioprocess = (e) => {
if (!this.recording) return;
var buffer = [];
for (var channel = 0; channel < this.config.numChannels; channel++) {
buffer.push(e.inputBuffer.getChannelData(channel));
}
this.worker.postMessage({
command: 'record',
buffer: buffer
});
};
source.connect(this.node);
this.node.connect(this.context.destination); //this should not be necessary
let self = {};
this.worker = new InlineWorker(function () {
let recLength = 0,
recBuffers = [],
sampleRate,
numChannels;
this.onmessage = function (e) {
switch (e.data.command) {
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config) {
sampleRate = config.sampleRate;
numChannels = config.numChannels;
initBuffers();
}
function record(inputBuffer) {
for (var channel = 0; channel < numChannels; channel++) {
recBuffers[channel].push(inputBuffer[channel]);
}
recLength += inputBuffer[0].length;
}
function exportWAV(type) {
let buffers = [];
for (let channel = 0; channel < numChannels; channel++) {
buffers.push(mergeBuffers(recBuffers[channel], recLength));
}
let interleaved;
if (numChannels === 2) {
interleaved = interleave(downsampleBuffer(buffers[0]), downsampleBuffer(buffers[1]));
} else {
interleaved = buffers[0];
}
let dataview = encodeWAV(interleaved);
let audioBlob = new Blob([dataview], {type: type});
this.postMessage({command: 'exportWAV', data: audioBlob});
}
function downsampleBuffer(buffer) {
if (16000 === sampleRate) {
return buffer;
}
var sampleRateRatio = sampleRate / 16000;
var newLength = Math.round(buffer.length / sampleRateRatio);
var result = new Float32Array(newLength);
var offsetResult = 0;
var offsetBuffer = 0;
while (offsetResult < result.length) {
var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
var accum = 0,
count = 0;
for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
accum += buffer[i];
count++;
}
result[offsetResult] = accum / count;
offsetResult++;
offsetBuffer = nextOffsetBuffer;
}
return result;
}
function getBuffer() {
let buffers = [];
for (let channel = 0; channel < numChannels; channel++) {
buffers.push(mergeBuffers(recBuffers[channel], recLength));
}
this.postMessage({command: 'getBuffer', data: buffers});
}
function clear() {
recLength = 0;
recBuffers = [];
initBuffers();
}
function initBuffers() {
for (let channel = 0; channel < numChannels; channel++) {
recBuffers[channel] = [];
}
}
function mergeBuffers(recBuffers, recLength) {
let result = new Float32Array(recLength);
let offset = 0;
for (let i = 0; i < recBuffers.length; i++) {
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR) {
let length = inputL.length + inputR.length;
let result = new Float32Array(length);
let index = 0,
inputIndex = 0;
while (index < length) {
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input) {
for (let i = 0; i < input.length; i++, offset += 2) {
let s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string) {
for (let i = 0; i < string.length; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples) {
let buffer = new ArrayBuffer(44 + samples.length * 2);
let view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* RIFF chunk length */
view.setUint32(4, 36 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (raw) */
view.setUint16(20, 1, true);
/* channel count */
view.setUint16(22, numChannels, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, numChannels * 2, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
}, self);
this.worker.postMessage({
command: 'init',
config: {
sampleRate: this.context.sampleRate,
numChannels: this.config.numChannels
}
});
this.worker.onmessage = (e) => {
let cb = this.callbacks[e.data.command].pop();
if (typeof cb == 'function') {
cb(e.data.data);
}
};
}
record() {
this.recording = true;
}
stop() {
this.recording = false;
}
clear() {
this.worker.postMessage({command: 'clear'});
}
getBuffer(cb) {
cb = cb || this.config.callback;
if (!cb) throw new Error('Callback not set');
this.callbacks.getBuffer.push(cb);
this.worker.postMessage({command: 'getBuffer'});
}
exportWAV(cb, mimeType) {
mimeType = mimeType || this.config.mimeType;
cb = cb || this.config.callback;
if (!cb) throw new Error('Callback not set');
this.callbacks.exportWAV.push(cb);
this.worker.postMessage({
command: 'exportWAV',
type: mimeType
});
}
static forceDownload(blob, filename) {
let url = (window.URL || window.webkitURL).createObjectURL(blob);
let link = window.document.createElement('a');
link.href = url;
link.download = filename || 'output.wav';
let click = document.createEvent("Event");
click.initEvent("click", true, true);
link.dispatchEvent(click);
}
}
export default Recorder;
It's code that I took from a GitHub repository, but the sample rate is 48000.
The file after downsampling the left and right buffers is uploaded here.
Calling from my component class:
recorder && recorder.exportWAV(function(blob) {
    var formData = new FormData();
    formData.append("event_name", fileName);
    formData.append("file", new File([blob], fileName, {type: 'audio/mpeg;', lastModified: Date.now()}));
    formData.append("fileExtension", "mp3");
    fetch('http://localhost:6020/uploadFile', {
        method: "POST",          // *GET, POST, PUT, DELETE, etc.
        cache: "no-cache",       // *default, no-cache, reload, force-cache, only-if-cached
        redirect: "follow",      // manual, *follow, error
        referrer: "no-referrer", // no-referrer, *client
        body: formData           // body data type must match "Content-Type" header
    })
    .then(response => response.json())
    .then(function(data){ console.log(JSON.stringify(data)) });
}, "audio/mp3");
I downsampled my audio clips (WAV files) successfully using the recorder.js library, following the solution given by ilikerei in this thread.
Add this method to recorder.js to resample the buffer to a different sample rate, before calling the encodeWAV() method.
function downsampleBuffer(buffer, rate) {
    if (rate == sampleRate) {
        return buffer;
    }
    if (rate > sampleRate) {
        throw "downsampling rate should be smaller than original sample rate";
    }
    var sampleRateRatio = sampleRate / rate;
    var newLength = Math.round(buffer.length / sampleRateRatio);
    var result = new Float32Array(newLength);
    var offsetResult = 0;
    var offsetBuffer = 0;
    while (offsetResult < result.length) {
        var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
        // Use the average value of the skipped samples
        var accum = 0, count = 0;
        for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
            accum += buffer[i];
            count++;
        }
        result[offsetResult] = accum / count;
        // Or you can simply drop the skipped samples:
        // result[offsetResult] = buffer[nextOffsetBuffer];
        offsetResult++;
        offsetBuffer = nextOffsetBuffer;
    }
    return result;
}
Then call encodeWAV() with the new, downsampled buffer:
var downsampledBuffer = downsampleBuffer(interleaved, targetRate);
var dataview = encodeWAV(downsampledBuffer);
Now use the new sample rate for encoding.
/* sample rate */
view.setUint32(24, newRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, newRate * 4, true);
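Putting the pieces together, the worker's exportWAV() would then look roughly like this (a sketch; targetRate is a hypothetical variable, and mergeBuffers/interleave/encodeWAV are the functions shown above):

var targetRate = 16000; // assumed target sample rate; pick what you need

function exportWAV(type) {
    var bufferL = mergeBuffers(recBuffersL, recLength);
    var bufferR = mergeBuffers(recBuffersR, recLength);
    var interleaved = interleave(bufferL, bufferR);
    // Resample before encoding, as described above. Note that for stereo you
    // may prefer to downsample each channel before interleaving, as the
    // question's ES6 version does.
    var downsampledBuffer = downsampleBuffer(interleaved, targetRate);
    // encodeWAV must now write targetRate (and targetRate * 4) into the header
    var dataview = encodeWAV(downsampledBuffer);
    var audioBlob = new Blob([dataview], { type: type });
    this.postMessage(audioBlob);
}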
I'm building HTML5 voice recording software with a visualizer. I want the user to record their voice, upload it to the server as a WAV blob, and then be able to choose the audio format of that file, converted with ffmpeg. What I have achieved so far is uploading the file as WAV. What I still want to do is (see the sketch after this list):
On the server side, pick a preferred web programming framework.
The web framework accepts the upload and stores the file on the server.
The web framework runs ffmpeg (command line) to process the file.
The user can download the processed file.
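For illustration, here is a minimal sketch of that server side, assuming Node.js with Express and multer for the upload (the endpoint, port, and field names are hypothetical; any framework would do) and an ffmpeg binary available on the PATH:

var express = require('express');
var multer = require('multer');
var execFile = require('child_process').execFile;

var app = express();
var upload = multer({ dest: 'uploads/' }); // stores the upload on the server

app.post('/upload', upload.single('file'), function (req, res) {
    var input = req.file.path;
    var format = req.body.format || 'mp3'; // format selected by the user
    var output = input + '.' + format;
    // run ffmpeg on the command line; the output format is inferred
    // from the file extension (e.g. ffmpeg -i input.wav output.mp3)
    execFile('ffmpeg', ['-i', input, output], function (err) {
        if (err) return res.status(500).send('conversion failed');
        res.download(output); // the user downloads the processed file
    });
});

app.listen(3000);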
here is my code so far:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({
audio: true
}, success, function (e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function getVal(value) {
// if R is pressed, we start recording
if (value == "record") {
recording = true;
// reset the buffers for the new recording
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
document.getElementById('output').innerHTML = "Recording now...";
// if S is pressed, we stop the recording and package the WAV file
} else if (value == "stop") {
// we stop recording
recording = false;
document.getElementById('output').innerHTML = "Building wav file...";
// we flat the left and right channels down
var leftBuffer = mergeBuffers(leftchannel, recordingLength);
var rightBuffer = mergeBuffers(rightchannel, recordingLength);
// we interleave both channels together
var interleaved = interleave(leftBuffer, rightBuffer);
var buffer = new ArrayBuffer(44 + interleaved.length * 2);
var view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 44 + interleaved.length * 2, true);
writeUTFBytes(view, 8, 'WAVE');
// FMT sub-chunk
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
// stereo (2 channels)
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
// data sub-chunk
writeUTFBytes(view, 36, 'data');
view.setUint32(40, interleaved.length * 2, true);
var lng = interleaved.length;
var index = 44;
var volume = 1;
for (var i = 0; i < lng; i++) {
view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
index += 2;
}
var blob = new Blob([view], {
type: 'audio/wav'
});
// let's save it locally
document.getElementById('output').innerHTML = 'Handing off the file now...';
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var li = document.createElement('li');
var au = document.createElement('audio');
var hf = document.createElement('a');
au.controls = true;
au.src = url;
hf.href = url;
hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
hf.innerHTML = hf.download;
li.appendChild(au);
li.appendChild(hf);
recordingList.appendChild(li);
}
}
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
volume = context.createGain();
// creates an audio node from the microphone incoming stream(source)
source = context.createMediaStreamSource(e);
// connect the stream(source) to the gain node
source.connect(volume);
var bufferSize = 2048;
recorder = context.createScriptProcessor(bufferSize, 2, 2);
//node for the visualizer
analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;
splitter = context.createChannelSplitter();
//when recording happens
recorder.onaudioprocess = function (e) {
if (!recording) return;
var left = e.inputBuffer.getChannelData(0);
var right = e.inputBuffer.getChannelData(1);
leftchannel.push(new Float32Array(left));
rightchannel.push(new Float32Array(right));
recordingLength += bufferSize;
// get the average for the first channel
var array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
// clear the current state
ctx.clearRect(0, 0, 1000, 325);
var gradient = ctx.createLinearGradient(0, 0, 0, 300);
gradient.addColorStop(1, '#000000');
gradient.addColorStop(0.75, '#ff0000');
gradient.addColorStop(0.25, '#ffff00');
gradient.addColorStop(0, '#ffffff');
// set the fill style
ctx.fillStyle = gradient;
drawSpectrum(array);
function drawSpectrum(array) {
for (var i = 0; i < (array.length); i++) {
var value = array[i];
ctx.fillRect(i * 5, 325 - value, 3, 325);
}
}
}
function getAverageVolume(array) {
var values = 0;
var average;
var length = array.length;
// get all the frequency amplitudes
for (var i = 0; i < length; i++) {
values += array[i];
}
average = values / length;
return average;
}
// we connect the recorder(node to destination(speakers))
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
analyser.connect(recorder);
recorder.connect(context.destination);
}
function mergeBuffers(channelBuffer, recordingLength) {
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++) {
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel) {
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length;) {
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string) {
var lng = string.length;
for (var i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
I'm building HTML5 software that records a voice; when playing that voice back, a visualizer should be in action.
Here is my code:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;
if (!navigator.getUserMedia) navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({
audio: true
}, success, function (e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
// when pressing record
function getVal(value) {
// if R is pressed, we start recording
if (value == "record") {
recording = true;
// reset the buffers for the new recording
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
document.getElementById('output').innerHTML = "Recording now...";
// if S is pressed, we stop the recording and package the WAV file
} else if (value == "stop") {
// we stop recording
recording = false;
document.getElementById('output').innerHTML = "Building wav file...";
// we flat the left and right channels down
var leftBuffer = mergeBuffers(leftchannel, recordingLength);
var rightBuffer = mergeBuffers(rightchannel, recordingLength);
// we interleave both channels together
var interleaved = interleave(leftBuffer, rightBuffer);
var buffer = new ArrayBuffer(44 + interleaved.length * 2);
var view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 44 + interleaved.length * 2, true);
writeUTFBytes(view, 8, 'WAVE');
// FMT sub-chunk
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
// stereo (2 channels)
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
// data sub-chunk
writeUTFBytes(view, 36, 'data');
view.setUint32(40, interleaved.length * 2, true);
var lng = interleaved.length;
var index = 44;
var volume = 1;
for (var i = 0; i < lng; i++) {
view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
index += 2;
}
var blob = new Blob([view], {
type: 'audio/wav'
});
// let's save it locally
document.getElementById('output').innerHTML = 'Handing off the file now...';
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var li = document.createElement('li');
var au = document.createElement('audio');
var hf = document.createElement('a');
au.controls = true;
au.src = url;
hf.href = url;
hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
hf.innerHTML = hf.download;
li.appendChild(au);
li.appendChild(hf);
recordingList.appendChild(li);
}
}
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
volume = context.createGain();
// creates an audio node from the microphone incoming stream(source)
source = context.createMediaStreamSource(e);
// connect the stream(source) to the gain node
source.connect(volume);
var bufferSize = 2048;
recorder = context.createScriptProcessor(bufferSize, 2, 2);
//node for the visualizer
analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 1024;
analyser2 = context.createAnalyser();
analyser2.smoothingTimeConstant = 0.0;
analyser2.fftSize = 1024;
splitter = context.createChannelSplitter();
//when recording happens
recorder.onaudioprocess = function (e) {
if (!recording) return;
var left = e.inputBuffer.getChannelData(0);
var right = e.inputBuffer.getChannelData(1);
// get the average of the first channel, bincount is fftsize / 2
var array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
var average = getAverageVolume(array);
// get the average for the second channel
var array2 = new Uint8Array(analyser2.frequencyBinCount);
analyser2.getByteFrequencyData(array2);
var average2 = getAverageVolume(array2);
// clear the current state
ctx.clearRect(0, 0, 60, 130);
// set the fill style
ctx.fillStyle = gradient;
// create the meters
ctx.fillRect(0, 130 - average, 25, 130);
ctx.fillRect(30, 130 - average2, 25, 130);
}
function getAverageVolume(array) {
var values = 0;
var average;
var length = array.length;
// get all the frequency amplitudes
for (var i = 0; i < length; i++) {
values += array[i];
}
average = values / length;
return average;
}
leftchannel.push(new Float32Array(left));
rightchannel.push(new Float32Array(right));
recordingLength += bufferSize;
}
// we connect the recorder(node to destination(speakers))
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
splitter.connect(analyser2, 1, 0);
analyser.connect(recorder);
recorder.connect(context.destination);
function mergeBuffers(channelBuffer, recordingLength) {
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++) {
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel) {
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length;) {
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string) {
var lng = string.length;
for (var i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
My problem is that when running it, I get an error:
cannot read property 'connect' of null on this statement: volume.connect(splitter);
What is going wrong?
The volume gain node is created only after getUserMedia succeeds, inside the success function.
By the time the code encounters the volume.connect call, volume has not yet been assigned.
You have to chain all your node connections starting from success.
Quick fix: just put these lines:
// we connect the recorder(node to destination(speakers))
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
splitter.connect(analyser2, 1, 0);
analyser.connect(recorder);
recorder.connect(context.destination);
at the end of the success function.
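In other words, the tail of success() would look roughly like this (a sketch of the fix):

function success(e) {
    // ... create context, volume, source, recorder, analyser, analyser2,
    // splitter and the onaudioprocess handler exactly as before ...

    // connect the graph only after every node has been created
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);
    splitter.connect(analyser2, 1, 0);
    analyser.connect(recorder);
    recorder.connect(context.destination);
}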
I have a feature that records audio annotations for a user. It uses HTML5 with a Flash fallback. I am able to get the audio data from the HTML5 version via getUserMedia(), but the Flash fallback provides the data as an array of floats.
I need this data as a WAV file, and I can't figure out how to do it. Any help much appreciated!
var recLength = 0,
recBuffersL = [],
recBuffersR = [],
sampleRate;
this.onmessage = function (e) {
switch (e.data.command) {
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config) {
sampleRate = config.sampleRate;
}
function record(inputBuffer) {
recBuffersL.push(inputBuffer[0]);
recBuffersR.push(inputBuffer[1]);
recLength += inputBuffer[0].length;
}
function exportWAV(type) {
var bufferL = mergeBuffers(recBuffersL, recLength);
var bufferR = mergeBuffers(recBuffersR, recLength);
var interleaved = interleave(bufferL, bufferR);
var dataview = encodeWAV(interleaved);
var audioBlob = new Blob([dataview], {
type: type
});
this.postMessage(audioBlob);
}
function getBuffer() {
var buffers = [];
buffers.push(mergeBuffers(recBuffersL, recLength));
buffers.push(mergeBuffers(recBuffersR, recLength));
this.postMessage(buffers);
}
function clear() {
recLength = 0;
recBuffersL = [];
recBuffersR = [];
}
function mergeBuffers(recBuffers, recLength) {
var result = new Float32Array(recLength);
var offset = 0;
for (var i = 0; i < recBuffers.length; i++) {
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR) {
var length = inputL.length + inputR.length;
var result = new Float32Array(length);
var index = 0,
inputIndex = 0;
while (index < length) {
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input) {
for (var i = 0; i < input.length; i++, offset += 2) {
var s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string) {
for (var i = 0; i < string.length; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples) {
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
writeString(view, 0, 'RIFF');
view.setUint32(4, 36 + samples.length * 2, true);
writeString(view, 8, 'WAVE');
writeString(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
writeString(view, 36, 'data');
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
Usage:
var AudioContext = win.webkitAudioContext,
recorder, audioContext;
function recordAudio() {
if (!config.stream) {
alert('No audio.');
return;
}
initAudioRecorder(config.audioWorkerPath);
audioContext = new AudioContext;
var mediaStreamSource = audioContext.createMediaStreamSource(config.stream);
mediaStreamSource.connect(audioContext.destination);
recorder = new window.Recorder(mediaStreamSource);
recorder && recorder.record();
}
function stopAudioRecording() {
console.warn('Audio recording stopped');
recorder && recorder.stop();
recorder && recorder.exportWAV(function (blob) {
fileType = 'wav';
setBlob(blob);
});
recorder && recorder.clear();
}
var writer;
function setBlob(blob) {
blobURL = blob;
var config = {
blob: blobURL,
type: 'audio/wav',
fileName: (Math.random() * 1000 << 1000) + '.' + fileType,
size: blobURL.length
};
writer = RecordRTCFileWriter(config);
var reader = new win.FileReader();
reader.readAsDataURL(blobURL);
reader.onload = function (event) {
blobURL2 = event.target.result;
};
}
return {
stopAudio: stopAudioRecording,
stopVideo: stopVideoRecording,
recordVideo: recordVideo,
recordAudio: recordAudio,
save: saveToDisk,
getBlob: function () {
return blobURL2;
},
toURL: function () {
return writer.toURL();
}
};
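As for the Flash fallback's array of floats: since encodeWAV() above only needs a Float32Array of interleaved samples plus the sampleRate variable, you could, in principle, feed the Flash data straight into it. A minimal sketch (floatSamples is a hypothetical input array; the worker functions above are assumed to be in scope):

var floatSamples = [/* floats from the Flash fallback */];
sampleRate = 44100; // must match the rate the samples were captured at
// encodeWAV() writes a stereo header, so the samples must be interleaved L/R
var view = encodeWAV(new Float32Array(floatSamples));
var blob = new Blob([view], { type: 'audio/wav' });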
One-line version:
What open-source software (WAMI-Recorder) or web browser (via getUserMedia) will give me the best-quality audio recordings?
High quality would be defined as a 44.1 or 48 kHz sample rate and a 16-bit sample size.
More Information:
So currently my solution is WAMI-Recorder, but I am wondering if the HTML5 specification has matured to the point in browsers where I can record without Flash and get equal or higher quality audio recordings. Currently it looks like WAMI maxes out at 22050.
I do not need cross browser support as this is for internal business use.
A non-Flash solution would also be preferred.
I found something here; I hope it helps you record audio:
<html>
<body>
<audio controls autoplay></audio>
<script type="text/javascript" src="recorder.js"> </script>
<input onclick="startRecording()" type="button" value="start recording" />
<input onclick="stopRecording()" type="button" value="stop recording and play" />
<script>
var onFail = function(e) {
console.log('Rejected!', e);
};
var onSuccess = function(s) {
var context = new webkitAudioContext();
var mediaStreamSource = context.createMediaStreamSource(s);
recorder = new Recorder(mediaStreamSource);
recorder.record();
// audio loopback
// mediaStreamSource.connect(context.destination);
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var recorder;
var audio = document.querySelector('audio');
function startRecording() {
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, onSuccess, onFail);
} else {
console.log('navigator.getUserMedia not present');
}
}
function stopRecording() {
recorder.stop();
recorder.exportWAV(function(s) {
audio.src = window.URL.createObjectURL(s);
});
}
</script>
</body>
</html>
Download the sample: https://github.com/rokgregoric/html5record/archive/master.zip
The WebRTC project is the only available solution for high-quality audio that I know of. There is currently a stable version for Chrome. I believe Firefox support is still in beta. As WebRTC matures, I am sure all browsers will support it.
http://www.webrtc.org/
Check this tutorial on how to capture audio and video using HTML5.
http://www.html5rocks.com/en/tutorials/getusermedia/intro/
I don't see any reference to the sample rate or the sample size in the APIs, but I would assume that will be possible.
Your major problem could be the implementation status across browser vendors.
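One thing you can check today is the rate the browser actually records at; the AudioContext exposes it, which is what the snippets elsewhere in this thread rely on:

var Context = window.AudioContext || window.webkitAudioContext;
var context = new Context();
// typically 44100 or 48000, depending on the device and browser
console.log(context.sampleRate);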
Save these files and use them as follows:
//HTML FILE
<html>
<body>
<audio controls autoplay></audio>
<script type="text/javascript" src="recorder.js"> </script>
<fieldset><legend>RECORD AUDIO</legend>
<input onclick="startRecording()" type="button" value="start recording" />
<input onclick="stopRecording()" type="button" value="stop recording and play" />
</fieldset>
<script>
var onFail = function(e) {
console.log('Rejected!', e);
};
var onSuccess = function(s) {
var context = new webkitAudioContext();
var mediaStreamSource = context.createMediaStreamSource(s);
recorder = new Recorder(mediaStreamSource);
recorder.record();
// audio loopback
// mediaStreamSource.connect(context.destination);
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var recorder;
var audio = document.querySelector('audio');
function startRecording() {
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, onSuccess, onFail);
} else {
console.log('navigator.getUserMedia not present');
}
}
function stopRecording() {
recorder.stop();
recorder.exportWAV(function(s) {
audio.src = window.URL.createObjectURL(s);
});
}
</script>
</body>
</html>
//JS FILES RECORDER.JS
(function(window){
var WORKER_PATH = 'recorderWorker.js';
var Recorder = function(source, cfg){
var config = cfg || {};
var bufferLen = config.bufferLen || 4096;
this.context = source.context;
this.node = this.context.createJavaScriptNode(bufferLen, 2, 2);
var worker = new Worker(config.workerPath || WORKER_PATH);
worker.postMessage({
command: 'init',
config: {
sampleRate: this.context.sampleRate
}
});
var recording = false,
currCallback;
this.node.onaudioprocess = function(e){
if (!recording) return;
worker.postMessage({
command: 'record',
buffer: [
e.inputBuffer.getChannelData(0),
e.inputBuffer.getChannelData(1)
]
});
}
this.configure = function(cfg){
for (var prop in cfg){
if (cfg.hasOwnProperty(prop)){
config[prop] = cfg[prop];
}
}
}
this.record = function(){
recording = true;
}
this.stop = function(){
recording = false;
}
this.clear = function(){
worker.postMessage({ command: 'clear' });
}
this.getBuffer = function(cb) {
currCallback = cb || config.callback;
worker.postMessage({ command: 'getBuffer' })
}
this.exportWAV = function(cb, type){
currCallback = cb || config.callback;
type = type || config.type || 'audio/wav';
if (!currCallback) throw new Error('Callback not set');
worker.postMessage({
command: 'exportWAV',
type: type
});
}
worker.onmessage = function(e){
var blob = e.data;
currCallback(blob);
}
source.connect(this.node);
this.node.connect(this.context.destination); //this should not be necessary
};
Recorder.forceDownload = function(blob, filename){
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var link = window.document.createElement('a');
link.href = url;
link.download = filename || 'output.wav';
var click = document.createEvent("Event");
click.initEvent("click", true, true);
link.dispatchEvent(click);
}
window.Recorder = Recorder;
})(window);
//ADDITIONAL JS recorderWorker.js
var recLength = 0,
recBuffersL = [],
recBuffersR = [],
sampleRate;
this.onmessage = function(e){
switch(e.data.command){
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config){
sampleRate = config.sampleRate;
}
function record(inputBuffer){
recBuffersL.push(inputBuffer[0]);
recBuffersR.push(inputBuffer[1]);
recLength += inputBuffer[0].length;
}
function exportWAV(type){
var bufferL = mergeBuffers(recBuffersL, recLength);
var bufferR = mergeBuffers(recBuffersR, recLength);
var interleaved = interleave(bufferL, bufferR);
var dataview = encodeWAV(interleaved);
var audioBlob = new Blob([dataview], { type: type });
this.postMessage(audioBlob);
}
function getBuffer() {
var buffers = [];
buffers.push( mergeBuffers(recBuffersL, recLength) );
buffers.push( mergeBuffers(recBuffersR, recLength) );
this.postMessage(buffers);
}
function clear(){
recLength = 0;
recBuffersL = [];
recBuffersR = [];
}
function mergeBuffers(recBuffers, recLength){
var result = new Float32Array(recLength);
var offset = 0;
for (var i = 0; i < recBuffers.length; i++){
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR){
var length = inputL.length + inputR.length;
var result = new Float32Array(length);
var index = 0,
inputIndex = 0;
while (index < length){
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input){
for (var i = 0; i < input.length; i++, offset+=2){
var s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string){
for (var i = 0; i < string.length; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples){
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* RIFF chunk length */
view.setUint32(4, 36 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (raw) */
view.setUint16(20, 1, true);
/* channel count */
view.setUint16(22, 2, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, 4, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
If you use the Chrome web browser, and since you ask for high-quality audio recording, you would probably be better off building a Native Client module; take a look here (Chrome developer Native Client dev guide):
Selecting a sample frame count for an audio stream involves a tradeoff between latency and CPU usage. If you want your module to have short audio latency so that it can rapidly change what’s playing in the audio stream, you should request a small sample frame count.
[...]
After the module obtains a sample frame count, it can create an audio
configuration resource. Currently the Pepper audio API supports audio
streams with the configuration settings shown above
And here you will find the audio configurations that fit your requirements:
The Pepper audio API currently lets Native Client modules play audio streams with the following configurations:
sample rate: 44,100 Hz or 48,000 Hz
bit depth: 16
channels: 2 (stereo)
This official link could also be helpful.
Regards