cross-browser Microphone access - javascript

I am looking for a solution that allows a website to access and process a stream of audio from the user's microphone. It's unfamiliar territory for me. I've been working with WebRTC examples, but so far I have only got it working on:
firefox and chrome on a 2011 mac air running Sierra.
firefox on windows 10.
My script throws errors on some other browser/OS combinations; on others it throws none — but either way it doesn't function.
Is there a better solution?
// Web Audio API compatibility shim (vendor code, de-minified and reformatted).
// Normalizes prefixed/legacy names onto the standard API surface:
//  - webkitAudioContext / webkitOfflineAudioContext -> AudioContext / OfflineAudioContext
//  - noteOn/noteOff -> start/stop on buffer sources and oscillators
//  - createGainNode/createDelayNode/createJavaScriptNode -> modern factory names
// On iOS ("like Mac OS X" UA) it wraps AudioContext so audio is "unlocked" by
// the first touchstart (iOS refuses to start audio outside a user gesture).
!function (t, e) {
    "use strict";
    t.AudioContext = t.AudioContext || t.webkitAudioContext;
    t.OfflineAudioContext = t.OfflineAudioContext || t.webkitOfflineAudioContext;
    var o = AudioContext.prototype,
        r = new AudioContext(),
        // n(standard, legacy): true when only the legacy method exists.
        n = function (t, e) {
            return void 0 === t && void 0 !== e;
        },
        c = r.createBufferSource().constructor.prototype;
    // Alias noteOn/noteOff to start/stop on AudioBufferSourceNode.
    if (n(c.start, c.noteOn) || n(c.stop, c.noteOff)) {
        var i = o.createBufferSource;
        o.createBufferSource = function () {
            var t = i.call(this);
            t.start = t.start || t.noteOn;
            t.stop = t.stop || t.noteOff;
            return t;
        };
    }
    // Same aliasing for OscillatorNode, where oscillators are supported.
    if ("function" == typeof r.createOscillator) {
        var a = r.createOscillator().constructor.prototype;
        if (n(a.start, a.noteOn) || n(a.stop, a.noteOff)) {
            var s = o.createOscillator;
            o.createOscillator = function () {
                var t = s.call(this);
                t.start = t.start || t.noteOn;
                t.stop = t.stop || t.noteOff;
                return t;
            };
        }
    }
    // Map legacy factory names onto the standard ones.
    if (void 0 === o.createGain && void 0 !== o.createGainNode) o.createGain = o.createGainNode;
    // BUG FIX: the original assigned o.createGainNode here, so a polyfilled
    // createDelay() would have produced gain nodes instead of delay nodes.
    if (void 0 === o.createDelay && void 0 !== o.createDelayNode) o.createDelay = o.createDelayNode;
    if (void 0 === o.createScriptProcessor && void 0 !== o.createJavaScriptNode) o.createScriptProcessor = o.createJavaScriptNode;
    // iOS: wrap the constructor so audio is unlocked on the first touch.
    if (-1 !== navigator.userAgent.indexOf("like Mac OS X")) {
        var u = AudioContext;
        t.AudioContext = function () {
            // Started from a touchstart handler, i.e. inside a user gesture.
            function t() {
                r.start(0);
                r.connect(n);
                n.connect(e.destination);
            }
            var e = new u(),
                o = document.body,
                r = e.createBufferSource(),
                n = e.createScriptProcessor(256, 1, 1);
            o.addEventListener("touchstart", t, !1);
            // Once audio actually flows, tear the unlock plumbing down.
            n.onaudioprocess = function () {
                r.disconnect();
                n.disconnect();
                o.removeEventListener("touchstart", t, !1);
                n.onaudioprocess = null;
            };
            return e;
        };
    }
}(window);
// Microphone capture: enumerate audio inputs into a <select>, then connect
// the chosen device through a bandpass filter + compressor and an analyser.
var context, analyser, gUM, dataArray, bufferLength, connect_source;
if (AudioContext){
context = new AudioContext();
analyser = context.createAnalyser();
// Wire a granted MediaStream into the processing graph.
function success(stream){
// Create a new volume meter and connect it.
var source = context.createMediaStreamSource(stream);
// BUG FIX: compressor and filter leaked as implicit globals; declare them.
var compressor = context.createDynamicsCompressor();
compressor.threshold.value = -50;
compressor.knee.value = 40;
compressor.ratio.value = 12;
// BUG FIX: DynamicsCompressorNode.reduction is read-only (it reports the
// current gain reduction); the original's write to it was a no-op and is removed.
compressor.attack.value = 0;
compressor.release.value = 0.25;
var filter = context.createBiquadFilter();
filter.Q.value = 8.30;
filter.frequency.value = 355;
filter.gain.value = 3.0;
filter.type = 'bandpass';
filter.connect(compressor);
source.connect( filter );
// NOTE(review): the analyser is fed the raw source, not the filtered signal,
// and the compressor's output is never connected onward — presumably
// intentional for metering; confirm.
source.connect(analyser);
analyser.fftSize = 512;
bufferLength = analyser.frequencyBinCount; // half the FFT value
dataArray = new Uint8Array(bufferLength); // create an array to store the data
};
// Shared failure handler for permission/enumeration/capture errors.
function fail(e){
if(e){}
console.log(e);
aizuchi.error();
};
var select = document.getElementById("AudioSourceSelect");
// Rebuild the device <select> from enumerateDevices() results and (re)connect
// the currently selected device.
function generateSelector(devices){
while(select.firstChild) select.removeChild(select.firstChild);
var opt;
for(var l = devices.length; l--;){
console.log(devices[l]);
if(devices[l].kind == "audioinput"){
opt = document.createElement("option");
opt.text = devices[l].label;
opt.value = devices[l].deviceId;
if(devices[l].deviceId == "default") opt.setAttribute("selected","");
select.appendChild( opt );
}
}
select.onchange = function(){
connect_source(this.value);
};
select.onchange();
}
try {
// BUG FIX: a rejected promise is NOT caught by the surrounding try/catch —
// attach .catch(fail) so enumeration failures are reported too. The
// try/catch still covers a synchronous throw when mediaDevices is missing.
navigator.mediaDevices.enumerateDevices().then(generateSelector).catch(fail);
} catch(e){
fail(e);
}
// Open the selected input device, preferring the Modernizr-prefixed legacy
// callback API when present, else the promise-based mediaDevices API.
connect_source = function(audioSource){
try {
if(Modernizr.getusermedia){
gUM = Modernizr.prefixed('getUserMedia', navigator);
gUM({video:false, audio : {deviceId: audioSource ? {exact: audioSource} : undefined}},success,fail);
} else {
navigator.mediaDevices.getUserMedia({video:false, audio : {deviceId: audioSource ? {exact: audioSource} : undefined}}).then(success,fail);
}
} catch(e){
fail(e);
}
}
}

Try
var AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
It doesn't look like the browsers have unified on the syntax of this one yet.
Source: MDN

Related

MediaRecorderAPI - Autorecord

I am currently trying to implement audio auto-recording: the user speaks, and after they stop speaking the audio should be submitted to the backend.
I already have a sample script that submits audio with start and stop click functions.
I'm trying to get sometype of value such as Amplitude, Volume or maybe a Threshold but I'm not sure if MediaRecorder supports this or if I need to look at Web Audio API or other solutions.
Can I achieve this with MediaRecorder?
Regarding the audio analysis of the mic input, the following example shows you how to take the audio captured by the mic, create an analyzer with createAnalyser method of the webkitAudioContext, connect the stream to the analyzer and calculate the FFT of the specified size, in order to calculate pitch and display the output sound wave.
// Pitch/waveform demo: shared state for the capture and drawing loop.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext = null;
var isPlaying = false;
var sourceNode = null;
var analyser = null;
var theBuffer = null;
// NOTE: despite the name, audioCtx holds the <canvas id="waveform"> element
// (not an AudioContext); canvasCtx is its 2D drawing context.
var audioCtx = null;
var mediaStreamSource = null;
// requestAnimationFrame handle, used to cancel the draw loop.
var rafID = null;
// Running x-axis sample counter for the waveform plot.
var j = 0;
var waveCanvas = null;
window.onload = function() {
audioContext = new AudioContext();
audioCtx = document.getElementById( "waveform" );
// canvasCtx is created as an implicit global here.
canvasCtx = audioCtx.getContext("2d");
};
// Legacy getUserMedia wrapper: resolve the prefixed implementation once,
// store it back on navigator, then invoke it with the shared `error`
// handler as the failure callback.
function getUserMedia(dictionary, callback) {
try {
var impl =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia;
navigator.getUserMedia = impl;
impl.call(navigator, dictionary, callback, error);
} catch (e) {
alert('getUserMedia threw exception :' + e);
}
}
// Handle a granted microphone stream: build source -> analyser and kick off
// the drawing loop.
function gotStream(stream) {
analyser = audioContext.createAnalyser();
analyser.fftSize = 1024;
// Create an AudioNode from the stream and connect it to the analyser.
mediaStreamSource = audioContext.createMediaStreamSource(stream);
mediaStreamSource.connect(analyser);
updatePitch();
}
// Start (or restart) live microphone input and reset the waveform canvas.
// Relies on the globals canvasCtx/audioCtx (the canvas), j (x counter),
// and creates buflen/buf as implicit globals used by updatePitch().
function toggleLiveInput()
{
canvasCtx.clearRect(0, 0, audioCtx.width, audioCtx.height);
canvasCtx.beginPath();
j = 0;
buflen = 1024;
buf = new Float32Array( buflen );
document.getElementById('toggleLiveInput').disabled = true;
document.getElementById('toggleLiveInputStop').disabled = false;
if (isPlaying) {
//stop playing and return
sourceNode.stop( 0 );
sourceNode = null;
//analyser = null;
isPlaying = false;
// Polyfill cancelAnimationFrame before cancelling the running draw loop.
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
window.cancelAnimationFrame( rafID );
}
// Legacy Chrome-only "goog*" constraints: disable echo cancellation, AGC,
// noise suppression and the highpass filter so the raw signal reaches the
// analyser.
getUserMedia(
{
"audio": {
"mandatory": {
"googEchoCancellation": "false",
"googAutoGainControl": "false",
"googNoiseSuppression": "false",
"googHighpassFilter": "false"
},
"optional": []
},
}, gotStream);
}
// Stop the waveform animation and flip the start/stop buttons back.
// Returns the literal "start" like the original.
function stop() {
var startBtn = document.getElementById('toggleLiveInput');
var stopBtn = document.getElementById('toggleLiveInputStop');
startBtn.disabled = false;
stopBtn.disabled = true;
//waveCanvas.closePath();
// Polyfill cancelAnimationFrame, then cancel the pending frame.
window.cancelAnimationFrame = window.cancelAnimationFrame || window.webkitCancelAnimationFrame;
window.cancelAnimationFrame(rafID);
return "start";
}
// Draw one animation frame of the live waveform and schedule the next.
// Fills buf with time-domain floats from the analyser and plots one sample
// per frame at x = j*5, scrolling the canvas left once the right edge is
// reached. prex/prey (implicit globals) remember the previous point.
function updatePitch()
{
analyser.fftSize = 1024;
analyser.getFloatTimeDomainData(buf);
canvasCtx.strokeStyle = "red";
// NOTE: this loop runs exactly once (i = 0), so only the first sample of
// each audio frame is plotted.
for (var i=0;i<2;i+=2)
{
x = j*5;
if(audioCtx.width < x)
{
// Right edge reached: shift the existing image 5px left, then append the
// new point at the edge.
x = audioCtx.width - 5;
previousImage = canvasCtx.getImageData(5, 0, audioCtx.width, audioCtx.height);
canvasCtx.putImageData(previousImage, 0, 0);
canvasCtx.beginPath();
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = "red";
prex = prex - 5;
canvasCtx.lineTo(prex,prey);
prex = x;
// Map the [-1, 1] float sample to canvas y in [0, 256].
prey = 128+(buf[i]*128);
canvasCtx.lineTo(x,128+(buf[i]*128));
canvasCtx.stroke();
}
else
{
prex = x;
prey = 128+(buf[i]*128);
canvasCtx.lineWidth = 2;
canvasCtx.lineTo(x,128+(buf[i]*128));
canvasCtx.stroke();
}
j++;
}
// Polyfill requestAnimationFrame, then schedule the next frame.
if (!window.requestAnimationFrame)
window.requestAnimationFrame = window.webkitRequestAnimationFrame;
rafID = window.requestAnimationFrame( updatePitch );
}
// Report a generic audio failure on the console (getUserMedia error callback).
function error() {
var failure = new Error('error while generating audio');
console.error(failure);
}
Try the demo here.
Example adapted from pitch-liveinput.

Variable is null (only in Mozilla)

Here is the resume of the code:
// Question code (abridged with //(...) placeholders): stream mic audio to a
// BinaryJS server. store_data is assigned asynchronously in success(), which
// races with the client 'open' handler below — the cause of the reported error.
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
// Legacy prefixed getUserMedia shim.
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
// Runs only after the user grants mic access; builds the audio graph and
// (crucially) assigns store_data.
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(e);
var bufferSize = 2048;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
//(...)
}
//(....)
client.on('open', function() {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
}
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false
//window.Stream.end();
window.Stream.pause();
}
// Fails when 'open' fires before success() has assigned store_data.
store_data.onaudioprocess = function(e){ //<---line of the error
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
}
//(..events generated from server..)
In Chrome my code works just fine. In Firefox I always get the error "store_data is undefined". Any idea why? I am declaring store_data as a global, and its value is changed when getUserMedia succeeds.
Without knowing what calls the success function, it's difficult to say exactly, but I am fairly sure you want your client.on('open') listener to be contingent on the success function running.
I don't know how it will affect the rest of the omitted code, but I would only connect the BinaryClient when the success function has run and you are sure you have store_data defined.
// Suggested restructuring: create the BinaryClient and its 'open' listener
// only inside success(), so store_data is guaranteed to exist before the
// socket can open.
function success() {
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
// do the original success code here
// now create that listener.
client.on('open', function() {
// do original code here
});
}
// you probably have a line of code that looks like this
navigator.getUserMedia({}, success);
Moving all of your code into the success function may work, but it won't be elegant. Once you've got the flow working, I would suggest refactoring the code, by splitting each logical bit up into its own function.
Yes it's a race. Your code must wait until getUserMedia succeeds and open is fired.
Promises are a great way to solve this:
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
(Use the above polyfill to access modern getUserMedia in all supported browsers.)
// Promise-based fix: haveStoreData resolves with the ScriptProcessorNode
// once the mic is granted, so the 'open' handler can wait on it and avoid
// the race between getUserMedia and the socket opening.
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
var haveStoreData = navigator.mediaDevices.getUserMedia({audio:true})
.then(function(stream) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 2048;
// Resolve the promise with the processor node; consumers get it via .then.
return context.createScriptProcessor(bufferSize, 1, 1);
});
//(....)
client.on('open', function() {
console.log("opened");
// Wait until the mic is granted and the processor node exists.
haveStoreData.then(function(store_data) {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
};
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false;
//window.Stream.end();
window.Stream.pause();
};
store_data.onaudioprocess = function(e){
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
};
//(..events generated from server..)
})
.catch(function(e) { console.error(e); });
});
This will give users time to choose "Allow" in the mic permission prompt (Unlike Chrome, Firefox asks the user for permission every time, unless they choose "Always Allow").
// Final working solution (abridged by the author): mic -> lowpass biquad ->
// gain -> ScriptProcessor, streaming Int16 PCM over BinaryJS.
var client = new BinaryClient('ws://193.136.94.233:9001');
var context = null;
// NOTE: `gain` is unused; the gain node actually created is `ganho` below.
var gain = null;
var store_data = null;
//(.....)
navigator.mediaDevices.getUserMedia({audio:true}) .then( function(stream){
context = new AudioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 4096;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
// Lowpass filter to band-limit the signal before streaming.
biquadFilter = context.createBiquadFilter();
biquadFilter.type = "lowpass";
biquadFilter.frequency.value = 11500;
biquadFilter.Q.value = 3;
// "ganho" is Portuguese for "gain"; attenuate to half volume.
ganho = context.createGain();
ganho.gain.value=0.5;
//audioInput.connect(ganho);//compresso
//ganho.connect(recorder);
//recorder.connect(context.destination);
audioInput.connect(biquadFilter);
biquadFilter.connect(ganho);
ganho.connect(store_data);
// A ScriptProcessor must reach a destination for onaudioprocess to fire.
store_data.connect(context.destination);
store_data.onaudioprocess = function(e){
if(!recording){
//console.log("nada faz nada desta vida")
return;
}
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
Stream.write(convertoFloat32ToInt16(left));
}
//audioInput.connect(store_data);
} ) .catch( function(e){ console.log(e) } );
//(...)
client.on('open', function() {
console.log("opened connection");
//haveStoreData.then(function(store_data) {
Stream = client.createStream(command_list);
//recording = false;
//(........)
// NOTE(review): the snippet as posted is truncated — the callback opened at
// client.on('open', ...) above is never closed before this line.
);
//Other function
Here is the solution to stream with BinaryJS in both Chrome and Firefox. Thanks to @jib and @Kaiido.

HTML Audio recording until silence?

I'm looking for a browser-based way of recording until a silence occurs.
HTML audio recording from the microphone is possible in Firefox and Chrome - using
Recordmp3js see:
http://nusofthq.com/blog/recording-mp3-using-only-html5-and-javascript-recordmp3-js/
and the code on github: http://github.com/nusofthq/Recordmp3js
I can't see a way to change that code to record until silence.
Record until silence can be done (and tuned) using Java for a native Android App - see here:
Android audio capture silence detection
Google Voice Search demonstrates that a browser can do it — but how can I, using JavaScript?
Any ideas?
If you use the Web Audio API, open up a live microphone audio capture by making a call to : navigator.getUserMedia , then create a node using : createScriptProcessor, then you assign to that node a callback for its event : onaudioprocess . Inside your callback function (below I use script_processor_analysis_node) you have access to the live real-time audio buffer to which you can then parse looking for silence (some length of time where amplitude is low [stays close to zero]).
for normal time domain audio curve see : array_time_domain
which is populated fresh upon each call to callback script_processor_analysis_node ... similarly for frequency domain see array_freq_domain
Turn down your speaker volume or use headphones to avoid feedback from mic -> speaker -> mic ...
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>
<script type="text/javascript">
// Capture the microphone and log the first few time- and frequency-domain
// values of each analysis frame; silence can be detected by watching the
// time-domain values stay near zero over some period of time.
var webaudio_tooling_obj = function () {
var audioContext = new AudioContext();
console.log("audio is starting up ...");
var BUFF_SIZE_RENDERER = 16384;
var audioInput = null,
microphone_stream = null,
gain_node = null,
script_processor_node = null,
script_processor_analysis_node = null,
analyser_node = null;
// Legacy prefixed getUserMedia shim, then request the mic.
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true},
function(stream) {
start_microphone(stream);
},
function(e) {
alert('Error capturing audio.');
}
);
} else { alert('getUserMedia not supported in this browser.'); }
// ---
// Log the first num_row_to_display entries of a byte array; "time" entries
// are mapped from [0, 255] back to roughly [-1, 1].
function show_some_data(given_typed_array, num_row_to_display, label) {
var size_buffer = given_typed_array.length;
var index = 0;
console.log("__________ " + label);
if (label === "time") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
var curr_value_time = (given_typed_array[index] / 128) - 1.0;
console.log(curr_value_time);
}
} else if (label === "frequency") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
console.log(given_typed_array[index]);
}
} else {
throw new Error("ERROR - must pass time or frequency");
}
}
// Placeholder render callback: grabs the mono buffer but does nothing with it.
function process_microphone_buffer(event) {
var i, N, inp, microphone_output_buffer;
microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
}
// Build the graph: mic -> gain -> speakers, plus mic -> analyser -> analysis
// ScriptProcessor for the periodic FFT dumps.
function start_microphone(stream){
gain_node = audioContext.createGain();
gain_node.connect( audioContext.destination );
microphone_stream = audioContext.createMediaStreamSource(stream);
microphone_stream.connect(gain_node);
script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
script_processor_node.onaudioprocess = process_microphone_buffer;
microphone_stream.connect(script_processor_node);
// --- enable volume control for output speakers
document.getElementById('volume').addEventListener('change', function() {
var curr_volume = this.value;
gain_node.gain.value = curr_volume;
console.log("curr_volume ", curr_volume);
});
// --- setup FFT
script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
script_processor_analysis_node.connect(gain_node);
analyser_node = audioContext.createAnalyser();
analyser_node.smoothingTimeConstant = 0;
analyser_node.fftSize = 2048;
microphone_stream.connect(analyser_node);
analyser_node.connect(script_processor_analysis_node);
var buffer_length = analyser_node.frequencyBinCount;
var array_freq_domain = new Uint8Array(buffer_length);
var array_time_domain = new Uint8Array(buffer_length);
console.log("buffer_length " + buffer_length);
script_processor_analysis_node.onaudioprocess = function() {
// get the average for the first channel
analyser_node.getByteFrequencyData(array_freq_domain);
analyser_node.getByteTimeDomainData(array_time_domain);
// draw the spectrogram
// NOTE(review): playbackState/PLAYING_STATE belong to the legacy
// AudioBufferSourceNode API; on a MediaStreamAudioSourceNode both are
// undefined in modern browsers, so this guard may never pass — confirm.
if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {
show_some_data(array_freq_domain, 5, "frequency");
show_some_data(array_time_domain, 5, "time"); // store this to record to aggregate buffer/file
// examine array_time_domain for near zero values over some time period
}
};
}
}(); // webaudio_tooling_obj = function()
</script>
</head>
<body>
<p>Volume</p>
<input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
</body>
</html>
This is an old post but I am sure many will have the same problem so I am posting my solution here.
Use hark.js
Below is sample demo code i used for my electron app
// Voice-activity detection via hark: log when speech starts and stops.
hark = require('./node_modules/hark/hark.bundle.js')
navigator.getUserMedia({ audio : true}, onMediaSuccess, function(){});
// hark wraps the captured stream and emits speaking / stopped_speaking events.
function onMediaSuccess(stream) {
var speechEvents = hark(stream, {});
var logLabel = function (label) {
return function () { console.log(label); };
};
speechEvents.on('speaking', logLabel('speaking'));
speechEvents.on('stopped_speaking', logLabel('stopped_speaking'));
};
The solution from #Scott Stensland doesn't allow me to parse for silence. I am getting the same value for when I parse the two arrays - that is I'm getting 0 always when parsing arrayFreqDomain and 128 always when parsing arrayTimeDomain
// Silence-detection attempt: sample the analyser inside onaudioprocess and
// log the first few time-domain bytes (128 == silence midpoint).
let analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0;
analyser.fftSize = 2048;
let buffLength = analyser.frequencyBinCount;
let arrayFreqDomain = new Uint8Array(buffLength);
let arrayTimeDomain = new Uint8Array(buffLength);
processor.connect(analyser);
processor.onaudioprocess = (event) => {
/**
*
* Parse live real-time buffer looking for silence
*
**/
analyser.getByteFrequencyData(arrayFreqDomain);
analyser.getByteTimeDomainData(arrayTimeDomain);
if (context.state === "running") {
let sizeBuffer = arrayTimeDomain.length;
let index = 0;
for (; index < 5 && index < sizeBuffer; index += 1) {
// BUG FIX: the stray "<----" marker after this statement was a syntax
// error in the original snippet; it is now part of this comment.
console.log(arrayTimeDomain[index]);
}
}
}

HTML5 record audio to file

What I ultimately want to do is record from the user's microphone, and upload the file to the server when they're done. So far, I've managed to make a stream to an element with the following code:
var audio = document.getElementById("audio_preview");
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
navigator.getUserMedia({video: false, audio: true}, function(stream) {
audio.src = window.URL.createObjectURL(stream);
}, onRecordFail);
var onRecordFail = function (e) {
console.log(e);
}
How do I go from that, to recording to a file?
There is a fairly complete recording demo available at: http://webaudiodemos.appspot.com/AudioRecorder/index.html
It allows you to record audio in the browser, then gives you the option to export and download what you've recorded.
You can view the source of that page to find links to the javascript, but to summarize, there's a Recorder object that contains an exportWAV method, and a forceDownload method.
The code shown below is copyrighted to Matt Diamond and available for use under MIT license. The original files are here:
http://webaudiodemos.appspot.com/AudioRecorder/index.html
http://webaudiodemos.appspot.com/AudioRecorder/js/recorderjs/recorderWorker.js
Save these files and use them as follows:
// Recorder.js (Matt Diamond, MIT): captures stereo audio from a source node
// via a ScriptProcessor and ships sample blocks to a Web Worker that buffers
// and encodes them. The worker protocol (init/record/exportWAV/getBuffer/
// clear) must match recorderWorker.js below.
(function(window){
var WORKER_PATH = 'recorderWorker.js';
// source: any AudioNode to record from; cfg: { bufferLen, workerPath,
// callback, type } — all optional.
var Recorder = function(source, cfg){
var config = cfg || {};
var bufferLen = config.bufferLen || 4096;
this.context = source.context;
this.node = this.context.createScriptProcessor(bufferLen, 2, 2);
var worker = new Worker(config.workerPath || WORKER_PATH);
// Tell the worker the context sample rate so it can write the WAV header.
worker.postMessage({
command: 'init',
config: {
sampleRate: this.context.sampleRate
}
});
var recording = false,
currCallback;
// Forward each audio block (both channels) to the worker while recording.
this.node.onaudioprocess = function(e){
if (!recording) return;
worker.postMessage({
command: 'record',
buffer: [
e.inputBuffer.getChannelData(0),
e.inputBuffer.getChannelData(1)
]
});
}
// Merge new options into the existing config.
this.configure = function(cfg){
for (var prop in cfg){
if (cfg.hasOwnProperty(prop)){
config[prop] = cfg[prop];
}
}
}
this.record = function(){
recording = true;
}
this.stop = function(){
recording = false;
}
this.clear = function(){
worker.postMessage({ command: 'clear' });
}
// Request the raw (non-interleaved) buffers; result arrives via cb.
this.getBuffer = function(cb) {
currCallback = cb || config.callback;
worker.postMessage({ command: 'getBuffer' })
}
// Request a WAV Blob of everything recorded so far; result arrives via cb.
this.exportWAV = function(cb, type){
currCallback = cb || config.callback;
type = type || config.type || 'audio/wav';
if (!currCallback) throw new Error('Callback not set');
worker.postMessage({
command: 'exportWAV',
type: type
});
}
// Every worker reply (Blob or buffer array) goes to the last callback set.
worker.onmessage = function(e){
var blob = e.data;
currCallback(blob);
}
source.connect(this.node);
this.node.connect(this.context.destination); //this should not be necessary
};
// Trigger a client-side download of a Blob via a synthetic click.
Recorder.forceDownload = function(blob, filename){
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var link = window.document.createElement('a');
link.href = url;
link.download = filename || 'output.wav';
var click = document.createEvent("Event");
click.initEvent("click", true, true);
link.dispatchEvent(click);
}
window.Recorder = Recorder;
})(window);
//ADDITIONAL JS recorderWorker.js
// Worker-global recording state: total sample count per channel and the
// per-channel lists of Float32Array blocks received from the main thread.
var recLength = 0,
recBuffersL = [],
recBuffersR = [],
sampleRate; // set by the 'init' message; used when encoding the WAV header
// Worker message router: dispatch each command from the main thread to its
// handler. Unknown commands are ignored, as in the original switch.
this.onmessage = function(e){
var msg = e.data;
var command = msg.command;
if (command === 'init') {
init(msg.config);
} else if (command === 'record') {
record(msg.buffer);
} else if (command === 'exportWAV') {
exportWAV(msg.type);
} else if (command === 'getBuffer') {
getBuffer();
} else if (command === 'clear') {
clear();
}
};
// 'init' handler: remember the main thread's AudioContext sample rate for
// the WAV header written by encodeWAV().
function init(config){
var cfg = config;
sampleRate = cfg.sampleRate;
}
// 'record' handler: append one [left, right] pair of Float32Array sample
// blocks and bump the running per-channel sample count.
function record(inputBuffer){
var left = inputBuffer[0];
var right = inputBuffer[1];
recLength += left.length;
recBuffersL.push(left);
recBuffersR.push(right);
}
// 'exportWAV' handler: merge, interleave and WAV-encode everything recorded
// so far, then post the result back as a Blob of the requested MIME type.
function exportWAV(type){
var left = mergeBuffers(recBuffersL, recLength);
var right = mergeBuffers(recBuffersR, recLength);
var encoded = encodeWAV(interleave(left, right));
this.postMessage(new Blob([encoded], { type: type }));
}
// 'getBuffer' handler: post the raw (non-interleaved) [left, right] merged
// channel buffers back to the main thread.
function getBuffer() {
this.postMessage([
mergeBuffers(recBuffersL, recLength),
mergeBuffers(recBuffersR, recLength)
]);
}
// 'clear' handler: drop all recorded audio and reset the sample count.
function clear(){
recBuffersL = [];
recBuffersR = [];
recLength = 0;
}
// Concatenate a list of Float32Array chunks into one Float32Array holding
// recLength total samples.
function mergeBuffers(recBuffers, recLength){
var merged = new Float32Array(recLength);
var cursor = 0;
recBuffers.forEach(function (chunk) {
merged.set(chunk, cursor);
cursor += chunk.length;
});
return merged;
}
// Zip equal-length left/right channel samples into a single LRLR...
// Float32Array of combined length.
function interleave(inputL, inputR){
var total = inputL.length + inputR.length;
var result = new Float32Array(total);
for (var src = 0, dst = 0; dst < total; src++) {
result[dst++] = inputL[src];
result[dst++] = inputR[src];
}
return result;
}
// Clamp each float sample to [-1, 1] and write it as little-endian signed
// 16-bit PCM into the DataView starting at the given byte offset.
function floatTo16BitPCM(output, offset, input){
var pos = offset;
for (var i = 0; i < input.length; i++) {
var sample = Math.max(-1, Math.min(1, input[i]));
output.setInt16(pos, sample < 0 ? sample * 0x8000 : sample * 0x7FFF, true);
pos += 2;
}
}
// Write an ASCII string into the DataView, one byte per UTF-16 code unit.
function writeString(view, offset, string){
Array.prototype.forEach.call(string, function (ch, i) {
view.setUint8(offset + i, ch.charCodeAt(0));
});
}
// Build a WAV file (44-byte RIFF/PCM header, stereo, 16-bit little-endian)
// around the interleaved samples and return the backing DataView. Uses the
// worker-global sampleRate set by init(). Kept byte-for-byte: the header
// layout depends on exact offsets.
function encodeWAV(samples){
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* file length */
// NOTE(review): the RIFF chunk size is conventionally 36 + data bytes; this
// code ships 32 + data as in the original recorder.js — most players ignore
// the discrepancy, but confirm before changing.
view.setUint32(4, 32 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (raw) */
view.setUint16(20, 1, true);
/* channel count */
view.setUint16(22, 2, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, 4, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
<!-- Demo page for recorder.js: record the mic, then export a WAV and play it
     back in the <audio> element. -->
<html>
<body>
<audio controls autoplay></audio>
<script type="text/javascript" src="recorder.js"> </script>
<fieldset><legend>RECORD AUDIO</legend>
<input onclick="startRecording()" type="button" value="start recording" />
<input onclick="stopRecording()" type="button" value="stop recording and play" />
</fieldset>
<script>
var onFail = function(e) {
console.log('Rejected!', e);
};
// Mic granted: wrap the stream in a Recorder and start buffering samples.
// NOTE: uses the prefixed webkitAudioContext constructor (WebKit-era code).
var onSuccess = function(s) {
var context = new webkitAudioContext();
var mediaStreamSource = context.createMediaStreamSource(s);
recorder = new Recorder(mediaStreamSource);
recorder.record();
// audio loopback
// mediaStreamSource.connect(context.destination);
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var recorder;
var audio = document.querySelector('audio');
// Request the mic; recording starts in onSuccess.
function startRecording() {
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, onSuccess, onFail);
} else {
console.log('navigator.getUserMedia not present');
}
}
// Stop buffering, encode a WAV Blob, and play it via an object URL.
function stopRecording() {
recorder.stop();
recorder.exportWAV(function(s) {
audio.src = window.URL.createObjectURL(s);
});
}
</script>
</body>
</html>
Update now Chrome also supports MediaRecorder API from v47. The same thing to do would be to use it( guessing native recording method is bound to be faster than work arounds), the API is really easy to use, and you would find tons of answers as to how to upload a blob for the server.
Demo - would work in Chrome and Firefox, intentionally left out pushing blob to server...
Code Source
Currently, there are three ways to do it:
as wav[ all code client-side, uncompressed recording], you can check out --> Recorderjs. Problem: file size is quite big, more upload bandwidth required.
as mp3[ all code client-side, compressed recording], you can check out --> mp3Recorder. Problem: personally, I find the quality bad, also there is this licensing issue.
as ogg [ client+ server(node.js) code, compressed recording, infinite hours of recording without browser crash ], you can check out --> recordOpus, either only client-side recording, or client-server bundling, the choice is yours.
ogg recording example( only firefox):
// Minimal MediaRecorder usage (Firefox-only at the time of the answer);
// `stream` is a MediaStream obtained from getUserMedia. The "..." below is a
// placeholder in the original snippet, not runnable code.
var mediaRecorder = new MediaRecorder(stream);
mediaRecorder.start(); // to start recording.
...
mediaRecorder.stop(); // to stop recording.
// Fired with a Blob of encoded audio once data is available.
mediaRecorder.ondataavailable = function(e) {
// do something with the data.
}
Fiddle Demo for ogg recording.
This is a simple JavaScript sound recorder and editor. You can try it.
https://www.danieldemmel.me/JSSoundRecorder/
Can download from here
https://github.com/daaain/JSSoundRecorder
The question is very old and many of the answers are not supported in the current version of the browser. I was trying to create the audio recorder using simple html, css, and js. I further went on to use the same code in electron to make a cross platform application.
<html>
<head>
<title>Recorder App</title>
</head>
<h2>Recorder App</h2>
<p>
<button type="button" id="record">Record</button>
<button type="button" id="stopRecord" disabled>Stop</button>
</p>
<p>
<audio id="recordedAudio"></audio>
</p>
<script>
// MediaRecorder-based recorder: capture mic audio; when recording stops,
// play the result back and hand the Blob to sendData() for upload.
navigator.mediaDevices.getUserMedia({audio:true})
.then(stream => {handlerFunction(stream)})
function handlerFunction(stream) {
rec = new MediaRecorder(stream);
rec.ondataavailable = e => {
audioChunks.push(e.data);
// "inactive" means stop() was called; assemble and play the recording.
if (rec.state == "inactive"){
let blob = new Blob(audioChunks,{type:'audio/mp3'});
recordedAudio.src = URL.createObjectURL(blob);
recordedAudio.controls=true;
recordedAudio.autoplay=true;
sendData(blob)
}
}
}
// Hook for uploading the finished recording to a server.
function sendData(data) {}
record.onclick = e => {
record.disabled = true;
record.style.backgroundColor = "blue"
stopRecord.disabled=false;
audioChunks = [];
rec.start();
}
stopRecord.onclick = e => {
record.disabled = false;
// BUG FIX: the original wrote `stop.disabled=true`, which targets the
// global window.stop function instead of the Stop button.
stopRecord.disabled=true;
record.style.backgroundColor = "red"
rec.stop();
}
</script>
</html>
The above code works in Windows 10, Mac, Linux, and obviously, on both google chrome and firefox.
You can use Recordmp3js from GitHub to achieve your requirements. You can record from user's microphone and then get the file as an mp3. Finally upload it to your server.
I used this in my demo. There is a already a sample available with the source code by the author in this location :
https://github.com/Audior/Recordmp3js
The demo is here:
http://audior.ec/recordmp3js/
But currently works only on Chrome and Firefox.
Seems to work fine and pretty simple. Hope this helps.
Here's a gitHub project that does just that.
It records audio from the browser in mp3 format, and it automatically saves it to the webserver.
https://github.com/Audior/Recordmp3js
You can also view a detailed explanation of the implementation:
http://audior.ec/blog/recording-mp3-using-only-html5-and-javascript-recordmp3-js/
Stream audio in realtime without waiting for recording to end:
https://github.com/noamtcohen/AudioStreamer
This streams PCM data but you could modify the code to stream mp3 or Speex

High Quality Audio Recording in Web Browser

One line version:
What open source software (WAMI-Recorder)/web browser (via getUserMedia) will give me the best quality audio recordings?
High quality would be defined as (44.1 or 48 sample rate) and 16 bit sample size.
More Information:
So currently my solution is WAMI-Recorder, but I am wondering if the HTML5 specification has matured to a point in browser such that I can record without Flash and get equal or higher quality audio recordings. Currently it looks like WAMI maxes out at 22050.
I do not need cross browser support as this is for internal business use.
A non-Flash solution would also be preferred.
I found something here. hope it will help you to record audio
<!-- recorder.js demo page (same pattern as the earlier answer): record the
     mic, export a WAV, and play it back in the <audio> element. -->
<html>
<body>
<audio controls autoplay></audio>
<script type="text/javascript" src="recorder.js"> </script>
<input onclick="startRecording()" type="button" value="start recording" />
<input onclick="stopRecording()" type="button" value="stop recording and play" />
<script>
var onFail = function(e) {
console.log('Rejected!', e);
};
// Mic granted: wrap the stream in a Recorder and start buffering samples.
// NOTE: uses the prefixed webkitAudioContext constructor (WebKit-era code).
var onSuccess = function(s) {
var context = new webkitAudioContext();
var mediaStreamSource = context.createMediaStreamSource(s);
recorder = new Recorder(mediaStreamSource);
recorder.record();
// audio loopback
// mediaStreamSource.connect(context.destination);
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var recorder;
var audio = document.querySelector('audio');
// Request the mic; recording starts in onSuccess.
function startRecording() {
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, onSuccess, onFail);
} else {
console.log('navigator.getUserMedia not present');
}
}
// Stop buffering, encode a WAV Blob, and play it via an object URL.
function stopRecording() {
recorder.stop();
recorder.exportWAV(function(s) {
audio.src = window.URL.createObjectURL(s);
});
}
</script>
</body>
</html>
download the sample https://github.com/rokgregoric/html5record/archive/master.zip
The WebRTC project is the only available solution for high quality audio that I know of. There is currently a stable version for chrome. I believe firefox is still in the beta stage. As WebRTC matures I am sure all browsers will provide support for this.
http://www.webrtc.org/
Check this tutorial on how to capture audio and video using HTML5.
http://www.html5rocks.com/en/tutorials/getusermedia/intro/
I don't see any reference about the sample rate or the sample size in the API's, but I would assume that will be possible.
Your major problem could be the implementation status by different browser vendors.
Save these files and use:
//HTML FILE
<html>
<body>
<audio controls autoplay></audio>
<script type="text/javascript" src="recorder.js"> </script>
<fieldset><legend>RECORD AUDIO</legend>
<input onclick="startRecording()" type="button" value="start recording" />
<input onclick="stopRecording()" type="button" value="stop recording and play" />
</fieldset>
<script>
// Called when microphone access is denied or capture fails.
var onFail = function(e) {
    console.log('Rejected!', e);
};
// Called with the live MediaStream once the user grants microphone access.
var onSuccess = function(s) {
    // FIX: `new webkitAudioContext()` throws on Firefox and other non-WebKit
    // browsers; use the standard constructor and fall back to the prefixed one.
    var context = new (window.AudioContext || window.webkitAudioContext)();
    var mediaStreamSource = context.createMediaStreamSource(s);
    recorder = new Recorder(mediaStreamSource);
    recorder.record();
    // audio loopback
    // mediaStreamSource.connect(context.destination);
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var recorder;
var audio = document.querySelector('audio');
// Ask for microphone access and start recording on success. Prefers the
// modern promise-based mediaDevices API and falls back to the legacy
// callback-style getUserMedia for older browsers.
function startRecording() {
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia({audio: true}).then(onSuccess).catch(onFail);
    } else if (navigator.getUserMedia) {
        navigator.getUserMedia({audio: true}, onSuccess, onFail);
    } else {
        console.log('navigator.getUserMedia not present');
    }
}
// Stop capturing, have the recorder encode the buffered audio as WAV,
// and play the result back through the <audio> element.
function stopRecording() {
    recorder.stop();
    recorder.exportWAV(function(s) {
        audio.src = window.URL.createObjectURL(s);
    });
}
</script>
</body>
</html>
//JS FILES RECORDER.JS
(function(window){
// Path of the background worker that buffers samples and encodes WAV files.
var WORKER_PATH = 'recorderWorker.js';

/**
 * Record the output of a Web Audio node.
 *
 * The node is tapped with a ScriptProcessor; each processing quantum's two
 * channels are posted to a worker (recorderWorker.js), which accumulates them
 * and can return the recording as a WAV Blob or as raw Float32Arrays.
 *
 * @param {AudioNode} source - node to record (e.g. a MediaStreamSource).
 * @param {Object} [cfg] - options: bufferLen (frames per onaudioprocess
 *     callback, default 4096), workerPath, callback (default result
 *     callback), type (export MIME type, default 'audio/wav').
 */
var Recorder = function(source, cfg){
  var config = cfg || {};
  var bufferLen = config.bufferLen || 4096;
  this.context = source.context;
  // FIX: createJavaScriptNode is the legacy name and has been removed from
  // modern browsers; use the standard createScriptProcessor when present and
  // fall back to the old method for ancient WebKit builds.
  this.node = this.context.createScriptProcessor
    ? this.context.createScriptProcessor(bufferLen, 2, 2)
    : this.context.createJavaScriptNode(bufferLen, 2, 2);
  var worker = new Worker(config.workerPath || WORKER_PATH);
  // Tell the worker the capture rate so the WAV header it writes is correct.
  worker.postMessage({
    command: 'init',
    config: {
      sampleRate: this.context.sampleRate
    }
  });
  var recording = false,
      currCallback;
  // While recording, forward each quantum's left/right channel data.
  this.node.onaudioprocess = function(e){
    if (!recording) return;
    worker.postMessage({
      command: 'record',
      buffer: [
        e.inputBuffer.getChannelData(0),
        e.inputBuffer.getChannelData(1)
      ]
    });
  }
  // Merge extra settings into the existing config object.
  this.configure = function(cfg){
    for (var prop in cfg){
      if (cfg.hasOwnProperty(prop)){
        config[prop] = cfg[prop];
      }
    }
  }
  this.record = function(){
    recording = true;
  }
  this.stop = function(){
    recording = false;
  }
  // Discard everything recorded so far.
  this.clear = function(){
    worker.postMessage({ command: 'clear' });
  }
  // Request the raw Float32 channel buffers; cb receives [left, right].
  this.getBuffer = function(cb) {
    currCallback = cb || config.callback;
    worker.postMessage({ command: 'getBuffer' })
  }
  // Ask the worker to encode the recording as WAV; cb receives a Blob.
  this.exportWAV = function(cb, type){
    currCallback = cb || config.callback;
    type = type || config.type || 'audio/wav';
    if (!currCallback) throw new Error('Callback not set');
    worker.postMessage({
      command: 'exportWAV',
      type: type
    });
  }
  // Every worker reply (Blob or buffer array) is routed to the callback
  // most recently registered via getBuffer()/exportWAV().
  worker.onmessage = function(e){
    var blob = e.data;
    currCallback(blob);
  }
  source.connect(this.node);
  // NOTE(review): some browsers only fire onaudioprocess when the processor
  // is connected to the destination, so keep this connection even though the
  // original author noted it "should not be necessary".
  this.node.connect(this.context.destination);
};

// Trigger a client-side download of `blob` via a synthetic click on a
// temporary <a download> link.
Recorder.forceDownload = function(blob, filename){
  var url = (window.URL || window.webkitURL).createObjectURL(blob);
  var link = window.document.createElement('a');
  link.href = url;
  link.download = filename || 'output.wav';
  var click = document.createEvent("Event");
  click.initEvent("click", true, true);
  link.dispatchEvent(click);
}
window.Recorder = Recorder;
})(window);
//ADDITIONAL JS recorderWorker.js
// Recording state for this worker: per-channel chunk lists, the total number
// of samples per channel recorded so far, and the capture sample rate.
var recLength = 0;
var recBuffersL = [];
var recBuffersR = [];
var sampleRate;

// Message dispatcher: the main thread drives this worker with small command
// objects ({command, ...payload}); unknown commands are silently ignored.
this.onmessage = function(e){
  var msg = e.data;
  if (msg.command === 'init'){
    init(msg.config);
  } else if (msg.command === 'record'){
    record(msg.buffer);
  } else if (msg.command === 'exportWAV'){
    exportWAV(msg.type);
  } else if (msg.command === 'getBuffer'){
    getBuffer();
  } else if (msg.command === 'clear'){
    clear();
  }
};
// Remember the capture sample rate sent by the main thread; encodeWAV()
// writes it into the WAV header later.
function init(cfg){
  sampleRate = cfg.sampleRate;
}
// Append one processing quantum. `inputBuffer` is a two-element array holding
// the left and right channel Float32Arrays for this callback; recLength
// counts samples per channel.
function record(inputBuffer){
  var left = inputBuffer[0];
  var right = inputBuffer[1];
  recBuffersL.push(left);
  recBuffersR.push(right);
  recLength += left.length;
}
// Merge and interleave everything recorded so far, wrap it in a WAV
// container, and post the result back to the main thread as a Blob of the
// requested MIME type.
function exportWAV(type){
  var left = mergeBuffers(recBuffersL, recLength);
  var right = mergeBuffers(recBuffersR, recLength);
  var view = encodeWAV(interleave(left, right));
  this.postMessage(new Blob([view], { type: type }));
}
// Post the raw (un-encoded) channel data back to the main thread as a
// two-element array: [left Float32Array, right Float32Array].
function getBuffer() {
  this.postMessage([
    mergeBuffers(recBuffersL, recLength),
    mergeBuffers(recBuffersR, recLength)
  ]);
}
// Drop all recorded samples so the next recording starts from empty buffers.
function clear(){
  recBuffersL = [];
  recBuffersR = [];
  recLength = 0;
}
// Concatenate a list of Float32Array chunks into one contiguous buffer of
// `recLength` total samples, copying each chunk in order with set().
function mergeBuffers(recBuffers, recLength){
  var joined = new Float32Array(recLength);
  var writePos = 0;
  for (var idx = 0; idx < recBuffers.length; idx++){
    var chunk = recBuffers[idx];
    joined.set(chunk, writePos);
    writePos += chunk.length;
  }
  return joined;
}
// Weave the left and right channel buffers into a single L,R,L,R,...
// Float32Array, the frame layout stereo PCM sample data requires.
function interleave(inputL, inputR){
  var total = inputL.length + inputR.length;
  var woven = new Float32Array(total);
  var frame = 0;
  for (var write = 0; write < total; frame++){
    woven[write++] = inputL[frame];
    woven[write++] = inputR[frame];
  }
  return woven;
}
// Convert float samples in [-1, 1] to little-endian signed 16-bit PCM,
// writing into `output` starting at byte `offset`. Out-of-range values are
// clamped; negatives scale by 0x8000 and positives by 0x7FFF so both
// extremes map onto the full int16 range.
function floatTo16BitPCM(output, offset, input){
  var pos = offset;
  for (var i = 0; i < input.length; i++){
    var sample = input[i];
    if (sample > 1) sample = 1;
    else if (sample < -1) sample = -1;
    var scaled = sample < 0 ? sample * 0x8000 : sample * 0x7FFF;
    output.setInt16(pos, scaled, true);
    pos += 2;
  }
}
// Write the characters of `string` as single bytes (their char codes) into
// `view` starting at byte `offset`; used for the ASCII tags ("RIFF", "WAVE",
// "fmt ", "data") in the WAV header.
function writeString(view, offset, string){
  Array.prototype.forEach.call(string, function(ch, i){
    view.setUint8(offset + i, ch.charCodeAt(0));
  });
}
// Build a complete 16-bit stereo PCM WAV file — a 44-byte RIFF header
// followed by the sample data — from an interleaved Float32 sample array.
// Uses the worker-global `sampleRate` set by init(). All multi-byte fields
// are little-endian, as the RIFF/WAVE format requires.
function encodeWAV(samples){
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* FIX: the RIFF chunk size is the file size minus the 8-byte "RIFF"+length
   preamble, i.e. 36 + data bytes; the original wrote 32, declaring the file
   4 bytes shorter than it actually is */
view.setUint32(4, 36 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (1 = uncompressed integer PCM) */
view.setUint16(20, 1, true);
/* channel count (stereo) */
view.setUint16(22, 2, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, 4, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
/* convert the float samples to little-endian signed 16-bit PCM in place */
floatTo16BitPCM(view, 44, samples);
return view;
}
If you use Chrome Web Browser, and as you ask for High Quality Audio Recording, you probably be better to build a native client module, you may take a look here (chrome developper native client dev guide):
Selecting a sample frame count for an audio stream involves a tradeoff between latency and CPU usage. If you want your module to have short audio latency so that it can rapidly change what’s playing in the audio stream, you should request a small sample frame count.
[...]
After the module obtains a sample frame count, it can create an audio
configuration resource. Currently the Pepper audio API supports audio
streams with the configuration settings shown above
And here you will find audio configuration which fit your requirements
The Pepper audio API currently lets Native Client modules play audio streams with the following configurations:
sample rate: 44,100 Hz or 48,000 Hz
bit depth: 16
channels: 2 (stereo)
This official link could be also helpful.
Regards

Categories

Resources