One line version:
What open source software (WAMI-Recorder) or web browser API (via getUserMedia) will give me the best quality audio recordings?
High quality would be defined as a 44.1 or 48 kHz sample rate and a 16-bit sample size.
More Information:
So currently my solution is WAMI-Recorder, but I am wondering whether the HTML5 specification has matured to the point in browsers that I can record without Flash and get equal or higher quality audio recordings. Currently it looks like WAMI maxes out at 22,050 Hz.
I do not need cross-browser support, as this is for internal business use.
A non-Flash solution would also be preferred.
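For reference, browsers that implement the MediaStreamTrack constraints spec let you at least request those parameters and then verify what the capture pipeline actually delivered. A minimal sketch (the constraint values are hints rather than guarantees, and support varies by browser):
// Request roughly CD-quality capture; browsers may ignore or clamp these hints.
navigator.mediaDevices.getUserMedia({
    audio: { sampleRate: 48000, sampleSize: 16, channelCount: 2 }
}).then(function (stream) {
    var track = stream.getAudioTracks()[0];
    // getSettings() reports what was actually applied (it may omit sampleRate/sampleSize)
    console.log('track settings:', track.getSettings());
    var context = new (window.AudioContext || window.webkitAudioContext)();
    console.log('AudioContext sample rate:', context.sampleRate); // typically 44100 or 48000
}).catch(function (err) {
    console.log('getUserMedia failed:', err);
});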
I found something here; hope it will help you record audio.
<html>
<body>
<audio controls autoplay></audio>
<script type="text/javascript" src="recorder.js"> </script>
<input onclick="startRecording()" type="button" value="start recording" />
<input onclick="stopRecording()" type="button" value="stop recording and play" />
<script>
var onFail = function(e) {
console.log('Rejected!', e);
};
var onSuccess = function(s) {
var context = new (window.AudioContext || window.webkitAudioContext)();
var mediaStreamSource = context.createMediaStreamSource(s);
recorder = new Recorder(mediaStreamSource);
recorder.record();
// audio loopback
// mediaStreamSource.connect(context.destination);
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var recorder;
var audio = document.querySelector('audio');
function startRecording() {
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, onSuccess, onFail);
} else {
console.log('navigator.getUserMedia not present');
}
}
function stopRecording() {
recorder.stop();
recorder.exportWAV(function(s) {
audio.src = window.URL.createObjectURL(s);
});
}
</script>
</body>
</html>
Download the sample: https://github.com/rokgregoric/html5record/archive/master.zip
The WebRTC project is the only available solution for high quality audio that I know of. There is currently a stable version for Chrome; I believe Firefox is still in the beta stage. As WebRTC matures, I am sure all browsers will provide support for it.
http://www.webrtc.org/
Check this tutorial on how to capture audio and video using HTML5:
http://www.html5rocks.com/en/tutorials/getusermedia/intro/
I don't see any reference to the sample rate or the sample size in the APIs, but I would assume that will be possible.
Your major problem could be the implementation status across different browser vendors.
Save these files and use them as shown below.
//HTML FILE
<html>
<body>
<audio controls autoplay></audio>
<script type="text/javascript" src="recorder.js"> </script>
<fieldset><legend>RECORD AUDIO</legend>
<input onclick="startRecording()" type="button" value="start recording" />
<input onclick="stopRecording()" type="button" value="stop recording and play" />
</fieldset>
<script>
var onFail = function(e) {
console.log('Rejected!', e);
};
var onSuccess = function(s) {
var context = new (window.AudioContext || window.webkitAudioContext)();
var mediaStreamSource = context.createMediaStreamSource(s);
recorder = new Recorder(mediaStreamSource);
recorder.record();
// audio loopback
// mediaStreamSource.connect(context.destination);
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var recorder;
var audio = document.querySelector('audio');
function startRecording() {
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, onSuccess, onFail);
} else {
console.log('navigator.getUserMedia not present');
}
}
function stopRecording() {
recorder.stop();
recorder.exportWAV(function(s) {
audio.src = window.URL.createObjectURL(s);
});
}
</script>
</body>
</html>
//JS FILES RECORDER.JS
(function(window){
var WORKER_PATH = 'recorderWorker.js';
var Recorder = function(source, cfg){
var config = cfg || {};
var bufferLen = config.bufferLen || 4096;
this.context = source.context;
this.node = (this.context.createScriptProcessor || this.context.createJavaScriptNode).call(this.context, bufferLen, 2, 2);
var worker = new Worker(config.workerPath || WORKER_PATH);
worker.postMessage({
command: 'init',
config: {
sampleRate: this.context.sampleRate
}
});
var recording = false,
currCallback;
this.node.onaudioprocess = function(e){
if (!recording) return;
worker.postMessage({
command: 'record',
buffer: [
e.inputBuffer.getChannelData(0),
e.inputBuffer.getChannelData(1)
]
});
}
this.configure = function(cfg){
for (var prop in cfg){
if (cfg.hasOwnProperty(prop)){
config[prop] = cfg[prop];
}
}
}
this.record = function(){
recording = true;
}
this.stop = function(){
recording = false;
}
this.clear = function(){
worker.postMessage({ command: 'clear' });
}
this.getBuffer = function(cb) {
currCallback = cb || config.callback;
worker.postMessage({ command: 'getBuffer' })
}
this.exportWAV = function(cb, type){
currCallback = cb || config.callback;
type = type || config.type || 'audio/wav';
if (!currCallback) throw new Error('Callback not set');
worker.postMessage({
command: 'exportWAV',
type: type
});
}
worker.onmessage = function(e){
var blob = e.data;
currCallback(blob);
}
source.connect(this.node);
this.node.connect(this.context.destination); //this should not be necessary
};
Recorder.forceDownload = function(blob, filename){
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var link = window.document.createElement('a');
link.href = url;
link.download = filename || 'output.wav';
var click = document.createEvent("Event");
click.initEvent("click", true, true);
link.dispatchEvent(click);
}
window.Recorder = Recorder;
})(window);
//ADDITIONAL JS recorderWorker.js
var recLength = 0,
recBuffersL = [],
recBuffersR = [],
sampleRate;
this.onmessage = function(e){
switch(e.data.command){
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config){
sampleRate = config.sampleRate;
}
function record(inputBuffer){
recBuffersL.push(inputBuffer[0]);
recBuffersR.push(inputBuffer[1]);
recLength += inputBuffer[0].length;
}
function exportWAV(type){
var bufferL = mergeBuffers(recBuffersL, recLength);
var bufferR = mergeBuffers(recBuffersR, recLength);
var interleaved = interleave(bufferL, bufferR);
var dataview = encodeWAV(interleaved);
var audioBlob = new Blob([dataview], { type: type });
this.postMessage(audioBlob);
}
function getBuffer() {
var buffers = [];
buffers.push( mergeBuffers(recBuffersL, recLength) );
buffers.push( mergeBuffers(recBuffersR, recLength) );
this.postMessage(buffers);
}
function clear(){
recLength = 0;
recBuffersL = [];
recBuffersR = [];
}
function mergeBuffers(recBuffers, recLength){
var result = new Float32Array(recLength);
var offset = 0;
for (var i = 0; i < recBuffers.length; i++){
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR){
var length = inputL.length + inputR.length;
var result = new Float32Array(length);
var index = 0,
inputIndex = 0;
while (index < length){
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input){
for (var i = 0; i < input.length; i++, offset+=2){
var s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string){
for (var i = 0; i < string.length; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples){
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* file length */
view.setUint32(4, 32 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (raw) */
view.setUint16(20, 1, true);
/* channel count */
view.setUint16(22, 2, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, 4, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
If you use the Chrome web browser, and since you ask for high-quality audio recording, you would probably be better off building a Native Client module; you may take a look here (Chrome developer Native Client dev guide):
Selecting a sample frame count for an audio stream involves a tradeoff between latency and CPU usage. If you want your module to have short audio latency so that it can rapidly change what’s playing in the audio stream, you should request a small sample frame count.
[...]
After the module obtains a sample frame count, it can create an audio
configuration resource. Currently the Pepper audio API supports audio
streams with the configuration settings shown above
And here you will find the audio configuration which fits your requirements:
The Pepper audio API currently lets Native Client modules play audio streams with the following configurations:
sample rate: 44,100 Hz or 48,000 Hz
bit depth: 16
channels: 2 (stereo)
This official link could also be helpful.
Regards
Related
I have searched for a pitch detection program but nothing has given me the guidance I need. I am trying to use pitch.js but I can't get the microphone buffer into the program.
I am trying to create a program that uses the pitch of a singer's voice to move an object up and down the screen. For this I need continuous (or every 250 ms or so) pitch detection and access to the microphone.
I am trying to use "pitch.js" but I cannot get the microphone buffer into "pitch.input":
/* Copy samples to the internal buffer */
pitch.input(inputBuffer);
The code I am using is:
var audioContext = new AudioContext();
console.log("audio is starting up ...");
var BUFF_SIZE = 16384;
var audioInput = null,
microphone_stream = null,
gain_node = null,
script_processor_node = null,
analyserNode = null;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({ audio: true },
function (stream) {
start_microphone(stream);
},
function (e) {
alert('Error capturing audio.');
}
);
} else { alert('getUserMedia not supported in this browser.'); }
function start_microphone(stream) {
gain_node = audioContext.createGain();
gain_node.connect(audioContext.destination);
microphone_stream = audioContext.createMediaStreamSource(stream);
microphone_stream.connect(gain_node);
// --- enable volume control for output speakers
document.getElementById('volume').addEventListener('change', function ()
{
var curr_volume = this.value;
gain_node.gain.value = curr_volume;
console.log("curr_volume ", curr_volume);
});
script_processor_node = audioContext.createScriptProcessor(2048, 1, 1);
script_processor_node.connect(gain_node);
function setPitch() {
var pitch = new PitchAnalyzer(4100);
/* Copy samples to the internal buffer */
//This is where I have been having the problem.
pitch.input(????);
/* Process the current input in the internal buffer */
pitch.process();
var tone = pitch.findTone();
if (tone === null) {
console.log('No tone found!');
} else {
console.log('Found a tone, frequency:', tone.freq, 'volume:',
tone.db);
}
}
</script>
</body>
</html>
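For anyone hitting the same wall, here is a minimal sketch of one way to wire the ScriptProcessor buffer into pitch.js, assuming (as the snippet above implies) that PitchAnalyzer takes the sample rate and that input() accepts a Float32Array:
// Assumes audioContext, microphone_stream and script_processor_node from the code above.
var pitch = new PitchAnalyzer(audioContext.sampleRate); // use the real rate, not a hard-coded value

microphone_stream.connect(script_processor_node);
script_processor_node.onaudioprocess = function (event) {
    var inputBuffer = event.inputBuffer.getChannelData(0); // mono Float32Array from the mic
    pitch.input(inputBuffer); // copy samples into pitch.js' internal buffer
    pitch.process();          // process whatever is currently buffered
    var tone = pitch.findTone();
    if (tone) {
        console.log('Found a tone, frequency:', tone.freq, 'volume:', tone.db);
    }
};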
Problem solved! I found a resource called "Wad" on GitHub (https://github.com/rserota/wad) and it did exactly what I wanted!
I am developing a web app in which I want to use the recorder.js file to implement audio recording. I have this code but I am unable to use it in the web2py framework. Can you give me detailed info on how to use it?
There is a .bower.json file which is getting hidden when uploaded to web2py. How can we use this and link all the files so that recording can be done?
This is my .bower.json file:
{
"name": "Recorderjs",
"homepage": "https://github.com/mattdiamond/Recorderjs",
"_release": "f814ac7b3f",
"_resolution": {
"type": "branch",
"branch": "master",
"commit": "f814ac7b3f4ed4f62729860d5a02720f167480b3"
},
"_source": "git://github.com/mattdiamond/Recorderjs.git",
"_target": "*",
"_originalSource": "Recorderjs",
"_direct": true
}
This is my html code:
<body>
<script src="bower_components/Recorderjs/recorder.js">
</script>
<script src="app.js"></script>
<button onclick="record()">Record</button>
<button onclick="stop()">Stop</button>
These are my JavaScript files: /app.js
var navigator = window.navigator;
var Context = window.AudioContext || window.webkitAudioContext;
var context = new Context();
// audio
var mediaStream;
var rec;
navigator.getUserMedia = (
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia
);
function record() {
navigator.getUserMedia({audio: true}, function(localMediaStream){
mediaStream = localMediaStream;
var mediaStreamSource = context.createMediaStreamSource(localMediaStream);
rec = new Recorder(mediaStreamSource, {
workerPath: '/bower_components/Recorderjs/recorderWorker.js'
});
rec.record();
}, function(err){
console.log('Not supported');
});
}
function stop() {
mediaStream.stop();
rec.stop();
rec.exportWAV(function(e){
rec.clear();
Recorder.forceDownload(e, "test.wav");
});
}
/recorder.js
(function(window){
var WORKER_PATH = 'recorderWorker.js';
var Recorder = function(source, cfg){
var config = cfg || {};
var bufferLen = config.bufferLen || 4096;
this.context = source.context;
this.node = (this.context.createScriptProcessor ||
this.context.createJavaScriptNode).call(this.context,
bufferLen, 2, 2);
var worker = new Worker(config.workerPath || WORKER_PATH);
worker.postMessage({
command: 'init',
config: {
sampleRate: this.context.sampleRate
}
});
var recording = false,
currCallback;
this.node.onaudioprocess = function(e){
if (!recording) return;
worker.postMessage({
command: 'record',
buffer: [
e.inputBuffer.getChannelData(0),
e.inputBuffer.getChannelData(1)
]
});
}
this.configure = function(cfg){
for (var prop in cfg){
if (cfg.hasOwnProperty(prop)){
config[prop] = cfg[prop];
}
}
}
this.record = function(){
recording = true;
}
this.stop = function(){
recording = false;
}
this.clear = function(){
worker.postMessage({ command: 'clear' });
}
this.getBuffer = function(cb) {
currCallback = cb || config.callback;
worker.postMessage({ command: 'getBuffer' })
}
recorderWorker.js:
var recLength = 0,
recBuffersL = [],
recBuffersR = [],
sampleRate;
this.onmessage = function(e){
switch(e.data.command){
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config){
sampleRate = config.sampleRate;
}
function record(inputBuffer){
recBuffersL.push(inputBuffer[0]);
recBuffersR.push(inputBuffer[1]);
recLength += inputBuffer[0].length;
}
function exportWAV(type){
var bufferL = mergeBuffers(recBuffersL, recLength);
var bufferR = mergeBuffers(recBuffersR, recLength);
var interleaved = interleave(bufferL, bufferR);
var dataview = encodeWAV(interleaved);
var audioBlob = new Blob([dataview], { type: type });
this.postMessage(audioBlob);
}
function getBuffer() {
var buffers = [];
buffers.push( mergeBuffers(recBuffersL, recLength) );
buffers.push( mergeBuffers(recBuffersR, recLength) );
this.postMessage(buffers);
}
function clear(){
recLength = 0;
recBuffersL = [];
recBuffersR = [];
}
function mergeBuffers(recBuffers, recLength){
var result = new Float32Array(recLength);
var offset = 0;
for (var i = 0; i < recBuffers.length; i++){
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR){
var length = inputL.length + inputR.length;
var result = new Float32Array(length);
var index = 0,
inputIndex = 0;
while (index < length){
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input){
for (var i = 0; i < input.length; i++, offset+=2){
var s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string){
for (var i = 0; i < string.length; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples){
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* file length */
view.setUint32(4, 32 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (raw) */
view.setUint16(20, 1, true);
/* channel count */
view.setUint16(22, 2, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, 4, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
I don't know if this is the only problem, but the following script source references are relative URLs:
<script src="bower_components/Recorderjs/recorder.js"></script>
<script src="app.js"></script>
Because the URLs are not preceded with a /, they will be appended to the URL of the current page. So, if you are at /myapp/default/index, the first URL will reference /myapp/default/index/bower_components/Recorderjs/recorder.js, which will not be correct.
You should keep your JavaScript files in the /web2py/applications/myapp/static folder. In that case, it is best to use the web2py URL() function to generate proper URLs:
<script src="{{=URL('static', 'js/bower_components/Recorderjs/recorder.js')}}"></script>
<script src="{{=URL('static', 'js/app.js')}}"></script>
Regarding internal URLs, assuming you move everything into the /myapp/static/js folder, a URL like:
/bower_components/Recorderjs/recorderWorker.js
would change to:
/myapp/static/js/bower_components/Recorderjs/recorderWorker.js
Relative internal URLs should continue to work without alteration.
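One thing that often still breaks after the move is the worker path that Recorder.js loads at runtime. Since it is configurable (see the workerPath option in the code above), you can generate it the same way; a sketch, assuming the files live under static/js/bower_components/Recorderjs (adjust to wherever you actually put them):
<script>
// In the web2py view, let the framework build the worker URL instead of hard-coding it.
var RECORDER_WORKER_PATH = "{{=URL('static', 'js/bower_components/Recorderjs/recorderWorker.js')}}";

// Later, when creating the recorder (see app.js above):
// rec = new Recorder(mediaStreamSource, { workerPath: RECORDER_WORKER_PATH });
</script>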
I'm trying to get a stream of data from my microphone (e.g. volume, pitch).
For now, I've been using getUserMedia to access my microphone audio.
But I couldn't find a way to extract the data from it.
My code:
$(function () {
var audioContext = new AudioContext();
var audioInput = null,
realAudioInput = null,
inputPoint = null,
analyserNode = null;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function success(stream){
inputPoint = audioContext.createGain();
realAudioInput = audioContext.createMediaStreamSource(stream);
audioInput = realAudioInput;
audioInput.connect(inputPoint);
analyserNode = audioContext.createAnalyser();
analyserNode.fftSize = 2048;
inputPoint.connect( analyserNode );
}
function live(){
requestAnimationFrame(live);
var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);
analyserNode.getByteFrequencyData(freqByteData);
console.log(analyserNode.frequencyBinCount);
}
});
Here is a version of your code which does two things:
It retrieves the raw PCM audio buffer from the live microphone and sends it to console.log (to show the JavaScript console hit ctrl-shift-i); this is the raw PCM audio curve of the streaming mic data in the time domain.
It also runs this same audio data through an FFT (fast Fourier transform), which is also sent to console.log; this is the frequency-domain representation of the same Web Audio API event loop buffer.
NOTE - either wear headphones OR turn down your speaker volume, otherwise you will hear the squeal of audio feedback as the mic picks up the speaker audio, a la Jimi Hendrix!
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>
<script type="text/javascript">
var webaudio_tooling_obj = function () {
var audioContext = new AudioContext();
console.log("audio is starting up ...");
var BUFF_SIZE_RENDERER = 16384;
var audioInput = null,
microphone_stream = null,
gain_node = null,
script_processor_node = null,
script_processor_analysis_node = null,
analyser_node = null;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true},
function(stream) {
start_microphone(stream);
},
function(e) {
alert('Error capturing audio.');
}
);
} else { alert('getUserMedia not supported in this browser.'); }
// ---
function show_some_data(given_typed_array, num_row_to_display, label) {
var size_buffer = given_typed_array.length;
var index = 0;
console.log("__________ " + label);
if (label === "time") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
var curr_value_time = (given_typed_array[index] / 128) - 1.0;
console.log(curr_value_time);
}
} else if (label === "frequency") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
console.log(given_typed_array[index]);
}
} else {
throw new Error("ERROR - must pass time or frequency");
}
}
function process_microphone_buffer(event) {
var i, N, inp, microphone_output_buffer;
microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
}
function start_microphone(stream){
gain_node = audioContext.createGain();
gain_node.connect( audioContext.destination );
microphone_stream = audioContext.createMediaStreamSource(stream);
microphone_stream.connect(gain_node);
script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
script_processor_node.onaudioprocess = process_microphone_buffer;
microphone_stream.connect(script_processor_node);
// --- enable volume control for output speakers
document.getElementById('volume').addEventListener('change', function() {
var curr_volume = this.value;
gain_node.gain.value = curr_volume;
console.log("curr_volume ", curr_volume);
});
// --- setup FFT
script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
script_processor_analysis_node.connect(gain_node);
analyser_node = audioContext.createAnalyser();
analyser_node.smoothingTimeConstant = 0;
analyser_node.fftSize = 2048;
microphone_stream.connect(analyser_node);
analyser_node.connect(script_processor_analysis_node);
var buffer_length = analyser_node.frequencyBinCount;
var array_freq_domain = new Uint8Array(buffer_length);
var array_time_domain = new Uint8Array(buffer_length);
console.log("buffer_length " + buffer_length);
script_processor_analysis_node.onaudioprocess = function() {
// get the average for the first channel
analyser_node.getByteFrequencyData(array_freq_domain);
analyser_node.getByteTimeDomainData(array_time_domain);
// draw the spectrogram
if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {
show_some_data(array_freq_domain, 5, "frequency");
show_some_data(array_time_domain, 5, "time"); // store this to record to aggregate buffer/file
}
};
}
}(); // webaudio_tooling_obj = function()
</script>
</head>
<body>
<p>Volume</p>
<input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
</body>
</html>
I'm looking for a browser-based way of recording until a silence occurs.
HTML audio recording from the microphone is possible in Firefox and Chrome using Recordmp3js; see:
http://nusofthq.com/blog/recording-mp3-using-only-html5-and-javascript-recordmp3-js/
and the code on github: http://github.com/nusofthq/Recordmp3js
I can't see a way to change that code to record until silence.
Recording until silence can be done (and tuned) using Java for a native Android app - see here:
Android audio capture silence detection
Google Voice Search demonstrates that a browser can do it - but how can I do it using JavaScript?
Any ideas?
If you use the Web Audio API, open up a live microphone audio capture by making a call to navigator.getUserMedia, then create a node using createScriptProcessor, then assign to that node a callback for its onaudioprocess event. Inside your callback function (below I use script_processor_analysis_node) you have access to the live real-time audio buffer, which you can then parse looking for silence (some length of time where the amplitude is low [stays close to zero]).
For the normal time-domain audio curve see array_time_domain,
which is populated fresh upon each call to the script_processor_analysis_node callback ... similarly for the frequency domain see array_freq_domain.
Turn down your speaker volume or use headphones to avoid feedback from mic -> speaker -> mic ...
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>
<script type="text/javascript">
var webaudio_tooling_obj = function () {
var audioContext = new AudioContext();
console.log("audio is starting up ...");
var BUFF_SIZE_RENDERER = 16384;
var audioInput = null,
microphone_stream = null,
gain_node = null,
script_processor_node = null,
script_processor_analysis_node = null,
analyser_node = null;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true},
function(stream) {
start_microphone(stream);
},
function(e) {
alert('Error capturing audio.');
}
);
} else { alert('getUserMedia not supported in this browser.'); }
// ---
function show_some_data(given_typed_array, num_row_to_display, label) {
var size_buffer = given_typed_array.length;
var index = 0;
console.log("__________ " + label);
if (label === "time") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
var curr_value_time = (given_typed_array[index] / 128) - 1.0;
console.log(curr_value_time);
}
} else if (label === "frequency") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
console.log(given_typed_array[index]);
}
} else {
throw new Error("ERROR - must pass time or frequency");
}
}
function process_microphone_buffer(event) {
var i, N, inp, microphone_output_buffer;
microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
}
function start_microphone(stream){
gain_node = audioContext.createGain();
gain_node.connect( audioContext.destination );
microphone_stream = audioContext.createMediaStreamSource(stream);
microphone_stream.connect(gain_node);
script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
script_processor_node.onaudioprocess = process_microphone_buffer;
microphone_stream.connect(script_processor_node);
// --- enable volume control for output speakers
document.getElementById('volume').addEventListener('change', function() {
var curr_volume = this.value;
gain_node.gain.value = curr_volume;
console.log("curr_volume ", curr_volume);
});
// --- setup FFT
script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
script_processor_analysis_node.connect(gain_node);
analyser_node = audioContext.createAnalyser();
analyser_node.smoothingTimeConstant = 0;
analyser_node.fftSize = 2048;
microphone_stream.connect(analyser_node);
analyser_node.connect(script_processor_analysis_node);
var buffer_length = analyser_node.frequencyBinCount;
var array_freq_domain = new Uint8Array(buffer_length);
var array_time_domain = new Uint8Array(buffer_length);
console.log("buffer_length " + buffer_length);
script_processor_analysis_node.onaudioprocess = function() {
// get the average for the first channel
analyser_node.getByteFrequencyData(array_freq_domain);
analyser_node.getByteTimeDomainData(array_time_domain);
// draw the spectrogram
if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {
show_some_data(array_freq_domain, 5, "frequency");
show_some_data(array_time_domain, 5, "time"); // store this to record to aggregate buffer/file
// examine array_time_domain for near zero values over some time period
}
};
}
}(); // webaudio_tooling_obj = function()
</script>
</head>
<body>
<p>Volume</p>
<input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
</body>
</html>
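Building on the code above, here is a minimal sketch of the actual silence test. The byte time-domain samples are centred on 128, so "near zero amplitude" means "near 128"; the threshold and hold time below are made-up numbers you would tune for your microphone:
var SILENCE_THRESHOLD = 0.02;   // normalised RMS below which we call it silence (tune this)
var SILENCE_DURATION_MS = 1500; // how long it must stay quiet before stopping (tune this)
var silence_started_at = null;

function looks_like_silence(array_time_domain) {
    var sum_squares = 0;
    for (var i = 0; i < array_time_domain.length; i += 1) {
        var amplitude = (array_time_domain[i] / 128) - 1.0; // map 0..255 bytes to -1..+1
        sum_squares += amplitude * amplitude;
    }
    var rms = Math.sqrt(sum_squares / array_time_domain.length);
    return rms < SILENCE_THRESHOLD;
}

function check_for_silence(array_time_domain) {
    if (looks_like_silence(array_time_domain)) {
        if (silence_started_at === null) silence_started_at = Date.now();
        if (Date.now() - silence_started_at > SILENCE_DURATION_MS) {
            console.log('silence detected - stop recording here');
        }
    } else {
        silence_started_at = null; // sound resumed, reset the timer
    }
}

// Call check_for_silence(array_time_domain) inside the onaudioprocess callback above,
// right after analyser_node.getByteTimeDomainData(array_time_domain).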
This is an old post, but I am sure many will have the same problem, so I am posting my solution here.
Use hark.js.
Below is the sample demo code I used for my Electron app:
hark = require('./node_modules/hark/hark.bundle.js')
navigator.getUserMedia({ audio : true}, onMediaSuccess, function(){});
function onMediaSuccess(stream) {
var options = {};
var speechEvents = hark(stream, options);
speechEvents.on('speaking', function() {
console.log('speaking');
});
speechEvents.on('stopped_speaking', function() {
console.log('stopped_speaking');
});
};
The solution from @Scott Stensland doesn't allow me to parse for silence. I am getting the same value when I parse the two arrays - that is, I always get 0 when parsing arrayFreqDomain and always 128 when parsing arrayTimeDomain:
let analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0;
analyser.fftSize = 2048;
let buffLength = analyser.frequencyBinCount;
let arrayFreqDomain = new Uint8Array(buffLength);
let arrayTimeDomain = new Uint8Array(buffLength);
processor.connect(analyser);
processor.onaudioprocess = (event) => {
/**
*
* Parse live real-time buffer looking for silence
*
**/
analyser.getByteFrequencyData(arrayFreqDomain);
analyser.getByteTimeDomainData(arrayTimeDomain);
if (context.state === "running") {
let sizeBuffer = arrayTimeDomain.length;
let index = 0;
for (; index < 5 && index < sizeBuffer; index += 1) {
console.log(arrayTimeDomain[index]); // <---- always prints 128
}
}
}
What I ultimately want to do is record from the user's microphone and upload the file to the server when they're done. So far, I've managed to stream to an audio element with the following code:
var audio = document.getElementById("audio_preview");
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
navigator.getUserMedia({video: false, audio: true}, function(stream) {
audio.src = window.URL.createObjectURL(stream);
}, onRecordFail);
var onRecordFail = function (e) {
console.log(e);
}
How do I go from that, to recording to a file?
There is a fairly complete recording demo available at: http://webaudiodemos.appspot.com/AudioRecorder/index.html
It allows you to record audio in the browser, then gives you the option to export and download what you've recorded.
You can view the source of that page to find links to the JavaScript, but to summarize, there's a Recorder object that contains an exportWAV method and a forceDownload method.
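Roughly, the usage looks like the sketch below; the names are taken from the Recorder.js source reproduced further down, and the getUserMedia wiring is abbreviated:
// Assumes recorder.js and recorderWorker.js are loaded, and `stream` came from getUserMedia.
var context = new (window.AudioContext || window.webkitAudioContext)();
var source = context.createMediaStreamSource(stream);
var recorder = new Recorder(source, { workerPath: 'recorderWorker.js' });

recorder.record(); // start buffering audio
// ... later ...
recorder.stop();   // stop buffering
recorder.exportWAV(function (blob) {
    Recorder.forceDownload(blob, 'recording.wav'); // or upload the blob instead
});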
The code shown below is copyrighted by Matt Diamond and available for use under the MIT license. The original files are here:
http://webaudiodemos.appspot.com/AudioRecorder/index.html
http://webaudiodemos.appspot.com/AudioRecorder/js/recorderjs/recorderWorker.js
Save these files and use them as shown below.
(function(window){
var WORKER_PATH = 'recorderWorker.js';
var Recorder = function(source, cfg){
var config = cfg || {};
var bufferLen = config.bufferLen || 4096;
this.context = source.context;
this.node = this.context.createScriptProcessor(bufferLen, 2, 2);
var worker = new Worker(config.workerPath || WORKER_PATH);
worker.postMessage({
command: 'init',
config: {
sampleRate: this.context.sampleRate
}
});
var recording = false,
currCallback;
this.node.onaudioprocess = function(e){
if (!recording) return;
worker.postMessage({
command: 'record',
buffer: [
e.inputBuffer.getChannelData(0),
e.inputBuffer.getChannelData(1)
]
});
}
this.configure = function(cfg){
for (var prop in cfg){
if (cfg.hasOwnProperty(prop)){
config[prop] = cfg[prop];
}
}
}
this.record = function(){
recording = true;
}
this.stop = function(){
recording = false;
}
this.clear = function(){
worker.postMessage({ command: 'clear' });
}
this.getBuffer = function(cb) {
currCallback = cb || config.callback;
worker.postMessage({ command: 'getBuffer' })
}
this.exportWAV = function(cb, type){
currCallback = cb || config.callback;
type = type || config.type || 'audio/wav';
if (!currCallback) throw new Error('Callback not set');
worker.postMessage({
command: 'exportWAV',
type: type
});
}
worker.onmessage = function(e){
var blob = e.data;
currCallback(blob);
}
source.connect(this.node);
this.node.connect(this.context.destination); //this should not be necessary
};
Recorder.forceDownload = function(blob, filename){
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var link = window.document.createElement('a');
link.href = url;
link.download = filename || 'output.wav';
var click = document.createEvent("Event");
click.initEvent("click", true, true);
link.dispatchEvent(click);
}
window.Recorder = Recorder;
})(window);
//ADDITIONAL JS recorderWorker.js
var recLength = 0,
recBuffersL = [],
recBuffersR = [],
sampleRate;
this.onmessage = function(e){
switch(e.data.command){
case 'init':
init(e.data.config);
break;
case 'record':
record(e.data.buffer);
break;
case 'exportWAV':
exportWAV(e.data.type);
break;
case 'getBuffer':
getBuffer();
break;
case 'clear':
clear();
break;
}
};
function init(config){
sampleRate = config.sampleRate;
}
function record(inputBuffer){
recBuffersL.push(inputBuffer[0]);
recBuffersR.push(inputBuffer[1]);
recLength += inputBuffer[0].length;
}
function exportWAV(type){
var bufferL = mergeBuffers(recBuffersL, recLength);
var bufferR = mergeBuffers(recBuffersR, recLength);
var interleaved = interleave(bufferL, bufferR);
var dataview = encodeWAV(interleaved);
var audioBlob = new Blob([dataview], { type: type });
this.postMessage(audioBlob);
}
function getBuffer() {
var buffers = [];
buffers.push( mergeBuffers(recBuffersL, recLength) );
buffers.push( mergeBuffers(recBuffersR, recLength) );
this.postMessage(buffers);
}
function clear(){
recLength = 0;
recBuffersL = [];
recBuffersR = [];
}
function mergeBuffers(recBuffers, recLength){
var result = new Float32Array(recLength);
var offset = 0;
for (var i = 0; i < recBuffers.length; i++){
result.set(recBuffers[i], offset);
offset += recBuffers[i].length;
}
return result;
}
function interleave(inputL, inputR){
var length = inputL.length + inputR.length;
var result = new Float32Array(length);
var index = 0,
inputIndex = 0;
while (index < length){
result[index++] = inputL[inputIndex];
result[index++] = inputR[inputIndex];
inputIndex++;
}
return result;
}
function floatTo16BitPCM(output, offset, input){
for (var i = 0; i < input.length; i++, offset+=2){
var s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
}
function writeString(view, offset, string){
for (var i = 0; i < string.length; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function encodeWAV(samples){
var buffer = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF');
/* file length */
view.setUint32(4, 32 + samples.length * 2, true);
/* RIFF type */
writeString(view, 8, 'WAVE');
/* format chunk identifier */
writeString(view, 12, 'fmt ');
/* format chunk length */
view.setUint32(16, 16, true);
/* sample format (raw) */
view.setUint16(20, 1, true);
/* channel count */
view.setUint16(22, 2, true);
/* sample rate */
view.setUint32(24, sampleRate, true);
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true);
/* block align (channel count * bytes per sample) */
view.setUint16(32, 4, true);
/* bits per sample */
view.setUint16(34, 16, true);
/* data chunk identifier */
writeString(view, 36, 'data');
/* data chunk length */
view.setUint32(40, samples.length * 2, true);
floatTo16BitPCM(view, 44, samples);
return view;
}
<html>
<body>
<audio controls autoplay></audio>
<script type="text/javascript" src="recorder.js"> </script>
<fieldset><legend>RECORD AUDIO</legend>
<input onclick="startRecording()" type="button" value="start recording" />
<input onclick="stopRecording()" type="button" value="stop recording and play" />
</fieldset>
<script>
var onFail = function(e) {
console.log('Rejected!', e);
};
var onSuccess = function(s) {
var context = new (window.AudioContext || window.webkitAudioContext)();
var mediaStreamSource = context.createMediaStreamSource(s);
recorder = new Recorder(mediaStreamSource);
recorder.record();
// audio loopback
// mediaStreamSource.connect(context.destination);
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var recorder;
var audio = document.querySelector('audio');
function startRecording() {
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, onSuccess, onFail);
} else {
console.log('navigator.getUserMedia not present');
}
}
function stopRecording() {
recorder.stop();
recorder.exportWAV(function(s) {
audio.src = window.URL.createObjectURL(s);
});
}
</script>
</body>
</html>
Update: Chrome now also supports the MediaRecorder API, from v47. The obvious thing to do would be to use it (guessing a native recording method is bound to be faster than workarounds); the API is really easy to use, and you will find tons of answers on how to upload a blob to the server.
Demo - works in Chrome and Firefox; pushing the blob to the server was intentionally left out...
Code Source
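For completeness, pushing the recorded blob to a server is typically just a multipart POST; a minimal sketch, where /upload is a hypothetical endpoint you would replace with your own:
// `blob` is whatever your MediaRecorder dataavailable/stop handling produced.
function uploadRecording(blob) {
    var formData = new FormData();
    formData.append('audio', blob, 'recording.webm'); // field name and filename are up to your backend
    fetch('/upload', { method: 'POST', body: formData })
        .then(function (response) { console.log('upload finished:', response.status); })
        .catch(function (err) { console.log('upload failed:', err); });
}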
Currently, there are three ways to do it:
as WAV [all code client-side, uncompressed recording], you can check out --> Recorderjs. Problem: the file size is quite big, so more upload bandwidth is required.
as MP3 [all code client-side, compressed recording], you can check out --> mp3Recorder. Problem: personally, I find the quality bad, and there is also a licensing issue.
as Ogg [client + server (node.js) code, compressed recording, infinite hours of recording without a browser crash], you can check out --> recordOpus, either client-side-only recording or client-server bundling; the choice is yours.
Ogg recording example (Firefox only):
var mediaRecorder = new MediaRecorder(stream);
mediaRecorder.start(); // to start recording.
...
mediaRecorder.stop(); // to stop recording.
mediaRecorder.ondataavailable = function(e) {
// do something with the data.
}
Fiddle Demo for ogg recording.
This is a simple JavaScript sound recorder and editor. You can try it.
https://www.danieldemmel.me/JSSoundRecorder/
Can download from here
https://github.com/daaain/JSSoundRecorder
The question is very old and many of the answers are no longer supported in current browser versions. I was trying to create an audio recorder using simple HTML, CSS, and JS. I then went on to use the same code in Electron to make a cross-platform application.
<html>
<head>
<title>Recorder App</title>
</head>
<body>
<h2>Recorder App</h2>
<p>
<button type="button" id="record">Record</button>
<button type="button" id="stopRecord" disabled>Stop</button>
</p>
<p>
<audio id="recordedAudio"></audio>
</p>
<script>
navigator.mediaDevices.getUserMedia({audio:true})
.then(stream => {handlerFunction(stream)})
function handlerFunction(stream) {
rec = new MediaRecorder(stream);
rec.ondataavailable = e => {
audioChunks.push(e.data);
if (rec.state == "inactive"){
let blob = new Blob(audioChunks,{type:'audio/webm'}); // MediaRecorder output is typically webm/ogg, not mp3
recordedAudio.src = URL.createObjectURL(blob);
recordedAudio.controls=true;
recordedAudio.autoplay=true;
sendData(blob)
}
}
}
function sendData(data) {}
record.onclick = e => {
record.disabled = true;
record.style.backgroundColor = "blue"
stopRecord.disabled=false;
audioChunks = [];
rec.start();
}
stopRecord.onclick = e => {
record.disabled = false;
stopRecord.disabled=true;
record.style.backgroundColor = "red"
rec.stop();
}
</script>
</body>
</html>
The above code works on Windows 10, Mac, and Linux, and obviously on both Google Chrome and Firefox.
You can use Recordmp3js from GitHub to achieve your requirements. You can record from the user's microphone and then get the file as an MP3. Finally, upload it to your server.
I used this in my demo. There is already a sample available with the source code by the author at this location:
https://github.com/Audior/Recordmp3js
The demo is here:
http://audior.ec/recordmp3js/
But it currently works only on Chrome and Firefox.
It seems to work fine and is pretty simple. Hope this helps.
Here's a GitHub project that does just that.
It records audio from the browser in mp3 format, and it automatically saves it to the webserver.
https://github.com/Audior/Recordmp3js
You can also view a detailed explanation of the implementation:
http://audior.ec/blog/recording-mp3-using-only-html5-and-javascript-recordmp3-js/
Stream audio in realtime without waiting for recording to end:
https://github.com/noamtcohen/AudioStreamer
This streams PCM data, but you could modify the code to stream MP3 or Speex.