How do I fix this?
It is failing now; in the past it worked fine.
(Normally it shows a green bar in the canvas when you speak into the mic.)
<script type="text/javascript">
var navigator = window.navigator;
navigator.getMedia = (navigator.getUserMedia ||
                      navigator.webkitGetUserMedia ||
                      navigator.mozGetUserMedia ||
                      navigator.msGetUserMedia);
navigator.getMedia({ video: false, audio: true }, function (stream) {
    console.log('doing....');
    audioContext = new webkitAudioContext();
    analyser = audioContext.createAnalyser();
    microphone = audioContext.createMediaStreamSource(stream);
    javascriptNode = audioContext.createJavaScriptNode(2048, 1, 1);
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 1024;
    microphone.connect(analyser);
    analyser.connect(javascriptNode);
    javascriptNode.connect(audioContext.destination);
    canvasContext = document.getElementById("test");
    canvasContext = canvasContext.getContext("2d");
    javascriptNode.onaudioprocess = function () {
        console.log('doing.... bla bla');
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        var values = 0;
        var length = array.length;
        for (var i = 0; i < length; i++) {
            values += array[i];
        }
        var average = values / length;
        canvasContext.clearRect(0, 0, 300, 130);
        canvasContext.fillStyle = '#00ff00';
        canvasContext.fillRect(0, 130 - average, 300, 130);
    }
    console.log('doing.... done');
}, function (err) {
    console.log("An error occured! " + err);
});
</script>
webkitAudioContext does not have createJavaScriptNode, and I believe you should not use it anywhere.
Try javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);
createJavaScriptNode() has been marked as obsolete (https://developer.mozilla.org/en-US/docs/Web/API/AudioContext.createJavaScriptNode), and its use is now discouraged. The method has been renamed to createScriptProcessor(); here is some documentation on it: https://developer.mozilla.org/en-US/docs/Web/API/AudioContext.createScriptProcessor
Hope this helps!
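For reference, a minimal corrected version of the setup above might look like this (a sketch; it assumes `stream` comes from the getUserMedia success callback exactly as in the question):

// standard constructor, with the webkit prefix kept as a fallback for old browsers
var audioContext = new (window.AudioContext || window.webkitAudioContext)();

var analyser = audioContext.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 1024;

var microphone = audioContext.createMediaStreamSource(stream);
// createScriptProcessor replaces the obsolete createJavaScriptNode
var javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);

microphone.connect(analyser);
analyser.connect(javascriptNode);
javascriptNode.connect(audioContext.destination);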
Related
I am currently trying to implement audio auto-recording: the user speaks, and after they stop, the audio should be submitted to the backend.
I already have a sample script that submits audio with start and stop click handlers.
I'm trying to get some kind of value such as amplitude, volume, or a threshold, but I'm not sure whether MediaRecorder supports this or whether I need to look at the Web Audio API or other solutions.
Can I achieve this with MediaRecorder?
Regarding the audio analysis of the mic input, the following example shows how to take the audio captured by the mic, create an analyser with the createAnalyser method of the (webkit)AudioContext, connect the stream to the analyser, and compute an FFT of the specified size, in order to estimate pitch and display the output sound wave.
window.AudioContext = window.AudioContext || window.webkitAudioContext;

var audioContext = null;
var isPlaying = false;
var sourceNode = null;
var analyser = null;
var theBuffer = null;
var audioCtx = null; // the canvas element, despite the name
var mediaStreamSource = null;
var rafID = null;
var j = 0;
var waveCanvas = null;

window.onload = function () {
    audioContext = new AudioContext();
    audioCtx = document.getElementById("waveform");
    canvasCtx = audioCtx.getContext("2d");
};

function getUserMedia(dictionary, callback) {
    try {
        navigator.getUserMedia =
            navigator.getUserMedia ||
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia;
        navigator.getUserMedia(dictionary, callback, error);
    } catch (e) {
        alert('getUserMedia threw exception :' + e);
    }
}

function gotStream(stream) {
    // Create an AudioNode from the stream.
    mediaStreamSource = audioContext.createMediaStreamSource(stream);
    // Connect it to the analyser.
    analyser = audioContext.createAnalyser();
    analyser.fftSize = 1024;
    mediaStreamSource.connect(analyser);
    updatePitch();
}

function toggleLiveInput() {
    canvasCtx.clearRect(0, 0, audioCtx.width, audioCtx.height);
    canvasCtx.beginPath();
    j = 0;
    buflen = 1024;
    buf = new Float32Array(buflen);
    document.getElementById('toggleLiveInput').disabled = true;
    document.getElementById('toggleLiveInputStop').disabled = false;
    if (isPlaying) {
        // stop playing and return
        sourceNode.stop(0);
        sourceNode = null;
        //analyser = null;
        isPlaying = false;
        if (!window.cancelAnimationFrame)
            window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
        window.cancelAnimationFrame(rafID);
    }
    getUserMedia(
        {
            "audio": {
                "mandatory": {
                    "googEchoCancellation": "false",
                    "googAutoGainControl": "false",
                    "googNoiseSuppression": "false",
                    "googHighpassFilter": "false"
                },
                "optional": []
            },
        }, gotStream);
}

function stop() {
    document.getElementById('toggleLiveInput').disabled = false;
    document.getElementById('toggleLiveInputStop').disabled = true;
    //waveCanvas.closePath();
    if (!window.cancelAnimationFrame)
        window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
    window.cancelAnimationFrame(rafID);
    return "start";
}

function updatePitch() {
    analyser.fftSize = 1024;
    analyser.getFloatTimeDomainData(buf);
    canvasCtx.strokeStyle = "red";
    for (var i = 0; i < 2; i += 2) {
        x = j * 5;
        if (audioCtx.width < x) {
            // the trace has reached the right edge: shift the image left and draw at the edge
            x = audioCtx.width - 5;
            previousImage = canvasCtx.getImageData(5, 0, audioCtx.width, audioCtx.height);
            canvasCtx.putImageData(previousImage, 0, 0);
            canvasCtx.beginPath();
            canvasCtx.lineWidth = 2;
            canvasCtx.strokeStyle = "red";
            prex = prex - 5;
            canvasCtx.lineTo(prex, prey);
            prex = x;
            prey = 128 + (buf[i] * 128);
            canvasCtx.lineTo(x, 128 + (buf[i] * 128));
            canvasCtx.stroke();
        } else {
            prex = x;
            prey = 128 + (buf[i] * 128);
            canvasCtx.lineWidth = 2;
            canvasCtx.lineTo(x, 128 + (buf[i] * 128));
            canvasCtx.stroke();
        }
        j++;
    }
    if (!window.requestAnimationFrame)
        window.requestAnimationFrame = window.webkitRequestAnimationFrame;
    rafID = window.requestAnimationFrame(updatePitch);
}

function error() {
    console.error(new Error('error while generating audio'));
}
Try the demo here.
Example adapted from pitch-liveinput.
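Back to the auto-submitting part of the question: MediaRecorder itself does not expose amplitude or volume, so one approach is to run an AnalyserNode alongside it and stop the recorder after a stretch of silence. A minimal sketch follows; the threshold, the timeout, and the /upload endpoint are all illustrative assumptions:

// Sketch: auto-stop a MediaRecorder after ~1.5 s of silence, then submit the blob.
var SILENCE_THRESHOLD = 10;   // average byte magnitude below this counts as silence (assumption)
var SILENCE_DURATION = 1500;  // ms of continuous silence before we stop (assumption)

navigator.mediaDevices.getUserMedia({ audio: true }).then(function (stream) {
    var audioContext = new (window.AudioContext || window.webkitAudioContext)();
    var source = audioContext.createMediaStreamSource(stream);
    var analyser = audioContext.createAnalyser();
    analyser.fftSize = 1024;
    source.connect(analyser);

    var recorder = new MediaRecorder(stream);
    var chunks = [];
    recorder.ondataavailable = function (e) { chunks.push(e.data); };
    recorder.onstop = function () {
        var blob = new Blob(chunks, { type: recorder.mimeType });
        var form = new FormData();
        form.append('audio', blob, 'recording.webm');
        fetch('/upload', { method: 'POST', body: form }); // endpoint name is an assumption
    };
    recorder.start();

    var data = new Uint8Array(analyser.frequencyBinCount);
    var silentSince = null;
    (function check() {
        analyser.getByteFrequencyData(data);
        var sum = 0;
        for (var i = 0; i < data.length; i++) sum += data[i];
        var average = sum / data.length;
        if (average < SILENCE_THRESHOLD) {
            if (silentSince === null) silentSince = performance.now();
            if (performance.now() - silentSince > SILENCE_DURATION) {
                recorder.stop(); // fires onstop, which submits the blob
                return;
            }
        } else {
            silentSince = null; // speech detected, reset the silence timer
        }
        requestAnimationFrame(check);
    })();
});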
I'm trying to get a stream of data from my microphone (e.g. volume, pitch).
So far, I've been using getUserMedia to access my microphone audio, but I couldn't find a way to extract the data from it.
My code:
$(function () {
    var audioContext = new AudioContext();
    var audioInput = null,
        realAudioInput = null,
        inputPoint = null,
        analyserNode = null;

    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia) {
        navigator.getUserMedia({ audio: true }, success, function (e) {
            alert('Error capturing audio.');
        });
    } else alert('getUserMedia not supported in this browser.');

    function success(stream) {
        inputPoint = audioContext.createGain();
        realAudioInput = audioContext.createMediaStreamSource(stream);
        audioInput = realAudioInput;
        audioInput.connect(inputPoint);
        analyserNode = audioContext.createAnalyser();
        analyserNode.fftSize = 2048;
        inputPoint.connect(analyserNode);
    }

    function live() {
        requestAnimationFrame(live);
        var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);
        analyserNode.getByteFrequencyData(freqByteData);
        console.log(analyserNode.frequencyBinCount);
    }
});
Here is a version of your code which does two things:
It retrieves the raw PCM audio buffer from the live microphone and sends it to console.log (to show the JavaScript console, hit ctrl-shift-i); this is the raw PCM audio curve of the streaming mic data in the time domain.
It also runs the same audio data through an FFT (fast Fourier transform), which is likewise sent to console.log; this is the frequency-domain representation of the same Web Audio API event-loop buffer.
NOTE: either wear headphones OR turn down your speaker volume, otherwise you will hear the squeal of audio feedback as the mic picks up the speaker audio, à la Jimi Hendrix!
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>

<script type="text/javascript">

var webaudio_tooling_obj = function () {

    var audioContext = new AudioContext();

    console.log("audio is starting up ...");

    var BUFF_SIZE_RENDERER = 16384;

    var audioInput = null,
        microphone_stream = null,
        gain_node = null,
        script_processor_node = null,
        script_processor_analysis_node = null,
        analyser_node = null;

    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia) {
        navigator.getUserMedia({ audio: true },
            function (stream) {
                start_microphone(stream);
            },
            function (e) {
                alert('Error capturing audio.');
            }
        );
    } else { alert('getUserMedia not supported in this browser.'); }

    // ---

    function show_some_data(given_typed_array, num_row_to_display, label) {
        var size_buffer = given_typed_array.length;
        var index = 0;
        console.log("__________ " + label);
        if (label === "time") {
            for (; index < num_row_to_display && index < size_buffer; index += 1) {
                // map the unsigned byte (0..255) back to the -1.0..+1.0 sample range
                var curr_value_time = (given_typed_array[index] / 128) - 1.0;
                console.log(curr_value_time);
            }
        } else if (label === "frequency") {
            for (; index < num_row_to_display && index < size_buffer; index += 1) {
                console.log(given_typed_array[index]);
            }
        } else {
            throw new Error("ERROR - must pass time or frequency");
        }
    }

    function process_microphone_buffer(event) {
        var i, N, inp, microphone_output_buffer;
        microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
    }

    function start_microphone(stream) {
        gain_node = audioContext.createGain();
        gain_node.connect(audioContext.destination);

        microphone_stream = audioContext.createMediaStreamSource(stream);
        microphone_stream.connect(gain_node);

        script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
        script_processor_node.onaudioprocess = process_microphone_buffer;
        microphone_stream.connect(script_processor_node);

        // --- enable volume control for output speakers
        document.getElementById('volume').addEventListener('change', function () {
            var curr_volume = this.value;
            gain_node.gain.value = curr_volume;
            console.log("curr_volume ", curr_volume);
        });

        // --- setup FFT
        script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
        script_processor_analysis_node.connect(gain_node);

        analyser_node = audioContext.createAnalyser();
        analyser_node.smoothingTimeConstant = 0;
        analyser_node.fftSize = 2048;

        microphone_stream.connect(analyser_node);
        analyser_node.connect(script_processor_analysis_node);

        var buffer_length = analyser_node.frequencyBinCount;
        var array_freq_domain = new Uint8Array(buffer_length);
        var array_time_domain = new Uint8Array(buffer_length);

        console.log("buffer_length " + buffer_length);

        script_processor_analysis_node.onaudioprocess = function () {
            // get the data for the first channel
            analyser_node.getByteFrequencyData(array_freq_domain);
            analyser_node.getByteTimeDomainData(array_time_domain);

            // draw the spectrogram
            // (playbackState is a legacy property; on modern browsers both sides
            // are undefined, so this check always passes)
            if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {
                show_some_data(array_freq_domain, 5, "frequency");
                show_some_data(array_time_domain, 5, "time"); // store this to record to aggregate buffer/file
            }
        };
    }

}(); // webaudio_tooling_obj = function()

</script>

</head>
<body>
    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
</body>
</html>
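Note that createScriptProcessor has itself since been deprecated in favour of AudioWorklet. A minimal sketch of the modern equivalent follows; the file name mic-processor.js and the processor name are illustrative:

// mic-processor.js -- runs on the audio rendering thread
class MicProcessor extends AudioWorkletProcessor {
    process(inputs, outputs, parameters) {
        var input = inputs[0];
        if (input.length > 0) {
            // input[0] is a Float32Array of raw PCM samples for channel 0
            this.port.postMessage(input[0].slice());
        }
        return true; // keep the processor alive
    }
}
registerProcessor('mic-processor', MicProcessor);

// main page -- load the module and route the mic through it
async function startWorklet(stream) {
    var audioContext = new AudioContext();
    await audioContext.audioWorklet.addModule('mic-processor.js');
    var source = audioContext.createMediaStreamSource(stream);
    var node = new AudioWorkletNode(audioContext, 'mic-processor');
    node.port.onmessage = function (e) { console.log(e.data); }; // raw PCM buffer
    source.connect(node);
}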
I'm building HTML5 voice-recording software with a visualizer. The user records their voice, and after the file is uploaded as WAV in a blob to the server side, the user should be able to select the audio format of that file, converted using ffmpeg. What I have achieved so far is uploading the file as WAV. What I still want to do is (a minimal server sketch follows this list):
On the server side, pick your preferred web programming framework
The web programming framework accepts the upload and stores the file on the server
The web programming framework runs ffmpeg (command line) to process the file
The user can download the processed file
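Here is a minimal server-side sketch; it assumes Node.js with Express and multer, and ffmpeg available on the PATH (all of these are assumptions; any framework works the same way):

// server.js -- accepts the WAV upload and transcodes it with ffmpeg (sketch)
const express = require('express');
const multer = require('multer');
const { execFile } = require('child_process');

const app = express();
const upload = multer({ dest: 'uploads/' });

// 'audio' is the form field name used by the client sketch further down (an assumption)
app.post('/upload', upload.single('audio'), (req, res) => {
    // whitelist the output format rather than trusting the client blindly
    const allowed = ['mp3', 'ogg', 'flac'];
    const format = allowed.indexOf(req.body.format) >= 0 ? req.body.format : 'mp3';
    const output = req.file.path + '.' + format;
    execFile('ffmpeg', ['-i', req.file.path, output], (err) => {
        if (err) return res.status(500).send('ffmpeg failed');
        res.download(output, 'recording.' + format); // user downloads the processed file
    });
});

app.listen(3000);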
Here is my client-side code so far:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;

if (!navigator.getUserMedia)
    navigator.getUserMedia = navigator.getUserMedia ||
        navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia ||
        navigator.msGetUserMedia;

if (navigator.getUserMedia) {
    navigator.getUserMedia({
        audio: true
    }, success, function (e) {
        alert('Error capturing audio.');
    });
} else alert('getUserMedia not supported in this browser.');

function getVal(value) {
    // if R is pressed, we start recording
    if (value == "record") {
        recording = true;
        // reset the buffers for the new recording
        leftchannel.length = rightchannel.length = 0;
        recordingLength = 0;
        document.getElementById('output').innerHTML = "Recording now...";

    // if S is pressed, we stop the recording and package the WAV file
    } else if (value == "stop") {
        // we stop recording
        recording = false;
        document.getElementById('output').innerHTML = "Building wav file...";

        // we flatten the left and right channels down
        var leftBuffer = mergeBuffers(leftchannel, recordingLength);
        var rightBuffer = mergeBuffers(rightchannel, recordingLength);
        // we interleave both channels together
        var interleaved = interleave(leftBuffer, rightBuffer);

        // 44-byte WAV header followed by 16-bit PCM samples
        var buffer = new ArrayBuffer(44 + interleaved.length * 2);
        var view = new DataView(buffer);

        // RIFF chunk descriptor
        writeUTFBytes(view, 0, 'RIFF');
        view.setUint32(4, 44 + interleaved.length * 2, true);
        writeUTFBytes(view, 8, 'WAVE');
        // FMT sub-chunk
        writeUTFBytes(view, 12, 'fmt ');
        view.setUint32(16, 16, true);
        view.setUint16(20, 1, true);
        // stereo (2 channels)
        view.setUint16(22, 2, true);
        view.setUint32(24, sampleRate, true);
        view.setUint32(28, sampleRate * 4, true); // byte rate = sampleRate * channels * bytesPerSample
        view.setUint16(32, 4, true);              // block align
        view.setUint16(34, 16, true);             // bits per sample
        // data sub-chunk
        writeUTFBytes(view, 36, 'data');
        view.setUint32(40, interleaved.length * 2, true);

        // write the PCM samples, scaled from float -1..1 to 16-bit integers
        var lng = interleaved.length;
        var index = 44;
        var volume = 1;
        for (var i = 0; i < lng; i++) {
            view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
            index += 2;
        }

        var blob = new Blob([view], {
            type: 'audio/wav'
        });

        // let's save it locally
        document.getElementById('output').innerHTML = 'Handing off the file now...';
        var url = (window.URL || window.webkitURL).createObjectURL(blob);
        var li = document.createElement('li');
        var au = document.createElement('audio');
        var hf = document.createElement('a');
        au.controls = true;
        au.src = url;
        hf.href = url;
        hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
        hf.innerHTML = hf.download;
        li.appendChild(au);
        li.appendChild(hf);
        recordingList.appendChild(li);
    }
}

function success(e) {
    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();
    volume = context.createGain();
    // creates an audio node from the microphone incoming stream (source)
    source = context.createMediaStreamSource(e);
    // connect the stream (source) to the gain node
    source.connect(volume);

    var bufferSize = 2048;
    recorder = context.createScriptProcessor(bufferSize, 2, 2);

    // node for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 512;

    splitter = context.createChannelSplitter();

    // when recording happens
    recorder.onaudioprocess = function (e) {
        if (!recording) return;
        var left = e.inputBuffer.getChannelData(0);
        var right = e.inputBuffer.getChannelData(1);
        leftchannel.push(new Float32Array(left));
        rightchannel.push(new Float32Array(right));
        recordingLength += bufferSize;

        // get the average for the first channel
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);

        var c = document.getElementById("myCanvas");
        var ctx = c.getContext("2d");
        // clear the current state
        ctx.clearRect(0, 0, 1000, 325);
        var gradient = ctx.createLinearGradient(0, 0, 0, 300);
        gradient.addColorStop(1, '#000000');
        gradient.addColorStop(0.75, '#ff0000');
        gradient.addColorStop(0.25, '#ffff00');
        gradient.addColorStop(0, '#ffffff');
        // set the fill style
        ctx.fillStyle = gradient;
        drawSpectrum(array);

        function drawSpectrum(array) {
            for (var i = 0; i < (array.length); i++) {
                var value = array[i];
                ctx.fillRect(i * 5, 325 - value, 3, 325);
            }
        }
    }

    function getAverageVolume(array) {
        var values = 0;
        var average;
        var length = array.length;
        // get all the frequency amplitudes
        for (var i = 0; i < length; i++) {
            values += array[i];
        }
        average = values / length;
        return average;
    }

    // we connect the recorder (node) to the destination (speakers)
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);
    analyser.connect(recorder);
    recorder.connect(context.destination);
}

function mergeBuffers(channelBuffer, recordingLength) {
    var result = new Float32Array(recordingLength);
    var offset = 0;
    var lng = channelBuffer.length;
    for (var i = 0; i < lng; i++) {
        var buffer = channelBuffer[i];
        result.set(buffer, offset);
        offset += buffer.length;
    }
    return result;
}

function interleave(leftChannel, rightChannel) {
    var length = leftChannel.length + rightChannel.length;
    var result = new Float32Array(length);
    var inputIndex = 0;
    for (var index = 0; index < length;) {
        result[index++] = leftChannel[inputIndex];
        result[index++] = rightChannel[inputIndex];
        inputIndex++;
    }
    return result;
}

function writeUTFBytes(view, offset, string) {
    var lng = string.length;
    for (var i = 0; i < lng; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}
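To hand the blob off to the server instead of only saving it locally, here is a sketch using FormData and fetch; the /upload endpoint and the field names match the server sketch above and are assumptions:

function uploadBlob(blob) {
    var form = new FormData();
    form.append('audio', blob, 'recording.wav');
    form.append('format', 'mp3'); // desired output format for ffmpeg
    fetch('/upload', { method: 'POST', body: form })
        .then(function (res) { return res.blob(); })
        .then(function (converted) {
            // offer the converted file for download
            var url = (window.URL || window.webkitURL).createObjectURL(converted);
            var a = document.createElement('a');
            a.href = url;
            a.download = 'recording.mp3';
            a.click();
        });
}

You would call uploadBlob(blob) from the "stop" branch of getVal, right after the Blob is built.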
I'm building HTML5 software that records a voice; while the voice plays, a visualizer should be in action.
Here is my code:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;

if (!navigator.getUserMedia) navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

if (navigator.getUserMedia) {
    navigator.getUserMedia({
        audio: true
    }, success, function (e) {
        alert('Error capturing audio.');
    });
} else alert('getUserMedia not supported in this browser.');

// when pressing record
function getVal(value) {
    // if R is pressed, we start recording
    if (value == "record") {
        recording = true;
        // reset the buffers for the new recording
        leftchannel.length = rightchannel.length = 0;
        recordingLength = 0;
        document.getElementById('output').innerHTML = "Recording now...";

    // if S is pressed, we stop the recording and package the WAV file
    } else if (value == "stop") {
        // we stop recording
        recording = false;
        document.getElementById('output').innerHTML = "Building wav file...";

        // we flatten the left and right channels down
        var leftBuffer = mergeBuffers(leftchannel, recordingLength);
        var rightBuffer = mergeBuffers(rightchannel, recordingLength);
        // we interleave both channels together
        var interleaved = interleave(leftBuffer, rightBuffer);

        var buffer = new ArrayBuffer(44 + interleaved.length * 2);
        var view = new DataView(buffer);

        // RIFF chunk descriptor
        writeUTFBytes(view, 0, 'RIFF');
        view.setUint32(4, 44 + interleaved.length * 2, true);
        writeUTFBytes(view, 8, 'WAVE');
        // FMT sub-chunk
        writeUTFBytes(view, 12, 'fmt ');
        view.setUint32(16, 16, true);
        view.setUint16(20, 1, true);
        // stereo (2 channels)
        view.setUint16(22, 2, true);
        view.setUint32(24, sampleRate, true);
        view.setUint32(28, sampleRate * 4, true);
        view.setUint16(32, 4, true);
        view.setUint16(34, 16, true);
        // data sub-chunk
        writeUTFBytes(view, 36, 'data');
        view.setUint32(40, interleaved.length * 2, true);

        var lng = interleaved.length;
        var index = 44;
        var volume = 1;
        for (var i = 0; i < lng; i++) {
            view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
            index += 2;
        }

        var blob = new Blob([view], {
            type: 'audio/wav'
        });

        // let's save it locally
        document.getElementById('output').innerHTML = 'Handing off the file now...';
        var url = (window.URL || window.webkitURL).createObjectURL(blob);
        var li = document.createElement('li');
        var au = document.createElement('audio');
        var hf = document.createElement('a');
        au.controls = true;
        au.src = url;
        hf.href = url;
        hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
        hf.innerHTML = hf.download;
        li.appendChild(au);
        li.appendChild(hf);
        recordingList.appendChild(li);
    }
}

function success(e) {
    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();
    volume = context.createGain();
    // creates an audio node from the microphone incoming stream (source)
    source = context.createMediaStreamSource(e);
    // connect the stream (source) to the gain node
    source.connect(volume);

    var bufferSize = 2048;
    recorder = context.createScriptProcessor(bufferSize, 2, 2);

    // nodes for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 1024;
    analyser2 = context.createAnalyser();
    analyser2.smoothingTimeConstant = 0.0;
    analyser2.fftSize = 1024;

    splitter = context.createChannelSplitter();

    // when recording happens
    recorder.onaudioprocess = function (e) {
        if (!recording) return;
        var left = e.inputBuffer.getChannelData(0);
        var right = e.inputBuffer.getChannelData(1);

        // get the average of the first channel, bincount is fftsize / 2
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        var average = getAverageVolume(array);

        // get the average for the second channel
        var array2 = new Uint8Array(analyser2.frequencyBinCount);
        analyser2.getByteFrequencyData(array2);
        var average2 = getAverageVolume(array2);

        // clear the current state
        ctx.clearRect(0, 0, 60, 130);
        // set the fill style
        ctx.fillStyle = gradient;
        // create the meters
        ctx.fillRect(0, 130 - average, 25, 130);
        ctx.fillRect(30, 130 - average2, 25, 130);
    }

    function getAverageVolume(array) {
        var values = 0;
        var average;
        var length = array.length;
        // get all the frequency amplitudes
        for (var i = 0; i < length; i++) {
            values += array[i];
        }
        average = values / length;
        return average;
    }

    leftchannel.push(new Float32Array(left));
    rightchannel.push(new Float32Array(right));
    recordingLength += bufferSize;
}

// we connect the recorder (node) to the destination (speakers)
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
splitter.connect(analyser2, 1, 0);
analyser.connect(recorder);
recorder.connect(context.destination);

function mergeBuffers(channelBuffer, recordingLength) {
    var result = new Float32Array(recordingLength);
    var offset = 0;
    var lng = channelBuffer.length;
    for (var i = 0; i < lng; i++) {
        var buffer = channelBuffer[i];
        result.set(buffer, offset);
        offset += buffer.length;
    }
    return result;
}

function interleave(leftChannel, rightChannel) {
    var length = leftChannel.length + rightChannel.length;
    var result = new Float32Array(length);
    var inputIndex = 0;
    for (var index = 0; index < length;) {
        result[index++] = leftChannel[inputIndex];
        result[index++] = rightChannel[inputIndex];
        inputIndex++;
    }
    return result;
}

function writeUTFBytes(view, offset, string) {
    var lng = string.length;
    for (var i = 0; i < lng; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}
My problem is that when running it, I get an error:
cannot read property 'connect' of null in this statement: volume.connect(splitter);
What is going wrong?
The volume gain node is created only after getUserMedia succeeds, inside the success function.
By the time the code reaches the volume.connect call, volume has not yet been assigned.
You have to chain all your node connections starting from success.
Quick fix: move these lines:
// we connect the recorder (node) to the destination (speakers)
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
splitter.connect(analyser2, 1, 0);
analyser.connect(recorder);
recorder.connect(context.destination);
to the end of the success function.
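So the tail of success would look like this (sketch):

function success(e) {
    // ... all the node creation shown in the question ...

    // we connect the recorder (node) to the destination (speakers)
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);
    splitter.connect(analyser2, 1, 0);
    analyser.connect(recorder);
    recorder.connect(context.destination);
} // the connections now run only once the stream (and thus volume) exists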