MediaRecorder API - Auto-record - JavaScript

I am currently trying to implement audio auto-recording: the user speaks, and after they stop, the audio should be submitted to the backend.
I already have a sample script that submits audio using start and stop click handlers.
I'm trying to get some kind of level reading, such as amplitude or volume, to compare against a threshold, but I'm not sure whether MediaRecorder supports this or whether I need to look at the Web Audio API or other solutions.
Can I achieve this with MediaRecorder?

MediaRecorder itself does not expose amplitude or volume, so for analysing the mic input you need the Web Audio API. The following example shows how to take the audio captured by the mic, create an analyser with the createAnalyser method of the AudioContext, connect the stream to the analyser, and compute an FFT of the specified size in order to estimate pitch and draw the output waveform.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext = null;
var isPlaying = false;
var sourceNode = null;
var analyser = null;
var theBuffer = null;
var audioCtx = null;
var mediaStreamSource = null;
var rafID = null;
var j = 0;
var buf = null;
var buflen = 1024;
var prex = 0;
var prey = 0;
var waveCanvas = null;
var canvasCtx = null;
window.onload = function() {
audioContext = new AudioContext();
audioCtx = document.getElementById( "waveform" ); // note: despite the name, this holds the <canvas> element
canvasCtx = audioCtx.getContext("2d");
};
function getUserMedia(dictionary, callback) {
try {
navigator.getUserMedia =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia;
navigator.getUserMedia(dictionary, callback, error);
} catch (e) {
alert('getUserMedia threw exception :' + e);
}
}
function gotStream(stream) {
// Create an AudioNode from the stream.
mediaStreamSource = audioContext.createMediaStreamSource(stream);
// Connect it to the destination.
analyser = audioContext.createAnalyser();
analyser.fftSize = 1024;
mediaStreamSource.connect( analyser );
updatePitch();
}
function toggleLiveInput()
{
canvasCtx.clearRect(0, 0, audioCtx.width, audioCtx.height);
canvasCtx.beginPath();
j = 0;
buflen = 1024;
buf = new Float32Array( buflen );
document.getElementById('toggleLiveInput').disabled = true;
document.getElementById('toggleLiveInputStop').disabled = false;
if (isPlaying) {
//stop playing and return
sourceNode.stop( 0 );
sourceNode = null;
//analyser = null;
isPlaying = false;
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
window.cancelAnimationFrame( rafID );
}
getUserMedia(
{
"audio": {
"mandatory": {
"googEchoCancellation": "false",
"googAutoGainControl": "false",
"googNoiseSuppression": "false",
"googHighpassFilter": "false"
},
"optional": []
},
}, gotStream);
}
function stop()
{
document.getElementById('toggleLiveInput').disabled = false;
document.getElementById('toggleLiveInputStop').disabled = true;
//waveCanvas.closePath();
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
window.cancelAnimationFrame( rafID );
return "start";
}
function updatePitch()
{
analyser.fftSize = 1024;
analyser.getFloatTimeDomainData(buf);
canvasCtx.strokeStyle = "red";
for (var i=0;i<2;i+=2) // note: this loop runs only once per frame (i = 0), so only buf[0] is plotted
{
x = j*5;
if(audioCtx.width < x)
{
x = audioCtx.width - 5;
previousImage = canvasCtx.getImageData(5, 0, audioCtx.width, audioCtx.height);
canvasCtx.putImageData(previousImage, 0, 0);
canvasCtx.beginPath();
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = "red";
prex = prex - 5;
canvasCtx.lineTo(prex,prey);
prex = x;
prey = 128+(buf[i]*128);
canvasCtx.lineTo(x,128+(buf[i]*128));
canvasCtx.stroke();
}
else
{
prex = x;
prey = 128+(buf[i]*128);
canvasCtx.lineWidth = 2;
canvasCtx.lineTo(x,128+(buf[i]*128));
canvasCtx.stroke();
}
j++;
}
if (!window.requestAnimationFrame)
window.requestAnimationFrame = window.webkitRequestAnimationFrame;
rafID = window.requestAnimationFrame( updatePitch );
}
function error() {
console.error(new Error('error while generating audio'));
}
Example adapted from pitch-liveinput.
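MediaRecorder itself will not give you an amplitude or threshold value, but you can run the same stream through an AnalyserNode and stop the recorder once the input stays quiet. Below is a minimal sketch of that auto-stop idea; the 0.01 RMS threshold and the 1500 ms silence window are arbitrary values you would tune, and getFloatTimeDomainData is missing in some older browsers.
navigator.mediaDevices.getUserMedia({ audio: true }).then(function (stream) {
  var ctx = new (window.AudioContext || window.webkitAudioContext)();
  var source = ctx.createMediaStreamSource(stream);
  var analyser = ctx.createAnalyser();
  analyser.fftSize = 1024;
  source.connect(analyser);
  var recorder = new MediaRecorder(stream);
  var chunks = [];
  recorder.ondataavailable = function (e) { chunks.push(e.data); };
  recorder.onstop = function () {
    var blob = new Blob(chunks, { type: recorder.mimeType });
    // POST the blob to your backend here, e.g. with fetch() and FormData.
  };
  recorder.start();
  var buf = new Float32Array(analyser.fftSize);
  var silenceStart = null;
  var SILENCE_THRESHOLD = 0.01; // RMS level treated as "quiet" (tune this)
  var SILENCE_MS = 1500;        // how long it must stay quiet before stopping (tune this)
  var timer = setInterval(function () {
    analyser.getFloatTimeDomainData(buf);
    var sum = 0;
    for (var i = 0; i < buf.length; i++) sum += buf[i] * buf[i];
    var rms = Math.sqrt(sum / buf.length);
    if (rms < SILENCE_THRESHOLD) {
      if (silenceStart === null) silenceStart = Date.now();
      else if (Date.now() - silenceStart > SILENCE_MS) {
        clearInterval(timer);
        recorder.stop(); // fires onstop, which submits the audio
      }
    } else {
      silenceStart = null; // still speaking, reset the silence timer
    }
  }, 100);
});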

Related

using microphone input for speech recognition and web audio together?

I would like to do speech analysis in the browser. I have a microphone input as my main stream, which is created when I start the speech recognition object. I would like to get frequencies from the same stream. How do I connect the audio context source to the same microphone stream as the voice recognition one? Do I have to request microphone permission twice? I tried the code below, but getMicData() only logs '0' values.
JS
var SpeechRecognition = SpeechRecognition || webkitSpeechRecognition
var requestAnimationFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame ||
window.webkitRequestAnimationFrame || window.msRequestAnimationFrame;
var cancelAnimationFrame = window.cancelAnimationFrame || window.mozCancelAnimationFrame;
let audioCtx, analyser;
let amplitude;
let bufferLength;
let dataArray;
let bassArray;
let trebleArray;
let recognition = new SpeechRecognition();
recognition.continuous = false;
recognition.lang = 'en-US';
recognition.interimResults = false;
recognition.maxAlternatives = 1;
let animationRequest;
const recordbtn = document.getElementById('record');
recordbtn.addEventListener('click', () => {
// start speech rec
recognition.start();
audioCtx = new(window.AudioContext || window.webkitAudioContext)();
analyser = audioCtx.createAnalyser();
analyser.fftSize = 512;
analyser.smoothingTimeConstant = 0.85;
})
recognition.onstart = function () {
document.getElementById('font-name').innerHTML = "START SPEAKING";
getMicData()
}
recognition.onspeechend = function () {
cancelAnimationFrame(animationRequest);
}
function getMicData() {
animationRequest = window.requestAnimationFrame(getMicData)
bufferLength = analyser.fftSize;
dataArray = new Uint8Array(bufferLength);
analyser.getByteFrequencyData(dataArray);
let maxAmp = 0;
let sumOfAmplitudes = 0;
for (let i = 0; i < bufferLength; i++) {
let thisAmp = dataArray[i]; // amplitude of current bin
if (thisAmp > maxAmp) {
sumOfAmplitudes = sumOfAmplitudes + thisAmp;
}
}
let averageAmplitude = sumOfAmplitudes / bufferLength;
console.log(averageAmplitude)
return averageAmplitude;
}
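For what it's worth, SpeechRecognition does not expose the microphone stream it uses internally, so the analyser above never has an input connected, which is why getMicData() only reads zeros (note also that maxAmp is never updated inside the loop, so the condition compares against 0 forever). A sketch of one fix, assuming you request the stream yourself with getUserMedia and wire it to the analyser; the browser will normally not prompt for permission a second time for the same origin:
recordbtn.addEventListener('click', function () {
  recognition.start();
  audioCtx = new (window.AudioContext || window.webkitAudioContext)();
  analyser = audioCtx.createAnalyser();
  analyser.fftSize = 512;
  analyser.smoothingTimeConstant = 0.85;
  // Request the mic ourselves; SpeechRecognition will not share its stream.
  navigator.mediaDevices.getUserMedia({ audio: true }).then(function (stream) {
    var source = audioCtx.createMediaStreamSource(stream);
    source.connect(analyser); // now getByteFrequencyData has real data to read
  });
});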

cross-browser Microphone access

I am looking for a solution to allow a website to access and process a stream of audio from the user's microphone. It's unfamiliar territory for me. I've been working with WebRTC examples, but so far I have only got it working on:
Firefox and Chrome on a 2011 MacBook Air running Sierra.
Firefox on Windows 10.
On other browser/OS combinations my script either throws errors or, in some cases, runs without errors but still doesn't function.
Is there a better solution?
!function(t,e){
"use strict";
t.AudioContext = t.AudioContext||t.webkitAudioContext,
t.OfflineAudioContext = t.OfflineAudioContext||t.webkitOfflineAudioContext;
var o=AudioContext.prototype,
r=new AudioContext,n=function(t,e){
return void 0===t&&void 0!==e
},
c=r.createBufferSource().constructor.prototype;
if(n(c.start,c.noteOn)||n(c.stop,c.noteOff)){
var i=o.createBufferSource;
o.createBufferSource=function(){
var t=i.call(this);
return t.start=t.start||t.noteOn,t.stop=t.stop||t.noteOff,t
}
}
if("function"==typeof r.createOscillator){
var a=r.createOscillator().constructor.prototype;
if(n(a.start,a.noteOn)||n(a.stop,a.noteOff)){
var s=o.createOscillator;o.createOscillator=function(){
var t=s.call(this);
return t.start=t.start||t.noteOn,t.stop=t.stop||t.noteOff,t
}
}
}
if(void 0===o.createGain&&void 0!==o.createGainNode&&(o.createGain=o.createGainNode),void 0===o.createDelay&&void 0!==o.createDelayNode&&(o.createDelay=o.createDelayNode),void 0===o.createScriptProcessor&&void 0!==o.createJavaScriptNode&&(o.createScriptProcessor=o.createJavaScriptNode),-1!==navigator.userAgent.indexOf("like Mac OS X")){
var u=AudioContext;t.AudioContext=function(){
function t(){
r.start(0),r.connect(n),n.connect(e.destination)
}
var e=new u,
o=document.body,
r=e.createBufferSource(),
n=e.createScriptProcessor(256,1,1);
return o.addEventListener("touchstart",t,!1),n.onaudioprocess=function(){
r.disconnect(),
n.disconnect(),
o.removeEventListener("touchstart",t,!1),
n.onaudioprocess=null
},e
}
}
}(window);
var context, analyser, gUM, dataArray, bufferLength, connect_source;
if (AudioContext){
context = new AudioContext();
analyser = context.createAnalyser();
function success(stream){
// Create a new volume meter and connect it.
var source = context.createMediaStreamSource(stream);
var compressor = context.createDynamicsCompressor();
compressor.threshold.value = -50;
compressor.knee.value = 40;
compressor.ratio.value = 12;
compressor.reduction.value = -20; // note: reduction is read-only in the current spec, so this is a no-op in modern browsers
compressor.attack.value = 0;
compressor.release.value = 0.25;
var filter = context.createBiquadFilter();
filter.Q.value = 8.30;
filter.frequency.value = 355;
filter.gain.value = 3.0;
filter.type = 'bandpass';
filter.connect(compressor);
source.connect( filter );
source.connect(analyser);
analyser.fftSize = 512;
bufferLength = analyser.frequencyBinCount; // half the FFT value
dataArray = new Uint8Array(bufferLength); // create an array to store the data
};
function fail(e){
if(e){}
console.log(e);
aizuchi.error();
};
var select = document.getElementById("AudioSourceSelect");
function generateSelector(devices){
while(select.firstChild) select.removeChild(select.firstChild);
var opt;
for(var l = devices.length; l--;){
console.log(devices[l]);
if(devices[l].kind == "audioinput"){
opt = document.createElement("option")
opt.text = devices[l].label
opt.value = devices[l].deviceId
if(devices[l].deviceId == "default") opt.setAttribute("selected","")
select.appendChild( opt );
}
}
select.onchange = function(){
connect_source(this.value);
}
select.onchange();
}
try {
navigator.mediaDevices.enumerateDevices().then(generateSelector)
} catch(e){
fail(e);
}
connect_source = function(audioSource){
try {
if(Modernizr.getusermedia){
gUM = Modernizr.prefixed('getUserMedia', navigator);
gUM({video:false, audio : {deviceId: audioSource ? {exact: audioSource} : undefined}},success,fail);
} else {
navigator.mediaDevices.getUserMedia({video:false, audio : {deviceId: audioSource ? {exact: audioSource} : undefined}}).then(success,fail);
}
} catch(e){
fail(e);
}
}
}
Try
var AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
It doesn't look like the browsers have unified on the syntax of this one yet.
Source: MDN
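For completeness: on current browsers the promise-based navigator.mediaDevices.getUserMedia is the standard entry point, so a cross-browser capture sketch with feature detection might look like this:
var AudioContext = window.AudioContext || window.webkitAudioContext;
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia && AudioContext) {
  navigator.mediaDevices.getUserMedia({ audio: true })
    .then(function (stream) {
      var context = new AudioContext();
      // Some browsers keep the context suspended until a user gesture.
      if (context.state === 'suspended') context.resume();
      var source = context.createMediaStreamSource(stream);
      var analyser = context.createAnalyser();
      analyser.fftSize = 512;
      source.connect(analyser);
    })
    .catch(function (e) { console.error('Microphone access failed:', e); });
} else {
  console.warn('getUserMedia or Web Audio is not supported in this browser.');
}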

Variable is null (only in Mozilla)

Here is a summary of the code:
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(e);
var bufferSize = 2048;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
//(...)
}
//(....)
client.on('open', function() {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
}
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false
//window.Stream.end();
window.Stream.pause();
}
store_data.onaudioprocess = function(e){ //<---line of the error
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
}
//(..events generated from server..)
In Chrome my code works just fine. In Firefox I always get the error "store_data is undefined". Any idea why? I declare store_data as a global, and when getUserMedia succeeds its value is set.
Without knowing what calls the success function, it's difficult to say exactly, but I am fairly sure you want your client.on('open') listener to be contingent on the success function running.
I don't know how it will affect the rest of the omitted code, but I would only connect the BinaryClient when the success function has run and you are sure you have store_data defined.
function success() {
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
// do the original success code here
// now create that listener.
client.on('open', function() {
// do original code here
});
}
// you probably have a line of code that looks like this
navigator.getUserMedia({}, success);
Moving all of your code into the success function may work, but it won't be elegant. Once you've got the flow working, I would suggest refactoring the code, by splitting each logical bit up into its own function.
Yes it's a race. Your code must wait until getUserMedia succeeds and open is fired.
Promises are a great way to solve this:
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
(Use the above polyfill to access modern getUserMedia in all supported browsers.)
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
var haveStoreData = navigator.mediaDevices.getUserMedia({audio:true})
.then(function(stream) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 2048;
return context.createScriptProcessor(bufferSize, 1, 1);
});
//(....)
client.on('open', function() {
console.log("opened");
haveStoreData.then(function(store_data) {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
};
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false;
//window.Stream.end();
window.Stream.pause();
};
store_data.onaudioprocess = function(e){
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
};
//(..events generated from server..)
})
.catch(function(e) { console.error(e); });
});
This will give users time to choose "Allow" in the mic permission prompt (Unlike Chrome, Firefox asks the user for permission every time, unless they choose "Always Allow").
var client = new BinaryClient('ws://193.136.94.233:9001');
var context = null;
var ganho = null; // the gain node ("ganho" is Portuguese for gain)
var store_data = null;
//(.....)
navigator.mediaDevices.getUserMedia({audio:true}) .then( function(stream){
context = new AudioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 4096;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
biquadFilter = context.createBiquadFilter();
biquadFilter.type = "lowpass";
biquadFilter.frequency.value = 11500;
biquadFilter.Q.value = 3;
ganho = context.createGain();
ganho.gain.value=0.5;
//audioInput.connect(ganho);//compresso
//ganho.connect(recorder);
//recorder.connect(context.destination);
audioInput.connect(biquadFilter);
biquadFilter.connect(ganho);
ganho.connect(store_data);
store_data.connect(context.destination);
store_data.onaudioprocess = function(e){
if(!recording){
//console.log("nada faz nada desta vida")
return;
}
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
Stream.write(convertoFloat32ToInt16(left));
}
//audioInput.connect(store_data);
} ) .catch( function(e){ console.log(e) } );
//(...)
client.on('open', function() {
console.log("opened connection");
//haveStoreData.then(function(store_data) {
Stream = client.createStream(command_list);
//recording = false;
//(........)
});
//Other function
Here is the solution for streaming with BinaryJS from both Chrome and Firefox. Thanks to @jib and @Kaiido.
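Note that the snippets above call convertoFloat32ToInt16 without defining it. A typical implementation of such a helper, assuming the server expects 16-bit signed PCM, might look like this:
function convertoFloat32ToInt16(buffer) {
  var l = buffer.length;
  var result = new Int16Array(l);
  while (l--) {
    // Clamp to [-1, 1], then scale to the signed 16-bit integer range.
    var s = Math.max(-1, Math.min(1, buffer[l]));
    result[l] = s < 0 ? s * 0x8000 : s * 0x7FFF;
  }
  return result.buffer; // the underlying ArrayBuffer, ready to write to the stream
}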

How do I get audio data from microphone using AudioContext HTML5

I'm trying to get a stream of data from my microphone (e.g. volume, pitch).
For now, I've been using getUserMedia to access my microphone audio,
but I couldn't find a way to extract the data from it.
My code :
$(function () {
var audioContext = new AudioContext();
var audioInput = null,
realAudioInput = null,
inputPoint = null,
analyserNode = null;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function success(stream){
inputPoint = audioContext.createGain();
realAudioInput = audioContext.createMediaStreamSource(stream);
audioInput = realAudioInput;
audioInput.connect(inputPoint);
analyserNode = audioContext.createAnalyser();
analyserNode.fftSize = 2048;
inputPoint.connect( analyserNode );
}
function live(){
requestAnimationFrame(live);
var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);
analyserNode.getByteFrequencyData(freqByteData);
console.log(analyserNode.frequencyBinCount);
}
});
Here is a version of your code which does two things:
It retrieves the raw PCM audio buffer from the live microphone and sends it to console.log (to show the JavaScript console, hit Ctrl-Shift-I); this is the raw PCM audio curve of the streaming mic data in the time domain.
It also runs the same audio data through an FFT (fast Fourier transform), which is likewise sent to console.log; this is the frequency-domain representation of the same Web Audio API event-loop buffer.
NOTE - either wear headphones OR turn down your speaker volume, otherwise you will hear the squeal of audio feedback as the mic picks up the speaker audio, à la Jimi Hendrix!
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>
<script type="text/javascript">
var webaudio_tooling_obj = function () {
var audioContext = new AudioContext();
console.log("audio is starting up ...");
var BUFF_SIZE_RENDERER = 16384;
var audioInput = null,
microphone_stream = null,
gain_node = null,
script_processor_node = null,
script_processor_analysis_node = null,
analyser_node = null;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true},
function(stream) {
start_microphone(stream);
},
function(e) {
alert('Error capturing audio.');
}
);
} else { alert('getUserMedia not supported in this browser.'); }
// ---
function show_some_data(given_typed_array, num_row_to_display, label) {
var size_buffer = given_typed_array.length;
var index = 0;
console.log("__________ " + label);
if (label === "time") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
var curr_value_time = (given_typed_array[index] / 128) - 1.0;
console.log(curr_value_time);
}
} else if (label === "frequency") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
console.log(given_typed_array[index]);
}
} else {
throw new Error("ERROR - must pass time or frequency");
}
}
function process_microphone_buffer(event) {
var i, N, inp, microphone_output_buffer;
microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
}
function start_microphone(stream){
gain_node = audioContext.createGain();
gain_node.connect( audioContext.destination );
microphone_stream = audioContext.createMediaStreamSource(stream);
microphone_stream.connect(gain_node);
script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
script_processor_node.onaudioprocess = process_microphone_buffer;
microphone_stream.connect(script_processor_node);
// --- enable volume control for output speakers
document.getElementById('volume').addEventListener('change', function() {
var curr_volume = this.value;
gain_node.gain.value = curr_volume;
console.log("curr_volume ", curr_volume);
});
// --- setup FFT
script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
script_processor_analysis_node.connect(gain_node);
analyser_node = audioContext.createAnalyser();
analyser_node.smoothingTimeConstant = 0;
analyser_node.fftSize = 2048;
microphone_stream.connect(analyser_node);
analyser_node.connect(script_processor_analysis_node);
var buffer_length = analyser_node.frequencyBinCount;
var array_freq_domain = new Uint8Array(buffer_length);
var array_time_domain = new Uint8Array(buffer_length);
console.log("buffer_length " + buffer_length);
script_processor_analysis_node.onaudioprocess = function() {
// get the average for the first channel
analyser_node.getByteFrequencyData(array_freq_domain);
analyser_node.getByteTimeDomainData(array_time_domain);
// draw the spectrogram
if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) { // legacy properties; both are undefined in modern browsers, so this check still passes
show_some_data(array_freq_domain, 5, "frequency");
show_some_data(array_time_domain, 5, "time"); // store this to record to aggregate buffer/file
}
};
}
}(); // webaudio_tooling_obj = function()
</script>
</head>
<body>
<p>Volume</p>
<input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
</body>
</html>
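If you want actual volume and pitch numbers rather than raw buffers, you can derive an RMS level from the time-domain data and take the loudest FFT bin as a rough pitch estimate. A sketch (bin-peak picking is a crude pitch detector; proper pitch tracking needs autocorrelation or similar):
function measure(analyser_node, audioContext) {
  var timeData = new Uint8Array(analyser_node.fftSize);
  var freqData = new Uint8Array(analyser_node.frequencyBinCount);
  analyser_node.getByteTimeDomainData(timeData);
  analyser_node.getByteFrequencyData(freqData);
  // Volume: RMS of the time-domain samples, rescaled from [0, 255] to [-1, 1].
  var sum = 0;
  for (var i = 0; i < timeData.length; i++) {
    var v = (timeData[i] - 128) / 128;
    sum += v * v;
  }
  var rms = Math.sqrt(sum / timeData.length);
  // Pitch (rough): frequency of the loudest FFT bin.
  var peak = 0;
  for (var j = 1; j < freqData.length; j++) {
    if (freqData[j] > freqData[peak]) peak = j;
  }
  var hz = peak * audioContext.sampleRate / analyser_node.fftSize;
  return { volume: rms, pitchHz: hz };
}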

WebRTC: is it possible to convert an image to a MediaStream?

I am building WebRTC video chat.
We need to send an image instead of video; someone said an image can be converted to a MediaStream.
I tried converting the image to base64 and calling addStream, but it failed. How do I do that?
var imagestream = getBase64FromImageUrl('./unown.png');
function getBase64FromImageUrl(URL) {
var img = new Image();
img.src = URL;
img.onload = function () {
var canvas = document.createElement("canvas");
canvas.width =this.width;
canvas.height =this.height;
var ctx = canvas.getContext("2d");
ctx.drawImage(this, 0, 0);
var dataURL = canvas.toDataURL("image/png");
alert( dataURL.replace(/^data:image\/(png|jpg);base64,/, ""));
}
}
Try Whammy.js: a real-time JavaScript WebM encoder.
Try Recorder.js: this one is for audio (if you need it). ;)
JS (script.js):
/*Adapating for different vendors*/
window.URL =
window.URL ||
window.webkitURL ||
window.mozURL ||
window.msURL;
window.requestAnimationFrame =
window.requestAnimationFrame ||
window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.msRequestAnimationFrame ||
window.oRequestAnimationFrame;
window.cancelAnimationFrame =
window.cancelAnimationFrame ||
window.webkitCancelAnimationFrame ||
window.mozCancelAnimationFrame ||
window.msCancelAnimationFrame ||
window.oCancelAnimationFrame;
navigator.getUserMedia =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
window.AudioContext =
window.AudioContext ||
window.webkitAudioContext;
/*Global stuff*/
var video = get('video');
video.width = 320;
video.height = 240;
var canvas = document.createElement('canvas');
var rafId = null;
var frames = [];
var audioContext = new AudioContext;
var audioRecorder;
/*Save typing :) */
function get(selector) {
return document.querySelector(selector) || null;
}
/*Wrapper for recording*/
function recordIt() {
var record = get('#record');
record.textContent = record.disabled ? 'Record' : 'Recording...';
record.classList.toggle('recording');
record.disabled = !record.disabled;
}
/*Get Media (Video and Audio) from user*/
function getMedia(event) {
event.target.disabled = true;
get('#record').disabled = false;
video.controls = false;
var setVideo = function() {
setTimeout(function() {
video.width = 320;
video.height = 240;
canvas.width = video.width;
canvas.height = video.height;
}, 1000);
};
if (navigator.getUserMedia) {
navigator.getUserMedia({video: true, audio: true}, function(stream) {
if (video.mozSrcObject !== undefined) {
video.mozSrcObject = stream;
} else {
video.src = (window.URL && window.URL.createObjectURL(stream)) || stream;
}
var audioInput = audioContext.createMediaStreamSource(stream);
audioInput.connect(audioContext.destination);
audioRecorder = new Recorder(audioInput);
setVideo();
}, function(e) {
alert('Error'+e);
console.log(e)
});
} else {
console.log('getUserMedia() not supported in this browser.');
}
};
/*Record function: Draws frames and pushes to array*/
function record() {
var context = canvas.getContext('2d');
var CANVAS_HEIGHT = canvas.height;
var CANVAS_WIDTH = canvas.width;
frames = [];
recordIt();
get('#stop').disabled = false;
function draw(time) {
rafId = requestAnimationFrame(draw);
context.drawImage(video, 0, 0, CANVAS_WIDTH, CANVAS_HEIGHT);
var url = canvas.toDataURL('image/webp', 1);
frames.push(url);
};
rafId = requestAnimationFrame(draw);
//Audio stuff
audioRecorder.clear();
audioRecorder.record();
};
/*Stop Recording*/
function stop() {
cancelAnimationFrame(rafId);
get('#stop').disabled = true;
recordIt();
setVideo();
//Audio stuff
audioRecorder.stop();
setAudio();
};
/*Call Whammy for creating video*/
function setVideo(vidUrl) {
var url = vidUrl || null;
var video = get('#recordedDiv video') || null;
if (!video) {
video = document.createElement('video');
video.autoplay = true;
video.controls = true;
video.style.width = canvas.width + 'px';
video.style.height = canvas.height + 'px';
get('#recordedDiv').appendChild(video);
} else {
window.URL.revokeObjectURL(video.src);
}
if (!url) {
var webmBlob = Whammy.fromImageArray(frames, 1000 / 60);
url = window.URL.createObjectURL(webmBlob);
}
video.src = url;
}
function setAudio() {
audioRecorder.exportWAV(function(blob) {
var audio = get('#recordedDiv audio') || null;
var url = URL.createObjectURL(blob);
if(!audio) {
var audio = document.createElement('audio');
audio.autoplay = true;
audio.controls = true;
audio.src = url;
get('#recordedDiv').appendChild(audio);
}
else {
audio.src = url;
}
});
}
/*Fingers Crossed*/
function init() {
get('#camera').addEventListener('click', getMedia);
get('#record').addEventListener('click', record);
get('#stop').addEventListener('click', stop);
}
init();
HTML
<html><head>
<meta charset="utf-8">
<title>Record and Play Simple Messages</title>
<link rel="stylesheet" type="text/css" href="./css/style.css">
<style type="text/css"></style></head>
<body>
Records WebM video and audio using the Web Audio API, whammy.js and recorder.js.
WebP images are not supported in Firefox, hence it fails there. It works on Chrome, though.
<section>
<div>
<video autoplay="" width="320" height="240"></video><br>
<button id="camera">GetUserMedia</button>
</div>
<div id="recordedDiv">
<button id="record" disabled="">Record</button>
<button id="stop" disabled="">Stop</button><br>
</div>
</section>
<script type="text/javascript" src="./js/whammy.min.js"></script>
<script type="text/javascript" src="./js/recorder.js"></script>
<script type="text/javascript" src="./js/script.js"></script>
</body></html>
I know I am answering a bit late, and this is only applicable to Firefox (41 and above): you can create a MediaStream from the canvas using CanvasCaptureMediaStream.
Edit: they are implementing this media capture option in Chrome as well; you can follow the issue here.
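Since then, canvas.captureStream() has shipped in both Firefox and Chrome, so the whole trick reduces to drawing the image onto a canvas and feeding the captured stream to the peer connection. A minimal sketch, where pc stands for your already-created RTCPeerConnection:
var img = new Image();
img.src = './unown.png';
img.onload = function () {
  var canvas = document.createElement('canvas');
  canvas.width = img.width;
  canvas.height = img.height;
  var ctx = canvas.getContext('2d');
  // captureStream only emits frames when the canvas repaints,
  // so keep redrawing the image at the desired rate.
  setInterval(function () { ctx.drawImage(img, 0, 0); }, 100);
  var stream = canvas.captureStream(10); // a video MediaStream at up to 10 fps
  stream.getTracks().forEach(function (track) {
    pc.addTrack(track, stream); // pc: your RTCPeerConnection (assumed to exist)
  });
};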
