I need to base64-encode a WAV binary file that I get with the well-known Recorder.js project (https://github.com/mattdiamond/Recorderjs):
function ($scope) {
var ctrl = this;
var gumStream;
//stream from getUserMedia()
var rec;
//Recorder.js object
var input;
var audioContext = new AudioContext();
//new audio context to help us record
var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");
$scope.properties.value = undefined;
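// request body for the speech-recognition service
// (presumably a Google Cloud Speech-to-Text "recognize" request, given the config/audio shape)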
var srConfig = {"config":
{
"sampleRateHertz":16000,
"languageCode": $scope.properties.languageCode
},
"audio":
{"content":""}
};
/* Disable the record button until we get a success or fail from getUserMedia() */
this.startRecording = function(){
console.log("recordButton clicked");
var constraints = { audio: true, video:false };
recordButton.disabled = true;
stopButton.disabled = false;
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
console.log("getUserMedia() success, stream created, initializing Recorder.js ...");
document.getElementById("formats").innerHTML="Format: 1 channel pcm # "+audioContext.sampleRate/1000+"kHz";
/* assign to gumStream for later use */
srConfig.config.sampleRateHertz = audioContext.sampleRate;
gumStream = stream;
/* use the stream */
input = audioContext.createMediaStreamSource(stream);
rec = new Recorder(input,{numChannels:1});
//start the recording process
rec.record();
console.log("Recording started");
setTimeout(function(){
ctrl.stopRecording();
}, parseInt($scope.properties.secondsToRegister)*1000);
}).catch(function(err) {
//enable the record button if getUserMedia() fails
console.log("Error in getUserMedia");
console.log(err);
recordButton.disabled = false;
stopButton.disabled = true;
});
};
this.stopRecording = function(){
console.log("stopButton clicked");
//disable the stop button, enable the record button to allow for new recordings
stopButton.disabled = true;
recordButton.disabled = false;
//tell the recorder to stop the recording
rec.stop(); //stop microphone access
gumStream.getAudioTracks()[0].stop();
//create the wav blob and pass it on to callDetectorService
var response = rec.exportWAV(this.callDetectorService);
};
this.callDetectorService = function(blob){
var xhr=new XMLHttpRequest();
if ("withCredentials" in xhr){
console.log("withCredentials supported");
}
else{
console.log("withCredentials NOT supported");
}
var response;
xhr.onload=function(e) {
if(this.readyState === 4) {
console.log("Server returned: ", e.target.responseText);
$scope.$apply(function () {
$scope.properties.value = JSON.parse(e.target.responseText);
});
response = e.target.responseText;
}
};
srConfig.audio.content=btoa(unescape(encodeURIComponent(blob)));
console.log(srConfig);
xhr.open("POST",$scope.properties.serviceUrl+"?key="+$scope.properties.secretkey,true);
var data = JSON.stringify(srConfig);
xhr.send(data);
};
}
When the request is submitted (the xhr.send(data) line), I then get the following error:
Server returned: {
"error": {
"code": 400,
"message": "Invalid recognition 'config': bad encoding..",
"status": "INVALID_ARGUMENT"
}
}
This leads me to think that the mistake is in the line that does the encoding:
srConfig.audio.content=btoa(unescape(encodeURIComponent(blob)));
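If that suspicion is right, btoa() here receives the string "[object Blob]" rather than the file's bytes. A minimal FileReader-based sketch of encoding the blob's actual content (illustrative only, not tested against the service):
function blobToBase64(blob, callback) {
    var reader = new FileReader();
    reader.onloadend = function() {
        // reader.result is a data URL like "data:audio/wav;base64,AAAA..."
        // keep only the base64 payload after the comma
        callback(reader.result.split(",")[1]);
    };
    reader.readAsDataURL(blob);
}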
Thank you
I am very new to JavaScript; I know some basics but have not yet completely understood the logic behind it (so far I have only worked with Python and a little bit of VBA).
For uni I have to build a browser interface to record audio and transfer it to a server where a speech-to-text application runs. I found some open-source code here (https://github.com/mdn/dom-examples/blob/main/media/web-dictaphone/scripts/app.js) which I wanted to use, but it is missing the WebSocket part, and I don't know where exactly to insert that. So far I have this:
code of the Webdictaphone:
// set up basic variables for app
const record = document.querySelector('.record');
const stop = document.querySelector('.stop');
const soundClips = document.querySelector('.sound-clips');
const canvas = document.querySelector('.visualizer');
const mainSection = document.querySelector('.main-controls');
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
const constraints = { audio: true };
let chunks = [];
let onSuccess = function(stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
}
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
}
mediaRecorder.onstop = function(e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt('Enter a name for your sound clip?','My unnamed clip');
const clipContainer = document.createElement('article');
const clipLabel = document.createElement('p');
const audio = document.createElement('audio');
const deleteButton = document.createElement('button');
clipContainer.classList.add('clip');
audio.setAttribute('controls', '');
deleteButton.textContent = 'Delete';
deleteButton.className = 'delete';
if(clipName === null) {
clipLabel.textContent = 'My unnamed clip';
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function(e) {
e.target.closest(".clip").remove();
}
clipLabel.onclick = function() {
const existingName = clipLabel.textContent;
const newClipName = prompt('Enter a new name for your sound clip?');
if(newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
}
}
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
}
let onError = function(err) {
console.log('The following error occurred: ' + err);
}
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log('getUserMedia not supported on your browser!');
}
WebSocket part (client side):
window.addEventListener("DOMContentLoaded", () => {
// Open the WebSocket connection and register event handlers.
console.log('DOMContentLoaded done');
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
dataToBeSent = function (data) {
ws.send(data);
};
console.log('ws is defined');
})
Right now I have just stacked the two parts on top of each other, but this doesn't work since, as I found out, you can only use variables (such as ws) within the block where they are defined. This leads to an error saying that ws is not defined when I call the sending function within the if-statement.
I have already spent hours looking for tutorials, but none that I found covered this topic. I also tried moving the WebSocket part into the if-statement, but that, unsurprisingly, didn't work either, at least not in the way I tried it.
I feel like my problem lies in understanding how to define the WebSocket so that I can use it within the if-statement, or in figuring out how to get the audio to somewhere where ws is defined. Unfortunately I just can't get my head around it, and I have already invested days, which has become really frustrating.
I appreciate any help. If you have any ideas about what I could change or move in the code, or know of a tutorial that could help, I'd be really grateful.
Thanks in advance!
You don't need the window.addEventListener("DOMContentLoaded", () => { ... }) wrapper. Define the WebSocket at the top level of the script so it is in scope everywhere below:
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
dataToBeSent = function (data) {
ws.send(data);
};
const record = document.querySelector(".record");
const stop = document.querySelector(".stop");
const soundClips = document.querySelector(".sound-clips");
const canvas = document.querySelector(".visualizer");
const mainSection = document.querySelector(".main-controls");
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log("getUserMedia supported.");
const constraints = { audio: true };
let chunks = [];
let onSuccess = function (stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function () {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
};
stop.onclick = function () {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
};
mediaRecorder.onstop = function (e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt(
"Enter a name for your sound clip?",
"My unnamed clip"
);
const clipContainer = document.createElement("article");
const clipLabel = document.createElement("p");
const audio = document.createElement("audio");
const deleteButton = document.createElement("button");
clipContainer.classList.add("clip");
audio.setAttribute("controls", "");
deleteButton.textContent = "Delete";
deleteButton.className = "delete";
if (clipName === null) {
clipLabel.textContent = "My unnamed clip";
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
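// send the finished clip to the server over the WebSocket
// (assumption: this is the intended hand-off point; the individual
// chunks could also be streamed from ondataavailable instead)
dataToBeSent(blob);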
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function (e) {
e.target.closest(".clip").remove();
};
clipLabel.onclick = function () {
const existingName = clipLabel.textContent;
const newClipName = prompt("Enter a new name for your sound clip?");
if (newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
};
};
mediaRecorder.ondataavailable = function (e) {
chunks.push(e.data);
};
};
let onError = function (err) {
console.log("The following error occured: " + err);
};
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log("getUserMedia not supported on your browser!");
}
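With ws created at the top level it stays in scope inside mediaRecorder.onstop, so the finished clip (or the individual chunks from ondataavailable) can be handed to dataToBeSent, as sketched in the onstop handler above.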
Requirement:
- Container: WAV
- Encoding: PCM
- Rate: 16 kHz
- Sample format: 16-bit
- Channels: mono

My output:
- Container: WAV
- Encoding: PCM
- Rate: 16 kHz
- Sample format: 32-bit float
- Channels: mono
I need to get an audio output with a sample format of 16-bit PCM; currently the only output I get is 32-bit float.
My code :
URL = window.URL || window.webkitURL;
var gumStream;
//stream from getUserMedia()
var rec;
//Recorder.js object
var input;
//MediaStreamAudioSourceNode we'll be recording
// shim for AudioContext when it's not avb.
//new audio context to help us record
var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");
var pauseButton = document.getElementById("pauseButton");
var recordButton_ = document.getElementById("recordButton_");
var stopButton_ = document.getElementById("stopButton_");
var pauseButton_ = document.getElementById("pauseButton_");
//add events to those 3 buttons
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);
pauseButton.addEventListener("click", pauseRecording);
function startRecording() {
var AudioContext = (window.AudioContext) || (window.webkitAudioContext)
var audioContext = new AudioContext({
sampleRate: 16000,
});
console.log("recordButton clicked");
/* Simple constraints object, for more advanced audio features see
https://addpipe.com/blog/audio-constraints-getusermedia/ */
var constraints = {
audio: true,
video: false
}
/* Disable the record button until we get a success or fail from getUserMedia() */
recordButton.disabled = true;
stopButton.disabled = false;
pauseButton.disabled = false
/* We're using the standard promise based getUserMedia()
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia */
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
console.log("getUserMedia() success, stream created, initializing Recorder.js ...");
/* assign to gumStream for later use */
gumStream = stream;
/* use the stream */
input = audioContext.createMediaStreamSource(stream);
/* Create the Recorder object and configure to record mono sound (1 channel) Recording 2 channels will double the file size */
rec = new Recorder(input, {
numChannels: 1
})
//start the recording process
rec.record()
console.log("Recording started");
}).catch(function(err) {
//enable the record button if getUserMedia() fails
recordButton.disabled = false;
stopButton.disabled = true;
pauseButton.disabled = true
});
}
function pauseRecording() {
console.log("pauseButton clicked rec.recording=", rec.recording);
if (rec.recording) {
//pause
rec.stop();
pauseButton.innerHTML = "Resume";
} else {
//resume
rec.record()
pauseButton.innerHTML = "Pause";
}
}
function stopRecording() {
console.log("stopButton clicked");
//disable the stop button, enable the record button to allow for new recordings
stopButton.disabled = true;
recordButton.disabled = false;
pauseButton.disabled = true;
//reset button just in case the recording is stopped while paused
pauseButton.innerHTML = "Pause";
//tell the recorder to stop the recording
rec.stop(); //stop microphone access
gumStream.getAudioTracks()[0].stop();
//create the wav blob and pass it on to createDownloadLink
rec.exportWAV(createDownloadLink);
}
function createDownloadLink(blob) {
var url = URL.createObjectURL(blob);
var au = document.createElement('audio');
var li = document.createElement('li');
var link = document.createElement('a');
//add controls to the <audio> element
au.controls = true;
au.src = url;
au.sampleRate = 16000
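// note: <audio> elements have no sampleRate property, so the line above has no effect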
//link the a element to the blob
link.href = url;
// link.download = new Date().toISOString() + '.wav';
link.innerHTML = link.download;
//add the new audio and a elements to the li element
li.appendChild(au);
li.appendChild(link);
//add the li element to the ordered list
recordingsList.appendChild(li);
var p = document.createElement("br");
recordingsList.appendChild(p);
}
There's nothing wrong with your code; by default the output provided by the Web Audio API is 32-bit float. You will need to process it, for example with the BitCrusher node described in the documentation:
https://webaudio.github.io/web-audio-api/#the-bitcrusher-node
Hope this helps.
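As an alternative, a common approach is to clamp and scale the Float32 samples to Int16 yourself before writing the WAV file. A minimal sketch, assuming samples in [-1, 1] (the function name is illustrative):
function floatTo16BitPCM(float32Samples) {
    // clamp each sample to [-1, 1], then scale to the signed 16-bit range
    var int16 = new Int16Array(float32Samples.length);
    for (var i = 0; i < float32Samples.length; i++) {
        var s = Math.max(-1, Math.min(1, float32Samples[i]));
        int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
    }
    return int16;
}
The resulting Int16Array can then be written into the WAV data chunk in place of the float samples.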
I'm having an issue getting a captured blob from the MediaRecorder API to play back in Chrome (it works in Firefox). I'm not sure if it's a bug in Chrome.
The error it reports:
undefined:1 Uncaught (in promise) DOMException: Unable to decode audio data
window.AudioContext = window.AudioContext || window.webkitAudioContext;
navigator.getUserMedia = (navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
var context = new AudioContext();
var record = document.querySelector('#record');
var stop = document.querySelector('#stop');
if (navigator.getUserMedia) {
console.log('getUserMedia supported.');
var constraints = {
audio: true
};
var chunks = [];
var onSuccess = function(stream) {
var mediaRecorder = new MediaRecorder(stream);
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
}
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
stop.disabled = true;
record.disabled = false;
}
mediaRecorder.onstop = function(e) {
console.log("onstop() called.", e);
var blob = new Blob(chunks, {
'type': 'audio/wav'
});
chunks = [];
var reader = new FileReader();
reader.addEventListener("loadend", function() {
context.decodeAudioData(reader.result, function(buffer) {
playsound(buffer);
},
function(e) {
console.log("error ", e)
});
});
reader.readAsArrayBuffer(blob);
}
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
}
var onError = function(err) {
console.log('The following error occurred: ' + err);
}
navigator.getUserMedia(constraints, onSuccess, onError);
} else {
console.log('getUserMedia not supported on your browser!');
}
function playsound(thisbuffer) {
var source = context.createBufferSource();
source.buffer = thisbuffer;
source.connect(context.destination);
source.start(0);
}
<button id="record">record</button>
<button id="stop">stop</button>
I have used your code exactly as it is, and everything works fine in the Chrome browser.
This issue was fixed when bug https://codereview.chromium.org/1579693006/ was closed and added to the Chrome pipeline.
This is no longer an issue.
To close the loop on this, I suspect this was due to the Chrome bug documented in a comment above. It appears this bug was fixed several years ago and should no longer be a problem as WebAudio now uses ffmpeg for decoding.
Here is a summary of the code:
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(e);
var bufferSize = 2048;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
//(...)
}
//(....)
client.on('open', function() {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
}
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false
//window.Stream.end();
window.Stream.pause();
}
store_data.onaudioprocess = function(e){ //<---line of the error
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
}
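// (convertoFloat32ToInt16 is a helper from an omitted part of the code;
// see the Float32-to-Int16 sketch earlier on this page)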
//(..events generated from server..)
In Chrome my code works just fine, but in Firefox I always get the error "store_data is undefined". Any idea why? I declare store_data as a global, and when getUserMedia succeeds its value is set.
Without knowing what calls the success function, it's difficult to say exactly, but I am fairly sure you want your client.on('open') listener to be contingent on the success function running.
I don't know how it will affect the rest of the omitted code, but I would only connect the BinaryClient when the success function has run and you are sure you have store_data defined.
function success() {
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
// do the original success code here
// now create that listener.
client.on('open', function() {
// do original code here
});
}
// you probably have a line of code that looks like this
navigator.getUserMedia({}, success);
Moving all of your code into the success function may work, but it won't be elegant. Once you've got the flow working, I would suggest refactoring the code by splitting each logical bit into its own function.
Yes, it's a race. Your code must wait until getUserMedia succeeds and open is fired.
Promises are a great way to solve this:
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
(Use the above polyfill to access modern getUserMedia in all supported browsers.)
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
var haveStoreData = navigator.mediaDevices.getUserMedia({audio:true})
.then(function(stream) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 2048;
return context.createScriptProcessor(bufferSize, 1, 1);
});
//(....)
client.on('open', function() {
console.log("opened");
haveStoreData.then(function(store_data) {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
};
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false;
//window.Stream.end();
window.Stream.pause();
};
store_data.onaudioprocess = function(e){
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
};
//(..events generated from server..)
})
.catch(function(e) { console.error(e); });
});
This will give users time to choose "Allow" in the mic permission prompt (unlike Chrome, Firefox asks the user for permission every time, unless they choose "Always Allow").
var client = new BinaryClient('ws://193.136.94.233:9001');
var context = null;
var gain = null;
var store_data = null;
//(.....)
navigator.mediaDevices.getUserMedia({audio:true}).then(function(stream){
context = new AudioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 4096;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
biquadFilter = context.createBiquadFilter();
biquadFilter.type = "lowpass";
biquadFilter.frequency.value = 11500;
biquadFilter.Q.value = 3;
ganho = context.createGain();
ganho.gain.value=0.5;
//audioInput.connect(ganho);//compresso
//ganho.connect(recorder);
//recorder.connect(context.destination);
audioInput.connect(biquadFilter);
biquadFilter.connect(ganho);
ganho.connect(store_data);
store_data.connect(context.destination);
store_data.onaudioprocess = function(e){
if(!recording){
//console.log("nada faz nada desta vida")
return;
}
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
Stream.write(convertoFloat32ToInt16(left));
}
//audioInput.connect(store_data);
}).catch(function(e){ console.log(e); });
//(...)
client.on('open', function() {
console.log("opened connection");
//haveStoreData.then(function(store_data) {
Stream = client.createStream(command_list);
//recording = false;
//(........)
});
//Other function
Here is the solution for streaming with BinaryJS in Chrome and Firefox. Thanks to @jib and @Kaiido.
How can I detect the headphone and cam when calling the start-recording function in the RecordRTC library?
btnStartRecording.onclick = function() {
btnStartRecording.disabled = true;
captureUserMedia(function(stream) {
mediaStream = stream;
videoElement.src = window.URL.createObjectURL(stream);
videoElement.play();
videoElement.muted = true;
videoElement.controls = false;
// it is second parameter of the RecordRTC
var audioConfig = {};
if (!isRecordOnlyAudio) {
audioConfig.onAudioProcessStarted = function() {
// invoke video recorder in this callback
// to get maximum sync
videoRecorder.startRecording();
};
}
audioRecorder = RecordRTC(stream, audioConfig);
if (!isRecordOnlyAudio) {
// it is second parameter of the RecordRTC
var videoConfig = {type: 'video'};
videoRecorder = RecordRTC(stream, videoConfig);
}
audioRecorder.startRecording();
// enable stop-recording button
btnStopRecording.disabled = false;
});
};
Is there any method in captureUserMedia that detects the devices and returns an error message?
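For reference, one way to check which input devices exist before calling captureUserMedia is navigator.mediaDevices.enumerateDevices(); a minimal sketch ("audioinput" and "videoinput" are the standard kind values; note this detects a microphone rather than headphones):
// Sketch: list input devices and warn if a microphone or camera is missing.
navigator.mediaDevices.enumerateDevices().then(function(devices) {
    var hasMic = devices.some(function(d) { return d.kind === "audioinput"; });
    var hasCam = devices.some(function(d) { return d.kind === "videoinput"; });
    if (!hasMic) console.log("no microphone detected");
    if (!hasCam) console.log("no camera detected");
});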