Where to add my WebSocket code in JavaScript

I am very new to JavaScript. I know some basics but have not yet fully understood the logic behind it (so far I have only worked with Python and a little bit of VBA).
For uni I have to build a browser interface that records audio and transfers it to a server where a speech-to-text application runs. I found some open-source code here (https://github.com/mdn/dom-examples/blob/main/media/web-dictaphone/scripts/app.js) which I wanted to use, but it is missing the WebSocket part, and I don't know where exactly to insert it. So far I have this:
Code of the web dictaphone:
// set up basic variables for app
const record = document.querySelector('.record');
const stop = document.querySelector('.stop');
const soundClips = document.querySelector('.sound-clips');
const canvas = document.querySelector('.visualizer');
const mainSection = document.querySelector('.main-controls');

// disable stop button while not recording
stop.disabled = true;

// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");

// main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
  console.log('getUserMedia supported.');

  const constraints = { audio: true };
  let chunks = [];

  let onSuccess = function(stream) {
    const mediaRecorder = new MediaRecorder(stream);

    visualize(stream);

    record.onclick = function() {
      mediaRecorder.start();
      console.log(mediaRecorder.state);
      console.log("recorder started");
      record.style.background = "red";
      stop.disabled = false;
      record.disabled = true;
    }

    stop.onclick = function() {
      mediaRecorder.stop();
      console.log(mediaRecorder.state);
      console.log("recorder stopped");
      record.style.background = "";
      record.style.color = "";
      // mediaRecorder.requestData();
      stop.disabled = true;
      record.disabled = false;
    }

    mediaRecorder.onstop = function(e) {
      console.log("data available after MediaRecorder.stop() called.");

      const clipName = prompt('Enter a name for your sound clip?', 'My unnamed clip');

      const clipContainer = document.createElement('article');
      const clipLabel = document.createElement('p');
      const audio = document.createElement('audio');
      const deleteButton = document.createElement('button');

      clipContainer.classList.add('clip');
      audio.setAttribute('controls', '');
      deleteButton.textContent = 'Delete';
      deleteButton.className = 'delete';

      if (clipName === null) {
        clipLabel.textContent = 'My unnamed clip';
      } else {
        clipLabel.textContent = clipName;
      }

      clipContainer.appendChild(audio);
      clipContainer.appendChild(clipLabel);
      clipContainer.appendChild(deleteButton);
      soundClips.appendChild(clipContainer);

      audio.controls = true;
      const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
      chunks = [];
      const audioURL = window.URL.createObjectURL(blob);
      audio.src = audioURL;
      console.log("recorder stopped");

      deleteButton.onclick = function(e) {
        e.target.closest(".clip").remove();
      }

      clipLabel.onclick = function() {
        const existingName = clipLabel.textContent;
        const newClipName = prompt('Enter a new name for your sound clip?');
        if (newClipName === null) {
          clipLabel.textContent = existingName;
        } else {
          clipLabel.textContent = newClipName;
        }
      }
    }

    mediaRecorder.ondataavailable = function(e) {
      chunks.push(e.data);
    }
  }

  let onError = function(err) {
    console.log('The following error occurred: ' + err);
  }

  navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
  console.log('getUserMedia not supported on your browser!');
}
WebSocket part (client side):
window.addEventListener("DOMContentLoaded", () => {
  // Open the WebSocket connection and register event handlers.
  console.log('DOMContentLoaded done');
  const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
  dataToBeSent = function (data) {
    ws.send(data);
  };
  console.log('ws is defined');
})
Right now I have just stacked the two parts on top of each other, but this doesn't work since, as I found out, you can only use a variable (such as ws) within the block it is defined in. This leads to an error saying that ws is not defined when I call the sending function within the if statement.
I have already looked for tutorials for hours, but none that I found covered this topic. I also tried moving the WebSocket part into the if statement, but that didn't work either, unsurprisingly, at least not in the way that I tried.
I feel like my problem lies in understanding how to define the WebSocket so I can call it within the if statement, or in figuring out a way to get the audio somewhere where ws is considered defined. Unfortunately I just can't get my head around it, and I have already invested days, which has become really frustrating.
I appreciate any help. If you have any ideas about what I could change or move in the code, or know of any tutorial that could help, I'd be really grateful.
Thanks in advance!

You don't need that window.addEventListener("DOMContentLoaded", () => { ... }) part. Declaring ws at the top level of the script puts it in scope for every handler defined below it:
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
const dataToBeSent = function (data) {
  ws.send(data);
};

const record = document.querySelector(".record");
const stop = document.querySelector(".stop");
const soundClips = document.querySelector(".sound-clips");
const canvas = document.querySelector(".visualizer");
const mainSection = document.querySelector(".main-controls");

// disable stop button while not recording
stop.disabled = true;

// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");

// main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
  console.log("getUserMedia supported.");
  const constraints = { audio: true };
  let chunks = [];

  let onSuccess = function (stream) {
    const mediaRecorder = new MediaRecorder(stream);
    visualize(stream);

    record.onclick = function () {
      mediaRecorder.start();
      console.log(mediaRecorder.state);
      console.log("recorder started");
      record.style.background = "red";
      stop.disabled = false;
      record.disabled = true;
    };

    stop.onclick = function () {
      mediaRecorder.stop();
      console.log(mediaRecorder.state);
      console.log("recorder stopped");
      record.style.background = "";
      record.style.color = "";
      // mediaRecorder.requestData();
      stop.disabled = true;
      record.disabled = false;
    };

    mediaRecorder.onstop = function (e) {
      console.log("data available after MediaRecorder.stop() called.");

      const clipName = prompt(
        "Enter a name for your sound clip?",
        "My unnamed clip"
      );

      const clipContainer = document.createElement("article");
      const clipLabel = document.createElement("p");
      const audio = document.createElement("audio");
      const deleteButton = document.createElement("button");

      clipContainer.classList.add("clip");
      audio.setAttribute("controls", "");
      deleteButton.textContent = "Delete";
      deleteButton.className = "delete";

      if (clipName === null) {
        clipLabel.textContent = "My unnamed clip";
      } else {
        clipLabel.textContent = clipName;
      }

      clipContainer.appendChild(audio);
      clipContainer.appendChild(clipLabel);
      clipContainer.appendChild(deleteButton);
      soundClips.appendChild(clipContainer);

      audio.controls = true;
      const blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
      chunks = [];
      const audioURL = window.URL.createObjectURL(blob);
      audio.src = audioURL;
      console.log("recorder stopped");

      deleteButton.onclick = function (e) {
        e.target.closest(".clip").remove();
      };

      clipLabel.onclick = function () {
        const existingName = clipLabel.textContent;
        const newClipName = prompt("Enter a new name for your sound clip?");
        if (newClipName === null) {
          clipLabel.textContent = existingName;
        } else {
          clipLabel.textContent = newClipName;
        }
      };
    };

    mediaRecorder.ondataavailable = function (e) {
      chunks.push(e.data);
    };
  };

  let onError = function (err) {
    console.log("The following error occurred: " + err);
  };

  navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
  console.log("getUserMedia not supported on your browser!");
}
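Note that the code above only makes ws reachable; nothing actually calls dataToBeSent yet. Here is a minimal sketch of the missing sending step, assuming the server behind ws://localhost:8001 accepts binary WebSocket messages (ws.send takes a Blob directly). Inside mediaRecorder.onstop, right after the blob is built, you could add:

if (ws.readyState === WebSocket.OPEN) {
  ws.send(blob); // ship the finished recording as one binary message
} else {
  console.log("WebSocket not open, readyState: " + ws.readyState);
}

If the server should instead receive audio while recording is still in progress, the same guard can wrap ws.send(e.data) inside mediaRecorder.ondataavailable, with mediaRecorder.start(250) so a chunk is emitted every 250 ms.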

Related

Record audio to flac file using MediaRecorder in Electron.js/Chrome

I can record audio to an ogg file in Electron and Chrome by creating the blob this way:
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
The full example code is this:
if (navigator.mediaDevices) {
console.log('getUserMedia supported.');
const constraints = { audio: true };
let chunks = [];
navigator.mediaDevices.getUserMedia(constraints)
.then(function(stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
record.style.color = "black";
}
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
}
mediaRecorder.onstop = function(e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt('Enter a name for your sound clip');
const clipContainer = document.createElement('article');
const clipLabel = document.createElement('p');
const audio = document.createElement('audio');
const deleteButton = document.createElement('button');
clipContainer.classList.add('clip');
audio.setAttribute('controls', '');
deleteButton.innerHTML = "Delete";
clipLabel.innerHTML = clipName;
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
chunks = [];
const audioURL = URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function(e) {
const evtTgt = e.target;
evtTgt.parentNode.parentNode.removeChild(evtTgt.parentNode);
}
}
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
})
.catch(function(err) {
console.log('The following error occurred: ' + err);
})
}
I want to create a flac file, so I tried
const blob = new Blob(chunks, { 'type': 'audio/flac; codecs=flac' });
When I check the output file type using the Linux file command, I get WebM in both cases.
How can I get the output file in flac format?
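A note on why the type option has no effect: the Blob constructor only labels the bytes, it never transcodes them, so the container stays whatever MediaRecorder actually produced. Whether a browser can record flac at all can be checked with the standard MediaRecorder.isTypeSupported; a small sketch:

['audio/flac', 'audio/ogg; codecs=opus', 'audio/webm; codecs=opus'].forEach(function (mime) {
  console.log(mime, MediaRecorder.isTypeSupported(mime)); // true/false per combination
});

If 'audio/flac' reports false, which is the likely result in Chrome/Electron (they record WebM/Opus), the flac file has to come from transcoding the recording afterwards, e.g. with ffmpeg.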

WebRTC, getDisplayMedia() does not capture sound from the remote stream

I have a web application of my own, which is based on the peerjs library (It is a video conference).
I'm trying to make a recording with 'MediaRecorder', but I'm facing a very unpleasant case.
The code for capturing my desktop stream is the following:
let chooseScreen = document.querySelector('.chooseScreenBtn')
chooseScreen.onclick = async () => {
let desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: true });
}
I then successfully apply my received desktopStream to videoElement in DOM:
const videoElement = document.querySelector('.videoElement')
videoElement.srcObject = desktopStream
videoElement.muted = false;
videoElement.onloadedmetadata = ()=>{videoElement.play();}
For example, I get desktopStream on the page with an active conference where everyone hears and sees each other.
To check the video and audio in desktopStream I play some video on the video player on the desktop.
I can hear audio from my desktop, but the audio from the other participants cannot be heard.
Of course, when I put the desktopStream into MediaRecorder, I get a video file with no sound from anyone except my desktop. Any ideas on how to solve it?
Chrome's MediaRecorder API can only output one audio track.
createMediaStreamSource can take the desktop and microphone streams; by connecting both to a single node created with createMediaStreamDestination, you get one merged stream that can be piped into the MediaRecorder API.
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
// Create a couple of sources
const source1 = context.createMediaStreamSource(desktopStream);
const source2 = context.createMediaStreamSource(voiceStream);
const destination = context.createMediaStreamDestination();
const desktopGain = context.createGain();
const voiceGain = context.createGain();
desktopGain.gain.value = 0.7;
voiceGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
// Connect source2
source2.connect(voiceGain).connect(destination);
return destination.stream.getAudioTracks();
};
It is also possible to use two or more audio inputs + video input.
window.onload = () => {
const warningEl = document.getElementById('warning');
const videoElement = document.getElementById('videoElement');
const captureBtn = document.getElementById('captureBtn');
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const download = document.getElementById('download');
const audioToggle = document.getElementById('audioToggle');
const micAudioToggle = document.getElementById('micAudioToggle');
if('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';
let blobs;
let blob;
let rec;
let stream;
let voiceStream;
let desktopStream;
const mergeAudioStreams = (desktopStream, voiceStream) => {
const context = new AudioContext();
const destination = context.createMediaStreamDestination();
let hasDesktop = false;
let hasVoice = false;
if (desktopStream && desktopStream.getAudioTracks().length > 0) {
// If you don't want to share Audio from the desktop it should still work with just the voice.
const source1 = context.createMediaStreamSource(desktopStream);
const desktopGain = context.createGain();
desktopGain.gain.value = 0.7;
source1.connect(desktopGain).connect(destination);
hasDesktop = true;
}
if (voiceStream && voiceStream.getAudioTracks().length > 0) {
const source2 = context.createMediaStreamSource(voiceStream);
const voiceGain = context.createGain();
voiceGain.gain.value = 0.7;
source2.connect(voiceGain).connect(destination);
hasVoice = true;
}
return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
};
captureBtn.onclick = async () => {
download.style.display = 'none';
const audio = audioToggle.checked || false;
const mic = micAudioToggle.checked || false;
desktopStream = await navigator.mediaDevices.getDisplayMedia({ video:true, audio: audio });
if (mic === true) {
voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
}
const tracks = [
...desktopStream.getVideoTracks(),
...mergeAudioStreams(desktopStream, voiceStream)
];
console.log('Tracks to add to stream', tracks);
stream = new MediaStream(tracks);
console.log('Stream', stream)
videoElement.srcObject = stream;
videoElement.muted = true;
blobs = [];
rec = new MediaRecorder(stream, {mimeType: 'video/webm; codecs=vp8,opus'});
rec.ondataavailable = (e) => blobs.push(e.data);
rec.onstop = async () => {
blob = new Blob(blobs, {type: 'video/webm'});
let url = window.URL.createObjectURL(blob);
download.href = url;
download.download = 'test.webm';
download.style.display = 'block';
};
startBtn.disabled = false;
captureBtn.disabled = true;
audioToggle.disabled = true;
micAudioToggle.disabled = true;
};
startBtn.onclick = () => {
startBtn.disabled = true;
stopBtn.disabled = false;
rec.start();
};
stopBtn.onclick = () => {
captureBtn.disabled = false;
audioToggle.disabled = false;
micAudioToggle.disabled = false;
startBtn.disabled = true;
stopBtn.disabled = true;
rec.stop();
stream.getTracks().forEach(s=>s.stop())
videoElement.srcObject = null
stream = null;
};
};
Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.
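Given those platform differences, it may be safer to feature-detect what actually arrived rather than assume system audio is present. A small sketch (the function name is illustrative, not from the question):

async function captureDesktop() {
  // Ask for system audio, then verify the browser actually delivered a track.
  const stream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
  if (stream.getAudioTracks().length === 0) {
    console.warn('No system audio captured on this platform; recording video only.');
  }
  return stream;
}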

high quality media recorder from canvas 30 fps at 1080p

I have a canvas app that currently captures images of the canvas and compiles a video that is sent to ffmpeg, which then outputs the video format of the user's choice. The problem is that it's super slow! Not the video conversion, but the compiling of the actual frames: I have to pause the video and the animation and take a screenshot of the canvas. So rather than taking screenshots I was thinking about using MediaRecorder and canvas.captureStream. I am able to get video output, but the quality is really low and the video keeps dropping frames. I need the frame rate to be at least 30 fps and the quality to be high. Here's my record function:
async [RECORD] ({state}) {
state.videoOutputURL = null;
state.outputVideo = document.createElement("video");
const videoStream = state.canvas.captureStream(30);
const mediaRecorder = new MediaRecorder(videoStream);
mediaRecorder.ondataavailable = function(e) {
state.captures.push(e.data);
};
mediaRecorder.onstop = function(e) {
const blob = new Blob(state.captures);
state.captures = [];
const videoURL = URL.createObjectURL(blob);
state.outputVideo.src = videoURL;
state.outputVideo.width = 1280;
state.outputVideo.height = 720;
document.body.append(state.outputVideo);
};
mediaRecorder.start();
state.anim.start();
state.video.play();
lottie.play();
state.video.addEventListener("ended", async () => {
mediaRecorder.stop();
});
}
The best way I found to do this was to actually pause the video on a canvas and use canvas.toDataURL to take screenshots. I compile the screenshots into a video with a library called Whammy and send that over to FFmpeg to rip the final content. The following code should give a pretty good idea
async [TAKE_SCREENSHOT]({ state, dispatch }) {
let seekResolve;
if (!state.ended && state.video) {
state.video.addEventListener("seeked", async () => {
if (seekResolve) seekResolve();
});
await new Promise(async (resolve, reject) => {
if (state.animations.length) {
dispatch(PAUSE_LOTTIES);
}
dispatch(PAUSE_VIDEO);
await new Promise(r => (seekResolve = r));
if (state.layer) {
state.layer.draw();
}
if (state.canvas) {
state.captures.push(state.canvas.toDataURL("image/webp"));
}
resolve();
dispatch(TAKE_SCREENSHOT);
});
}
},
async [PAUSE_VIDEO]({ state, dispatch, commit }) {
state.video.pause();
const oneFrame = 1 / 30;
if (state.video.currentTime + oneFrame < state.video.duration) {
state.video.currentTime += oneFrame;
const percent = `${Math.round(
(state.video.currentTime / state.video.duration) * 100
)}%`;
commit(SET_MODAL_STATUS, percent);
} else {
commit(SET_MODAL_STATUS, "Uploading your video");
state.video.play();
state.ended = true;
await dispatch(GENERATE_VIDEO);
}
},
async [PAUSE_LOTTIES]({ state }) {
for (let i = 0; i < state.animations.length; i++) {
let step = 0;
let animation = state.animations[i].lottie;
if (animation.currentFrame <= animation.totalFrames) {
step = animation.currentFrame + 1;
}
await lottie.goToAndStop(step, true, animation.name);
}
},
async [GENERATE_VIDEO]({ state, rootState, dispatch, commit }) {
let status;
state.editingZoom = null;
const username =
rootState.user.currentUser.username;
const email = rootState.user.currentUser.email || rootState.user.guestEmail;
const name = rootState.user.currentUser.firstName || "guest";
const s3Id = rootState.templates.currentVideo.stock_s3_id || state.s3Id;
const type = rootState.dataClay.fileFormat || state.type;
const vid = new Whammy.fromImageArray(state.captures, 30);
vid.lastModifiedDate = new Date();
vid.name = "canvasVideo.webm";
const data = new FormData();
const id = `${username}_${new Date().getTime()}`;
data.append("id", id);
data.append("upload", vid);
let projectId,
fileName,
matrix = null;
if (!state.editorMode) {
projectId = await dispatch(INSERT_PROJECT);
fileName = `${rootState.dataClay.projectName}.${type}`;
matrix = rootState.dataClay.matrix[0];
} else {
matrix = rootState.canvasSidebarMenu.selectedDisplay;
projectId = id;
fileName = `${id}.${type}`;
}
if (projectId || state.editorMode) {
await dispatch(UPLOAD_TEMP_FILE, data);
const key = await dispatch(CONVERT_FILE_TYPE, {
id,
username,
type,
projectId,
matrix,
name,
email,
editorMode: state.editorMode
});
const role = rootState.user.currentUser.role;
state.file = `/api/files/${key}`;
let message;
let title = "Your video is ready";
status = "rendered";
if (!key) {
status = "failed";
message =
"<p class='error'>Error processing video! If error continues please contact Creative Group. We are sorry for any inconvenience.</p>";
title = "Error!";
} else if (!rootState.user.currentUser.id) {
message = `<p>Your video is ready. Signup for more great content!</p> <a href="${
state.file
}" download="${fileName}" class="btn btn-primary btn-block">Download</a>`;
} else if (role != "banner") {
message = `<p>Your video is ready.</p> <a href="${
state.file
}" download="${fileName}" class="btn btn-primary btn-block">Download</a>`;
} else {
message = `<p>Your video is ready. You may download your file from your banner account</p>`;
await dispatch(EXPORT_TO_BANNER, {
s3Id,
fileUrl: key,
extension: `.${type}`,
resolution: matrix
});
}
if (state.editorMode) {
await dispatch(SAVE_CANVAS, { status, fileId: projectId });
}
state.video.loop = "loop";
state.anim.stop();
state.video.pause();
lottie.unfreeze();
await dispatch(DELETE_PROJECT_IN_PROGRESS);
commit(RESET_PROJECT_IN_PROGRESS);
commit(RESET_CANVAS);
if (rootState.user.currentUser.id) {
router.push("/account/projects");
} else {
router.push("/pricing");
}
dispatch(SHOW_MODAL, {
name: "message",
title,
message
});
} else {
await dispatch(FETCH_ALL_PUBLISHED_TEMPLATES);
await dispatch(DELETE_PROJECT_IN_PROGRESS);
commit(RESET_PROJECT_IN_PROGRESS);
commit(RESET_CANVAS);
}
},
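On the original quality complaint: MediaRecorder also accepts bitrate hints, which in practice make a large difference for 1080p canvas captures. A sketch, assuming the browser supports VP9 in MediaRecorder and honors the hint (it may clamp the value):

const videoStream = canvas.captureStream(30);
const mediaRecorder = new MediaRecorder(videoStream, {
  mimeType: 'video/webm; codecs=vp9',
  videoBitsPerSecond: 8000000 // ~8 Mbps, a reasonable starting point for 1080p30
});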

WebRTC Javascript cannot answer on iOS

I'm creating a web app where a computer needs to communicate with another device, an iPhone XR (iOS 13). I have created the shell of the program and it works fine with two computers running Chrome, but I am having trouble getting it to work on the phone.
Here is the code for the 'creator' side of the WebRTC connection:
<textarea id="creater-sdp"></textarea>
<textarea id="joiner-sdp"></textarea>
<button onclick="start()">Start</button>
<div id="chat"></div>
<input type="text" id="msg"><button onclick="sendMSG()">Send</button>
<script>
let id = (x) => {return document.getElementById(x);};
let constraints = {optional: [{RtpDataChannels: true}]};
let pc = new RTCPeerConnection(null);
let dc;
pc.oniceconnectionstatechange = function(e) {
let state = pc.iceConnectionState;
id("status").innerHTML = state;
};
pc.onicecandidate = function(e) {
if (e.candidate) return;
id("creater-sdp").value = JSON.stringify(pc.localDescription);
}
function createOfferSDP() {
dc = pc.createDataChannel("chat");
pc.createOffer().then(function(e) {
pc.setLocalDescription(e)
});
dc.onopen = function() {
addMSG("CONNECTED!", "info")
};
dc.onmessage = function(e) {
if (e.data) addMSG(e.data, "other");
}
};
function start() {
let answerSDP = id("joiner-sdp").value;
let answerDesc = new RTCSessionDescription(JSON.parse(answerSDP));
pc.setRemoteDescription(answerDesc);
}
let addMSG = function(msg, who) {
let node = document.createElement("div");
let textnode = document.createTextNode(`[${who}] ${msg}`);
node.appendChild(textnode);
id("chat").appendChild(node);
}
createOfferSDP();
let sendMSG = function() {
let value = id("msg").value;
if(value) {
dc.send(value);
addMSG(value, "me");
id("msg").value = "";
}
}
</script>
First, the SDP is copied from the textarea to the other 'joiner' client, and then another SDP is created which is returned to the 'creator' with the following code:
<textarea id="creater-sdp"></textarea>
<textarea id="joiner-sdp"></textarea>
<button onclick="createAnswerSDP()">Create</button>
<div id="chat"></div>
<input type="text" id="msg"><button onclick="sendMSG()">Send</button>
<script>
let id = (x) => {return document.getElementById(x);};
let constraints = {optional: [{RtpDataChannels: true}]};
let pc = new RTCPeerConnection(null);
let dc;
pc.ondatachannel = function(e) {dc = e.channel; dcInit(dc)};
pc.onicecandidate = function(e) {
if (e.candidate) return;
id("joiner-sdp").value = JSON.stringify(pc.localDescription);
};
pc.oniceconnectionstatechange = function(e) {
let state = pc.iceConnectionState;
id("status").innerHTML = state;
};
function dcInit(dc) {
dc.onopen = function() {
addMSG("CONNECTED!", "info")
};
dc.onmessage = function(e) {
if (e.data) addMSG(e.data, "other");
}
}
function createAnswerSDP() {
let offerDesc = new RTCSessionDescription(JSON.parse(id("creater-sdp").value));
pc.setRemoteDescription(offerDesc)
pc.createAnswer(function (answerDesc) {
pc.setLocalDescription(answerDesc)
}, function() {alert("Couldn't create offer")},
constraints);
};
let sendMSG = function() {
let value = id("msg").value;
if(value) {
dc.send(value);
addMSG(value, "me");
id("msg").value = "";
}
}
let addMSG = function(msg, who) {
let node = document.createElement("div");
let textnode = document.createTextNode(`[${who}] ${msg}`);
node.appendChild(textnode);
id("chat").appendChild(node);
}
</script>
This entire process works flawlessly on the computers, but for some reason it cannot be done on the iPhone, even when switching the roles. Am I doing something wrong? Or could it be that a feature I'm using isn't implemented yet? I've tried both Safari and Chrome on the phone.
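No answer was recorded for this question, but one likely culprit, offered as a guess rather than a verified fix: Safari only ships the promise-based WebRTC API, while createAnswer above uses the legacy callback signature (plus the deprecated, Chrome-only {optional: [{RtpDataChannels: true}]} constraints). A sketch of the promise-based equivalent for the joiner:

function createAnswerSDP() {
  const offerDesc = new RTCSessionDescription(JSON.parse(id("creater-sdp").value));
  pc.setRemoteDescription(offerDesc)
    .then(() => pc.createAnswer()) // no callbacks, no constraints argument
    .then((answerDesc) => pc.setLocalDescription(answerDesc))
    .catch((err) => alert("Couldn't create answer: " + err));
}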

Variable is null (only in Mozilla)

Here is a summary of the code:
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(e);
var bufferSize = 2048;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
//(...)
}
//(....)
client.on('open', function() {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
}
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false
//window.Stream.end();
window.Stream.pause();
}
store_data.onaudioprocess = function(e){ //<---line of the error
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
}
//(..events generated from server..)
In Chrome my code works just fine. In Firefox I always get the error "store_data is undefined". Any idea why? I am declaring store_data as a global, and when getUserMedia succeeds its value is changed.
Without knowing what calls the success function, it's difficult to say exactly, but I am fairly sure you want your client.on('open') listener to be contingent on the success function running.
I don't know how it will affect the rest of the omitted code, but I would only connect the BinaryClient when the success function has run and you are sure you have store_data defined.
function success() {
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
// do the original success code here
// now create that listener.
client.on('open', function() {
// do original code here
});
}
// you probably have a line of code that looks like this
navigator.getUserMedia({}, success);
Moving all of your code into the success function may work, but it won't be elegant. Once you've got the flow working, I would suggest refactoring the code, by splitting each logical bit up into its own function.
Yes, it's a race: your code must wait until getUserMedia succeeds and open is fired.
Promises are a great way to solve this:
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
(Use the above polyfill to access modern getUserMedia in all supported browsers.)
var client = new BinaryClient('ws://localhost:9001');
var context = null;
var store_data = null;
//(.....)
var haveStoreData = navigator.mediaDevices.getUserMedia({audio:true})
.then(function(stream) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 2048;
return context.createScriptProcessor(bufferSize, 1, 1);
});
//(....)
client.on('open', function() {
console.log("opened");
haveStoreData.then(function(store_data) {
console.log("createStream");
Stream = client.createStream(command_list);
var recording = false;
window.startRecording = function() {
document.getElementById("startbutton").disabled = true;
document.getElementById("stopbutton").disabled = false;
recording = true;
window.Stream.resume();
};
window.stopRecording = function() {
document.getElementById("startbutton").disabled = false;
document.getElementById("stopbutton").disabled = true;
recording = false;
//window.Stream.end();
window.Stream.pause();
};
store_data.onaudioprocess = function(e){
if(!recording) return;
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
window.Stream.write(convertoFloat32ToInt16(left));
};
//(..events generated from server..)
})
.catch(function(e) { console.error(e); });
});
This will give users time to choose "Allow" in the mic permission prompt (Unlike Chrome, Firefox asks the user for permission every time, unless they choose "Always Allow").
var client = new BinaryClient('ws://193.136.94.233:9001');
var context = null;
var gain = null;
var store_data = null;
//(.....)
navigator.mediaDevices.getUserMedia({audio:true}) .then( function(stream){
context = new AudioContext();
audioInput = context.createMediaStreamSource(stream);
var bufferSize = 4096;
store_data = context.createScriptProcessor(bufferSize, 1, 1);
biquadFilter = context.createBiquadFilter();
biquadFilter.type = "lowpass";
biquadFilter.frequency.value = 11500;
biquadFilter.Q.value = 3;
ganho = context.createGain();
ganho.gain.value=0.5;
//audioInput.connect(ganho);//compresso
//ganho.connect(recorder);
//recorder.connect(context.destination);
audioInput.connect(biquadFilter);
biquadFilter.connect(ganho);
ganho.connect(store_data);
store_data.connect(context.destination);
store_data.onaudioprocess = function(e){
if(!recording){
//console.log("nada faz nada desta vida")
return;
}
console.log ('recording');
var left = e.inputBuffer.getChannelData(0);
Stream.write(convertoFloat32ToInt16(left));
}
//audioInput.connect(store_data);
} ) .catch( function(e){ console.log(e) } );
//(...)
client.on('open', function() {
console.log("opened connection");
//haveStoreData.then(function(store_data) {
Stream = client.createStream(command_list);
//recording = false;
//(........)
});
//Other functions
Here is the solution to stream with BinaryJS in Chrome and Firefox. Thanks to @jib and @Kaiido.
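One gap in all of the snippets above: convertoFloat32ToInt16 is called throughout but never shown. A typical implementation (a sketch, not the asker's actual code) scales Web Audio's [-1, 1] float samples to 16-bit signed integers:

function convertoFloat32ToInt16(buffer) {
  const result = new Int16Array(buffer.length);
  for (let i = 0; i < buffer.length; i++) {
    const s = Math.max(-1, Math.min(1, buffer[i])); // clamp to [-1, 1]
    result[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;    // scale to int16 range
  }
  return result;
}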
