I'm creating a web-app where a computer needs to communicate with another device, an iPhone XR (iOS 13). I have created the shell of the program and works fine with two computers running Chrome, but am having trouble getting it to work on the phone.
Here is the code for the 'creator' of the WebRTC server:
<textarea id="creater-sdp"></textarea>
<textarea id="joiner-sdp"></textarea>
<button onclick="start()">Start</button>
<div id="chat"></div>
<input type="text" id="msg"><button onclick="sendMSG()">Send</button>
<script>
let id = (x) => {return document.getElementById(x);};
let constraints = {optional: [{RtpDataChannels: true}]};
let pc = new RTCPeerConnection(null);
let dc;
pc.oniceconnectionstatechange = function(e) {
let state = pc.iceConnectionState;
id("status").innerHTML = state;
};
pc.onicecandidate = function(e) {
if (e.candidate) return;
id("creater-sdp").value = JSON.stringify(pc.localDescription);
}
function createOfferSDP() {
dc = pc.createDataChannel("chat");
pc.createOffer().then(function(e) {
pc.setLocalDescription(e)
});
dc.onopen = function() {
addMSG("CONNECTED!", "info")
};
dc.onmessage = function(e) {
if (e.data) addMSG(e.data, "other");
}
};
function start() {
let answerSDP = id("joiner-sdp").value;
let answerDesc = new RTCSessionDescription(JSON.parse(answerSDP));
pc.setRemoteDescription(answerDesc);
}
let addMSG = function(msg, who) {
let node = document.createElement("div");
let textnode = document.createTextNode(`[${who}] ${msg}`);
node.appendChild(textnode);
id("chat").appendChild(node);
}
createOfferSDP();
let sendMSG = function() {
let value = id("msg").value;
if(value) {
dc.send(value);
addMSG(value, "me");
id("msg").value = "";
}
}
</script>
First, the SDP is copied from the textarea to the other 'joiner' client, and then another SDP is created which is returned to the 'creator' with the following code:
<textarea id="creater-sdp"></textarea>
<textarea id="joiner-sdp"></textarea>
<button onclick="createAnswerSDP()">Create</button>
<div id="chat"></div>
<input type="text" id="msg"><button onclick="sendMSG()">Send</button>
<script>
let id = (x) => {return document.getElementById(x);};
let constraints = {optional: [{RtpDataChannels: true}]};
let pc = new RTCPeerConnection(null);
let dc;
pc.ondatachannel = function(e) {dc = e.channel; dcInit(dc)};
pc.onicecandidate = function(e) {
if (e.candidate) return;
id("joiner-sdp").value = JSON.stringify(pc.localDescription);
};
pc.oniceconnectionstatechange = function(e) {
let state = pc.iceConnectionState;
id("status").innerHTML = state;
};
function dcInit(dc) {
dc.onopen = function() {
addMSG("CONNECTED!", "info")
};
dc.onmessage = function(e) {
if (e.data) addMSG(e.data, "other");
}
}
function createAnswerSDP() {
let offerDesc = new RTCSessionDescription(JSON.parse(id("creater-sdp").value));
pc.setRemoteDescription(offerDesc)
pc.createAnswer(function (answerDesc) {
pc.setLocalDescription(answerDesc)
}, function() {alert("Couldn't create offer")},
constraints);
};
let sendMSG = function() {
let value = id("msg").value;
if(value) {
dc.send(value);
addMSG(value, "me");
id("msg").value = "";
}
}
let addMSG = function(msg, who) {
let node = document.createElement("div");
let textnode = document.createTextNode(`[${who}] ${msg}`);
node.appendChild(textnode);
id("chat").appendChild(node);
}
</script>
This entire process works flawlessly on the computers, but for some reason cannot be done on the iPhone, even when switching the roles. Am I doing something wrong? Or could it be a feature I'm using isn't implemented yet? I've tried both Safari and Chrome on the phone.
Related
I am very new to javaScript, I know some basics but have not yet completely understood the complete logics behind it (so far I have only worked with Python and a little bit of VBA)
For uni I have to build a browser interface to record audio and transfer it to a server where a Speech to text application runs. I found some opensource code here (https://github.com/mdn/dom-examples/blob/main/media/web-dictaphone/scripts/app.js) which I wanted to use, but is missing the websocket part. Now I don't know, where exactly to insert that. So far I have this:
code of the Webdictaphone:
// set up basic variables for app
const record = document.querySelector('.record');
const stop = document.querySelector('.stop');
const soundClips = document.querySelector('.sound-clips');
const canvas = document.querySelector('.visualizer');
const mainSection = document.querySelector('.main-controls');
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
const constraints = { audio: true };
let chunks = [];
let onSuccess = function(stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function() {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
}
stop.onclick = function() {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
}
mediaRecorder.onstop = function(e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt('Enter a name for your sound clip?','My unnamed clip');
const clipContainer = document.createElement('article');
const clipLabel = document.createElement('p');
const audio = document.createElement('audio');
const deleteButton = document.createElement('button');
clipContainer.classList.add('clip');
audio.setAttribute('controls', '');
deleteButton.textContent = 'Delete';
deleteButton.className = 'delete';
if(clipName === null) {
clipLabel.textContent = 'My unnamed clip';
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function(e) {
e.target.closest(".clip").remove();
}
clipLabel.onclick = function() {
const existingName = clipLabel.textContent;
const newClipName = prompt('Enter a new name for your sound clip?');
if(newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
}
}
mediaRecorder.ondataavailable = function(e) {
chunks.push(e.data);
}
}
let onError = function(err) {
console.log('The following error occured: ' + err);
}
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log('getUserMedia not supported on your browser!');
}
websocket part (client side):
window.addEventListener("DOMContentLoaded", () => {
// Open the WebSocket connection and register event handlers.
console.log('DOMContentLoaded done');
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
dataToBeSent = function (data) {
ws.send(data);
};
console.log('ws is defined');
})
Right now I just stacked both of the parts on top of each other, but this doesn't work, since, as I found out, you only can define and use variables (such as ws) within a block. This leads to an error that says that ws i not defined when I call the sending function within the if-statement.
I already tried to look for tutorials for hours but none that I found included this topic. I also tried moving the web socket part into the if statement, but that also did - unsurprisingly work, at least not in the way that I tried.
I feel like my problem lays in understanding how to define the websocket so I can call it within the if statement, or figure out a way to somehow get the audio somewhere where ws is considered to be defined. Unfortunately I just don't get behind it and already invested days which has become really frustrating.
I appreciate any help. If you have any ideas what I could change or move in the code or maybe just know any tutorial that could help, I'd be really grateful.
Thanks in advance!
You don't need that window.addEventListener("DOMContentLoaded", () => { part
const ws = new WebSocket("ws://localhost:8001/"); // temp moved to mediarecorder.onstop
dataToBeSent = function (data) {
ws.send(data);
};
const record = document.querySelector(".record");
const stop = document.querySelector(".stop");
const soundClips = document.querySelector(".sound-clips");
const canvas = document.querySelector(".visualizer");
const mainSection = document.querySelector(".main-controls");
// disable stop button while not recording
stop.disabled = true;
// visualiser setup - create web audio api context and canvas
let audioCtx;
const canvasCtx = canvas.getContext("2d");
//main block for doing the audio recording
if (navigator.mediaDevices.getUserMedia) {
console.log("getUserMedia supported.");
const constraints = { audio: true };
let chunks = [];
let onSuccess = function (stream) {
const mediaRecorder = new MediaRecorder(stream);
visualize(stream);
record.onclick = function () {
mediaRecorder.start();
console.log(mediaRecorder.state);
console.log("recorder started");
record.style.background = "red";
stop.disabled = false;
record.disabled = true;
};
stop.onclick = function () {
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
record.style.background = "";
record.style.color = "";
// mediaRecorder.requestData();
stop.disabled = true;
record.disabled = false;
};
mediaRecorder.onstop = function (e) {
console.log("data available after MediaRecorder.stop() called.");
const clipName = prompt(
"Enter a name for your sound clip?",
"My unnamed clip"
);
const clipContainer = document.createElement("article");
const clipLabel = document.createElement("p");
const audio = document.createElement("audio");
const deleteButton = document.createElement("button");
clipContainer.classList.add("clip");
audio.setAttribute("controls", "");
deleteButton.textContent = "Delete";
deleteButton.className = "delete";
if (clipName === null) {
clipLabel.textContent = "My unnamed clip";
} else {
clipLabel.textContent = clipName;
}
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
chunks = [];
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
deleteButton.onclick = function (e) {
e.target.closest(".clip").remove();
};
clipLabel.onclick = function () {
const existingName = clipLabel.textContent;
const newClipName = prompt("Enter a new name for your sound clip?");
if (newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
};
};
mediaRecorder.ondataavailable = function (e) {
chunks.push(e.data);
};
};
let onError = function (err) {
console.log("The following error occured: " + err);
};
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
console.log("getUserMedia not supported on your browser!");
}
file: websocket_server.py
import asyncio
import websockets
import json
import ssl
peers = ()
async def on_open(websocket,path):
async for message in websocket:
message = json.loads(message)
if(message["type"]=="register"):
await register(websocket,message["username"])
elif(message["type"]=="offer"):
await send_offer(websocket,message)
elif(message["type"]=="answer"):
await send_answer(websocket,message)
elif(message["type"]=="candidate"):
await send_candidate(websocket,message)
await unregister(websocket)
async def register(websocket,username):
global peers
print(username+" logged in.")
peers = peers + ((websocket,username),)
for peer in peers:
if peer[0] is not websocket:
await websocket.send(json.dumps({"type": "create_peer","username":peer[1]}))
async def send_offer(websocket,message):
global peers
offer_creator = message["from"]
offer_receiver = message["to"]
offer = message["offer"]
print(offer_creator+" creates and sends offer to "+offer_receiver)
for peer in peers:
if(peer[1]==offer_receiver):
await peer[0].send(json.dumps({"type": "offer","username":offer_creator,"offer":offer}))
async def send_answer(websocket,message):
global peers
answer_creator = message["from"]
answer_receiver = message["to"]
answer = message["answer"]
print(answer_creator+" creates and sends answer to "+answer_receiver)
for peer in peers:
if(peer[1]==answer_receiver):
await peer[0].send(json.dumps({"type": "answer","username":answer_creator,"answer":answer}))
async def send_candidate(websocket,message):
global peers
candidate_creator = message["from"]
candidate_receiver = message["to"]
candidate = message["candidate"]
print(candidate_creator+" send candidate packet to "+candidate_receiver)
for peer in peers:
if(peer[1]==candidate_receiver):
await peer[0].send(json.dumps({"type": "candidate","username":candidate_creator,"candidate":candidate}))
async def unregister(websocket):
global peers
for peer_1 in peers:
if(peer_1[0]==websocket):
username = peer_1[1]
print(username+" logged out.")
for peer_2 in peers:
if(peer_2[0] is not websocket):
await peer_2[0].send(json.dumps({"type": "unregister","username":username}))
peers_list = list(peers)
peers_list.remove((websocket,username))
peers = tuple(peers_list)
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_context = None
start_server = websockets.serve(on_open, "127.0.0.1", 8080)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
file index.html
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Audio Calls</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<script type="text/javascript" src="main.js"></script>
<style type="text/css">
.video{
border:2px solid black;
position:relative;
}
video{
}
.username{
border-top:2px solid black;
text-align:center;
font-weight:bold;
}
</style>
</head>
<body>
</body>
</html>
file main.js
var ws_url = "ws://127.0.0.1:8080"
var ws = new WebSocket(ws_url);
var pcs = [];
var peer_connections = 0;
var constraints = {audio: true,video: true};
function makeid(length) {
var result = '';
var characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
var charactersLength = characters.length;
for ( var i = 0; i < length; i++ ) {
result += characters.charAt(Math.floor(Math.random() * charactersLength));
}
return result;
}
var local_username;
$(document).ready(function(){
local_username = makeid(5);
ws.onopen = () => ws.send(JSON.stringify({"type":"register","username":local_username}));
})
var signal_in_progress;
ws.onmessage = async function (event) {
var signal = JSON.parse(event.data);
if(signal.type=="create_peer"){
username = signal.username;
create_peer(username);
//console.log("Creating peer for: "+username);
}else if(signal.type=="offer"){
receive_offer(signal);
}else if(signal.type=="answer"){
receive_answer(signal);
//console.log("Receiving answer from peer: "+signal.username)
}else if(signal.type=="candidate"){
receive_candidate(signal);
}else if(signal.type=="unregister"){
unregister(signal);
}
}
async function create_peer(username){
pc = new RTCPeerConnection();
stream = await navigator.mediaDevices.getUserMedia(constraints);
pc.addStream(stream);
pc.onaddstream = function(event) {
make_video_element(event.stream,username)
};
pc.createOffer(function(offer) {
pc.setLocalDescription(offer, function() {
data = {"type":"offer","from":local_username,"to":username,"offer":offer}
ws.send(JSON.stringify(data));
}, fail);
}, fail);
pc.onicecandidate = function(event) {
if (event.candidate) {
data = {"type":"candidate","from":local_username,"to":username,"candidate":event.candidate}
ws.send(JSON.stringify(data));
}
};
pcs[peer_connections] = [username,pc];
peer_connections = peer_connections+1;
}
async function receive_offer(signal){
username = signal.username;
offer = signal.offer;
pc = new RTCPeerConnection();
stream = await navigator.mediaDevices.getUserMedia(constraints);
pc.addStream(stream);
pc.onaddstream = function(event) {
make_video_element(event.stream,username)
};
pc.setRemoteDescription(new RTCSessionDescription(offer), function() {
pc.createAnswer(function(answer) {
pc.setLocalDescription(answer, function() {
data = {"type":"answer","from":local_username,"to":username,"answer":answer}
ws.send(JSON.stringify(data));
}, fail);
}, fail);
}, fail);
pc.onicecandidate = function(event) {
if (event.candidate) {
data = {"type":"candidate","from":local_username,"to":username,"candidate":event.candidate}
ws.send(JSON.stringify(data));
}
};
pcs[peer_connections] = [username,pc];
peer_connections = peer_connections+1;
}
async function receive_answer(signal){
username = signal.username;
answer = signal.answer;
for(var i=0;i<peer_connections;i++){
if(pcs[i][0]==username){
pcs[i][1].setRemoteDescription(new RTCSessionDescription(answer));
}
}
}
async function receive_candidate(signal){
username = signal.username;
candidate = signal.candidate;
for(var i=0;i<peer_connections;i++){
if(pcs[i][0]==username){
pcs[i][1].addIceCandidate(new RTCIceCandidate(candidate));
}
}
}
async function unregister(signal){
username = signal.username;
index = 0
for(var i=0;i<peer_connections;i++){
if(pcs[i][0]==username){
pcs[i][1].close();
document.getElementById(username).style.display = "none";
document.getElementById(username).getElementsByTagName("video")[0].pause();
index = i;
break;
}
}
pcs.splice(index, 1);
peer_connections = peer_connections-1;
}
function make_video_element(stream,username){
var video_container = document.createElement("div");
video_container.id = username;
video_container.classList.add("video");
var video = document.createElement("video");
video.srcObject = stream;
video_container.appendChild(video);
var username_element = document.createElement("div");
username_element.classList.add("username");
username_element.innerHTML = username;
video_container.appendChild(username_element);
document.body.appendChild(video_container);
video.play();
}
function fail(error){
console.log(error);
}
The above code works fine for one connection (send-receive).
But if i open three tabs something is wrong.
I don't know where the problem focused.
Can you help me please?
I found the error:
main.js
var ws_url = "ws://127.0.0.1:8080"
var ws = new WebSocket(ws_url);
var pcs = [];
var peer_connections = 0;
var constraints = {audio: true,video: true};
function makeid(length) {
var result = '';
var characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
var charactersLength = characters.length;
for ( var i = 0; i < length; i++ ) {
result += characters.charAt(Math.floor(Math.random() * charactersLength));
}
return result;
}
var local_username;
$(document).ready(function(){
local_username = makeid(5);
ws.onopen = () => ws.send(JSON.stringify({"type":"register","username":local_username}));
})
var signal_in_progress;
ws.onmessage = async function (event) {
var signal = JSON.parse(event.data);
if(signal.type=="create_peer"){
username = signal.username;
create_peer(username);
}else if(signal.type=="offer"){
receive_offer(signal);
}else if(signal.type=="answer"){
receive_answer(signal);
}else if(signal.type=="candidate"){
receive_candidate(signal);
}else if(signal.type=="unregister"){
unregister(signal);
}
}
async function create_peer(username){
pc = new RTCPeerConnection();
var pc_index = peer_connections;
pcs[pc_index] = [username,pc];
peer_connections = peer_connections+1;
stream = await navigator.mediaDevices.getUserMedia(constraints);
pcs[pc_index][1].addStream(stream);
pcs[pc_index][1].onaddstream = function(event) {
make_video_element(event.stream,username)
};
console.log(pc_index);
pcs[pc_index][1].createOffer(function(offer) {
pcs[pc_index][1].setLocalDescription(offer, function() {
data = {"type":"offer","from":local_username,"to":username,"offer":offer}
console.log("Offer to: "+username);
ws.send(JSON.stringify(data));
}, fail);
}, fail);
pcs[pc_index][1].onicecandidate = function(event) {
if (event.candidate) {
data = {"type":"candidate","from":local_username,"to":username,"candidate":event.candidate}
ws.send(JSON.stringify(data));
}
};
}
async function receive_offer(signal){
username = signal.username;
offer = signal.offer;
pc = new RTCPeerConnection();
pc_index = peer_connections;
pcs[pc_index] = [username,pc];
peer_connections = peer_connections+1;
stream = await navigator.mediaDevices.getUserMedia(constraints);
pcs[pc_index][1].addStream(stream);
pcs[pc_index][1].onaddstream = function(event) {
make_video_element(event.stream,username)
};
pcs[pc_index][1].setRemoteDescription(new RTCSessionDescription(offer), function() {
pcs[pc_index][1].createAnswer(function(answer) {
pcs[pc_index][1].setLocalDescription(answer, function() {
data = {"type":"answer","from":local_username,"to":username,"answer":answer}
ws.send(JSON.stringify(data));
}, fail);
}, fail);
}, fail);
pcs[pc_index][1].onicecandidate = function(event) {
if (event.candidate) {
data = {"type":"candidate","from":local_username,"to":username,"candidate":event.candidate}
ws.send(JSON.stringify(data));
}
};
}
async function receive_answer(signal){
username = signal.username;
answer = signal.answer;
for(var i=0;i<peer_connections;i++){
if(pcs[i][0]==username){
console.log("Accept answer");
console.log(username);
console.log(i)
pcs[i][1].setRemoteDescription(new RTCSessionDescription(answer));
}
}
}
async function receive_candidate(signal){
username = signal.username;
candidate = signal.candidate;
for(var i=0;i<peer_connections;i++){
if(pcs[i][0]==username){
pcs[i][1].addIceCandidate(new RTCIceCandidate(candidate));
}
}
}
async function unregister(signal){
username = signal.username;
index = 0
for(var i=0;i<peer_connections;i++){
if(pcs[i][0]==username){
pcs[i][1].close();
document.getElementById(username).style.display = "none";
document.getElementById(username).getElementsByTagName("video")[0].pause();
index = i;
break;
}
}
pcs.splice(index, 1);
peer_connections = peer_connections-1;
}
function make_video_element(stream,username){
var video_container = document.createElement("div");
video_container.id = username;
video_container.classList.add("video");
var video = document.createElement("video");
video.srcObject = stream;
video_container.appendChild(video);
var username_element = document.createElement("div");
username_element.classList.add("username");
username_element.innerHTML = username;
video_container.appendChild(username_element);
document.body.appendChild(video_container);
video.play();
}
function fail(error){
console.log(error);
}
Maybe the error oqqured by the following reason:
the peer_connections variable has changed before the offer has created because another offer was trying to made.
Edit: For someone who wants to implement the above code in a local network also note that the windows defender firewall must turned off in every peer.
I'm trying to get the amount of currently viewer of a stream.
I tried to trigger some codes whenever onUserStatusChanged is fired (used as if user onjoin event).
Something like:
connection.onUserStatusChanged = function(status) {
console.log("onUserStatusChanged");
updateViewers();
};
function updateViewers(){
connection.getAllParticipants().forEach(function(participantId) {
var peer = connection.peers[participantId];
if(viewers && viewers.indexOf(peer.extra.nickname) === -1 && "UserA" !== peer.extra.nickname)
viewers.push(peer.extra.nickname);
});
console.log(viewers.length);
}
So far so good it works, But now it pushes also the user who has cam and not watching user A his cam.
I only want the extra.nickname of the user who is connected to User A his cam/channel/connection
Using "beforeAddingStream":
Please download latest codes from github to use this "beforeAddingStream" method.
var listOfStreamReceivers = {};
connection.beforeAddingStream = function(stream, peer) {
var remoteUserExtra = connection.peers[peer.userid].extra;
listOfStreamReceivers[remoteUserExtra.nickname] = peer;
return stream;
};
Using "onExtraDataUpdated" and "updateExtraData"
var listOfStreamReceivers = {};
connection.onstream = function(event) {
if (event.type == 'remote') {
connection.extra.receivedStreamById = event.streamid;
connection.updateExtraData();
}
};
connection.onExtraDataUpdated = function(event) {
if (event.extra.receivedStreamById) {
listOfStreamReceivers[event.extra.nickname] = event;
}
};
Using data channels
connection.session.data = true;
var listOfStreamReceivers = {};
connection.onstream = function(event) {
if (event.type == 'remote') {
var data = {
receivedStreamById: event.streamid
};
// second parameter sends direct message to the broadcast initiator
connection.send(data, connection.sessionid);
}
};
connection.onmessage = function(event) {
if (event.data.receivedStreamById) {
listOfStreamReceivers[event.extra.nickname] = event;
}
};
"listOfStreamReceivers" to array
var array = Object.keys(listOfStreamReceivers);
console.log('number of receivers', array.length);
array.forEach(function(key) {
var peer = listOfStreamReceivers[key];
var userid = peer.userid;
var extra = peer.extra;
});
In my web application, I have a requirement to play part of mp3 file. This is a local web app, so I don't care about downloads etc, everything is stored locally.
My use case is as follows:
determine file to play
determine start and stop of the sound
load the file [I use BufferLoader]
play
Quite simple.
Right now I just grab the mp3 file, decode it in memory for use with WebAudio API, and play it.
Unfortunately, because the mp3 files can get quite long [30minutes of audio for example] the decoded file in memory can take up to 900MB. That's a bit too much to handle.
Is there any option, where I could decode only part of the file? How could I detect where to start and how far to go?
I cannot anticipate the bitrate, it can be constant, but I would expect variable as well.
Here's an example of what I did:
http://tinyurl.com/z9vjy34
The code [I've tried to make it as compact as possible]:
var MediaPlayerAudioContext = window.AudioContext || window.webkitAudioContext;
var MediaPlayer = function () {
this.mediaPlayerAudioContext = new MediaPlayerAudioContext();
this.currentTextItem = 0;
this.playing = false;
this.active = false;
this.currentPage = null;
this.currentAudioTrack = 0;
};
MediaPlayer.prototype.setPageNumber = function (page_number) {
this.pageTotalNumber = page_number
};
MediaPlayer.prototype.generateAudioTracks = function () {
var audioTracks = [];
var currentBegin;
var currentEnd;
var currentPath;
audioTracks[0] = {
begin: 4.300,
end: 10.000,
path: "example.mp3"
};
this.currentPageAudioTracks = audioTracks;
};
MediaPlayer.prototype.show = function () {
this.mediaPlayerAudioContext = new MediaPlayerAudioContext();
};
MediaPlayer.prototype.hide = function () {
if (this.playing) {
this.stop();
}
this.mediaPlayerAudioContext = null;
this.active = false;
};
MediaPlayer.prototype.play = function () {
this.stopped = false;
console.trace();
this.playMediaPlayer();
};
MediaPlayer.prototype.playbackStarted = function() {
this.playing = true;
};
MediaPlayer.prototype.playMediaPlayer = function () {
var instance = this;
var audioTrack = this.currentPageAudioTracks[this.currentAudioTrack];
var newBufferPath = audioTrack.path;
if (this.mediaPlayerBufferPath && this.mediaPlayerBufferPath === newBufferPath) {
this.currentBufferSource = this.mediaPlayerAudioContext.createBufferSource();
this.currentBufferSource.buffer = this.mediaPlayerBuffer;
this.currentBufferSource.connect(this.mediaPlayerAudioContext.destination);
this.currentBufferSource.onended = function () {
instance.currentBufferSource.disconnect(0);
instance.audioTrackFinishedPlaying()
};
this.playing = true;
this.currentBufferSource.start(0, audioTrack.begin, audioTrack.end - audioTrack.begin);
this.currentAudioStartTimeInAudioContext = this.mediaPlayerAudioContext.currentTime;
this.currentAudioStartTimeOffset = audioTrack.begin;
this.currentTrackStartTime = this.mediaPlayerAudioContext.currentTime - (this.currentTrackResumeOffset || 0);
this.currentTrackResumeOffset = null;
}
else {
function finishedLoading(bufferList) {
instance.mediaPlayerBuffer = bufferList[0];
instance.playMediaPlayer();
}
if (this.currentBufferSource){
this.currentBufferSource.disconnect(0);
this.currentBufferSource.stop(0);
this.currentBufferSource = null;
}
this.mediaPlayerBuffer = null;
this.mediaPlayerBufferPath = newBufferPath;
this.bufferLoader = new BufferLoader(this.mediaPlayerAudioContext, [this.mediaPlayerBufferPath], finishedLoading);
this.bufferLoader.load();
}
};
MediaPlayer.prototype.stop = function () {
this.stopped = true;
if (this.currentBufferSource) {
this.currentBufferSource.onended = null;
this.currentBufferSource.disconnect(0);
this.currentBufferSource.stop(0);
this.currentBufferSource = null;
}
this.bufferLoader = null;
this.mediaPlayerBuffer = null;
this.mediaPlayerBufferPath = null;
this.currentTrackStartTime = null;
this.currentTrackResumeOffset = null;
this.currentAudioTrack = 0;
if (this.currentTextTimeout) {
clearTimeout(this.currentTextTimeout);
this.textHighlightFinished();
this.currentTextTimeout = null;
this.currentTextItem = null;
}
this.playing = false;
};
MediaPlayer.prototype.getNumberOfPages = function () {
return this.pageTotalNumber;
};
MediaPlayer.prototype.playbackFinished = function () {
this.currentAudioTrack = 0;
this.playing = false;
};
MediaPlayer.prototype.audioTrackFinishedPlaying = function () {
this.currentAudioTrack++;
if (this.currentAudioTrack >= this.currentPageAudioTracks.length) {
this.playbackFinished();
} else {
this.playMediaPlayer();
}
};
//
//
// Buffered Loader
//
// Class used to get the sound files
//
function BufferLoader(context, urlList, callback) {
this.context = context;
this.urlList = urlList;
this.onload = callback;
this.bufferList = [];
this.loadCount = 0;
}
// this allows us to handle media files with embedded artwork/id3 tags
function syncStream(node) { // should be done by api itself. and hopefully will.
var buf8 = new Uint8Array(node.buf);
buf8.indexOf = Array.prototype.indexOf;
var i = node.sync, b = buf8;
while (1) {
node.retry++;
i = b.indexOf(0xFF, i);
if (i == -1 || (b[i + 1] & 0xE0 == 0xE0 )) break;
i++;
}
if (i != -1) {
var tmp = node.buf.slice(i); //carefull there it returns copy
delete(node.buf);
node.buf = null;
node.buf = tmp;
node.sync = i;
return true;
}
return false;
}
BufferLoader.prototype.loadBuffer = function (url, index) {
// Load buffer asynchronously
var request = new XMLHttpRequest();
request.open("GET", url, true);
request.responseType = "arraybuffer";
var loader = this;
function decode(sound) {
loader.context.decodeAudioData(
sound.buf,
function (buffer) {
if (!buffer) {
alert('error decoding file data');
return
}
loader.bufferList[index] = buffer;
if (++loader.loadCount == loader.urlList.length)
loader.onload(loader.bufferList);
},
function (error) {
if (syncStream(sound)) {
decode(sound);
} else {
console.error('decodeAudioData error', error);
}
}
);
}
request.onload = function () {
// Asynchronously decode the audio file data in request.response
var sound = {};
sound.buf = request.response;
sound.sync = 0;
sound.retry = 0;
decode(sound);
};
request.onerror = function () {
alert('BufferLoader: XHR error');
};
request.send();
};
BufferLoader.prototype.load = function () {
for (var i = 0; i < this.urlList.length; ++i)
this.loadBuffer(this.urlList[i], i);
};
There is no way of streaming with decodeAudioData(), you need to use MediaElement with createMediaStreamSource and run your stuff then. decodeAudioData() cannot stream on a part.#zre00ne And mp3 will be decoded big!!! Verybig!!!
How to start a basic WebRTC data channel?
This is what I have so far, but it doesn't even seem to try and connect. Im sure I am just missing something basic.
var RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection || window.msRTCPeerConnection;
var peerConnection = new RTCPeerConnection({
iceServers: [
{url: 'stun:stun1.l.google.com:19302'},
{url: 'stun:stun2.l.google.com:19302'},
{url: 'stun:stun3.l.google.com:19302'},
{url: 'stun:stun4.l.google.com:19302'},
]
});
peerConnection.ondatachannel = function () {
console.log('peerConnection.ondatachannel');
};
peerConnection.onicecandidate = function () {
console.log('peerConnection.onicecandidate');
};
var dataChannel = peerConnection.createDataChannel('myLabel', {
});
dataChannel.onerror = function (error) {
console.log('dataChannel.onerror');
};
dataChannel.onmessage = function (event) {
console.log('dataChannel.onmessage');
};
dataChannel.onopen = function () {
console.log('dataChannel.onopen');
dataChannel.send('Hello World!');
};
dataChannel.onclose = function () {
console.log('dataChannel.onclose');
};
console.log(peerConnection, dataChannel);
WebRTC assumes you have a way to signal (send an offer-string to, and receive an answer-string from) whomever you wish to contact. Without some server, how will you do that?
To illustrate, here's some code that does everything but that (works in Firefox and Chrome 45):
var config = { iceServers: [{ urls: "stun:stun.l.google.com:19302" }]};
var dc, pc = new RTCPeerConnection(config);
pc.ondatachannel = e => {
dc = e.channel;
dc.onopen = e => (log("Chat!"), chat.select());
dc.onmessage = e => log(e.data);
}
function createOffer() {
button.disabled = true;
pc.ondatachannel({ channel: pc.createDataChannel("chat") });
pc.createOffer().then(d => pc.setLocalDescription(d)).catch(failed);
pc.onicecandidate = e => {
if (e.candidate) return;
offer.value = pc.localDescription.sdp;
offer.select();
answer.placeholder = "Paste answer here";
};
};
offer.onkeypress = e => {
if (e.keyCode != 13 || pc.signalingState != "stable") return;
button.disabled = offer.disabled = true;
var obj = { type:"offer", sdp:offer.value };
pc.setRemoteDescription(new RTCSessionDescription(obj))
.then(() => pc.createAnswer()).then(d => pc.setLocalDescription(d))
.catch(failed);
pc.onicecandidate = e => {
if (e.candidate) return;
answer.focus();
answer.value = pc.localDescription.sdp;
answer.select();
};
};
answer.onkeypress = e => {
if (e.keyCode != 13 || pc.signalingState != "have-local-offer") return;
answer.disabled = true;
var obj = { type:"answer", sdp:answer.value };
pc.setRemoteDescription(new RTCSessionDescription(obj)).catch(failed);
};
chat.onkeypress = e => {
if (e.keyCode != 13) return;
dc.send(chat.value);
log(chat.value);
chat.value = "";
};
var log = msg => div.innerHTML += "<p>" + msg + "</p>";
var failed = e => log(e + ", line " + e.lineNumber);
<script src="https://rawgit.com/webrtc/adapter/master/adapter.js"></script>
<button id="button" onclick="createOffer()">Offer:</button>
<textarea id="offer" placeholder="Paste offer here"></textarea><br>
Answer: <textarea id="answer"></textarea><br><div id="div"></div>
Chat: <input id="chat"></input><br>
Open this page in a second tab, and you can chat from one tab to the other (or to a different machine around the world). What stinks is that you must get the offer there yourself:
Press the Offer button in Tab A (only) and wait 1-20 seconds till you see the offer-text,
copy-paste the offer-text from Tab A to Tab B, and hit Enter
copy-paste the answer-text that appears from Tab B to Tab A, and hit Enter.
You should now be able to chat between tabs, without a server.
As you can see, this is a sub-par experience, which is why you need some basic websocket server to pass offer/answer (as well as trickle ice candidates if you want connecting to happen fast) between A and B, to get things started. Once you have a connection, you can use data-channels for this, with a little extra work.