I have a question about the order in which my JavaScript runs. Let me show you my code first:
Here is my HTML:
<video id="video" width="320" height="320" autoplay></video><br>
<button id="snap">Snap Photo</button><br>
<canvas id="canvas" width="320" height="320"></canvas>
<p id="pngHolder"></p>
And here is my JavaScript:
<script>
var Id;
//List cameras and microphones.
if (!navigator.mediaDevices || !navigator.mediaDevices.enumerateDevices) {
console.log("enumerateDevices() not supported.");
}
navigator.mediaDevices.enumerateDevices()
.then(function (devices) {
devices.forEach(function (device) {
if (device.kind == "videoinput" && device.label.indexOf('back') >= 0) {
Id = device.deviceId;
alert("ID 1 : " + Id);
}
});
})
.catch(function (err) {
console.log(err.name + ": " + err.message);
});
// Put event listeners into place
window.addEventListener("DOMContentLoaded", function () {
// Grab elements, create settings, etc.
alert("ID 2 : "+ Id);
var canvas = document.getElementById("canvas"),
videoObj = {
video: {
optional: [{ deviceId: Id }]
}
},
context = canvas.getContext("2d"),
video = document.getElementById("video"),
errBack = function (error) {
console.log("Video capture error: ", error.code);
};
// Trigger photo take
document.getElementById("snap").addEventListener("click", function () {
context.drawImage(video, 0, 0, 640, 480);
// Get the image
var image = convertCanvasToImage(canvas);
// Actions
document.getElementById("pngHolder").appendChild(image);
// Converts canvas to an image
function convertCanvasToImage(canvas) {
var image = new Image();
image.src = canvas.toDataURL("image/png");
return image;
}
});
//alert("ID 2 : " + Id);
// Put video listeners into place
if (navigator.getUserMedia) { // Standard
navigator.getUserMedia(videoObj, function (stream) {
video.src = stream;
video.play();
}, errBack);
} else if (navigator.webkitGetUserMedia) { // WebKit-prefixed
navigator.webkitGetUserMedia(videoObj, function (stream) {
video.src = window.webkitURL.createObjectURL(stream);
video.play();
}, errBack);
} else if (navigator.mozGetUserMedia) { // Firefox-prefixed
navigator.mozGetUserMedia(videoObj, function (stream) {
video.src = window.URL.createObjectURL(stream);
video.play();
}, errBack);
}
}, false);
I want to store the value of device.deviceId in the variable Id that I declare on the first line of my JavaScript. That part works (as shown by alert("ID 1 : " + Id);). But when I try to use it in optional: [{ deviceId: Id }], Id doesn't have any value.
Also, when I run it in the browser, I've found that alert("ID 2 : " + Id); fires before alert("ID 1 : " + Id);, even though I placed alert("ID 1 : " + Id); first in the code. I think that's why the variable is still empty.
My question is: how can I get the device.deviceId value into optional: [{ deviceId: Id }]?
navigator.mediaDevices.enumerateDevices and DOMContentLoaded are racing, and the latter is winning, so you're using Id before it's set.
To solve this, use a temporary haveId promise:
var haveId = navigator.mediaDevices.enumerateDevices()
  .then(devices => {
    var device = devices.find(d => d.kind == "videoinput" &&
                                   d.label.indexOf("back") >= 0);
    return device && device.deviceId; // resolve with the id itself, since it's used as deviceId below
  });
// Put event listeners into place
window.addEventListener("DOMContentLoaded", function () {
haveId.then(id => {
// Grab elements, create settings, etc.
alert("ID 2 : "+ id);
var canvas = document.getElementById("canvas"),
videoObj = { video: { deviceId: id } }, // <-- adapter.js constraints
Promise chains create dependencies, and this way the getUserMedia code won't proceed until both things have happened.
The second problem is that you're mixing new and outdated Chrome-specific constraints. Either use adapter.js until Chrome catches up, or, in a pinch, use the Chrome-only sourceId (but that won't work in any other browser).
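For reference, a rough sketch of the two constraint shapes being mixed up here: the standard form that adapter.js (and spec-compliant browsers) understand, versus the legacy Chrome-only form. The id variable is just a placeholder:
// Standard constraints: deviceId sits directly under video.
var standardConstraints = { video: { deviceId: id } };

// Legacy Chrome-only constraints: the camera is picked via sourceId inside optional;
// other browsers won't understand this shape.
var legacyChromeConstraints = { video: { optional: [{ sourceId: id }] } };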
navigator.mediaDevices.enumerateDevices() is asynchronous. It returns a promise (think callback, but fancier).
You should trigger your call to getUserMedia from there, or wait for both DOMContentLoaded and enumerateDevices and then execute getUserMedia.
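A minimal sketch of that second approach, waiting for both and then calling the promise-based getUserMedia; it reuses the back-camera lookup idea from the answer above, and the constraint shape and element id are illustrative rather than drop-in code:
var haveBackId = navigator.mediaDevices.enumerateDevices()
  .then(devices => {
    // note: labels can be empty until the user has granted camera permission
    var cam = devices.find(d => d.kind == "videoinput" && d.label.indexOf("back") >= 0);
    return cam && cam.deviceId;
  });

var domReady = new Promise(resolve => {
  if (document.readyState !== "loading") resolve(); // DOM already parsed
  else window.addEventListener("DOMContentLoaded", resolve);
});

Promise.all([haveBackId, domReady])
  .then(([id]) => navigator.mediaDevices.getUserMedia({
    video: id ? { deviceId: id } : true // fall back to any camera if no "back" label was found
  }))
  .then(stream => {
    var video = document.getElementById("video");
    video.srcObject = stream; // modern replacement for assigning a URL to video.src
    return video.play();
  })
  .catch(err => console.log(err.name + ": " + err.message));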
Related
I would like to play Vimeo videos in sequence with JavaScript, with continuous playback and looping through the items. The following code moves to the second video at the end of the first, but the same functions are not called at the end of the second video: only the end of the first video logs Called! in the JavaScript console, not the end of the second.
The Vimeo Player SDK has player.getVideoId() but not player.setVideoId(), so I used a hack to load the next video: setting the src of the iframe and creating the Vimeo player anew. I wonder if this is the problem, and whether the listener for the end of the video stays attached to the discarded player.
Another issue with this code is that it exits fullscreen to load the next video.
I attempted this CodePen, which sets the Vimeo Player on a plain element instead of an iframe, and was unable to change the video being played.
What is a cleaner or more effective way of changing videos with the Vimeo Player SDK?
The HTML is:
<div class="vimeo">
<div class="resp-iframe-container container-centered">
<iframe class="resp-iframe" scrolling="no" frameborder="no" src="https://player.vimeo.com/video/238301525" allowfullscreen="" data-ready="true"></iframe></div></div>
And the JavaScript is:
var l = ["238301525", "496371201", "238301525", "496371201", "..."];
window.current = 0;
var increment = function() {
window.current += 1;
if (window.current == l.length) {
window.current = 0;
}
};
var decrement = function() {
window.current -= 1;
if (window.current < 0) {
window.current = l.length - 1;
}
};
prevA.addEventListener("click", function() {
decrement();
loadPlayer();
});
nextA.addEventListener("click", function() {
increment();
console.log("here!");
loadPlayer();
console.log("there!");
});
var loadPlayer = function() {
console.log("Called!");
var iframe = document.querySelector('iframe');
iframe.src = "https://player.vimeo.com/video/" + l[window.current];
var player = new Vimeo.Player(iframe);
player.autoplay = true;
var treatEvent = function(data, eventLabel) {
// Custom code...
if ("end" == eventLabel) {
console.log("incrementing automatically");
increment();
loadPlayer();
}
};
var onPlay = function(data) {
treatEvent(data, "play");
}
var onPause = function(data) {
treatEvent(data, "pause");
}
var onSeeked = function(data) {
treatEvent(data, "seeked");
}
var onEnd = function(data) {
treatEvent(data, "end");
}
player.on('play', onPlay);
player.on('pause', onPause);
player.on('seeked', onSeeked);
player.on('ended', onEnd);
setTimeout(function() {
//console.log("Trying to play...");
player.play().then(function() {
// the video was played
console.log("success: video played");
}).catch(function(error) {
console.log("Error! " + error.name);
});
}, 1000);
}
loadPlayer();
It seems that the Vimeo Player keeps track of whether it was initialized by adding a data-vimeo-initialized="true" attribute to the HTML element.
One solution is to use the Vimeo Player SDK function .loadVideo():
var song_iframe = document.querySelector("iframe#song_video");
var song_player = new Vimeo.Player(song_iframe);
var on_song_end = function(data) {
// Logic to get the new id.
song_player.loadVideo(newId).then(function() {
song_player.play();
});
};
song_player.on("ended", on_song_end);
I am using the YouTube API to search for YouTube videos. The videos are then displayed in #searchBar with the video id (e.g. NJNlqeMM8Ns) as data-video. I get the video id by clicking on an img:
<img data-video = "{{videoid}}" src = "bilder/play.png" alt = "play" class = "knapp" width = "40" height = "40">
Which, in my limited understanding of JavaScript, becomes (this).
When I search for videos I get more than one result, which means I also get more than one img tag.
In this case I want to play the next song when the first one is finished. I tried to get the index when I pressed on my img tag:
$(".knapp").click(function(){
var index = $(".knapp").index(this);
alert(index);
});
However, when I alerted the index after the video was finished, I always got the value 0 back.
So I thought I could do something like this:
function onPlayerStateChange(event) {
if (event.data == YT.PlayerState.ENDED){
playNext();
}
}
$('#searchBar').on('click', '[data-video]', function(){
player.current_video = $(this).attr('data-video');
playVideo();
});
function playVideo(){
var video_id = player.current_video;
player.loadVideoById(video_id, 0, "large");
}
function playNext(){
var player.current_videon = **$(this + 1).attr('data-video');**
var next_id = player.current_videon;
player.loadVideoById(next_id, 0, "large");
}
But I'm not sure how to make it work (see the bold section). Can I solve my problem like this, or do I need another approach?
Edit:
With some research I found out that I need to keep track of the index of the video currently being played, and after a video finishes playing, increase that number by 1.
However, even though this made the next video play, I was no longer able to choose which song I wanted...
function onPlayerStateChange(event) {
if (event.data == YT.PlayerState.ENDED){
player.current_video++;
playVideo();
}
}
var player = document.querySelector('iframe');
function onYouTubeIframeAPIReady() {
player = new YT.Player('player', {
height: '390',
width: '640',
videoId: '40mSZPyqpag',
playerVars: {rel: 0},
events: {
'onStateChange': onPlayerStateChange
}
});
player.current_video = 0;
}
$('#searchBar').on('click', '[data-video]', function(){
player.current_video = $(this).index();
alert(player.current_video);
playVideo();
});
function playVideo(){
var video_id = $('[data-video]').eq(player.current_video).attr('data-video');
player.loadVideoById(video_id, 0, "large");
}
Here is a working PEN (click to RUN)
The solution is based on a global currentSongIndex index, without relying on next():
var currentSongIndex = null;
$(".knapp").click(function () {
var index = $(this).attr('data-video');
playVideo(index);
});
function playVideo(index) {
console.log("Playing song INDEX: " + index);
currentSongIndex = index;
playNext();
}
function playNext() {
currentSongIndex++;
let nextSongIndex = $('img[data-video="' + currentSongIndex + '"]').attr('data-video');
console.log("Next song INDEX: " + nextSongIndex);
if(typeof nextSongIndex != "undefined") {
playVideo(nextSongIndex);
} else {
console.log('end of list... you can play from index 1 or whatever....');
}
}
I got a suggestion to get player.current_video from the closest li index, so I updated my data-video img tag.
<li class = liItem>
<img data-video = "{{videoid}}" src = "bilder/play.png" alt = "play" class = "knapp" width = "40" height = "40">
</li>
Then I changed how the index is computed in the click function from my edited example:
$('#searchBar').on('click', '[data-video]', function(){
player.current_video = $(this).closest('li').index();
playVideo();
});
With this new function I was able to choose and play the next song!
Shoutout to #cale_b for providing me with the .closest('li') suggestion.
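Putting the pieces together, a rough sketch (based on the player, playVideo() and onPlayerStateChange from the edit above) that also wraps around at the end of the result list:
function onPlayerStateChange(event) {
  if (event.data == YT.PlayerState.ENDED) {
    // advance to the next result, wrapping back to the first one at the end
    var count = $('[data-video]').length;
    player.current_video = (player.current_video + 1) % count;
    playVideo();
  }
}

$('#searchBar').on('click', '[data-video]', function () {
  // the position of the surrounding <li> matches the position in the result list
  player.current_video = $(this).closest('li').index();
  playVideo();
});

function playVideo() {
  var video_id = $('[data-video]').eq(player.current_video).attr('data-video');
  player.loadVideoById(video_id);
}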
I have a video application written in ASP.NET and C#, with WebRTC in JS.
An outside developer wrote it for me a while ago (I can't reach him any more). The application works really well, except for one problem.
The application was written for screen sharing, and it uses a Chrome extension for the screen sharing, which also works well. The problem is that as soon as you switch from the screen-sharing stream back to the webcam stream, you cannot switch back to screen sharing again, and it looks like the programmer wrote it this way on purpose.
I tried invoking the extension again, but it gives me an error. My question is: is there a way I can keep the screen-sharing stream and switch the video feed without touching the extension?
This is the JS code for WebRTC.
The main problem is in getFeed, in the third if:
getFeed = function (shareScreen, username, userType, reinit, cb) {
if (shareScreen) {
$("#screen-share").attr("disabled", "disabled");
$("#video-share").removeAttr("disabled");
// getUserMedia(session, onSuccess, onError);
if (webrtcDetectedBrowser == "chrome") {
DetectRTC.screen.isChromeExtensionAvailable(function (isAvaliabe) {
if (isInScreenSharing) {
return;
}
if (isAvaliabe) {
isInScreenSharing = true;
captureUserMedia(onSuccess);
} else {
alertify.confirm("Screen sharing extension is not installed," +
" do you wish to install it now?",
function(e) {
if (e) {
window.open("https://chrome.google.com/webstore/detail/screen-capturing-
for-micr/hmppjaalobpkbpneompfdpfilcmfhfik", "_blank");
alertify.alert("Click Ok to reload page
after installing extension ", function () {
location.href = location.href;
});
}
});
}
});
} else {
isInScreenSharing = false;
getScreenId(function (error, sourceId, screen_constraints) {
screen_constraints.video = _showWebCam;
screen_constraints.audio = _showWebCam;
getUserMedia(screen_constraints, onSuccess, onError);
});
}
} else {
$("#video-share").attr("disabled", "disabled");
$("#screen-share").removeAttr("disabled");
getUserMedia(
{
// Permissions to request
video: _showWebCam,
audio: _showWebCam
}, onSuccess, onError);
}
function onSuccess(stream) { // succcess callback gives us a media stream
$('.instructions').hide();
if (reinit) {
if (screenStream) {
screenStream.getTracks().forEach(function (e) {
e.stop();
});
screenStream = null;
}
// Store off the stream reference so we can share it later
_mediaStream = stream;
if (webrtcDetectedBrowser == "chrome") {
if (shareScreen) {
//get the audio track from video stream and put it in screeen stream
var audioTrack = videoStream.getAudioTracks()[0];
var screenAudio = stream.getAudioTracks()[0];
if (screenAudio) {
_mediaStream.removeTrack(screenAudio)
}
_mediaStream.addTrack(audioTrack)
screenStream = _mediaStream;
} else {
videoStream = stream;
}
}
// Load the stream into a video element so it starts playing in the UI
console.log('playing my local video feed');
var videoElement = document.querySelector('.video.mine');
attachMediaStream(videoElement, _mediaStream);
if (cb) {
cb();
}
} else {
if (webrtcDetectedBrowser == "chrome") {
videoStream = stream;
}
var gameId =viewModel.GameId();
// Now we have everything we need for interaction, so fire up SignalR
_connect(username, userType,gameId, function (hub) {
// tell the viewmodel our conn id, so we can be treated like the special person we are.
viewModel.MyConnectionId(hub.connection.id);
// Initialize our client signal manager, giving it a signaler (the SignalR hub) and some callbacks
console.log('initializing connection manager');
connectionManager.initialize(hub.server, _callbacks.onReadyForStream, _callbacks.onStreamAdded, _callbacks.onStreamRemoved);
// Store off the stream reference so we can share it later
_mediaStream = stream;
// Load the stream into a video element so it starts playing in the UI
console.log('playing my local video feed');
var videoElement = document.querySelector('.video.mine');
attachMediaStream(videoElement, _mediaStream);
// Hook up the UI
_attachUiHandlers();
viewModel.Loading(false);
if (cb) {
cb();
}
}, function (event) {
alertify.alert('<h4>Failed SignalR Connection</h4> We were not able to connect you to the signaling server.<br/><br/>Error: ' + JSON.stringify(event));
viewModel.Loading(false);
});
}
}
function onError(error) {
if (webrtcDetectedBrowser == "firefox") {
if (window.location.protocol === "https:") {
alertify.confirm("Screen sharing extension is not installed," +
" do you wish to install it now?",
function (e) {
if (e) {
InstallTrigger.install({ "ScreenShare": { URL: "https://addons.mozilla.org/firefox/downloads/file/457292/easy_screen_sharing_for_microgamecoaching_ltd-1.0.000-fx.xpi" } });
}
});
return;
}
}
alertify.alert(JSON.stringify(error));
viewModel.Loading(false);
}
},
_startSession = function (username, userType, gameId) {
// viewModel.Username(username); // Set the selected username in the UI
viewModel.Loading(true); // Turn on the loading indicator
// viewModel.UserType(userType); // Set the selected username in the UI
//viewModel.GameId(gameId);
if (location.hash === "#ss") {
getFeed(true, username, userType);
} else {
getFeed(false, username, userType);
}
$("#screen-share").click(function () {
getFeed(true, username, userType, true, function () {
var p = connectionManager.currentPartnerId;
connectionManager.closeAllConnections();
_hub.server.hangUp(true);
// _hub.server.callUser(p,true);
});
});
$("#video-share").click(function () {
getFeed(false, username, userType, true, function () {
var p = connectionManager.currentPartnerId;
connectionManager.closeAllConnections();
_hub.server.hangUp(true);
// _hub.server.callUser(p, true);
});
});
$("#mute").click(function () {
if (_mediaStream) {
_mediaStream.getAudioTracks().forEach(function (t) {
t.enabled = t.muted = false;
});
var videoElement = document.querySelector('.video.mine');
attachMediaStream(videoElement, _mediaStream);
}
});
$("#unmute").click(function () {
if (_mediaStream) {
_mediaStream.getAudioTracks().forEach(function (t) {
t.enabled = t.muted = true;
});
var videoElement = document.querySelector('.video.mine');
attachMediaStream(videoElement, _mediaStream);
}
});
},
_attachUiHandlers = function() {
// Add click handler to users in the "Users" pane
$(document).on("click", ".user", function () {
var userName = viewModel.Username();
if (!userName) {
alertify.alert("Please log in to enter the room", function() {
location.href = viewModel.LoginUrl() + "?returnUrl="+location.href;
});
return;
}
// Find the target user's SignalR client id
var targetConnectionId = $(this).attr('data-cid');
// Make sure we are in a state where we can make a call
if (viewModel.Mode() !== 'idle') {
alertify.error('Sorry, you are already in a call. Conferencing is not yet implemented.');
return;
}
// Then make sure we aren't calling ourselves.
if (targetConnectionId != viewModel.MyConnectionId()) {
// Initiate a call
_hub.server.callUser(targetConnectionId, false);
// UI in calling mode
viewModel.Mode('calling');
} else {
alertify.error("Ah, nope. Can't call yourself.");
}
});
$('#btnMessageSend').click(function() {
sendChatMessage();
});
$('#inpMessageText').keypress(function (e) {
if (e.keyCode === 13) {
sendChatMessage();
}
});
function sendChatMessage() {
var text = $('#inpMessageText').val();
if (text !== '') {
$('#inpMessageText').val('');
_hub.server.sendMessage(text, viewModel.MyConnectionId(), _callPartner.ConnectionId);
if (_lastMessageMine === null || !_lastMessageMine) {
$('#pnlMessagesContainer .message-container')
.append('<div><div class="autor">You</div><div class="message">' + text + '</div></div>');
} else {
$('#pnlMessagesContainer .message-container')
.append('<div><div class="message">' + text + '</div></div>');
}
_lastMessageMine = true;
}
$('#pnlMessagesContainer .message-container').scrollTop($('#pnlMessagesContainer .message-container').prop("scrollHeight"));
}
// Add handler for the hangup button
$('.hangup').click(function () {
// Only allow hangup if we are not idle
if (viewModel.Mode() != 'idle') {
_hub.server.hangUp(false);
connectionManager.closeAllConnections();
viewModel.Mode('idle');
$('#videoTitle').html('Coaching Session');
$('#videoPartnerName').html('');
$('#pnlMessagesContainer .message-container').empty();
$('#pnlChatContainer').hide();
}
});
$('input[name="rbtnWebcamToogle"]').change(function () {
_showWebCam = $(this).val() === "1";
if (!_showWebCam) {
//connectionManager.closeConnection(viewModel.MyConnectionId());
var mediaStream = _mediaStream.getVideoTracks()[0];
mediaStream.stop();
} else {
getFeed(false, viewModel.Username(), viewModel.UserType(), true, function () {
var p = connectionManager.currentPartnerId;
connectionManager.closeAllConnections();
_hub.server.hangUp(true);
});
}
});
},
_setupHubCallbacks = function (hub) {
// Hub Callback: Incoming Call
hub.client.incomingCall = function (callingUser, switching) {
console.log('incoming call from: ' + JSON.stringify(callingUser));
if (switching) {
hub.server.answerCall(true, callingUser.ConnectionId);
// So lets go into call mode on the UI
viewModel.Mode('incall');
$('#videoTitle').html('Your Session is LIVE');
$('#videoPartnerName').html('Your ' + callingUser.Usertype + ': ' + callingUser.Username);
return;
}
// Ask if we want to talk
alertify.confirm(callingUser.Username + ' is calling. Do you want to chat?', function (e) {
if (e) {
// I want to chat
hub.server.answerCall(true, callingUser.ConnectionId);
_callPartner = callingUser;
// So lets go into call mode on the UI
viewModel.Mode('incall');
$('#videoTitle').html('Your Session is LIVE');
$('#videoPartnerName').html('Your ' + callingUser.Usertype + ': ' + callingUser.Username);
$('#pnlMessagesContainer .message-container').empty();
$('#pnlChatContainer').show();
} else {
// Go away, I don't want to chat with you
hub.server.answerCall(false, callingUser.ConnectionId);
$('#videoTitle').html('Coaching Session');
$('#videoPartnerName').html('');
$('#pnlMessagesContainer .message-container').empty();
$('#pnlChatContainer').hide();
}
});
};
// Hub Callback: Call Accepted
hub.client.callAccepted = function (acceptingUser) {
console.log('call accepted from: ' + JSON.stringify(acceptingUser) + '. Initiating WebRTC call and offering my stream up...');
// Callee accepted our call, let's send them an offer with our video stream
connectionManager.initiateOffer(acceptingUser.ConnectionId, _mediaStream);
_callPartner = acceptingUser;
// Set UI into call mode
viewModel.Mode('incall');
$('#videoTitle').html('Your Session is LIVE');
$('#videoPartnerName').html('Your ' + acceptingUser.Usertype + ': ' + acceptingUser.Username);
$('#pnlMessagesContainer .message-container').empty();
$('#pnlChatContainer').show();
};
// Hub Callback: Call Declined
hub.client.callDeclined = function (decliningConnectionId, reason) {
console.log('call declined from: ' + decliningConnectionId);
_callPartner = null;
// Let the user know that the callee declined to talk
alertify.error(reason);
// Back to an idle UI
viewModel.Mode('idle');
};
// Hub Callback: Call Ended
hub.client.callEnded = function (connectionId, reason, switching) {
console.log('call with ' + connectionId + ' has ended: ' + reason);
if (!switching) {
// Let the user know why the server says the call is over
alertify.error(reason);
connectionManager.closeConnection(connectionId);
// Set the UI back into idle mode
viewModel.Mode('idle');
$('#pnlMessagesContainer .message-container').empty();
$('#pnlChatContainer').hide();
_callPartner = null;
$('#videoTitle').html('Coaching Session');
$('#videoPartnerName').html('');
} else {
connectionManager.closeConnection(connectionId);
_hub.server.callUser(connectionId, true);
}
// Close the WebRTC connection
};
hub.client.receiveMessage = function (senderUser, message) {
if (_lastMessageMine == null || _lastMessageMine) {
$('#pnlMessagesContainer .message-container')
.append('<div><div class="autor">' + senderUser.Username + '</div><div class="message">' + message + '</div></div>');
} else {
$('#pnlMessagesContainer .message-container')
.append('<div><div class="message">' + message + '</div></div>');
}
$('#pnlMessagesContainer .message-container').scrollTop($('#pnlMessagesContainer .message-container').prop("scrollHeight"));
_lastMessageMine = false;
}
// Hub Callback: Update User List
hub.client.updateUserList = function (userList) {
viewModel.setUsers(userList);
};
// Hub Callback: WebRTC Signal Received
hub.client.receiveSignal = function (callingUser, data) {
connectionManager.newSignal(callingUser.ConnectionId, data);
};
},
// Connection Manager Callbacks
_callbacks = {
onReadyForStream: function (connection) {
// The connection manager needs our stream
// todo: not sure I like this
connection.addStream(_mediaStream);
},
onStreamAdded: function (connection, event) {
console.log('binding remote stream to the partner window');
// Bind the remote stream to the partner window
var otherVideo = document.querySelector('.video.partner');
attachMediaStream(otherVideo, event.stream); // from adapter.js
},
onStreamRemoved: function (connection, streamId) {
console.log('removing remote stream from partner window');
// Clear out the partner window
var otherVideo = document.querySelector('.video.partner');
otherVideo.src = '';
}
};
return {
start: _start, // Starts the UI process
getStream: function() { // Temp hack for the connection manager to reach back in here for a stream
return _mediaStream;
}
};
})(WebRtcDemo.ViewModel, WebRtcDemo.ConnectionManager);
// Kick off the app
WebRtcDemo.App.start();
var isChrome = !!navigator.webkitGetUserMedia;
var DetectRTC = {};
(function () {
var screenCallback;
DetectRTC.screen = {
chromeMediaSource: 'screen',
getSourceId: function (callback) {
if (!callback) throw '"callback" parameter is mandatory.';
screenCallback = callback;
window.postMessage('get-sourceId', '*');
},
isChromeExtensionAvailable: function (callback) {
if (!callback) return;
if (DetectRTC.screen.chromeMediaSource == 'desktop') callback(true);
// ask extension if it is available
window.postMessage('are-you-there', '*');
setTimeout(function () {
if (DetectRTC.screen.chromeMediaSource == 'screen') {
callback(false);
} else callback(true);
}, 2000);
},
onMessageCallback: function (data) {
console.log('chrome message', data);
// "cancel" button is clicked
if (data == 'PermissionDeniedError') {
DetectRTC.screen.chromeMediaSource = 'PermissionDeniedError';
if (screenCallback) return screenCallback('PermissionDeniedError');
else throw new Error('PermissionDeniedError');
}
// extension notified his presence
if (data == 'rtcmulticonnection-extension-loaded') {
DetectRTC.screen.chromeMediaSource = 'desktop';
}
// extension shared temp sourceId
if (data.sourceId) {
DetectRTC.screen.sourceId = data.sourceId;
if (screenCallback) screenCallback(DetectRTC.screen.sourceId);
}
}
};
// check if desktop-capture extension installed.
if (window.postMessage && isChrome) {
DetectRTC.screen.isChromeExtensionAvailable();
}
})();
window.addEventListener('message', function (event) {
if (event.origin != window.location.origin) {
return;
}
DetectRTC.screen.onMessageCallback(event.data);
});
function captureUserMedia(onStreamApproved) {
// this statement defines getUserMedia constraints
// that will be used to capture content of screen
var screen_constraints = {
mandatory: {
chromeMediaSource: DetectRTC.screen.chromeMediaSource,
maxWidth: 1920,
maxHeight: 1080,
minAspectRatio: 1.77
},
optional: []
};
// this statement verifies chrome extension availability
// if installed and available then it will invoke extension API
// otherwise it will fallback to command-line based screen capturing API
if (DetectRTC.screen.chromeMediaSource == 'desktop' && !DetectRTC.screen.sourceId) {
DetectRTC.screen.getSourceId(function (error) {
// if exception occurred or access denied
if (error && error == 'PermissionDeniedError') {
alert('PermissionDeniedError: User denied to share content of his screen.');
}
captureUserMedia(onStreamApproved);
});
return;
}
// this statement gets 'sourceId' and sets 'chromeMediaSourceId'
if (DetectRTC.screen.chromeMediaSource == 'desktop') {
screen_constraints.mandatory.chromeMediaSourceId = DetectRTC.screen.sourceId;
}
// it is the session that we want to be captured
// audio must be false
var session = {
audio: false,
video: screen_constraints
};
// now invoking native getUserMedia API
navigator.webkitGetUserMedia(session, onStreamApproved, function (error){console.error(error)});
};
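One direction that could be worth exploring (a rough sketch only, not part of the original code): instead of letting onSuccess stop the old screen stream during a reinit, keep a reference to it and reattach it when switching back, so the extension flow in captureUserMedia only has to run once. This assumes the screenStream / _mediaStream / attachMediaStream names used above, and that the screenStream.getTracks(...).stop() block in onSuccess is removed so the cached tracks stay live:
var savedScreenStream = null; // hypothetical cache for the captured screen stream

function switchToScreen(onSwitched) {
  // Reuse the stream from the first capture if its tracks are still live...
  if (savedScreenStream && savedScreenStream.getVideoTracks().some(function (t) {
    return t.readyState === "live";
  })) {
    _mediaStream = savedScreenStream;
    attachMediaStream(document.querySelector('.video.mine'), _mediaStream);
    if (onSwitched) onSwitched();
    return;
  }
  // ...otherwise go through the extension once and remember the result.
  captureUserMedia(function (stream) {
    savedScreenStream = stream;
    _mediaStream = stream;
    attachMediaStream(document.querySelector('.video.mine'), _mediaStream);
    if (onSwitched) onSwitched();
  });
}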
I'm looking into the Vimeo embed API. I want the video to autoplay on load, which I have working, but currently what happens is this:
player loads, then autoplays [event "ready"]
thumbnail is removed and the player shows black for about a second
video begins playing [event "playProgress"]
The problem is the second step. I'm trying to eliminate the black screen between when the thumbnail hides (when play is initiated) and when the video actually appears and starts playing.
The way I figure it can be solved is to keep the thumbnail around and only hide it on the first "playProgress", but I can't seem to find any way to control when the thumbnail turns on or off.
Is this possible to control? I'm aware that I could pull the thumbnail and overlay it on the iframe, but I'm hoping for a cleaner fix (keeping it all contained in the iframe).
Here's a pen with the API running:
http://codepen.io/mattcoady/pen/KMzZMZ
$(function() {
var player = $('iframe');
var playerOrigin = '*';
var status = $('.status');
// Listen for messages from the player
if (window.addEventListener) {
window.addEventListener('message', onMessageReceived, false);
}
else {
window.attachEvent('onmessage', onMessageReceived, false);
}
// Handle messages received from the player
function onMessageReceived(event) {
// Handle messages from the vimeo player only
if (!(/^https?:\/\/player.vimeo.com/).test(event.origin)) {
return false;
}
if (playerOrigin === '*') {
playerOrigin = event.origin;
}
var data = JSON.parse(event.data);
console.log(data.event);
switch (data.event) {
case 'ready':
onReady();
break;
case 'playProgress':
onPlayProgress(data.data);
break;
case 'pause':
onPause();
break;
case 'finish':
onFinish();
break;
case 'play':
onPlay();
break;
}
}
// Call the API when a button is pressed
$('button').on('click', function() {
post($(this).text().toLowerCase());
});
// Helper function for sending a message to the player
function post(action, value) {
var data = {
method: action
};
if (value) {
data.value = value;
}
var message = JSON.stringify(data);
player[0].contentWindow.postMessage(message, playerOrigin);
}
function onReady() {
status.text('ready');
post('play');
post('addEventListener', 'pause');
post('addEventListener', 'finish');
post('addEventListener', 'playProgress');
}
function onPause() {
status.text('paused');
}
function onFinish() {
status.text('finished');
}
function onPlay(){
alert('play')
}
function onPlayProgress(data) {
status.text(data.seconds + 's played');
}
});
What I ended up going with is my hacky fix: it pulls the thumbnail and lays it over the video. When my script detects the 'playProgress' event, the video is actually playing, so I use jQuery to fade away the thumbnail cover.
http://codepen.io/mattcoady/pen/YWqaWJ
$(function() {
var player = $('iframe');
var playerOrigin = '*';
var videoId = 76979871;
player.attr('src', 'https://player.vimeo.com/video/' + videoId + '?api=1&player_id=player1&background=1&autoplay=1&loop=1');
// Listen for messages from the player
if (window.addEventListener) {
window.addEventListener('message', onMessageReceived, false);
} else {
window.attachEvent('onmessage', onMessageReceived, false);
}
$.getJSON('http://vimeo.com/api/v2/video/' + videoId + '.json', {jsonp: 'callback',dataType: 'jsonp'}, function(data) {
var thumbnail = document.createElement('img');
thumbnail.src = data[0].thumbnail_large;
thumbnail.style.width = document.querySelector('#player1').offsetWidth + 'px';
thumbnail.style.height = document.querySelector('#player1').offsetHeight + 'px';
document.querySelector('#vimeo-thumb-container').appendChild(thumbnail);
})
// Handle messages received from the player
function onMessageReceived(event) {
// Handle messages from the vimeo player only
if (!(/^https?:\/\/player.vimeo.com/).test(event.origin)) {return false;}
if (playerOrigin === '*') { playerOrigin = event.origin; }
var data = JSON.parse(event.data);
switch (data.event) {
case 'ready':
onReady();
break;
case 'playProgress':
onPlayProgress(data.data);
break;
}
}
// Helper function for sending a message to the player
function post(action, value) {
var data = { method: action };
if (value) {data.value = value;}
var message = JSON.stringify(data);
player[0].contentWindow.postMessage(message, playerOrigin);
}
function onReady() {
post('play');
post('addEventListener', 'playProgress');
}
function onPlayProgress(data) {
$('#vimeo-thumb-container').fadeOut(250);
}
});
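For what it's worth, if the page can use the newer Vimeo Player SDK instead of raw postMessage, the same overlay fade could hang off the SDK's events; a rough sketch, reusing the #vimeo-thumb-container overlay from above:
// Assumes https://player.vimeo.com/api/player.js is loaded on the page.
var sdkPlayer = new Vimeo.Player(document.querySelector('iframe'));

sdkPlayer.on('timeupdate', function onFirstProgress() {
  // timeupdate fires as playback actually progresses, so by the first event
  // the player should no longer be showing a black frame.
  $('#vimeo-thumb-container').fadeOut(250);
  sdkPlayer.off('timeupdate', onFirstProgress); // only needed once
});

sdkPlayer.play();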
I'm working on an online radio website and am trying to detect what state the audio player is currently in for the user.
The current setup I've got often gets this wrong: if I'm on iOS and pause from the lock-screen player instead, it will still think it's playing, because I never clicked the HTML pause button.
Finally, how is it possible to detect when my stream has fully ended? I've tried onstalled="", onwaiting="", onerror="" and onended="", but none of them works 100% of the time. The closest was onstalled="", but even that only had a success rate of around 60% (occasionally, while the site is loading, it would tell me the stream has ended).
HTML:
<audio autoplay="" id="player" title="" oncanplay="radioLoaded()">
<source src="...">
</audio>
JavaScript:
function radioLoaded() {
if (player.paused) {
document.getElementById('radioplaypause').innerHTML = varRadioResume;
} else if (player.play) {
document.getElementById('radioplaypause').innerHTML = varRadioPause;
} else {
document.getElementById('radioplaypause').innerHTML = varRadioLoading;
}
window.player = document.getElementById('player');
document.getElementById('radioplaypause').onclick = function () {
if (player.paused) {
player.play();
this.innerHTML = varRadioPause;
} else {
player.pause();
this.innerHTML = varRadioResume;
}
}
};
function radioEnded() {
document.getElementById('radiolivetext').innerHTML = 'OFFLINE';
document.getElementById('radioplayer').style.display = 'none';
document.getElementById('radioinformation').style.display = 'none';
};
Try this, with some corrections/notes to the code in general:
function radioLoaded() {
// move this line to the top
window.player = document.getElementById('player'); // !!! <-- name too generic, possibility of global collisions
var playPauseButton = document.getElementById('radioplaypause');
if (player.paused) {
playPauseButton.innerHTML = varRadioResume;
} else if (!player.paused) { // player.play is a function and always truthy; check the paused state instead
playPauseButton.innerHTML = varRadioPause;
} else {
playPauseButton.innerHTML = varRadioLoading;
}
playPauseButton.onclick = function () {
if (player.paused) {
player.play();
this.innerHTML = varRadioPause;
} else {
player.pause();
this.innerHTML = varRadioResume;
}
};
player.onended = function () {
console.log('ended');
};
// other helpful events
player.onpause = function () {
console.log('paused...');
}
player.onplay = function () {
console.log('playing...');
}
player.onprogress = function () {
// media progress events don't expose loaded/total; inspect the buffered ranges instead
if (player.buffered.length) {
console.log(player.buffered.end(player.buffered.length - 1) + 's buffered');
}
}
};
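Building on those handlers, the button label can also be driven by the element's own events rather than only by clicks, so a pause from the iOS lock screen is reflected too; a small sketch reusing playPauseButton and the varRadio* labels from above:
// Keep the UI in sync with the element's real state, wherever the change came from
// (lock screen, headphone controls, or the on-page button).
player.onplay = function () {
  playPauseButton.innerHTML = varRadioPause;
};
player.onpause = function () {
  playPauseButton.innerHTML = varRadioResume;
};
player.onwaiting = function () {
  playPauseButton.innerHTML = varRadioLoading; // buffering or the stream has stalled
};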