I am using the RecordRTC script for recording video/audio.
Everything is fine until I require 720p.
If you visit the RecordRTC site and choose 1280x720 for both the video and canvas options, you'll notice a considerable slowdown compared to smaller sizes (expected).
If I record ONLY video it works perfectly, but I require both.
$('.btn.record').on('click', function() {
    !window.stream && navigator.getUserMedia(
        {
            audio: true,
            video: {
                optional: [],
                mandatory: {
                    minWidth: 1280,
                    minHeight: 720,
                    maxWidth: 1280,
                    maxHeight: 720,
                    minAspectRatio: 1.7777777778,
                    maxAspectRatio: 1.7777777778
                }
            }
        },
        function(stream) {
            window.stream = stream;
            onstream();
        },
        function(error) {
            alert(error);
        }
    );
    window.stream && onstream();

    function onstream() {
        preview.src = window.URL.createObjectURL(stream);
        preview.play();
        preview.muted = true;
        recordAudio = RecordRTC(stream, {
            onAudioProcessStarted: function() {
                if (!isFirefox) { // isFirefox comes from the surrounding demo code
                    recordVideo.startRecording();
                }
            }
        });
        recordVideo = RecordRTC(stream, {
            type: 'video',
            video: {
                width: 1280,
                height: 720
            },
            canvas: {
                width: 1280,
                height: 720
            }
        });
        recordAudio.startRecording(); // DISABLE THIS AND VIDEO IS PERFECT
    }
});
Use <canvas> width/height as low as possible; 320x240 are suggested values:
recordVideo = RecordRTC(stream, {
    type: 'video',
    video: {
        width: 1280, // redundant because offsetWidth/videoWidth will be 1280
        height: 720  // redundant because offsetHeight/videoHeight will be 720
    },
    canvas: {
        width: 320,  // suggested width
        height: 240  // suggested height
    }
});
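For completeness, here is a minimal sketch of stopping both recorders and collecting the blobs (the .btn.stop selector is hypothetical; stopRecording and getBlob are RecordRTC's standard calls):

$('.btn.stop').on('click', function() {
    // stop audio first, then video, then read both blobs
    recordAudio.stopRecording(function() {
        var audioBlob = recordAudio.getBlob();
        recordVideo.stopRecording(function() {
            var videoBlob = recordVideo.getBlob();
            // upload or preview the blobs here
            console.log('audio bytes:', audioBlob.size, 'video bytes:', videoBlob.size);
        });
    });
});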
I have a couple of problems with sprite scaling which seem to work fine for other people.
So, this is my game:
As you can see, there are two big problems: the images are really pixelated, and there is texture bleeding.
This is my config:
var config = {
    type: Phaser.AUTO,
    parent: 'phaser-example',
    width: '100%',
    height: '100%',
    mode: Phaser.Scale.RESIZE,
    autoCenter: Phaser.Scale.CENTER_BOTH,
    physics: {
        default: 'arcade',
        arcade: {
            debug: true,
        },
        pixelArt: true,
        render: {
            antialias: false,
        }
    },
    scene: {
        preload: preload,
        create: create,
        update: update,
    }
};
This is how I preload my assets:
function preload() {
    this.load.image("tiles", "assets/turtle_wars_tiles.png");
    this.load.tilemapTiledJSON("tutorial_map", "assets/tutorial_map.json");
    // preloading the player
    this.load.spritesheet("player", "assets/characters.png", {
        frameWidth: 26,
        frameHeight: 36,
    });
}
And this is how I create the tilemap:
const map = this.make.tilemap({ key: "tutorial_map", tileWidth: 48, tileHeight: 48 });
const tileset = map.addTilesetImage("turtle_wars_tiles", "tiles");
for (let i = 0; i < map.layers.length; i++) {
    const layer = map.createStaticLayer(i, tileset, 0, 0);
    layer.setDepth(i);
    layer.setScale(5);
    layer.setCollisionByProperty({ collides: true });
    this.physics.add.collider(this.player.collider, layer).collideCallback = () => {
        this.player.collide();
    };
}
I tried extruding my tile set of 16x16 tiles, but it messes up my whole map and only solves the texture bleeding.
Any idea how I can solve this?
I just tried it in a small demo project; I think the solution is simply to edit your game config as shown below.
Info: you added the correct properties, but only inside the physics object. They belong one level higher.
Like this it should work:
var config = {
    type: Phaser.AUTO,
    parent: 'phaser-example',
    width: '100%',
    height: '100%',
    mode: Phaser.Scale.RESIZE,
    autoCenter: Phaser.Scale.CENTER_BOTH,
    physics: {
        default: 'arcade',
        arcade: {
            debug: true,
        },
    },
    pixelArt: true, // top level, not inside physics
    render: {
        antialias: false,
    },
    ...
};
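If you would rather not change the global config, Phaser can also switch to nearest-neighbour filtering per texture after loading; a small sketch, using the texture keys from the preload above:

function create() {
    // NEAREST keeps scaled pixel art crisp instead of blurry
    this.textures.get('tiles').setFilter(Phaser.Textures.FilterMode.NEAREST);
    this.textures.get('player').setFilter(Phaser.Textures.FilterMode.NEAREST);
    // ... rest of create()
}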
I am creating a skateboarding game in JavaScript using the Phaser 3 framework. I have loaded the ramp image onto the screen, and I am currently using the "arcade" physics engine in my file. I know I have to use the Matter physics engine to get a non-rectangular hitbox. How do I implement it with the triangular hitbox?
I have the .png image file of the ramp, along with the .json file for its hitbox.
I tried following a tutorial from a website on how to create new hitboxes for the Matter physics engine. Everything ended up falling off the screen, and I couldn't figure out how to use the .json file for the ramp.
// Configurations for the physics engine
var physicsConfig = {
    default: 'arcade',
    arcade: {
        debug: true // shows physics outlines; set to false to hide them
    }
};
// Game configurations
var config = {
    type: Phaser.AUTO,
    width: 1200,
    height: 600,
    physics: physicsConfig,
    scene: {
        preload: preload,
        create: create,
        update: update
    }
};
// Start the game
var game = new Phaser.Game(config);

function preload() {
    // Images
    this.load.image('sky', 'archery_assets/images/sky.png');
    this.load.image('ground', 'skate_assets/images/ground.png');
    this.load.image('up_ramp', 'skate_assets/images/up_ramp.png');
    // Spritesheets
    this.load.spritesheet('player', 'skate_assets/spritesheets/skater.png', { frameWidth: 160, frameHeight: 160 });
}
function create() {
    // Background
    skyImg = this.add.image(600, 300, 'sky');
    // Scale the images
    skyImg.setDisplaySize(1200, 600);
    groundImg = this.add.image(600, 600, 'ground');
    groundImg.setDisplaySize(1200, 250);
    // Create the player
    this.player = this.physics.add.sprite(100, 410, 'player');
    this.player.setCollideWorldBounds(true);
    // Rolling animation
    this.anims.create({
        key: 'move',
        frames: this.anims.generateFrameNumbers('player', { start: 0, end: 3 }),
        frameRate: 16,
        repeat: -1 // keeps the rolling animation going
    });
    // Pushing animation
    this.anims.create({
        key: 'push',
        frames: this.anims.generateFrameNumbers('player', { start: 4, end: 8 }),
        frameRate: 16
    });
    // Start and keep the rolling animation going
    this.player.anims.play('move', true);
    // Up ramp (1st ramp)
    this.upRamp = this.physics.add.sprite(700, 330, 'up_ramp');
    this.upRamp.setSize(320, 150).setOffset(0, 175);
    this.upRamp.enableBody = true;
    this.upRamp.setImmovable();
    this.upRamp.body.angle = 150;
    // Input
    this.cursors = this.input.keyboard.createCursorKeys();
    // Spacebar
    this.spacebar = this.input.keyboard.addKey(Phaser.Input.Keyboard.KeyCodes.SPACE);
    this.physics.add.collider(this.player, this.upRamp);
}
function update() {
    // Set variable for push speed
    var playerPushSpeed = 0;
    // If the spacebar is pressed
    if (this.spacebar.isDown) {
        // Play the push animation
        this.player.anims.play('push', true);
        // Push speed
        playerPushSpeed += 175;
        // Move player
        this.player.setVelocityX(playerPushSpeed);
    }
    if (this.upRamp.body.touching.left) {
        this.player.setVelocityY(-200);
    }
}
I need to know how to implement the .png image of the ramp along with its .json hitbox file, so that the player can properly "ride" up the ramp.
You'll have to use the physics: { default: 'matter' } in order to change the hitbox's shape. Use this code snippet for reference:
var config = {
    type: Phaser.AUTO,
    width: 800,
    height: 600,
    backgroundColor: '#000000',
    parent: 'phaser-example',
    physics: {
        default: 'matter',
        matter: {
            gravity: {
                y: 0
            },
            debug: true
        }
    },
    scene: {
        create: create
    }
};

var game = new Phaser.Game(config);

function create() {
    this.matter.world.setBounds();
    this.matter.add.rectangle(200, 200, 200, 200, {
        chamfer: { radius: [230, 0, 200, 0] }
    });
    this.matter.add.mouseSpring();
}
You could also use PhysicsEditor; you can check this tutorial on how to implement different shapes.
EDIT:
You can use console.log(this.matter.bodies) to check the other available shapes you could implement.
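If the ramp's .json file was exported from PhysicsEditor, you can pass it straight to Matter as the sprite's shape. A sketch under that assumption (the ramp_shapes key, the file path, and the up_ramp fixture name are illustrative):

function preload() {
    this.load.image('up_ramp', 'skate_assets/images/up_ramp.png');
    this.load.json('ramp_shapes', 'skate_assets/json/up_ramp.json'); // hypothetical path
}

function create() {
    var shapes = this.cache.json.get('ramp_shapes');
    // Matter uses the PhysicsEditor fixture as the sprite's hitbox
    this.upRamp = this.matter.add.sprite(700, 330, 'up_ramp', null, {
        shape: shapes.up_ramp, // fixture name as exported from PhysicsEditor
        isStatic: true         // the ramp should not move when hit
    });
}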
I want to show my mobile's camera on the form, but right now the camera shows up only on the desktop web page, not on the mobile web page. Why can't I access my mobile's camera? Please help. Thanks. Here is the code of webcam.js:
var WebCodeCamJS = function(element) {
    'use strict';
    this.Version = {
        name: 'WebCodeCamJS',
        version: '2.7.0',
        author: 'Tóth András',
    };
    var mediaDevices = window.navigator.mediaDevices;
    mediaDevices.getUserMedia = function(c) {
        return new Promise(function(y, n) {
            (window.navigator.getUserMedia || window.navigator.mozGetUserMedia || window.navigator.webkitGetUserMedia).call(navigator, c, y, n);
        });
    };
    HTMLVideoElement.prototype.streamSrc = ('srcObject' in HTMLVideoElement.prototype) ? function(stream) {
        this.srcObject = !!stream ? stream : null;
    } : function(stream) {
        if (!!stream) {
            this.src = (window.URL || window.webkitURL).createObjectURL(stream);
        } else {
            this.removeAttribute('src');
        }
    };
    var videoSelect, lastImageSrc, con, beepSound, w, h, lastCode;
    var display = Q(element),
        DecodeWorker = null,
        video = html('<video muted autoplay playsinline></video>'),
        sucessLocalDecode = false,
        localImage = false,
        flipMode = [1, 3, 6, 8],
        isStreaming = false,
        delayBool = false,
        initialized = false,
        localStream = null,
        options = {
            decodeQRCodeRate: 5,
            decodeBarCodeRate: 3,
            successTimeout: 500,
            codeRepetition: true,
            tryVertical: true,
            frameRate: 15,
            width: 320,
            height: 240,
            constraints: {
                video: {
                    mandatory: {
                        maxWidth: 1280,
                        maxHeight: 720
                    },
                    optional: [{
                        sourceId: true
                    }]
                },
                audio: false,
            },
            flipVertical: false,
            flipHorizontal: false,
            zoom: 0,
            beep: 'audio/beep.mp3',
            decoderWorker: 'js/DecoderWorker.js',
            brightness: 0,
            autoBrightnessValue: 0,
            grayScale: 0,
            contrast: 0,
            threshold: 0,
            sharpness: [],
            resultFunction: function(res) {
                console.log(res.format + ": " + res.code);
            },
            cameraSuccess: function(stream) {
                console.log('cameraSuccess');
            },
            canPlayFunction: function() {
                console.log('canPlayFunction');
            },
            getDevicesError: function(error) {
                console.log(error);
            },
            getUserMediaError: function(error) {
                console.log(error);
            },
            cameraError: function(error) {
                console.log(error);
            }
        };
And here is the code of play(), which the button triggers to show the camera:
function play() {
    if (!localImage) {
        if (!localStream) {
            init();
        }
        const p = video.play();
        if (p && (typeof Promise !== 'undefined') && (p instanceof Promise)) {
            p.catch(e => null);
        }
        delay();
    }
}
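One common reason a camera works on the desktop page but not on the mobile page is that mobile browsers only expose getUserMedia on secure origins (HTTPS or localhost). As a quick standalone check, independent of WebCodeCamJS, you can try requesting the rear camera directly; a minimal sketch:

// Does the page itself get a camera stream on the phone?
navigator.mediaDevices.getUserMedia({
    video: { facingMode: 'environment' }, // rear camera on phones
    audio: false
}).then(function(stream) {
    var v = document.querySelector('video');
    v.srcObject = stream;
    return v.play();
}).catch(function(err) {
    // a NotAllowedError on a plain-HTTP page is the typical mobile failure
    console.log(err.name + ': ' + err.message);
});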
Hi, I'm using https://github.com/LazarSoft/jsqrcode, but I cannot get the QR code to decode through the camera.
What I want to do is simply decode a QR code read by the camera.
This is my code:
HTML
<video autoplay></video>
<canvas id="qr-canvas"></canvas>
JS
var video = document.querySelector('video');
var constraints = {
    audio: false,
    video: {
        facingMode: { exact: "environment" },
        width: { min: 100, ideal: 400, max: 450 },
        height: { min: 100, ideal: 400, max: 450 }
    }
};

navigator.mediaDevices.getUserMedia(constraints)
    .then(function(mediaStream) {
        video.srcObject = mediaStream;
        video.onloadedmetadata = function(e) {
            video.play();
        };
    }).catch(function(err) {
        console.log(err.name + ": " + err.message);
    });

var qr_canvas = document.getElementById('qr-canvas').getContext('2d');
qr_canvas.drawImage(video, 0, 0, 400, 400);
qrcode.decode();

function readQR(a) {
    alert(a);
}
qrcode.callback = readQR;
Thanks
Chrome no longer allows navigator.mediaDevices.getUserMedia on insecure (plain HTTP) origins, which is why camera access fails on pages served without HTTPS. Please use navigator.mediaDevices.enumerateDevices().then(function(mediaDeviceInfos) { ... }) to pick the camera for your development, and you can reference my project on GitHub: https://github.com/licaomeng/js-qrcode-scanner
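A minimal sketch of that approach: list the video inputs first, then open one by deviceId (the pick-the-last-camera heuristic is only illustrative; on phones the rear camera is often listed last):

navigator.mediaDevices.enumerateDevices().then(function(devices) {
    // keep only cameras
    var cams = devices.filter(function(d) { return d.kind === 'videoinput'; });
    var cam = cams[cams.length - 1];
    return navigator.mediaDevices.getUserMedia({
        video: { deviceId: { exact: cam.deviceId } }
    });
}).then(function(stream) {
    document.querySelector('video').srcObject = stream;
});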
I recorded video for 10 seconds in Firefox/Chrome using this example: https://www.webrtc-experiment.com/RecordRTC/. The recorded blob was around 4.36 MB for 10 seconds at 320x240. Then I modified some parameters as follows:
var videoConstraints = {
    audio: false,
    video: {
        mandatory: {
            width: { min: 320 },
            height: { min: 240 }
        },
        optional: [
            { width: 320 },
            { width: { min: 320 } },
            { frameRate: 60 },
            { quality: 10 },
            { width: { max: 320 } },
            { facingMode: "user" }
        ]
    }
};
But the blob size is still almost the same. What can I do to reduce the recorded blob size?
Your constraints mix the Chrome-specific format with the standard one, and will work in no browser.
The standard format (still requires a polyfill in Chrome):
var constraints = {
    video: {
        width: { min: 320 },
        height: { min: 240 },
        advanced: [
            { width: 320 },
            { width: { min: 320 } },
            { frameRate: 60 },
            { width: { max: 320 } },
            { facingMode: "user" }
        ]
    }
};
The standard also lets you express things declaratively, so this is the same/better:
var constraints = {
    video: {
        width: { min: 320, ideal: 320 },
        height: { min: 240 },
        frameRate: 60,
        facingMode: "user"
    }
};
Finally, if you're trying to reduce the size, don't use min. min and max are bounds, so min won't give you low resolutions; just the opposite, it sets a lower bound. Instead, use ideal to express gravity toward a value, or even max.
I won't show what this would look like in Chrome's syntax, since it is outdated and won't work in other browsers, but this snippet works across browsers using a polyfill:
var constraints = {
    video: {
        width: { min: 320, ideal: 320 },
        height: { min: 240 },
        frameRate: 60,
        facingMode: "user"
    }
};

function start() {
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function(stream) { v.srcObject = stream; })
        .then(function() {
            return new Promise(function(r) { v.onloadedmetadata = r; });
        })
        .then(function() {
            log("Success: " + v.videoWidth + "x" + v.videoHeight);
        })
        .catch(failed);
}

function log(msg) { div.innerHTML += "<p>" + msg + "</p>"; }
function failed(e) { log(e + ", line " + e.lineNumber); }
<video id="v" height="120" width="160" autoplay></video><br>
<button onclick="start()">Start!</button><div id="div"></div>
<script src="https://rawgit.com/webrtc/adapter/master/adapter.js"></script>
quality is not a standard constraint.
@jib's answer will reduce the resolution/framerate of the capture. The reason the blob size doesn't change, however, is that you need to reduce the encoding bitrate of MediaRecorder, and no browser has implemented bitrate limits for MediaRecorder yet (Firefox plans to do so soon; Firefox's initial implementation was built to support video recording for Firefox OS cameras, which ran at a fixed bitrate).
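For reference, the knobs in question are MediaRecorder's bitrate options; a sketch of what limiting the encoding bitrate looks like once a browser honours them (stream is assumed to come from getUserMedia; browsers that haven't implemented the limits simply ignore the hints):

// ask the encoder for roughly 250 kbps video and 64 kbps audio
var recorder = new MediaRecorder(stream, {
    mimeType: 'video/webm',
    videoBitsPerSecond: 250000,
    audioBitsPerSecond: 64000
});
var chunks = [];
recorder.ondataavailable = function(e) { chunks.push(e.data); };
recorder.onstop = function() {
    var blob = new Blob(chunks, { type: 'video/webm' });
    console.log('recorded ' + blob.size + ' bytes');
};
recorder.start();
// ... later: recorder.stop();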