While trying to follow this tutorial for pixi.js I came across something I can't quite understand.
I set the size of the renderer to 512 by 512, but when trying to render the stage with a TilingSprite, the scene gets cut off before it reaches the edge of the canvas, like so:
EDIT: the blackness you see on the image above is still within the 512 by 512 canvas itself, it is not the browser's background.
Instead of reproducing the floor tile forever, the scene just becomes black after 100 pixels or so.
What's happening here?
Here is the code:
PIXI.SCALE_MODES.DEFAULT = PIXI.SCALE_MODES.NEAREST;
//Aliases
var Container = PIXI.Container,
    autoDetectRenderer = PIXI.autoDetectRenderer,
    loader = PIXI.loader,
    resources = PIXI.loader.resources,
    TextureCache = PIXI.utils.TextureCache,
    Texture = PIXI.Texture,
    Sprite = PIXI.Sprite,
    TilingSprite = PIXI.TilingSprite;
var stage = new Container();
stage.scale.set(3,3);
var renderer = autoDetectRenderer(512, 512);
document.body.appendChild(renderer.view);
PIXI.loader
  .add("src/tileset.json")
  .load(setup);
function setup() {
  var id = PIXI.loader.resources["src/tileset.json"].textures;
  let dude = new Sprite( id["dude"] );
  let blob = new Sprite( id["blob"] );
  let floor = new TilingSprite( id["floor"] );
  let chest = new Sprite( id["chest"] );
  // Position the chest next to the right edge of the canvas
  chest.x = stage.width - chest.width - 48;
  chest.y = stage.height / 2 - chest.height / 2;
  stage.addChild(floor);
  stage.addChild(chest);
  stage.addChild(dude);
  renderer.render(stage);
}
It turns out TilingSprite also takes width and height as parameters. I erroneously assumed it would just repeat until the stage was completely covered, but both default to 100 px:
@param {PIXI.Texture} texture - the texture of the tiling sprite
@param {number} [width=100] - the width of the tiling sprite
@param {number} [height=100] - the height of the tiling sprite
Initializing it with the dimensions I wanted solved the issue.
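For reference, the fixed line looks something like this (the 512 x 512 is just an example; anything that covers the visible stage works, and the stage's 3x scale gives extra margin):
let floor = new TilingSprite( id["floor"], 512, 512 );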
I am currently trying to create some smooth terrain using the PlaneBufferGeometry of three.js from a height map I got from Google Images:
https://forums.unrealengine.com/filedata/fetch?id=1192062&d=1471726925
but the result is quite choppy.
(Sorry, this is my first question and evidently I need 10 reputation to post images, otherwise I would.. but here's an even better thing: a live demo! left click + drag to rotate, scroll to zoom)
As I said, I want a smooth terrain, so am I doing something wrong, or is this just how it comes out and I need to smooth it afterwards somehow?
Also here is my code:
const IMAGE_SRC = 'terrain2.png';
const SIZE_AMPLIFIER = 5;
const HEIGHT_AMPLIFIER = 10;
var WIDTH;
var HEIGHT;
var container = jQuery('#wrapper');
var scene, camera, renderer, controls;
var data, plane;
image();
// init();
function image() {
  var image = new Image();
  image.src = IMAGE_SRC;
  image.onload = function() {
    WIDTH = image.width;
    HEIGHT = image.height;
    var canvas = document.createElement('canvas');
    canvas.width = WIDTH;
    canvas.height = HEIGHT;
    var context = canvas.getContext('2d');
    console.log('image loaded');
    context.drawImage(image, 0, 0);
    data = context.getImageData(0, 0, WIDTH, HEIGHT).data;
    console.log(data);
    init();
  };
}
function init() {
  // initialize camera
  camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, .1, 100000);
  camera.position.set(0, 1000, 0);
  // initialize scene
  scene = new THREE.Scene();
  // initialize directional light (sun)
  var sun = new THREE.DirectionalLight(0xFFFFFF, 1.0);
  sun.position.set(300, 400, 300);
  sun.distance = 1000;
  scene.add(sun);
  var frame = new THREE.SpotLightHelper(sun);
  scene.add(frame);
  // initialize renderer
  renderer = new THREE.WebGLRenderer();
  renderer.setClearColor(0x000000);
  renderer.setPixelRatio(window.devicePixelRatio);
  renderer.setSize(window.innerWidth, window.innerHeight);
  container.append(renderer.domElement);
  // initialize controls
  controls = new THREE.OrbitControls(camera, renderer.domElement);
  controls.enableDamping = true;
  controls.dampingFactor = .05;
  controls.rotateSpeed = .1;
  // initialize plane
  plane = new THREE.PlaneBufferGeometry(WIDTH * SIZE_AMPLIFIER, HEIGHT * SIZE_AMPLIFIER, WIDTH - 1, HEIGHT - 1);
  plane.castShadow = true;
  plane.receiveShadow = true;
  var vertices = plane.attributes.position.array;
  // apply height map to vertices of plane
  for (let i = 0, j = 2; i < data.length; i += 4, j += 3) {
    vertices[j] = data[i] * HEIGHT_AMPLIFIER;
  }
  var material = new THREE.MeshPhongMaterial({color: 0xFFFFFF, side: THREE.DoubleSide, shading: THREE.FlatShading});
  var mesh = new THREE.Mesh(plane, material);
  mesh.rotation.x = - Math.PI / 2;
  mesh.matrixAutoUpdate = false;
  mesh.updateMatrix();
  plane.computeFaceNormals();
  plane.computeVertexNormals();
  scene.add(mesh);
  animate();
}
function animate() {
  requestAnimationFrame(animate);
  renderer.render(scene, camera);
  controls.update();
}
The result is jagged because the height map has low color depth. I took the liberty of coloring a portion of the height map (Paint bucket in Photoshop, 0 tolerance, non-continuous) so you can see for yourself how large are the areas which have the same color value, i.e. the same height.
The areas of the same color will create a plateau in your terrain. That's why you have plateaus and sharp steps in your terrain.
What you can do is either smooth out the Z values of the geometry or use a height map that stores 16 or even 32 bits of height information. The current height map only uses 8 bits, i.e. 256 distinct values.
One thing you could do to smooth things out a bit is to sample more than just a single pixel from the heightmap. Right now, the vertex indices directly correspond to the pixel position in the data-array. And you just update the z-value from the image.
for (let i = 0, j = 2; i < data.length; i += 4, j += 3) {
  vertices[j] = data[i] * HEIGHT_AMPLIFIER;
}
Instead you could do things like this:
get multiple samples with certain offsets along the x/y axes
compute a (weighted) average value from the samples
That way you would get some smoothing at the borders of the same-height areas.
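A minimal sketch of that idea, assuming the same data, WIDTH, HEIGHT, vertices and HEIGHT_AMPLIFIER as in the question, with a plain (unweighted) 3x3 average for simplicity:
// average the red channel over a 3x3 neighbourhood, clamped at the image borders
function sampleHeight(x, y) {
  let sum = 0, count = 0;
  for (let dy = -1; dy <= 1; dy++) {
    for (let dx = -1; dx <= 1; dx++) {
      const sx = Math.min(WIDTH - 1, Math.max(0, x + dx));
      const sy = Math.min(HEIGHT - 1, Math.max(0, y + dy));
      sum += data[4 * (sy * WIDTH + sx)];
      count++;
    }
  }
  return sum / count;
}
// same vertex order as the original loop, but sampling per (x, y) pixel position
for (let y = 0, j = 2; y < HEIGHT; y++) {
  for (let x = 0; x < WIDTH; x++, j += 3) {
    vertices[j] = sampleHeight(x, y) * HEIGHT_AMPLIFIER;
  }
}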
The second option is to use something like a blur-kernel (gaussian blur is horribly expensive, but maybe something like a fast box-blur would work for you).
As you are very limited in resolution due to just using a single byte, you should convert that image to float32 first:
const highResData = new Float32Array(data.length / 4);
for (let i = 0; i < highResData.length; i++) {
highResData[i] = data[4 * i] / 255;
}
Now the data is in a format that allows for far higher numeric resolution, so we can smooth it. You could either adapt something like StackBlur for the float32 use-case, use ndarrays and ndarray-gaussian-filter, or implement something simple yourself. The basic idea is to find an average value for all the values in those uniformly colored plateaus.
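For the "implement something simple yourself" route, a naive separable box blur over highResData could look like this (illustrative only; the radius is arbitrary and no attempt is made at performance):
function boxBlur(src, width, height, radius) {
  const tmp = new Float32Array(src.length);
  const dst = new Float32Array(src.length);
  // horizontal pass
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      let sum = 0, count = 0;
      for (let k = -radius; k <= radius; k++) {
        const sx = Math.min(width - 1, Math.max(0, x + k));
        sum += src[y * width + sx];
        count++;
      }
      tmp[y * width + x] = sum / count;
    }
  }
  // vertical pass
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      let sum = 0, count = 0;
      for (let k = -radius; k <= radius; k++) {
        const sy = Math.min(height - 1, Math.max(0, y + k));
        sum += tmp[sy * width + x];
        count++;
      }
      dst[y * width + x] = sum / count;
    }
  }
  return dst;
}
const smoothed = boxBlur(highResData, WIDTH, HEIGHT, 2);
// back to the original scale when writing the heights:
// vertices[3 * i + 2] = smoothed[i] * 255 * HEIGHT_AMPLIFIER;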
Hope that helps, good luck :)
I'm trying to use part of a video as a texture in a Three.js mesh.
The video is here: http://video-processing.s3.amazonaws.com/example.MP4. It's from a fisheye lens, and I want to use only the part with actual content, i.e. the circle in the middle.
I want to somehow mask, crop or position and stretch the video on the mesh so that only this part shows and the black part is ignored.
Video code
var video = document.createElement( 'video' );
video.loop = true;
video.crossOrigin = 'anonymous';
video.preload = 'auto';
video.src = "http://video-processing.s3.amazonaws.com/example.MP4";
video.play();
var texture = new THREE.VideoTexture( video );
texture.minFilter = THREE.NearestFilter;
texture.magFilter = THREE.LinearFilter;
texture.format = THREE.RGBFormat;
var material = new THREE.MeshBasicMaterial( { map : texture } );
The video is then projected onto a 220 degree sphere, to give the VR impression.
var geometry = new THREE.SphereGeometry( 200,100,100, 0, 220 * Math.PI / 180, 0, Math.PI);
Here is a code pen
http://codepen.io/bknill/pen/vXBWGv
Can anyone let me know how I'm best to do this?
You can use texture.repeat to scale the texture:
http://threejs.org/docs/#Reference/Textures/Texture
For example, to scale 2x on both axes:
texture.repeat.set(0.5, 0.5);
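If the usable circle sits roughly in the middle of the frame, you can combine that with texture.offset to crop in on it. The 0.5 / 0.25 values below are only a guess you would tune by eye for your footage:
// assumption: the content circle covers roughly the central half of the video
texture.repeat.set(0.5, 0.5);   // sample only half the width/height of the frame
texture.offset.set(0.25, 0.25); // start a quarter of the way in on each axis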
In short, you need to update the UV-Map of the sphere so that the relevant area of your texture is assigned to the corresponding vertices of the sphere.
The UV-coordinates for each vertex define the position within the texture that is assigned to that vertex (in a range [0..1], so coordinates (0, 0) are the top left corner and (1, 1) the bottom right corner of your video). This example should give you an idea of what this is about.
Those UV-coordinates are stored in your geometry as geometry.faceVertexUvs[0] such that every vertex of every face has a THREE.Vector2 value for the UV-coordinate. This is a two-dimensional array, the first index is the face-index and the second one the vertex-index for the face (see example).
As for generating the UV-map, there are at least two ways to do this. The probably easier way (ymmv, but I'd always go this route) would be to create the UV-map using 3D-editing software like Blender and export the resulting object using the three.js exporter-plugin.
The other way is to compute the values by hand. I would suggest you first try to simply use an orthographic projection of the sphere. So basically, if you have a unit-sphere at the origin, simply drop the z-coordinate of the vertices and use u = x/2 + 0.5 and v = y/2 + 0.5 as UV-coordinates.
In JS that would be something like this:
// create the geometry (note that for simplicity, we're
// a) using a unit-sphere and
// b) use an exact half-sphere)
const geometry = new THREE.SphereGeometry(1, 18, 18, Math.PI, Math.PI)
const uvs = geometry.faceVertexUvs[0];
const vertices = geometry.vertices;
// compute the UV from the vertices of the sphere. You will probably need
// something a bit more elaborate than this for the 220-degree FOV, and maybe
// some lens-distortion correction, but it will boil down to something like this:
for (let i = 0; i < geometry.faces.length; i++) {
  const face = geometry.faces[i];
  const faceVertices = [vertices[face.a], vertices[face.b], vertices[face.c]];
  for (let j = 0; j < 3; j++) {
    const vertex = faceVertices[j];
    uvs[i][j].set(vertex.x / 2 + 0.5, vertex.y / 2 + 0.5);
  }
}
geometry.uvsNeedUpdate = true;
(If you need more information in either direction, drop a comment and I will elaborate.)
I am trying to load (dynamically) object files using THREE.OBJLoader and place them in the center of a scene (or canvas), so that the whole object is visible to the camera. The objects are dynamic, so I don't have fixed height or width data.
What I have got:
What I want:
What I have consulted to get to this point:
Three.js zoom to fit width of objects (ignoring height)
How to Fit Camera to Object
Smart Centering and Scaling after Model Import in three.js
Adjusting camera for visible Three.js shape
Calculate camera zoom required for object to fit in screen height
Move camera to fit 3D scene
Three.js calculate object distance required to fill screen
How to calculate the z-distance of a camera to view an image at 100% of its original scale in a 3D space
Code:
camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 1, 2000);
controls = new THREE.TrackballControls(camera);
controls.rotateSpeed = 5.0;
controls.zoomSpeed = 5;
controls.panSpeed = 2;
controls.noZoom = false;
controls.noPan = false;
controls.staticMoving = true;
controls.dynamicDampingFactor = 0.3;
// scene
scene = new THREE.Scene();
var mtlLoader = new THREE.MTLLoader();
mtlLoader.setBaseUrl(path);
mtlLoader.setPath(path);
mtlLoader.setMaterialOptions({
  ignoreZeroRGBs: true
});
mtlLoader.load(path, function(materials) {
  materials.preload();
  var objLoader = new THREE.OBJLoader();
  objLoader.setMaterials(materials);
  objLoader.setPath(path);
  objLoader.load(path, function(object) {
    var helperBox = new THREE.BoundingBoxHelper(object, 0x888888);
    helperBox.update();
    scene.add(helperBox);
    // Scene
    scene.add(object);
    var boxFrmScene = new THREE.Box3().setFromObject(scene);
    var height = Math.max(boxFrmScene.size().y, boxFrmScene.size().x);
    var dist = height / (2 * Math.tan(camera.fov * Math.PI / 360));
    var pos = scene.position;
    var boundingSphere = boxFrmScene.getBoundingSphere();
    var center = boundingSphere.center;
    camera.position.set(center.x, center.y, (dist * 1.1));
    camera.lookAt(pos);
  }, function(error) {
    console.log(error);
  });
}, function(error) {
  console.log(error);
});
The Deadpool object I used is from here: http://tf3dm.com/3d-model/deadpool-42722.html. I don't know if I have been reading the right questions. I would be very glad if someone could point me in the right direction. Thanks in advance.
PS: I'm not good with 3D maths.
Edit:
I have tried the solution given at How to Fit Camera to Object, but it has not solved my issue. In fact, it flips my object upside down. I am trying to position the object at the center of the camera view.
Edit 2: I have partially fixed this. The object is now within the camera frustum and fully visible. I still need to find a way to center it. I replaced all the code below scene.add(object); with the following:
var pos = scene.position;
camera.position.set(pos.x, pos.y, 100);
camera.lookAt(pos);
var boxFrmScene = new THREE.Box3().setFromObject(scene);
var height = Math.max(boxFrmScene.size().y, boxFrmScene.size().x);
var fov = camera.fov * (Math.PI / 180);
var distance = Math.abs(height / Math.sin(fov / 2));
camera.position.set(pos.x, pos.y, distance + (height / 2));
camera.updateProjectionMatrix();
Try adding this code after creating the mesh:
var box = new THREE.Box3().setFromObject(mesh); // world-space bounding box of the mesh
box.center(mesh.position);                      // writes the box centre into mesh.position
mesh.localToWorld(box);
mesh.position.multiplyScalar(-1);               // shift the mesh so its centre ends up at the origin
This will bring your object to the center of the screen.
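If you also want the camera to frame the whole object automatically, a possible follow-up builds on the bounding-sphere idea already in the question's code. This is only a hedged sketch, not part of the original answer; object, camera and controls are the names used in the question, and the 1.1 margin is arbitrary:
var box = new THREE.Box3().setFromObject(object);
var sphere = box.getBoundingSphere();              // old three.js API, as in the question
var halfFov = camera.fov * Math.PI / 360;          // vertical half-FOV in radians
var distance = sphere.radius / Math.sin(halfFov);  // distance at which the sphere just fits the FOV
camera.position.set(sphere.center.x, sphere.center.y, sphere.center.z + distance * 1.1);
camera.lookAt(sphere.center);
controls.target.copy(sphere.center);               // keep TrackballControls orbiting the object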
I am using the following code to scale and center a msgpack-compressed object loaded using the ObjectLoader, and it is not working. I think my object has a rotation on it, which is causing the weird behavior. On some objects it centers successfully, but on others the centering is offset and the scaling isn't right either.
In this snippet, result is the scene from the ObjectLoader. My thought was that the object was not very well formed, but I'm not sure. I want the table in the image, or any other user-provided mesh, to sit on top of the grid, centered and scaled so that its maximum size is 1 unit.
Each square measures 0.25; the axes are at (0, 0, 0): http://i.stack.imgur.com/fkKYC.png
// result is a threejs scene
var geometry = result.children[0].geometry;
var mesh = result.children[0];
geometry.computeBoundingBox();
var middle = new THREE.Vector3();
middle.x = ( geometry.boundingBox.max.x + geometry.boundingBox.min.x ) / 2;
middle.y = -geometry.boundingBox.min.y;
middle.z = ( geometry.boundingBox.max.z + geometry.boundingBox.min.z ) / 2;
middle.negate();
mesh.position.copy(middle);
// scales the mesh up to maxsize
var maxSize = 1;
// gets the biggest axis
var maxVal = geometry.boundingBox.max.x - geometry.boundingBox.min.x;
if (maxVal < geometry.boundingBox.max.y - geometry.boundingBox.min.y) {
  maxVal = geometry.boundingBox.max.y - geometry.boundingBox.min.y;
}
if (maxVal < geometry.boundingBox.max.z - geometry.boundingBox.min.z) {
  maxVal = geometry.boundingBox.max.z - geometry.boundingBox.min.z;
}
// scales the current size proportional to the maxval, times maxsize
mesh.scale.divideScalar(maxVal * maxSize);
self.scene.add(result);
Instead of calling geometry.computeBoundingBox(), call geometry.center(). Then you don't need middle.x or middle.z, and you can just call mesh.translateY() rather than fiddling with middle at all.
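A rough sketch of that suggestion, reusing the names from the question (it assumes the mesh itself carries no rotation; if it does, bake the rotation into the geometry or work with a world-space box first):
var geometry = result.children[0].geometry;
var mesh = result.children[0];
geometry.center();             // recenter the geometry around the origin
geometry.computeBoundingBox(); // bounding box is now symmetric around (0, 0, 0)
var bb = geometry.boundingBox;
var sizeX = bb.max.x - bb.min.x;
var sizeY = bb.max.y - bb.min.y;
var sizeZ = bb.max.z - bb.min.z;
// scale so the largest dimension becomes maxSize
var maxSize = 1;
var scale = maxSize / Math.max(sizeX, sizeY, sizeZ);
mesh.scale.multiplyScalar(scale);
// lift the mesh so it rests on top of the grid (y = 0) instead of straddling it
mesh.translateY((sizeY / 2) * scale);
self.scene.add(result);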
I'm making a game in Phaser using some large images that I want to scale down in the actual game:
create() {
  // Create the sprite group and scale it down to 30%
  this.pieces = this.add.group(undefined, "pieces", true);
  this.pieces.scale.x = 0.3;
  this.pieces.scale.y = 0.3;
  // Add the players to the middle of the stage and add them to the 'pieces' group
  var middle = new Phaser.Point( game.stage.width/2, game.stage.height/2);
  var player_two = this.add.sprite(middle.x - 50, middle.y, 'image1', undefined, this.pieces);
  var player_one = this.add.sprite(middle.x, middle.y-50, 'image2', undefined, this.pieces);
}
However, because the sprites are scaled in size, their starting location is also scaled, so instead of appearing in the middle of the stage, they appear only 30% of the way to the middle.
How do I scale the sprite image without it affecting their location?
(The code is incidentally in TypeScript, but I think this particular sample is also valid JavaScript, so that's probably irrelevant.)
Set Sprite.anchor to 0.5 so the sprite (and its physics body) is centered on its position; then you can scale the sprite image without it affecting the location.
this.image.anchor.setTo(0.5, 0.5);
Doc Phaser.Image
Anchor example
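For the question's setup that could look roughly like this (an untested sketch; it scales the sprite itself rather than the group, and 'image1' and the 0.3 factor are taken from the question):
var middle = new Phaser.Point(game.stage.width / 2, game.stage.height / 2);
var player = this.add.sprite(middle.x, middle.y, 'image1');
player.anchor.setTo(0.5, 0.5); // position now refers to the sprite's centre
player.scale.setTo(0.3, 0.3);  // scaling happens around the anchor, so the position stays put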
You can scale your sprite like so:
var scaleX = 2;
var scaleY = 2;
sprite.scale.set(scaleX, scaleY);
then calculate position of sprite:
var positionX = 100;
var positionY = 100;
sprite.x = positionX / scaleX;
sprite.y = positionY / scaleY;
Now your sprite is at position (100, 100).
The problem is that sprite.x gets multiplied by scaleX.
Regarding Phaser, I'd like to add that in the specific case of weapon.bullets or other groups you create yourself you're going to have to do it this way instead:
weapon.bullets.setAll('scale.x', 0.5);
weapon.bullets.setAll('scale.y', 0.5);
I got stuck on this and ended up in this thread, which is closed but didn't quite cover my case. Hopefully others will get some use out of this :)