I have tried a lot of approaches before asking this, and I really have no clue how to accomplish GPU object picking on a loaded glTF file, so I'm hoping for any help I can get :(
I've loaded a huge glTF file with a lot of small objects in it. Because of the object count it's not possible to get good fps if I just add them all to the scene, so I've managed to reach 60fps by merging the glTF's children into chunks. But when I try to implement the webgl_interactive_cubes_gpu example it doesn't seem to work for me: I always get the same object when I click.
To debug, I have tried rendering the pickingScene and everything seems to be in place graphically, but the picking itself doesn't behave as I expect, unless I'm doing something wrong.
Raycast picking is not a suitable option for me: there are a lot of objects (55k), and raycasting against all of them would kill the fps.
Below is the code that runs once the glTF is loaded:
var child = gltf.scene.children[i];
var childGeomCopy = child.geometry.clone();
childGeomCopy.translate(geomPosition.x, geomPosition.y, geomPosition.z);
childGeomCopy.scale(child.scale.x * Scalar, child.scale.y * Scalar, child.scale.z * Scalar);
childGeomCopy.computeBoundingBox();
childGeomCopy.computeBoundingSphere();
childGeomCopy.applyMatrix(new THREE.Matrix4());
geometriesPicking.push(childGeomCopy);
var individualObj = new THREE.Mesh(childGeomCopy, IndividualObjMat);
individualObj.name = "individual_" + child.name;
pickingData[childCounter] = {
    object: individualObj,
    position: individualObj.position.clone(),
    rotation: individualObj.rotation.clone(),
    scale: individualObj.scale.clone()
};
childCounter++;
Edit:
// quaternion, matrix and color are reused for every child (declared once, outside the traverse)
var quaternion = new THREE.Quaternion();
var matrix = new THREE.Matrix4();
var color = new THREE.Color();

gltf.scene.traverse(function (child) {
    if (child.isMesh) {
        let geometry = child.geometry.clone();

        let position = new THREE.Vector3();
        position.x = child.position.x;
        position.y = child.position.y;
        position.z = child.position.z;

        let rotation = new THREE.Euler();
        rotation.x = child.rotation.x;
        rotation.y = child.rotation.y;
        rotation.z = child.rotation.z;

        let scale = new THREE.Vector3();
        scale.x = child.scale.x;
        scale.y = child.scale.y;
        scale.z = child.scale.z;

        quaternion.setFromEuler(rotation);
        matrix.compose(position.multiplyScalar(Scalar), quaternion, scale.multiplyScalar(Scalar));
        geometry.applyMatrix(matrix);

        // visible geometry gets a random color
        applyVertexColors(geometry, color.setHex(Math.random() * 0xffffff));
        geometriesDrawn.push(geometry);

        // picking geometry encodes the child index as a color
        geometry = geometry.clone();
        applyVertexColors(geometry, color.setHex(childCounter));
        geometriesPicking.push(geometry);

        pickingData[childCounter] = {
            object: new THREE.Mesh(geometry.clone(), new THREE.MeshBasicMaterial({ color: 0xffff00, blending: THREE.AdditiveBlending, transparent: true, opacity: 0.8 })),
            id: childCounter,
            position: position,
            rotation: rotation,
            scale: scale
        };

        childCounter++;
    }
});
...
var pickingGeom = THREE.BufferGeometryUtils.mergeBufferGeometries(geometriesPicking);
pickingGeom.rotateX(THREE.Math.degToRad(90));
pickingScene.add(new THREE.Mesh(pickingGeom, pickingMaterial));
Then, in my MouseUp handler, I call pick(mouse) and pass in the mouse coordinates:
function pick(mouse) {
    // render a 1x1 view offset at the cursor position into the picking render target
    camera.setViewOffset(renderer.domElement.width, renderer.domElement.height, mouse.x * window.devicePixelRatio | 0, mouse.y * window.devicePixelRatio | 0, 1, 1);
    renderer.setRenderTarget(pickingTexture);
    renderer.render(pickingScene, camera);
    camera.clearViewOffset();

    // read the single pixel and decode the id from its RGB value
    var pixelBuffer = new Uint8Array(4);
    renderer.readRenderTargetPixels(pickingTexture, 0, 0, 1, 1, pixelBuffer);
    var id = (pixelBuffer[0] << 16) | (pixelBuffer[1] << 8) | (pixelBuffer[2]);

    var data = pickingData[id];
    if (data) {
        console.log(data.object.name, ":", data.position); // always returns the same object
    }
}
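For reference, the webgl_interactive_cubes_gpu example renders the picking scene with a material that reads the per-vertex id colors back out. A minimal sketch of that setup (the options below follow the example and are assumptions, not part of the code above):
// 1x1 render target: pick() only ever reads the single pixel under the cursor
var pickingTexture = new THREE.WebGLRenderTarget(1, 1);

// the picking material must output the per-vertex colors that encode each child's id,
// otherwise every pixel decodes to the same value
// (newer three.js revisions take vertexColors: true instead of THREE.VertexColors)
var pickingMaterial = new THREE.MeshBasicMaterial({ vertexColors: THREE.VertexColors });

// applyVertexColors writes one color into every vertex of a geometry, so each merged
// child keeps its id color after mergeBufferGeometries
function applyVertexColors(geometry, color) {
    var position = geometry.attributes.position;
    var colors = [];
    for (var i = 0; i < position.count; i++) {
        colors.push(color.r, color.g, color.b);
    }
    // on older three.js revisions setAttribute is called addAttribute
    geometry.setAttribute('color', new THREE.Float32BufferAttribute(colors, 3));
}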
I have made a simple plugin for the game Rust that dumps the color information for the in-game map and the NPC coordinates to a data file on an interval.
The map ranges from -2000 to 2000 on the X and Z axes, so the NPC X and Z coordinates also range from -2000 to 2000.
In three.js i have a PlaneBufferGeometry representing the map that is setup like this:
const mapGeometry = new THREE.PlaneBufferGeometry( 2, 2, 2000, 2000 ); // width,height,width segments,height segments
mapGeometry.rotateX( - Math.PI / 2 ); // rotate the geometry to match the scene
const customUniforms = {
bumpTexture: { value: heightTexture },
bumpScale: { type: "f", value: 0.02 },
colorTexture: { value: colorTexture }
};
const mapMaterial = new THREE.ShaderMaterial({
uniforms: customUniforms,
vertexShader: document.getElementById( 'vertexShader' ).textContent,
fragmentShader: document.getElementById( 'fragmentShader' ).textContent,
wireframe:true
});
const mapMesh = new THREE.Mesh( mapGeometry, mapMaterial );
scene.add( mapMesh );
The webpage is served by an Express server with socket.io integration.
The server emits updated coordinates to the connected clients on an interval.
socket.on('PositionData', function(data) {
storeNPCPositions(data);
});
I'm iterating over the NPC data and trying to remap the coordinates to correspond with the Three.js setup, like this:
function storeNPCPositions(data) {
    let npcs = [];
    for (const npc in data.npcPositions) {
        npcs.push({
            name: npc,
            position: {
                x: remapPosition(data.npcPositions[npc].x, -2000, 2000, -1, 1), // i am uncertain about the -1 to 1 range, maybe 0 to 2?
                y: remapPosition(data.npcPositions[npc].y, heightData.min, heightData.max, 0, .02),
                z: remapPosition(data.npcPositions[npc].z, -2000, 2000, -1, 1), // i am uncertain about the -1 to 1 range, maybe 0 to 2?
            }
        });
    }
    window.murkymap.positionData.npcs = npcs;
}
function remapPosition(value, from1, to1, from2, to2)
{
return (value - from1) / (to1 - from1) * (to2 - from2) + from2;
}
As you can see in the storeNPCPositions function above, I have commented on my uncertainty about the remapping, but either way the placement in the end result is wrong.
The image below shows what I get right now; the NPCs are not in the correct positions.
I hope someone can spot the error in my code; I've been at it for many hours now.
The problem was that the NPC positions were flipped on the X axis. I made a THREE.Object3D(), added all the NPCs to it, and then flipped it like this:
let npcContainer = new THREE.Object3D();
npcContainer.position.set(0,0,0);
npcContainer.rotateX(Math.PI);
let npcs = [];
const npcLineMaterial = new THREE.LineBasicMaterial({color: 0xff0000});
for (let i = 0; i < window.murkymap.positionData.npcs.length; i++) {
const npc = window.murkymap.positionData.npcs[i];
const npcPoints = [];
npcPoints.push(new THREE.Vector3(npc.position.x, 1000, npc.position.z));
npcPoints.push(new THREE.Vector3(npc.position.x, 200, npc.position.z));
npcPoints.push(new THREE.Vector3(npc.position.x, -50, npc.position.z));
const npcLineGeometry = new THREE.BufferGeometry().setFromPoints( npcPoints );
const npcLine = new THREE.Line(npcLineGeometry, npcLineMaterial);
npcLine.position.y = -750;
npcLine.name = "npc";
npcLine.userData.prefab = npc.name;
npcs.push(npcLine);
}
npcContainer.remove(...npcContainer.children);
npcContainer.add(...npcs);
scene.add(npcContainer);
I'm trying to create an online game using Babylon.js but have run into a problem that's got me a little stumped, so I'm hoping someone here is willing to help me out. Please bear with me on this one; I'm a complete newbie with Babylon, as I've only ever worked with THREE.js. Right now my game consists of a scene comprising multiple meshes, with multiple users represented as avatars (created from basic circle geometry for the moment) loaded into an environment. What I want to do is highlight the outline of these avatars ONLY when they are occluded by any other object, meaning that when they are not occluded they look normal with no highlight, but when behind an object their highlighted silhouette can be seen by others (including yourself, since you can see your own avatar). This is very akin to effects used in many other video games (see the example below).
Example of Effect
Thus far, based on some googling and forum browsing (Babylonjs outline through walls & https://forum.babylonjs.com/t/highlight-through-objects/8002/4), I've figured out how to highlight the outline of objects using BABYLON.HighlightLayer, and I know that I can render objects above others via rendering groups, but I can't seem to figure out how to use them in conjunction to create the effect I want. The best I've managed is to get the highlighted avatar to render above everything, but I need just the silhouette, not the entire mesh. I'm also constrained by the fact that my scene has many meshes that are loaded dynamically, and I'm trying to keep things as optimal as possible; I can't afford very computationally expensive procedures.
Does anybody know of the best way to approach this? I would greatly appreciate any advice or assistance you can provide. Thanks!
So I asked the same question on the Babylon forums, which helped me to find a solution. All credit goes to the guys who helped me out over there, but in case someone else comes across this question seeking an answer, here is a link to that forum question: https://forum.babylonjs.com/t/showing-highlighted-silhouette-of-mesh-only-when-it-is-occluded/27783/7
Edit:
OK, I thought I'd include the two possible solutions here properly, along with their Babylon playgrounds. All credit goes to roland & evgeni_popov, who came up with these solutions on the forum linked above.
The first solution is easier to implement but slightly less performant than the second solution.
Clone Solution: https://playground.babylonjs.com/#JXYGLT%235
// roland#babylonjs.xyz, 2022
const createScene = function () {
    const scene = new BABYLON.Scene(engine);
    const camera = new BABYLON.ArcRotateCamera('camera', -Math.PI / 2, Math.PI / 2, 20, new BABYLON.Vector3(0, 0, 0), scene)
    camera.attachControl(canvas, true);
    const light = new BABYLON.HemisphericLight("light", new BABYLON.Vector3(0, 1, 0), scene);
    light.intensity = 0.7;

    const wall = BABYLON.MeshBuilder.CreateBox('wall', { width: 5, height: 5, depth: 0.5 }, scene)
    wall.position.y = 1
    wall.position.z = -2

    const sphere = BABYLON.MeshBuilder.CreateSphere('sphere', { diameter: 2, segments: 32 }, scene)
    sphere.position.y = 1

    // the clone is only the source of the silhouette; it never writes color or depth itself
    const sphereClone = sphere.clone('sphereClone')
    sphereClone.setEnabled(false)
    const matc = new BABYLON.StandardMaterial("matc", scene);
    matc.depthFunction = BABYLON.Constants.ALWAYS;
    matc.disableColorWrite = true;
    matc.disableDepthWrite = true;
    sphereClone.material = matc;

    // occlusion queries report when the real sphere is hidden behind the wall
    sphere.occlusionQueryAlgorithmType = BABYLON.AbstractMesh.OCCLUSION_ALGORITHM_TYPE_ACCURATE
    sphere.occlusionType = BABYLON.AbstractMesh.OCCLUSION_TYPE_STRICT

    const hl = new BABYLON.HighlightLayer('hl1', scene, { camera: camera })
    hl.addMesh(sphereClone, BABYLON.Color3.Green())
    hl.addExcludedMesh(wall);

    let t = 0;
    scene.onBeforeRenderObservable.add(() => {
        sphere.position.x = 10 * Math.cos(t);
        sphere.position.z = 100 + 104 * Math.sin(t);

        // show the highlighted clone only while the real sphere is occluded
        if (sphere.isOccluded) {
            sphereClone.setEnabled(true)
            sphereClone.position.copyFrom(sphere.position);
        } else {
            sphereClone.setEnabled(false)
        }
        t += 0.03;
    })

    return scene;
};
This second solution is slightly more performant than the first, since you don't need a clone, but it involves overriding the AbstractMesh._checkOcclusionQuery function (the function that updates the isOccluded property for meshes) so that the mesh is always rendered even when occluded. There's no overhead if you are using occlusion queries only for the purpose of drawing silhouettes; however, if you are also using them to avoid drawing occluded meshes, then there is an overhead, because the meshes will be drawn even when they are occluded. In that case you're probably better off going with the first solution.
Non-Clone solution: https://playground.babylonjs.com/#JXYGLT#14
// roland#babylonjs.xyz, 2022
const createScene = function () {
    const scene = new BABYLON.Scene(engine);
    const camera = new BABYLON.ArcRotateCamera('camera', -Math.PI / 2, Math.PI / 2, 20, new BABYLON.Vector3(0, 0, 0), scene)
    camera.attachControl(canvas, true);
    const light = new BABYLON.HemisphericLight("light", new BABYLON.Vector3(0, 1, 0), scene);
    light.intensity = 0.7;

    const wall = BABYLON.MeshBuilder.CreateBox('wall', { width: 5, height: 5, depth: 0.5 }, scene)
    wall.position.y = 1
    wall.position.z = -2

    const sphere = BABYLON.MeshBuilder.CreateSphere('sphere', { diameter: 2, segments: 32 }, scene)
    sphere.position.y = 1
    sphere.occlusionQueryAlgorithmType = BABYLON.AbstractMesh.OCCLUSION_ALGORITHM_TYPE_ACCURATE
    sphere.occlusionType = BABYLON.AbstractMesh.OCCLUSION_TYPE_STRICT

    const mats = new BABYLON.StandardMaterial("mats", scene);
    sphere.material = mats;

    const hl = new BABYLON.HighlightLayer('hl1', scene, { camera: camera })
    hl.addExcludedMesh(wall);

    let t = 0;

    // keep the original _checkOcclusionQuery so it can be restored when the scene is disposed
    const cur = BABYLON.AbstractMesh.prototype._checkOcclusionQuery;
    scene.onDisposeObservable.add(() => {
        BABYLON.AbstractMesh.prototype._checkOcclusionQuery = cur;
    });

    // run the occlusion query as usual, but never let it suppress rendering of the mesh
    BABYLON.AbstractMesh.prototype._checkOcclusionQuery = function () {
        cur.apply(this);
        return false;
    }

    scene.onBeforeRenderObservable.add(() => {
        sphere.position.x = 10 * Math.cos(t);
        sphere.position.z = 100 + 104 * Math.sin(t);

        // while occluded, draw only the silhouette: highlight on, color writes off, depth test always passes
        if (sphere.isOccluded) {
            hl.addMesh(sphere, BABYLON.Color3.Green())
            mats.depthFunction = BABYLON.Constants.ALWAYS;
            mats.disableColorWrite = true;
        } else {
            hl.removeMesh(sphere);
            mats.depthFunction = BABYLON.Constants.LESS;
            mats.disableColorWrite = false;
        }
        t += 0.03;
    })

    return scene;
};
I'm creating a 3D game and I've just begun. However, I quickly ran into a problem: the localhost GET takes more than 2 minutes, and after 15-45 seconds the canvas turns white and the console shows a warning that the WebGL context has been lost. Also, in Task Manager, the game takes up 30% of the CPU and 100% of the GPU.
It is for a new online 3D multiplayer game. I've tried disposing of the memory after each new frame, but that didn't work. I've also tried pre-loading all the textures to use less CPU, but the CPU usage stays at 30%. This is my (client-side) code:
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 1, 1000)
var renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
scene.autoUpdate = false;
var preLoad = new THREE.TextureLoader();
var sunTexture = preLoad.load("/static/sun-texture.jpg");
var mercuryTexture = preLoad.load("/static/mercury-texture.jpg");
var socketio = io();
var Geometries = [];
socketio.on("TX2", function (data) {
Geometries = [];
var collectedEntities = [];
data.objects.forEach(obj => {
collectedEntities.push(obj);
});
DisplayAllEntities(collectedEntities);
});
function DisplayAllEntities(objects) {
var loader;
objects.forEach(obj => {
if (obj.geometry == "sphere") {
if (obj.type != "ordinary_sphere") {
switch (obj.type) {
case "sun":
var material = new THREE.MeshBasicMaterial({ map: sunTexture });
break;
case "mercury":
var material = new THREE.MeshBasicMaterial({ map: mercuryTexture });
camera.position.z = obj.z + 500;
break;
}
}
loader = new THREE.TextureLoader();
var texture = loader.load(obj.texture)
var geometry = new THREE.SphereGeometry(obj.radius, 50, 50, 0, Math.PI * 2, 0, Math.PI * 2);
var mesh = new THREE.Mesh(geometry, material);
Geometries.push(mesh);
} else if (obj.geometry == "cube") {
loader = new THREE.TextureLoader();
var texture = loader.load(obj.texture)
var geometry = new THREE.CubeGeometry(obj.width, obj.height, obj.depth);
var material = new THREE.MeshBasicMaterial({ map: texture });
var mesh = new THREE.Mesh(geometry, material);
mesh.position = {"x": obj.x, "y": obj.y, "z": obj.z}
Geometries.push(mesh);
}
loader = null;
});
scene.children = [];
scene.dispose();
Geometries.forEach(obj => {
scene.add(obj);
});
render();
}
function render() {
requestAnimationFrame(render);
renderer.render(scene, camera);
}
The server just makes calculations about positions, and sends them to clients to render them.
I expect to get lower CPU and GPU usage and a much shorter load time, but the performance remains the same.
It is very inefficient to create each object every time you need to render it, and especially to load the corresponding textures. A better solution would be to set up the objects beforehand and then update them continually. This would require:
a server emit for initializing (setting up objects, loading textures etc.)
a server emit for game state updates (adding/removing items if needed)
a server emit to update the positions. (this is the one that will execute 60 times per second)
Each object to have a unique id, given by the server at creation, so that the client knows which object the server is referencing.
This involves a bit more effort but would boost performance greatly; a rough sketch of the idea is shown below.
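A minimal sketch of that client-side structure (the "init"/"positions" event names, the payload shapes, and the meshes map are assumptions for illustration, not part of the original code):
// one texture loader and one mesh per server-side id, created once and reused
var textureLoader = new THREE.TextureLoader();
var meshes = {};

// hypothetical "init" event: the server sends every object once, with a stable id
socketio.on("init", function (data) {
    data.objects.forEach(obj => {
        var geometry = obj.geometry === "cube"
            ? new THREE.BoxGeometry(obj.width, obj.height, obj.depth)
            : new THREE.SphereGeometry(obj.radius, 32, 32);
        var material = new THREE.MeshBasicMaterial({ map: textureLoader.load(obj.texture) });
        var mesh = new THREE.Mesh(geometry, material);
        mesh.position.set(obj.x, obj.y, obj.z);
        meshes[obj.id] = mesh;
        scene.add(mesh);
    });
});

// hypothetical "positions" event, sent many times per second: only move existing meshes
socketio.on("positions", function (data) {
    data.objects.forEach(obj => {
        var mesh = meshes[obj.id];
        if (mesh) {
            mesh.position.set(obj.x, obj.y, obj.z);
        }
    });
});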
My display has a resolution of 7680x4320 pixels. I want to display up to 4 million differently colored squares, and I want to change the number of squares with a slider. I currently have two versions. One uses canvas fillRect and looks something like this:
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
for (var i = 0; i < num_squares; i ++) {
    ctx.fillStyle = someColor;
    ctx.fillRect(pos_x, pos_y, square_width, square_height); // fillRect takes width/height, not the far corner
    // set pos_x and pos_y for next square
}
And one with WebGL and three.js. Same loop, but I create a box geometry and a mesh for every square:
var geometry = new THREE.BoxGeometry( width_height, width_height, 0);
for (var i = 0; i < num_squares; i ++) {
    var material = new THREE.MeshLambertMaterial( { color: Math.random() * 0xffffff } );
    material.emissive = new THREE.Color( Math.random(), Math.random(), Math.random() );
    var object = new THREE.Mesh( geometry, material );
}
They both work fine for a few thousand squares. The first version can do up to one million squares, but anything over a million is just awfully slow. I want to update the color and the number of squares dynamically.
Does anyone have tips on how to be more efficient with three.js / WebGL / canvas?
EDIT1: Second version. This is what I do at the beginning and whenever the slider changes:
// Remove all objects from the scene
var obj, i;
for ( i = scene.children.length - 1; i >= 0 ; i -- ) {
    obj = scene.children[ i ];
    if ( obj !== camera) {
        scene.remove(obj);
    }
}

// Fill the scene with new objects
num_squares = gui_dat.squareNum;
var window_pixel = window.innerWidth * window.innerHeight;
var pixel_per_square = window_pixel / num_squares;
var width_height = Math.floor(Math.sqrt(pixel_per_square));
var geometry = new THREE.BoxGeometry( width_height, width_height, 0);
var pos_x = width_height/2;
var pos_y = width_height/2;
for (var i = 0; i < num_squares; i ++) {
    var material = new THREE.MeshLambertMaterial( { color: Math.random() * 0xffffff } );
    material.emissive = new THREE.Color( Math.random(), Math.random(), Math.random() );
    var object = new THREE.Mesh( geometry, material );
    object.position.x = pos_x;
    object.position.y = pos_y;
    pos_x += width_height;
    if (pos_x > window.innerWidth) {
        pos_x = width_height/2;
        pos_y += width_height;
    }
    scene.add( object );
}
The fastest way to draw squares is to use the gl.POINTS primitive and set gl_PointSize to the pixel size.
In three.js, gl.POINTS is wrapped inside the THREE.PointCloud object.
You'll have to create a geometry object with one position for each point and pass that to the PointCloud constructor.
Here is an example of THREE.PointCloud in action:
http://codepen.io/seanseansean/pen/EaBZEY
geometry = new THREE.Geometry();
for (i = 0; i < particleCount; i++) {
    var vertex = new THREE.Vector3();
    vertex.x = Math.random() * 2000 - 1000;
    vertex.y = Math.random() * 2000 - 1000;
    vertex.z = Math.random() * 2000 - 1000;
    geometry.vertices.push(vertex);
}
...
materials[i] = new THREE.PointCloudMaterial({size:size});
particles = new THREE.PointCloud(geometry, materials[i]);
I didn't dig through all the code, but I set the particle count to 2m and, from my understanding, 5 point clouds are generated, so 2m x 5 = 10m particles, and I'm getting around 30fps.
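Side note: THREE.Geometry, THREE.PointCloud and THREE.PointCloudMaterial have since been renamed or removed; in current three.js revisions the same idea looks roughly like the sketch below (counts and sizes are arbitrary, not taken from the codepen above):
// one flat Float32Array holds x,y,z for every point: no per-square mesh objects
var particleCount = 2000000;
var positions = new Float32Array(particleCount * 3);
for (var i = 0; i < particleCount; i++) {
    positions[3 * i]     = Math.random() * 2000 - 1000;
    positions[3 * i + 1] = Math.random() * 2000 - 1000;
    positions[3 * i + 2] = Math.random() * 2000 - 1000;
}

var geometry = new THREE.BufferGeometry();
geometry.setAttribute('position', new THREE.BufferAttribute(positions, 3));

// size is a point size in pixels; sizeAttenuation: false keeps it constant with distance
var material = new THREE.PointsMaterial({ size: 4, sizeAttenuation: false });
var points = new THREE.Points(geometry, material);
scene.add(points);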
The highest number of individual points I've seen so far was with potree.
http://potree.org/, https://github.com/potree
Try one of the demos; I was able to view 5 million points in 3D at 20-30 fps. I believe this is also close to the current technological limit.
I haven't tested potree myself, so I can't say much about the tech, but there is a data converter and a viewer (three.js based), so you should only need to figure out how to convert your data.
Briefly about your question
The best way to handle large data is to group it in a quad-tree (2D) or oct-tree (3D). This lets you skip the parts that are too far from the camera or not visible at all.
On the other hand, the program doesn't like it when you make too many WebGL calls. Think of it like this: you want to produce ~60 images each second, but every time you set some parameter on the GPU, the program must do some synchronization. Splitting the data means more setup work, so the tree must not be too detailed. A rough sketch of this kind of chunked grouping follows.
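As a very rough sketch of that grouping, using a flat grid of chunks rather than a real quad-tree (the squares array and chunk size are made up for illustration):
// split the squares into a grid of chunks, one Points object per chunk,
// so three.js frustum culling can skip off-screen chunks and draw calls stay few
var chunkSize = 512;   // world units per chunk side (arbitrary)
var chunks = {};       // "cx,cy" -> flat array of x,y,z values

squares.forEach(function (s) {
    var key = Math.floor(s.x / chunkSize) + ',' + Math.floor(s.y / chunkSize);
    (chunks[key] = chunks[key] || []).push(s.x, s.y, 0);
});

Object.keys(chunks).forEach(function (key) {
    var geometry = new THREE.BufferGeometry();
    geometry.setAttribute('position', new THREE.BufferAttribute(new Float32Array(chunks[key]), 3));
    scene.add(new THREE.Points(geometry, new THREE.PointsMaterial({ size: 4 })));
});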
Last thing, someone said:
You'll probably want to pass an array of values as one of the shader uniforms
I don't suggest it; it's a bad idea. Texture lookups are quite fast, but attributes are always faster. If we are talking about 4M points, you can't afford to read that data from uniforms.
Sorry I can't help you with the code itself; I could do it without three.js, but I'm not a three.js expert :)
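To make the "attributes, not uniforms" point concrete, here is a small self-contained sketch of per-point colors stored as a vertex attribute on a BufferGeometry (current three.js API; the count is arbitrary and this is not the answerer's code):
// each point carries its own RGB triple; the GPU reads it per vertex, no uniform array needed
var count = 4000000;
var positions = new Float32Array(count * 3); // fill with the square centers
var colors = new Float32Array(count * 3);
for (var i = 0; i < count; i++) {
    colors[3 * i]     = Math.random();
    colors[3 * i + 1] = Math.random();
    colors[3 * i + 2] = Math.random();
}

var geometry = new THREE.BufferGeometry();
geometry.setAttribute('position', new THREE.BufferAttribute(positions, 3));
geometry.setAttribute('color', new THREE.BufferAttribute(colors, 3));

// the material reads the color attribute instead of a uniform array or texture lookup
var material = new THREE.PointsMaterial({ size: 4, vertexColors: true });
scene.add(new THREE.Points(geometry, material));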
I would recommend trying the Pixi framework (as mentioned in the comments above).
It has webgl renderer and some benchmarks are very promising.
http://www.goodboydigital.com/pixijs/bunnymark_v3/
It can handle a lot of animated sprites.
If your app only displays the squares, doesn't animate them, and they are very simple sprites (only one color), then it should give better performance than the demo linked above.
This is my second time using three.js and I've been playing around for at least 3 hours. I cannot seem to find a direction.
What I should build is something like this:
https://www.g-star.com/nl_nl/newdenimarrivals
I created the scene and everything, but I cannot seem to find a formula or any guidance on how to arrange the products like that (not to mention that I have to handle click events afterwards and move the camera to the clicked product).
Do you guys have any leads or anything?
EDIT:
This is how I try to arrange the products.
arrangeProducts: function () {
    var self = this;
    this.products.forEach(function (element, index) {
        THREE.ImageUtils.crossOrigin = '';
        element.image = 'http://i.imgur.com/CSyFaYS.jpg';
        //texture
        var texture = THREE.ImageUtils.loadTexture(element.image, null);
        texture.magFilter = THREE.LinearFilter; // magFilter cannot use mipmap filters
        texture.minFilter = THREE.LinearMipMapLinearFilter;
        //material
        var material = new THREE.MeshBasicMaterial({
            map: texture
        });
        //plane geometry
        var geometry = new THREE.PlaneGeometry(element.width, element.height);
        //plane
        var plane = new THREE.Mesh(geometry, material);
        plane.overdraw = true;
        //set the random locations
        /*plane.position.x = Math.random() * (self.container.width - element.width);
        plane.position.y = Math.random() * (self.container.height - element.height);*/
        plane.position.z = -2500 + (Math.random() * 50) * 50;
        plane.position.x = Math.random() * self.container.width - self.container.width / 2;
        plane.position.y = Math.random() * 200 - 100;
        //add the plane to the scene
        self.scene.add(plane);
    });
},
EDIT 2:
I figured it out: I need to add about 5 transparent concentric cylinders, put the products on each at random locations, place the camera in the center of all the cylinders, and just rotate. But how do I put the images on the cylinders randomly? I'm really blocked on that.
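One possible sketch of that placement, assuming it runs inside a method like arrangeProducts above (so self = this), that the camera sits at the origin, and using made-up radii and height range; it reuses the same loading style as the code above:
var radii = [800, 1000, 1200, 1400, 1600]; // one radius per concentric cylinder

self.products.forEach(function (element) {
    // pick a cylinder, then a random angle around it and a random height on its wall
    var radius = radii[Math.floor(Math.random() * radii.length)];
    var angle = Math.random() * Math.PI * 2;
    var height = Math.random() * 400 - 200;

    var texture = THREE.ImageUtils.loadTexture(element.image, null);
    var plane = new THREE.Mesh(
        new THREE.PlaneGeometry(element.width, element.height),
        new THREE.MeshBasicMaterial({ map: texture })
    );

    // convert cylindrical coordinates (radius, angle, height) to x/y/z
    plane.position.set(radius * Math.cos(angle), height, radius * Math.sin(angle));
    plane.lookAt(new THREE.Vector3(0, height, 0)); // face the camera on the cylinder axis

    self.scene.add(plane);
});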
On the three.js website you'll find a whole bunch of examples that show you what is possible and how to do it. Don't expect to be an expert after only 3 hours.