Raycast not detecting glTF file / Three.js - JavaScript

My binary glTF file (modelled in Blender and animated with Mixamo) is not detected by my raycast.
I have read a bunch of tutorials and questions about this and tried to fix it, but nothing works whatsoever :(
const loader = new GLTFLoader();
let modelLoader = "/models/c2.glb";
let model, mixer, neck, waist;

loader.load(modelLoader, (gltf) => {
  model = gltf.scene;
  let fileAnimations = gltf.animations;

  model.traverse((o) => {
    // console.log(o);
    if (o.isBone && o.name === "mixamorigNeck") {
      neck = o;
    }
    if (o.isBone && o.name === "mixamorigSpine") {
      waist = o;
    }
    if (o.isMesh) {
      o.material.reflectivity = 1;
    }
  });

  document.addEventListener("click", (e) => raycast(e));
  document.addEventListener("touchend", (e) => raycast(e, true));

  function raycast(e, touch = false) {
    const raycaster = new THREE.Raycaster();
    const mouse = new THREE.Vector2();
    e.preventDefault();
    if (touch) {
      mouse.x = 2 * (e.changedTouches[0].clientX / window.innerWidth) - 1;
      mouse.y = 1 - 2 * (e.changedTouches[0].clientY / window.innerHeight);
    } else {
      mouse.x = (e.clientX / window.innerWidth) * 2 - 1;
      mouse.y = -(e.clientY / window.innerHeight) * 2 + 1;
    }
    raycaster.setFromCamera(mouse, camera);
    const intersects = raycaster.intersectObjects(scene.children, true);
    console.log(scene.children);
    if (intersects.length > 0) {
      console.log("hi");
    }
  }
});
There is no error, and if I console.log scene.children outside of the 'if (intersects.length > 0) {}' block and click on the window, it works and contains everything it should; but if I put it inside the 'if' and click on the 3D object, the intersects array is empty.
I also tried pushing the meshes into a new array and intersecting against that, but it did not work either.
Please help!

The problem is that you are performing raycasting against a skinned mesh, and three.js is currently (r128) not able to compute proper bounding volumes for this type of 3D object. Bounding volumes, however, are important for raycasting since they are used for early-out tests.
The workaround for this issue is to manually define bounding volumes so they properly enclose the skinned mesh. I suggest you traverse gltf.scene and set the boundingSphere and boundingBox properties of the skinned mesh's geometry, as in the sketch below.
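A minimal sketch of that idea (my addition, not the original answer's code; the box and sphere extents are placeholders you would replace with values that actually enclose your animated character):

// Give every skinned mesh generous, manually defined bounding volumes so the
// raycaster's early-out test no longer discards it. The extents below are assumptions.
gltf.scene.traverse((o) => {
  if (o.isSkinnedMesh) {
    o.geometry.boundingBox = new THREE.Box3(
      new THREE.Vector3(-5, -5, -5),
      new THREE.Vector3(5, 5, 5)
    );
    o.geometry.boundingSphere = new THREE.Sphere(new THREE.Vector3(0, 0, 0), 10);
  }
});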
More information at GitHub here: https://github.com/mrdoob/three.js/pull/19178

Related

Three.js Using Raycaster to detect line and cone children of ArrowHelper

I have a functioning Raycaster for a simple painting app. I use it for a "bucket tool" in which the user can click on an object and change its color. It works for geometry objects such as BoxGeometry and CircleGeometry, but I'm struggling to apply it to the children of an ArrowHelper object. Because ArrowHelper isn't a shape and does not possess a geometry attribute, Raycaster does not detect collision with its position when checking scene.children for intersections. However, the children of ArrowHelper objects are always two things: a line and a cone, both of which have geometry, material, and position attributes.
I HAVE TRIED:
Toggling the recursive boolean of the function .intersectObjects(objects: Array, recursive: Boolean, optionalTarget: Array ) to true, so that it includes the children of the objects in the array.
Circumventing the ArrowHelper parent by iterating through scene.children for ArrowHelper objects and adding their lines and cones into a separate array of objects. From there I attempted to check for intersections with only the list of lines and cones, but no intersections were detected.
Raycaster setup:
const runRaycaster = (mouseEvent) => {
  ... // sets mouse and canvas bounds here
  const raycaster = new THREE.Raycaster();
  raycaster.setFromCamera(mouse, camera);
  const intersects = raycaster.intersectObjects(scene.children, true);
  if (intersects.length > 0) {
    for (let i = 0; i < intersects.length; i++) {
      // works for GEOMETRY ONLY
      // needs modifications for checking ArrowHelpers
      intersects[i].object.material.color.set(currentColor);
    }
  }
};
Here's my attempt to check the lines and cones individually, without the ArrowHelper parent:
let arrowObjectsList = [];
for (let i = 0; i < scene.children.length; i++) {
  if (scene.children[i].type === 'ArrowHelper') {
    arrowObjectsList.push(scene.children[i].line);
    arrowObjectsList.push(scene.children[i].cone);
  } else {
    console.log(scene.children[i].type);
  }
}
console.log(arrowObjectsList); // returns 2 objects per arrow on the canvas

// intersectsArrows always returns empty
const intersectsArrows = raycaster.intersectObjects(arrowObjectsList, true);
SOME NOTES:
Every ArrowHelper, its line, and its cone have uniquely identifiable names so they can be recolored/repositioned/deleted later.
The Raycaster runs with every onMouseDown and onMouseMove event.
Notably, the line and cone children of ArrowHelpers are a BufferGeometry and a CylinderBufferGeometry, respectively, rather than variations of Geometry. I'm wondering if this has anything to do with it. According to this example from the three.js documentation website, BufferGeometry can be detected by Raycaster in a similar fashion.
Setting recursive = true worked for me. Run the simple code below and click on the arrow head; you will see the intersection information printed to the console. (three.js r125)
let W = window.innerWidth;
let H = window.innerHeight;

const renderer = new THREE.WebGLRenderer({
  antialias: true,
  alpha: true
});
document.body.appendChild(renderer.domElement);

const scene = new THREE.Scene();

const camera = new THREE.PerspectiveCamera(28, 1, 1, 1000);
camera.position.set(5, 5, 5);
camera.lookAt(scene.position);
scene.add(camera);

const light = new THREE.DirectionalLight(0xffffff, 1);
light.position.set(0, 0, -1);
camera.add(light);

const mesh = new THREE.ArrowHelper(
  new THREE.Vector3(0, 0, 1),
  new THREE.Vector3(0, 0, 0),
  2,
  0xff0000,
  1,
  1
);
scene.add(mesh);

function render() {
  renderer.render(scene, camera);
}

function resize() {
  W = window.innerWidth;
  H = window.innerHeight;
  renderer.setSize(W, H);
  camera.aspect = W / H;
  camera.updateProjectionMatrix();
  render();
}

window.addEventListener("resize", resize);
resize();
render();

// RAYCASTER STUFF
const raycaster = new THREE.Raycaster();
const mouse = new THREE.Vector2();
renderer.domElement.addEventListener('mousedown', function (e) {
  mouse.set(
    (e.clientX / window.innerWidth) * 2 - 1,
    -(e.clientY / window.innerHeight) * 2 + 1
  );
  raycaster.setFromCamera(mouse, camera);
  const intersects = raycaster.intersectObjects(scene.children, true);
  console.log(intersects);
});
html,
body {
  width: 100%;
  height: 100%;
  padding: 0;
  margin: 0;
  overflow: hidden;
  background: skyblue;
}
<script src="https://threejs.org/build/three.min.js"></script>
After closer inspection, it was a matter of the set position, not necessarily the arrow itself: the position of the arrow varied based on where the user clicked to specify the start point. However, ArrowHelper still presented several problems. It was very difficult to select the line because the lineWidth value of LineBasicMaterial cannot take any value other than 1, despite being editable; this is due to a limitation in the OpenGL Core Profile, as addressed in the docs and in this question. Similarly, the cone would not respond to setLength. This limits the customization of the ArrowHelper tool pretty badly.
Because of this, I decided to entirely replace ArrowHelper with two objects coupled together: a TubeBufferGeometry and a ConeGeometry, both assigned a MeshBasicMaterial, in a way that can be picked by Raycaster out of the box.
... // the pos Float32Array is set according to user mouse coordinates.
const v1 = new THREE.Vector3(pos[0], pos[1], pos[2]);
const v2 = new THREE.Vector3(pos[3], pos[4], pos[5]);

const material = new THREE.MeshBasicMaterial({
  color: color,
  side: THREE.DoubleSide,
});

// Because there are only two vectors, no actual curve occurs.
// Therefore, it's our straight line.
const tubeGeometry = new THREE.TubeBufferGeometry(
  new THREE.CatmullRomCurve3([v1, v2]), 1, 3, 3, false);
const coneGeometry = new THREE.ConeGeometry(10, 10, 3, 1, false);

arrowLine = new THREE.Mesh(tubeGeometry, material);
arrowTip = new THREE.Mesh(coneGeometry, material);

// needs names to be updated later.
arrowLine.name = 'arrowLineName';
arrowTip.name = 'arrowTipName';
When placing the arrow, the user will click and drag to specify the start and end point of the arrow, so the arrow and its tip have to be updated with onMouseMove. We have to use Math.atan2 to get the angle in degrees between v1 and v2, with v1 as the center. Subtracting 90 orients the rotation to the default position.
... // on the onMouseMove event, pos is updated with new coords.
const setDirection = () => {
  const v1 = new THREE.Vector3(pos[0], pos[1], pos[2]);
  const v2 = new THREE.Vector3(pos[3], pos[4], pos[5]);

  // copying the v2 pos ensures that the arrow tip is always at the end.
  arrowTip.position.copy(v2);

  // rotating the arrow tip according to the angle between start and end
  // points, v1 and v2.
  let angleDegrees = 180 - (Math.atan2(pos[1] - pos[4], pos[3] - pos[0]) * 180 / Math.PI - 90);
  const angleRadians = angleDegrees * Math.PI / 180;
  arrowTip.rotation.set(0, 0, angleRadians);

  // NOT VERY EFFICIENT, but it does the job to "update" the curve.
  arrowLine.geometry.copy(
    new THREE.TubeBufferGeometry(new THREE.CatmullRomCurve3([v1, v2]), 1, 3, 3, false));

  scene.add(arrowLine);
  scene.add(arrowTip);
};
Out of the box, this "arrow" allows me to select and edit it with Raycaster without a problem. No worrying about line positioning, line thickness, or line length.
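For illustration (my addition, not part of the original answer), picking the two named parts could then look like this sketch, assuming the same raycaster, mouse and camera setup as in the snippets above:

// Pick the custom arrow parts and react based on their names.
// arrowLine and arrowTip share one material, so recoloring one recolors the whole arrow.
raycaster.setFromCamera(mouse, camera);
const arrowHits = raycaster.intersectObjects([arrowLine, arrowTip]);
if (arrowHits.length > 0) {
  const picked = arrowHits[0].object;
  if (picked.name === 'arrowLineName' || picked.name === 'arrowTipName') {
    picked.material.color.set(currentColor); // e.g. the bucket-tool recolor
  }
}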

How can you make WebGL text geometry selectable after you add it to the scene of the viewer?

After adding a text geometry object to the viewer, I noticed I am not able to select it with the mouse like I can with the rest of the objects in the viewer. How can I make it selectable? I haven't tried anything yet, as I have not seen any ideas in the docs. I want to be able to listen for the selection event, which I already have working; I just don't know how to make this new TextGeometry object selectable. Here is my code, sorry I didn't include it before.
createText (params) {
  const geometry = new TextGeometry(params.text,
    Object.assign({}, {
      font: new Font(FontJson),
      params
    }))

  const material = this.createColorMaterial(
    params.color)

  const text = new THREE.Mesh(
    geometry, material)

  text.position.set(
    params.position.x,
    params.position.y,
    params.position.z)

  this.viewer.impl.scene.add(text)
  this.viewer.impl.sceneUpdated(true)
}
Thank you!
In my experience, the Forge Viewer and its built-in APIs will only interact with translated models (SVF, F2D) from the Forge Model Derivative service. The text geometry seems to be a custom one created via THREE.TextGeometry, isn't it? You didn't tell us much about it.
If you want to interact with custom geometries in the Forge Viewer, you have to implement a Viewer Tool and add your own logic to it, such as highlighting a text geometry on mouse click. You can refer here for more details: https://forge.autodesk.com/cloud_and_mobile/2015/03/creating-tools-for-the-view-data-api.html
Since I didn't see any code from you here, I assume your text is put into _viewer.sceneAfter. Here is an example for a left mouse click:
// change color of custom geometries while mouse clicking
handleSingleClick( event, button ) {
  const _viewer = this.viewer;

  const intersectObjects = (function () {
    const pointerVector = new THREE.Vector3();
    const pointerDir = new THREE.Vector3();
    const ray = new THREE.Raycaster();
    const camera = _viewer.impl.camera;

    return function( pointer, objects, recursive ) {
      const rect = _viewer.impl.canvas.getBoundingClientRect();
      const x = (( pointer.clientX - rect.left ) / rect.width ) * 2 - 1;
      const y = - (( pointer.clientY - rect.top ) / rect.height ) * 2 + 1;

      if (camera.isPerspective) {
        pointerVector.set( x, y, 0.5 );
        pointerVector.unproject( camera );
        ray.set( camera.position, pointerVector.sub( camera.position ).normalize() );
      } else {
        pointerVector.set( x, y, -1 );
        pointerVector.unproject( camera );
        pointerDir.set( 0, 0, -1 );
        ray.set( pointerVector, pointerDir.transformDirection( camera.matrixWorld ) );
      }

      const intersections = ray.intersectObjects( objects, recursive );
      return intersections[0] ? intersections[0] : null;
    };
  })();

  const pointer = event.pointers ? event.pointers[ 0 ] : event;

  // Get intersected custom geometries
  const result = intersectObjects( pointer, _viewer.sceneAfter.children );
  if( result && result.object ) {
    const mesh = result.object;

    // Change object color
    let curColor = mesh.material.color;
    curColor = ( curColor.getHex() == 0xff0000 ? 0x00ff00 : 0xff0000 );
    mesh.material.color.setHex( curColor );

    // Rerender
    this.viewer.impl.invalidate( true, true, false );
  }

  return false;
}
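For completeness (my addition, not from the original answer): a handler like this typically lives inside a custom Viewer Tool that is registered with the viewer's ToolController. A rough sketch, assuming the standard tool interface and a placeholder tool name:

// Hypothetical wiring of the handler above into a custom Viewer Tool.
const myTextSelectionTool = {
  getNames: function () { return [ 'my-text-selection-tool' ]; }, // placeholder name
  getName: function () { return 'my-text-selection-tool'; },
  activate: function ( name ) {},
  deactivate: function ( name ) {},
  handleSingleClick: function ( event, button ) {
    // ... the raycasting logic shown above goes here
    return false; // return true to consume the event
  }
};

viewer.toolController.registerTool( myTextSelectionTool );
viewer.toolController.activateTool( 'my-text-selection-tool' );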
Hope it helps.
I wrote an article dedicated to that topic: Handling custom meshes selection along model components in the Forge Viewer

Picking objects with raycasters

I'm trying to pick some objects on click, using the standard code from every example:
function onMouseDown(evt) {
  evt.preventDefault();
  canvasAbsoluteHeight = $('canvas').height();
  canvasAbsoluteWidth = $('canvas').width();
  mouseDown = true;
  mouseX = evt.offsetX == undefined ? evt.layerX : evt.offsetX;
  mouseY = evt.offsetY == undefined ? evt.layerY : evt.offsetY;

  var mouse = new THREE.Vector3();
  mouse.x = ( evt.clientX / canvasAbsoluteWidth ) * 2 - 1;
  mouse.y = 1 - ( evt.clientY / canvasAbsoluteHeight ) * 2;
  mouse.z = 0;

  ray = new THREE.Raycaster( mouse, camera );
  var intersects = ray.intersectObjects( objects );
  console.log('intersects', intersects);
  if ( intersects.length > 0 ) {
    console.log('intersects', intersects);
  }
}
objects is an array of THREE.Object3D which should be able to be picked.
I think it may be something connected with the camera. My camera is a child of THREE.Object3D for easier manipulation, and the parent object is not set at origin.
Another thing is that the canvas is not fullscreen, which may have something to do with the mouse position? (It is inside a div with some offset from the edges of the page.)
Check this fiddle. There is a Picker class that you can use. First of all, you have to init this class with the camera and DOM element:
Picker.init(camera, domelement)
Then you have to attach the mesh:
Picker.attach(mesh)
After that, you specify what you want to do on mousedown/mouseup in the Picker methods.
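I cannot embed the fiddle here, but a minimal Picker along the lines described might look like the sketch below (the internals are my assumption, not the fiddle's actual code). Note the use of getBoundingClientRect() so the mouse coordinates are measured relative to the canvas rather than the page, which addresses the non-fullscreen canvas concern; Raycaster.setFromCamera works in world space, so the camera being a child of another Object3D is not a problem either.

// Hypothetical minimal Picker matching the interface described above.
const Picker = {
  meshes: [],
  init: function (camera, domElement) {
    this.camera = camera;
    this.raycaster = new THREE.Raycaster();
    this.mouse = new THREE.Vector2();
    domElement.addEventListener('mousedown', (evt) => {
      // Convert to normalized device coordinates relative to the canvas,
      // so a canvas that does not fill the page still maps correctly.
      const rect = domElement.getBoundingClientRect();
      this.mouse.x = ((evt.clientX - rect.left) / rect.width) * 2 - 1;
      this.mouse.y = -((evt.clientY - rect.top) / rect.height) * 2 + 1;
      this.raycaster.setFromCamera(this.mouse, this.camera);
      const hits = this.raycaster.intersectObjects(this.meshes, true);
      if (hits.length > 0) {
        console.log('picked', hits[0].object); // replace with your mousedown/mouseup logic
      }
    });
  },
  attach: function (mesh) {
    this.meshes.push(mesh);
  }
};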

Three.js raycaster to intersect only specified part of shape

I need to get an intersection using a raycaster only with certain vertices of a geometry, not with the whole geometry shape. I wrote this code, but nothing happens when I click on the vertex I set.
function onDocumentMouseClick(event) {
  event.preventDefault();
  mouse2D.x = (event.clientX / window.innerWidth) * 2 - 1;
  mouse2D.y = -(event.clientY / window.innerHeight) * 2 + 1;
  raycaster = projector.pickingRay(mouse2D.clone(), camera);
  // I want intersection only with vertices
  var intersects = raycaster.intersectObjects(objects[0].geometry.vertices[0]);
  if (intersects.length > 0) {
    console.log("ok");
  }
}
First, you need to pass an array of objects to raycaster.intersectObjects, as suggested by pixelmike:
var intersects = raycaster.intersectObjects([objects[0]])
You can't get the vertex of the intersection per se, but you can get the face for each intersection:
for ( var i = 0; i < intersects.length; i++ ) {
  if ( intersects[ i ].face == myTargetFace ) {
    console.log( "ok" );
  }
}
If you really want the particular object to only check for intersections with a particular subset of faces, you could override the raycast method of that object (a la https://github.com/mrdoob/three.js/blob/master/src/objects/Mesh.js#L56-L340 ) to only search those faces.
three.js r68
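A rough sketch of that override idea (my addition; allowedFaceIndices is a hypothetical list of the face indices you want to remain pickable):

// Wrap the built-in Mesh raycast so only hits on the chosen faces are reported.
var allowedFaceIndices = [ 0, 1, 2 ]; // hypothetical subset of faces
var originalRaycast = THREE.Mesh.prototype.raycast;
mesh.raycast = function ( raycaster, intersects ) {
  var hits = [];
  originalRaycast.call( this, raycaster, hits );
  for ( var i = 0; i < hits.length; i++ ) {
    if ( allowedFaceIndices.indexOf( hits[ i ].faceIndex ) !== -1 ) {
      intersects.push( hits[ i ] );
    }
  }
};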

Detect clicked object in THREE.js

I have a THREE.js scene where a lot of elements appear, and I need to detect what object the user is clicking on.
What I have done so far is the following. The camera does not move too much: it only changes its vertical position by a limited amount, always looking towards the same point. My approximate method is:
I take the coordinates of the click relative to the canvas.
I translate them into horizontal and vertical coordinates in the WebGL scene by means of a simple rescaling, and add a Z coordinate which is sufficiently far away.
I take a horizontal ray starting from the point above, constructed with THREE.Ray().
I use ray.intersectObjects() to find the first element along the ray.
This method approximately works, but it is sometimes a few pixels away from the actual point.
Is there a more reliable technique to find out which object a user has clicked on?
It depends on what kind of camera you are using.
1) PerspectiveCamera: the link that Mr.doob provides is fine.
2) OrthographicCamera: it is quite different:
var init = function() {
  camera = new THREE.OrthographicCamera( SCREEN_WIDTH / - 2, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2, SCREEN_HEIGHT / - 2, NEAR, FAR );
  document.addEventListener( 'mousedown', onDocumentMouseDown, false );
}

function onDocumentMouseDown( e ) {
  e.preventDefault();
  var mouseVector = new THREE.Vector3();
  mouseVector.x = 2 * ( e.clientX / SCREEN_WIDTH ) - 1;
  mouseVector.y = 1 - 2 * ( e.clientY / SCREEN_HEIGHT );
  var raycaster = projector.pickingRay( mouseVector.clone(), camera );
  var intersects = raycaster.intersectObject( TARGET );
  for ( var i = 0; i < intersects.length; i++ ) {
    var intersection = intersects[ i ],
      obj = intersection.object;
    console.log( "Intersected object", obj );
  }
}
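Note (my addition): projector.pickingRay was removed in later three.js releases. In current versions, Raycaster.setFromCamera handles both perspective and orthographic cameras, so the equivalent would be:

var raycaster = new THREE.Raycaster();
var mouse = new THREE.Vector2();

function onDocumentMouseDown( e ) {
  e.preventDefault();
  mouse.x = 2 * ( e.clientX / SCREEN_WIDTH ) - 1;
  mouse.y = 1 - 2 * ( e.clientY / SCREEN_HEIGHT );
  raycaster.setFromCamera( mouse, camera ); // works for PerspectiveCamera and OrthographicCamera
  var intersects = raycaster.intersectObject( TARGET );
  if ( intersects.length > 0 ) {
    console.log( 'Intersected object', intersects[ 0 ].object );
  }
}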
Check out this one:
var camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 1, 5000);
var object; // your object
document.addEventListener('mousedown', onMouseDown, false);

function onMouseDown(e) {
  var vectorMouse = new THREE.Vector3( // vector from camera to mouse
    -(window.innerWidth / 2 - e.clientX) * 2 / window.innerWidth,
    (window.innerHeight / 2 - e.clientY) * 2 / window.innerHeight,
    -1 / Math.tan(22.5 * Math.PI / 180)); // 22.5 is half of the camera frustum angle of 45 degrees
  vectorMouse.applyQuaternion(camera.quaternion);
  vectorMouse.normalize();

  var vectorObject = new THREE.Vector3(); // vector from camera to object
  vectorObject.set(object.x - camera.position.x,
    object.y - camera.position.y,
    object.z - camera.position.z);
  vectorObject.normalize();

  if (vectorMouse.angleTo(vectorObject) * 180 / Math.PI < 1) {
    // mouse's position is near object's position
  }
}
This checks for an intersection between the mouse and any of the cubes in 3D space and alters its color. Maybe this helps you.
I ran into problems trying to implement this for a canvas which does not take up the entire width and height of the screen. Here is the solution I found works quite well.
Initialize everything on an existing canvas:
var init = function() {
  var canvas_model = document.getElementById('model');
  var viewSize = 50; // Depending on object size, canvas size etc.
  var camera = new THREE.OrthographicCamera(
    -canvas_model.clientWidth / viewSize, canvas_model.clientWidth / viewSize,
    canvas_model.clientHeight / viewSize, -canvas_model.clientHeight / viewSize,
    0.01, 2000);
}
Add an event listener to the canvas:
canvas_model.addEventListener('click', function(event) {
  var bounds = canvas_model.getBoundingClientRect();
  mouse.x = ( (event.clientX - bounds.left) / canvas_model.clientWidth ) * 2 - 1;
  mouse.y = - ( (event.clientY - bounds.top) / canvas_model.clientHeight ) * 2 + 1;
  raycaster.setFromCamera( mouse, camera );
  var intersects = raycaster.intersectObjects(scene.children, true);
  if (intersects.length > 0) {
    // Do stuff
  }
}, false)
Or for a 'touchstart' event, change the lines calculating the mouse.x and mouse.y into:
mouse.x = ( (event.touches[0].clientX - bounds.left) / canvas_model.clientWidth ) * 2 - 1;
mouse.y = - ( (event.touches[0].clientY - bounds.top) / canvas_model.clientHeight ) * 2 + 1;
