How to move an object forward in Three.js? - javascript

Is there any way to move an object forward in Three.js?
Maybe I should convert rotation.x, y, z to a vector and work with that, but I'm a beginner and I don't have any idea how to do it.

Object3D has some handy methods for that.
object.translateZ( 10 );

Please use the answer above from @mrdoob, the creator of Three.js:
object.translateZ( delta );
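For example, a minimal sketch (assuming object, renderer, scene and camera already exist in your setup):
// move the object along its own local Z axis every frame;
// the direction follows the object's current rotation automatically
var clock = new THREE.Clock();
function animate() {
    requestAnimationFrame( animate );
    var delta = clock.getDelta();       // seconds since the last frame
    object.translateZ( 5 * delta );     // 5 units per second along local +Z
    // for a camera, "forward" is usually local -Z: camera.translateZ( -5 * delta );
    renderer.render( scene, camera );
}
animate();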
===OLD ANSWER===
A tutorial that worked for an older Three.js version:
http://www.aerotwist.com/tutorials/getting-started-with-three-js/
// set position of YOUR_OBJECT
YOUR_OBJECT.position.x = 10;
YOUR_OBJECT.position.y = 50;
YOUR_OBJECT.position.z = 130;
More options (note that multiplySelf and the static Matrix4 helpers below come from very old three.js versions):
var STEP = 10;
var newCubeMatrix = cube.matrix;
newCubeMatrix.identity();
//newCubeMatrix.multiplySelf(THREE.Matrix4.rotationYMatrix(cube.rotation.y));
newCubeMatrix.multiplySelf(THREE.Matrix4.translationMatrix(cube.position.x, cube.position.y, cube.position.z + STEP));
cube.updateMatrix();
details posted here https://gamedev.stackexchange.com/questions/7490/translate-object-in-world-space-usings-its-local-rotation

The camera is a point in space.
"Forward" is another point in space, so you can simply take the coordinates of a second point and move the camera location closer to that "forward" location.
However, you may also need to turn left and right, which involves polar coordinates.
Adjust these values for your convenience:
var scene;
var camera;
var playerDirection = 0;//angles 0 - 2pi
var dVector;
var angularSpeed = 0.01;
var playerSpeed = 0.075;
var playerBackwardsSpeed = playerSpeed * 0.4;
This function will initialize the scene:
function init(){
    scene = new THREE.Scene();
    camera = new THREE.PerspectiveCamera( 75, window.innerWidth / window.innerHeight, 0.1, 1000 );
    renderer = new THREE.WebGLRenderer();
    renderer.setSize( window.innerWidth, window.innerHeight );
    document.body.appendChild( renderer.domElement );
    camera.position.z = 5;
    dVector = new THREE.Vector3( 0, 0, 0 );
    camera.lookAt( dVector );
    animate();
}
Movement stops when the player releases a key (the keyup event):
function key_up(event){
    playerIsMovingForward = 0;
    playerIsMovingBackwards = 0;
    playerIsRotatingLeft = 0;
    playerIsRotatingRight = 0;
    playerGoesUp = 0;
    playerGoesDown = 0;
}
When the player is moving, we update the position:
function updatePlayer(){
    if(playerIsRotatingLeft){ // rotate left
        playerDirection -= angularSpeed;
    }
    if(playerIsRotatingRight){ // rotate right
        playerDirection += angularSpeed;
    }
    if(playerIsRotatingRight || playerIsRotatingLeft){
        setPlayerDirection();
    }
    if(playerIsMovingForward){ // go forward
        moveForward(playerSpeed);
    }
    if(playerIsMovingBackwards){ // go backwards
        moveForward(-playerBackwardsSpeed);
    }
}
We assume that by "forward" you meant "using the WASD keys":
function key_down(event){
    var W = 87;
    var S = 83;
    var A = 65;
    var D = 68;
    var minus = 189;
    var plus = 187;
    var k = event.keyCode;
    console.log(k);
    if(k == A){ // rotate left
        playerIsRotatingLeft = 1;
    }
    if(k == D){ // rotate right
        playerIsRotatingRight = 1;
    }
    if(k == W){ // go forward
        playerIsMovingForward = 1;
    }
    if(k == S){ // go back
        playerIsMovingBackwards = 1;
    }
}
The player will only move as fast as the browser's animation frame rate, so you may want to adjust this code:
function animate() {
    requestAnimationFrame( animate );
    updatePlayer();
    renderer.render( scene, camera );
}
This is the code that moves the camera toward the dVector point and also shifts the direction vector (dVector) by the same amount, so that it always stays ahead of the camera:
function moveForward(speed){
    var delta_x = speed * Math.cos(playerDirection);
    var delta_z = speed * Math.sin(playerDirection);
    var new_x = camera.position.x + delta_x;
    var new_z = camera.position.z + delta_z;
    camera.position.x = new_x;
    camera.position.z = new_z;
    var new_dx = dVector.x + delta_x;
    var new_dz = dVector.z + delta_z;
    dVector.x = new_dx;
    dVector.z = new_dz;
    camera.lookAt( dVector );
}
Moving forward usually involves turning left and right as well. Here is some code that does that; it uses polar coordinates to move the look-at point relative to the camera (the center of the "circle") by a given angle in radians:
function setPlayerDirection(){
    // direction changed
    var delta_x = playerSpeed * Math.cos(playerDirection);
    var delta_z = playerSpeed * Math.sin(playerDirection);
    var new_dx = camera.position.x + delta_x;
    var new_dz = camera.position.z + delta_z;
    dVector.x = new_dx;
    dVector.z = new_dz;
    console.log(dVector);
    camera.lookAt( dVector );
}
animate();
I hope that helps.

Another option is to use Vector3's set method:
position.set(x, y, z);
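For example, a minimal sketch (assuming mesh is an existing Object3D):
mesh.position.set( 10, 50, 130 ); // sets x, y and z in one call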

Related

Issue is offset between mouse pointer and point markers in orthographic mode

My point is not moving under the mouse pointer in orthographic mode.
With the perspective camera it works perfectly, but not in orthographic mode.
Below is an image of what is happening: [image]
I have tried the things in this link: Orthographic camera and selecting objects with raycast
The code I am trying:
// var mouse = new THREE.Vector2();
// mouse.x = ( event.clientX / viewer.renderer.domElement.clientWidth ) * 2 - 1;
// mouse.y = - ( event.clientY / viewer.renderer.domElement.clientHeight ) * 2 + 1;
var vector = new THREE.Vector3( scope.mouse.x, scope.mouse.y, 0.5);
//console.log("mouse values ",scope.mouse.x, scope.mouse.y);
//console.log(scope.scene.cameraP)
let camera = scope.scene.getActiveCamera()
vector.unproject(camera);
console.log(vector)
var direction = vector.sub(camera.position).normalize();
var ray = new THREE.Ray(camera.position, direction);
var pointClouds = [];
scope.scene.referenceFrame.traverse(function(object){
if(object instanceof Potree.PointCloudOctree || object instanceof Potree.PointCloudArena4D){
pointClouds.push(object);
}
});
var closestPoint = null;
var closestPointDistance = null;
for(var i = 0; i < pointClouds.length; i++){
var pointcloud = pointClouds[i];
var point = pointcloud.pick(scope.renderer, scope.scene.cameraP, ray);
if(!point){
continue;
}
var distance = viewer.scene.getActiveCamera().position.distanceTo(point.position);
console.log(distance);
if(!closestPoint || distance < closestPointDistance){
closestPoint = point;
closestPointDistance = distance;
}
}
return closestPoint ? closestPoint : null;
};
Here getActiveCamera() is a function which switches between the perspective camera and the orthographic camera.
Below is an image of what I want: [image]
Please help, I have been stuck on this for the last three weeks.
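A sketch worth trying (based on the camera-agnostic picking pattern shown in the THREE.Projector answers further down this page): raycaster.setFromCamera builds the correct ray for both perspective and orthographic cameras, so the manually unprojected z = 0.5 vector can be replaced:
var raycaster = new THREE.Raycaster();
var mouse = new THREE.Vector2();
mouse.x = ( event.clientX / viewer.renderer.domElement.clientWidth ) * 2 - 1;
mouse.y = - ( event.clientY / viewer.renderer.domElement.clientHeight ) * 2 + 1;
var camera = scope.scene.getActiveCamera();
raycaster.setFromCamera( mouse, camera ); // correct ray origin/direction for either camera type
var ray = raycaster.ray;                  // use this ray in pointcloud.pick(...) instead of the hand-built one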

Three JS Blank (Black) Canvas On Image To Particles

I am using three.js to draw an image on a canvas, collect data from the image (i.e. pixel colors) and redraw it as a collection of particles using that data, such as the colors.
I have zero error messages or warnings, just a blank, black canvas.
The code I am using is below:
var xhr = new XMLHttpRequest();
xhr.open("GET", "http://example.com/assets/css/sl.jpg");
xhr.responseType = "blob";
xhr.onload = function()
{
blob = xhr.response;
P.readAsDataURL(blob);
P.onload = function(){
image = document.createElement("img");
image.src = P.result;
setTimeout(function(){
// split the image
addParticles();
}, 100);
}
}
xhr.send();
addLights();
update();
setTimeout(function(){
holdAtOrigin = "next";
},1000)
function addParticles()
{
// draw in the image, and make sure it fits the canvas size :)
var ratio = 1 / Math.max(image.width/500, image.height/500);
var scaledWidth = image.width * ratio;
var scaledHeight = image.height * ratio;
context.drawImage(image,
0,0,image.width,image.height,
(500 - scaledWidth) * .5, (500 - scaledHeight) *.5, scaledWidth, scaledHeight);
// now set up the particle material
var material = new THREE.MeshPhongMaterial( { } );
var geometry = new THREE.Geometry();
var pixels = context.getImageData(0,0,WIDTH,HEIGHT);
var step = DENSITY * 4;
var x = 0, y = 0;
// go through the image pixels
for(x = 0; x < WIDTH * 4; x+= step)
{
for(y = HEIGHT; y >= 0 ; y -= DENSITY)
{
var p = ((y * WIDTH * 4) + x);
// grab the actual data from the
// pixel, ignoring any transparent ones
if(pixels.data[p+3] > 0)
{
var pixelCol = (pixels.data[p] << 16) + (pixels.data[p+1] << 8) + pixels.data[p+2];
var color = new THREE.Color(pixelCol);
var vector = new THREE.Vector3(-300 + x/4, 240 - y, 0);
// push on the particle
geometry.vertices.push(new THREE.Vector3(vector));
geometry.colors.push(color);
}
}
}
// now create a new system
particleSystem = new THREE.Points(geometry, material);
console.log(particleSystem);
particleSystem.sortParticles = true;
// grab a couple of cacheable vals
particles = particleSystem.geometry.vertices;
colors = particleSystem.geometry.colors;
// add some additional vars to the
// particles to ensure we can do physics
// and so on
var ps = particles.length;
while(ps--)
{
var particle = particles[ps];
particle.velocity = new THREE.Vector3();
particle.mass = 5;
particle.origPos = particle.x.clone();
}
// gc and add
pixels = null;
scene.add(particleSystem);
//test render
}
function addLights()
{
// point
pointLight = new THREE.PointLight( 0xFFFFFF );
pointLight.position.x = 300;
pointLight.position.y = 300;
pointLight.position.z = 600;
scene.add( pointLight );
// directional
directionalLight = new THREE.DirectionalLight( 0xFFFFFF );
directionalLight.position.x = -.5;
directionalLight.position.y = -1;
directionalLight.position.z = -.5;
directionalLight.position.normalize();
directionalLight.intensity = .6;
scene.add( directionalLight );
}
function update(){
var ps = particles.length;
while(ps--)
{
var particle = particles[ps];
// if we are holding at the origin
// values, tween the particles back
// to where they should be
if(holdAtOrigin == "start")
{
particle.velocity = new THREE.Vector3();
//particle.position.x += (particle.origPos.x - particle.position.x) * .2;
//particle.position.y += (particle.origPos.y - particle.position.y) * .2;
//particle.position.z += (particle.origPos.z - particle.position.z) * .2;
particle.x.x += (particle.origPos.x - .0000000000000000001) * 2;
particle.x.y += (particle.origPos.y - .0000000000000000001) * 2;
}
else if (holdAtOrigin == "next")
{
particle.velocity = new THREE.Vector3();
particle.x.x += (particle.origPos.x - particle.x.x) * .2;
particle.x.y += (particle.origPos.y - particle.x.y) * .2;
particle.x.z += (particle.origPos.z - particle.x.z) * .2;
}
else{
// get the particles colour and put
// it into an array
var col = colors[ps];
var colArray = [col.r, col.g, col.b];
// go through each component colour
for(var i = 0; i < colArray.length; i++)
{
// only analyse it if actually
// has some of this colour
if(colArray[i] > 0)
{
// get the target based on where it
// is in the array
var target = i == 0 ? redCentre :
i == 1 ? greenCentre :
blueCentre;
// get the distance of the particle to the centre in question
// and add on the resultant acceleration
var dist = particle.position.distanceToSquared(target.position),
force = ((particle.mass * target.mass) / dist) * colArray[i] * AGGRESSION,
acceleration = (new THREE.Vector3())
.sub(target.position,particle.position)
.normalize()
.multiplyScalar(force);
// if we are attracting we add
// the velocity
if(mode == ATTRACT)
{
// note we only need to check the
// squared radius for the collision :)
if(dist > target.boundRadiusSquared) {
particle.velocity.addSelf(acceleration);
}
else if (bounceParticles) {
// bounce, bounce, bounce
particle.velocity.negate();
}
else {
// stop dead
particle.velocity = new THREE.Vector3();
}
}
else {
// push it away
particle.velocity.subSelf(acceleration);
}
particle.position.addSelf(particle.velocity);
}
}
}
}
// set up a request for a render
requestAnimationFrame(update);
render();
}
function render()
{
// only render if we have
// an active image
if(image) {
if(holdAtOrigin=="start")
{
camera.position.z = 900;
}
if(camera.position.z < 200)
{
//do nothing
}
else{
camera.position.z -= 1.7;
};
renderer.render( scene, camera );
}
}
I checked the console log at various intervals and found that the pixel data is being collected appropriately, so I don't know what is wrong.
Is it the material? When I used a normal (light-independent) material, the code worked as expected and I could see my particles.
But I wanted it to be affected by lights, so I changed it to var material = new THREE.MeshPhongMaterial( { } ); without any arguments.
Is this my problem or is it elsewhere in the code?
Thank you!
This may also be pertinent: How to get the absolute position of a vertex in three.js?
Because particle.x.x or particle.x.y doesn't look right to me, even though I wrote that code based on logged object contents.
EDIT: I changed the Phong line to THREE.PointsMaterial and amped up the potency of the light, but still a blank, black canvas.
EDIT 2: So I think it may be a problem with the particle coordinates being misconstrued? When I inspect using console.log(particleSystem); I get the following:
Did it used to be that x, y and z were wrapped in a position property that newer versions of three.js have removed?
For example I've found example code like:
particle.origPos = particle.position.clone();
But I don't see a position property. Should I clone just the x, y and z values, or should I clone the whole vertex? Sorry if this is confusing or irrelevant; I'm just trying to chase down why I have a blank canvas.
EDIT 3: I've removed the update function's position alterations, but I still get a weird console log for the particle system even when all I am doing is cloning the particle using particle.origPos = particle.clone();
Basically I have x, y and z properties, but the x property is itself an object with its own x, y and z. Why is this, and how do I fix it?
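A likely culprit, judging only from the code posted above: geometry.vertices.push(new THREE.Vector3(vector)) passes a Vector3 into the Vector3 constructor, so the whole vector ends up stored in the new vertex's x property, which is exactly the nested x, y, z being described. A minimal sketch of the fix (in the old Geometry API the vertex is itself the position, so clone the vertex, not a position property):
// push the vector itself (or a clone), not a Vector3 wrapped in another Vector3
geometry.vertices.push( vector.clone() );
geometry.colors.push( color );
// later, each entry in geometry.vertices is a plain Vector3,
// so remember its original position by cloning the vertex directly
particle.origPos = particle.clone();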

Incrementally display three.js TubeGeometry

I am able to display a THREE.TubeGeometry figure as follows
Code below, link to jsbin
<html>
<body>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r75/three.js"></script>
<script>
// global variables
var renderer;
var scene;
var camera;
var geometry;
var control;
var count = 0;
var animationTracker;
init();
drawSpline();
function init()
{
// create a scene, that will hold all our elements such as objects, cameras and lights.
scene = new THREE.Scene();
// create a camera, which defines where we're looking at.
camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 0.1, 1000);
// create a render, sets the background color and the size
renderer = new THREE.WebGLRenderer();
renderer.setClearColor('lightgray', 1.0);
renderer.setSize(window.innerWidth, window.innerHeight);
// position and point the camera to the center of the scene
camera.position.x = 0;
camera.position.y = 40;
camera.position.z = 40;
camera.lookAt(scene.position);
// add the output of the renderer to the html element
document.body.appendChild(renderer.domElement);
}
function drawSpline(numPoints)
{
var numPoints = 100;
// var start = new THREE.Vector3(-5, 0, 20);
var start = new THREE.Vector3(-5, 0, 20);
var middle = new THREE.Vector3(0, 35, 0);
var end = new THREE.Vector3(5, 0, -20);
var curveQuad = new THREE.QuadraticBezierCurve3(start, middle, end);
var tube = new THREE.TubeGeometry(curveQuad, numPoints, 0.5, 20, false);
var mesh = new THREE.Mesh(tube, new THREE.MeshNormalMaterial({
opacity: 0.9,
transparent: true
}));
scene.add(mesh);
renderer.render(scene, camera);
}
</script>
</body>
</html>
However, I would like to display it incrementally, like an arc that is loading: it starts at the start point, draws incrementally, and finally looks like the complete arc.
I have been putting in some effort and was able to do this by storing all the points/coordinates covered by the arc and drawing lines between consecutive coordinates, which gives the 'arc loading incrementally' feel. However, is there a better way to achieve this? This is the link to jsbin.
Adding the code here as well
<!DOCTYPE html>
<html>
<head>
<title>Incremental Spline Curve</title>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r75/three.js"></script>
<style>
body {
margin: 0;
overflow: hidden;
}
</style>
</head>
<script>
// global variables
var renderer;
var scene;
var camera;
var splineGeometry;
var control;
var count = 0;
var animationTracker;
// var sphereCamera;
var sphere;
var light;
function init() {
// create a scene, that will hold all our elements such as objects, cameras and lights.
scene = new THREE.Scene();
// create a camera, which defines where we're looking at.
camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 0.1, 1000);
// create a render, sets the background color and the size
renderer = new THREE.WebGLRenderer();
// renderer.setClearColor(0x000000, 1.0);
renderer.setClearColor( 0xffffff, 1 );
renderer.setSize(window.innerWidth, window.innerHeight);
// position and point the camera to the center of the scene
camera.position.x = 0;
camera.position.y = 40;
camera.position.z = 40;
camera.lookAt(scene.position);
// add the output of the renderer to the html element
document.body.appendChild(renderer.domElement);
// //init for sphere
// sphereCamera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 1, 1000);
// sphereCamera.position.y = -400;
// sphereCamera.position.z = 400;
// sphereCamera.rotation.x = .70;
sphere = new THREE.Mesh(new THREE.SphereGeometry(0.8,31,31), new THREE.MeshLambertMaterial({
color: 'yellow',
}));
light = new THREE.DirectionalLight('white', 1);
// light.position.set(0,-400,400).normalize();
light.position.set(0,10,10).normalize();
//get points covered by Spline
getSplineData();
}
//save points in geometry.vertices
function getSplineData() {
var curve = new THREE.CubicBezierCurve3(
new THREE.Vector3( -5, 0, 10 ),
new THREE.Vector3(0, 20, 0 ),
new THREE.Vector3(0, 20, 0 ),
new THREE.Vector3( 2, 0, -25 )
);
splineGeometry = new THREE.Geometry();
splineGeometry.vertices = curve.getPoints( 50 );
animate();
}
//scheduler loop
function animate() {
if(count == 50)
{
cancelAnimationFrame(animationTracker);
return;
}
//add line to the scene
drawLine();
renderer.render(scene, camera);
// renderer.render(scene, sphereCamera);
count += 1;
// camera.position.z -= 0.25;
// camera.position.y -= 0.25;
animationTracker = requestAnimationFrame(animate);
}
function drawLine() {
var lineGeometry = new THREE.Geometry();
var lineMaterial = new THREE.LineBasicMaterial({
color: 0x0000ff
});
console.log(splineGeometry.vertices[count]);
console.log(splineGeometry.vertices[count+1]);
lineGeometry.vertices.push(
splineGeometry.vertices[count],
splineGeometry.vertices[count+1]
);
var line = new THREE.Line( lineGeometry, lineMaterial );
scene.add( line );
}
// calls the init function when the window is done loading.
window.onload = init;
</script>
<body>
</body>
</html>
Drawback: doing it the above way means that, at the end of the day, I'm drawing lines between consecutive points, so I lose out on a lot of the effects possible with TubeGeometry, such as thickness, transparency, etc.
Please suggest an alternative way to get a smooth incremental load of the TubeGeometry.
THREE.TubeGeometry returns a THREE.BufferGeometry.
With THREE.BufferGeometry, you have access to a property drawRange that you can set to animate the drawing of the mesh:
let nEnd = 0, nMax, nStep = 90; // 30 faces * 3 vertices/face
...
const geometry = new THREE.TubeGeometry( path, pathSegments, tubeRadius, radiusSegments, closed );
nMax = geometry.attributes.position.count;
...
function animate() {
requestAnimationFrame( animate );
nEnd = ( nEnd + nStep ) % nMax;
mesh.geometry.setDrawRange( 0, nEnd );
renderer.render( scene, camera );
}
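The modulo makes the draw range wrap around, so the tube redraws over and over; if it should draw once and then stay complete, a small variant (sketch) clamps instead:
nEnd = Math.min( nEnd + nStep, nMax );
mesh.geometry.setDrawRange( 0, nEnd );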
EDIT: For another approach, see this SO answer.
three.js r.144
Normally you would be able to use the method .getPointAt() to "get a vector for point at relative position in curve according to arc length" to get a point at a certain percentage of the length of the curve.
So, normally, if you want to draw 70% of the curve and a full curve is drawn in 100 segments, you could do:
var percentage = 70;
var curvePath = new THREE.CurvePath();
var end, start = curveQuad.getPointAt( 0 );
for(var i = 1; i < percentage; i++){
end = curveQuad.getPointAt( i / 100 );
lineCurve = new THREE.LineCurve( start, end );
curvePath.add( lineCurve );
start = end;
}
But I think this is not working for your curveQuad, since the getPointAt method is not implemented for this type. A workaround is to get 100 points for your curve in an array like this:
points = curve.getPoints(100);
And then you can do almost the same:
var percentage = 70;
var curvePath = new THREE.CurvePath();
var end, start = points[ 0 ];
for(var i = 1; i < percentage; i++){
end = points[ i ];
lineCurve = new THREE.LineCurve( start, end );
curvePath.add( lineCurve );
start = end;
}
Now your curvePath holds the line segments you want to use for drawing the tube:
// draw the geometry
var radius = 5, radiusSegments = 8, closed = false;
var geometry = new THREE.TubeGeometry(curvePath, percentage, radius, radiusSegments, closed);
Here is a fiddle with a demonstration of how to use this dynamically.
I'm not really that familiar with three.js, but I think I can be of assistance. I have two solutions for you, both based on the same principle: build a new TubeGeometry, or rebuild the current one, around a new curve.
Solution 1 (Simple):
var CurveSection = THREE.Curve.create(function(base, from, to) {
this.base = base;
this.from = from;
this.to = to;
}, function(t) {
return this.base.getPoint((1 - t) * this.from + t * this.to);
});
You define a new type of curve which just selects a segment out of a given curve. Usage:
var curve = new CurveSection(yourCurve, 0, .76); // Where .76 is your percentage
Now you can build a new tube.
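For example, a sketch of rebuilding the tube each frame as the drawn fraction grows (assuming curveQuad, mesh, renderer, scene and camera from the question):
var fraction = 0;
function animateTube() {
    requestAnimationFrame( animateTube );
    fraction = Math.min( fraction + 0.01, 1 );            // grow towards the full curve
    var section = new CurveSection( curveQuad, 0, fraction );
    mesh.geometry.dispose();                              // free the old geometry
    mesh.geometry = new THREE.TubeGeometry( section, 100, 0.5, 20, false );
    renderer.render( scene, camera );
}
animateTube();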
Solution 2 (Mathematics!):
You are using a quadratic Bezier curve for your arc; that's awesome! This curve is a parabola. You want just a segment of that parabola, and that is again a parabola, just with other bounds.
What we need is a section of the Bezier curve. Let's say the curve is defined by A (start), B (direction), C (end). If we want to change the start to a point D and the end to a point F, we need the point E that plays the role of the direction point for the new curve; the tangents to our parabola at D and F have to intersect at E. The following code will give us the desired result:
// Calculates the instersection point of Line3 l1 and Line3 l2.
function intersection(l1, l2) {
var A = l1.start;
var P = l2.closestPointToPoint(A);
var Q = l1.closestPointToPoint(P);
var l = P.distanceToSquared(A) / Q.distanceTo(A);
var d = (new THREE.Vector3()).subVectors(Q, A);
return d.multiplyScalar(l / d.length()).add(A);
}
// Calculate the tangentVector of the bezier-curve
function tangentQuadraticBezier(bezier, t) {
var s = bezier.v0,
m = bezier.v1,
e = bezier.v2;
return new THREE.Vector3(
THREE.CurveUtils.tangentQuadraticBezier(t, s.x, m.x, e.x),
THREE.CurveUtils.tangentQuadraticBezier(t, s.y, m.y, e.y),
THREE.CurveUtils.tangentQuadraticBezier(t, s.z, m.z, e.z)
);
}
// Returns a new QuadraticBezierCurve3 with the new bounds.
function sectionInQuadraticBezier(bezier, from, to) {
var s = bezier.v0,
m = bezier.v1,
e = bezier.v2;
var ns = bezier.getPoint(from),
ne = bezier.getPoint(to);
var nm = intersection(
new THREE.Line3(ns, tangentQuadraticBezier(bezier, from).add(ns)),
new THREE.Line3(ne, tangentQuadraticBezier(bezier, to).add(ne))
);
return new THREE.QuadraticBezierCurve3(ns, nm, ne);
}
This is a very mathematical way, but if you should need the special properties of a Bezier curve, this is the way to go.
Note: The first solution is the simplest. I am not familiar with Three.js so I wouldn't know what the most efficient way to implement the animation is. Three.js doesn't seem to use the special properties of a bezier curve so maybe solution 2 isn't that useful.
I hope you have gotten something useful out of this.

Set animation for camera position from beginning to end in WebGL

I have my current position of the camera, and the final position (focusing on an element). I would like to make an animation of the camera going from the beginning to the end.
Thank you.
One idea is to create a unit vector which represents the direction from the camera to the object, then use this value to change the camera's position vector over time:
var speed = 4;
var p = camera.position;
var dt = p.clone();
dt.normalize();
dt.multiplyScalar(speed);
And then in render function:
function render () {
if (camera.position.length() > 0) {
camera.position.sub(dt);
}
renderer.render(scene, camera);
requestAnimationFrame(render);
}
https://jsfiddle.net/dc3La82y/
Thanks! I applied that; the problem is that I try to render in a loop while the camera is approaching the element, but the result only shows once the camera has reached it; nothing is shown on the way to the object.
Here is the code:
var cameraPos = new THREE.Vector3().addVectors(elementCenter, new THREE.Vector3(distance, distance, distance));
var cameraLook = new THREE.Vector3(elementCenter.x, elementCenter.y, elementCenter.z);
//camera.position.set(cameraPos.x, cameraPos.y, cameraPos.z);
camera.lookAt(new THREE.Vector3(cameraLook.x, cameraLook.y, cameraLook.z));
var dx = camera.position.x - cameraPos.x;
var dy = camera.position.y - cameraPos.y;
var dz = camera.position.z - cameraPos.z;
var p = new THREE.Vector3(dx, dy, dz);
var speed = 400;
var dt = p.clone();
dt.normalize();
dt.multiplyScalar(speed);
var times = p.length() / speed;
for ( i = 0; i < times; i++ ) {
    camera.position.sub(dt);
    camera.lookAt(new THREE.Vector3(cameraLook.x, cameraLook.y, cameraLook.z)); // because it is changed when the camera moves, I have to give it its value again
    render();
    requestAnimationFrame(render);
}
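A note on why nothing shows on the way: the for loop runs to completion synchronously, before the browser gets a chance to paint, so only the final frame is visible. A sketch of an alternative (reusing cameraPos, cameraLook and dt from the code above) that steps once per animation frame, so each intermediate position renders:
function animateCamera() {
    if ( camera.position.distanceTo( cameraPos ) > dt.length() ) {
        camera.position.sub( dt );          // one step toward the target per frame
        camera.lookAt( cameraLook );
        requestAnimationFrame( animateCamera );
    } else {
        camera.position.copy( cameraPos );  // snap to the final position
        camera.lookAt( cameraLook );
    }
    render();
}
animateCamera();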

Three.js THREE.Projector has been moved to

I understand there is no THREE.projector in version 71 (see the deprecated list), but I don't understand how to replace it, particularly in this code that detects which object has been clicked on:
var vector = new THREE.Vector3(
(event.clientX / window.innerWidth) * 2 - 1,
-(event.clientY / window.innerHeight) * 2 + 1,
0.5
);
projector.unprojectVector(vector, camera);
var raycaster = new THREE.Raycaster(
camera.position,
vector.sub(camera.position).normalize()
);
var intersects = raycaster.intersectObjects(objects);
if (intersects.length > 0) {
clicked = intersects[0];
console.log("my clicked object:", clicked);
}
There is now an easier pattern that works with both orthographic and perspective camera types:
var raycaster = new THREE.Raycaster(); // create once
var mouse = new THREE.Vector2(); // create once
...
mouse.x = ( event.clientX / renderer.domElement.clientWidth ) * 2 - 1;
mouse.y = - ( event.clientY / renderer.domElement.clientHeight ) * 2 + 1;
raycaster.setFromCamera( mouse, camera );
var intersects = raycaster.intersectObjects( objects, recursiveFlag );
three.js r.84
The three.js Raycaster documentation actually gives all the relevant information laid out in these answers, as well as the missing points that may be difficult to get your head around.
var raycaster = new THREE.Raycaster();
var mouse = new THREE.Vector2();
function onMouseMove( event ) {
// calculate mouse position in normalized device coordinates
// (-1 to +1) for both components
mouse.x = ( event.clientX / window.innerWidth ) * 2 - 1;
mouse.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
}
function render() {
// update the picking ray with the camera and mouse position
raycaster.setFromCamera( mouse, camera );
// calculate objects intersecting the picking ray
var intersects = raycaster.intersectObjects( scene.children );
for ( var i = 0; i < intersects.length; i++ ) {
intersects[ i ].object.material.color.set( 0xff0000 );
}
renderer.render( scene, camera );
}
window.addEventListener( 'mousemove', onMouseMove, false );
window.requestAnimationFrame(render);
Hope it helps.
You can use the latest recommended version as stated above.
To get your specific code working, replace:
projector.unprojectVector( vector, camera );
with
vector.unproject(camera);
I noted that everything said before works fine in a full window, but if you have other things besides the canvas on the page, you need to get the click event target's offset and subtract it.
Here e is the event and mVec is a THREE.Vector2:
let et = e.target, de = renderer.domElement;
let trueX = (e.pageX - et.offsetLeft);
let trueY = (e.pageY - et.offsetTop);
mVec.x = (((trueX / de.width) * 2) - 1);
mVec.y = (((trueY / de.height) * -2) + 1);
wup.raycaster.setFromCamera( mVec, camera );
[Then check for intersections, etc.]
It looks like you also need to watch for dragging (mouse movements), or releasing from a drag will trigger an unwanted click when using window.addEventListener( 'click', function(e) {...});
[You'll notice I put the negative sign where it's more logical as well.]
https://github.com/mrdoob/three.js/issues/5587
var vector = new THREE.Vector3();
var raycaster = new THREE.Raycaster();
var dir = new THREE.Vector3();
...
if ( camera instanceof THREE.OrthographicCamera ) {
vector.set( ( event.clientX / window.innerWidth ) * 2 - 1, - ( event.clientY / window.innerHeight ) * 2 + 1, - 1 ); // z = - 1 important!
vector.unproject( camera );
dir.set( 0, 0, - 1 ).transformDirection( camera.matrixWorld );
raycaster.set( vector, dir );
} else if ( camera instanceof THREE.PerspectiveCamera ) {
vector.set( ( event.clientX / window.innerWidth ) * 2 - 1, - ( event.clientY / window.innerHeight ) * 2 + 1, 0.5 ); // z = 0.5 important!
vector.unproject( camera );
raycaster.set( camera.position, vector.sub( camera.position ).normalize() );
}
var intersects = raycaster.intersectObjects( objects, recursiveFlag );
Objects = array of objects of type Object3D to check for intersection with the ray. Can be everything in your scene, but might be inefficient if you have a lot of stuff there.
recursiveFlag = If true, it also checks all descendants. Otherwise it only checks intersection with the object. Default is true.
docs
I wanted to share my experience in more detail, since I feel the answers above do not really explain the details needed to get this going.
In my case I was using STL_Viewer (https://www.viewstl.com/plugin/); it helps create a three.js scene and supplies you with the camera and scene that you will need for this to work.
function mouse_click_normalize(clientX,clientY,offset_left,offset_top){
var mouse = new THREE.Vector2(); // create once
var c = document.getElementById("stl_contEye").childNodes[0];//get canvas element created in here
mouse.x = ( (event.clientX-offset_left) / (c.clientWidth) * 2) - 1;
mouse.y = - ( (event.clientY-offset_top) / (c.clientHeight) ) * 2 + 1;
console.log(`clientX=${clientX},clientY=${clientY},mouse.x=${mouse.x},mouse.y=${mouse.y}\nwidth=${c.clientWidth},height=${c.clientHeight},offset_left=${offset_left},offset_top=${offset_top}`);
return mouse;
}
Above is my function that will automatically normalize the x, y coordinates of the click (to between -1 and 1), as this is what the raycaster requires for its setFromCamera function.
Because you may be clicking in a section of the screen with offsets, I programmed it this way so it will work no matter how the element is positioned in the DOM. Just replace "stl_contEye" with the name of the div that contains your rendered three.js scene, or the STLViewer in my case.
function model_clicked(model_id, e, distance, click_type){
console.log(e);
var pos = new THREE.Vector3(); // create once and reuse
// var vec = new THREE.Vector3(); // create once and reuse
var camera = stl_viewer1.camera;
var scene = stl_viewer1.scene;
// vec.unproject( camera );
// vec.sub( camera.position ).normalize();
// //var distance2 = (distance - camera.position.z) / vec.z;
// pos.copy( camera.position ).add( vec.multiplyScalar( distance ) );
var mouse_coords_normalized = mouse_click_normalize(e.clientX,e.clientY,e.srcElement.offsetLeft,e.srcElement.offsetTop);
var raycaster = new THREE.Raycaster(); // create once
raycaster.setFromCamera( mouse_coords_normalized, camera );
//console.log(raycaster);
//console.log(scene);
var intersects = raycaster.intersectObjects( scene.children, true );
if(intersects.length > 0){
console.log(intersects);
console.log(intersects[0].point);
pos = intersects[0].point
}
}
By doing it this way, you will get the exact point in 3D space where you clicked. The function model_clicked simply receives an event that holds clientX and clientY; you just have to get this yourself somehow if you are not using STLViewer's event for detecting a click. There are many examples of this in the other answers above.
I hope this helps someone trying to figure this out, with or without stl_viewer.
camera.shooting = Date.now()
document.getElementById("div5").addEventListener("mousedown", mousedown);
document.getElementById("div5").addEventListener("mouseup", mouseup);
document.getElementById("div5").addEventListener( 'mousemove', renderq);
function mousedown(event) {
camera.calculator = 1;
}
function mouseup(event) {
camera.calculator = 0;
}
function renderq(event){
if( ai2bcz.length > 0 && ai_key3.length > 0 ){
if( camera.calculator > 0 ){
camera.mouse = new THREE.Vector2();
var fleece = document.getElementById("div5").scrollHeight;
var fleeceb = document.getElementById("div5").scrollWidth;
var fees = (event.clientX / fleeceb) - 1;
var feesa = - (event.clientY / fleece) + 1;
camera.mouse.x = fees ; camera.mouse.y = feesa;
var sphereMaterialc = new THREE.MeshBasicMaterial({color: 0x0099FF});
var sphereGeoc = new THREE.SphereGeometry(5, 5, 5);
var spherec = new THREE.Mesh(sphereGeoc, sphereMaterialc);
spherec.position.set(camera.position.x, camera.position.y, camera.position.z);
spherec.raycaster = new THREE.Raycaster();
spherec.raycaster.setFromCamera( camera.mouse, camera);
spherec.owner = 'player';
spherec.health = 100;
bulletsc.push(spherec);
scene.add(spherec);
camera.lastshot = Date.now();
camera.shooting = Date.now();
}
}
}
function render() {
    const controlscamera = new FirstPersonControls(camera);
controlscamera.update(100);
if( ai2bcz.length > 0 && ai_key3.length > 0 ){
if( camera.calculator > 0 && camera.shooting + 25 < Date.now() ){
renderq(event)
camera.shooting = Date.now();
}}
if(bulletsc.length > 1){
for (var i = 0; i < bulletsc.length - 1; i++) {
var bu = bulletsc[i], pu = bu.position, du = bu.raycaster.ray.direction;
if(bu.owner == "player"){
var enemybulletspeeda = window.document.getElementById("bulletsplayerspeed").value;
bu.translateX(enemybulletspeeda * du.x);
bu.translateY(enemybulletspeeda * du.y);
bu.translateZ(enemybulletspeeda * du.z);
}
}}
renderer.render( scene, camera );
}
