I would like to figure out how to map out the controls for my Oculus Quest and other devices using three.js and WebXR. The code works: it lets me move the controllers, maps a cylinder to each controller, and lets me use the trigger to change the color of the cylinders. This is great, but I can't find any documentation on how to use the axis controls for the joystick, the grip, and the other buttons. Part of me wants to believe it's as simple as knowing which event to listen for, but I don't know what other events are available.
Here is a link to the tutorial I based this on: https://github.com/as-ideas/webvr-with-threejs
Please note that this code works as expected, but I don't know how to take it further and do more.
function createController(controllerID, videoinput) {
  //RENDER CONTROLLER AS YELLOW TUBE
  const controller = renderer.vr.getController(controllerID);
  const cylinderGeometry = new CylinderGeometry(0.025, 0.025, 1, 32);
  const cylinderMaterial = new MeshPhongMaterial({ color: 0xffff00 });
  const cylinder = new Mesh(cylinderGeometry, cylinderMaterial);
  cylinder.geometry.translate(0, 0.5, 0);
  cylinder.rotateX(-0.25 * Math.PI);
  controller.add(cylinder);
  cameraFixture.add(controller);

  //TRIGGER
  controller.addEventListener('selectstart', () => {
    if (controllerID === 0) {
      cylinderMaterial.color.set('pink');
    } else {
      cylinderMaterial.color.set('orange');
      videoinput.play();
    }
  });

  controller.addEventListener('selectend', () => {
    cylinderMaterial.color.set(0xffff00);
    videoinput.pause();
    console.log('I pressed play');
  });
}
As of three.js 0.119, integrated 'events' for the other buttons, trackpads, haptics, and thumbsticks of a touch controller are not provided; only the select and squeeze events are available. three.js follows a model of 'just working' regardless of what type of input device you have, and only provides events that every input device can produce (i.e. select).
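For reference, a minimal sketch of listening for those built-in events on a controller (the handler bodies are only illustrative):
//the only controller 'events' three.js r119 dispatches itself
const controller = renderer.xr.getController(0);
controller.addEventListener('selectstart', () => console.log('trigger pressed'));
controller.addEventListener('selectend', () => console.log('trigger released'));
controller.addEventListener('squeezestart', () => console.log('grip squeezed'));
controller.addEventListener('squeezeend', () => console.log('grip released'));
//everything else (thumbstick axes, A/B/X/Y buttons) is not surfaced as an event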
Luckily, we are not limited to what three.js has made available and can poll the controller data directly.
Touch controllers follow the 'gamepad' model and simply report their instantaneous values. We can poll the gamepad for the current values of its various buttons and axes, keep track of their state from frame to frame, and create 'events' in our own code for button pushes and for trackpad and thumbstick axis changes.
To access the instantaneous data from a touch controller while within a WebXR session:
let handedness = "unknown";
const session = renderer.xr.getSession();
let i = 0;
if (session) {
  for (const source of session.inputSources) {
    if (source && source.handedness) {
      handedness = source.handedness; //left or right controller
    }
    if (!source.gamepad) continue;
    const controller = renderer.xr.getController(i++);
    const old = prevGamePads.get(source); //prevGamePads is a Map holding last frame's data (see full example below)
    const data = {
      handedness: handedness,
      buttons: source.gamepad.buttons.map((b) => b.value),
      axes: source.gamepad.axes.slice(0)
    };
    //process data accordingly to create 'events'
  }
}
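As one illustration (not a drop-in snippet), the 'process data accordingly' step could compare the current values against the previous frame stored in prevGamePads to synthesize press and release events:
//Sketch: turn polled button values into press/release 'events'
if (old) {
  data.buttons.forEach((value, index) => {
    if (value === 1 && old.buttons[index] < 1) {
      console.log(data.handedness + " button " + index + " pressed");
    } else if (value < 1 && old.buttons[index] === 1) {
      console.log(data.handedness + " button " + index + " released");
    }
  });
}
prevGamePads.set(source, data); //remember this frame for the next comparison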
Haptic feedback is provided through a promise. (Note: not all browsers currently support WebXR haptic feedback, but the Oculus Browser and Firefox Reality on the Quest do.)
When available, the haptic feedback is produced through a promise:
var didPulse = sourceXR.gamepad.hapticActuators[0].pulse(0.8, 100);
//80% intensity for 100ms
//subsequent promises cancel any previous promise still underway
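A small helper (a sketch of my own, not part of the example code) can guard against browsers that do not expose a haptic actuator:
//Sketch: pulse a controller's haptic actuator only if the browser exposes one
function safePulse(inputSource, intensity, durationMs) {
  const gamepad = inputSource.gamepad;
  if (gamepad && gamepad.hapticActuators && gamepad.hapticActuators[0]) {
    return gamepad.hapticActuators[0].pulse(intensity, durationMs); //returns a promise
  }
  return Promise.resolve(false); //no haptics available
}
//usage: safePulse(sourceXR, 0.8, 100); //80% intensity for 100ms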
To demonstrate this solution I have modified the threejs.org/examples/#webxr_vr_dragging example by adding the camera to a 'dolly' that can be moved around with the touch controllers' thumbsticks while within a WebXR session, with various haptic feedback for events such as raycasting onto an object or moving the thumbstick axes.
For each frame, we poll the data from the touch controllers and respond accordingly. We have to store the data from frame to frame to detect changes and create our events, and filter out some data (false 0's, and up to 20% random drift from center in the thumbstick axis values on some controllers). For proper 'forward and sideways' dolly movement, the current heading and attitude of the WebXR camera is also needed each frame, accessed via:
let xrCamera = renderer.xr.getCamera(camera);
xrCamera.getWorldDirection(cameraVector);
//heading vector for webXR camera now within cameraVector
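For example (a sketch assuming the camera and controllers sit inside a 'dolly' group as in the full code below, with thumbstickValue standing in for the forward/back axis reading):
//Sketch: move the dolly along the headset's current heading
const moveSpeed = 0.05; //illustrative speed factor
dolly.position.x -= cameraVector.x * moveSpeed * thumbstickValue;
dolly.position.z -= cameraVector.z * moveSpeed * thumbstickValue;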
Example codepen here:
codepen.io/jason-buchheim/pen/zYqYGXM
With the 'ENTER VR' button exposed (debug view) here: cdpn.io/jason-buchheim/debug/zYqYGXM
Full code, with the modifications to the original three.js example highlighted with comment blocks:
//// From webxr_vr_dragging example https://threejs.org/examples/#webxr_vr_dragging
import * as THREE from "https://cdn.jsdelivr.net/npm/three@0.119.1/build/three.module.min.js";
import { OrbitControls } from "https://cdn.jsdelivr.net/npm/three@0.119.1/examples/jsm/controls/OrbitControls.min.js";
import { VRButton } from "https://cdn.jsdelivr.net/npm/three@0.119.1/examples/jsm/webxr/VRButton.min.js";
import { XRControllerModelFactory } from "https://cdn.jsdelivr.net/npm/three@0.119.1/examples/jsm/webxr/XRControllerModelFactory.min.js";
var container;
var camera, scene, renderer;
var controller1, controller2;
var controllerGrip1, controllerGrip2;
var raycaster,
intersected = [];
var tempMatrix = new THREE.Matrix4();
var controls, group;
////////////////////////////////////////
//// MODIFICATIONS FROM THREEJS EXAMPLE
//// a camera dolly to move camera within webXR
//// a vector to reuse each frame to store webXR camera heading
//// a variable to store previous frames polling of gamepads
//// a variable to store accumulated accelerations along axis with continuous movement
var dolly;
var cameraVector = new THREE.Vector3(); // create once and reuse it!
const prevGamePads = new Map();
var speedFactor = [0.1, 0.1, 0.1, 0.1];
////
//////////////////////////////////////////
init();
animate();
function init() {
container = document.createElement("div");
document.body.appendChild(container);
scene = new THREE.Scene();
scene.background = new THREE.Color(0x808080);
camera = new THREE.PerspectiveCamera(
50,
window.innerWidth / window.innerHeight,
0.1,
500 //MODIFIED FOR LARGER SCENE
);
camera.position.set(0, 1.6, 3);
controls = new OrbitControls(camera, container);
controls.target.set(0, 1.6, 0);
controls.update();
var geometry = new THREE.PlaneBufferGeometry(100, 100);
var material = new THREE.MeshStandardMaterial({
color: 0xeeeeee,
roughness: 1.0,
metalness: 0.0
});
var floor = new THREE.Mesh(geometry, material);
floor.rotation.x = -Math.PI / 2;
floor.receiveShadow = true;
scene.add(floor);
scene.add(new THREE.HemisphereLight(0x808080, 0x606060));
var light = new THREE.DirectionalLight(0xffffff);
light.position.set(0, 200, 0); // MODIFIED SIZE OF SCENE AND SHADOW
light.castShadow = true;
light.shadow.camera.top = 200; // MODIFIED FOR LARGER SCENE
light.shadow.camera.bottom = -200; // MODIFIED FOR LARGER SCENE
light.shadow.camera.right = 200; // MODIFIED FOR LARGER SCENE
light.shadow.camera.left = -200; // MODIFIED FOR LARGER SCENE
light.shadow.mapSize.set(4096, 4096);
scene.add(light);
group = new THREE.Group();
scene.add(group);
var geometries = [
new THREE.BoxBufferGeometry(0.2, 0.2, 0.2),
new THREE.ConeBufferGeometry(0.2, 0.2, 64),
new THREE.CylinderBufferGeometry(0.2, 0.2, 0.2, 64),
new THREE.IcosahedronBufferGeometry(0.2, 3),
new THREE.TorusBufferGeometry(0.2, 0.04, 64, 32)
];
for (var i = 0; i < 100; i++) {
var geometry = geometries[Math.floor(Math.random() * geometries.length)];
var material = new THREE.MeshStandardMaterial({
color: Math.random() * 0xffffff,
roughness: 0.7,
side: THREE.DoubleSide, // MODIFIED TO DoubleSide
metalness: 0.0
});
var object = new THREE.Mesh(geometry, material);
object.position.x = Math.random() * 200 - 100; // MODIFIED FOR LARGER SCENE
object.position.y = Math.random() * 100; // MODIFIED FOR LARGER SCENE
object.position.z = Math.random() * 200 - 100; // MODIFIED FOR LARGER SCENE
object.rotation.x = Math.random() * 2 * Math.PI;
object.rotation.y = Math.random() * 2 * Math.PI;
object.rotation.z = Math.random() * 2 * Math.PI;
object.scale.setScalar(Math.random() * 20 + 0.5); // MODIFIED FOR LARGER SCENE
object.castShadow = true;
object.receiveShadow = true;
group.add(object);
}
// renderer
renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.outputEncoding = THREE.sRGBEncoding;
renderer.shadowMap.enabled = true;
renderer.xr.enabled = true;
//the following increases the resolution on Quest
renderer.xr.setFramebufferScaleFactor(2.0);
container.appendChild(renderer.domElement);
document.body.appendChild(VRButton.createButton(renderer));
// controllers
controller1 = renderer.xr.getController(0);
controller1.name="left"; ////MODIFIED, added .name="left"
controller1.addEventListener("selectstart", onSelectStart);
controller1.addEventListener("selectend", onSelectEnd);
scene.add(controller1);
controller2 = renderer.xr.getController(1);
controller2.name="right"; ////MODIFIED added .name="right"
controller2.addEventListener("selectstart", onSelectStart);
controller2.addEventListener("selectend", onSelectEnd);
scene.add(controller2);
var controllerModelFactory = new XRControllerModelFactory();
controllerGrip1 = renderer.xr.getControllerGrip(0);
controllerGrip1.add(
controllerModelFactory.createControllerModel(controllerGrip1)
);
scene.add(controllerGrip1);
controllerGrip2 = renderer.xr.getControllerGrip(1);
controllerGrip2.add(
controllerModelFactory.createControllerModel(controllerGrip2)
);
scene.add(controllerGrip2);
//Raycaster Geometry
var geometry = new THREE.BufferGeometry().setFromPoints([
new THREE.Vector3(0, 0, 0),
new THREE.Vector3(0, 0, -1)
]);
var line = new THREE.Line(geometry);
line.name = "line";
line.scale.z = 50; //MODIFIED FOR LARGER SCENE
controller1.add(line.clone());
controller2.add(line.clone());
raycaster = new THREE.Raycaster();
////////////////////////////////////////
//// MODIFICATIONS FROM THREEJS EXAMPLE
//// create group named 'dolly' and add camera and controllers to it
//// will move dolly to move camera and controllers in webXR
dolly = new THREE.Group();
dolly.position.set(0, 0, 0);
dolly.name = "dolly";
scene.add(dolly);
dolly.add(camera);
dolly.add(controller1);
dolly.add(controller2);
dolly.add(controllerGrip1);
dolly.add(controllerGrip2);
////
///////////////////////////////////
window.addEventListener("resize", onWindowResize, false);
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function onSelectStart(event) {
var controller = event.target;
var intersections = getIntersections(controller);
if (intersections.length > 0) {
var intersection = intersections[0];
var object = intersection.object;
object.material.emissive.b = 1;
controller.attach(object);
controller.userData.selected = object;
}
}
function onSelectEnd(event) {
var controller = event.target;
if (controller.userData.selected !== undefined) {
var object = controller.userData.selected;
object.material.emissive.b = 0;
group.attach(object);
controller.userData.selected = undefined;
}
}
function getIntersections(controller) {
tempMatrix.identity().extractRotation(controller.matrixWorld);
raycaster.ray.origin.setFromMatrixPosition(controller.matrixWorld);
raycaster.ray.direction.set(0, 0, -1).applyMatrix4(tempMatrix);
return raycaster.intersectObjects(group.children);
}
function intersectObjects(controller) {
// Do not highlight when already selected
if (controller.userData.selected !== undefined) return;
var line = controller.getObjectByName("line");
var intersections = getIntersections(controller);
if (intersections.length > 0) {
var intersection = intersections[0];
////////////////////////////////////////
//// MODIFICATIONS FROM THREEJS EXAMPLE
//// check if in webXR session
//// if so, provide haptic feedback to the controller that raycasted onto object
//// (only if haptic actuator is available)
const session = renderer.xr.getSession();
if (session) { //only if we are in a webXR session
for (const sourceXR of session.inputSources) {
if (!sourceXR.gamepad) continue;
if (
sourceXR &&
sourceXR.gamepad &&
sourceXR.gamepad.hapticActuators &&
sourceXR.gamepad.hapticActuators[0] &&
sourceXR.handedness == controller.name
) {
var didPulse = sourceXR.gamepad.hapticActuators[0].pulse(0.8, 100);
}
}
}
////
////////////////////////////////
var object = intersection.object;
object.material.emissive.r = 1;
intersected.push(object);
line.scale.z = intersection.distance;
} else {
line.scale.z = 50; //MODIFIED AS OUR SCENE IS LARGER
}
}
function cleanIntersected() {
while (intersected.length) {
var object = intersected.pop();
object.material.emissive.r = 0;
}
}
function animate() {
renderer.setAnimationLoop(render);
}
function render() {
cleanIntersected();
intersectObjects(controller1);
intersectObjects(controller2);
////////////////////////////////////////
//// MODIFICATIONS FROM THREEJS EXAMPLE
//add gamepad polling for webxr to renderloop
dollyMove();
////
//////////////////////////////////////
renderer.render(scene, camera);
}
////////////////////////////////////////
//// MODIFICATIONS FROM THREEJS EXAMPLE
//// New dollyMove() function
//// this function polls gamepad and keeps track of its state changes to create 'events'
function dollyMove() {
var handedness = "unknown";
//determine if we are in an xr session
const session = renderer.xr.getSession();
let i = 0;
if (session) {
let xrCamera = renderer.xr.getCamera(camera);
xrCamera.getWorldDirection(cameraVector);
//a check to prevent console errors if only one input source
if (isIterable(session.inputSources)) {
for (const source of session.inputSources) {
if (source && source.handedness) {
handedness = source.handedness; //left or right controllers
}
if (!source.gamepad) continue;
const controller = renderer.xr.getController(i++);
const old = prevGamePads.get(source);
const data = {
handedness: handedness,
buttons: source.gamepad.buttons.map((b) => b.value),
axes: source.gamepad.axes.slice(0)
};
if (old) {
data.buttons.forEach((value, i) => {
//handlers for buttons
if (value !== old.buttons[i] || Math.abs(value) > 0.8) {
//check if it is 'all the way pushed'
if (value === 1) {
//console.log("Button" + i + "Down");
if (data.handedness == "left") {
//console.log("Left Paddle Down");
if (i == 1) {
dolly.rotateY(-THREE.Math.degToRad(1));
}
if (i == 3) {
//reset teleport to home position
dolly.position.x = 0;
dolly.position.y = 5;
dolly.position.z = 0;
}
} else {
//console.log("Right Paddle Down");
if (i == 1) {
dolly.rotateY(THREE.Math.degToRad(1));
}
}
} else {
// console.log("Button" + i + "Up");
if (i == 1) {
//use the paddle buttons to rotate
if (data.handedness == "left") {
//console.log("Left Paddle Down");
dolly.rotateY(-THREE.Math.degToRad(Math.abs(value)));
} else {
//console.log("Right Paddle Down");
dolly.rotateY(THREE.Math.degToRad(Math.abs(value)));
}
}
}
}
});
data.axes.forEach((value, i) => {
//handlers for thumbsticks
//if thumbstick axis has moved beyond the minimum threshold from center, windows mixed reality seems to wander up to about .17 with no input
if (Math.abs(value) > 0.2) {
//set the speedFactor per axis, with acceleration when holding above threshold, up to a max speed
speedFactor[i] > 1 ? (speedFactor[i] = 1) : (speedFactor[i] *= 1.001);
console.log(value, speedFactor[i], i);
if (i == 2) {
//left and right axis on thumbsticks
if (data.handedness == "left") {
// (data.axes[2] > 0) ? console.log('left on left thumbstick') : console.log('right on left thumbstick')
//move our dolly
//we reverse the vectors 90degrees so we can do straffing side to side movement
dolly.position.x -= cameraVector.z * speedFactor[i] * data.axes[2];
dolly.position.z += cameraVector.x * speedFactor[i] * data.axes[2];
//provide haptic feedback if available in browser
if (
source.gamepad.hapticActuators &&
source.gamepad.hapticActuators[0]
) {
var pulseStrength = Math.abs(data.axes[2]) + Math.abs(data.axes[3]);
if (pulseStrength > 0.75) {
pulseStrength = 0.75;
}
var didPulse = source.gamepad.hapticActuators[0].pulse(
pulseStrength,
100
);
}
} else {
// (data.axes[2] > 0) ? console.log('left on right thumbstick') : console.log('right on right thumbstick')
dolly.rotateY(-THREE.Math.degToRad(data.axes[2]));
}
controls.update();
}
if (i == 3) {
//up and down axis on thumbsticks
if (data.handedness == "left") {
// (data.axes[3] > 0) ? console.log('up on left thumbstick') : console.log('down on left thumbstick')
dolly.position.y -= speedFactor[i] * data.axes[3];
//provide haptic feedback if available in browser
if (
source.gamepad.hapticActuators &&
source.gamepad.hapticActuators[0]
) {
var pulseStrength = Math.abs(data.axes[3]);
if (pulseStrength > 0.75) {
pulseStrength = 0.75;
}
var didPulse = source.gamepad.hapticActuators[0].pulse(
pulseStrength,
100
);
}
} else {
// (data.axes[3] > 0) ? console.log('up on right thumbstick') : console.log('down on right thumbstick')
dolly.position.x -= cameraVector.x * speedFactor[i] * data.axes[3];
dolly.position.z -= cameraVector.z * speedFactor[i] * data.axes[3];
//provide haptic feedback if available in browser
if (
source.gamepad.hapticActuators &&
source.gamepad.hapticActuators[0]
) {
var pulseStrength = Math.abs(data.axes[2]) + Math.abs(data.axes[3]);
if (pulseStrength > 0.75) {
pulseStrength = 0.75;
}
var didPulse = source.gamepad.hapticActuators[0].pulse(
pulseStrength,
100
);
}
}
controls.update();
}
} else {
//axis below threshold - reset the speedFactor if it is greater than zero or 0.025 but below our threshold
if (Math.abs(value) > 0.025) {
speedFactor[i] = 0.025;
}
}
});
}
///store this frame's data to compare with in the next frame
prevGamePads.set(source, data);
}
}
}
}
function isIterable(obj) { //function to check if object is iterable
// checks for null and undefined
if (obj == null) {
return false;
}
return typeof obj[Symbol.iterator] === "function";
}
////
/////////////////////////////////////
I am loading a model through the GLTF loader. I want to select a mesh on mouse hover. Everything is working, but the main problem is that when hovering, the color changes on every material that has the same name (as far as I can tell). When I debug, INTERSECTED returns a single material. I don't know why this is happening. After a lot of research I am asking this question here.
Please see my code below.
<div id="ThreeJS" style="position: absolute; left:0px; top:0px"></div>
var container, scene, camera, renderer, controls, stats;
var clock = new THREE.Clock();
var xyzz;
// custom global variables
var cube;
var projector,
mouse = {
x: 0,
y: 0
},
INTERSECTED;
init();
animate();
// FUNCTIONS
function init() {
// SCENE
scene = new THREE.Scene();
// CAMERA
var SCREEN_WIDTH = window.innerWidth,
SCREEN_HEIGHT = window.innerHeight;
var VIEW_ANGLE = 45,
ASPECT = SCREEN_WIDTH / SCREEN_HEIGHT,
NEAR = 0.1,
FAR = 20000;
camera = new THREE.PerspectiveCamera(VIEW_ANGLE, ASPECT, NEAR, FAR);
scene.add(camera);
camera.position.set(0, 0, 0);
camera.lookAt(scene.position);
renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setSize(SCREEN_WIDTH, SCREEN_HEIGHT);
container = document.getElementById("ThreeJS");
container.appendChild(renderer.domElement);
// EVENTS
// CONTROLS
controls = new THREE.OrbitControls(camera, renderer.domElement);
// STATS
stats = new Stats();
stats.domElement.style.position = "absolute";
stats.domElement.style.bottom = "0px";
stats.domElement.style.zIndex = 100;
container.appendChild(stats.domElement);
// LIGHT
const skyColor = 0xb1e1ff; // light blue
const groundColor = 0xb97a20; // brownish orange
const intensity = 5;
const light = new THREE.HemisphereLight(
skyColor,
groundColor,
intensity
);
scene.add(light);
scene.background = new THREE.Color("#fff");
// GLTF Loader
function frameArea(sizeToFitOnScreen, boxSize, boxCenter, camera) {
const halfSizeToFitOnScreen = sizeToFitOnScreen * 0.5;
const halfFovY = THREE.Math.degToRad(camera.fov * 0.5);
const distance = halfSizeToFitOnScreen / Math.tan(halfFovY);
// compute a unit vector that points in the direction the camera is now
// in the xz plane from the center of the box
const direction = new THREE.Vector3()
.subVectors(camera.position, boxCenter)
.multiply(new THREE.Vector3(1, 0, 1))
.normalize();
// move the camera to a position distance units way from the center
// in whatever direction the camera was from the center already
camera.position.copy(
direction.multiplyScalar(distance).add(boxCenter)
);
// pick some near and far values for the frustum that
// will contain the box.
camera.near = boxSize / 100;
camera.far = boxSize * 100;
camera.updateProjectionMatrix();
// point the camera to look at the center of the box
// camera.position.set(0, 150, 400);
camera.lookAt(boxCenter.x, boxCenter.y, boxCenter.z);
}
var loader = new THREE.GLTFLoader();
loader.load(
// resource URL
"models/gltf/DamagedHelmet/glTF/50423_ Revit Model.gltf",
// called when the resource is loaded
function(gltf) {
const root = gltf.scene;
scene.add(root);
// console.log(dumpObject(root).join("\n"));
const box = new THREE.Box3().setFromObject(root);
const boxSize = box.getSize(new THREE.Vector3()).length();
const boxCenter = box.getCenter(new THREE.Vector3());
// set the camera to frame the box
frameArea(boxSize * 1, boxSize, boxCenter, camera);
// update the Trackball controls to handle the new size
controls.maxDistance = boxSize * 10;
controls.target.copy(boxCenter);
controls.update();
},
// called while loading is progressing
function(xhr) {
console.log((xhr.loaded / xhr.total) * 100 + "% loaded");
},
// called when loading has errors
function(error) {
debugger;
console.log("An error happened");
}
);
projector = new THREE.Projector();
// when the mouse moves, call the given function
document.addEventListener("mousemove", onDocumentMouseMove, false);
}
function onDocumentMouseMove(event) {
// update the mouse variable
mouse.x = (event.clientX / window.innerWidth) * 2 - 1;
mouse.y = -(event.clientY / window.innerHeight) * 2 + 1;
}
function animate() {
requestAnimationFrame(animate);
render();
update();
}
function update() {
// find intersections
// create a Ray with origin at the mouse position
// and direction into the scene (camera direction)
var vector = new THREE.Vector3(mouse.x, mouse.y, 1);
vector.unproject(camera);
var ray = new THREE.Raycaster(
camera.position,
vector.sub(camera.position).normalize()
);
// create an array containing all objects in the scene with which the ray intersects
var intersects = ray.intersectObjects(scene.children, true);
// INTERSECTED = the object in the scene currently closest to the camera
// and intersected by the Ray projected from the mouse position
// if there is one (or more) intersections
if (intersects.length > 0) {
// if the closest object intersected is not the currently stored intersection object
if (intersects[0].object != INTERSECTED) {
// restore previous intersection object (if it exists) to its original color
if (INTERSECTED) {
INTERSECTED.material.color.setHex(INTERSECTED.currentHex);
}
// store reference to closest object as current intersection object
INTERSECTED = intersects[0].object;
console.log(INTERSECTED);
// store color of closest object (for later restoration)
INTERSECTED.currentHex = INTERSECTED.material.color.getHex();
// set a new color for closest object
INTERSECTED.material.color.setHex(0xffff00);
}
}
// there are no intersections
else {
// restore previous intersection object (if it exists) to its original color
if (INTERSECTED)
INTERSECTED.material.color.setHex(INTERSECTED.currentHex);
// remove previous intersection object reference
// by setting current intersection object to "nothing"
INTERSECTED = null;
}
controls.update();
stats.update();
}
function render() {
renderer.render(scene, camera);
}
function dumpObject(obj, lines = [], isLast = true, prefix = "") {
const localPrefix = isLast ? "└─" : "├─";
lines.push(
`${prefix}${prefix ? localPrefix : ""}${obj.name || "*no-name*"} [${
obj.type
}]`
);
const newPrefix = prefix + (isLast ? " " : "│ ");
const lastNdx = obj.children.length - 1;
obj.children.forEach((child, ndx) => {
const isLast = ndx === lastNdx;
dumpObject(child, lines, isLast, newPrefix);
});
return lines;
}
Please help me out.
I didn't read through all of the code, but I think this might already help:
In your intersection handler, you are updating the color of the material assigned to the object (INTERSECTED.material.color.setHex(...)). This causes the problem you describe, because identical materials are very likely reused for multiple objects. To prevent that, you could use a dedicated highlight material:
const highlightMaterial = new MeshStandardMaterial(...);
and instead of just updating the color, replace the material:
INTERSECTED.originalMaterial = INTERSECTED.material;
INTERSECTED.material = highlightMaterial;
Restore the original when "unhighlighting" the object:
INTERSECTED.material = INTERSECTED.originalMaterial;
delete INTERSECTED.originalMaterial;
If you need the highlightMaterial to retain other material-properties from the original, you can do this to copy over all material properties beforehand:
highlightMaterial.copy(INTERSECTED.material);
highlightMaterial.color.copy(highlightColor);
So I am writing a bit of stuff in Three.js and I seem to have hit a stump with the camera. I'm attempting to attach the camera to an imported model object, and it would seem that it IS attaching; however, shadows are negated and the distance is far off from the actual field I've created, along with some other annoying issues, like the Orbit controls being inverted and non-functional. Here is my code (with certain things blocked out because I'm hotlinking script files hosted on my server):
// This is basically everything to setup for a basic THREE.JS field to do our work in
var scene = new THREE.Scene(); // Empty Space
var camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 0.1, 1000); // Perspective Camera (Args, FOV, Aspect = W/H, Min View Dist, Max View Dist)
//var controls = new THREE.OrbitControls(camera); // We will use this to look around
camera.position.set(0, 2, 5); // Note that depth into positon is mainly the opposite of where you normally want it to be.
camera.rotation.x = -0.3 // This is an attempt to rotate the angle of the camera off of an axis
var renderer = new THREE.WebGLRenderer({antialias: true}); // Our Renderer + Antialiasing
renderer.shadowMap.enabled = true; // This allows shadows to work in our 3D animation
renderer.shadowMap.type = THREE.PCFSoftShadowMap; // This one isn't as blocky as THREE.PCFShadowMap
renderer.setClearColor("#00CCCC"); // Note: same as 0x000000
renderer.setSize(window.innerWidth, window.innerHeight); // Renderer Dimensions
document.getElementById("container").appendChild(renderer.domElement); // Add our renderer creation to our div named "container"
// Lighting (It's not necessary but it looks cool!)
var light = new THREE.PointLight("#FFFFFF", 5, 1000); // Color, intensity, range(lighting will not exceed render distance)
light.castShadow = true;
light.position.set(5, 5, 0); // This will treat the light coming from an angle!
scene.add(light);
light.shadow.mapSize.width = 512;
light.shadow.mapSize.height = 512;
light.shadow.camera.near = 0.5;
light.shadow.camera.far = 500;
// We will make a cube here
var cubeGeo = new THREE.BoxGeometry(1, 1, 1); // This is the shape, width, height and length of our cube. Note BoxGeometry IS the current shape!
var cubeMat = new THREE.MeshStandardMaterial({color: "#FF0000"}); // Create a basic mesh with undefined color, you can also use a singular color using Basic rather than Normal, There is also Lambert and Phong. Lambert is more of a Matte material while Phong is more of a gloss or shine effect.
var cube = new THREE.Mesh(cubeGeo, cubeMat); // Create the object with defined dimensions and colors!
cube.castShadow = true; // This will allow our cube to cast a shadow outward.
cube.receiveShadow = false; // This will make our cube not receive shadows from other objects (although it isn't needed because it's the default, you should make a habit of writing it anyway, as some things default to true!)
scene.add(cube); // scene.add(object) is what we will use for almost every object we create in THREE.JS
//cube.add(camera); // This is an attempt to attach the camera to the cube...
// Loader
var ship;
var loader = new THREE.GLTFLoader();
loader.load("http://ipaddress:port/files/models/raven/scene.gltf", function(gltf) {
scene.add(gltf.scene);
ship = gltf.scene;
ship.scale.multiplyScalar(0.005);
ship.rotation.y = Math.PI;
}, undefined, function(error) {
console.error(error);
});
// Lest make a floor to show the shadow!
var floorGeo = new THREE.BoxGeometry(1000, 0.1, 1000);
var floorMat = new THREE.MeshStandardMaterial({color: "#0000FF"});
var floor = new THREE.Mesh(floorGeo, floorMat);
floor.receiveShadow = true; // This will allow the shadow from the cube to portray itself on it.
floor.position.set(0, -3, 0);
scene.add(floor);
// Now let's create an object on the floor so that we can distance ourself from our starting point.
var buildingGeo = new THREE.BoxGeometry(10, 100, 10);
var buildingMat = new THREE.MeshNormalMaterial();
var building = new THREE.Mesh(buildingGeo, buildingMat);
building.position.z = -100;
scene.add(building);
var rotation = 0;
// Controls
var keyState = {};
window.addEventListener('keydown',function(e){
keyState[e.keyCode || e.which] = true;
},true);
window.addEventListener('keyup',function(e){
keyState[e.keyCode || e.which] = false;
},true);
document.addEventListener("keydown", function(event) {
console.log(event.which);
});
var camAdded = false;
var render = function() {
requestAnimationFrame(render); // This grabs the browsers frame animation function.
if (rotation == 1) {
ship.rotation.x += 0.01; // rotation is treated similarly to how two dimensional objects' location is treated
ship.rotation.y += 0.01; // however it will be based on an axis point plus the width/height and subtract but keep it's indice location!
ship.rotation.z += 0.01;
}
if (keyState[87]) { // Up
ship.rotateX(0.01);
}
if (keyState[83]) { // Down
ship.rotateX(-0.01);
}
if (keyState[65]) { // Left
ship.rotateY(0.03);
}
if (keyState[68]) { // Right
ship.rotateY(-0.03);
}
if (keyState[81]) {
ship.rotateZ(0.1);
}
if (keyState[69]) {
ship.rotateZ(-0.1);
}
if (keyState[82]) { // Reset
for (var i = 0; i < 10; i++) {
if (!ship.rotation.x == 0) {
if (ship.rotation.x > 0) {
ship.rotation.x -= 0.005;
} else if (ship.rotation.x < 0){
ship.rotation.x += 0.005;
}
}
if (!ship.rotation.z == 0) {
if (ship.rotation.z > 0) {
ship.rotation.z -= 0.01;
} else if (ship.rotation.z < 0){
ship.rotation.z += 0.01;
}
}
}
}
ship.translateZ(0.2); // This will translate our ship forward in the direction it's currently facing so that it will look as if it is flying.
renderer.render(scene, camera); // This will render the scene after the effects have changed (rotation!)
window.addEventListener('resize', onWindowResize, false);
}
render(); // Finally, we need to loop the animation, otherwise our object will not move on its own!
function onWindowResize() {
var sceneWidth = window.innerWidth - 20;
var sceneHeight = window.innerHeight - 20;
renderer.setSize(sceneWidth, sceneHeight);
camera.aspect = sceneWidth / sceneHeight;
camera.updateProjectionMatrix();
}
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Basic Three.JS</title>
</head>
<body style="background-color: #2B2B29; color: #FFFFFF; text-align: center;">
<div id="container"></div>
<script>
window.onload = function() {
document.getElementById("container").width = window.innerWidth - 20;
document.getElementById("container").height = window.innerHeight - 20;
}
</script>
<script src="http://ipaddress:port/files/scripts/three.min.js"></script>
<script src="http://ipaddress:port/files/scripts/GLTFLoader.js"></script>
<script src="http://ipaddress:port/files/scripts/OrbitControls.js"></script>
<script src="http://ipaddress:port/files/scripts/basicthree.js"></script> <!-- This is the code below -->
</body>
</html>
Never mind, I have found a solution, shoddy as it may be...
if (typeof ship != "undefined") {
// Previous code inside of the main three.js loop...
ship.translateZ(0.2); // Move ship
camera.position.set(ship.position.x, ship.position.y, ship.position.z); // Set the camera's position to the ships position
camera.translateZ(10); // Push the camera back a bit so it's not inside the ship
camera.rotation.set(ship.rotation.x, ship.rotation.y, ship.rotation.z); // Set the camera's rotation to be the exact same as the ship's
camera.rotateX(0.3); // Tilt the camera downwards so that it's viewing over the ship
camera.rotateY(Math.PI); // Flip the camera so it's not facing the head of the ship model.
// Note: many bits of code I have are inverted due to the ship's model being backwards (or so it seems)...
}
Based on a previous question I had recently posted:
How to create lines between nearby particles in ThreeJS?
I was able to create individual lines joining nearby particles. However, the lines are being drawn twice because of the logic of the particle system. This is because of how the original 2D particle system worked:
https://awingit.github.io/particles/
This also draws the lines twice: for each pair of connected particles, the line is drawn once from each particle.
I do not think this is ideal for performance. How would I draw a line only once for each pair of joined points?
P.S. Here is exactly the effect I would like to achieve, but cannot make sense of the code:
http://freelance-html-developer.com/clock/
I would like to understand the fundamental logic.
UPDATE:
I have created a jsfiddle with my progress.
var canvas, canvasDom, ctx, scene, renderer, camera, controls, geocoder, deviceOrientation = false;
var width = 800,
height = 600;
var particleCount = 20;
var pMaterial = new THREE.PointsMaterial({
color: 0x000000,
size: 0.5,
blending: THREE.AdditiveBlending,
//depthTest: false,
//transparent: true
});
var particles = new THREE.Geometry;
var particleSystem;
var line;
var lines = {};
var lineGroup = new THREE.Group();
var lineMaterial = new THREE.LineBasicMaterial({
color: 0x000000,
linewidth: 1
});
var clock = new THREE.Clock();
var maxDistance = 15;
function init() {
canvasDom = document.getElementById('canvas');
setupStage();
setupRenderer();
setupCamera();
setupControls();
setupLights();
clock.start();
window.addEventListener('resize', onWindowResized, false);
onWindowResized(null);
createParticles();
scene.add(lineGroup);
animate();
}
function setupStage() {
scene = new THREE.Scene();
}
function setupRenderer() {
renderer = new THREE.WebGLRenderer({
canvas: canvasDom,
logarithmicDepthBuffer: true
});
renderer.setSize(width, height);
renderer.setClearColor(0xfff6e6);
}
function setupCamera() {
camera = new THREE.PerspectiveCamera(70, width / height, 1, 10000);
camera.position.set(0, 0, -60);
}
function setupControls() {
if (deviceOrientation) {
controls = new THREE.DeviceOrientationControls(camera);
controls.connect();
} else {
controls = new THREE.OrbitControls(camera, renderer.domElement);
controls.target = new THREE.Vector3(0, 0, 0);
}
}
function setupLights() {
var light1 = new THREE.AmbientLight(0xffffff, 0.5); // soft white light
var light2 = new THREE.PointLight(0xffffff, 1, 0);
light2.position.set(100, 200, 100);
scene.add(light1);
scene.add(light2);
}
function animate() {
requestAnimationFrame(animate);
controls.update();
animateParticles();
updateLines();
render();
}
function render() {
renderer.render(scene, camera);
}
function onWindowResized(event) {
width = window.innerWidth;
height = window.innerHeight;
camera.aspect = width / height;
camera.updateProjectionMatrix();
renderer.setSize(width, height);
}
function createParticles() {
for (var i = 0; i < particleCount; i++) {
var pX = Math.random() * 50 - 25,
pY = Math.random() * 50 - 25,
pZ = Math.random() * 50 - 25,
particle = new THREE.Vector3(pX, pY, pZ);
particle.diff = Math.random() + 0.2;
particle.default = new THREE.Vector3(pX, pY, pZ);
particle.offset = new THREE.Vector3(0, 0, 0);
particle.velocity = {};
particle.velocity.y = particle.diff * 0.5;
particle.nodes = [];
particles.vertices.push(particle);
}
particleSystem = new THREE.Points(particles, pMaterial);
particleSystem.position.y = 0;
scene.add(particleSystem);
}
function animateParticles() {
var pCount = particleCount;
while (pCount--) {
var particle = particles.vertices[pCount];
var move = Math.sin(clock.getElapsedTime() * (1 * particle.diff)) / 4;
particle.offset.y += move * particle.velocity.y;
particle.y = particle.default.y + particle.offset.y;
detectCloseByPoints(particle);
}
particles.verticesNeedUpdate = true;
particleSystem.rotation.y += 0.01;
lineGroup.rotation.y += 0.01;
}
function updateLines() {
for (var _lineKey in lines) {
if (!lines.hasOwnProperty(_lineKey)) {
continue;
}
lines[_lineKey].geometry.verticesNeedUpdate = true;
}
}
function detectCloseByPoints(p) {
var _pCount = particleCount;
while (_pCount--) {
var _particle = particles.vertices[_pCount];
if (p !== _particle) {
var _distance = p.distanceTo(_particle);
var _connection = checkConnection(p, _particle);
if (_distance < maxDistance) {
if (!_connection) {
createLine(p, _particle);
}
} else if (_connection) {
removeLine(_connection);
}
}
}
}
function checkConnection(p1, p2) {
var _childNode, _parentNode;
_childNode = p1.nodes[particles.vertices.indexOf(p2)] || p2.nodes[particles.vertices.indexOf(p1)];
if (_childNode && _childNode !== undefined) {
_parentNode = (_childNode == p1) ? p2 : p1;
}
if (_parentNode && _parentNode !== undefined) {
return {
parent: _parentNode,
child: _childNode,
lineId: particles.vertices.indexOf(_parentNode) + '-' + particles.vertices.indexOf(_childNode)
};
} else {
return false;
}
}
function removeLine(_connection) {
// Could animate line out
var childIndex = particles.vertices.indexOf(_connection.child);
_connection.parent.nodes.splice(childIndex, 1);
deleteLine(_connection.lineId);
}
function deleteLine(_id) {
lineGroup.remove(lines[_id]);
delete lines[_id];
}
function addLine(points) {
var points = points || [new THREE.Vector3(Math.random() * 10, Math.random() * 10, Math.random() * 10), new THREE.Vector3(0, 0, 0)];
var _lineId = particles.vertices.indexOf(points[0]) + '-' + particles.vertices.indexOf(points[1]);
var lineGeom = new THREE.Geometry();
if (!lines[_lineId]) {
lineGeom.dynamic = true;
lineGeom.vertices.push(points[0]);
lineGeom.vertices.push(points[1]);
var curLine = new THREE.Line(lineGeom, lineMaterial);
curLine.touched = false;
lines[_lineId] = curLine;
lineGroup.add(curLine);
return curLine;
} else {
return false;
}
}
function createLine(p1, p2) {
p1.nodes[particles.vertices.indexOf(p2)] = p2;
addLine([p1, p2]);
}
$(document).ready(function() {
init();
});
I am really close, but I am not sure if it's optimized. There seem to be flickering lines, and sometimes a line just stays stuck in place.
So here are my thoughts. I realized that all I have to do is make the Vector3 points of the lines equal to the relevant particle Vector3 points; then I just need to set each line's geometry.verticesNeedUpdate = true;
Also, the way I manage the lines is to create a unique ID from the indexes of the two points, e.g. lines['8-2'] = line
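For example, the ID could be made independent of which particle is visited first by sorting the two indexes, something like:
//Sketch: build the same key for a pair no matter the visiting order
function lineIdFor(p1, p2) {
  var i1 = particles.vertices.indexOf(p1);
  var i2 = particles.vertices.indexOf(p2);
  return Math.min(i1, i2) + '-' + Math.max(i1, i2); //lineIdFor(a, b) === lineIdFor(b, a)
}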
The problem you're actually trying to solve is that while looping through your list of points, you're doubling the number of successful matches.
Example:
Consider a list of points, [A, B, C, D]. Your looping tests each point against all other points. For this example, A and C are the only points close enough to be considered nearby.
During the first iteration, A vs. all, you find that A and C are nearby, so you add a line. But when you're doing your iteration for C, you also find that A is nearby. This causes the second line, which you want to avoid.
Fixing it:
The solution is simple: don't re-visit nodes you have already checked. This works because the distance from A to C is the same as the distance from C to A.
The best way to do this is to adjust the indexing of your check loop:
// (Note: This is example code, and won't "just work.")
// (Note: This is example code, and won't "just work.")
for (var check = 0, checkLength = nodes.length; check < checkLength; ++check) {
  for (var against = check + 1; against < checkLength; ++against) {
    if (nodes[check].distanceTo(nodes[against]) < delta) {
      buildThatLine(nodes[check], nodes[against]);
    }
  }
}
In the inner loop, the indexing is set to:
Skip the current node
Skip all nodes before the current node.
This is done by initializing the inner indexing to the outer index + 1.
Caveat:
This particular logic assumes that you discard all your lines for every frame. It's not the most efficient way to achieve the effect, but I'll leave making it more efficient as an exercise for you.
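Applied to the fiddle above (a sketch that reuses its checkConnection, createLine, removeLine and maxDistance helpers; untested), the per-frame check could become a single pass over unordered pairs:
//Sketch: visit each unordered pair of particles exactly once per frame
function connectNearbyPoints() {
  var vertices = particles.vertices;
  for (var a = 0; a < vertices.length; a++) {
    for (var b = a + 1; b < vertices.length; b++) {
      var connection = checkConnection(vertices[a], vertices[b]);
      if (vertices[a].distanceTo(vertices[b]) < maxDistance) {
        if (!connection) {
          createLine(vertices[a], vertices[b]);
        }
      } else if (connection) {
        removeLine(connection);
      }
    }
  }
}
//call connectNearbyPoints() once per frame instead of calling detectCloseByPoints(particle) for every particle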
I am using three.js to draw an image on the canvas, collect data from this image (i.e. pixel color) and redraw the image as a collection of particles using the data collected from the image, such as the colors.
I have zero error messages or warnings, just a blank, black canvas.
The code I am using is below:
var xhr = new XMLHttpRequest();
xhr.open("GET", "http://example.com/assets/css/sl.jpg");
xhr.responseType = "blob";
xhr.onload = function()
{
blob = xhr.response;
P.readAsDataURL(blob);
P.onload = function(){
image = document.createElement("img");
image.src = P.result;
setTimeout(function(){
// split the image
addParticles();
}, 100);
}
}
xhr.send();
addLights();
update();
setTimeout(function(){
holdAtOrigin = "next";
},1000)
function addParticles()
{
// draw in the image, and make sure it fits the canvas size :)
var ratio = 1 / Math.max(image.width/500, image.height/500);
var scaledWidth = image.width * ratio;
var scaledHeight = image.height * ratio;
context.drawImage(image,
0,0,image.width,image.height,
(500 - scaledWidth) * .5, (500 - scaledHeight) *.5, scaledWidth, scaledHeight);
// now set up the particle material
var material = new THREE.MeshPhongMaterial( { } );
var geometry = new THREE.Geometry();
var pixels = context.getImageData(0,0,WIDTH,HEIGHT);
var step = DENSITY * 4;
var x = 0, y = 0;
// go through the image pixels
for(x = 0; x < WIDTH * 4; x+= step)
{
for(y = HEIGHT; y >= 0 ; y -= DENSITY)
{
var p = ((y * WIDTH * 4) + x);
// grab the actual data from the
// pixel, ignoring any transparent ones
if(pixels.data[p+3] > 0)
{
var pixelCol = (pixels.data[p] << 16) + (pixels.data[p+1] << 8) + pixels.data[p+2];
var color = new THREE.Color(pixelCol);
var vector = new THREE.Vector3(-300 + x/4, 240 - y, 0);
// push on the particle
geometry.vertices.push(new THREE.Vector3(vector));
geometry.colors.push(color);
}
}
}
// now create a new system
particleSystem = new THREE.Points(geometry, material);
console.log(particleSystem);
particleSystem.sortParticles = true;
// grab a couple of cacheable vals
particles = particleSystem.geometry.vertices;
colors = particleSystem.geometry.colors;
// add some additional vars to the
// particles to ensure we can do physics
// and so on
var ps = particles.length;
while(ps--)
{
var particle = particles[ps];
particle.velocity = new THREE.Vector3();
particle.mass = 5;
particle.origPos = particle.x.clone();
}
// gc and add
pixels = null;
scene.add(particleSystem);
//test render
}
function addLights()
{
// point
pointLight = new THREE.PointLight( 0xFFFFFF );
pointLight.position.x = 300;
pointLight.position.y = 300;
pointLight.position.z = 600;
scene.add( pointLight );
// directional
directionalLight = new THREE.DirectionalLight( 0xFFFFFF );
directionalLight.position.x = -.5;
directionalLight.position.y = -1;
directionalLight.position.z = -.5;
directionalLight.position.normalize();
directionalLight.intensity = .6;
scene.add( directionalLight );
}
function update(){
var ps = particles.length;
while(ps--)
{
var particle = particles[ps];
// if we are holding at the origin
// values, tween the particles back
// to where they should be
if(holdAtOrigin == "start")
{
particle.velocity = new THREE.Vector3();
//particle.position.x += (particle.origPos.x - particle.position.x) * .2;
//particle.position.y += (particle.origPos.y - particle.position.y) * .2;
//particle.position.z += (particle.origPos.z - particle.position.z) * .2;
particle.x.x += (particle.origPos.x - .0000000000000000001) * 2;
particle.x.y += (particle.origPos.y - .0000000000000000001) * 2;
}
else if (holdAtOrigin == "next")
{
particle.velocity = new THREE.Vector3();
particle.x.x += (particle.origPos.x - particle.x.x) * .2;
particle.x.y += (particle.origPos.y - particle.x.y) * .2;
particle.x.z += (particle.origPos.z - particle.x.z) * .2;
}
else{
// get the particles colour and put
// it into an array
var col = colors[ps];
var colArray = [col.r, col.g, col.b];
// go through each component colour
for(var i = 0; i < colArray.length; i++)
{
// only analyse it if actually
// has some of this colour
if(colArray[i] > 0)
{
// get the target based on where it
// is in the array
var target = i == 0 ? redCentre :
i == 1 ? greenCentre :
blueCentre;
// get the distance of the particle to the centre in question
// and add on the resultant acceleration
var dist = particle.position.distanceToSquared(target.position),
force = ((particle.mass * target.mass) / dist) * colArray[i] * AGGRESSION,
acceleration = (new THREE.Vector3())
.sub(target.position,particle.position)
.normalize()
.multiplyScalar(force);
// if we are attracting we add
// the velocity
if(mode == ATTRACT)
{
// note we only need to check the
// squared radius for the collision :)
if(dist > target.boundRadiusSquared) {
particle.velocity.addSelf(acceleration);
}
else if (bounceParticles) {
// bounce, bounce, bounce
particle.velocity.negate();
}
else {
// stop dead
particle.velocity = new THREE.Vector3();
}
}
else {
// push it away
particle.velocity.subSelf(acceleration);
}
particle.position.addSelf(particle.velocity);
}
}
}
}
// set up a request for a render
requestAnimationFrame(update);
render();
}
function render()
{
// only render if we have
// an active image
if(image) {
if(holdAtOrigin=="start")
{
camera.position.z = 900;
}
if(camera.position.z < 200)
{
//do nothing
}
else{
camera.position.z -= 1.7;
};
renderer.render( scene, camera );
}
}
I checked the console log at various intervals and found that the pixel data is being collected appropriately, so I don't know what is wrong.
Is it the material? When I used a normal (light-independent) material, the code worked as expected and I could see my particles.
But I wanted it to be affected by lights, so I changed it to var material = new THREE.MeshPhongMaterial( { } ); without any arguments.
Is this my problem or is it elsewhere in the code?
Thank you!
This may also be pertinent: How to get the absolute position of a vertex in three.js?
Because particle.x.x or particle.x.y doesn't look right to me, even though I wrote that code based on logged object contents.
EDIT: I changed the Phong line to THREE.PointsMaterial and amped up the potency of the light, but I still get a blank, black canvas.
EDIT 2: So I think it may be a problem with the particle coordinates being misconstrued. When I inspect using console.log(particleSystem); I see the structure described below.
Did it use to be that the x, y, z were wrapped in a position property that newer versions of three.js have removed?
For example I've found example code like:
particle.origPos = particle.position.clone();
But I don't see a position property. How would I clone just the x, y and z bits, or should I clone the whole vertex? Sorry if this is confusing or irrelevant; I'm just trying to chase down why I have a blank canvas.
EDIT 3: I've removed the update function's position alterations, but I still get a weird console log for the particle system, even when all I am doing is cloning the particle using particle.origPos = particle.clone();
Basically I have x, y and z properties, but the x property is an object with its own x, y and z inside. Why is this, and how do I fix it?
An example of what I'm trying to achieve: https://workshop.chromeexperiments.com/examples/guiVR/#1--Basic-Usage
How could I get the Google Cardboard crosshair ((gaze) pointer, reticle, whatever you want to call it) effect in three.js? I would like to make a dot, as a crosshair, in the center of the screen in my scene, and then use a raycaster to identify what I'm looking at in VR. Which way would be best to go here?
Do I fake an X and Y position for my mouse? I found that other people have answered how to do this by adding an event listener to the mousemove event, but that works on desktop, and I want to bring this to mobile.
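In other words, something like casting a ray from the center of the screen in normalized device coordinates (a rough sketch of the idea):
//Sketch: raycast from the exact center of the view instead of the mouse position
const raycaster = new THREE.Raycaster();
raycaster.setFromCamera(new THREE.Vector2(0, 0), camera); //(0, 0) is the screen center in NDC
const hits = raycaster.intersectObjects(scene.children, true);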
Here are the main parts needed for building your own gaze cursor:
An object that serves as the indicator (cursor) for user feedback
An array of objects that you want the cursor to interact with
A loop to iterate over the array of interactive objects to test if the cursor is pointing at them
Here's an example of how this can be implemented.
Gaze cursor indicator
Using a ring here so it can be animated, as you typically want to give the user some feedback that the cursor is about to select something, instead of instantly triggering the interaction.
const cursor = new THREE.Mesh(
new THREE.RingBufferGeometry(0.1, 0.15),
new THREE.MeshBasicMaterial({ color: "white" })
);
Interactive Objects
Keep track of the objects you want to make interactive, and the actions they should execute when they are looked at.
const selectable = [];
const cube = new THREE.Mesh(
new THREE.BoxBufferGeometry(1, 1, 1),
new THREE.MeshNormalMaterial()
);
selectable.push({
object: cube,
action() {
console.log("Cube selected");
},
});
Checking Interactive Objects
Check for interactions on every frame and execute the action.
const raycaster = new THREE.Raycaster();
(function animate() {
for (let i = 0, length = selectable.length; i < length; i++) {
const camPosition = camera.position.clone();
const objectPosition = selectable[i].object.position.clone();
raycaster.set(camPosition, camera.getWorldDirection(objectPosition));
const intersects = raycaster.intersectObject(selectable[i].object);
const selected = intersects.length > 0;
// Visual feedback to inform the user they have selected an object
cursor.material.color.set(selected ? "crimson" : "white");
// Execute object action once
if (selected && !selectable[i].selected) {
selectable[i].action();
}
selectable[i].selected = selected;
}
})();
Here's a demo of this in action:
* {
box-sizing: border-box;
margin: 0;
padding: 0;
}
canvas {
display: block;
}
<script type="module">
import * as THREE from "https://cdn.jsdelivr.net/npm/three@0.121.1/build/three.module.js";
import { OrbitControls } from "https://cdn.jsdelivr.net/npm/three@0.121.1/examples/jsm/controls/OrbitControls.js";
const renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
const cameraMin = 0.0001;
const aspect = window.innerWidth / window.innerHeight;
const camera = new THREE.PerspectiveCamera(75, aspect, cameraMin, 1000);
const controls = new OrbitControls(camera, renderer.domElement);
const scene = new THREE.Scene();
camera.position.z = 5;
scene.add(camera);
const cube = new THREE.Mesh(
new THREE.BoxBufferGeometry(),
new THREE.MeshNormalMaterial()
);
cube.position.x = 1;
cube.position.y = 0.5;
scene.add(cube);
const cursorSize = 1;
const cursorThickness = 1.5;
const cursorGeometry = new THREE.RingBufferGeometry(
cursorSize * cameraMin,
cursorSize * cameraMin * cursorThickness,
32,
0,
Math.PI * 0.5,
Math.PI * 2
);
const cursorMaterial = new THREE.MeshBasicMaterial({ color: "white" });
const cursor = new THREE.Mesh(cursorGeometry, cursorMaterial);
cursor.position.z = -cameraMin * 50;
camera.add(cursor);
const selectable = [
{
selected: false,
object: cube,
action() {
console.log("Cube selected");
},
}
];
const raycaster = new THREE.Raycaster();
let firstRun = true;
(function animate() {
requestAnimationFrame(animate);
renderer.setSize(window.innerWidth, window.innerHeight);
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
controls.update();
if (!firstRun) {
for (let i = 0, length = selectable.length; i < length; i++) {
const camPosition = camera.position.clone();
const objectPosition = selectable[i].object.position.clone();
raycaster.set(camPosition, camera.getWorldDirection(objectPosition));
const intersects = raycaster.intersectObject(selectable[i].object);
const selected = intersects.length > 0;
cursor.material.color.set(selected ? "crimson" : "white");
if (selected && !selectable[i].selected) {
selectable[i].action();
}
selectable[i].selected = selected;
}
}
renderer.render(scene, camera);
firstRun = false;
})();
</script>
This library works like a charm - https://github.com/skezo/Reticulum