Is there a reason why server-side usage of three.js's collision detection should differ from client-side usage? We are using the same scene with the same setup on both the client and the server.
What we are trying to do is determine, on the server side, whether there is a collision with the world. To keep this simple, our world consists of only two boxes. The code is taken from Lee Stemkoski's collision detection example (for which we thank him; it is excellent and clear).
The client-side code runs smoothly and without trouble, but the server-side code, which is initialized in exactly the same way, does not detect collisions.
In our demo the player moves with the arrow keys. The movement is sent to the server, which has exactly the same scene as the client. The transformations are applied (rotations, position changes, etc.) and the new positions are sent back. The server and client are in sync up to this point. However, the client detects hits with the two boxes in our world and the server does not.
Client-side code:
socket.on("update", function(data){
    var delta = clock.getDelta(); // seconds
    var moveDistance = 200 * delta; // 200 pixels per second
    var rotateAngle = Math.PI / 2 * delta; // pi/2 radians (90 degrees) per second

    if( data.type == "rot" ){
        MovingCube.rotation.x = data.x;
        MovingCube.rotation.y = data.y;
        MovingCube.rotation.z = data.z;
    }
    if( data.type == "pos" ){
        MovingCube.position.x = data.x;
        MovingCube.position.y = data.y;
        MovingCube.position.z = data.z;
    }

    var originPoint = MovingCube.position.clone();
    for (var vertexIndex = 0; vertexIndex < MovingCube.geometry.vertices.length; vertexIndex++){
        var localVertex = MovingCube.geometry.vertices[vertexIndex].clone();
        var globalVertex = localVertex.applyMatrix4( MovingCube.matrix );
        var directionVector = globalVertex.sub( MovingCube.position );

        var ray = new THREE.Raycaster( originPoint, directionVector.clone().normalize() );
        var collisionResults = ray.intersectObjects( collidableMeshList );
        if ( collisionResults.length > 0 && collisionResults[0].distance < directionVector.length() )
            console.log(" Hit ");
    }
})
Server-side code:
socket.on("update", function(data){
    console.log("updating location");
    var delta = 0.1; // clock.getDelta(); // seconds
    var moveDistance = 200 * delta; // 200 pixels per second
    var rotateAngle = Math.PI / 2 * delta; // pi/2 radians (90 degrees) per second

    if( data == "A" ){
        MovingCube.rotation.y += rotateAngle;
        socket.emit("update",{"type":"rot","x":MovingCube.rotation.x,"y":MovingCube.rotation.y,"z":MovingCube.rotation.z});
    }
    if( data == "D" ){
        MovingCube.rotation.y -= rotateAngle;
        socket.emit("update",{"type":"rot","x":MovingCube.rotation.x,"y":MovingCube.rotation.y,"z":MovingCube.rotation.z});
    }
    if ( data == "left" ){
        MovingCube.position.x -= moveDistance;
        socket.emit("update",{"type":"pos","x":MovingCube.position.x,"y":MovingCube.position.y,"z":MovingCube.position.z});
    }
    if ( data == "right" ){
        MovingCube.position.x += moveDistance;
        socket.emit("update",{"type":"pos","x":MovingCube.position.x,"y":MovingCube.position.y,"z":MovingCube.position.z});
    }
    if ( data == "up" ){
        MovingCube.position.z -= moveDistance;
        socket.emit("update",{"type":"pos","x":MovingCube.position.x,"y":MovingCube.position.y,"z":MovingCube.position.z});
    }
    if ( data == "down" ){
        MovingCube.position.z += moveDistance;
        socket.emit("update",{"type":"pos","x":MovingCube.position.x,"y":MovingCube.position.y,"z":MovingCube.position.z});
    }

    var originPoint = MovingCube.position.clone();
    for (var vertexIndex = 0; vertexIndex < MovingCube.geometry.vertices.length; vertexIndex++){
        var localVertex = MovingCube.geometry.vertices[vertexIndex].clone();
        var globalVertex = localVertex.applyMatrix4( MovingCube.matrix );
        var directionVector = globalVertex.sub( MovingCube.position );

        var ray = new THREE.Raycaster( originPoint, directionVector.clone().normalize() );
        var collisionResults = ray.intersectObjects( collidableMeshList );
        if ( collisionResults.length > 0 && collisionResults[0].distance < directionVector.length() )
            console.log(" Hit ");
    }
})
Any help would be great. This has been eating my time for two weeks now; there is no error message and I cannot figure out what is going wrong.
Floating-point calculations can produce different results on different machines; let me try to find a good article for you.
Here you go: Floating point determinism.
Hope it helps.
Actually, the real problem here is that your server-side code probably does not run the three.js render loop (which of course would not work on a server anyway).
However, the render loop does some additional work for you: it calls updateMatrixWorld() on each object.
So on the server, just before doing your raycast (which uses the world matrix, not the raw position), be sure to call
your_objects_you_want_to_raytrace.updateMatrixWorld();
before you do the actual raycast.
In your case: MovingCube.updateMatrixWorld();
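For clarity, here is a minimal sketch of where that call fits in the server-side handler from the question (same object names as above; the commented forEach only matters if the world objects themselves ever move):

socket.on("update", function(data){
    // ... apply the rotation / position changes to MovingCube exactly as above ...

    // Make sure the matrices used by the raycast reflect the new transform:
    MovingCube.updateMatrixWorld();
    // If the collidable boxes ever move, update them as well:
    // collidableMeshList.forEach(function(mesh){ mesh.updateMatrixWorld(); });

    // ... the existing raycast loop (unchanged) ...
});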
(Question rewritten to integrate bits of information from the answers, and to make it more concise.)
I use analyser=audioContext.createAnalyser() in order to process audio data, and I'm trying to understand the details better.
I choose an fftSize, say 2048, then I create an array buffer of 2048 floats with Float32Array, and then, in an animation loop
(called 60 times per second on most machines, via window.requestAnimationFrame), I do
analyser.getFloatTimeDomainData(buffer);
which will fill my buffer with 2048 floating point sample data points.
When the handler is called the next time, 1/60 second has passed. To calculate how much that is in units of samples,
we divide it by the duration of one sample and get (1/60)/(1/44100) = 735 (assuming a 44100 Hz sample rate).
So the next handler call takes place (on average) 735 samples later.
So there is overlap between subsequent buffers.
We know from the spec (search for 'render quantum') that everything happens in chunks whose size is a multiple of 128 samples.
So (in terms of audio processing), one would expect that the next handler call will usually be either 5*128 = 640 samples later,
or else 6*128 = 768 samples later - those being the multiples of 128 closest to 735 samples = (1/60) second.
Calling this amount "Δ-samples": how do I find out, during each handler call, whether it is 640, 768, or something else?
It can be determined reliably, like this: consider the 'old buffer' (from the previous handler call). If you delete "Δ-samples" samples at its beginning, keep the remainder, and then append "Δ-samples" new samples, you should get the current buffer. And indeed, I tried that, and that is the case. It turns out "Δ-samples" is often 384, 512, or 896. It is trivial, but time-consuming, to determine "Δ-samples" with such a loop, and I would like to compute "Δ-samples" without performing that loop.
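For reference, the brute-force comparison I mean looks roughly like this (a sketch; oldBuffer and newBuffer are the buffers from two consecutive handler calls, both of length fftSize):

// Brute-force determination of Δ-samples (sketch).
function findDeltaSamples(oldBuffer, newBuffer, fftSize) {
    for (var delta = 0; delta <= fftSize; delta++) {
        var matches = true;
        // after dropping `delta` samples from the start of oldBuffer,
        // the remainder should equal the start of newBuffer
        for (var i = 0; i < fftSize - delta; i++) {
            if (oldBuffer[i + delta] !== newBuffer[i]) { matches = false; break; }
        }
        if (matches) return delta; // this is "Δ-samples" (beware: silence can match spuriously)
    }
    return -1; // no overlap found (more than fftSize samples elapsed)
}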
One would think the following would work:
(audioContext.currentTime - (the value of audioContext.currentTime during the previous handler call)) / (duration of one sample)
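In code, that idea would look something like this (a sketch; note that currentTime is a property, not a method):

var lastTime = audioContext.currentTime;
function onFrame() {
    var now = audioContext.currentTime;
    var deltaSamples = Math.round((now - lastTime) * audioContext.sampleRate);
    lastTime = now;
    // deltaSamples is the hoped-for "Δ-samples" value for this handler call
    requestAnimationFrame(onFrame);
}
requestAnimationFrame(onFrame);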
I tried that (see the full snippet below, where I also "stitch together" the various buffers, trying to reconstruct the original buffer),
and, surprise, it works about 99.9% of the time in Chrome and about 95% of the time in Firefox.
I also tried audioContext.getOutputTimestamp().contextTime, which does not work in Chrome, and works 9?% of the time in Firefox.
Is there any way to find "Δ-samples" (without looking at the buffers), which works reliably?
Second question: the "reconstructed" buffer (all the buffers from the callbacks stitched together) and the original sound buffer
are not exactly the same. There is a small but noticeable difference (more than the usual rounding error), and it is bigger in Firefox.
Where does that come from? As I understand the spec, they should be the same.
var soundFile = 'https://mathheadinclouds.github.io/audio/sounds/la.mp3';
var audioContext = null;
var isPlaying = false;
var sourceNode = null;
var analyser = null;
var theBuffer = null;
var reconstructedBuffer = null;
var soundRequest = null;
var loopCounter = -1;
var FFT_SIZE = 2048;
var rafID = null;
var buffers = [];
var timesSamples = [];
var timeSampleDiffs = [];
var leadingWaste = 0;
window.addEventListener('load', function() {
soundRequest = new XMLHttpRequest();
soundRequest.open("GET", soundFile, true);
soundRequest.responseType = "arraybuffer";
//soundRequest.onload = function(evt) {}
soundRequest.send();
var btn = document.createElement('button');
btn.textContent = 'go';
btn.addEventListener('click', function(evt) {
goButtonClick(this, evt)
});
document.body.appendChild(btn);
});
function goButtonClick(elt, evt) {
initAudioContext(togglePlayback);
elt.parentElement.removeChild(elt);
}
function initAudioContext(callback) {
audioContext = new AudioContext();
audioContext.decodeAudioData(soundRequest.response, function(buffer) {
theBuffer = buffer;
callback();
});
}
function createAnalyser() {
analyser = audioContext.createAnalyser();
analyser.fftSize = FFT_SIZE;
}
function startWithSourceNode() {
sourceNode.connect(analyser);
analyser.connect(audioContext.destination);
sourceNode.start(0);
isPlaying = true;
sourceNode.addEventListener('ended', function(evt) {
sourceNode = null;
analyser = null;
isPlaying = false;
loopCounter = -1;
window.cancelAnimationFrame(rafID);
console.log('buffer length', theBuffer.length);
console.log('reconstructedBuffer length', reconstructedBuffer.length);
console.log('audio callback called counter', buffers.length);
console.log('root mean square error', Math.sqrt(checkResult() / theBuffer.length));
console.log('lengths of time between requestAnimationFrame callbacks, measured in audio samples:');
console.log(timeSampleDiffs);
console.log(
timeSampleDiffs.filter(function(val) {
return val === 384
}).length,
timeSampleDiffs.filter(function(val) {
return val === 512
}).length,
timeSampleDiffs.filter(function(val) {
return val === 640
}).length,
timeSampleDiffs.filter(function(val) {
return val === 768
}).length,
timeSampleDiffs.filter(function(val) {
return val === 896
}).length,
'*',
timeSampleDiffs.filter(function(val) {
return val > 896
}).length,
timeSampleDiffs.filter(function(val) {
return val < 384
}).length
);
console.log(
timeSampleDiffs.filter(function(val) {
return val === 384
}).length +
timeSampleDiffs.filter(function(val) {
return val === 512
}).length +
timeSampleDiffs.filter(function(val) {
return val === 640
}).length +
timeSampleDiffs.filter(function(val) {
return val === 768
}).length +
timeSampleDiffs.filter(function(val) {
return val === 896
}).length
)
});
myAudioCallback();
}
function togglePlayback() {
sourceNode = audioContext.createBufferSource();
sourceNode.buffer = theBuffer;
createAnalyser();
startWithSourceNode();
}
function myAudioCallback(time) {
++loopCounter;
if (!buffers[loopCounter]) {
buffers[loopCounter] = new Float32Array(FFT_SIZE);
}
var buf = buffers[loopCounter];
analyser.getFloatTimeDomainData(buf);
var now = audioContext.currentTime;
var nowSamp = Math.round(audioContext.sampleRate * now);
timesSamples[loopCounter] = nowSamp;
var j, sampDiff;
if (loopCounter === 0) {
console.log('start sample: ', nowSamp);
reconstructedBuffer = new Float32Array(theBuffer.length + FFT_SIZE + nowSamp);
leadingWaste = nowSamp;
for (j = 0; j < FFT_SIZE; j++) {
reconstructedBuffer[nowSamp + j] = buf[j];
}
} else {
sampDiff = nowSamp - timesSamples[loopCounter - 1];
timeSampleDiffs.push(sampDiff);
var expectedEqual = FFT_SIZE - sampDiff;
for (j = 0; j < expectedEqual; j++) {
if (reconstructedBuffer[nowSamp + j] !== buf[j]) {
console.error('unexpected error', loopCounter, j);
// debugger;
}
}
for (j = expectedEqual; j < FFT_SIZE; j++) {
reconstructedBuffer[nowSamp + j] = buf[j];
}
//console.log(loopCounter, nowSamp, sampDiff);
}
rafID = window.requestAnimationFrame(myAudioCallback);
}
function checkResult() {
var ch0 = theBuffer.getChannelData(0);
var ch1 = theBuffer.getChannelData(1);
var sum = 0;
var idxDelta = leadingWaste + FFT_SIZE;
for (var i = 0; i < theBuffer.length; i++) {
var samp0 = ch0[i];
var samp1 = ch1[i];
var samp = (samp0 + samp1) / 2;
var check = reconstructedBuffer[i + idxDelta];
var diff = samp - check;
var sqDiff = diff * diff;
sum += sqDiff;
}
return sum;
}
In the above snippet I do the following: I load a one-second mp3 audio file from my github.io page with XMLHttpRequest (I sing 'la' for one second). After it has loaded, a button labeled 'go' is shown; pressing it plays the audio back by putting it into a bufferSource node and calling .start on it. The bufferSource is then fed to our analyser, et cetera.
related question
I also have the snippet code on my github.io page, which makes reading the console easier.
I think the AnalyserNode is not what you want in this situation. You want to grab the data and keep it synchronized with requestAnimationFrame (raf). Use a ScriptProcessorNode or AudioWorkletNode to grab the data; then you'll get all the data as it comes, with no problems with overlap, missing data, or anything.
Note also that the clocks for raf and audio may be different and hence things may drift over time. You'll have to compensate for that yourself if you need to.
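A minimal sketch of the ScriptProcessorNode variant (deprecated, but simple; the buffer size and variable names are just for illustration, and audioContext / sourceNode are the ones from the question):

// Capture every sample, in order, with no gaps or overlap.
var processor = audioContext.createScriptProcessor(2048, 1, 1);
var capturedChunks = [];

processor.onaudioprocess = function (e) {
    // Copy the data; the underlying buffer may be reused by the browser.
    capturedChunks.push(new Float32Array(e.inputBuffer.getChannelData(0)));
};

sourceNode.connect(processor);
processor.connect(audioContext.destination); // some browsers only fire the callback when connected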
Unfortunately there is no way to find out the exact point in time at which the data returned by an AnalyserNode was captured. But you might be on the right track with your current approach.
All the values returned by the AnalyserNode are based on the "current-time-domain-data". This is basically the internal buffer of the AnalyserNode at a certain point in time. Since the Web Audio API has a fixed render quantum of 128 samples I would expect this buffer to evolve in steps of 128 samples as well. But currentTime usually evolves in steps of 128 samples already.
Furthermore the AnalyserNode has a smoothingTimeConstant property. It is responsible for "blurring" the returned values. The default value is 0.8. For your use case you probably want to set this to 0.
EDIT: As Raymond Toy pointed out in the comments, smoothingTimeConstant only has an effect on the frequency data. Since the question is about getFloatTimeDomainData(), it will have no effect on the returned values.
I hope this helps but I think it would be easier to get all the samples of your audio signal by using an AudioWorklet. It would definitely be more reliable.
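For illustration, a rough sketch of the AudioWorklet approach (the processor name, file name, and message shape are placeholders of mine, not part of the Web Audio API; audioContext and sourceNode are the ones from the question):

// recorder-processor.js -- loaded via audioContext.audioWorklet.addModule()
class RecorderProcessor extends AudioWorkletProcessor {
    process(inputs, outputs, parameters) {
        if (inputs[0].length > 0) {
            // inputs[0][0] is one render quantum (128 samples) of channel 0
            this.port.postMessage(inputs[0][0].slice());
        }
        return true; // keep the processor alive
    }
}
registerProcessor('recorder-processor', RecorderProcessor);

// main thread (inside an async function)
const chunks = [];
await audioContext.audioWorklet.addModule('recorder-processor.js');
const recorder = new AudioWorkletNode(audioContext, 'recorder-processor');
recorder.port.onmessage = (e) => chunks.push(e.data); // ordered 128-sample blocks, no gaps
sourceNode.connect(recorder);
recorder.connect(audioContext.destination);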
I'm not really following your math, so I can't tell exactly what you had wrong, but you seem to be looking at this in an overly complicated manner.
The fftSize doesn't really matter here; what you want to calculate is how many samples have passed since the last frame.
To calculate this, you just need to
Measure the time elapsed since the last animation frame.
Divide this time by the duration of a single sample frame.
The duration of a single sample frame is simply 1 / context.sampleRate.
So really all you need is (currentTime - previousTime) / (1 / sampleRate), and you'll find the index in the last frame where the data starts being repeated in the new one.
And then, if you want the index in the new frame, you subtract this index from the fftSize.
Now, as for why you sometimes have gaps: it's because AudioContext.prototype.currentTime returns the timestamp of the beginning of the next block to be passed to the graph.
The one we want here is AudioContext.prototype.getOutputTimestamp().contextTime, which represents the timestamp of now, on the same time base as currentTime (i.e. relative to the creation of the context).
(function loop(){requestAnimationFrame(loop);})();
(async()=>{
const ctx = new AudioContext();
const buf = await fetch("https://upload.wikimedia.org/wikipedia/en/d/d3/Beach_Boys_-_Good_Vibrations.ogg").then(r=>r.arrayBuffer());
const aud_buf = await ctx.decodeAudioData(buf);
const source = ctx.createBufferSource();
source.buffer = aud_buf;
source.loop = true;
const analyser = ctx.createAnalyser();
const fftSize = analyser.fftSize = 2048;
source.loop = true;
source.connect( analyser );
source.start(0);
// for debugging we use two different buffers
const arr1 = new Float32Array( fftSize );
const arr2 = new Float32Array( fftSize );
const single_sample_dur = (1 / ctx.sampleRate);
console.log( 'single sample duration (ms)', single_sample_dur * 1000);
onclick = e => {
if( ctx.state === "suspended" ) {
ctx.resume();
return console.log( 'starting context, please try again' );
}
console.log( '-------------' );
requestAnimationFrame( () => {
// first frame
const time1 = ctx.getOutputTimestamp().contextTime;
analyser.getFloatTimeDomainData( arr1 );
requestAnimationFrame( () => {
// second frame
const time2 = ctx.getOutputTimestamp().contextTime;
analyser.getFloatTimeDomainData( arr2 );
const elapsed_time = time2 - time1;
console.log( 'elapsed time between two frame (ms)', elapsed_time * 1000 );
const calculated_index = fftSize - Math.round( elapsed_time / single_sample_dur );
console.log( 'calculated index of new data', calculated_index );
// for debugging we can just search for the first index where the data repeats
const real_time = fftSize - arr1.indexOf( arr2[ 0 ] );
console.log( 'real index', real_time > fftSize ? 0 : real_time );
if( calculated_index !== ( real_time > fftSize ? 0 : real_time ) ) {
console.error( 'different' );
}
});
});
};
document.body.classList.add('ready');
})().catch( console.error );
body:not(.ready) pre { display: none; }
<pre>click to record two new frames</pre>
I have to create an animation for waves. I need to control the speed of the waves depending on the availability of the data. Is it possible to speed up the waves? I'm using canvas for the waves.
Thanks in advance.
Fiddle: https://jsfiddle.net/Chaitanya_Kumar/6ztr0Lfh/
function animate() {
if (x > data.length - 1) {
return;
}
if (continueAnimation) {
requestAnimationFrame(animate);
}
if (x++ < panAtX) {
var temp = data[x];
var final = constant-(temp);
ctx.fillRect(x, final, 1, 1);
ctx.lineTo(x, final);
ctx.stroke();
} else {
ctx.clearRect(0, 0, canvas.width, canvas.height);
ctx.beginPath(); // reset the path
for (var xx = 0; xx < panAtX; xx++) {
var y = data[x - panAtX + xx];
var final = constant - (y);
ctx.fillRect(xx, final, 1, 1);
ctx.lineTo(xx, final);
}
ctx.stroke();
}
}
Sub sampling data
Below is an example of data sampling. It uses linear interpolation to subsample a data source and display the result on a rolling graph.
Regular interval data.
The data in your question and fiddle suggest that you have a constant sample interval and that you want to vary the display rate of that data. This is what I have done in the demo below.
About the demo
The graph is a real-time display of the data, and its speed from left to right depends on the rate at which you call the sample function.
displayBuffer.readFrom(dataSource, dataSpeed, samplesPerFrame)
displayBuffer is the object that holds the displayable data
dataSource is the source of data; it has read and seek functions and a readPos. You seek to a position with dataSource.seek(0.01), which moves ahead 0.01 data samples, then read the data with dataSource.read(), which returns the linearly interpolated value.
This allows you to speed up or slow down data streaming from the source data.
The data reader object
//------------------------------------------------------------------------------
// data reader reads from a data source
const dataReader = {
readPos : 0,
seek(amount){ // moves read pos forward or back
if(this.data.length === 0){
this.readPos = 0;
return 0;
}
this.readPos += amount;
this.readPos = this.readPos < 0 ? 0 :this.readPos >= this.data.length ? this.data.length - 1 : this.readPos;
return this.readPos;
},
// this function reads the data at read pos. It is a linear interpolation of the
// data and does not represent what the actual data may be at fractional read positions
read(){
var fraction = this.readPos % 1;
var whole = Math.floor(this.readPos);
var v1 = this.data[Math.min(this.data.length-1,whole)];
var v2 = this.data[Math.min(this.data.length-1,whole + 1)];
return (v2 - v1) * fraction + v1;
},
}
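A quick usage sketch, assuming the reader is mixed into an object that also carries a data array (exactly what the demo below does):

const source = Object.assign({ data: [0, 10, 20, 30, 40] }, dataReader);

source.seek(1.5);           // move the read position forward 1.5 samples
console.log(source.read()); // 15 (interpolated between data[1] and data[2])

source.seek(0.25);          // readPos is now 1.75
console.log(source.read()); // 17.5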
Timestamped data source.
The demo can be adapted by adding to the dataReader.
If your data sample rate is irregular, you will need to add a timestamp for each sample. You then add a seekTime function that is similar to seek but uses the slope between time samples to calculate the read position for a given time. It has to step through the samples between the current time and the target time (in the seek direction), so the CPU cycles needed for a seek are indeterminate.
The following is an example seekTime that finds the readPos (of the dataReader object above) for the time shifted forward by the timeShift argument. The object's readTime and readPos properties are updated, and the next read() call will return the data at dataSource.readTime.
readTime : 0, // current seeked time
seekTime(timeShift){ // Example is forward seek only
if(this.timeStamps.length === 0){
this.readPos = 0;
return 0;
}
this.readTime += timeShift; // set new seeked time
var readPos = Math.floor(this.readPos);
// move read pos forward until at correct sample
while(this.timeStamps[readPos] > this.readTime &&
readPos++ < this.timeStamps.length);
// Warning you could be past end of buffer
// you will need to check and set seek time to the last
// timestamp value and exit. Code below the following line
// will crash if you dont vet here.
//if(readPos === this.timeStamps.length)
// now readPos points to the first timeStamp less than the needed
// time position. The next read position should be a time ahead of the
// needed time
var t1 = this.timeStamps[readPos]; // time befor seekTime
var t2 = this.timeStamps[readPos+1]; // time after seekTime
// warning divide by zero if data bad
var fraction = (this.readTime - t1) / (t2 - t1); // get the sub-sample fractional location for the required time
this.readPos = readPos + fraction;
return this.readPos;
},
Warning: I have omitted all safety checks. You should check for the end of the buffer and for bad time-shift values. If timestamped data has bad samples you will get a divide by zero that makes the dataReader return only NaN from that point on. So vet for safety.
Note: for the above timestamped function to work, every data sample needs a corresponding timeStamp. If there is not a one-to-one match between time stamps and samples, the above code will not work.
Changes to the dataDisplay are simple. Just change the seek call inside dataDisplay.readFrom(dataSource, speed, samples) to dataSource.seekTime(speed / samples); speed then represents time rather than samples. (Or I simply overwrite the seek() function with seekTime() when I have timestamps, which lets the dataDisplay object handle both timestamped and regular-interval data as is.)
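Concretely, the only line that changes inside readFrom is the seek call; roughly (a sketch of the loop body, everything else stays as in the demo below):

for (var i = 0; i < samples; i++) {           // read samples
    dataSource.seekTime(speed / samples);     // was: dataSource.seek(speed / samples)
    dataRead = dataSource.read();             // read the interpolated sample
    // ... min / max / mean bookkeeping unchanged ...
}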
Demo
The example samples random data and displays it at variable speed and sampling rates. Use the left and right arrow keys to set the display speed. The frame rate is roughly 60fps, but you could scale the speed variable by the time between frames.
var ctx = canvas.getContext("2d");
window.focus();
//==============================================================================
// the current data read speed
var dataSpeed = 1;
var samplesPerFrame = 1;
requestAnimationFrame(mainLoop); // start animation when code has been parsed and executed
//------------------------------------------------------------------------------
// data reader reads from a data source
const dataReader = {
readPos : 0,
seek(amount){ // moves read pos forward or back
if(this.data.length === 0){
this.readPos = 0;
return 0;
}
this.readPos += amount;
this.readPos = this.readPos < 0 ? 0 :this.readPos >= this.data.length ? this.data.length - 1 : this.readPos;
return this.readPos;
},
// this function reads the data at read pos. It is a linear interpolation of the
// data and does not represent what the actual data may be at fractional read positions
read(){
var fraction = this.readPos % 1;
var whole = Math.floor(this.readPos);
var v1 = this.data[Math.min(this.data.length-1,whole)];
var v2 = this.data[Math.min(this.data.length-1,whole + 1)];
return (v2 - v1) * fraction + v1;
},
}
//------------------------------------------------------------------------------
// Create a data source and add a dataReader to it
const dataSource = Object.assign({
data : [],
},dataReader
);
// fill the data source with random data
for(let i = 0; i < 100000; i++ ){
// because random data looks the same if sampled every 1000 or 1 unit I have added
// two waves to the data that will show up when sampling at high rates
var wave = Math.sin(i / 10000) * 0.5;
wave += Math.sin(i / 1000) * 0.5;
// high frequency data shift
var smallWave = Math.sin(i / 100) * (canvas.height / 5);
// get a gaussian distributed random value
dataSource.data[i] = Math.floor(smallWave + ((wave + Math.random()+Math.random()+Math.random()+Math.random()+Math.random()) / 5) * canvas.height);
}
//------------------------------------------------------------------------------
// Data displayer used to display a data source
const dataDisplay = {
writePos : 0,
width : 0,
color : "black",
lineWidth : 1,
// this function sets the display width which limits the data buffer
// when it is called all buffers are reset
setDisplayWidth(width){
this.data.length = 0;
this.width = width;
this.writePos = 0;
if(this.lastRead === undefined){
this.lastRead = {};
}
this.lastRead.mean = 0;
this.lastRead.max = 0;
this.lastRead.min = 0;
},
// this draws the buffered data scrolling from left to right
draw(){
var data = this.data; // to save my self from writing this a zillion times
const ch = canvas.height / 2;
if(data.length > 0){ // only if there is something to draw
ctx.beginPath();
ctx.lineWidth = this.lineWidth;
ctx.strokeStyle = this.color;
ctx.lineJoin = "round";
if(data.length < this.width){ // when buffer is first filling draw from start
ctx.moveTo(0, data[0])
for(var i = 1; i < data.length; i++){
ctx.lineTo(i, data[i])
}
}else{ // buffer is full and write position is chasing the tail end
ctx.moveTo(0, data[this.writePos])
for(var i = 1; i < data.length; i++){
ctx.lineTo(i, data[(this.writePos + i) % data.length]);
}
}
ctx.stroke();
}
},
// this reads data from a data source (that has dataReader functionality)
// Speed is in data units,
// samples is number of samples per buffer write.
// samples is only usefull if speed > 1 and lets you see the
// mean, min, and max of the data over the speed unit
// If speed < 1 and sample > 1 the data is just a linear interpolation
// so the lastRead statistics are meaningless (sort of)
readFrom(dataSource,speed,samples){ // samples must be a whole positive number
samples = Math.floor(samples);
var value = 0;
var dataRead;
var min;
var max;
for(var i = 0; i < samples; i ++){ // read samples
dataSource.seek(speed / samples); // seek to next sample
dataRead = dataSource.read(); // read the sample
if(i === 0){
min = dataRead;
max = dataRead;
}else{
min = Math.min(dataRead,min);
max = Math.max(dataRead,max);
}
value += dataRead;
}
// write the samples data and statistics.
this.lastRead.min = min;
this.lastRead.max = max;
this.lastRead.delta = value / samples - this.lastRead.mean;
this.lastRead.mean = value / samples;
this.data[this.writePos] = value / samples;
this.writePos += 1;
this.writePos %= this.width;
}
}
// display data buffer
var displayBuffer = Object.assign({ // this data is displayed at 1 pixel per frame
data : [], // but data is written into it at a variable speed
},
dataDisplay // add display functionality
);
//------------------------------------------------------------------------------
// for control
const keys = {
ArrowLeft : false,
ArrowRight : false,
ArrowUp : false,
ArrowDown : false,
}
function keyEvent(event){
if(keys[event.code] !== undefined){
event.preventDefault();
keys[event.code] = true;
}
}
addEventListener("keydown",keyEvent);
//------------------------------------------------------------------------------
function mainLoop(time){
ctx.clearRect(0,0,canvas.width,canvas.height);
if(canvas.width !== displayBuffer.width){
displayBuffer.setDisplayWidth(canvas.width);
}
displayBuffer.readFrom(dataSource,dataSpeed,samplesPerFrame);
displayBuffer.draw();
//-----------------------------------------------------------------------------
// rest is display UI and stuff like that
ctx.font = "16px verdana";
ctx.fillStyle = "black";
//var dataValue =displayBuffer.lastRead.mean.toFixed(2);
//var delta = displayBuffer.lastRead.delta.toFixed(4);
var readPos = dataSource.readPos.toFixed(4);
//if(displayBuffer.lastRead.delta > 0){ delta = "+" + delta }
// ctx.fillText("Data : " + dataValue + " ( " +delta +" )" ,4,18);
ctx.setTransform(0.9,0,0,0.89,4,18);
ctx.fillText("Speed : " + dataSpeed.toFixed(3) + ", Sample rate :" +samplesPerFrame + ", Read # "+readPos ,0,0);
ctx.setTransform(0.7,0,0,0.7,4,32);
if(samplesPerFrame === 1){
ctx.fillText("Keyboard speed -left, +right Sample rate +up",0,0);
}else{
ctx.fillText("Keyboard speed -left, +right Sample rate -down, +up",0,0);
}
ctx.setTransform(1,0,0,1,0,0);
if(keys.ArrowLeft){
keys.ArrowLeft = false;
if(dataSpeed > 1){
dataSpeed -= 1;
}else{
dataSpeed *= 1/1.2;
}
}
if(keys.ArrowRight){
keys.ArrowRight = false;
if(dataSpeed >= 1){
dataSpeed += 1;
}else{
dataSpeed *= 1.2;
if(dataSpeed > 1){ dataSpeed = 1 }
}
}
if(keys.ArrowUp){
keys.ArrowUp = false;
samplesPerFrame += 1;
}
if(keys.ArrowDown){
keys.ArrowDown = false;
samplesPerFrame -= 1;
samplesPerFrame = samplesPerFrame < 1 ? 1 : samplesPerFrame;
}
requestAnimationFrame(mainLoop);
}
canvas {
border : 2px black solid;
}
<canvas id=canvas width=512 height=200></canvas>
Reading and displaying data this way is quick and simple. It is easy to add grid markings and data processing to the data source and the display. The demo (regular interval data) can easily handle large data sources while zooming in and out of the data. Note that for timestamped data the above seekTime function is not suitable for large datasets; you will need to index such data for more effective seek times.
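For example, a binary search over the (sorted, ascending) timeStamps array finds the read position in O(log n) rather than scanning sample by sample; a sketch:

// Find the index of the last timestamp <= time (0 if time precedes all samples).
function findTimeIndex(timeStamps, time) {
    var lo = 0;
    var hi = timeStamps.length - 1;
    while (lo < hi) {
        var mid = (lo + hi + 1) >> 1;          // bias upward so the loop always terminates
        if (timeStamps[mid] <= time) { lo = mid; }
        else { hi = mid - 1; }
    }
    return lo;
}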
I'm writing an A* pathing script for a game set on a 7x7 grid of tiles with the player always in the middle (tile 24). The leading zeros are just for readability; it's actually one flat array, not a 7x7 2D array.
[00,01,02,03,04,05,06]
[07,08,09,10,11,12,13]
[14,15,16,17,18,19,20]
[21,22,23,24,25,26,27]
[28,29,30,31,32,33,34]
[35,36,37,38,39,40,41]
[42,43,44,45,46,47,48]
The game is server-driven so the player uses relative coordinates. What that means is, if the player moves, tile[0] changes. The short version of that is the player will always move from tile 24, which is the center tile. The grid is hard coded in, but if I post it publicly I'll change the code a little; no problem.
The function should take a destination and find a good path from tile 24 to that square, but what it actually does is return undefined.
If I input 6, I want the function to output an array like this:
[18,12,6]
Here's the code:
z = 0;
function pathTo(goal){
var createPath = function (goal){
var createNode = function(i){
this.id = i;
this.g = Infinity;
this.f = Infinity;
this.parent = null;
this.open = null;
};
this.nodes = Array(49);
for(i=0;i<this.nodes.length;i++){
this.nodes[i] = new createNode(i);
}
this.start = this.nodes[24];
this.start.g = 0;
this.currentNodeId = 24;
this.goal = this.nodes[goal];
this.bestPath = null;
};//end createPath
var getBestNeighbor = function(nodeId){
z++
if(z>50){throw z}debugger;
console.log(nodeId);
var getG = function(parentG){
//here you can check the map for water, sand, and ruins penalties
/*
default = 1
path = .9
water = 3
*/
return (parentG + 1);
};
var closeNode = function (node){
node.open = false;
};//end closeNode
var getF = function(startId,endId,g){
if(g>9){
throw g;
}
var startX = startId % 7;
var startY = (startId - startX) / 7;
var endX = endId % 7;
var endY = (endId - endX) / 7;
var h = Math.sqrt( Math.pow((startX - endX) , 2 ) + Math.pow(( startY - endY ), 2 ) );
console.log("Start.id:"+startId+"H:"+h+" Start.id.g:"+g);
return (h + g);
};//end getF
var tracePath = function(tracedNode){
path.bestPath = [];
while(tracedNode != path.start){
path.bestPath.unshift(tracedNode.id);
tracedNode = tracedNode.parent;
}
return path.bestPath;
};//end tracePath
var getNeighborNodeId = function(x,y,currentId){return currentId + (y*7) + x;};//end getNeighborNodeId
if(path.bestPath === null){
var neighborNode = {};
var bestNode = {f: Infinity};
if(nodeId == path.goal.id){//may need to pass path
return tracePath(path.nodes[nodeId]);
}else{
for(x=-1;x<=1;x++){
for(y=-1;y<=1;y++){
var nnId = getNeighborNodeId(x,y,nodeId);
if(nnId==24){debugger}
if( ( (x!=0) && (y!=0) ) ||( (nnId>=0) && (nnId<=48))){
var neighborNode = path.nodes[nnId];
if(neighborNode.open === null){ neighborNode.open = true; }
if(neighborNode.open === true ){//don't check closed neighbors
if(typeof neighborNode === "object"){
neighborNode.parent = path.nodes[nodeId]
debugger;
neighborNode.g = getG(neighborNode.parent.g);
neighborNode.f = getF(neighborNode.id , path.goal.id , neighborNode.g);
if( neighborNode.f < bestNode.f){
bestNode = neighborNode;
}//endif
}//endif
}//endif Note: if the node isn't null or true, it's false.
}
}//endfor
}//endfor - Note: Here we should have the best neighbor
if(bestNode.f == Infinity){
closeNode(path.nodes[nodeId]);//need escape for no possible path
return;
}else{
//bestNode.parent = path.nodes[nodeId];
path.currentNodeId = bestNode.id;
getBestNeighbor(bestNode.id);
}//endelse
}//endelse
}//endif
};//end getBestNeighbor
var path = new createPath(goal);
while(path.bestPath === null){
getBestNeighbor(path.currentNodeId);
}//end while
return path.bestPath;
}//end pathTo
console.log(pathTo(41)); //testing with 6
and a JSFiddle link: https://jsfiddle.net/jb4xtf3h/
It's my first time not just slapping globals everywhere, so it may have a scope issue I'm not familiar with.
Most likely my issue is in the getNeighborId function; I don't think I have anything declaring a good node's parent.
The problem is that it goes NW three times instead of NE three times. That probably means I have a mistake in the getBestNeighbor function where I'm reading a -1 as a 1.
Also I don't think I'm escaping the recursive function correctly.
For some reason, when I put in 41 it gets really confused. This probably has to do with how I set G and H, which are classically used in A* to record the distance traveled so far on the path and the estimated distance remaining. Specifically, the G number is wrong because the search is taking bad steps for some reason.
Here is the working code. I didn't implement walls or anything but I do show where you would do that. All you need to do is close all the nodes that are walls before you begin pathing and you can assign movement penalties if you want the AI to "know" to avoid water or sand.
I can't actually pin down a single problem, but a major one was this statement:
if( ( (x!=0) && (y!=0) ) ||( (nnId>=0) && (nnId<=48))){
which was changed to:
if( ( !(x==0 && y==0) && ( nnId>=0 && nnId<=48))){
The intent of this line was to prevent searching the tile you are standing on, (x, y) = (0, 0), and also to make sure that the neighbor you want to look at is on the grid (a 7x7 grid has 49 squares, numbered 0-48).
What I was trying to say was "if x and y are not both zero", but (x!=0) && (y!=0) actually means "both x and y are non-zero", so whenever either coordinate was 0 the neighbor was skipped, and tiles that needed those spaces had problems since several directions weren't working.
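Put differently, the two forms are not equivalent; De Morgan's law shows what each condition actually means:

// skip only the tile you are standing on, i.e. NOT (x is 0 AND y is 0)
!(x == 0 && y == 0)   // false only for (0,0) - what was intended
(x != 0 || y != 0)    // the same thing, by De Morgan's law
(x != 0 && y != 0)    // NOT the same: also false for (0,1), (0,-1), (1,0), (-1,0)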
I hope this helps somebody who needs a nice, simple pathing script. I tried hard to make the code readable; I'm not the strongest coder in the world, but this is a working A* script in about 100 lines that I think is fairly easy to follow. If you are not familiar with A* pathing, here is what you might need to know:
H is your heuristic value: an estimate of the remaining distance from a tile to the goal (in this code it is computed inside getF).
G is the distance you've moved so far to reach that square (path.nodes[array#].g).
F just adds h + g for the total value. This pseudocode on Wikipedia helped me write it.
var z = 0;
function pathTo(goal){
var createPath = function (goal){
var createNode = function(i){
this.id = i;
this.g = Infinity;
this.f = Infinity;
this.parent = null;
this.open = null;
};
this.nodes = Array(49);
for(i=0;i<this.nodes.length;i++){
this.nodes[i] = new createNode(i);
}
this.start = this.nodes[24];
this.start.g = 0;
this.currentNodeId = 24;
this.goal = this.nodes[goal];
this.bestPath = null;
};//end createPath
var path = new createPath(goal);
var getBestNeighbor = function(nodeId){
var getG = function(parentG){
//here you can check the map for water, sand, and ruins penalties
/*
default = 1
path = .9
water = 3
*/
return (parentG + 1);
};
var closeNode = function (node){
node.open = false;
};//end closeNode
var getF = function(startId,endId,g){
var startX = startId % 7;
var startY = (startId - startX) / 7;
var endX = endId % 7;
var endY = (endId - endX) / 7;
var h = Math.sqrt( Math.pow((startX - endX) , 2 ) + Math.pow(( startY - endY ), 2 ) );
return (h + g);
};//end getF
var tracePath = function(tracedNode){
path.bestPath = [];
while(tracedNode != path.start){
path.bestPath.unshift(tracedNode.id);
tracedNode = tracedNode.parent;
}
return path.bestPath;
};//end tracePath
var getNeighborNodeId = function(x,y,currentId){return currentId + (y*7) + x;};//end getNeighborNodeId
debugger;
z++
if(z>50){throw z}
if(path.bestPath === null){
var neighborNode = {};
var bestNode = {f: Infinity};
if(nodeId == path.goal.id){//may need to pass path
return tracePath(path.nodes[nodeId]);
}else{
for(y=-1;y<=1;y++){
for(x=-1;x<=1;x++){
var nnId = getNeighborNodeId(x,y,nodeId);
if( ( !(x==0 && y==0) && ( nnId>=0 && nnId<=48))){
var neighborNode = path.nodes[nnId];
if(path.nodes[nodeId].parent!=neighborNode){
if(neighborNode.open === null){ neighborNode.open = true; }
if(neighborNode.open === true ){//don't check closed neighbors
if(typeof neighborNode === "object"){
neighborNode.parent = path.nodes[nodeId]
neighborNode.g = getG(neighborNode.parent.g);
neighborNode.f = getF(neighborNode.id , path.goal.id , neighborNode.g);
if( neighborNode.f < bestNode.f){
bestNode = neighborNode;
}//endif
}//endif
}
}//endif Note: if the node isn't null or true, it's false.
}
}//endfor
}//endfor - Note: Here we should have the best neighbor
if(bestNode.f >= 50){
closeNode(path.nodes[nodeId]);//need escape for no possible path
return;
}else{
path.currentNodeId = bestNode.id;
getBestNeighbor(bestNode.id);
}//endelse
}//endelse
}//endif
};//end getBestNeighbor
while(path.bestPath === null){
getBestNeighbor(path.currentNodeId);
}//end while
return path.bestPath;
}//end pathTo
myPath = pathTo(41); //testing with 6
console.log("path2:"+myPath);
I'm building an application in three.js; however, I'm having real performance problems. This part of the application is based on the Voxel Painter example. In my version, the user clicks on a cell to begin placement, drags the cursor to where they wish to end placement, and clicks again to end.
function onDocumentMouseMove(event) {
//set up mouse and raycaster
event.preventDefault();
mouse.set((event.clientX / window.innerWidth) * 2 - 1, -(event.clientY / window.innerHeight) * 2 + 1);
raycaster.setFromCamera(mouse, camera);
switch (buildMode) {
case buildModes.CORRIDOR:
scene.add(rollOverFloor);
var intersects = raycaster.intersectObjects(gridObject);
if (intersects.length > 0) {
var intersect = intersects[0];
if (beginPlace == true) {
//store the intersection position
var endPlace = new THREE.Vector3(0, 0, 0);
endPlace.copy(intersect.point).add(intersect.face.normal);
endPlace.divideScalar(step).floor().multiplyScalar(step).addScalar(step / step);
endPlace.set(endPlace.x, 0, endPlace.z);
corridorDrag(endPlace);
}
//if user hasn't begun to place the wall
else {
//show temporary wall on grid
rollOverFloor.position.copy(intersect.point).add(intersect.face.normal);
rollOverFloor.position.divideScalar(step).floor().multiplyScalar(step).addScalar(step / step);
rollOverFloor.position.set(rollOverFloor.position.x, 0, rollOverFloor.position.z);
}
}
break;
}
render();
}
The code above is called when the user moves the mouse (there are many build modes in the main application, but I have not included them here). This function simply gets a start and an end point; the corridorDrag() function then fills in the cells between them:
function corridorDrag(endPlace) {
deleteFromScene(stateType.CORRIDOR_DRAG);
var startPoint = startPlace;
var endPoint = endPlace;
var zIntersect = new THREE.Vector3(startPoint.x, 0, endPoint.z);
var xIntersect = new THREE.Vector3(endPoint.x, 0, startPoint.z);
var differenceZ = Math.abs(startPlace.z - zIntersect.z);
var differenceX = Math.abs(startPlace.x - xIntersect.x);
var mergedGeometry = new THREE.Geometry();
for (var i = 0; i <= (differenceZ / step); i++) {
for (var j = 0; j <= (differenceX / step); j++) {
var x = startPlace.x;
var y = startPlace.y;
var z = startPlace.z;
if (endPoint.x <= (startPlace.x )) {
if (endPoint.z <= (startPlace.z)) {
x = x - (step * j);
z = z - (step * i);
}
else if (endPoint.z >= (startPlace.z)) {
x = x - (step * j);
z = z + (step * i);
}
} else if (endPoint.x >= (startPlace.x)) {
if (endPoint.z <= (startPlace.z)) {
x = x + (step * j);
z = z - (step * i);
}
else if (endPoint.z >= (startPlace.z)) {
x = x + (step * j);
z = z + (step * i);
}
}
floorGeometry.translate(x, y, z);
mergedGeometry.merge(floorGeometry);
floorGeometry.translate(-x, -y, -z);
}
}
var voxel = new THREE.Mesh(mergedGeometry, tempMaterial);
voxel.state = stateType.CORRIDOR_DRAG;
scene.add(voxel);
tempObjects.push(voxel);
}
Firstly, the deleteFromScene() function removes all currently highlighted cells from the scene (see below). The code then (I believe) should create a number of meshes, depending on the start and end points, and add them to the scene.
function deleteFromScene(state) {
tempObjects = [];
var i = scene.children.length;
while (i--) {
if (scene.children[i].state != undefined)
if (scene.children[i].state == state)
scene.children.splice(i, 1);
}
}
As I said, it is very, very slow. It also appears to be adding an obscene number of vertices to the renderer, as seen in the WebGLRenderer stats window. I have no idea why it's adding so many vertices, but I'm assuming that's why it's rendering so slowly.
The application can be viewed here - the problem can be seen by clicking on one cell, dragging the cursor to the other end of the grid, and observing the time taken to fill in the cells.
Thank you in advance, this really is a last resort.
A few years ago Twitter put out an update that introduced infinite scrolling, and on the day of its release the update was crashing users' browsers. Twitter engineers did some investigating and found that the crashes were the result of the scroll event firing hundreds of times a second.
Mouse events can fire many, MANY times a second and can cause your code to execute too often, which slows down the browser and (in many cases) crashes it. The solution for Twitter (and hopefully for you) is simple: poll your event.
Inside your mousemove event handler, check that a certain number of milliseconds has passed since the last processed move event.
var lastMove = Date.now();
function onDocumentMouseMove(event) {
if (Date.now() - lastMove < 31) { // 32 frames a second
return;
} else {
lastMove = Date.now();
}
// your code here
}
I hope that helps!
I would like to be able to select a point from my pointCloud. To do that, I found a lot of examples:
Interactive particles example
Interactive raycasting pointcloud example
So I wrote the following code:
function intersectionCheck(event)
{
    if(pointClouds != null)
    {
        event.preventDefault();
        var mouse = new THREE.Vector2();
        var raycaster = new THREE.Raycaster();
        mouse.x = ( event.clientX / window.innerWidth ) * 2 - 1;
        mouse.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
        raycaster.setFromCamera( mouse, camera );
        var intersections = raycaster.intersectObjects( pointClouds );
        intersection = ( intersections.length ) > 0 ? intersections[ 0 ] : null;
        if(intersection != null)
        {
            console.log(intersection.point);
            sphere.position.copy( intersection.point );
        }
    }
}
That code was supposed to place a green sphere on the screen only if the user clicked on an entity from the pointcloud.
But that was not the case: the sphere appeared even when there was no entity under the click.
It seemed that there was a problem with the size of my entities, because the function returned a position even when I was far away from any entity.
So I changed the way the position is selected: I check whether the distanceToRay of the point is smaller than half the size of my entities (material.size / 2).
if(intersection != null)
{
    for(var i = 0; i < intersections.length; i++)
    {
        var testPoint = intersections[i];
        if(material.size/2 > testPoint.distanceToRay)
        {
            point = intersections[i].point;
            console.log(point);
            sphere.position.copy(point);
            break;
        }
    }
}
Now it works fine, but I'd like to understand why it was not working before. Why is that verification not done in the intersection process?
I would also like to know whether my second approach is OK, or whether it's a weird way to do what I want.
Is there a better way to do the same?
PS: I am new to all this kind of stuff, so if I am wrong please explain it to me :D