I'm using Three.js to procedurally generate a regular N-gon based on a user-provided number of sides. The long-term goal is to use this as the first step in rendering a polyhedral prism.
I'm using the solution discussed here to calculate the vertices of the N-gon.
I'm then using the technique discussed here to generate faces on the N-gon.
My first attempt to produce the necessary Geometry object resulted in the following, which doesn't seem to render anything after being added to a Mesh:
function createGeometry (n, circumradius) {
    var geometry = new THREE.Geometry(),
        vertices = [],
        faces = [],
        x;

    // Generate the vertices of the n-gon.
    for (x = 1; x <= n; x++) {
        geometry.vertices.push(new THREE.Vector3(
            circumradius * Math.sin((Math.PI / n) + (x * ((2 * Math.PI) / n))),
            circumradius * Math.cos((Math.PI / n) + (x * ((2 * Math.PI) / n))),
            0
        ));
    }

    // Generate the faces of the n-gon.
    for (x = 0; x < n - 2; x++) {
        geometry.faces.push(new THREE.Face3(0, x + 1, x + 2));
    }

    geometry.computeBoundingSphere();

    return geometry;
}
After toying with that for too long, I discovered the ShapeGeometry class. This uses the same vertex algorithm as the above example, but this one renders properly after being added to a Mesh:
function createShapeGeometry (n, circumradius) {
    var shape = new THREE.Shape(),
        vertices = [],
        x;

    // Calculate the vertices of the n-gon.
    for (x = 1; x <= n; x++) {
        vertices.push([
            circumradius * Math.sin((Math.PI / n) + (x * ((2 * Math.PI) / n))),
            circumradius * Math.cos((Math.PI / n) + (x * ((2 * Math.PI) / n)))
        ]);
    }

    // Start at the last vertex.
    shape.moveTo.apply(shape, vertices[n - 1]);

    // Connect each vertex to the next in sequential order.
    for (x = 0; x < n; x++) {
        shape.lineTo.apply(shape, vertices[x]);
    }

    // It's shape and bake... and I helped!
    return new THREE.ShapeGeometry(shape);
}
What's wrong with the Geometry example that's resolved with the ShapeGeometry example?
I don't think it's an issue with camera or positioning because replacing the complex vertex calculations with simpler whole numbers produces a polygon without an issue, provided the values make sense.
The reason I'm asking is because, as I mentioned initially, I'd like to eventually use this as the first step in rendering a polyhedron. ShapeGeometry objects can be extruded to give them depth, but even with the options that Three.js makes available, this may not be enough for my needs in the long run as the required polyhedra become more irregular.
Any thoughts?
You can create prisms using THREE.CylinderGeometry; for an n-sided prism, you could use
// radiusAtTop, radiusAtBottom, height, segmentsAroundRadius, segmentsAlongHeight
var nPrism = new THREE.CylinderGeometry( 30, 30, 80, n, 4 );
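For completeness, a rough sketch of putting that geometry on screen (the material choice and the scene variable are assumptions, not part of the snippet above):
var prismMaterial = new THREE.MeshLambertMaterial({ color: 0x2194ce });
var prismMesh = new THREE.Mesh(nPrism, prismMaterial);
scene.add(prismMesh); // assumes `scene` and a light source already exist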
You can also use CylinderGeometry to create pyramids and frustums; for more examples of built-in shapes, you can check out:
http://stemkoski.github.io/Three.js/Shapes.html
Since you also sound like you may be interested in more general polyhedra, you might also want to check out:
http://stemkoski.github.io/Three.js/Polyhedra.html
which includes models of the Platonic Solids, Archimedean Solids, Prisms, Antiprisms, and Johnson Solids; however, in that program the polyhedra are "thick" from using spheres for vertices and cylinders for edges.
Hope this helps!
Your function works as expected.
Look at this fiddle: http://jsfiddle.net/Elephanter/mUah5/
It is a modified three.js fiddle using your createGeometry function.
So the problem is somewhere else, not in the createGeometry function.
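For what it's worth, one common cause of an "invisible" geometry outside the function (a guess about the rest of the setup, which isn't shown here) is a single-sided material combined with face winding that points away from the camera. A quick way to rule that out is to render double-sided:
var material = new THREE.MeshBasicMaterial({ color: 0xff0000, side: THREE.DoubleSide });
var mesh = new THREE.Mesh(createGeometry(6, 50), material); // 6 and 50 are example arguments
scene.add(mesh); // assumes `scene` already exists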
Forgive me for the long code example, but I couldn't figure out how to properly explain my question with any less code:
let c = document.querySelector("canvas");
let ctx = c.getContext("2d");

class BezierCurve {
  constructor(x1, y1, cpX, cpY, x2, y2) {
    this.f = 0;
    this.x1 = x1;
    this.y1 = y1;
    this.cpX = cpX;
    this.cpY = cpY;
    this.x2 = x2;
    this.y2 = y2;
    this.pointCache = this.calcPoints();
  }

  calcX(t) { return (1 - t) * (1 - t) * this.x1 + 2 * (1 - t) * t * this.cpX + t * t * this.x2; }
  calcY(t) { return (1 - t) * (1 - t) * this.y1 + 2 * (1 - t) * t * this.cpY + t * t * this.y2; }

  calcPoints() {
    const step = 0.001, segments = [];
    for (let i = 0; i <= 1 - step; i += step) {
      let dx = this.calcX(i) - this.calcX(i + step);
      let dy = this.calcY(i) - this.calcY(i + step);
      segments.push(Math.sqrt(dx * dx + dy * dy));
    }
    const len = segments.reduce((a, c) => a + c, 0);
    let result = [], l = 0, co = 0;
    for (let i = 0; i < segments.length; i++) {
      l += segments[i];
      co += step;
      result.push({ t: l / len, co });
    }
    return result;
  }

  draw() {
    ctx.beginPath();
    ctx.moveTo(this.x1, this.y1);
    ctx.quadraticCurveTo(this.cpX, this.cpY, this.x2, this.y2);
    ctx.stroke();
  }

  tick(amount = 0.001) {
    this.f = this.f < 1 ? this.f + amount : 0;
  }
}

function drawCircle(x, y, r) {
  ctx.beginPath();
  ctx.arc(x, y, r, 0, 2 * Math.PI);
  ctx.fill();
}

let a = new BezierCurve(25, 25, 80, 250, 100, 50);
let b = new BezierCurve(225, 25, 280, 250, 300, 50);

function draw(curve, fraction) {
  let x = curve.calcX(fraction);
  let y = curve.calcY(fraction);
  curve.draw();
  drawCircle(x, y, 5);
  curve.tick();
}

// Inefficient but using this instead of binary search just to save space in code example
function findClosestNumInArray(arr, goal) {
  return arr.reduce((prev, cur) => Math.abs(cur.t - goal) < Math.abs(prev.t - goal) ? cur : prev);
}

function drawLoop(elapsed) {
  c.width = 600;
  c.height = 600;
  draw(a, a.f);
  let closest = findClosestNumInArray(b.pointCache, b.f).co;
  draw(b, closest);
  requestAnimationFrame(drawLoop);
}

drawLoop(0);

<canvas></canvas>
Okay, so, to explain what's going on: if you hit Run code snippet you'll see that there are two curves, which I'll refer to as a (left one) and b (right one).
You may notice that the dot moving along a's curve starts off fast, then slows down around the curve, and then speeds up again. This is despite the fractional part being incremented by a constant 0.001 each frame.
The dot for b, on the other hand, moves at a constant velocity throughout the entire iteration. This is because for b I use the pointCache mapping that I precompute for the curve. The calcPoints function builds that mapping: for each raw parameter value co it records the fraction of the total arc length covered so far as t, so the "proper" parameter to feed into calcX/calcY for a desired percentage along the curve can be looked up.
Anyways, this all works, but my issue is that the precomputation calcPoints is expensive, and referencing a lookup table to find the actual fractional part along the line for a percentage is inexact and requires significant memory usage. I was wondering if there was a better way.
What I'm looking for is a way to do something like curve.calcX(0.5) and actually get the 50% mark along the curve. Because currently the existing equation does not do this, and I instead have to do this costly workaround.
We can try to modify your method to be a bit more efficient. It is still not the exact solution you hope for but it might do the trick.
Instead of repeatedly evaluating the Bézier curve at parameter values differing by 0.001 (where you do not reuse the computation from the previous step) we could use the idea of subdivision. Do you know De Casteljau's algorithm? It not only evaluates the Bézier curve at a given parameter t, it also provides you with means to subdivide the curve in two: one Bézier curve that equals the original curve on the interval [0, t] and another one that equals the original curve on [t, 1]. Their control polygons are a much better approximation of the curves than the original control polygon.
So, you would proceed as follows:
Use De Casteljau's algorithm to subdivide the curve at t=0.5.
Use De Casteljau's algorithm to subdivide the first segment at t=0.25.
Use De Casteljau's algorithm to subdivide the second segment at t=0.75.
Proceed recursively in the same manner until a prescribed depth, which depends on the precision you would like to achieve.
The control polygons of these segments will be your (piecewise linear) approximation of the original Bézier curve. Either use them to precompute the parameters as you have done so far; or plot this approximation directly instead of using quadraticCurveTo with the original curve. Generating this approximation should be much faster than your procedure.
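For concreteness, here is a minimal sketch of one subdivision step for a quadratic Bézier (the curve type used in the question); the point and function names are illustrative:
// Linear interpolation between two points.
function lerp(a, b, t) {
    return { x: a.x + (b.x - a.x) * t, y: a.y + (b.y - a.y) * t };
}

// De Casteljau: split the quadratic Bézier {p0, p1, p2} at parameter t into
// two quadratics covering [0, t] and [t, 1].
function subdivideQuadratic(p0, p1, p2, t) {
    const q0 = lerp(p0, p1, t);
    const q1 = lerp(p1, p2, t);
    const r = lerp(q0, q1, t); // the point on the curve at t
    return [
        { p0: p0, p1: q0, p2: r }, // curve on [0, t]
        { p0: r, p1: q1, p2: p2 }  // curve on [t, 1]
    ];
}
A few levels of recursion (say 5 or 6) already give dozens of sub-curves whose control polygons hug the curve closely; summing their segment lengths replaces the thousand-step loop in calcPoints.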
You can read more about this idea in Sections 3.3, 3.4 and 3.5 of Prautzsch, Boehm and Paluszny: Bézier and B-spline techniques. They also provide an estimate how quickly does this procedure converge to the original curve.
Not totally sure this will work, but are you aware of Horner's Scheme for plotting Bezier points?
//***************************************************************************
//
// This routine plots out a bezier curve, with multiple calls to hornbez()
//
//***************************************************************************
function bezierCalculate(context, NumberOfDots, color, dotSize) {
    // This routine uses Horner's Scheme to draw entire Bezier Line...
    for (var t = 0.0; t < 1.0001; t = t + 1.0 / NumberOfDots) {
        xTemp = hornbez(numberOfControlPoints - 1, "x", t);
        yTemp = hornbez(numberOfControlPoints - 1, "y", t);
        drawDot(context, xTemp, yTemp, dotSize, color);
    }
}

//***************************************************************************
//
// This routine uses Horner's scheme to compute one coordinate
// value of a Bezier curve. Has to be called
// for each coordinate (x, y, and/or z) of a control polygon.
// See Farin, pg 59, 60. Note: This technique is also called
// "nested multiplication".
// Input:  degree: degree of curve.
//         coeff:  array with coefficients of curve.
//         t:      parameter value.
// Output: coordinate value.
//
//***************************************************************************
function hornbez(degree, xORy, t) {
    var i;
    var n_choose_i; /* shouldn't be too large! */
    var fact, t1, aux;

    t1 = 1 - t;
    fact = 1;
    n_choose_i = 1;
    aux = FrameControlPt[0][xORy] * t1;

    /* starting the evaluation loop */
    for (i = 1; i < degree; i++) {
        fact = fact * t;
        n_choose_i = n_choose_i * (degree - i + 1) / i; /* always int! */
        aux = (aux + fact * n_choose_i * FrameControlPt[i][xORy]) * t1;
    }

    aux = aux + fact * t * FrameControlPt[degree][xORy];
    return aux;
}
Not sure exactly where you are going here, but here's a reference of something I wrote a while ago... And for the contents of just the Bezier iframe, see this... My implied question? Is Bezier the right curve for you?
I want to draw StackOverflow's logo with this Neural Network:
The NN should ideally become [r, g, b] = f([x, y]). In other words, it should return RGB colors for a given pair of coordinates. The FFNN works pretty well for simple shapes like a circle or a box. For example, after several thousand epochs a circle looks like this:
Try it yourself: https://codepen.io/adelriosantiago/pen/PoNGeLw
However, since StackOverflow's logo is far more complex, even after several thousand iterations the FFNN's results are somewhat poor:
From left to right:
StackOverflow's logo at 256 colors.
With 15 hidden neurons: The left handle never appears.
50 hidden neurons: Pretty poor result in general.
0.03 as learning rate: Shows blue in the results (blue is not in the original image)
A time-decreasing learning rate: The left handle appears but other details are now lost.
Try it yourself: https://codepen.io/adelriosantiago/pen/xxVEjeJ
Some parameters of interest are the synaptic.Architect.Perceptron definition and the learningRate value.
How can I improve the accuracy of this NN?
Could you improve the snippet? If so, please explain what you did. If there is a better NN architecture to tackle this type of job could you please provide an example?
Additional info:
Artificial Neural Network library used: Synaptic.js
To run this example in your localhost: See repository
By adding another layer, you get better results:
let perceptron = new synaptic.Architect.Perceptron(2, 15, 10, 3)
There are also small improvements you can make to efficiency (marginally). Here is my optimized code:
const width = 125
const height = 125
const outputCtx = document.getElementById("output").getContext("2d")
const iterationLabel = document.getElementById("iteration")
const stopAtIteration = 3000

let perceptron = new synaptic.Architect.Perceptron(2, 15, 10, 3)
let iteration = 0

let inputData = (() => {
  const tempCtx = document.createElement("canvas").getContext("2d")
  tempCtx.drawImage(document.getElementById("input"), 0, 0)
  return tempCtx.getImageData(0, 0, width, height)
})()

const getRGB = (img, x, y) => {
  var k = (height * y + x) * 4;
  return [
    img.data[k] / 255,     // R
    img.data[k + 1] / 255, // G
    img.data[k + 2] / 255, // B
    //img.data[(height * y + x) * 4 + 3], // Alpha not used
  ]
}

const paint = () => {
  var imageData = outputCtx.getImageData(0, 0, width, height)
  for (let x = 0; x < width; x++) {
    for (let y = 0; y < height; y++) {
      var rgb = perceptron.activate([x / width, y / height])
      var k = (height * y + x) * 4;
      imageData.data[k] = rgb[0] * 255
      imageData.data[k + 1] = rgb[1] * 255
      imageData.data[k + 2] = rgb[2] * 255
      imageData.data[k + 3] = 255 // Alpha not used
    }
  }
  outputCtx.putImageData(imageData, 0, 0)
  setTimeout(train, 0)
}

const train = () => {
  iterationLabel.innerHTML = ++iteration
  if (iteration > stopAtIteration) return

  let learningRate = 0.01 / (1 + 0.0005 * iteration) // Attempt with dynamic learning rate
  //let learningRate = 0.01 // Attempt with non-dynamic learning rate

  for (let x = 0; x < width; x += 1) {
    for (let y = 0; y < height; y += 1) {
      perceptron.activate([x / width, y / height])
      perceptron.propagate(learningRate, getRGB(inputData, x, y))
    }
  }
  paint()
}

const startTraining = (btn) => {
  btn.disabled = true
  train()
}
EDIT: I made another CodePen with even better results:
https://codepen.io/xurei/pen/KKzWLxg
It is likely to be over-fitted BTW.
The perceptron definition:
let perceptron = new synaptic.Architect.Perceptron(2, 8, 15, 7, 3)
Taking some insights from the lecture slides of Bhiksha Raj (slide 62 onwards), summarized below:
Each node can be viewed as a linear classifier, and a combination of several nodes in a single layer of a neural network can approximate basic shapes. For example, a rectangle can be formed by 4 nodes, one for each line, with the shape then approximated by the final output layer.
A complex shape such as a circle may require an unbounded number of nodes in a single layer, and the same likely holds for a single layer approximating two disjoint shapes (a non-overlapping triangle and rectangle). However, such shapes can still be learnt using more than one hidden layer, where the first layer learns the basic shapes and the second layer approximates their disjoint combinations.
Thus, you can treat this logo as a combination of disjoint rectangles (5 rectangles for the orange part and 3 for the grey part). We can use at least 32 nodes in the 1st hidden layer and a few nodes in the 2nd hidden layer. However, we don't have control over what each node learns, so a few more neurons than strictly required should be helpful.
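As a concrete (untested) starting point following that reasoning, the perceptron from the question could be widened accordingly; the exact layer sizes here are an assumption to experiment with, not a verified result:
// Illustrative only: ~32 nodes to cover the component rectangles, plus a
// smaller second hidden layer to combine them into the final shape.
let perceptron = new synaptic.Architect.Perceptron(2, 32, 8, 3)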
I have a Mesh created with a BufferGeometry.
I also have the coordinates of where my mouse intersects the Mesh, using the Raycaster.
I am trying to detect faces within (and touching) a radius from the intersection point.
Once I detect the "tangent" faces, I then want to color the faces. Because I am working with a BufferGeometry, I am manipulating the buffer attributes on my geometry.
Here is my code:
let vertexA;
let vertexB;
let vertexC;
let intersection;

const radius = 3;
const color = new THREE.Color('red');

const positionsAttr = mesh.geometry.attributes.position;
const colorAttr = mesh.geometry.attributes.color;

// on every mouseMove event, do below:
vertexA = new THREE.Vector3();
vertexB = new THREE.Vector3();
vertexC = new THREE.Vector3();
intersection = raycaster.intersectObject(mesh)[0].point;

// function to detect tangent edge
function isEdgeTouched(v1, v2, point, radius) {
    const line = new THREE.Line3();
    const closestPoint = new THREE.Vector3();
    line.set(v1, v2);
    line.closestPointToPoint(point, true, closestPoint);
    return point.distanceTo(closestPoint) < radius;
}

// function to color a face
function colorFace(faceIndex) {
    colorAttr.setXYZ(faceIndex * 3 + 0, color.r, color.g, color.b);
    colorAttr.setXYZ(faceIndex * 3 + 1, color.r, color.g, color.b);
    colorAttr.setXYZ(faceIndex * 3 + 2, color.r, color.g, color.b);
    colorAttr.needsUpdate = true;
}

// iterate over each face, color it if tangent
for (let i = 0; i < positionsAttr.count / 3; i++) {
    vertexA.fromBufferAttribute(positionsAttr, i * 3 + 0);
    vertexB.fromBufferAttribute(positionsAttr, i * 3 + 1);
    vertexC.fromBufferAttribute(positionsAttr, i * 3 + 2);
    if (isEdgeTouched(vertexA, vertexB, intersection, radius)
        || isEdgeTouched(vertexB, vertexC, intersection, radius)
        || isEdgeTouched(vertexC, vertexA, intersection, radius)) {
        colorFace(i);
    }
}
While this code works, it seems to perform very poorly, especially when I am working with a geometry with many, many faces. When I checked the performance monitor in Chrome DevTools, I noticed that both the isEdgeTouched and colorFace functions take up too much time on each iteration for a face.
Is there a way to improve this algorithm, or is there a better algorithm to use to detect adjacent faces?
Edit
I got some help from the THREE.js slack channel, and modified the algorithm to use Three's Sphere. I am now no longer doing "edge" detection, but instead checking whether a face is within the Sphere
Updated code below:
const sphere = new THREE.Sphere(intersection, radius);

// now checking if each vertex of a face is within the sphere
// if all are, then color the face at index i
for (let i = 0; i < positionsAttr.count / 3; i++) {
    vertexA.fromBufferAttribute(positionsAttr, i * 3 + 0);
    vertexB.fromBufferAttribute(positionsAttr, i * 3 + 1);
    vertexC.fromBufferAttribute(positionsAttr, i * 3 + 2);
    if (sphere.containsPoint(vertexA)
        && sphere.containsPoint(vertexB)
        && sphere.containsPoint(vertexC)) {
        colorFace(i);
    }
}
When I tested this in my app, I noticed that the performance has definitely improved from the previous version. However, I am still wondering if I could improve this further.
This seems to be a classic Nearest Neighbors problem.
You can narrow the search, finding the nearest triangles to a given point very fast, by building a Bounding Volume Hierarchy (BVH) for the mesh, such as an AABB-tree.
BVH:
https://en.m.wikipedia.org/wiki/Bounding_volume_hierarchy
AABB-Tree:
https://www.azurefromthetrenches.com/introductory-guide-to-aabb-tree-collision-detection/
Then you can run a range query against the BVH using a sphere or a box of a given radius. That amounts to traversing the BVH with the sphere/box "query", discarding quickly and very early the bounding volume nodes that do not intersect it. In the end, the real distance or intersection test is made only against triangles whose bounding volume intersects the sphere/box "query", typically a very small fraction of the triangles.
The complexity of the query against the BVH is O(log n), in contrast with your approach, which is O(n).
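As a rough illustration of the traversal (not a full implementation; the node layout and the bvhRoot variable are assumptions), a range query against such a tree might look like this, reusing three.js's Box3 and Sphere helpers:
// Assumed node shape: { box: THREE.Box3, left, right, faceIndices },
// where faceIndices is only set on leaf nodes.
function querySphere(node, sphere, results) {
    if (!node || !node.box.intersectsSphere(sphere)) return results; // prune the whole subtree
    if (node.faceIndices) {
        results.push(...node.faceIndices); // leaf: only these few faces need the exact test
    } else {
        querySphere(node.left, sphere, results);
        querySphere(node.right, sphere, results);
    }
    return results;
}

const candidates = querySphere(bvhRoot, new THREE.Sphere(intersection, radius), []);
// run the containsPoint / colorFace check only on `candidates` instead of every face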
EDIT: I updated the program with the answer and it works great!
I am making a program (feel free to try it out) that lets users draw polygons which it then triangulates. They can click to add vertices and hit enter to triangulate. Anyways, the algorithm works fine as long as I tell it if the points were drawn in a clockwise or counterclockwise fashion (right now I have it set only to work with clockwise polygons). I have been trying to figure this out for days, but have no idea how to determine whether the points are clockwise or counterclockwise. Try drawing shapes with the program mentioned earlier to get a better idea, you can experience what I am talking about better than I can try to explain it.
Here is how the points are defined:
function Point(x, y) {
    this.x = x;
    this.y = y;
}

var vertices = [];

// Called on click
function addPoint(mouseX, mouseY) {
    vertices.push(new Point(mouseX, mouseY));
}
Here is an image of a clockwise polygon:
Here is an image of a counterclockwise polygon:
If you could help me figure out how to determine the "clockwise-ness" of the points, I would be very grateful!
Compute the polygon area using the shoelace formula, but without the absolute value sign. In standard mathematical coordinates (y pointing up), a positive result means the points are ordered counterclockwise and a negative result means clockwise; in canvas/screen coordinates (y pointing down), the sign flips, so a positive result means the points are visually clockwise.
function polygonArea() {
    var area = 0;
    for (var i = 0, j; i < vertices.length; i++) {
        j = (i + 1) % vertices.length;
        area += vertices[i].x * vertices[j].y;
        area -= vertices[j].x * vertices[i].y;
    }
    return area / 2;
}

var clockwise = polygonArea() > 0;
In case someone is using three.js, ShapeUtils comes with a built-in isClockWise method, which internally uses the area method and checks the sign of the calculated area.
isClockWise: function ( pts ) {
    return ShapeUtils.area( pts ) < 0;
}
The ShapeUtils.isClockWise Method can be found here.
area: function ( contour ) {
    var n = contour.length;
    var a = 0.0;
    for ( var p = n - 1, q = 0; q < n; p = q ++ ) {
        a += contour[ p ].x * contour[ q ].y - contour[ q ].x * contour[ p ].y;
    }
    return a * 0.5;
},
The ShapeUtils.area Method can be found here.
A general idea would be to take a look at the convex hull of your polygon and deduce the orientation from there. However, I think that you do not need to build the whole hull to find the orientation, but just one segment belonging to it.
So:
Find two points of your polygon such that all the other points are on one side of the line through them.
If all the points are on the left (just check one of the points), it's counterclockwise. If they are on the right, it's clockwise.
Example:
On the top figure: 4-5 leaves the figure on the right, 5-11 leaves the figure on the right, ...
On the bottom figure: 6-7 leaves the figure on the left, 7-14 leaves the figure on the left, ...
Warning: While "walking" on your polygon, do not restart the numbering, otherwise it will be wrong. On the top figure, 4-(n-1) leaves the figure on the left!
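A closely related, minimal way to implement that idea with the Point/vertices structures from the question (note that in canvas coordinates the y axis points down, which affects the sign):
// Pick a vertex guaranteed to lie on the convex hull (smallest y, ties broken
// by smallest x) and look at the turn made by its two adjacent edges.
function isClockwise(vertices) {
    var h = 0;
    for (var i = 1; i < vertices.length; i++) {
        if (vertices[i].y < vertices[h].y ||
            (vertices[i].y === vertices[h].y && vertices[i].x < vertices[h].x)) {
            h = i;
        }
    }
    var prev = vertices[(h - 1 + vertices.length) % vertices.length];
    var curr = vertices[h];
    var next = vertices[(h + 1) % vertices.length];
    var cross = (curr.x - prev.x) * (next.y - curr.y) -
                (curr.y - prev.y) * (next.x - curr.x);
    return cross > 0; // in canvas coordinates (y down), a positive cross product means clockwise
}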
Your intuitive definition of clockwisedness is not well defined. For example, If I draw a horseshoe:
  /---a-b--\
 /  _d_c_   \
/  /     \   \
|  |     |   |
|  |     |   |
\  \     /   /
 \  \   /   /
  --     --
If 0 = a < b < c < d and I look at a and b, I would conclude from your description that the shape has been drawn clockwise, but if 0 = c < d < a < b I would conclude that the shape has been drawn anticlockwise. Since both of these scenarios involve the same direction in which the points were drawn, just from different starting points, I can only conclude that your definition is lacking.
The horseshoe I drew isn't the best; the idea is that it is almost a circle with just a small hole at the bottom, to allow the other side to be drawn in the opposite direction.
If you are interested in defining things more strictly, then I suggest something along the following lines:
Considering any finite simple polygon as separating the plane into two distinct areas (one finite and one infinite), we can always consider the finite area to be the interior of the polygon. In such a scenario we define a vertex ordering to be clockwise iff the order of the points runs with the interior along its right-hand side (exterior on the left). This is called curve orientation.
Once you have this more solid definition, implementation can be as simple as a crossing-parity test. Take the midpoint of any ordered pair, say 0 and 1, take a line segment to the right of the ordered pair (at any angle, say perpendicular), and count how many intersections it has with the other line segments: the curve is clockwise iff the number is odd.
This is simple to implement, linear in time O(n), and adds constant space O(1).
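A minimal sketch of that parity test, again for the Point/vertices array from the question (canvas coordinates); it ignores the degenerate cases where the ray passes exactly through a vertex or runs parallel to an edge:
function isClockwiseByParity(vertices) {
    var n = vertices.length;
    var a = vertices[0], b = vertices[1];
    // Midpoint of the first edge, and a long ray to the "right" of its direction.
    var mid = { x: (a.x + b.x) / 2, y: (a.y + b.y) / 2 };
    var dx = b.x - a.x, dy = b.y - a.y;
    var far = { x: mid.x - dy * 1e6, y: mid.y + dx * 1e6 };

    // Standard segment-segment intersection test (strict, endpoints excluded).
    function intersects(p1, p2, p3, p4) {
        var d = (p2.x - p1.x) * (p4.y - p3.y) - (p2.y - p1.y) * (p4.x - p3.x);
        if (d === 0) return false; // parallel
        var t = ((p3.x - p1.x) * (p4.y - p3.y) - (p3.y - p1.y) * (p4.x - p3.x)) / d;
        var u = ((p3.x - p1.x) * (p2.y - p1.y) - (p3.y - p1.y) * (p2.x - p1.x)) / d;
        return t > 0 && t < 1 && u > 0 && u < 1;
    }

    var crossings = 0;
    for (var i = 1; i < n; i++) { // skip the edge 0-1 that the ray starts on
        if (intersects(mid, far, vertices[i], vertices[(i + 1) % n])) crossings++;
    }
    return crossings % 2 === 1; // odd number of crossings => clockwise
}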
This is a function specialized for OpenLayers. As you can see, the condition for a clockwise polygon is area < 0; this reference confirms it.
function IsClockwise(feature)
{
    if (feature.geometry == null) return -1;

    var vertices = feature.geometry.getVertices();
    var area = 0;

    for (var i = 0, j; i < vertices.length; i++)
    {
        j = (i + 1) % vertices.length;
        area += vertices[i].x * vertices[j].y;
        area -= vertices[j].x * vertices[i].y;
        // console.log(area);
    }

    return (area < 0);
}
I've been banging my head on the keyboard for about one week now and I can't figure out a proper solution for my problem. I think it's more math-related than HTML-canvas-related... hopefully someone can point me in the right direction.
I have an HTML canvas where users can draw lines using their mouse and the very simple moveTo() and lineTo() functions. When the user is done I save the coords in a MongoDB. When the user hits the page again later I want to display his drawing, BUT I don't want to load the entire picture with all stored coordinates at once; I want to return it in tiles (for better performance by caching each tile).
The tiles are 200x200 pixels (fixed offsets and width, starting at 0 -> 200-> 400 ->...).
Now, when the user draws a line from, let's say, 50,50 (x/y) to 250,250 (x/y), there's only one dot in each bounding box (tile). I need to split the lines and calculate the start and ending points of each line in each bounding box (tile). Otherwise I can't draw the image partially (in tiles). It gets even more complicated when a single line crosses multiple bounding boxes (tiles). For instance: 100,100 (x/y) -> -1234,-300 (x/y).
The lines can go from any point (+/-) to ANY direction for ANY distance.
Of course I looked at Bresenham's good old algorithm and it worked - partially, but it seems like the longest and most resource-hungry solution to me.
So, the reason I'm here is that I hope someone can point me in the right direction with (perhaps) another approach to calculating the start/end points of my lines for each bounding box.
Code examples are very welcome in JavaScript or PHP.
Thank you for reading and thinking about it :)
tl;dr: Use planes, maths explained below. There's a canvas example at the bottom.
Given that all of your cells are axis-aligned bounding boxes, you could use the plane equation to find the intersection of your line with the edges.
Planes
You can think of your box as a set of four geometric planes. Each plane has a normal, or a vector of length one, indicating which direction is the "front" of the plane. The normals for the planes that make up your cell's sides would be:
top = {x: 0, y: -1};
bottom = {x: 0, y: 1};
left = {x: -1, y: 0};
right = {x: 1, y: 0};
Given a point on the plane, the plane has the equation:
distance = (normal.x * point.x) + (normal.y * point.y)
You can use this equation to calculate the distance of the plane. In this case, you know the top-left corner of your box (let's say x is 10 and y is 100) is on the top plane, so you can do:
distance = (0 * 10) + (-1 * 100)
distance = -100
Checking a point against a plane
Once you have the distance, you can reuse the equation to check where any point is, relative to the plane. For a random point p (where x is -50 and y is 90), you can do:
result = (normal.x * p.x) + (normal.y * p.y) - distance
result = (0 * -50) + (-1 * 90) - (-100)
result = 0 + (-90) - (-100)
result = -90 + 100
result = 10
There are two possible results:
if (result >= 0) {
    // point is in front of the plane, or coplanar.
    // zero means it is coplanar, but we don't need to distinguish.
} else {
    // point is behind the plane
}
Checking a line against a plane
You can check both endpoints of a line from a to b in this way:
result1 = (normal.x * a.x) + (normal.y * a.y) - distance
result2 = (normal.x * b.x) + (normal.y * b.y) - distance
There are four possible results:
if (result1 >= 0 && result2 >= 0) {
    // the line is completely in front of the plane
} else if (result1 < 0 && result2 < 0) {
    // the line is completely behind the plane
} else if (result1 >= 0 && result2 < 0) {
    // a is in front, but b is behind, line is entering the plane
} else if (result1 < 0 && result2 >= 0) {
    // a is behind, but b is in front, line is exiting the plane
}
When the line intersects the plane, you want to find the point of intersection. It helps to think of a line in vector terms:
a + t * (b - a)
If t == 0, you are at the start of the line, and t == 1 is the end of the line. In this context, you can calculate the time of intersection as:
time = result1 / (result1 - result2)
And the point of intersection as:
hit.x = a.x + (b.x - a.x) * time
hit.y = a.y + (b.y - a.y) * time
Checking a line against the box
With that math, you can figure out the lines of intersection with your box. You just need to test the endpoints of your line against each plane, and find the minimum and maximum values of time.
Because your box is a convex polygon, there is an early out in this check: if the line is completely in front of any one plane in your box, it cannot intersect with your box. You can skip checking the rest of the planes.
In JavaScript, your result might look something like this:
/**
 * Find the points where a line intersects a box.
 *
 * @param a Start point for the line.
 * @param b End point for the line.
 * @param tl Top left of the box.
 * @param br Bottom right of the box.
 * @return Object {nearTime, farTime, nearHit, farHit}, or false.
 */
function intersectLineBox(a, b, tl, br) {
    var nearestTime = -Infinity;
    var furthestTime = Infinity;

    var planes = [
        {nx: 0, ny: -1, dist: -tl.y},  // top
        {nx: 0, ny: 1, dist: br.y},    // bottom
        {nx: -1, ny: 0, dist: -tl.x},  // left
        {nx: 1, ny: 0, dist: br.x}     // right
    ];

    for (var i = 0; i < 4; ++i) {
        var plane = planes[i];
        var nearDist = (plane.nx * a.x + plane.ny * a.y) - plane.dist;
        var farDist = (plane.nx * b.x + plane.ny * b.y) - plane.dist;

        if (nearDist >= 0 && farDist >= 0) {
            // both are in front of the plane, line doesn't hit box
            return false;
        } else if (nearDist < 0 && farDist < 0) {
            // both are behind the plane
            continue;
        } else {
            var time = nearDist / (nearDist - farDist);
            if (nearDist >= farDist) {
                // entering the plane
                if (time > nearestTime) {
                    nearestTime = time;
                }
            } else {
                // exiting the plane
                if (time < furthestTime) {
                    furthestTime = time;
                }
            }
        }
    }

    if (furthestTime < nearestTime) {
        return false;
    }

    return {
        nearTime: nearestTime,
        farTime: furthestTime,
        nearHit: {
            x: a.x + (b.x - a.x) * nearestTime,
            y: a.y + (b.y - a.y) * nearestTime
        },
        farHit: {
            x: a.x + (b.x - a.x) * furthestTime,
            y: a.y + (b.y - a.y) * furthestTime
        }
    };
}
If this is still too slow, you can also do broadphase culling by dividing the world up into big rects, and assigning lines to those rects. If your line and cell aren't in the same rect, they don't collide.
I've uploaded a canvas example of this.
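To connect this back to the 200x200 tiles from the question, a rough broadphase sketch (the tile-key format is only an illustration) could bucket each stored line by the tiles its bounding box overlaps, and only then run intersectLineBox against those candidate tiles:
var TILE = 200;

// Return the keys of all tiles that the segment's bounding box overlaps.
function candidateTiles(a, b) {
    var tiles = [];
    var minX = Math.floor(Math.min(a.x, b.x) / TILE);
    var maxX = Math.floor(Math.max(a.x, b.x) / TILE);
    var minY = Math.floor(Math.min(a.y, b.y) / TILE);
    var maxY = Math.floor(Math.max(a.y, b.y) / TILE);
    for (var tx = minX; tx <= maxX; tx++) {
        for (var ty = minY; ty <= maxY; ty++) {
            tiles.push(tx + "," + ty);
        }
    }
    return tiles;
}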
This looks like you'd have to figure out at what point each line intersects with the bounds of each tile.
Check out the answer to this question: Is there an easy way to detect line segment intersections?
The answers don't provide code, but it shouldn't be too hard to convert the equations to PHP or Javascript...
EDIT:
Why, exactly, do you want to split the lines? I understand you don't want to load all the lines at once, since that could take a while. But what's wrong with just loading and drawing the first few lines, and drawing the remainder later on?
Methinks that would be a lot simpler than having to cut up each line to fit in a specific tile. Tiling is a nice way of optimizing bitmap loading; I don't think it's very appropriate for vector-based drawings.
You could also consider sending an Ajax request, and start drawing the whole thing whenever it comes in; this would not interfere with the loading of the page.
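As a rough sketch of that idea (the segment record format is an assumption about how the strokes come back from the server), the drawing can be chunked so the page stays responsive while the full picture streams in:
// Draw `segments` ({x1, y1, x2, y2} records) in batches of `chunkSize` per frame.
function drawInChunks(ctx, segments, chunkSize) {
    var i = 0;
    function drawNext() {
        var end = Math.min(i + chunkSize, segments.length);
        ctx.beginPath();
        for (; i < end; i++) {
            ctx.moveTo(segments[i].x1, segments[i].y1);
            ctx.lineTo(segments[i].x2, segments[i].y2);
        }
        ctx.stroke();
        if (i < segments.length) {
            requestAnimationFrame(drawNext); // yield to the browser between batches
        }
    }
    drawNext();
}

// e.g. after the Ajax response arrives:
// drawInChunks(canvas.getContext("2d"), loadedSegments, 500);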