I'd like to get help from Geometry / Wolfram Mathematica people.
I want to visualize this 3D Rose in JavaScript (p5.js) environment.
This figure was originally generated in the Wolfram Language by Paul Nylander in 2004-2006, and below is his code:
Rose[x_, theta_] := Module[{
    phi = (Pi/2)Exp[-theta/(8 Pi)],
    X = 1 - (1/2)((5/4)(1 - Mod[3.6 theta, 2 Pi]/Pi)^2 - 1/4)^2},
  y = 1.95653 x^2 (1.27689 x - 1)^2 Sin[phi];
  r = X(x Sin[phi] + y Cos[phi]);
  {r Sin[theta], r Cos[theta], X(x Cos[phi] - y Sin[phi]), EdgeForm[]}
];
ParametricPlot3D[
  Rose[x, theta], {x, 0, 1}, {theta, -2 Pi, 15 Pi},
  PlotPoints -> {25, 576}, LightSources -> {{{0, 0, 1}, RGBColor[1, 0, 0]}},
  Compiled -> False
]
I tried to implement that code in JavaScript like this:
function rose(){
  for(let theta = 0; theta < 2700; theta += 3){
    beginShape(POINTS);
    for(let x = 2.3; x < 3.3; x += 0.02){
      let phi = (180/2) * Math.exp(- theta / (8*180));
      let X = 1 - (1/2) * pow(((5/4) * pow((1 - (3.6 * theta % 360)/180), 2) - 1/4), 2);
      let y = 1.95653 * pow(x, 2) * pow((1.27689*x - 1), 2) * sin(phi);
      let r = X * (x*sin(phi) + y*cos(phi));
      let pX = r * sin(theta);
      let pY = r * cos(theta);
      let pZ = (-X * (x * cos(phi) - y * sin(phi)))-200;
      vertex(pX, pY, pZ);
    }
    endShape();
  }
}
But I got the result below.
Unlike the original, the petal at the top is too stretched.
I suspected that the line
let y = 1.95653 * pow(x, 2) * pow((1.27689*x - 1), 2) * sin(phi);
might need to be something like this instead...
let y = pow(1.95653*x, 2*pow(1.27689*x - 1, 2*sin(theta)));
But that went even further away from the original.
Maybe I'm asking a dumb question, but I've been stuck for several days.
If you see a mistake, please let me know.
Thank you in advance 🙏
Update:
I changed the x range to 0 ~ 1, as defined in the original.
I also simplified the JS code as below to isolate the error.
function rose_debug(){
  for(let theta = 0; theta < 15*PI; theta += PI/60){
    beginShape(POINTS);
    for(let x = 0.0; x < 1.0; x += 0.005){
      let phi = (PI/2) * Math.exp(- theta / (8*PI));
      let y = pow(x, 4) * sin(phi);
      let r = (x * sin(phi) + y * cos(phi));
      let pX = r * sin(theta);
      let pY = r * cos(theta);
      let pZ = x * cos(phi) - y * sin(phi);
      vertex(pX, pY, pZ);
    }
    endShape();
  }
}
But the result still has the wrong proportions ↓↓↓
Also, when I remove the sin(phi) term from the "let y = ..." line, like this
let y = pow(x, 4);
then I get a figure that somewhat resembles the original, like below 🤣
At this point I was starting to suspect a mistake in the original equation, but then I found another article by Jorge García Tíscar (in Spanish) that successfully implements the exact same 3D rose in the Wolfram Language.
So now I really don't understand how the original figure comes out of these equations 😇
Update 2: Solved
I followed the suggestion by Trentium (answer No. 2 below) to stick to 0 ~ 1 as the range of x and then multiply r and X by an arbitrary factor.
for(let x = 0; x < 1; x += 0.05){
r = r * 200;
X = X * 200;
Then I got this correct result, which looks exactly the same as the original 🥳
Simplified final code:
function rose_debug3(){
  for(let x = 0; x <= 1; x += 0.05){
    beginShape(POINTS);
    for(let theta = -2*PI; theta <= 15*PI; theta += 17*PI/2000){
      let phi = (PI / 2) * Math.exp(- theta / (8 * PI));
      let X = 1 - (1/2) * ((5/4) * (1 - ((3.6 * theta) % (2*PI))/PI) ** 2 - 1/4) ** 2;
      let y = 1.95653 * (x ** 2) * ((1.27689*x - 1) ** 2) * sin(phi);
      let r = X * (x * sin(phi) + y * cos(phi));
      if(0 < r){
        const factor = 200;
        let pX = r * sin(theta)*factor;
        let pY = r * cos(theta)*factor;
        let pZ = X * (x * cos(phi) - y * sin(phi))*factor;
        vertex(pX, pY, pZ);
      }
    }
    endShape();
  }
}
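For reference, I call this from an ordinary p5.js WEBGL sketch. The canvas size, stroke settings, and orbitControl() below are just one way to set that up, not anything the rose itself depends on:

function setup() {
  createCanvas(600, 600, WEBGL);
}

function draw() {
  background(255);
  orbitControl();      // built-in p5 camera control: drag to rotate, scroll to zoom
  stroke(200, 0, 40);
  strokeWeight(2);
  rose_debug3();
}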
The reason I got the vertically stretched figure at first was the range of x. I thought that changing the range of x would just scale the whole figure, but actually the range changes the shape like this:
(1): 0 ≤ x ≤ 1, (2): 0 ≤ x ≤ 1.2
(3): 0 ≤ x ≤ 1.5, (4): 0 ≤ x ≤ 2.0
(5): (4) flipped
Until now I had only seen results like (5) above and didn't realize that the correct shape was hiding inside that figure.
Thank you so much, Trentium, for kindly helping me!
Since this response is a significant departure from my earlier response, I am adding a new answer...
In rendering the rose algorithm in ThreeJS (sorry, I'm not a P5 guy) it became apparent that, when generating the points, only the points with a positive radius should be rendered. Otherwise, superfluous points are rendered far outside the rose petals.
(Note: When running the code snippet, use the mouse to zoom and rotate the rendering of the rose.)
<script type="module">
import * as THREE from 'https://cdn.jsdelivr.net/npm/three@0.115.0/build/three.module.js';
import { OrbitControls } from 'https://cdn.jsdelivr.net/npm/three@0.115.0/examples/jsm/controls/OrbitControls.js';
//
// Set up the ThreeJS environment.
//
var renderer = new THREE.WebGLRenderer();
renderer.setSize( window.innerWidth, window.innerHeight );
document.body.appendChild( renderer.domElement );

var camera = new THREE.PerspectiveCamera( 45, window.innerWidth / window.innerHeight, 1, 500 );
camera.position.set( 0, 0, 100 );
camera.lookAt( 0, 0, 0 );

var scene = new THREE.Scene();

let controls = new OrbitControls( camera, renderer.domElement );

//
// Create the points.
//
function rose( xLo, xHi, xCount, thetaLo, thetaHi, thetaCount ){

  let vertex = [];
  let colors = [];
  let radius = [];

  for( let x = xLo; x <= xHi; x += ( xHi - xLo ) / xCount ) {
    for( let theta = thetaLo; theta <= thetaHi; theta += ( thetaHi - thetaLo ) / thetaCount ) {

      let phi = ( Math.PI / 2 ) * Math.exp( -theta / ( 8 * Math.PI ) );
      let X = 1 - ( 1 / 2 ) * ( ( 5 / 4 ) * ( 1 - ( ( 3.6 * theta ) % ( 2 * Math.PI ) ) / Math.PI ) ** 2 - 1 / 4 ) ** 2;
      let y = 1.95653 * ( x ** 2 ) * ( ( 1.27689 * x - 1 ) ** 2 ) * Math.sin( phi );
      let r = X * ( x * Math.sin( phi ) + y * Math.cos( phi ) );

      //
      // Fix: Ensure radius is positive, and scale up accordingly...
      //
      if ( 0 < r ) {
        const factor = 20;
        r = r * factor;
        radius.push( r );
        X = X * factor;
        vertex.push( r * Math.sin( theta ), r * Math.cos( theta ), X * ( x * Math.cos( phi ) - y * Math.sin( phi ) ) );
      }
    }
  }

  //
  // For the fun of it, let's adjust the color of the points based on the radius
  // of the point such that the larger the radius, the deeper the red.
  //
  let rLo = Math.min( ...radius );
  let rHi = Math.max( ...radius );

  for ( let i = 0; i < radius.length; i++ ) {
    let clr = new THREE.Color( Math.floor( 0x22 + ( 0xff - 0x22 ) * ( ( radius[ i ] - rLo ) / ( rHi - rLo ) ) ) * 0x10000 + 0x002222 );
    colors.push( clr.r, clr.g, clr.b );
  }

  return [ vertex, colors, radius ];
}

//
// Create the geometry and mesh, and add to the THREE scene.
//
const geometry = new THREE.BufferGeometry();

let [ positions, colors, radius ] = rose( 0, 1, 20, -2 * Math.PI, 15 * Math.PI, 2000 );

geometry.setAttribute( 'position', new THREE.Float32BufferAttribute( positions, 3 ) );
geometry.setAttribute( 'color', new THREE.Float32BufferAttribute( colors, 3 ) );

const material = new THREE.PointsMaterial( { size: 4, vertexColors: true, depthTest: false, sizeAttenuation: false } );
const mesh = new THREE.Points( geometry, material );

scene.add( mesh );

//
// Render...
//
var animate = function () {
  requestAnimationFrame( animate );
  renderer.render( scene, camera );
};

animate();
</script>
A couple of notables:
When calling rose( xLo, xHi, xCount, thetaLo, thetaHi, thetaCount ), the upper range thetaHi can vary from Math.PI to 15 * Math.PI, which varies the number of petals.
Both xCount and thetaCount vary the density of the points. The Wolfram example uses 25 and 576, respectively, but that is to create a geometry mesh, whereas when creating a point field the density of points needs to be increased. Hence, in the code the values are 20 and 2000 (see the example call sketched below).
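For instance, something along these lines (reusing the rose() function above; the exact petal count you get for a given thetaHi is something to experiment with) swaps in a shorter theta range and a lighter point density:

// Fewer turns of theta and fewer sample points -- both are just different
// arguments to rose(); the rest of the setup stays the same.
let [ positions, colors ] = rose( 0, 1, 20, -2 * Math.PI, 5 * Math.PI, 800 );
geometry.setAttribute( 'position', new THREE.Float32BufferAttribute( positions, 3 ) );
geometry.setAttribute( 'color', new THREE.Float32BufferAttribute( colors, 3 ) );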
Enjoy!
Presumably the algorithm above references cos() and sin() functions that expect angles in degrees rather than radians, but wherever an angle is also fed through a non-trigonometric transformation (such as the exponential or the modulo), the degree-based version gives an incorrect result.
For example, the following formula using radians...
phi = (Pi/2)Exp[-theta/(8 Pi)]
...has been incorrectly translated to...
phi = ( 180 / 2 ) * Math.exp( -theta / ( 8 * 180 ) )
To test, let's assume theta = 2. Using the original formula in radians...
phi = ( Math.PI / 2 ) * Math.exp( -2 / ( 8 * Math.PI ) )
= 1.451 rad
= 83.12 deg
...and now the incorrect version using degrees, which returns a different angle...
phi = ( 180 / 2 ) * Math.exp( -2 / ( 8 * 180 ) )
= 89.88 deg
= 1.569 rad
A similar issue will occur with the incorrectly translated expression...
pow( ( 1 - ( 3.6 * theta % 360 ) / 180 ), 2 )
Bottom line: Stick to radians.
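For instance, keeping everything in radians, the two translated lines above would look roughly like this (a sketch of the direct translation, assuming p5.js is left in its default RADIANS angle mode):

// phi and X computed in radians, mirroring the Wolfram formulas directly
let phi = ( Math.PI / 2 ) * Math.exp( -theta / ( 8 * Math.PI ) );
let X = 1 - ( 1 / 2 ) * Math.pow( ( 5 / 4 ) * Math.pow( 1 - ( ( 3.6 * theta ) % ( 2 * Math.PI ) ) / Math.PI, 2 ) - 1 / 4, 2 );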
P.S. Note that there might be other issues, but using radians rather than degrees needs to be corrected foremost...
Consider the following snippet: Why are there visible points outside the rectangle?
Is the switching of the context color slower than the drawing of the rectangle?
const templateCanvas = document.getElementById( "template" );
const tctx = templateCanvas.getContext( "2d" );
tctx.fillStyle = "red";
tctx.fillRect( 300, 300, 200, 200 )
const canvas = document.getElementById( "canvas" );
const ctx = canvas.getContext( "2d" );
const max = {
  x: 800,
  y: 800
};
const sites = [];
const points = 10000;

for ( let i = 0; i < points; i++ ) sites.push( {
  x: Math.floor( Math.random() * max.x ),
  y: Math.floor( Math.random() * max.y )
} );

const c = ( alpha ) => 'rgba(255,0,0,' + alpha + ')';
const c2 = ( alpha ) => {
  let colors = [
    'rgba(78,9,12,' + alpha + ')',
    'rgba(161,34,19,' + alpha + ')',
    'rgba(171,95,44,' + alpha + ')',
    'rgba(171,95,44,' + alpha + ')',
    'rgba(252,160,67,' + alpha + ')'
  ]
  return colors[ Math.round( Math.random() * colors.length ) ];
}

sites.forEach( p => {
  let imgData = tctx.getImageData( p.x, p.y, 1, 1 ).data;
  ctx.fillStyle = ( imgData[ 0 ] == 255 ) ? c2( 1 ) : c2( 0 );
  ctx.fillRect( p.x, p.y, 2, 2 )
} );
<canvas id="canvas" width="800" height="800"></canvas>
<canvas id="template" width="800" height="800"></canvas>
I think what's happening is that your random color function sometimes returns an invalid color, because it can index one past the end of the colors array and get undefined. That's caused by the use of Math.round() instead of Math.floor():
return colors[ Math.round( Math.random() * colors.length ) ];
Because of that, every once in a while an invalid color value is assigned to the fill style, and the canvas silently ignores the assignment and keeps the previous fill style, which may well be an opaque color. Thus you get some dots outside the area covered by red pixels (the square).
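A minimal fix is to use Math.floor(), which always yields an index in the valid range:

// Math.floor() keeps the random index in the range 0 .. colors.length - 1
return colors[ Math.floor( Math.random() * colors.length ) ];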
When Y is 100 the maximum height of the curve will be (+/-) 60. I need a way to calculate Y when I have the maximum height of the curve.
Code:
point1 and point2 have x, y and z coordinates
this.drawLine = function(point1, point2) {
  context = this.getContext();
  context.beginPath();
  context.moveTo(this.getX(point1), this.getY(point1));
  point3 = {
    x: ( point1.x + point2.x ) / 2,
    y: ( point1.y + point2.y ) / 2,
    z: ( point1.z + point2.z ) / 2
  }
  context.quadraticCurveTo( this.getX(point3), this.getY(point3) + point3.z * 0, this.getX(point2), this.getY(point2));
  context.stroke();
}
I need the curve to pass through the coordinates of point3 instead of falling short of them.
There are still many possible curves with the same maximum (for example, y = -(x - 2)^2 + 60 and y = -5(x - 2)^2 + 60 both peak at 60), so you cannot isolate a single curve to figure out your Y value.
I would suggest finding a way to obtain more information about your curve such as a point, property or relation.
Check out these links:
http://www.personal.kent.edu/~bosikiew/Algebra-handouts/quad-extval.pdf
http://hotmath.com/hotmath_help/topics/graphing-quadratic-equations-using-transformations.html
Found my answer: here
this.drawLine = function(point1, point2, style) {
  context = this.getContext();
  context.beginPath();
  context.moveTo(this.getX(point1), this.getY(point1));
  point3 = {
    x: ( point1.x + point2.x ) / 2,
    y: ( point1.y + point2.y ) / 2,
    z: ( point1.z + point2.z ) / 2
  }
  context.strokeStyle = style;
  x = this.getX(point3) * 2 - ( this.getX(point1) + this.getX(point2) ) / 2;
  y = this.getY(point3) * 2 - ( this.getY(point1) + this.getY(point2) ) / 2;
  context.quadraticCurveTo( x, y, this.getX(point2), this.getY(point2));
  context.stroke();
}
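In case it helps to see why that works: the midpoint of a quadratic Bézier is B(0.5) = 0.25*P0 + 0.5*P1 + 0.25*P2, so forcing B(0.5) to land on point3 and solving for the control point gives P1 = 2*point3 - (P0 + P2)/2, which is exactly what the x and y lines above compute. As a standalone sketch (controlPointThrough is just an illustrative name, not part of the code above):

// Control point that makes a quadratic Bezier from p0 to p2 pass through m at t = 0.5:
// B(0.5) = 0.25*p0 + 0.5*p1 + 0.25*p2 = m  =>  p1 = 2*m - (p0 + p2) / 2
function controlPointThrough(p0, m, p2) {
  return {
    x: 2 * m.x - (p0.x + p2.x) / 2,
    y: 2 * m.y - (p0.y + p2.y) / 2
  };
}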
I'm trying to create a BufferGeometry plane, but I'm having trouble with the UV coordinates. I've tried to follow Correct UV mapping Three.js, yet I don't get a correct result.
The UV code is below. I also saved the entire BufferGeometry code at http://jsfiddle.net/94xaL/.
I would very much appreciate a hint on what I'm doing wrong here!
Thanks!
var uvs = terrainGeom.attributes.uv.array;
var gridX = gridY = TERRAIN_RES - 1;

for ( iy = 0; iy < gridY; iy++ ) {
  for ( ix = 0; ix < gridX; ix++ ) {

    var i = (iy * gridY + ix) * 12;

    //0,0
    uvs[ i ] = ix / gridX;
    uvs[ i + 1 ] = iy / gridY;

    //0,1
    uvs[ i + 2 ] = ix / gridX;
    uvs[ i + 3 ] = ( iy + 1 ) / gridY;

    //1,0
    uvs[ i + 4 ] = ( ix + 1 ) / gridX;
    uvs[ i + 5 ] = iy / gridY;

    //0,1
    uvs[ i + 6 ] = ix / gridX;
    uvs[ i + 7 ] = ( iy + 1 ) / gridY;

    //1,1
    uvs[ i + 8 ] = ( ix + 1 ) / gridX;
    uvs[ i + 9 ] = ( iy + 1 ) / gridY;

    //1,0
    uvs[ i + 10 ] = ( ix + 1 ) / gridX;
    uvs[ i + 11 ] = iy / gridY;
  }
}
The latest three.js version in the dev branch now builds planes with BufferGeometry: https://github.com/mrdoob/three.js/blob/dev/src/extras/geometries/PlaneGeometry.js
If you still want to build your own you can get some inspiration there.
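If it helps, here is a rough sketch of how per-cell UVs can be laid out for a non-indexed grid (two triangles, six vertices, twelve floats per cell), using the same TERRAIN_RES segment count as above. The per-triangle vertex order is an assumption and must match however your position buffer is built, otherwise the texture will come out scrambled:

// Sketch: fill a non-indexed UV buffer, 6 vertices (12 floats) per grid cell.
// The per-triangle vertex order below is an assumption -- it has to match the
// order used when the positions were generated.
var segments = TERRAIN_RES - 1;
var uvs = new Float32Array( segments * segments * 12 );
var offset = 0;

for ( var iy = 0; iy < segments; iy++ ) {
  for ( var ix = 0; ix < segments; ix++ ) {
    var u0 = ix / segments,         v0 = iy / segments;
    var u1 = ( ix + 1 ) / segments, v1 = ( iy + 1 ) / segments;

    // triangle 1: (0,0) (0,1) (1,0)
    uvs.set( [ u0, v0, u0, v1, u1, v0 ], offset ); offset += 6;

    // triangle 2: (0,1) (1,1) (1,0)
    uvs.set( [ u0, v1, u1, v1, u1, v0 ], offset ); offset += 6;
  }
}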
I found this script by mrdoob that generates a web of lines. I've figured out where a line begins and where it ends. Now I want to extrude faces from these shapes; however, all I have are lines and vertices. I'm trying to read through some half-edge theory, but I don't think I understand it that well.
Is it a matter of following a line until it forms a rectangle, checking whether it intersects another line or subdivides? I just need a nudge in the right direction.
// Based on Jared Tarbell's Substrate algorithm concept.
// http://www.complexification.net/gallery/machines/substrate/index.php

var Boid = function ( x, y, angle ) {

  this.x = x;
  this.y = y;
  this.angle = Math.pow( Math.random(), 20 ) + angle;
  this.dx = Math.cos( this.angle );
  this.dy = Math.sin( this.angle );
  this.life = Math.random() * 100 + 100;
  this.dead = false;

  this.update = function () {

    context.strokeStyle = '#000000';
    context.beginPath();
    context.moveTo( this.x, this.y );

    this.x += this.dx * 2;
    this.y += this.dy * 2;
    this.life -= 2;

    context.lineTo( this.x, this.y );
    context.stroke();

    var index = ( Math.floor( this.x ) + width * Math.floor( this.y ) ) * 4;

    if ( this.life <= 0 ) this.kill();
    if ( data[ index + 3 ] > 0 ) this.kill();
    if ( this.x < 0 || this.x > width ) this.kill();
    if ( this.y < 0 || this.y > height ) this.kill();
  }

  this.kill = function () {
    boids.splice( boids.indexOf( this ), 1 );
    this.dead = true;
  }
}
var width = window.innerWidth;
var height = window.innerHeight;
var canvas = document.getElementById( 'world' );
canvas.width = width;
canvas.height = height;
var context = canvas.getContext( '2d' );
var image, data;
var boids = [];
boids.push( new Boid( width / 2, height / 2, Math.random() * 360 * Math.PI / 180 ) );
setInterval( function () {

  image = context.getImageData( 0, 0, width, height );
  data = image.data;

  for ( var i = 0; i < boids.length; i ++ ) {
    var boid = boids[ i ];
    boid.update();

    if ( !boid.dead && Math.random() > 0.5 && boids.length < 500 ) {
      boids.push( new Boid( boid.x, boid.y, ( Math.random() > 0.5 ? 90 : - 90 ) * Math.PI / 180 + boid.angle ) );
    }
  }
}, 1000 / 60 );
This looks more complicated than I thought. I'm not sure if this is the answer you are looking for, but it may help you decide your next step:
If you have to use this algorithm: I think you are going to need to keep track of every pair of points that makes an edge, capturing the first point at the beginning of the Boid function and the second when the Boid is killed; both points (or the x1, y1, x2 and y2 values) are saved in a new edge object that is added to an edges array (every edge will be, in a way, the soul of a dead Boid). A rough sketch of that bookkeeping follows below.
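Something along these lines, assuming a global edges array next to boids (the field names here are illustrative, not part of the original script):

// Sketch: record the segment each Boid traced. The start point is captured
// when the Boid is created and the end point when it is killed.
var edges = [];

var Boid = function ( x, y, angle ) {
  this.startX = x;            // where this Boid's edge begins
  this.startY = y;
  // ...the existing fields and update() stay the same as above...
  this.kill = function () {
    edges.push( { x1: this.startX, y1: this.startY, x2: this.x, y2: this.y } );
    boids.splice( boids.indexOf( this ), 1 );
    this.dead = true;
  }
}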
There are two problems to solve before applying the half-edge theory. First, you have an array of edges, but you need to know which other edges are connected to the beginning or end of a given edge. Second, the "collision" between two Boids only affects the Boid currently being updated, which is killed during the collision. In order to use the half-edge theory you'll have to "notify" the other Boid/edge about this collision and split it at that point: a collision point is a vertex of three edges, the one that collides and the two into which the edge that was hit gets split.
Also note that the shapes (faces) are not necessarily made of four edges; I opened the link you provided and there were a lot of shapes with three and five edges.
If you can use a different algorithm to generate the mesh, then you may get a better representation of the edges and vertices that will help you find the "corners" that make up every shape.