Phaser.js Implementing Minecraft lighting - javascript

So, I was looking into making Minecraft/Terraria-style block lighting
(like how some tiles are dark).
So far, the high-level idea I've seen is
"set the sky block to the max light level, reduce by x for every block, and get a tile's light level by averaging the surrounding light levels".
So my question is mainly: how can I implement this in Phaser? Would I need to give every tile a value and calculate it?

There is an official example that answers your question (if I understand your question correctly).
Here is a slightly altered version of the example
(it is a very crude example, to showcase the idea):
document.body.style = 'margin:0;';
var config = {
type: Phaser.AUTO,
width: 536,
height: 183,
scene: {
preload,
create
}
};
var game = new Phaser.Game(config);
function preload (){
this.load.image('tiles', 'https://labs.phaser.io/assets/tilemaps/tiles/catastrophi_tiles_16.png');
this.load.tilemapCSV('map', 'https://labs.phaser.io/assets/tilemaps/csv/catastrophi_level2.csv');
}
function create () {
this.add.text(10, 10, 'Click to change light position')
.setOrigin(0)
.setDepth(100)
.setStyle({fontFamily: 'Arial'});
let map = this.make.tilemap({ key: 'map', tileWidth: 16, tileHeight: 16 });
let tileset = map.addTilesetImage('tiles');
map.createLayer(0, tileset, 0, 0);
let lightSource = {x: 0, y: 0};
updateMap(map, lightSource);
this.input.on('pointerdown', (pointer) => updateMap(map, pointer));
}
function updateMap (map, lightSource) {
let fullLightDistanceInTiles = 3;
let origin = map.getTileAtWorldXY(lightSource.x, lightSource.y);
map.forEachTile(function (tile) {
var dist = Phaser.Math.Distance.Chebyshev(
origin.x,
origin.y,
tile.x,
tile.y
) - fullLightDistanceInTiles;
tile.setAlpha(1 - 0.1 * dist);
});
}
<script src="//cdn.jsdelivr.net/npm/phaser#3.55.2/dist/phaser.js"></script>
It basically just calculates the distance from the player to the tiles and sets the alpha value accordingly. You would have to modify it so that the distance is calculated from a fixed sun/light position or so (and recalculation would only be necessary when the sun/light position moves).
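If you do want the per-tile light values you described (sky tiles at max level, reduced per block), rather than a pure distance falloff, a minimal sketch could look like the following. This is my own assumption of an implementation, not part of the official example: it flood-fills light levels across the tile grid from a list of source tiles and maps each level to an alpha; maxLight = 10 and the falloff of 1 per tile are just tuning values.
function computeLightLevels (map, sources, maxLight = 10) {
    // levels[y][x] holds the light level of each tile, 0 = fully dark
    const levels = [];
    for (let y = 0; y < map.height; y++) {
        levels.push(new Array(map.width).fill(0));
    }
    // breadth-first flood fill outwards from the light sources
    const queue = [];
    for (const s of sources) {
        levels[s.y][s.x] = maxLight;
        queue.push(s);
    }
    while (queue.length > 0) {
        const {x, y} = queue.shift();
        const next = levels[y][x] - 1; // reduce by 1 for every tile travelled
        if (next <= 0) continue;
        for (const [dx, dy] of [[1, 0], [-1, 0], [0, 1], [0, -1]]) {
            const nx = x + dx, ny = y + dy;
            if (nx < 0 || ny < 0 || nx >= map.width || ny >= map.height) continue;
            if (levels[ny][nx] < next) {
                levels[ny][nx] = next;
                queue.push({x: nx, y: ny});
            }
        }
    }
    return levels;
}
function applyLightLevels (map, levels, maxLight = 10) {
    // darker tiles get a lower alpha, like in the distance demo above
    map.forEachTile((tile) => tile.setAlpha(levels[tile.y][tile.x] / maxLight));
}
The levels would only need recomputing when a light source or a blocking tile changes, not every frame.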
If you don't want to set the alpha on the map tiles, you could create a second layer with only black/grey tiles above it and set the alpha on that layer, but this would muddy the colors a bit.
Alternatively, if the light is horizontal and has the same level everywhere, you could simply overlay a gradient image or so. This would be better for performance and less work.
Here is a demo using a Phaser-generated gradient:
document.body.style = 'margin:0;';
var config = {
type: Phaser.AUTO,
width: 536,
height: 183,
scene: {
preload,
create
}
};
var game = new Phaser.Game(config);
function preload (){
this.load.image('tiles', 'https://labs.phaser.io/assets/tilemaps/tiles/catastrophi_tiles_16.png');
this.load.tilemapCSV('map', 'https://labs.phaser.io/assets/tilemaps/csv/catastrophi_level2.csv');
}
function create () {
this.add.text(10, 10, 'fixed horizontal darkness')
.setOrigin(0)
.setDepth(100)
.setStyle({fontFamily: 'Arial'});
let map = this.make.tilemap({ key: 'map', tileWidth: 16, tileHeight: 16 });
let tileset = map.addTilesetImage('tiles');
map.createLayer(0, tileset, 0, 0);
var graphicsOverlay = this.add.graphics();
graphicsOverlay.fillGradientStyle(0x0, 0x0, 0x0, 0x0, 0,0,1,1);
graphicsOverlay.fillRect(0, config.height/3, config.width, config.height*2/3);
}
<script src="//cdn.jsdelivr.net/npm/phaser#3.55.2/dist/phaser.js"></script>

How to get the correct tile position in Phaser.js with a zoomed camera?

I am using Phaser.js 3.55 to build a tile-based game. I have a tilemap, a layer, and a player sprite. I want to be able to click on a tile and have the player sprite move to that tile.
However, I'm having trouble getting the correct tile position when the camera is zoomed in or out. I've tried using layer.worldToTileX and layer.worldToTileY to calculate the tile position based on the pointer position, but it gives me different results depending on the zoom level.
Here's my code for the pointerdown event in the update-function:
function update (time, delta) {
controls.update(delta);
zoomLevel = this.cameras.main.zoom;
this.input.on("pointerdown", function (pointer) {
var x = layer.worldToTileX(pointer.x / zoomLevel );
var y = layer.worldToTileY(pointer.y / zoomLevel );
if (map.getTileAt(x, y)) {
var goalX = map.getTileAt(x, y).x * 32 + 16;
var goalY = map.getTileAt(x, y).y * 32 + 16;
this.tweens.add({
targets: player,
x: goalX,
y: goalY,
duration: 1000,
ease: 'Power4',
});
}
}, this);
}
What is the correct way to get the tile position based on the pointer position and the zoom level in Phaser.js?
At first I wrote the pointerdown event in the create() function, which didn't update the zoom scale I got via zoomLevel = this.cameras.main.zoom;. I moved it to the update function, where zoomLevel would be updated, and then tried dividing pointer.x and pointer.y by the current zoomLevel. I expected the pointer to give me the currently clicked tile. Outcome: it shifted, depending on the zoomLevel.
My other code besides the update() function:
var game = new Phaser.Game(config);
var map;
var layer;
var player;
function preload () {
this.load.tilemapTiledJSON('tilemap', '/img/phaser/map.json');
this.load.image('base_tiles', '/img/phaser/outdoors.png');
this.load.spritesheet('player', '/img/phaser/dude.jpg', { frameWidth: 32, frameHeight: 48 });
}
function create () {
map = this.make.tilemap({ key: 'tilemap' });
const tileset = map.addTilesetImage('outdoors', 'base_tiles');
layer = map.createLayer(0, tileset, 0, 0);
player = this.add.sprite(500, 100, 'player', 4);
var cursors = this.input.keyboard.createCursorKeys();
var controlConfig = {
camera: this.cameras.main,
left: cursors.left,
right: cursors.right,
up: cursors.up,
down: cursors.down,
zoomIn: this.input.keyboard.addKey(Phaser.Input.Keyboard.KeyCodes.Q),
zoomOut: this.input.keyboard.addKey(Phaser.Input.Keyboard.KeyCodes.E),
acceleration: 0.06,
drag: 0.0005,
maxSpeed: 1.0
};
controls = new Phaser.Cameras.Controls.SmoothedKeyControl(controlConfig);
}
You can get the exact position with the camera's getWorldPoint function (see the Phaser documentation); no manual calculation is needed.
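Applied to the code from the question, the handler could be registered once in create() instead of on every update() call - a minimal sketch reusing the question's map, layer and player variables:
// in create(), after the map/layer/player setup
this.input.on('pointerdown', function (pointer) {
    // getWorldPoint converts screen coordinates to world coordinates,
    // taking both camera scroll and zoom into account
    const worldPoint = this.cameras.main.getWorldPoint(pointer.x, pointer.y);
    const tile = layer.getTileAtWorldXY(worldPoint.x, worldPoint.y);
    if (tile) {
        this.tweens.add({
            targets: player,
            x: tile.getCenterX(),
            y: tile.getCenterY(),
            duration: 1000,
            ease: 'Power4',
        });
    }
}, this);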
Here is a demo showcasing this
(use the Arrow UP/DOWN keys to zoom in or out, and click to select a tile):
document.body.style = 'margin:0;';
var config = {
type: Phaser.AUTO,
width: 5 * 16,
height: 3 * 16,
zoom: 4,
pixelArt: true,
scene: {
preload,
create
},
banner: false
};
function preload (){
this.load.image('tiles', 'https://labs.phaser.io/assets/tilemaps/tiles/super-mario.png');
}
function create () {
let infoText = this.add.text(2, 2, '')
.setOrigin(0)
.setScale(.5)
.setDepth(100)
.setStyle({fontStyle: 'bold', fontFamily: 'Arial'});
// Load a map from a 2D array of tile indices
let level = [ [ 0, 0, 0, 0, 0],[ 0, 14, 13, 14, 0],[ 0, 0, 0, 0, 0]];
let map = this.make.tilemap({ data: level, tileWidth: 16, tileHeight: 16 });
let tiles = map.addTilesetImage('tiles');
let layer = map.createLayer(0, tiles, 0, 0);
const cam = this.cameras.main;
let tileSelector = this.add.rectangle(0, 0, 16, 16, 0xffffff, .5).setOrigin(0);
this.input.keyboard.on('keyup-UP', function (event) {
cam.zoom+=.25;
});
this.input.keyboard.on('keyup-DOWN', function (event) {
if(cam.zoom > 1){
cam.zoom-=.25;
}
});
this.input.on('pointerdown', ({x, y}) => {
let {x:worldX, y:worldY} = cam.getWorldPoint(x, y)
let tile = layer.getTileAtWorldXY(worldX, worldY);
tileSelector.setPosition(tile.pixelX, tile.pixelY);
infoText.setText(`selected TileID:${tile.index}`);
});
}
new Phaser.Game(config);
<script src="//cdn.jsdelivr.net/npm/phaser/dist/phaser.min.js"></script>

Can I use an image (.svg) or svg path(?) in Konva.Group()'s clipFunc?

Suppose I have a map of the world.
And I'd like each continent to be an area where I could attach shapes to and drag/reshape them, while always being clipped by the continent's shape borders/limits.
Here's what I have so far:
const stage = new Konva.Stage({
container: 'stage',
width: window.innerWidth,
height: window.innerHeight
});
const layer = new Konva.Layer();
const group = new Konva.Group({
clipFunc: function (ctx) {
ctx.arc(250, 120, 50, 0, Math.PI * 2, false);
ctx.arc(150, 120, 60, 0, Math.PI * 2, false);
},
});
const shape = new Konva.Rect({
x: 150,
y: 70,
width: 100,
height: 50,
fill: "green",
stroke: "black",
strokeWidth: 4,
draggable: true,
});
group.add(shape);
layer.add(group);
stage.add(layer);
body {
margin: 0;
padding: 0;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/konva/8.4.0/konva.min.js"></script>
<div id="stage"></div>
My question is: how could I use the clipFunc to draw a continent's limits? Could I use an image? An SVG path? I can't seem to find the answer in the docs.
[Edit: added a new option #2, and added demos for options #2 & #3 (CodePen + snippets).]
TLDR: Nothing totally automatic but two possible options.
Just to confirm - based on
And I'd like each continent to be an area where I could attach shapes
to and drag/reshape them, while always being clipped by the
continent's shape borders/limits.
I think you are asking how to limit the boundaries for dragging a shape to an 'arbitrary' region. I say arbitrary because it is a non-geometric region (not a square, circle, pentagon etc).
It would be fabulous to have a baked-in function to achieve this but sadly I am not aware that it is possible. Here's why:
Dragbound limits: in terms of what you get 'out of the box', Konva handles constraining the drag position via node.dragBoundFunc(). Here is the example from the Konva docs, which is straightforward:
// get drag bound function
var dragBoundFunc = node.dragBoundFunc();
// create vertical drag and drop
node.dragBoundFunc(function(pos){
// important pos - is absolute position of the node
// you should return absolute position too
return {
x: this.absolutePosition().x,
y: pos.y
};
});
The gist of this is that we are able to use the code in the dragBoundFunc function to decide if we like the position the shape is being dragged to, or not. If not we can override that 'next' position with our own.
OK - so that is how dragging is constrained via dragBoundFunc. You can also use node.on('dragmove') to achieve the same effect - the code would be very similar, as sketched below.
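For illustration, a minimal dragmove-based sketch could look like this - shape and isOutOfBounds are placeholders for your dragged node and whichever hit test you implement (see the options below):
// remember the last position that passed the hit test
let lastGoodPos = shape.position();
shape.on('dragmove', function () {
    if (isOutOfBounds(this)) {
        // reject this move: snap the shape back to the last good position
        this.position(lastGoodPos);
    } else {
        // accept the move and remember it
        lastGoodPos = { x: this.x(), y: this.y() };
    }
});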
Hit testing
To decide in the dragBoundFunc whether to accept the proposed position of the shape being dragged, we need to carry out 'hit testing'.
[Aside: An important consideration is that, to make a pleasing UI, we should be hit testing at the boundary of the shape that is being dragged - not where the mouse pointer or finger are positioned. Example - think of a circle being dragged with the mouse pointer at its center - we want to show the user the 'hit' UI when the perimeter of the circle goes 'out of bounds' from the perspective of the dragBoundFunc, not when the center hits that point. What this means in effect is that our logic should check the perimeter of the shape for collision with the boundary - that might be simple or more difficult, depending on the shape.]
So we know we want to hit test our dragging shape against an arbitrary, enclosing boundary (the country border).
Option #1: Konva built-in method.
[Update] On developing the demo for this option I discovered that its mainstay, getIntersection(pt), is deliberately disabled (will always return null) when used in a dragmove situation. This is by design and done for performance because the overhead for the process is so costly.
What getIntersection does is look at a given pixel to find the topmost of the shapes that might overlap the given x, y point. It stops at the first hit. To do this it draws each shape in an off-screen canvas, checks the pixel, and repeats until no shapes remain. As you can tell, quite a costly process to run in-between mousemove steps.
The proposal for this option was to check a bunch of static border points on the stage via getIntersection - if the dragging shape came up as the hit then we would know the border was being crossed.
What point do we give it to check? So here's the rub - you would have to predefine points on your map that lie on the borders. How many points? Enough so that your draggable shapes can't stray very far over the mesh of border points without the hit-test firing. Do it correctly and this would be a very efficient method of hit testing. And it's not as if the borders will be changing regularly.
I made a simple point creator here. This is the view after I created the points around Wombania.
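A rough sketch of how that check might look, assuming a hypothetical borderPoints array of {x, y} coordinates produced by such a point creator. Since getIntersection returns null while a drag is in progress, this version runs on dragend (a layer.draw() beforehand refreshes the hit graph if it is stale at that point):
shape.on('dragend', function () {
    // the border was crossed if the dragged shape is the topmost shape
    // over any of the pre-defined border points
    const crossed = borderPoints.some((pt) => stage.getIntersection(pt) === this);
    if (crossed) {
        console.log('Boundary collision');
    }
});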
Option #2: The concept is to create an off-screen canvas the same size as the client rect of the shape being dragged and create a clone of the drag shape therein. Space around the shape would be transparent, the shape itself would be colored. We now use a set of pre-defined points along the country boundary, as in the example above. We filter the points inside the shape's clientRect - so we only have a handful to test. We then 'translate' those points to the appropriate location in the off-screen hit canvas and check the color of the pixels at those points - any color whatsoever indicates the dragging shape is 'over' the point we are testing. Any hit means we can break out of the loop and report a boundary collision.
This is optimised in the following ways:
1 - we only make the offscreen canvas once at the dragstart.
2 - we only test the minimum number of boundary points - only those falling in the bounding box of the dragging shape.
Demo here at codepen. Snippet below - best consumed full screen.
const scale = 1,
stage = new Konva.Stage({
container: "container",
width: 500,
height: 400,
draggable: false
}),
layer = new Konva.Layer({
draggable: false
}),
imageShape = new Konva.Image({
x: 0,
y: 0,
draggable: false
}),
// Rect drawn to show client rect of dragging shape
theShapeRect = new Konva.Rect({
stroke: "silver",
strokeWidth: 1,
listening: false
}),
// small dots to show check points
pointCircle = new Konva.Circle({
radius: 30,
fill: "silver",
draggable: false
}),
// the three draggable shape defs - select by button
dragShapes = {
circle: new Konva.Circle({
radius: 30,
fill: "lime",
draggable: true,
visible: false
}),
rectangle: new Konva.Rect({
width: 60,
height: 60,
fill: "lime",
draggable: true,
visible: false
}),
star: new Konva.Star({
numPoints: 6,
innerRadius: 40,
outerRadius: 70,
fill: "lime",
draggable: true,
visible: false
})
},
// data for the check points.
data = `{"pt0":{"x":85.5,"y":44.5},"pt1":{"x":76,"y":62},"pt2":{"x":60,"y":78},"pt3":{"x":47,"y":94},"pt4":{"x":33,"y":115},"pt5":{"x":26,"y":133},"pt6":{"x":17,"y":149},"pt7":{"x":27,"y":171},"pt8":{"x":45,"y":186},"pt9":{"x":69,"y":187},"pt10":{"x":87,"y":191},"pt11":{"x":104,"y":194},"pt12":{"x":123,"y":214},"pt13":{"x":124,"y":238},"pt14":{"x":120,"y":260},"pt15":{"x":94,"y":265},"pt16":{"x":92,"y":275},"pt17":{"x":113,"y":281},"pt18":{"x":130,"y":280},"pt19":{"x":148,"y":280},"pt20":{"x":156,"y":261},"pt21":{"x":169,"y":248},"pt22":{"x":188,"y":251},"pt23":{"x":201,"y":263},"pt24":{"x":207,"y":274},"pt25":{"x":195,"y":281},"pt26":{"x":181,"y":285},"pt27":{"x":183,"y":291},"pt28":{"x":194,"y":293},"pt29":{"x":222,"y":293},"pt30":{"x":242,"y":284},"pt31":{"x":245,"y":257},"pt32":{"x":247,"y":238},"pt33":{"x":263,"y":236},"pt34":{"x":278,"y":240},"pt35":{"x":293,"y":239},"pt36":{"x":305,"y":238},"pt37":{"x":315,"y":237},"pt38":{"x":333,"y":236},"pt39":{"x":337,"y":248},"pt40":{"x":324,"y":258},"pt41":{"x":303,"y":263},"pt42":{"x":314,"y":267},"pt43":{"x":326,"y":273},"pt44":{"x":347,"y":273},"pt45":{"x":364,"y":273},"pt46":{"x":378,"y":260},"pt47":{"x":401,"y":263},"pt48":{"x":422,"y":272},"pt49":{"x":429,"y":278},"pt50":{"x":414,"y":281},"pt51":{"x":400,"y":287},"pt52":{"x":411,"y":294},"pt53":{"x":434,"y":292},"pt54":{"x":462,"y":287},"pt55":{"x":478,"y":275},"pt56":{"x":474,"y":259},"pt57":{"x":466,"y":233},"pt58":{"x":470,"y":208},"pt59":{"x":483,"y":189},"pt60":{"x":484,"y":169},"pt61":{"x":494,"y":153},"pt62":{"x":496,"y":129},"pt63":{"x":489,"y":106},"pt64":{"x":472,"y":91},"pt65":{"x":458,"y":78},"pt66":{"x":443,"y":65},"pt67":{"x":428,"y":54},"pt68":{"x":412,"y":41},"pt69":{"x":394,"y":31},"pt70":{"x":369,"y":23},"pt71":{"x":346,"y":22},"pt72":{"x":323,"y":22},"pt73":{"x":300,"y":23},"pt74":{"x":278,"y":24},"pt75":{"x":265,"y":26},"pt76":{"x":251,"y":30},"pt77":{"x":235,"y":32},"pt78":{"x":220,"y":38},"pt79":{"x":203,"y":44},"pt80":{"x":189,"y":53},"pt81":{"x":174,"y":57},"pt82":{"x":163,"y":51},"pt83":{"x":148,"y":53},"pt84":{"x":128,"y":52},"pt85":{"x":100,"y":51}}`,
// load the data into an object.
pointsList = JSON.parse(data);
// shape is set when the shape-type button is clicked.
let theShape = undefined;
// Add shapes to the layer and layer to stage
layer.add(
imageShape,
dragShapes.circle, // not visible at this point
dragShapes.rectangle, // not visible at this point
dragShapes.star, // not visible at this point
theShapeRect
);
stage.add(layer);
// Make the hit stage where we will do color sampling
const hitStage = new Konva.Stage({
container: "container2",
width: 300,
height: 300,
draggable: true
}),
hitLayer = new Konva.Layer(),
ctx = hitLayer.getCanvas().getContext(); // Get the canvas context for access to pixel data
hitStage.add(hitLayer);
// Make an HTML image variable to act as the image loader, load the image
const img = new Image();
img.crossOrigin = "Anonymous";
img.onload = function () {
imageShape.image(img); // when loaded give the image to the Konva image shape
};
img.src = "https://assets.codepen.io/255591/map_of_wombania2.svg"; // start image loading - fires onload above.
// draw a small grey dot centered on each test point
for (const [key, pt] of Object.entries(pointsList)) {
layer.add(
pointCircle.clone({
name: key + " point",
radius: 5,
x: pt.x,
y: pt.y
})
);
}
// Function to get the color data for given point on a given canvas context
function getRGBAInfo(ctx, point) {
// get the image data for one pixel at the computed point
const pixel = ctx.getImageData(point.x, point.y, 1, 1);
const data = pixel.data;
// for fun, we show the rgba value at the pixel
const rgba =
"pt " +
JSON.stringify(point) +
` rgba(${data[0]}, ${data[1]}, ${data[2]}, ${data[3] / 255})`;
// console.log(rgba);
return data;
}
// function to reset collided point colors
function clearPoints() {
// clear the collision point colors
const points = stage.find(".point");
for (const point of points) {
point.fill("silver");
}
}
// variable to track whether we collided or not.
let hit = false;
// user clicks a shape-select button
$(".shapeButton").on("click", function () {
setShape($(this).data("shape"));
});
// Set the active shape.
function setShape(shapeName) {
clearPoints();
if (theShape) {
theShape.visible(false);
}
theShape = dragShapes[shapeName];
// Somewhere in Wombania....
theShape.position({
x: 300,
y: 120
});
// finally we see the shape !
theShape.visible(true);
// and set the bounding rect visualising rect
theShapeRect.position(theShape.getClientRect());
theShapeRect.size(theShape.getClientRect());
// better clear any listeners on the shape just in case
theShape.off();
// fires once as the drag commences
theShape.on("dragstart", function (evt) {
// clear the hitLayer for color testing
hitLayer.destroyChildren();
// make a copy of the dragging shape, positioned at top-left of hit canvas
// Note I fill shape with solid color - if you drag a Konva.Group then make a filled rect
// the pos & size of the group.getClientRect and add that into the group after cloning.
const clone = evt.target.clone({ fill: "red", stroke: "red" });
clone.position({
x: clone.width() / 2,
y: clone.height() / 2
});
hitLayer.add(clone);
// cloning copies some events so better clear them as they are not needed on the clone.
clone.off();
// reset the boundary point color
clearPoints();
// position the client rect visualiser
theShapeRect.position(theShape.getClientRect());
theShapeRect.size(theShape.getClientRect());
});
// Will run on each drag move event
theShape.on("dragmove", function (evt) {
// assume no collisions - we will know by the end of the event
hit = false;
// position the client rect visualiser
theShapeRect.position(theShape.getClientRect());
// Get the translation vector from the drag shape in the main canvas to the location
// in the hit canvas. We use this to translate the check points in the main canvas
// to their positions in the hit canvas
const translateDist = {
x: -this.position().x + this.width() / 2,
y: -this.position().y + this.height() / 2 // height (not width) for the y offset
};
// get a rect around the current pos of the dragging shape, used to check if points
// are within this rect. If YES then process them, otherwise ignore.
const checkRect = this.getClientRect();
// Walk the set of check points...
for (const [key, pt] of Object.entries(pointsList)) {
// Is this point in the client rect of the dragging shape ?...
if (
checkRect.x < pt.x &&
checkRect.y < pt.y &&
checkRect.x + checkRect.width > pt.x &&
checkRect.y + checkRect.height > pt.y
) {
//...yes - so we process it
// translate the point to its position in the hit canvas.
let pointTranslated = {
x: pt.x + translateDist.x,
y: pt.y + translateDist.y
};
// get the color info of the point
const colorInfo = getRGBAInfo(ctx, pointTranslated);
// Is there any color there, anything, at all, maybe ?
if (colorInfo[0] + colorInfo[1] + colorInfo[2] + colorInfo[3] > 0) {
// if we find color then we have a collision!
hit = true;
// set the color of the collided point to visualise it
stage.findOne("." + key).fill("black");
// !Important: In live code we could 'break' here because it is not
// important to know _all_ the hits. I will process them all for demo purposes.
// break;
}
}
}
// Phew - after all that point fettling, if we got a hit then say so !
if (hit) {
$("#alarm").html("Boundary collision");
evt.target.fill("red");
} else {
evt.target.fill("lime");
$("#alarm").html("Still good");
}
});
}
body {
margin: 10px;
background-color: #f0f0f0;
}
.container {
border: 1px solid black;
display: inline-block;
}
#alarm {
color: red;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script src="https://unpkg.com/konva#8/konva.min.js"></script>
<p><span id='info'>Pick a shape, drag it around the country without hitting the edges!</span></p>
<p><span id='alarm'>.</span></p>
<p>
<button class="shapeButton" data-shape='circle'>Circle</button>
<button class="shapeButton" data-shape='rectangle'>Rectangle</button>
<button class="shapeButton" data-shape='star'>Star</button>
</p>
<div id="container" class='container'></div>
<div id="container2" class='container'></div>
Option #3: Alpha value checking.
The gist of this method is to have the color fill of each country carry a specific alpha value in its RGBA setting. You can then check the colors at specific points on the perimeter of your dragging shape. Let's say we set the alpha for France to 250, the Channel to 249, Spain to 248, Italy to 247, etc. If you are dragging your shape around 'inside' France, you expect an alpha value of 250. If you see anything else under any of those perimeter points then some part of your shape has crossed the border. [In practice, the HTML canvas will add some antialiasing along the border line, so you will see some values outside those that you set, but these have a low impact and can be ignored.]
One point is that you can't test the color on the main canvas if the shape being dragged is visible - because you would be getting the fill, stroke, or antialiased pixel color of the shape itself!
To solve this you need a second stage - this can be in memory only, so not visible on the page - where you load either a copy of the main stage with the dragging shape invisible, or just the image of the map. Let's call this the hit-stage. Assuming you keep the position of the hit-stage in line with the main stage, everything will work. Based on the location of the dragging shape and its perimeter points, you check the pixel colors on the hit canvas. If the values match the country you are expecting then there is no hit, but if you see a different alpha value then you hit or passed the border. Actually you don't even need to know the color for the starting country - just note the color under the mouse point when the drag commences and look out for a different alpha value under the perimeter points.
There's a working demo of the 2-stage approach at codePen here. The demo just uses a country boundary and 'elsewhere' but you would use the same technique to construct an atlas of countries with different alpha values for your needs.
This is the JavaScript from the codepen demo. It is best seen in full screen. Though, when I checked it after copying from codepen, some of the detections on the right-hand side did not fire, so view the codepen if you can.
const countries = [
{ name: "wombania", alpha: 252 },
// add more countries as required
{ name: "Elsewhere", alpha: 0 }
],
scale = 1,
stage = new Konva.Stage({
container: "container",
width: 500,
height: 400,
draggable: false,
scale: {
x: scale,
y: scale
}
}),
layer = new Konva.Layer({
draggable: false
}),
imageShape = new Konva.Image({
x: 0,
y: 0,
draggable: false
}),
circle = new Konva.Circle({
radius: 30,
fill: "lime",
draggable: true,
x: 300,
y: 120,
scale: {
x: scale,
y: scale
}
});
let currentCountry = undefined;
const hitStage = new Konva.Stage({
container: "container2",
width: 500,
height: 400,
draggable: false
}),
hitLayer = new Konva.Layer(),
hitImage = new Konva.Image(),
ctx = hitLayer.getCanvas().getContext(); // Get the canvas context for access to pixel data
layer.add(imageShape, circle);
stage.add(layer);
hitLayer.add(hitImage);
hitStage.add(hitLayer);
// Make an HTML image variable to act as the image loader, load the image
const img = new Image();
img.crossOrigin = "Anonymous";
img.onload = function () {
imageShape.image(img); // when loaded give the image to the Konva image shape
hitImage.image(img); // and to the hit canvas
const hitImageObj = new Image();
};
img.src = "https://assets.codepen.io/255591/map_of_wombania2.svg"; // start image loading - fires onload above.
// Will run on each drag move event
circle.on("dragmove", function () {
// get 20 points on the perimeter to check.
let hitCountry = currentCountry;
for (let angle = 0; angle < 360; angle = angle + 18) {
const angleRadians = (angle * Math.PI) / 180;
let point = {
x: parseInt(
circle.position().x + Math.cos(angleRadians) * circle.radius(),
10
),
y: parseInt(
circle.position().y + Math.sin(angleRadians) * circle.radius(),
10
)
};
// get the image data for one pixel at the computed point
const pixel = ctx.getImageData(point.x, point.y, 1, 1);
const data = pixel.data;
// for fun, we show the rgba value at the pixel
const rgba = `rgba(${data[0]}, ${data[1]}, ${data[2]}, ${data[3] / 255})`;
// console.log("color at (" + point.x + ", " + point.y + "):", rgba);
// Here comes the good part.
// We know the alpha value for the current country - any other value means
// we crossed the border!
let country = getCountryAtPoint(point);
if (country && country.name !== currentCountry.name) {
hitCountry = country;
break; // jump out of the loop now because we know we got a hit.
}
}
// After checking the points what did the hit indicator show ?
if (hitCountry.alpha !== currentCountry.alpha) {
circle.fill("magenta");
$("#alarm").html("You crossed the border into " + hitCountry.name);
} else {
circle.fill("lime");
$("#alarm").html("Still inside " + hitCountry.name);
}
});
function getRGBAInfo(ctx, point) {
// get the image data for one pixel at the computed point
const pixel = ctx.getImageData(point.x, point.y, 1, 1);
const data = pixel.data;
// for fun, we show the rgba value at the pixel
const rgba = `rgba(${data[0]}, ${data[1]}, ${data[2]}, ${data[3] / 255})`;
return data;
}
imageShape.on("mousemove", function () {
const point = stage.getPointerPosition();
getRGBAInfo(ctx, point);
});
function getCountryAtPoint(point) {
const colorInfo = getRGBAInfo(ctx, point);
for (const country of countries) {
if (country.alpha === colorInfo[3]) {
$("#info2").html("Selected: " + country.name);
return country;
}
}
}
imageShape.on("mousedown", function () {
currentCountry = getCountryAtPoint(stage.getPointerPosition());
});
circle.on("mousedown", function () {
currentCountry = getCountryAtPoint(stage.getPointerPosition());
});
body {
margin: 10px;
background-color: #f0f0f0;
}
.container {
border: 1px solid black;
display: inline-block;
}
#alarm {
color: red;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script src="https://unpkg.com/konva#8/konva.min.js"></script>
<p><span id='info'>Drag the circle around the country without hitting the edges!</span></p>
<p><span id='info2'>Selected: none</span> <span id='alarm'></span></p>
<div id="container" class='container'></div>
<div id="container2" class='container'></div>
PS. As a bonus, knowing the alpha values of the countries gives you an instant way to know which country the user clicks on. See the mousedown event.
To use an image, you can use the drawImage method of the canvas context in the clipFunc:
const image = new Image();
image.src = 'image.png';
const group = new Konva.Group({
clipFunc: function (ctx) {
ctx.drawImage(image, 0, 0);
},
});
To use an SVG path, you can pass a Path2D built from the path data to the clip method of the canvas context in the clipFunc. Note that clip() accepts a Path2D object, not a raw path string:
const continentPath = new Path2D('M10,10 h80 v80 h-80 Z');
const group = new Konva.Group({
clipFunc: function (ctx) {
// depending on the Konva version, the context wrapper may not forward
// arguments to clip(), so fall back to the native context if needed
(ctx._context || ctx).clip(continentPath);
},
});
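As a side note, Path2D accepts the same path data you would put in an SVG <path> element's d attribute, so if your world map exists as an SVG you could take a continent's outline from it (or trace one in a vector editor) and paste that path string in directly.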

THREE.JS & Reality Capture - Rotation issue with photogrammetry reference cameras in a 3D space

Thanks for taking the time to review my post. I hope that this post will not only yield results for myself but perhaps help others too!
Introduction
Currently I am working on a project involving pointclouds generated with photogrammetry. It consists of photos combined with laser scans. The software used to make the pointcloud is Reality Capture. Besides the pointcloud export, one can export "internal/external camera parameters", providing the ability to retrieve the photos that are used to make up a certain 3D point in the pointcloud. Reality Capture isn't that well documented online, and I have also posted in their forum regarding the camera variables; perhaps it can be of use in solving the issue at hand?
Only a few variables listed in the camera parameters file are relevant (for now) for referencing camera positioning: the filename, then x, y, alt for location, and heading, pitch and roll as rotation.
Currently the generated pointcloud is loaded into the browser-compatible THREE.JS viewer, after which the camera parameters .csv file is loaded and for each known photo a 'PerspectiveCamera' is spawned together with a green cube. An example is shown below:
The challenge
As a matter of fact you might already know what the issue is based on the previous image (or the title of this post, of course ;P). Just in case you haven't spotted it: the direction of the cameras is all wrong. Let me visualize it for you with shabby self-drawn vectors that rudimentarily show in what direction each camera should be facing (marked in red) and how it is currently oriented (green).
Row 37, DJI_0176.jpg, is the rightmost camera with a red reference line; row 38 is 0177, etc. The last picture (row 48, DJI_0189.jpg) corresponds with the leftmost image of the clustered images (as I didn't draw the other two camera references within the image above, I did not include the others).
When you copy the data below into an Excel sheet it should display correctly ^^
#name x y alt heading pitch roll f px py k1 k2 k3 k4 t1 t2
DJI_0174.JPG 3.116820957 -44.25690188 14.05258109 -26.86297007 66.43104338 1.912026354 30.35179628 7.25E-03 1.45E-03 -4.02E-03 -2.04E-02 3.94E-02 0 0 0
DJI_0175.JPG -5.22E-02 -46.97266554 14.18056658 -16.2033133 66.11532302 3.552072396 30.28063771 4.93E-03 4.21E-04 1.38E-02 -0.108013599 0.183136287 0 0 0
DJI_0176.JPG -3.056586953 -49.00754998 14.3474763 4.270483155 65.35247679 5.816970677 30.50596933 -5.05E-03 -3.53E-03 -4.94E-03 3.24E-02 -3.84E-02 0 0 0
DJI_0177.JPG -6.909437337 -50.15910066 14.38391206 19.4459053 64.26828897 6.685020944 30.6994734 -1.40E-02 4.72E-03 -5.33E-04 1.90E-02 -1.74E-02 0 0 0
DJI_0178.JPG -11.23696688 -50.36025313 14.56924433 19.19192622 64.40188316 6.265995184 30.7665397 -1.26E-02 2.41E-03 1.24E-04 -4.63E-03 2.84E-02 0 0 0
DJI_0179.JPG -16.04060554 -49.92320365 14.69721478 19.39979452 64.85507307 6.224929846 30.93772566 -1.19E-02 -4.31E-03 -1.27E-02 4.62E-02 -4.48E-02 0 0 0
DJI_0180.JPG -20.95614556 -49.22915437 14.92273203 20.39327092 65.02028543 6.164031482 30.99807237 -1.02E-02 -7.70E-03 1.44E-03 -2.22E-02 3.94E-02 0 0 0
DJI_0181.JPG -25.9335097 -48.45330177 15.37330388 34.24388008 64.82707628 6.979877709 31.3534556 -1.06E-02 -1.19E-02 -5.44E-03 2.39E-02 -2.38E-02 0 0 0
DJI_0182.JPG -30.40507957 -47.21269946 15.67804925 49.98858409 64.29238807 7.449650513 31.6699868 -8.75E-03 -1.31E-02 -4.57E-03 2.31E-02 2.68E-03 0 0 0
DJI_0183.JPG -34.64277285 -44.84034207 15.89229254 65.84203906 62.9109777 7.065942792 31.78292476 -8.39E-03 -2.94E-03 -1.40E-02 8.96E-02 -0.11801932 0 0 0
DJI_0184.JPG -39.17179024 -40.22577764 16.28164396 65.53938063 63.2592604 6.676581293 31.79546988 -9.81E-03 -8.13E-03 1.01E-02 -8.44E-02 0.179931606 0 0 0
DJI_0185.JPG -43.549378 -33.09364534 16.64130671 68.61427166 63.15205908 6.258411625 31.75339036 -9.78E-03 -7.12E-03 4.75E-03 -6.25E-02 0.1541638 0 0 0
DJI_0186.JPG -46.5381556 -24.2992233 17.2286956 74.42382577 63.75110346 6.279208736 31.88862443 -1.01E-02 -1.73E-02 1.02E-02 -6.15E-02 4.89E-02 0 0 0
DJI_0187.JPG -48.18737751 -14.67333218 17.85446854 79.54477952 63.0503902 5.980759013 31.69602914 -8.83E-03 -1.01E-02 -7.63E-03 -7.49E-03 2.71E-02 0 0 0
DJI_0188.JPG -48.48581505 -13.79840485 17.84756621 93.43316271 61.87561678 5.110113503 31.6671977 1.99E-03 -9.40E-04 2.40E-02 -0.180515731 0.32814456 0 0 0
DJI_0189.JPG -48.32815991 -13.88055437 17.77818573 106.3277582 60.87171036 4.039469869 31.50757712 2.84E-03 4.12E-03 8.54E-03 -1.32E-02 3.89E-02 0 0 0
Things tried so far
Something we discovered was that the exported model was mirrored compared to reality; however, this did not affect the placement of the camera references, as they aligned perfectly. We attempted to mirror the referenced cameras, the pointcloud and the viewport camera, but this did not seem to fix the issue at hand (hence the camera.applyMatrix4(new THREE.Matrix4().makeScale(-1, 1, 1));).
So far we have attempted to load Euler angles, set angles directly, or convert and apply a Quaternion, sadly without any good results. The camera reference file is being parsed with the following logic:
// Await the .csv file being parsed from the server
await new Promise((resolve) => {
(file as Blob).text().then((csvStr) => {
const rows = csvStr.split('\n');
for (const row of rows) {
const col = row.split(',');
if (col.length > 1) {
const suffixes = col[0].split('.');
const extension = suffixes[suffixes.length - 1].toLowerCase();
const validExtensions = ['jpeg', 'jpg', 'png'];
if (!validExtensions.includes(extension)) {
continue;
}
// == Parameter index by .csv column names ==
// 0: #name; 1: x; 2: y; 3: alt; 4: heading; 5: pitch; 6: roll; 7:f (focal);
// == Non .csv param ==
// 8: bool isRadianFormat default false
this.createCamera(col[0], parseFloat(col[1]), parseFloat(col[2]), parseFloat(col[3]), parseFloat(col[4]), parseFloat(col[5]), parseFloat(col[6]), parseFloat(col[7]));
}
}
resolve(true);
});
});
}
Below you will find the code snippet for instantiating a camera with its position and rotation. I left some additional comments to elaborate on it somewhat more, and I left the commented-out code lines in as well to show what else we have been trying:
private createCamera(fileName: string, xPos: number, yPos: number, zPos: number, xDeg: number, yDeg: number, zDeg: number, f: number, isRadianFormat = false) : void {
// Set radials as THREE.JS explicitly only works in radians
const xRad = isRadianFormat ? xDeg : THREE.MathUtils.degToRad(xDeg);
const yRad = isRadianFormat ? yDeg : THREE.MathUtils.degToRad(yDeg)
const zRad = isRadianFormat ? zDeg : THREE.MathUtils.degToRad(zDeg)
// Create camera reference and extract frustum
// Statically set the FOV and aspect ratio; Near is set to 0.1 by default and Far is dynamically set whenever a point is clicked in the 3D space.
const camera = new THREE.PerspectiveCamera(67, 5280 / 2970, 0.1, 1);
const pos = new THREE.Vector3(xPos, yPos, zPos); // Reality capture z = up; THREE y = up;
/* ===
In order to set an Euler angle one must provide the heading (x), pitch (y) and roll (z), as well as the order (the fourth parameter, 'XYZ') in which the rotations will be applied.
As a last resort we even tried switching the x,y and zRad variables as well as switching the orientation orders.
Possible orders:
XYZ
XZY
YZX
YXZ
ZYX
ZXY
=== */
const rot = new THREE.Euler(xRad, yRad, zRad, 'XYZ');
//camera.setRotationFromAxisAngle(new THREE.Vector3(0,))
//camera.applyMatrix4(new THREE.Matrix4().makeScale(-1, 1, 1));
// const rot = new THREE.Quaternion();
// rot.setFromAxisAngle(new THREE.Vector3(1, 0, 0), zRad);
// rot.setFromAxisAngle(new THREE.Vector3(0, 1, 0), xRad);
// rot.setFromAxisAngle(new THREE.Vector3(0, 0, 1), yRad);
// XYZ
// === Update camera frustum ===
camera.position.copy(pos);
// camera.applyQuaternion(rot);
camera.rotation.copy(rot);
camera.setRotationFromEuler(rot);
camera.updateProjectionMatrix(); // TODO: Assert whether projection update is required here
/* ===
The camera.applyMatrix4 call listed below was an attempt at rotating several aspects of the 3D viewer.
An attempt was made to rotate each individual photo camera position, the pointcloud itself, as well as the viewport camera, both separately
and in combination. It made no difference however.
=== */
//camera.applyMatrix4(new THREE.Matrix4().makeScale(-1, 1, 1));
// Instantiate CameraPosition instance and push to array
const photo: PhotoPosition = {
file: fileName,
camera,
position: pos,
rotation: rot,
focal: f,
width: 5120, // Statically set for now
height: 5120, // Statically set for now
};
this.photos.push(photo);
}
The cameras created in the snippet above are then grabbed by the next piece of code, which passes the cameras to the camera manager and draws a CameraHelper (displayed in both 3D viewer pictures above). It is written within an async function awaiting the csv file to be loaded before proceeding to initialize the cameras:
private initializeCameraPoses(url: string, csvLoader: CSVLoader) {
const absoluteUrl = url + '\\references.csv';
(async (scene, csvLoader, url, renderer) => {
await csvLoader.init(url);
const photos = csvLoader.getPhotos(); // The cameras created by the createCamera() method
this.inspectionRenderer = new InspectionRenderer(scene); // InspectionRenderer manages all further camera operations
this.inspectionRenderer.populateCameras(photos);
for (const photoData of photos) {
// Draw the green cube
const geometry = new THREE.BoxGeometry(0.5, 0.5, 0.5);
const material = new THREE.MeshBasicMaterial({ color: 0x00ff00 });
const cube = new THREE.Mesh(geometry, material);
scene.add(cube);
cube.position.copy(photoData.position);
photoData.camera.updateProjectionMatrix();
// Draws the yellow camera viewport to the scene
const helper = new CameraHelper(photoData.camera);
renderer.render(scene, photoData.camera);
scene.add(helper);
}
})(this.scene, csvLoader, absoluteUrl, this.renderer);
}
Marquizzo's code snippet
The code snippet posted below by Marquizzo seems to bring us a lot closer to a solution. The cameras seem to be oriented in the correct direction. However, the pitch seems to be a little off somehow. Below I will include the source image of DJI_0189.jpg. Note that for this example the FOV is currently not being set, as it looks chaotic when a camera helper is rendered for every camera position. For this example I have rendered only the DJI_0189 camera helper.
The edit @Marquizzo provided, inverting the pitch (const rotX = deg2rad(photo.pitch * -1);), would result in the midpoint intersection always being slightly lower than expected:
When the pitch is adjusted to const rotX = deg2rad(photo.pitch * -.5); you'll see that the midpoint intersection is closer to that of the source image:
Somehow I think that a solution is within reach and that in the end it'll come down to some very small detail that has been overlooked. I'm really looking forward to seeing a reply. If something is still unclear please say so and I'll provide the necessary details ^^
Thanks for reading this post so far!
At first glance, I see three possibilities:
It's hard to see where the issue is without seeing how you're calling the createCamera() method. You could be swapping pitch with heading or something like that. In Three.js, heading is rotation around the Y-axis, pitch around the X-axis, and roll around the Z-axis.
Secondly, do you know in what order the heading, pitch, roll measurements were taken by your sensor? That will affect the way in which you initiate your THREE.Euler(xRad, yRad, zRad, 'XYZ'), since the order in which to apply rotations could also be 'YZX', 'ZXY', 'XZY', 'YXZ' or 'ZYX'.
Finally, you have to think "What does heading: 0 mean to the sensor?" It could mean different things between real-world and Three.js coordinate system. A camera with no rotation in Three.js is looking straight down towards -Z axis, but your sensor might have it pointing towards +Z, or +X, etc.
Edit:
I added a demo below; I think this is what you needed, judging from the screenshots. Notice I multiplied pitch by -1 so the cameras "look down", and added +180 to the heading so they're pointing in the right... heading.
const DATA = [
{name: "DJI_0174.JPG", x: 3.116820957, y: -44.25690188, alt: 14.05258109, heading: -26.86297007, pitch: 66.43104338, roll: 1.912026354},
{name: "DJI_0175.JPG", x: -5.22E-02, y: -46.97266554, alt: 14.18056658, heading: -16.2033133, pitch: 66.11532302, roll: 3.552072396},
{name: "DJI_0176.JPG", x: -3.056586953, y: -49.00754998, alt: 14.3474763, heading: 4.270483155, pitch: 65.35247679, roll: 5.816970677},
{name: "DJI_0177.JPG", x: -6.909437337, y: -50.15910066, alt: 14.38391206, heading: 19.4459053, pitch: 64.26828897, roll: 6.685020944},
{name: "DJI_0178.JPG", x: -11.23696688, y: -50.36025313, alt: 14.56924433, heading: 19.19192622, pitch: 64.40188316, roll: 6.265995184},
{name: "DJI_0179.JPG", x: -16.04060554, y: -49.92320365, alt: 14.69721478, heading: 19.39979452, pitch: 64.85507307, roll: 6.224929846},
{name: "DJI_0180.JPG", x: -20.95614556, y: -49.22915437, alt: 14.92273203, heading: 20.39327092, pitch: 65.02028543, roll: 6.164031482},
{name: "DJI_0181.JPG", x: -25.9335097, y: -48.45330177, alt: 15.37330388, heading: 34.24388008, pitch: 64.82707628, roll: 6.979877709},
{name: "DJI_0182.JPG", x: -30.40507957, y: -47.21269946, alt: 15.67804925, heading: 49.98858409, pitch: 64.29238807, roll: 7.449650513},
{name: "DJI_0183.JPG", x: -34.64277285, y: -44.84034207, alt: 15.89229254, heading: 65.84203906, pitch: 62.9109777, roll: 7.065942792},
{name: "DJI_0184.JPG", x: -39.17179024, y: -40.22577764, alt: 16.28164396, heading: 65.53938063, pitch: 63.2592604, roll: 6.676581293},
{name: "DJI_0185.JPG", x: -43.549378, y: -33.09364534, alt: 16.64130671, heading: 68.61427166, pitch: 63.15205908, roll: 6.258411625},
{name: "DJI_0186.JPG", x: -46.5381556, y: -24.2992233, alt: 17.2286956, heading: 74.42382577, pitch: 63.75110346, roll: 6.279208736},
{name: "DJI_0187.JPG", x: -48.18737751, y: -14.67333218, alt: 17.85446854, heading: 79.54477952, pitch: 63.0503902, roll: 5.980759013},
{name: "DJI_0188.JPG", x: -48.48581505, y: -13.79840485, alt: 17.84756621, heading: 93.43316271, pitch: 61.87561678, roll: 5.110113503},
{name: "DJI_0189.JPG", x: -48.32815991, y: -13.88055437, alt: 17.77818573, heading: 106.3277582, pitch: 60.87171036, roll: 4.039469869},
];
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(
45,
window.innerWidth / window.innerHeight,
1,
1000
);
camera.position.z = 100;
const renderer = new THREE.WebGLRenderer({
antialias: true,
canvas: document.querySelector("#canvas")
});
renderer.setSize(window.innerWidth, window.innerHeight);
const controls = new THREE.OrbitControls( camera, renderer.domElement );
// Helpers
const axesHelper = new THREE.AxesHelper( 20 );
scene.add(axesHelper);
const plane = new THREE.Plane( new THREE.Vector3( 0, 1, 0 ), 0 );
const planeHelper = new THREE.PlaneHelper( plane, 50, 0xffff00 );
scene.add(planeHelper);
let deg2rad = THREE.MathUtils.degToRad;
function createCam(photo) {
let tempCam = new THREE.PerspectiveCamera(10, 2.0, 1, 30);
// Altitude is actually y-axis,
// "y" is actually z-axis
tempCam.position.set(photo.x, photo.alt, photo.y);
// Modify pitch & heading so it matches Three.js coordinates
const rotX = deg2rad(photo.pitch * -1);
const rotY = deg2rad(photo.heading + 180);
const rotZ = deg2rad(photo.roll);
tempCam.rotation.set(rotX, rotY, rotZ, "YXZ");
let helper = new THREE.CameraHelper(tempCam);
scene.add(tempCam);
scene.add(helper);
}
for(let i = 0; i < DATA.length; i++) {
createCam(DATA[i]);
}
function animate() {
renderer.render(scene, camera);
requestAnimationFrame(animate);
}
animate();
html, body { margin:0; padding:0;}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script><script src="https://rawgit.com/mrdoob/three.js/dev/examples/js/controls/OrbitControls.js"></script>
<canvas id="canvas"></canvas>

Cannon.js: is there a way to check if an object is colliding with 2 other objects at the same time?

I'm using Cannon.js to make a game where a ball bounces off of your head, and the sides of the screen have boxes as boundaries. When the ball collides with the head, it adds one more to the score. I want to check if the ball is colliding with one of the walls and the head at the same time, because when this happens I don't want it to add one to the score. Does someone know how to create a check for this?
My code:
const UIManager = require("./UIManager.js");
var playing = false;
// Create cannon world and setting gravity
const world = new CANNON.World();
world.gravity.set(0, -0.52, 0);
// Create sphere body and setting its shape and properties
var mat1 = new CANNON.Material();
// Radius for the ball object
const Radius = 0.05;
// Radius for the head hitbox
const Radius2 = 0.08;
const BallProp = {
mass: 2,
position: new CANNON.Vec3(0, 1.1, 0),
radius: Radius,
material: mat1,
shape: new CANNON.Sphere(Radius),
}
// Add the ball to the physics world
const BallBody = new CANNON.Body(BallProp);
world.addBody(BallBody);
// Create a material for the head hitbox
var HeadMaterial = new CANNON.Material();
// Create the head hitbox body, setting its shape and properties
const HeadProp = {
mass: 0,
position: new CANNON.Vec3(0, 0, 0),
radius: Radius2,
material: HeadMaterial,
shape: new CANNON.Sphere(Radius2),
}
const HeadBody = new CANNON.Body(HeadProp);
// Rotate the head hitbox so it is flat (facing upwards)
const angle = -Math.PI / 2;
const xAxis = new CANNON.Vec3(1, 0, 0);
HeadBody.quaternion.setFromAxisAngle(xAxis, angle);
// Add the hitbox to the physics world
world.addBody(HeadBody);
// Create a new material for the walls
var WallMaterial = new CANNON.Material();
// Create the walls, setting their shapes and properties
const WallProp1 = {
mass: 0,
position: new CANNON.Vec3(-1.19, 0.6, 0),
material: WallMaterial,
shape: new CANNON.Box(new CANNON.Vec3(1, 1, 1)),
}
const WallProp2 = {
mass: 0,
position: new CANNON.Vec3(1.19, 0.6, 0),
material: WallMaterial,
shape: new CANNON.Box(new CANNON.Vec3(1, 1, 1)),
}
const Wall1Body = new CANNON.Body(WallProp1);
const Wall2Body = new CANNON.Body(WallProp2);
// Add the walls to the physics world
world.addBody(Wall1Body);
world.addBody(Wall2Body);
// Create a death plane, setting its shape and properties
const deathProps = {
mass: 0,
position: new CANNON.Vec3(0, -0.35, 0),
shape: new CANNON.Plane(),
}
const DeathBody = new CANNON.Body(deathProps);
// Set the rotation correctly
DeathBody.quaternion.setFromAxisAngle(xAxis, angle);
// Add the deathplane to the physics world
world.addBody(DeathBody);
// Add new settings to the materials
var mat1_ground = new CANNON.ContactMaterial(HeadMaterial, mat1, { friction: 0.0, restitution: 1});
var mat1_wall = new CANNON.ContactMaterial(WallMaterial, mat1, { friction: 0.0, restitution: 0.65});
world.addContactMaterial(mat1_ground);
world.addContactMaterial(mat1_wall);
// Configure time step for Cannon
const fixedTimeStep = 1.0 / 60.0;
const maxSubSteps = 3;
const timeInterval = 30;
let lastTime;
var score = 0;
//checks for collision with the head hitbox
HeadBody.addEventListener("collide",function(e){
if(playing)
{
score++;
Diagnostics.log("Bounced amount / Score = " + score);
}
else
{
BallBody.velocity.set(0,0,0);
BallBody.position.set(0,1,0);
}
});
//checks for collision with the death plane
DeathBody.addEventListener("collide",function(e){
if(playing)
{
EndGame(score);
}
else
{
BallBody.velocity.set(0,0,0);
BallBody.position.set(0,1,0);
}
});
This might work:
Step 1: create a variable colliding and set it to false.
Step 2: set colliding to false every frame.
Step 3: in the HeadBody event listener, if it collides with the ball, set colliding to true.
Step 4: in the DeathBody listener, check if it collides with the ball and whether colliding is true - if so, the ball touches both.
(You might have to switch, i.e. setting colliding in the death-plane listener and checking it in the HeadBody one.)
That's how I used to check if 2 specific items collide, and that check involved checking 2 collisions in the same frame + some other verifications.
If you don't know how to check if a body collides with the ball, the condition is event.body.id == BallBody.id, with BallBody being the body of the ball used for the physics calculations.
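Applied to the bodies from your code, a minimal sketch of those steps could look like this; it would replace the score increment in your existing HeadBody listener. The stepPhysics driver is an assumption on my part, since your snippet doesn't show where world.step is called - the point is that the flags are reset right before each step, set by the collide events fired during the step, and evaluated right after it:
// track what the ball touched during the current physics step
var ballHitHead = false;
var ballHitWall = false;
HeadBody.addEventListener("collide", function (e) {
    // e.body is the other body involved in the contact
    if (e.body.id === BallBody.id) ballHitHead = true;
});
function onWallCollide (e) {
    if (e.body.id === BallBody.id) ballHitWall = true;
}
Wall1Body.addEventListener("collide", onWallCollide);
Wall2Body.addEventListener("collide", onWallCollide);
// hypothetical frame driver - adapt to however you currently advance the world
function stepPhysics (time) {
    ballHitHead = false;
    ballHitWall = false;
    if (lastTime !== undefined) {
        world.step(fixedTimeStep, (time - lastTime) / 1000, maxSubSteps);
    }
    lastTime = time;
    // only count a bounce where the ball hit the head but not a wall
    if (playing && ballHitHead && !ballHitWall) {
        score++;
        Diagnostics.log("Bounced amount / Score = " + score);
    }
}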

Attract one set of bodies to another set of bodies, but not to other bodies in the same set

I'm looking to implement a planet-like system, whereby you have large, mostly-fixed objects which have lots of smaller objects around them. The idea is that the smaller objects could orbit around the larger objects, but also change which of the larger objects they're orbiting around. I'm attempting to use the newtonian behaviour type on the smaller objects, but it's making those smaller objects try to orbit around everything, rather than just the large objects. Is there a way to define a behaviour on a set of objects (I have them in an array) and have them only be attracted to objects in another set of objects?
To give you a visual idea, this is how it currently looks
(https://i.imgur.com/DqizSFO.png):
I want the orange objects to orbit the blue objects, but I don't want the orange or blue objects to be attracted to other orange or blue objects respectively. Is this possible with PhysicsJS?
Here is my current progress.
To view the script in action, view it on CodePen:
var magnets = []
var particles = []
function rand (from, to) {
return Math.floor(Math.random() * to) + from
}
function generateMagnet (world, renderer) {
var magnet = Physics.body('circle', {
x: rand(0, renderer.width),
y: rand(0, renderer.height),
radius: 50,
mass: 2,
vx: 0.001,
vy: 0.001,
treatment: 'static',
styles: {
fillStyle: '#6c71c4'
}
})
magnets.push(magnet)
world.add(magnet)
}
function generateParticle (world, renderer) {
var particle = Physics.body('circle', {
x: rand(0, renderer.width),
y: rand(0, renderer.height),
vx: rand(-100, 100) / 1000,
vy: rand(-100, 100) / 1000,
mass: 1,
radius: 5,
styles: {
fillStyle: '#cb4b16'
}
})
particles.push(particle)
world.add(particle)
}
Physics(function (world) {
// bounds of the window
var el = document.getElementById('matterContainer')
var viewportBounds = Physics.aabb(0, 0, el.scrollWidth, el.scrollHeight)
var edgeBounce
var renderer
// create a renderer
renderer = Physics.renderer('canvas', {
'el': el
})
el.childNodes[0].style.left = 0
// add the renderer
world.add(renderer)
// render on each step
world.on('step', function () {
world.render()
})
// constrain objects to these bounds
edgeBounce = Physics.behavior('edge-collision-detection', {
aabb: viewportBounds,
restitution: 0.99,
cof: 0.8
})
// resize events
window.addEventListener('resize', function () {
// as of 0.7.0 the renderer will auto resize... so we just take the values from the renderer
viewportBounds = Physics.aabb(0, 0, renderer.width, renderer.height)
// update the boundaries
edgeBounce.setAABB(viewportBounds)
}, true)
for (var i = 0; i < 5; i++) generateMagnet(world, renderer)
for (var i = 0; i < 100; i++) generateParticle(world, renderer)
// add things to the world
var newton = Physics.behavior('newtonian', { strength: 0.05 })
// newton.applyTo(particles)
var attractor = Physics.behavior('attractor', {
order: 0,
strength: 0.1
}).applyTo(magnets);
world.add([
Physics.behavior('body-impulse-response'),
newton,
edgeBounce
])
// subscribe to ticker to advance the simulation
Physics.util.ticker.on(function (time) {
world.step(time)
})
})
<script src="http://wellcaffeinated.net/PhysicsJS/assets/scripts/vendor/physicsjs-current/physicsjs-full.min.js"></script>
<section id="matterContainer" class="sct-HomepageHero pt-0 pb-0"></section>
<style>
.sct-HomepageHero.pt-0.pb-0 {
min-height: 600px;
padding-top: 0;
padding-bottom: 0;
}
</style>
