Running an animated loop in PIXI.JS - javascript

I am trying to create a bottle-pouring animation that loops through 5 different bottles; each bottle is sideways and has a different amount of liquid pouring out. I want to display the first bottle, then the second after 60ms, then the third after another 60ms, and so on. I need to remove the previous bottle and add the next bottle in exactly the same place. I am wondering what the most concise way to do this would be. I have tried with several setTimeout calls, but the code has some bugs and is not concise at all. I have also researched PIXI.Timer but am struggling to understand how to set up 5 different sprites and loop through them. Let me know if you have any ideas or direction. My setTimeout-based function is below:
setTimeout(() => {
  let pour1Texture = new PIXI.Texture.from(require('#/assets/items/bottle/pouring/pouring bottle1.png'))
  let pour1 = new PIXI.Sprite.from(pour1Texture)
  sprites.push(pour1)
  pour1.position.x = 438;
  pour1.position.y = -40;
  labBenchComponent.pixiApp.stage.addChild(pour1);
}, 1000)

setTimeout(() => {
  labBenchComponent.pixiApp.stage.removeChild(sprites.pop())
  const pour2Texture = new PIXI.Texture.from(require('#/assets/items/bottle/pouring/pouring bottle2.png'))
  const pour2 = new PIXI.Sprite.from(pour2Texture)
  pour2.position.x = 438;
  pour2.position.y = -10;
  sprites.push(pour2)
  labBenchComponent.pixiApp.stage.addChild(pour2);
}, 1000)

setTimeout(() => {
  labBenchComponent.pixiApp.stage.removeChild(sprites.pop())
  const pour3Texture = new PIXI.Texture.from(require('#/assets/items/bottle/pouring/pouring bottle2.png'))
  const pour3 = new PIXI.Sprite.from(pour3Texture)
  pour3.position.x = 438;
  pour3.position.y = 10;
  sprites.push(pour3)
  labBenchComponent.pixiApp.stage.addChild(pour3);
}, 1000)

I figured it out. Instead of using a ticker, use PIXI.AnimatedSprite, like the following:
import * as PIXI from 'pixi.js-legacy';

export default function pourBottle() {
  let textureArray = [];
  for (let i = 0; i < 5; i++) {
    let texture = {
      texture: PIXI.Texture.from(require(`#/assets/items/bottle/pouring/pouring bottle${i + 1}.png`)),
      time: 100,
    };
    textureArray.push(texture);
  }
  let animatedSprite = new PIXI.AnimatedSprite(textureArray);
  return animatedSprite;
}
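For completeness, here is a minimal usage sketch (not part of the original answer) showing how the returned sprite might be wired up; the import path is made up, and the position values and labBenchComponent stage are carried over from the question as assumptions:

// Hypothetical usage of pourBottle(); labBenchComponent and the position come from the question's code.
import pourBottle from './pourBottle';

const pour = pourBottle();
pour.position.set(438, -40);
pour.loop = true;
labBenchComponent.pixiApp.stage.addChild(pour);
pour.play(); // an AnimatedSprite does not start animating until play() is called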

Related

How to create a sliding window on real-time data with custom overlap size

I was trying to create a sliding window on a real-time signal.
For now, I have just simulated real-time data getting pushed into a signal array. I am trying to capture a part of the real-time signal in a buffer using the sliding window method.
The buffer part is in red, and the signal is in blue.
Here is the visualization in P5JS: Sketch Link
Just to implement the code and visualize the output, I have used p5js. Can anyone please check my code and tell me if I am doing it correctly?
Here is my code:
let sampleLength = 0
let signal = []
let buffer = []
let windowSize = 50
let overlappingSize = 20
// let toggle = false
let t = 0
let pos = 0

function setup() {
  createCanvas(500, 400)
  sampleLength = width
  // create buffer filled with zero
  windowSize = floor(sampleLength / 2)
  for (let i = 0; i < windowSize; i++) {
    buffer.push(100)
  }
  // setInterval(()=>{
  //   toggle = !toggle
  // }, 100);
}

function draw() {
  background(240);
  translate(0, height >> 1)
  drawWave(buffer, 0.5, 'red')
  drawWave(signal, 1, 'blue')
  generateSignal()
  if (pos > (windowSize - overlappingSize)) {
    buffer = signal.slice(0, windowSize)
    pos = 0
  } else {
    pos++
  }
}

function generateSignal() {
  if (signal.length < sampleLength) {
    signal.push(100 * sin(2 * radians(t)))
    t++;
  } else {
    signal.shift()
  }
}

function drawWave(arr, scale, color) {
  stroke(color);
  noFill()
  beginShape()
  for (let i = 0; i < arr.length - 1; i++) {
    vertex(i, arr[i] * scale)
  }
  endShape()
}
Edit: A new example has been added.
As an example without visualization, the code below seems to do what I intend.
const arr = [1, 2, 3, 4, 5, 6, 7, 8]
const bufferSize = 8
var buffr = []
const overlap = 2
const slideStep = bufferSize - overlap
let i = 0

while (i < 32) {
  buffr = arr.slice(0 + i * slideStep, bufferSize + i * slideStep);
  console.log(i, buffr);
  pushNewValue()
  i++
}

function pushNewValue() {
  for (let k = 0; k < 8; k++) {
    arr.push(arr[arr.length - 1] + k)
  }
}
Edit 2: I updated the sliding-window code and created a class, which is applied in an example. Please check the link. Let me know if this code is correct.
function DSP(windowSize, overlap) {
  this.arr = Array(windowSize)
    .fill()
    .map(() => 0);
  this.windowSize = windowSize
  this.slideStep = windowSize - overlap
}

DSP.prototype.slideWindow = function (currVal, callback) {
  if (this.arr.length == this.windowSize) {
    callback(this.arr.slice())
    let del = this.slideStep;
    while (del--) {
      this.arr.shift();
    }
  } else {
    this.arr.push(currVal)
  }
}
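For reference, a minimal usage sketch of the DSP class above (not from the original post); the window size, overlap, and sample values are arbitrary assumptions:

// Hypothetical driver: push samples 0..39 through an 8-sample window with an
// overlap of 2 and log each emitted window.
const dsp = new DSP(8, 2);
for (let sample = 0; sample < 40; sample++) {
  dsp.slideWindow(sample, (win) => {
    console.log('window:', win);
  });
}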

All mesh instances reset to (0,0,0) before they lerp to new position when count changes

I am currently working on my graduation project, in which mesh instances are repositioned over time. Besides the positions of the instances, the count of the mesh instances can also change over time.
Based on the following code examples, I managed to build this functionality.
https://jsfiddle.net/ew1tyz63/2/
https://threejs.org/examples/?q=dynami#webgl_instancing_dynamic
However, the problem appears when I want to lerp the positions of the instances: the positions of all instances reset to (0,0,0) when the count of the mesh instances changes.
I've created a codesandbox that reproduces this. The code has been forked from https://codesandbox.io/s/x8ric by James Wesc and tweaked a bit to clarify the issue.
My problem appears when you change the count of the instances by dragging the slider. The positions of all instances reset to (0,0,0).
Is there a way to stop the reset and only update the new instances when the count changes?
This is a link to the code sandbox.
https://codesandbox.io/s/instanced-mesh-lerping-positions-forked-d03ckr?file=/src/App.tsx
I added a snippet to the code as well!
Thanks in advance!!
const tempObject = new Object3D();
const tempMatrix = new Matrix4();
const tempVector = new Vector3();
const tempVector2 = new Vector3();

type XYZ = [number, number, number];
const data = dataJSON as Array<{ p1: XYZ; p2: XYZ }>;
const pos = new Vector3(10, 1, 1);

const YourCanvas = withControls(Canvas);

const Boxes: React.FC = () => {
  const count = useControl("count", {
    type: "number",
    value: 1000,
    min: 100,
    max: 1000,
    distance: 0.1
  });
  const ref = useRef<InstancedMesh>(null!);

  React.useEffect(() => {
    if (ref.current) {
      ref.current.instanceMatrix.setUsage(THREE.DynamicDrawUsage);
    }
  }, []);

  useFrame(({ clock: { elapsedTime } }) => {
    const t = Math.floor(elapsedTime / 5) % 2;
    for (let i = 0; i < count; i++) {
      ref.current.getMatrixAt(i, tempMatrix);
      tempVector.setFromMatrixPosition(tempMatrix);
      const toPosition = t ? data[i].p1 : data[i].p2;
      // Resets positions of all instances when count changes
      // tempVector2.set(toPosition[0], toPosition[1], toPosition[2])
      // tempObject.position.lerpVectors(tempVector, tempVector2, 0.01)
      // Only updating positions of new instances when count changes
      tempObject.position.set(toPosition[0], toPosition[1], toPosition[2]);
      tempObject.updateMatrix();
      ref.current.setMatrixAt(i, tempObject.matrix);
    }
    ref.current.instanceMatrix.needsUpdate = true;
  });

  return (
    <instancedMesh
      ref={ref}
      args={[
        new THREE.BoxGeometry(1.0, 1.0, 1.0, 1.0),
        new THREE.MeshStandardMaterial({ color: new THREE.Color("#00ff00") }),
        count
      ]}
    ></instancedMesh>
  );
};
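Not from the original thread, but for illustration, here is a hedged sketch of a common plain three.js pattern that avoids recreating the InstancedMesh when the visible count changes: allocate the instance buffer once for a maximum count and vary mesh.count. Whether this maps cleanly onto the react-three-fiber setup above is left open; the MAX_COUNT value and function names below are made up.

// Sketch only: allocate for MAX_COUNT once, then change mesh.count instead of rebuilding the mesh.
import * as THREE from 'three';

const MAX_COUNT = 1000; // assumed upper bound
const geometry = new THREE.BoxGeometry(1, 1, 1);
const material = new THREE.MeshStandardMaterial({ color: 0x00ff00 });
const mesh = new THREE.InstancedMesh(geometry, material, MAX_COUNT);
mesh.instanceMatrix.setUsage(THREE.DynamicDrawUsage);

const dummy = new THREE.Object3D();

function setVisibleCount(n) {
  // Existing instance matrices are kept; only the number of instances drawn changes.
  mesh.count = Math.min(n, MAX_COUNT);
}

function updatePositions(positions) {
  positions.forEach((p, i) => {
    dummy.position.set(p[0], p[1], p[2]);
    dummy.updateMatrix();
    mesh.setMatrixAt(i, dummy.matrix);
  });
  mesh.instanceMatrix.needsUpdate = true;
}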

Passing parameters into ES6 closure (for multiple P5.js sketches)

I am trying to make a 'generic' P5.js sketch that I can tweak based on a passed-in parameter, with the intent of generating multiple sketches on a single page to show how different inputs work side by side.
Following the guide, I see syntax like this (and I've extended it to fill multiple divs):
const s = (sketch) => {
  let x = 100;
  let y = 100;
  sketch.setup = () => {
    sketch.createCanvas(500, 500);
    console.log(idx);
  };
  sketch.draw = () => {
    sketch.background(100);
    sketch.fill(255);
    sketch.rect(x, y, 50, 50);
    sketch.text
  };
};

let myp5_1 = new p5(s, document.getElementById('p5-sketch1'));
let myp5_2 = new p5(s, document.getElementById('p5-sketch2'));
let myp5_3 = new p5(s, document.getElementById('p5-sketch3'));
I am not great with ES6, and I'm struggling with how to pass a set of parameters in so I can tweak the P5.js code.
What I would like to do is pass, say, an ID variable into each instance of s and have the sketch execute differently, rather than making three separate const s definitions and duplicating code.
Create a function that takes idx and returns the original function.
const s = (idx) => (sketch) => {
  let x = 100;
  let y = 100;
  sketch.setup = () => {
    sketch.createCanvas(500, 500);
    console.log(idx);
  };
  sketch.draw = () => {
    sketch.background(100);
    sketch.fill(255);
    sketch.rect(x, y, 50, 50);
    sketch.text
  };
};

let myp5_1 = new p5(s(0), document.getElementById('p5-sketch1'));
let myp5_2 = new p5(s(1), document.getElementById('p5-sketch2'));
let myp5_3 = new p5(s(2), document.getElementById('p5-sketch3'));
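The same pattern extends to richer parameters. As a minimal sketch (the option names, values, and div ids below are made up for illustration), you can pass a config object instead of a single index:

// Hypothetical extension: pass an options object rather than a bare index.
const makeSketch = (opts) => (sketch) => {
  sketch.setup = () => {
    sketch.createCanvas(opts.size, opts.size);
  };
  sketch.draw = () => {
    sketch.background(opts.bg);
    sketch.fill(255);
    sketch.rect(100, 100, 50, 50);
  };
};

new p5(makeSketch({ size: 300, bg: 50 }), document.getElementById('p5-sketch1'));
new p5(makeSketch({ size: 300, bg: 200 }), document.getElementById('p5-sketch2'));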

Dynamically create any number of sequentially firing functions

(JavaScript) I have a function that deals player cards in a nice sequential fashion: Player, Dealer, Player, Dealer. Below is that part of the function which sequentially moves cards to the viewport.
setTimeout(() => {
  player1.style.top = `70%`;
  player1.style.left = `30%`;
  player1.style.transform = `rotate(${Math.floor(Math.random() * rotationMax)}deg)`;
  setTimeout(() => {
    dealer1.style.top = `8%`;
    dealer1.style.left = `30%`
    dealer1.style.transform = `rotate(${Math.floor(Math.random() * rotationMax) + 1}deg)`;
    setTimeout(() => {
      player2.style.top = `70%`;
      player2.style.left = `50%`
      player2.style.transform = `rotate(${Math.floor(Math.random() * rotationMax)}deg)`;
      setTimeout(() => {
        flippedCard.style.top = '8%';
        flippedCard.style.left = '44%';
      }, 200)
    }, 200)
  }, 100)
}, 200)
You can see that this block works only with a set number of cards (in this case 4). I am not yet good enough at JavaScript to create a function that would dynamically generate any number of cards to be dealt.
Can someone point me in the right direction? Specific question: how do you dynamically generate tasks that run one after another?
Make an array of player cards and an array of dealer cards, and figure out the difference in left offset you want for each. Then iterate over the arrays, delaying with await to keep the code flat and readable:
const delay200 = () => new Promise(res => setTimeout(res, 200));
const playerCards = [player1, player2];
const dealerCards = [dealer1, dealer2];
const playerLeftIncrement = 20; // eg: 30%, then 50%, then 70%; adjust as needed
const dealerLeftIncrement = 14; // eg: 30%, then 44%, then 58%; adjust as needed

const applyStyle = (card, top, left) => {
  Object.assign(card.style, {
    top,
    left,
    transform: `rotate(${Math.floor(Math.random() * rotationMax)}deg)`,
  });
};

// Run this loop inside an async function so that await is allowed:
for (let i = 0; i < playerCards.length; i++) {
  applyStyle(playerCards[i], '70%', `${30 + i * playerLeftIncrement}%`);
  await delay200();
  applyStyle(dealerCards[i], '8%', `${30 + i * dealerLeftIncrement}%`);
  await delay200();
}
It would be useful to have a function that looks something like:
callFunctionsWithDelays(functions, delays)
That would avoid the nested look of your code and make it easy to generate dynamically. I'd write this using async/await syntax:
async function callFunctionsWithDelays(functions, delays) {
  for (let i = 0; i < functions.length; i++) {
    functions[i]();
    await new Promise(resolve => setTimeout(resolve, delays[i]));
  }
}
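A minimal usage sketch for the card-dealing case (not part of the original answer; the card elements, offsets, and rotationMax come from the question, and the moveCard helper is hypothetical glue):

// Hypothetical: build the task list from the question's cards, then deal them in order.
const moveCard = (card, top, left) => () => {
  card.style.top = top;
  card.style.left = left;
  card.style.transform = `rotate(${Math.floor(Math.random() * rotationMax)}deg)`;
};

const tasks = [
  moveCard(player1, '70%', '30%'),
  moveCard(dealer1, '8%', '30%'),
  moveCard(player2, '70%', '50%'),
  moveCard(flippedCard, '8%', '44%'),
];
const delays = [200, 100, 200, 200];

callFunctionsWithDelays(tasks, delays);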

Computing the gradient of the loss using Tensorflow.js

I am trying to compute the gradient of a loss with respect to a network's trainable weights using Tensorflow.js, in order to apply these gradients to my network's weights. In Python, this is easily done using the tf.gradients() function, which takes a minimum of two inputs representing dy and dx.
However, I am not able to reproduce this behavior in Tensorflow.js. I am not sure whether my understanding of the gradient of the loss w.r.t. the weights is wrong, or whether my code contains mistakes.
I have spent some time analysing the core code of the tfjs-node package to understand how it is done when we call the function tf.model.fit(), but with little success so far.
let model = build_model(); //Two stacked dense layers followed by two parallel dense layers for the output
let loss = compute_loss(...); //This function returns a tf.Tensor of shape [1] containing the mean loss for the batch.
const f = () => loss;
const grad = tf.variableGrads(f);
grad(model.getWeights());
The model.getWeights() function returns an array of tf.variable()s, so I assumed the function would compute dL/dW for each layer, which I could later apply to my network's weights. However, that's not quite the case, as I get this error:
Error: Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.
I don't quite understand what this error means.
How am I supposed to compute the gradient of the loss (analogous to tf.gradients() in Python) using Tensorflow.js, then?
Edit:
This is the function computing the loss:
function compute_loss(done, new_state, memory, agent, gamma = 0.99) {
  let reward_sum = 0.;
  if (done) {
    reward_sum = 0.;
  } else {
    reward_sum = agent.call(tf.oneHot(new_state, 12).reshape([1, 9, 12]))
      .values.flatten().get(0);
  }

  let discounted_rewards = [];
  let memory_reward_rev = memory.rewards;
  for (let reward of memory_reward_rev.reverse()) {
    reward_sum = reward + gamma * reward_sum;
    discounted_rewards.push(reward_sum);
  }
  discounted_rewards.reverse();

  let onehot_states = [];
  for (let state of memory.states) {
    onehot_states.push(tf.oneHot(state, 12));
  }
  let init_onehot = onehot_states[0];
  for (let i = 1; i < onehot_states.length; i++) {
    init_onehot = init_onehot.concat(onehot_states[i]);
  }

  let log_val = agent.call(
    init_onehot.reshape([memory.states.length, 9, 12])
  );

  let disc_reward_tensor = tf.tensor(discounted_rewards);
  let advantage = disc_reward_tensor.reshapeAs(log_val.values).sub(log_val.values);
  let value_loss = advantage.square();
  log_val.values.print();

  let policy = tf.softmax(log_val.logits);
  let logits_cpy = log_val.logits.clone();

  let entropy = policy.mul(logits_cpy.mul(tf.scalar(-1)));
  entropy = entropy.sum();

  let memory_actions = [];
  for (let i = 0; i < memory.actions.length; i++) {
    memory_actions.push(new Array(2000).fill(0));
    memory_actions[i][memory.actions[i]] = 1;
  }
  memory_actions = tf.tensor(memory_actions);

  let policy_loss = tf.losses.softmaxCrossEntropy(memory_actions.reshape([memory.actions.length, 2000]), log_val.logits);

  let value_loss_copy = value_loss.clone();
  let entropy_mul = (entropy.mul(tf.scalar(0.01))).mul(tf.scalar(-1));
  let total_loss_1 = value_loss_copy.mul(tf.scalar(0.5, dtype='float32'));
  let total_loss_2 = total_loss_1.add(policy_loss);
  let total_loss = total_loss_2.add(entropy_mul);

  total_loss.print();
  return total_loss.mean();
}
EDIT 2:
I managed to use compute_loss as the loss function specified in model.compile(). But then it is required to take only two inputs (predictions, labels), so it's not working out for me, as I want to pass in multiple parameters.
I am truly lost on the matter.
The error says it all.
Your issue has to do with tf.variableGrads. The loss should be a scalar computed using the available tf tensor operators; the function you pass in should not simply return a tensor that was computed beforehand, as in your question.
Here is an example of what loss should be:
const a = tf.variable(tf.tensor1d([3, 4]));
const b = tf.variable(tf.tensor1d([5, 6]));
const x = tf.tensor1d([1, 2]);
const f = () => a.mul(x.square()).add(b.mul(x)).sum(); // f is a function
// df/da = x ^ 2, df/db = x
const {value, grads} = tf.variableGrads(f); // gradient of f with respect to each variable
Object.keys(grads).forEach(varName => grads[varName].print());
/!\ Notice that the gradient is calculated with respect to variables created using tf.variable.
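As a follow-up sketch (not part of the original answer), the gradients returned by tf.variableGrads can then be fed to an optimizer; the learning rate below is an arbitrary assumption:

// Hypothetical follow-up: apply the gradients from tf.variableGrads to the tf.variable()s.
const optimizer = tf.train.adam(0.01); // learning rate assumed
const { value, grads } = tf.variableGrads(f);
optimizer.applyGradients(grads); // updates the variables in place
value.dispose();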
Update:
You're not computing the gradients the way they should be computed. Here is the fix:
function compute_loss(done, new_state, memory, agent, gamma = 0.99) {
  const f = () => {
    let reward_sum = 0.;
    if (done) {
      reward_sum = 0.;
    } else {
      reward_sum = agent.call(tf.oneHot(new_state, 12).reshape([1, 9, 12]))
        .values.flatten().get(0);
    }

    let discounted_rewards = [];
    let memory_reward_rev = memory.rewards;
    for (let reward of memory_reward_rev.reverse()) {
      reward_sum = reward + gamma * reward_sum;
      discounted_rewards.push(reward_sum);
    }
    discounted_rewards.reverse();

    let onehot_states = [];
    for (let state of memory.states) {
      onehot_states.push(tf.oneHot(state, 12));
    }
    let init_onehot = onehot_states[0];
    for (let i = 1; i < onehot_states.length; i++) {
      init_onehot = init_onehot.concat(onehot_states[i]);
    }

    let log_val = agent.call(
      init_onehot.reshape([memory.states.length, 9, 12])
    );

    let disc_reward_tensor = tf.tensor(discounted_rewards);
    let advantage = disc_reward_tensor.reshapeAs(log_val.values).sub(log_val.values);
    let value_loss = advantage.square();
    log_val.values.print();

    let policy = tf.softmax(log_val.logits);
    let logits_cpy = log_val.logits.clone();

    let entropy = policy.mul(logits_cpy.mul(tf.scalar(-1)));
    entropy = entropy.sum();

    let memory_actions = [];
    for (let i = 0; i < memory.actions.length; i++) {
      memory_actions.push(new Array(2000).fill(0));
      memory_actions[i][memory.actions[i]] = 1;
    }
    memory_actions = tf.tensor(memory_actions);

    let policy_loss = tf.losses.softmaxCrossEntropy(memory_actions.reshape([memory.actions.length, 2000]), log_val.logits);

    let value_loss_copy = value_loss.clone();
    let entropy_mul = (entropy.mul(tf.scalar(0.01))).mul(tf.scalar(-1));
    let total_loss_1 = value_loss_copy.mul(tf.scalar(0.5, dtype='float32'));
    let total_loss_2 = total_loss_1.add(policy_loss);
    let total_loss = total_loss_2.add(entropy_mul);

    total_loss.print();
    return total_loss.mean().asScalar();
  };

  return tf.variableGrads(f);
}
Notice that you can quickly run into memory consumption issues. It is advisable to wrap the differentiated function in tf.tidy to dispose of the intermediate tensors.
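For instance, a minimal sketch (not from the original answer) that wraps the whole gradient step in tf.tidy; the done, new_state, memory, agent, and optimizer variables are assumed to exist as in the question:

// Hypothetical training step: everything allocated inside tf.tidy is disposed when it returns,
// while the tf.variable()s updated by the optimizer are left untouched.
tf.tidy(() => {
  const { value, grads } = compute_loss(done, new_state, memory, agent);
  optimizer.applyGradients(grads); // e.g. an optimizer created once via tf.train.adam(0.01)
  value.print();
});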
