Improving rudimentary AI of Angular based chess game - javascript

I have created a chess game with Angular and chess.js and am trying to improve its rudimentary AI. The un-improved code currently lives at: https://gist.github.com/dexygen/8a19eba3c58fa6a9d0ff (or https://gist.githubusercontent.com/dexygen/8a19eba3c58fa6a9d0ff/raw/d8ee960cde7d30850c0f00f511619651396f5215/ng-chess)
What the AI currently consists of is checking whether the computer (black) has a move that checkmates (using chess.js' in_checkmate() method), and if so, mating the human (white), otherwise making a random move. To improve this I thought that instead of merely making a random move, I would have the AI check for white's counters to black's responses. Then, if White has checkmate, not including those black responses in the moves to randomly select from.
I would like to improve the AI within makeMove() (which currently merely delegates to makeRandomMove()) but I am finding this to be harder than expected. What I expected to be able to do was, not unlike mateNextMove() (refer to lines 155-168 of the gist), to check for in_checkmate() within a loop, except the loop will be nested to account for black responses and white counters to those responses.
Here is my first attempt at what I expected would work but it does not avoid checkmate when possible.
function makeMove(responses) {
var evaluator = new Chess();
var response;
var allowsMate;
var counters = [];
var candidates = [];
for (var i=0, n=responses.length; i<n; i++) {
response = responses[i];
allowsMate = false;
evaluator.load(chess.fen());
evaluator.move(response);
counters = evaluator.moves();
//console.log(evaluator.ascii());
//console.log(counters);
for (var j=0, k=counters.length; j<k; j++) {
evaluator.move(counters[j]);
if (evaluator.in_checkmate()) {
//console.log('in_checkmate');
allowsMate = true;
break;
}
}
if (!allowsMate) {
candidates.push(response);
}
}
return makeRandomMove(candidates);
}
In order to debug/test taking advantage of a little knowledge helps, specifically attempting an early "Scholar's Mate", see: http://en.wikipedia.org/wiki/Scholar%27s_mate. If Black's random moves make this impractical just start over, the opportunity presents itself as often as not. Qxf7# is the notation for the mating move of Scholars mate both in the wikipedia article and also as returned by chess.moves(). So I've tried to modify the inner for loop as follows:
for (var j=0, k=counters.length; j<k; j++) {
evaluator.move(counters[j]);
if (counters[j] == 'Qxf7#') {
console.log(evaluator.in_checkmate());
}
}
But I've had this return false and allow me to deliver the mate. What am I doing wrong (and who possibly wants to help me on this project)?

It seems to me from the code you posted that you are not undoing the moves you make. When you loop through all possible moves, you make that move, then check for a threat. You should then unmake the move. That is probably why your last test didn't work as well.

Related

A few questions on prototyping NEAT in JavaScript

I've recently read the original paper about NeuroEvolution
of Augmenting Topologies by Kenneth O. Stanley and am now trying to prototype it myself in JavaScript. I stumbled across a few questions I can't answer.
My questions:
What is the definition of "structural innovation", and how do I store these so I can check if an innovation has already happened before?
However,
by keeping a list of the innovations that occurred in the current generation, it
is possible to ensure that when the same structure arises more than once through independent
mutations in the same generation, each identical mutation is assigned the
same innovation number
Is there a reason for storing the type of a node (input, hidden, output)?
In the original paper, only connections have an innovation number, but in other sources, nodes do as well. Is this necessary for crossover? (This has already been asked here.)
How could I limit the mutation functions to not add recurrent connections?
I think that's it for now. All help is appreciated.
The relevant parts of my code:
Genome
class Genome {
constructor(inputs, outputs) {
this.inputs = inputs;
this.outputs = outputs;
this.nodes = [];
this.connections = [];
for (let i = 0; i < inputs + outputs; i++) {
this.nodes.push(new Node());
}
for (let i = 0; i < inputs; i++) {
for (let o = 0; o < outputs; o++) {
let c = new Connection(this.nodes[i], this.nodes[inputs + o], outputs * i + o);
this.connections.push(c);
}
}
innovation = inputs * outputs;
}
weightMutatePerturb() {
let w = this.connections[Math.floor(random(this.connections.length))].weight;
w += random(-0.5, 0.5);
}
weightMutateCreate() {
this.connections[Math.floor(random(this.connections.length))].weight = random(-2, 2);
}
connectionMutate() {
let i = this.nodes[Math.floor(random(this.nodes.length))];
let o = this.nodes[Math.floor(random(this.inputs, this.nodes.length))];
let c = Connection.exists(this.connections, i, o);
if (c) {
c.enabled = true;
} else {
this.connections.push(new Connection(i, o, innovation));
innovation++;
}
}
nodeMutate() {
let oldCon = this.connections[Math.floor(Math.random(this.connections.length))];
oldCon.enabled = false;
let newNode = new Node();
this.nodes.push(newNode);
this.connections.push(new Connection(oldCon.input, newNode, innovation, 1));
innovation++;
this.connections.push(new Connection(newNode, oldCon.output, innovation, oldCon.weight));
innovation++;
}
}
Node
class Node {
constructor() {
this.value = 0;
this.previousValue = 0;
}
}
Connection
class Connection {
constructor(input, output, innov, weight) {
this.input = input;
this.output = output;
this.innov = innov;
this.weight = weight ? weight : random(-2, 2);
this.enabled = true;
}
static exists(connections, i, o) {
for (let c = 0; c < connections.length; c++) {
if (connections[c].input === i && connections[c].output === o) {
return connections[c];
}
}
return false;
}
}
All answers an sources are welcome. (You are an awesome person!)
First, I would very strongly advice against implementing NEAT yourself. If you take a look at the (many) available implementations, it is quite a large project!
A structural innovation is any new node or connection that is added to a genome and that has not been seen before. Imagine you have input nodes 1, 2, 3 and output nodes 4, 5. If only connection 2-4 is available, introducing connection 3-4 would be an structural innovation. To check for novelty you need to store all seen structures (i.e., a list of all connections and nodes) with a unique ID for each (this is the core idea behind NEAT, actually!). In our example, connection 2-4 may take ID=1, and connection 3-4 would take ID=2. You can see the connection is new in that no other connection in the list connects 2 and 4. Nodes are normally introduced by creating "a stop" in a connection and simply take the next available ID. For example, connection 2-4 would be deleted and you would have connections 2-5 and 5-4, where node ID=5 is created in the process (as well as two new connections). Note the IDs for nodes and connections may be independent (that is: if you use IDs for connections at all).
I'm struggling to think of a hard requirement for this. In principle you could simply store nodes in fixed order (input first, output next, then hidden) and then guess their type given their index, which is how you normally do it anyway for performance reasons (imagine trying to remove a node, you would only want to select a hidden node, so you would restrict search to those indices). Some tasks may be more efficient having that info, though, for example checking for recurrent connections (see 4).
IDs are useful in crossover, as they allow to quickly know which elements are common between two genomes. Whether to have IDs for nodes as well as connections is an open implementation decision. No IDs for connections makes simpler code (connections are identified by the IDs of the nodes they connect). But you lose the ability to tell apart two connections that connect the same nodes. There is an argument that says that a connection between two given nodes does not necessarily mean the same at different times in evolution (see how your quote mentions "in the same generation"). This is probably not a relevant factor, though! As I said, the convenience for IDs for both nodes and connections is still debated in the NEAT community.
In many cases you do not want to allow recurrent connections. The standard way to do this is to check for recurrence every time you try to add a connection. This is a costly step, yes!
If you have more doubts, I recommend you take a look at this implementation by Colin Green for reference. If he is not the person who knows more about NEAT implementation, he comes close.
This is not the average JS question! Thanks for the links, it's a really interesting paper. I can't claim to be an expert, I have only done toy GA problems, but I did read this paper and related ones. Here is what I understand:
I think all you need to worry about is whether a parent, by mutation, produces the same novel gene more than once in a generation. That is, two children, whose gene with the newest innovation number are identical. You can cull those right away. I think they say that it is possible for the same gene to appear in two species at the same time, and they basically say that's fine, that's rare enough not to worry about.
I can find at least one reason: "In NEAT, a bias is a node that can connect to any node other than inputs."
I believe your question is "must nodes have an innovation number to do crossover?" The answer is no. In the original paper (e.g. Figure 4) they show crossover implemented in a way where only connections have innovation numbers.
If you want to change the mutation function to be architecture aware, rather than avoiding recurrent structure, you might want to explicitly add structures you do want. Suppose you want to avoid recurrent connections because you are evolving an image classifier, and you know that convolutions are more suited to the task. In this case, you want your mutation function to be able to add/remove layers (and the needed connections). This was explored in detail last year by Google Brain:
Some of the mutations acting on this DNA are reminiscent of NEAT. However, instead of single nodes, one mutation can insert whole layers—i.e. tens to hundreds of nodes at a time. We also allow for these layers to be removed, so that the evolutionary process can simplify an architecture in addition to complexifying it.
Based on your comment about your motivation for question 4, I think you are mistaken. In the XOR example in the original paper, figure 5, they show a starting phenotype that involves no hidden layer. This starting phenotype is not a solution to the XOR problem, but it provides a good starting point: "NEAT is very consistent in finding a solution. It did not fail once in 100 simulations." That is without any penalization for recurrence.

Is javascript not fast enough for doing fluid simulation?

I am currently trying to implement a small fluid simulation on P5js. I tried to render 20K squares with a random colour. I got a frame rate of 2.xxx.
var sim;
var xdim = 200; var xLength;
var ydim = 100; var yLength;
function setup() {
createCanvas(800,400);
sim = new Sim(xdim, ydim);
}
function draw() {
xLength = width/xdim;
yLength = height/ydim;
for (var i = 0; i < xdim; ++i) for (var j = 0; j < ydim; ++j) {
fill(100);
rect(i*xLength, j*yLength, xLength, yLength);
}
console.log(frameRate());
}
What is the problem behind? Is the library not good enough? Or, the poor configuration of my computer? Or, javascript is not suitable for these kinds of implementation?
We can't help debug your code without an MCVE. Specifically, you haven't provided the Sim class, so we can't run your code at all.
But you need to take a step back and ask yourself this: what performance do you expect? You can't really complain about performance if you didn't have any expectations going in.
Also, you might want to figure out how many squares you can display before seeing a performance hit.
From there it's a game of looking for optimizations. You're going to have to do some profiling to understand exactly where your performance hit is. Maybe you display fewer squares, or maybe you lower the framerate, or maybe you do some pre-rendering. Again, what you do depends on exactly what your expectations and goals are.
I will say that you should take that call to console.log() out of your draw() loop. You should only use that for debugging, and it's not going to improve your performance to call that every single frame.

Best way to share out many points on a few features?

I have 5000+ LatLng points, and for each of them I would like to find out which feature (region) they belong to. The features come from a kmz layer by Philippe Ivaldi, converted to GeoJSON.
Currently, I am doing this with turfjs in a double for loop. As expected, the calculation freezes the browser for ten minutes, which ain't very convenient.
Here's my code :
function countCeaByLayer(geoJsonLayer){
jQuery.getJSON('http://localhost/server/retrieveData.php', function(data){
var turfPoints = [];
for(var i = 0; i < data.length; i++){
turfPoints.push(turf.point([data[i].longitudeWGS84, data[i].latitudeWGS84]));
}
var features = geoJsonLayer.toGeoJSON().features;
for(var i = 0; i < features.length; i++){
var turfPointsNew = [];
for(var j = 0; j < turfPoints.length; j++){
var isInside = turf.inside(turfPoints[j], features[i]);
if(!isInside) turfPointsNew.push(turfPoints[j]);
}
turfPoints = turfPointsNew;
}
console.log("done");
});
}
What can I do to avoid freezing the browser?
Make it async?
Do the calculation with node and turfjs on a server?
Or deploy leafletjs on a server with node and leaflet-headless?
...or should I just deal with it?
Thanks!
To optimize your code, you should do something like this.
Loop over the points.
For each point, when you iterate over polygons to know if the point is inside one of them, first get the polygon Bounds and see if the point is within the bounds.
If not, you can skip going further and go to the next polygons.
If it's within the bounds, go for a plain check if it is inside the polygon itself.
If it's the case, break the loop iterating over polygons and switch to the next point.
For example, it could be:
points.forEach(function(point) {
polygons.some(function(polygon) {
if (polygon.getBounds().contains(point)) { // or other method if you are not playing with Leaflet features
if (turf.isInside(polygon, point) { // for example, not sure this method actually exists but you get the concept
// point is within the polygon, do tuff
return true; // break the some loop
}
}
});
});
I've myself developped something that exactly does the same thing also based on turf, I run it on the client side (and my loops are made with .some, not classical for loop, so it could even go further in terms of performance) and I never experienced freeze.
From my point of view, 5000 points is peanut for browser to handle, but if your polygons are really complex (dozen of hundreds of thousands of vertices), this can slow down the process of course.
Br,
Vincent
If Stranded Kid's answer is overkill for you,
geoJsonLayer.eachLayer(function(layer){
var within = turf.within(turf.featureCollection(turfPoints),turf.featureCollection([layer.toGeoJSON()]));
console.dir(within);
});
And make sure your coordinates are floats and not strings, because that's what caused the slowdown for me.

Why is my Conway's Game of Life acting strangely?

I am trying to replicate Conway's Game of Life and though my code seems to be kosher it appears not to behave like it should.
This is the basic meat of it:
if (pressingSpace) {
running = true;
} else {
running = false;
};
if (running) {
for (var i=0; i<tiles.length; i++) {
var tile = tiles[i];
if (tile.alive && (tile.neighborsAlive() < 2 || tile.neighborsAlive() > 3)) {
tile.die();
};
if (!tile.alive && tile.neighborsAlive() == 3) {
tile.comeToLife();
};
};
};
for (var i=0; i<tiles.length; i++) {
var key = tiles[i];
if (!key.alive && mouseDown && key.sprite.contains([cursorX,cursorY]) && cursorX != null) {
key.comeToLife();
}
};
All the functions in play have been thoroughly tested and seem to work as expected. But when this is run, the "alive" squares seem to overrun the screen much more easily than they should. In particular when a column is made three tiles high, it vanishes on the next frame when it should, per the rules, produce two "alive" tiles on either side at the same time.
I suspect this has something to do with the order of operations. Do I need to "mark" appropriate tiles for a state change, instead of change them on the spot? I know there are a lot of implementations of Conway out there but I'm trying to develop this on my own-ish. That said, any nudges in the right direction would be greatly appreciated. Thanks!
You can see it in action here: http://www.eggborne.com/tiles
As far as I can see, it's because of you change tilemap while iterating over it.
E.g. I assume .comeToLife() method changes .alive field to true, and if .neighborsAlive() return non-cached value, but calculates .alive tiles around, you are essentially changing your playfield while iterating, and newly changed cells ruin the whole picture.
Easiest solution will be to create 'old' and 'new' tilemaps, and iterate over 'old' one, while bringing changes to 'new' only. Caching 'neighborsAlive' is essentially creating two arrays, just in different way — that way you're also creating a tilemap, with each tile holding a value of how many neighbors are alive at this moment — and you have to determine this value before everything else changes. If you don't, you will have the same issue, as you currently have.
For demonstration of your problem, make your playfield update with each tile change — you will see your problem animated :)
Hope this helps your issue.

Javascript animation with recursion, strange behavior

Im trying to do code in javascript a ruzzle solver. For now it just dig through the maze and find every possible path ( in the future I will match them against a dictionary to find the real valid words in it)
You can see it here : http://178.239.177.105/ruzzle/
I wanted to do it with an animation that show how the algorithm works on it, but im issuing a problem.
If you load it, the page just dont show anything, and my browser crash after a while.
BUT...
if you set an alert("") function, somewhere in the middle of the recursion function, you would be able to go through any step in the algorithm.
Especially if you set the browser to prevent to show any further alert messages, you'll finally see the animation working on the maze.
I was actually trying to do this via setInterval(), but is not working.
So I have two questions:
- Why do the script cause the page to crash, or not if there's an alert?
- How can I properly show the animation using some kind on wait() mechanism?
Thanks
You can see all the code by going on the page and look at the source code, however for the sake of clarity I'll paste the relevant code here:
You can also play with the code here : http://jsfiddle.net/Gcw2U/
(you will have to uncomment the last line in the to make it run)
//this matrix of chars rapresent the 4x4 puzzle
var ruzle_model = [["w","a","l","k"],["m","o","o","n"],["h","a","t","e"],["r","o","p","e"]];
// ""offsets" rapresent the four motion vector(up,down,left,right)
// used to visit the matrix
var offsets = [[1,0],[0,1],[-1,0],[0,-1]];
//recursive function to dig the maze
function path(m,i,j,paths,checkeds){
alert("SET BROWSER TO AVOID NEXT ALERTS MSGs!");
//base case, if not hitting a wall or already checked cell
if ( ! (i<=3 && i>=0 && j>=0 && j<=3) || isChecked(checkeds,i,j)){
terminal.innerHTML = terminal.innerHTML + "-"+ paths;
uncheckAllCells();
return paths;
}
//call path for every direction (up,down,left,right) stored in offsets
var tmp = [];
for (var c=0; c<offsets.length;++c){
var offset = offsets[c];
checkCells(i,j);
checkeds.push(new Array(i,j));
tmp.push(path(m,i+offset[0],j+offset[1],paths + m[i][j],copy(checkeds)));
}
return tmp;
}
//call path on every cell in the maze
function ruzzle(r){
var sol = []
for(var i=0; i<4; ++i){
for(var j=0; j<4; ++j){
var checkeds = new Array();
sol.push(path(r,i,j,'',checkeds));
}
}
terminal.innerHTML = sol;
return sol;
}
Javascript loops and recursions inhibit rendering of the page, so any changes made will stay invisible until the script stops executing, like when you spawn an alert. When a user sets "do not show alert messages", the alert still yields execution time to the underlying eventloop, which will update the page.
For as-fast-as-possible (high fps) animations, use requestAnimationFrame().
In your case, setTimeout() is the best way to go. Set a timeout on the recursive call to path.
function recursive(args) {
// do stuff to args
setTimeout(function () {
recursive(args);
}, 5);
}
Example

Categories

Resources