Max-Heap implementation: How to make first 3 nodes always the highest? - javascript

So I've been trying to implement a max heap. My use case is that, at any given time, I want the first 3 elements of the heap (that is, the root and its two children) to always be the three highest values in the whole heap.
I thought the heap property would guarantee this, but none of the implementation examples I have come across solves the fact that sometimes an element deeper in the heap is greater than an element closer to the root (in a different subtree).
This is the implementation I've been using, based on examples I found on the internet:
let createHeap = function(){
  return {
    arr: [],
    size: 0,
    getParent: function(i) { return Math.floor((i-1)/2) },
    getLeft: function(i) { return (2*i + 1) },
    getRight: function(i) { return (2*i + 2) },
    insert: function(val){
      let i = this.size
      this.size++
      this.arr[i] = val;
      if(i != 0){
        for(let j = this.getParent(i); j >= 0; j--){
          this.heapify(j)
        }
      }
    },
    heapify: function(i){
      let largest = i;
      let leftIndex = this.getLeft(i)
      let rightIndex = this.getRight(i)
      if (leftIndex < this.size && this.arr[leftIndex] > this.arr[largest])
        largest = leftIndex
      if (rightIndex < this.size && this.arr[rightIndex] > this.arr[largest])
        largest = rightIndex;
      if (largest != i) {
        let temp = this.arr[largest];
        this.arr[largest] = this.arr[i]
        this.arr[i] = temp
        this.heapify(largest);
      }
    }
  }
}
Now, the problem is, when I insert the following values in this order: 1, 2, 3, 4, 5
The result I get for the heap is:
    5
   / \
  4   2
 / \
1   3
This is clear when you read what the code does, but it doesn't seem to preserve the heap property as far as I understood what the heap property means. The code is missing something to do what I want it to do, but I don't want to make changes that would increase the complexity too badly, so I wanted to ask:
Does anyone know if the implementation is wrong, or if it was simply never meant to do what I want it to do (keep the first 3 elements of the heap the highest in the whole heap)?
What would be the best way of implementing this without increasing the complexity too much?
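For reference, the tree above is actually a valid max-heap: the heap property only guarantees that every parent is >= its children (so the root is the overall maximum), not that one whole level beats the next. One low-cost way to always read the three largest values is to pop the root three times and re-insert afterwards; extractMax below is a hypothetical helper, not part of the code above, and the sketch assumes the heap holds at least three elements.
// Hypothetical helper: remove and return the root, move the last element
// to the top, and sift it down to restore the heap property. O(log n).
function extractMax(heap) {
  const max = heap.arr[0];
  heap.size--;
  heap.arr[0] = heap.arr[heap.size];
  heap.arr.length = heap.size;
  heap.heapify(0);
  return max;
}
// The three largest values, largest first; re-inserting them afterwards
// leaves the heap intact. Assumes heap.size >= 3.
function topThree(heap) {
  const top = [extractMax(heap), extractMax(heap), extractMax(heap)];
  top.forEach(v => heap.insert(v));
  return top;
}
// For the 1..5 example above, topThree returns [5, 4, 3].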

Related

Finding a Hamiltonian path with JavaScript. How to improve efficiency?

I'm trying to solve this kata:
Given an integer N (<1000), return an array of integers 1..N where the sum of each 2 consecutive numbers is a perfect square. If that's not possible, return false.
For example, if N=15, the result should be this array: [9, 7, 2, 14, 11, 5, 4, 12, 13, 3, 6, 10, 15, 1, 8]. Below N=14, there's no answer, so the function should return false.
I thought 'how hard can this be?' and it's been long days in the rabbit hole. I've been programming for just a few months and don't have a CS background, so I'll write down what I understand of the problem so far, trying to use the proper concepts, but please feel free to tell me if any expression is not correct.
Apparently, the problem is very similar to a well-known problem in graph theory, the Hamiltonian path problem (closely related to TSP). In this case, two vertices are connected if their sum is a perfect square. Also, I don't have to look for a cycle, just find one Hamiltonian path, not all of them.
I understand that what I'm using is backtracking. I build an object that represents the graph and then try to find the path recursively. This is how I build the object:
function buildAdjacentsObject (limit) {
  const potentialSquares = getPotentialSquares(limit)
  const adjacents = {}
  for (let i = 0; i < (limit + 1); i++) {
    adjacents[i] = {}
    for (let j = 0; j < potentialSquares.length; j++) {
      if (potentialSquares[j] > i) {
        const dif = potentialSquares[j] - i
        if (dif <= limit) {
          adjacents[i][dif] = 1
        } else {
          break
        }
      }
    }
  }
  return adjacents
}
function getPotentialSquares (limit) {
  const maxSum = limit * 2 - 1
  let square = 4
  let i = 3
  const potentialSquares = []
  while (square <= maxSum) {
    potentialSquares.push(square)
    square = i * i
    i++
  }
  return potentialSquares
}
At first I was using a hash table with an array of adjacent nodes in each key. But when my algorithm had to delete vertices from the object, it had to search those arrays repeatedly, which took linear time every time. I made the adjacent vertices hashable and that improved my execution time. Then I look for the path with this function:
function findSquarePathInRange (limit) {
  // Build the graph object
  const adjacents = buildAdjacentsObject(limit)
  // Deep copy the object before making any changes
  const adjacentsCopy = JSON.parse(JSON.stringify(adjacents))
  // Create empty path
  const solution = []
  // Recursively complete the path
  function getSolution (currentCandidates) {
    if (solution.length === limit) {
      return solution
    }
    // Sort the candidate vertices to start with the ones with fewer adjacent vertices
    currentCandidates = currentCandidates.sort((a, b) => {
      return Object.keys(adjacentsCopy[a]).length -
        Object.keys(adjacentsCopy[b]).length
    })
    for (const candidate of currentCandidates) {
      // Add the candidate to the path
      solution.push(candidate)
      // and delete it from the object
      for (const candidateAdjacent in adjacents[candidate]) {
        delete adjacentsCopy[candidateAdjacent][candidate]
      }
      if (getSolution(Object.keys(adjacentsCopy[candidate]))) {
        return solution
      }
      // If no solution was found, delete the element from the path
      solution.pop()
      // and add it back to the object
      for (const candidateAdjacent in adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent][candidate] = 1
      }
    }
    return false
  }
  const endSolution = getSolution(
    Array.from(Array(limit).keys()).slice(1)
  )
  // The elements of the path can't be strings
  return (endSolution) ? endSolution.map(x => parseInt(x, 10)) : false
}
My solution works 'fast' but it's not fast enough. I need to pass more than 200 tests in less than 12 seconds, and so far it's only passing 150. Probably both my algorithm and my usage of JS can be improved, so, my questions:
Can you see a bottleneck in the code? The sorting step should be the one taking the most time, but it also gets me to the solution faster. Also, I'm not sure if I'm using the best data structure for this kind of problem. I tried classic looping instead of for..in and for..of, but it didn't change the performance.
Do you see any place where I can save previous calculations to look them up later?
Regarding the last question, I read that there is a dynamic programming solution to the problem, but everywhere I found one, it looks for the minimum distance, the number of paths, or the existence of a path, not the path itself. I read this everywhere but I'm unable to apply it:
Also, a dynamic programming algorithm of Bellman, Held, and Karp can be used to solve the problem in time O(n² 2ⁿ). In this method, one determines, for each set S of vertices and each vertex v in S, whether there is a path that covers exactly the vertices in S and ends at v. For each choice of S and v, a path exists for (S, v) if and only if v has a neighbor w such that a path exists for (S − v, w), which can be looked up from already-computed information in the dynamic program.
I just can't get my head around how to implement that if I'm not looking for all the paths. I found an implementation of a similar problem in Python that uses a cache and some binary arithmetic; I could translate it from Python, but I'm not sure how to apply those concepts to my algorithm.
I'm currently out of ideas, so any hint of something to try would be super helpful.
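For reference, here is a minimal sketch of the quoted Bellman-Held-Karp idea with path reconstruction. It is illustrative only: at O(n² 2ⁿ) it is feasible for n up to roughly 20, nowhere near the kata's N < 1000. It assumes vertices numbered 1..n, with adjacents[v] an array of v's neighbours (the array-of-arrays shape that appears in EDIT 1 below), and maps vertex v to bit v - 1.
function heldKarpHamiltonianPath (adjacents, n) {
  const full = (1 << n) - 1
  // canEnd.get(mask) maps each vertex v at which some path covering exactly
  // the vertex set `mask` can end, to the vertex that preceded v on that path.
  const canEnd = new Map()
  for (let v = 1; v <= n; v++) {
    canEnd.set(1 << (v - 1), new Map([[v, 0]])) // length-1 paths, 0 = no predecessor
  }
  for (let mask = 1; mask <= full; mask++) {
    const ends = canEnd.get(mask)
    if (!ends) continue
    for (const v of ends.keys()) {
      for (const w of adjacents[v]) {
        const bit = 1 << (w - 1)
        if (mask & bit) continue // w is already on the path
        const next = mask | bit
        if (!canEnd.has(next)) canEnd.set(next, new Map())
        const nextEnds = canEnd.get(next)
        if (!nextEnds.has(w)) nextEnds.set(w, v) // remember one predecessor
      }
    }
  }
  const finalEnds = canEnd.get(full)
  if (!finalEnds) return false
  // Walk the predecessor links backwards to recover one concrete path.
  const path = []
  let mask = full
  let v = finalEnds.keys().next().value
  while (v !== 0) {
    path.push(v)
    const prev = canEnd.get(mask).get(v)
    mask &= ~(1 << (v - 1))
    v = prev
  }
  return path.reverse()
}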
EDIT 1:
After Photon's comment, I tried going back to using a hash table for the graph, storing adjacent vertices as arrays. I also added a separate array of booleans to keep track of the remaining vertices.
That improved my efficiency a lot. With these changes I avoided converting object keys to arrays all the time; there was no need to copy the graph object, as it was not going to be modified, and no need to loop after adding one node to the path. The bad part is that I then needed to check that separate object when sorting, to see which adjacent vertices were still available. Also, I had to filter the arrays before passing them to the next recursion.
Yosef's approach from the first answer, using an array to store the adjacent vertices and accessing them by index, proved even more efficient. My code so far (no changes to the square-finding function):
function square_sums_row (limit) {
  const adjacents = buildAdjacentsObject(limit)
  const adjacentsCopy = JSON.parse(JSON.stringify(adjacents))
  const solution = []
  function getSolution (currentCandidates) {
    if (solution.length === limit) {
      return solution
    }
    currentCandidates = currentCandidates.sort((a, b) => {
      return adjacentsCopy[a].length - adjacentsCopy[b].length
    })
    for (const candidate of currentCandidates) {
      solution.push(candidate)
      for (const candidateAdjacent of adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent] = adjacentsCopy[candidateAdjacent]
          .filter(t => t !== candidate)
      }
      if (getSolution(adjacentsCopy[candidate])) {
        return solution
      }
      solution.pop()
      for (const candidateAdjacent of adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent].push(candidate)
      }
    }
    return false
  }
  return getSolution(Array.from(Array(limit + 1).keys()).slice(1))
}
function buildAdjacentsObject (limit) {
  const potentialSquares = getPotentialSquares(limit)
  const squaresLength = potentialSquares.length
  const adjacents = []
  for (let i = 1; i < (limit + 1); i++) {
    adjacents[i] = []
    for (let j = 0; j < squaresLength; j++) {
      if (potentialSquares[j] > i) {
        const dif = potentialSquares[j] - i
        if (dif <= limit) {
          adjacents[i].push(dif)
        } else {
          break
        }
      }
    }
  }
  return adjacents
}
EDIT 2:
The code performs fine in most cases, but my worst-case scenarios suck:
// time for 51: 30138.229ms
// time for 77: 145214.155ms
// time for 182: 22964.025ms
EDIT 3:
I accepted Yosef's answer, as it was super useful for improving the efficiency of my JS code. I then found a way to tweak the algorithm to avoid paths with dead ends, using some of the restrictions from the paper 'A Search Procedure for Hamilton Paths and Circuits'.
Basically, before calling another recursion, I check 2 things (a sketch of both checks follows below):
Whether there is any node with no edges left that's not part of the path so far, while the path is still missing more than 1 node
Whether there are more than 2 nodes left with only 1 edge (one of them can be the next node, which had 2 edges before deleting the edge to the current node, and another can be the last node of the path)
Both situations make it impossible to find a Hamiltonian path with the remaining nodes and edges (if you draw the graph it'll be clear why). Following that logic, there's another possible improvement when you check nodes with only 2 edges (one way to get in and another to go out). I think you could use that to delete other edges in advance, but it was not necessary, at least for me.
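As a sketch, the two checks can look like this, with some assumptions about the surrounding bookkeeping: adjacentsCopy[v] holds the still-available neighbours of v (as in the code above), while onPath[v] and remaining are hypothetical helpers marking the vertices already placed and counting the vertices still missing.
// Returns true if the remaining graph can no longer contain a Hamiltonian
// path, so the current branch can be abandoned early.
function isDeadEnd (adjacentsCopy, onPath, remaining) {
  let degreeOne = 0
  for (let v = 1; v < adjacentsCopy.length; v++) {
    if (onPath[v]) continue
    const degree = adjacentsCopy[v].length
    // Check 1: an isolated vertex can never be reached, unless it is the
    // single vertex still missing (it could then end the path).
    if (degree === 0 && remaining > 1) return true
    if (degree === 1) degreeOne++
  }
  // Check 2: degree-1 vertices can only sit at the two ends of the path.
  return degreeOne > 2
}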
Now the algorithm performs worse in most cases, where just sorting by remaining edges was good enough to predict the next node and the extra work is wasted, but it's able to solve the worst cases in a much better time. For example, limit = 77 is solved in 15 ms, but limit = 1000 went from 30 ms to 100 ms.
This is a really long post; if you have any edit suggestions, let me know. I don't think posting the final code is the best idea, taking into account that you can't check the solutions on the platform before solving the kata. But the accepted answer and this final edit should be good guidance for thinking about this last part while still learning something. Hope it's useful.
By replacing the object with an array, you save yourself from converting the object to an array every time you want to find the length (which you do a lot: in every step of the sort algorithm), or when you want to get the keys for the next candidates. In my tests the code below has been a lot more effective in terms of execution time (0.102s vs 1.078s for limit=4500 on my machine).
function buildAdjacentsObject (limit) {
  const potentialSquares = getPotentialSquares(limit)
  const adjacents = []
  for (let i = 0; i < (limit + 1); i++) {
    adjacents[i] = []
    for (let j = 0; j < potentialSquares.length; j++) {
      if (potentialSquares[j] > i) {
        const dif = potentialSquares[j] - i
        if (dif <= limit) {
          adjacents[i].push(dif)
        } else {
          break
        }
      }
    }
  }
  return adjacents
}
function getPotentialSquares (limit) {
  const maxSum = limit * 2 - 1
  let square = 4
  let i = 3
  const potentialSquares = []
  while (square <= maxSum) {
    potentialSquares.push(square)
    square = i * i
    i++
  }
  return potentialSquares
}
function findSquarePathInRange (limit) {
  // Build the graph object
  const adjacents = buildAdjacentsObject(limit)
  // Deep copy the object before making any changes
  const adjacentsCopy = JSON.parse(JSON.stringify(adjacents))
  // Create empty path
  const solution = []
  // Recursively complete the path
  function getSolution (currentCandidates) {
    if (solution.length === limit) {
      return solution
    }
    // Sort the candidate vertices to start with the ones with fewer adjacent vertices
    currentCandidates = currentCandidates.sort((a, b) => {
      return adjacentsCopy[a].length - adjacentsCopy[b].length
    })
    for (const candidate of currentCandidates) {
      // Add the candidate to the path
      solution.push(candidate)
      // and delete it from the object
      for (const candidateAdjacent of adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent] = adjacentsCopy[candidateAdjacent].filter(t => t !== candidate)
      }
      if (getSolution(adjacentsCopy[candidate])) {
        return solution
      }
      // If no solution was found, delete the element from the path
      solution.pop()
      // and add it back to the object
      for (const candidateAdjacent of adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent].push(candidate)
      }
    }
    return false
  }
  const endSolution = getSolution(
    Array.from(Array(limit).keys()).slice(1)
  )
  // The elements are numbers already, so no string conversion is needed
  return endSolution
}
var t = new Date().getTime();
var res = findSquarePathInRange(4500);
var t2 = new Date().getTime();
console.log(res, ((t2-t)/1000).toFixed(4)+'s');

Fibonacci for large numbers in JavaScript

I have the following code:
function fib(n) {
  let first = BigInt(0);
  let snd = BigInt(1);
  let currentNumber;
  let countMax = Math.abs(n) + 1;
  let counter = 2;
  if (n == 0) {
    return first;
  }
  else if (n == 1 || n == -1) {
    return snd;
  }
  while (counter < countMax) {
    currentNumber = first + snd;
    first = snd;
    snd = currentNumber;
    counter++;
  }
  if ((n < 0) && (n % 2 == 0)) {
    return -currentNumber;
  }
  return currentNumber;
}
That returns the Fibonacci number for the given n.
My issue is that I have to improve the performance of this code. I tried different Fibonacci formulas (closed-form ones), but I lose a lot of precision because phi has infinitely many decimals, so I have to truncate, and for big numbers I lose too much precision.
When I execute, for instance, fib(200000) I get the huge number, but the code takes more than 12000 ms.
On the other hand, I tried using recursion, but the performance decreases.
Could you provide me with an article or a clue to follow?
Thanks & Regards.
First of all, you can refer to the answer here, which gives the identity for negative indices:
Fib(-n) = (-1)^(n+1) * Fib(n), i.e. Fib(-n) = Fib(n) when n is odd and -Fib(n) when n is even
Here's the recursive implementation, which is not efficient, as you mentioned:
function fib(n) {
  // Handle negative indices: Fib(-n) = (-1)^(n+1) * Fib(n)
  var sign = (n < 0 && n % 2 === 0) ? -1 : 1;
  n = Math.abs(n);
  // Now the usual Fibonacci function
  if (n < 2)
    return sign * n;
  return sign * (fib(n - 1) + fib(n - 2));
}
This is pretty straightforward, and I leave it unexplained because if you know the Fibonacci series, you know what the above code does. As you already know, this is not good for very large numbers, as it recursively calculates the same things again and again. But we'll use this idea in our approach later on.
Now coming towards a better approach, see the below code, similar to your code, just a bit more concise.
function fib(n) {
  if (n == 0)
    return 0;
  // Handle negative indices: Fib(-n) = (-1)^(n+1) * Fib(n)
  var sign = (n < 0 && n % 2 === 0) ? -1 : 1;
  n = Math.abs(n);
  var a = 1;
  var b = 1;
  while (n > 2) {
    b = a + b;
    a = b - a;
    n--;
  }
  return sign * b;
}
This code is better when you want to call the function only a few times. But if you call it frequently, you'll end up calculating the same things many times. In that case you should keep track of already calculated values.
For example, if you call fib(n) it will calculate the nth Fibonacci number and return it. The next time you call fib(n) it will calculate it again and return the result.
What if we store this value somewhere and next time retrieve it whenever required?
This will also help in calculating Fibonacci numbers greater than the nth Fibonacci number.
How?
Say we have to calculate fib(n+1); then by definition fib(n+1) = fib(n) + fib(n-1). Because we already have fib(n) calculated and stored somewhere, we can just use that stored value. Also, if we have fib(n) calculated and stored, we already have fib(n-1) calculated and stored. Read it again.
We can do this by using a JavaScript object and the same recursive function we used above (Yes, the recursive one!). See the below code.
// This object will store already calculated values
// This should be in the global scope or at least the parent scope
var memo = {};
// We know fib(0) = 0, fib(1) = 1, so store it
memo[0] = 0;
memo[1] = 1;
function fib(n) {
  // Fib(-n) = (-1)^(n+1) * Fib(n), so only negative even indices flip the sign
  var sign = (n < 0 && n % 2 === 0) ? -1 : 1;
  n = Math.abs(n);
  // If we already calculated the value, just use the same
  if (memo[n] !== undefined)
    return sign * memo[n];
  // Else we will calculate it, store it and also return it
  return sign * (memo[n] = fib(n - 1) + fib(n - 2));
}
// This will calculate fib(2), fib(3), fib(4) and fib(5).
// Why does it not calculate fib(0) and fib(1)?
// Because they're already calculated, see the top of this snippet
console.log(fib(5)); // 5
// This will not calculate anything
// Because fib(-5) is fib(5) (5 is odd) and we already calculated fib(5)
console.log(fib(-5)); // 5
// This will also not calculate anything
// Because we already calculated fib(4) while calculating fib(5)
console.log(fib(4)); // 3
// This will calculate only fib(6) and fib(7)
console.log(fib(7)); // 13
Try out some test cases. It's easy to understand why this is faster.
Now that you know you can store the already calculated values, you can modify your solution to use this approach without recursion, because for large numbers the recursive approach will throw an Uncaught RangeError (the call stack overflows). I leave this to you because it's worth trying on your own!
This solution uses a concept in programming called Dynamic Programming (specifically, memoization). You can refer to it here.
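One further pointer beyond this answer, aimed at the original fib(200000) timing: the 'fast doubling' identities F(2k) = F(k)(2F(k+1) - F(k)) and F(2k+1) = F(k)² + F(k+1)² compute F(n) in O(log n) BigInt steps instead of n additions. A minimal sketch, assuming n >= 0 (negative indices can reuse the sign rule above):
function fibFast(n) {
  // fibPair(k) returns [F(k), F(k+1)] as BigInts; recursion depth is only log2(n).
  function fibPair(k) {
    if (k === 0) return [0n, 1n];
    const [a, b] = fibPair(Math.floor(k / 2));
    const c = a * (2n * b - a); // F(2m), where m = floor(k / 2)
    const d = a * a + b * b;    // F(2m+1)
    return (k % 2 === 0) ? [c, d] : [d, c + d];
  }
  return fibPair(n)[0];
}
console.log(fibFast(200000)); // same digits as the linear BigInt loop, much faster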
If you just add the previous value to the current one, and then use the old current value as the new previous one, you get a significant improvement in performance.
function fib(n) {
  var current = 1;
  var previous = 0;
  while (--n) {
    var temp = current;
    current += previous;
    previous = temp;
  }
  return current;
}
console.log(fib(1)); // 1
console.log(fib(2)); // 1
console.log(fib(3)); // 2
console.log(fib(4)); // 3
console.log(fib(5)); // 5
You can also use an array in the parent scope to store the previous values to avoid redoing the same calculations.
var fibMap = [1, 1];
function fib(n) {
  var current = fibMap[fibMap.length - 1];
  var previous = fibMap[fibMap.length - 2];
  while (fibMap.length < n) {
    var temp = current;
    current += previous;
    previous = temp;
    fibMap.push(current);
  }
  return fibMap[n - 1];
}
console.log(fib(1)); // 1
console.log(fib(2)); // 1
console.log(fib(3)); // 2
console.log(fib(4)); // 3
console.log(fib(5)); // 5
Benchmark for getting the 1000th number 3 times
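The benchmark embed itself is not reproduced here; a rough equivalent using console.time on the fibMap version above might look like this (the second and third calls just return the cached value):
console.time('1000th number x3');
fib(1000);
fib(1000);
fib(1000);
console.timeEnd('1000th number x3');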

Compute every combination of 6 numbers

I'm more of a media developer and not the best coder, but I find myself needing to learn JavaScript better. I'm creating a math card game where the human player and the automated player are each dealt 6 cards. Each player must combine (concatenate) three of the cards to make a top number and the other three to make a bottom number. Those two numbers are then subtracted. For the automated player, I have to go through every possible combination of the six cards, so that when the two numbers are subtracted, the result gets as close as possible to a target number. I'm not very good with arrays, so I started testing every possible combination and then comparing which one was closer (see the example below). This is a very inefficient way of coding this, but I'm just not sure how to do it otherwise. Any help would be greatly appreciated.
The variables have already been declared.
alienTopNum = "" + alienNum1 + alienNum2 + alienNum3;
alienBottomNum = "" + alienNum4 + alienNum5 + alienNum6;
oldDiff = targetNum - (alienTopNum - alienBottomNum);
player.SetVar("AC1R1", alienNum1);
player.SetVar("AC2R1", alienNum2);
player.SetVar("AC3R1", alienNum3);
player.SetVar("AC4R1", alienNum4);
player.SetVar("AC5R1", alienNum5);
player.SetVar("AC6R1", alienNum6);
player.SetVar("ATR1", alienTopNum - alienBottomNum);
alienTopNum = "" + alienNum1 + alienNum2 + alienNum3;
alienBottomNum = "" + alienNum4 + alienNum6 + alienNum5;
newDiff = targetNum - (alienTopNum - alienBottomNum);
if (Math.abs(newDiff) < Math.abs(oldDiff)) {
  oldDiff = newDiff;
  player.SetVar("AC1R1", alienNum1);
  player.SetVar("AC2R1", alienNum2);
  player.SetVar("AC3R1", alienNum3);
  player.SetVar("AC4R1", alienNum4);
  player.SetVar("AC5R1", alienNum6);
  player.SetVar("AC6R1", alienNum5);
  player.SetVar("ATR1", alienTopNum - alienBottomNum);
}
etc....
Store the dealt cards in an array rather than in individual variables, because that makes them a lot easier to handle when generating permutations. You don't say what values the cards can have, but as an example, given a "hand" of [1,2,3,4,5,6], if you get the permutations as an array of arrays:
[ [1,2,3,4,5,6], [1,2,3,4,6,5], [1,2,3,5,4,6], ...etc. ]
then you can loop through it to process each permutation: take the first three "cards" and the last three to get the current iteration's two numbers, subtract them, and see if the result is closer to the target than previous iterations' results.
The following does that, making use of the array permutation function that I found in this answer to another question. I'm not going to explain that algorithm because you can easily google up various permutation algorithms for yourself, but I have put comments in my bestPlay() function to explain how I process the permutations to figure out which is the best score for a hand.
I haven't tried to use your player or player.SetVar() method, but hopefully if you study this you can adapt it to use with your objects.
You didn't say what values the cards could have, so I've assumed a deck of twenty cards that repeats the numbers 0-9 twice.
function bestPlay(hand, target) {
  var perms = permutator(hand); // Get all permutations for hand
  var best = perms[0];          // Use the first as initial best
  var bestDiff = difference(best);
  for (var i = 1; i < perms.length; i++) {  // Loop over the rest of the permutations
    var diff = difference(perms[i]);        // Get diff for current permutation
    if (Math.abs(target - diff) < Math.abs(target - bestDiff)) { // Check if
      best = perms[i];                      // current beats previous best
      bestDiff = diff;                      // and if so make it new best
    }
  }
  // Output the results for this hand:
  console.log(`Hand: ${hand.join(" ")}`);
  console.log(`Best Numbers: ${best.slice(0,3).join("")} ${best.slice(3).join("")}`);
  console.log(`Difference: ${bestDiff}`);
}
var hands = deal();
var target = 112;
console.log(`Target: ${target}`);
bestPlay(hands[1], target);
bestPlay(hands[2], target);
function difference(cards) {
  return Math.abs(cards.slice(0,3).join("") - cards.slice(3).join(""));
}
function deal() {
  var cards = [1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0];
  // shuffle
  cards.sort(function() { return Math.random() - 0.5; });
  // first hand is first six cards, second hand is next six
  return {
    1: cards.slice(0,6),
    2: cards.slice(6, 12)
  };
}
function permutator(inputArr) {
  var results = [];
  function permute(arr, memo) {
    var cur, memo = memo || [];
    for (var i = 0; i < arr.length; i++) {
      cur = arr.splice(i, 1);
      if (arr.length === 0) {
        results.push(memo.concat(cur));
      }
      permute(arr.slice(), memo.concat(cur));
      arr.splice(i, 0, cur[0]);
    }
    return results;
  }
  return permute(inputArr);
}
If you click the "Run Code Snippet" button several times you'll see that sometimes a given hand has a combination of numbers that exactly matches the target, sometimes it doesn't.

JavaScript performance: array of objects, preassignment vs direct use

I have a question about how speed can be affected by the use of object data arrays, that is, whether to use them directly or pre-assign their properties to simple vars.
I have an array of elements, for example 1000 elements.
Every array item is an object with 10 properties (for example).
And finally I use some of these properties to do 10 calculations.
So I have APPROACH 1:
var nn = my_array.length;
var a1, a2, a3, a4 ... a10;
var cal1, cal2, .. cal10
for (var x = 0; x < nn; x++) {
  // assignment
  a1 = my_array[x].data1;
  ..
  a10 = my_array[x].data10;
  // calculations
  cal1 = a1*a10 + a2*Math.abs(a3);
  ...
  cal10 = (a8-a7)*4 + Math.sqrt(a9);
}
And APPROACH 2:
var nn = my_array.length;
for (var x = 0; x < nn; x++) {
  // calculations
  cal1 = my_array[x].data1*my_array[x].data10 + my_array[x].data2*Math.abs(my_array[x].data3);
  ...
  cal10 = (my_array[x].data8 - my_array[x].data7)*4 + Math.sqrt(my_array[x].data9);
}
Is assigning a1 ... a10 from my_array and then doing the calculations faster than doing the calculations with my_array[x].property directly, or is it the other way around?
I don't know how the JS engine works under the hood ....
The short answer is: it depends on your JavaScript engine. There is no right and wrong here, only "this has worked in the past" and "this doesn't seem to speed things up any more".
tl;dr: if I were not going to run a jsperf test, I would go with the "Cached example" below.
A general rule of thumb is (read: was) that if you are going to use an element of an array more than once, it could be faster to cache it in a local variable, and if you are going to use a property of an object more than once, it should also be cached.
Example:
You have this code:
// Data generation (not discussed here)
function GetLotsOfItems() {
  var ret = [];
  for (var i = 0; i < 1000; i++) {
    ret[i] = { calc1: i * 4, calc2: i * 10, calc3: i / 5 };
  }
  return ret;
}
// Your calculation loop
var myArray = GetLotsOfItems();
for (var i = 0; i < myArray.length; i++) {
  var someResult = myArray[i].calc1 + myArray[i].calc2 + myArray[i].calc3;
}
Depending on your browser (read:this REALLY depends on your browser/its javascript engine) you could make this faster in a number of different ways.
You could for example cache the element being used in the calculation loop
Cached example:
// Your cached calculation loop
var myArray = GetLotsOfItems();
var element;
var arrayLen = myArray.length;
for (var i = 0; i < arrayLen; i++) {
  element = myArray[i];
  var someResult = element.calc1 + element.calc2 + element.calc3;
}
You could also take this a step further and run it like this:
var myArray = GetLotsOfItems();
var element;
for (var i = myArray.length; i--;) { // Start at the last element, travel backwards to the start
  element = myArray[i];
  var someResult = element.calc1 + element.calc2 + element.calc3;
}
What you do here is start at the last element, then use the condition block to check whether i > 0, and AFTER that lower it by one (allowing the loop to run with i == 0, while --i would run from 1000 down to 1). In modern engines, however, this is usually slower, because you read the array backwards, and reading an array in the natural order usually allows for run-time or compile-time optimization (which is automatic, mind you, so you don't need to do anything for it to work). Depending on your JavaScript engine this might not apply, and the backwards loop could be faster.
However, in my experience this runs slower in Chrome than the second "kinda-optimized" version (I have not tested this in jsperf, but in a CSP solver I wrote 2 years ago I ended up caching array elements, but not properties, and I ran my loops from 0 to length).
You should (in most cases) write your code in a way that makes it easy to read and maintain. Caching array elements is, in my opinion, as easy to read (if not easier) as non-cached elements; it might be faster (it is, at least, not slower); and it is quicker to write if you use an IDE with autocomplete for JavaScript :P
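If you want to check this on your own engine, here is a rough harness (a sketch reusing GetLotsOfItems from above; treat the numbers as hints, not verdicts, since results vary between engines and runs):
var bench = GetLotsOfItems();
var sink = 0; // accumulate results so the optimizer cannot drop the loops
console.time('direct property access');
for (var run = 0; run < 10000; run++)
  for (var i = 0; i < bench.length; i++)
    sink += bench[i].calc1 + bench[i].calc2 + bench[i].calc3;
console.timeEnd('direct property access');
console.time('cached element');
for (var run2 = 0; run2 < 10000; run2++)
  for (var j = 0, len = bench.length; j < len; j++) {
    var el = bench[j];
    sink += el.calc1 + el.calc2 + el.calc3;
  }
console.timeEnd('cached element');
console.log(sink); // keep the result observable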

JavaScript; n-dimensional array creation

In the process of building a JavaScript interpreter for a simple language, I've faced the following problem:
After parsing, we get an array of indices that specifies the element in an n-dimensional array to be modified. For instance, after parsing this:
a[1, 1, 1]
We get an array [1, 1, 1]. The language I'm working on doesn't have variable definitions, so variables get initialized on their first use. My goal is to be able to create this n-dimensional array so that I can place it in the variable table (in the example above, we'd need to create a 3-dimensional array).
The short question:Is there a way to create an n-dimensional array in JavaScript without using eval()?
Tested in Chrome:
function createNDimArray(dimensions) {
  if (dimensions.length > 0) {
    var dim = dimensions[0];
    var rest = dimensions.slice(1);
    var newArray = new Array();
    for (var i = 0; i < dim; i++) {
      newArray[i] = createNDimArray(rest);
    }
    return newArray;
  } else {
    return undefined;
  }
}
Then createNDimArray([3, 2, 5]) returns a 3x2x5 array.
You can use a similar recursive procedure to access an element whose index is in an array:
function getElement(array, indices) {
  if (indices.length == 0) {
    return array;
  } else {
    return getElement(array[indices[0]], indices.slice(1));
  }
}
Setting an element is similar, and left as an exercise for the reader.
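For what it's worth, a minimal sketch of that setter, mirroring the getter's recursion (setElement is a hypothetical helper; it assumes the indices address a slot that exists, i.e. the array was created with matching dimensions):
function setElement(array, indices, value) {
  if (indices.length === 1) {
    array[indices[0]] = value; // last index: assign in place
  } else {
    setElement(array[indices[0]], indices.slice(1), value);
  }
}
// e.g. with a = createNDimArray([3, 2, 5]):
// setElement(a, [1, 1, 1], 42); getElement(a, [1, 1, 1]) === 42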
There's nothing built in, but it's pretty easy to create a function that would do the job:
var genArray = function () {
  var arr, len, i;
  if (arguments.length > 0) {
    len = [].slice.call(arguments, 0, 1)[0];
    arr = new Array(len);
    for (i = 0; i < len; i++) {
      arr[i] = genArray.apply(null, [].slice.call(arguments, 1));
    }
  } else {
    return null; //or whatever you want to initialize values to.
  }
  return arr;
};
var a = genArray(3, 2); //is [[null, null],[null, null],[null, null]]
var b = genArray(3, 1, 1); //is [[[null]],[[null]],[[null]]]
a[0][1]; //is null
b[1][0][0]; //is null
b[1][0][0] = 3;
b[1][0][0]; //is 3;
b; //is [[[null]],[[3]],[[null]]]
Maybe that will help?
PS --
I know this might seem like more effort than necessary. But unfortunately, JavaScript arrays are not really "arrays" (if by "array" you mean a contiguous, indexed, fixed-size memory block). They're more like "maps" in most languages. So there's a certain amount of effort involved in creating them. Most languages have no problem creating multi-dimensional arrays because they just do some simple multiplication followed by a malloc(). But with JavaScript, you really have to recursively generate your arrays if you want to have them pre-constructed. It's a pain, but it does demonstrate the effort required by the interpreter.
Go figure.
For creating an n-dimensional array:
function createNDimArray(dimensions) {
  var ret = undefined;
  if (dimensions.length == 1) {
    ret = new Array(dimensions[0]);
    for (var i = 0; i < dimensions[0]; i++)
      ret[i] = null; //or another value
    return ret;
  }
  else {
    //recursion
    var rest = dimensions.slice(1);
    ret = new Array(dimensions[0]);
    for (var i = 0; i < dimensions[0]; i++)
      ret[i] = createNDimArray(rest);
    return ret;
  }
}
EDIT: Due to the fact that any recursive solution will have a limit to the size of the array it can create... I made another solution in my PJs # GitHub library. This one runs at pseudo-instant speed and can create and manage a multidimensional array of any size and structure, with any dimensions at any branch. It can also simulate prefilling and/or use a node object of custom design. Check it out here: https://github.com/PimpTrizkit/PJs/wiki/14.-Complex-Multidimensional-Object--(pCMO.js)
Using a modified version of jfabrizio's solution:
function createNDimArray(dimensions) {
  var t, i = 0, s = dimensions[0], arr = new Array(s);
  if (dimensions.length < 3)
    for (t = dimensions[1]; i < s;) arr[i++] = new Array(t);
  else
    for (t = dimensions.slice(1); i < s;) arr[i++] = createNDimArray(t);
  return arr;
}
Usages:
var arr = createNDimArray([3, 2, 3]);
// arr = [[[,,],[,,]],[[,,],[,,]],[[,,],[,,]]]
console.log(arr[2][1]); // in FF: Array [ <3 empty slots> ]
console.log("Falsy = " + (arr[2][1][0]?true:false) ); // Falsy = false
I found this to be quite a bit faster. I might even say it could be the fastest way to generate an N-dimensional array in JavaScript. The refactoring above brought some good speed increases, but the best speed increase came from not prefilling, of course. This version doesn't prefill the array; it only returns a fully created N-dimensional array of the given lengths where the last level is just an empty array. I would hope that arr[x][y][z] ? arr[x][y][z] : null is sufficient if you really need the null value. It is for my uses. :)
If you need prefilling, use his original version.
And, if you don't really care about what I did; then stop reading.
Want more geek talk? A little something about recursion for those learning out there. Alright, here are the tactics.
When doing deep recursion, keep in mind the final level: it's where most of the work is done. In this case it's the Nth dimension, literally. It is your "payload"; the rest is logistics. In jfab's function, when dimensions.length gets to 1, it is in the Nth (last) dimension and performs the payload, which is to create the array of nulls, or in my case, an empty array. Since the recursion gets so deep, each dimension is a factor of the last one; by the time you get to the Nth dimension you will have a lot of function calls, and the logistics get cumbersome for the computer. At the Nth dimension you will call your base recursion function (createNDimArray in our case) more times for the payload than you will for logistics.
Now, as in jfab's original solution, putting the execution of the payload as the very first thing you do in the recursion (if possible) is usually a good thing, especially if it's simple. Here, the payload becomes building the final 2D array (instead of just a 1D array obtained by returning a new Array() only), so the excessive function calls no longer happen at this level. Of course, if you want to prefill the array, this shortcut doesn't always help; more to the point, prefilling the array would then be the appropriate payload. By not visiting every item in the Nth dimension we have effectively removed that level: there is one less level of function calls, the Nth dimension's payload is actually done in the (N-1)th dimension, and we never call the recursive function again just to deliver a new Array(). Unfortunately, the call to new Array(x) (in general) doesn't see it that way; its execution time does increase with a larger x. Effectively we still visit every item in the Nth dimension, but now we do it only once, with native code, wrapped in a tight and light loop.
Now we require that createNDimArray only be called with N > 1, i.e. never used to create 1D arrays. Theoretically you could require a larger N and unroll even more dimensions at the end: the line with if ( dimensions.length < 3 ) would read something like < 4 or < 5, and you would have to wrap that many more for loops around the one that's there, each with its own set of vars. I'm not sure how efficient that would be, as you are trading excessive function calls and stack manipulation for a similar idea in nested for loops. I suppose it could speed up some environments if you know that N is always above a certain level, or if it's only for the final dimensions, like here, where I did it for the last two dimensions. But if you unroll too much, then your payload itself is a bear; only testing will tell if it's worth it. Stack space does seem limited, and I think I remember being able to make larger arrays with more unrolling: there is a limit to how big you can make an array, and recursive solutions that call themselves for each item at the Nth level had the lowest limit, if I recall correctly, much lower.
The next part in revising his solution is just logistics: a simple refactor to get rid of excessive blocks and code, and to join all the var work together. Since you need an arr to return once the looping is over, you might as well do all your var work on one line first, and luckily, three of the four vars have the same initialization. Joining declarations with , where possible also makes for smaller code.
PT
One more version of createNDimArray using map, apply and bind functions:
function createNDimArray(dims) {
  return dims.length === 1
    ? new Array(dims[0])
    : Array.apply(null, Array(dims[0])).map(createNDimArray.bind(null, dims.slice(1)));
}
createNDimArray([3, 2, 5]); // returns 3x2x5 array
Creating an ND Array requires cloning nested ND arrays. Accordingly you will need a proper Array.prototype.clone() method and the rest is easy. To my knowledge the following is the simplest and most efficient way in JS.
Array.prototype.clone = function () {
  return this.reduce((p, c, i) => (p[i] = Array.isArray(c) ? c.clone() : c, p), [])
}
function arrayND(...n) {
  return n.reduceRight((p, c) => c = (new Array(c)).fill().map(e => Array.isArray(p) ? p.clone() : p))
}
var NDarr = arrayND(4,4,4,4,"."); // size of each dimension and the init value at the end
console.log(JSON.stringify(NDarr))
NDarr[0][1][2][3] = "kitty"; //access any location and change.
console.log(JSON.stringify(NDarr))
Reason for this answer
There are good answers here, but as JavaScript has changed, here is an additional method of tackling this problem with some of its updated features.
function nArray (dem, size = dem, fill = null, currDepth = 0) {
  const arr = new Array(size).fill(fill);
  return (currDepth + 1 === dem) ? arr : arr.map(i => nArray(dem, size, fill, currDepth + 1));
};
Notes
dem is the number of dimensions of the array.
size is the size of each dimension; by default it is the dem value.
fill is the value every slot will be filled with.
currDepth is not meant to be passed by callers; it supports the recursion.
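A couple of usage examples for the function above:
const cube = nArray(3);       // 3x3x3, every slot filled with null
const grid = nArray(2, 4, 0); // 2 dimensions of size 4, filled with 0
console.log(grid);            // [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]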
Create an n-dimensional matrix array with default values
function arr (arg, def = 0) {
  if (arg.length > 2) {
    return Array(arg[0]).fill().map(() => arr(arg.slice(1), def)); // pass def down the recursion
  } else {
    return Array(arg[0]).fill().map(() => Array(arg[1]).fill(def));
  }
}
//simple usage -> fills with 0
var s = arr([3,5,8,4]) // 4 dimensions
var t = arr([5,7]) // 2 dimensions
//fill with null
var k = arr([4,7,9] , null) // 3 dimensions
If you need to create a 4-D array with indexes from 0 to 4 in each dimension, just use this code:
function createNDimArray(dimensions) {
  if (dimensions.length > 0) {
    var dim = dimensions[0];
    var rest = dimensions.slice(1);
    var newArray = new Array();
    for (var i = 0; i < dim; i++) {
      newArray[i] = createNDimArray(rest);
    }
    return newArray;
  } else {
    return undefined;
  }
}
var MyArray = createNDimArray([5, 5, 5, 5]);
//returns a 5x5x5x5 array with indexes from 0 to 4;
MyArray[4][4][4][4] = "MyArray 4d MyValue";
alert(MyArray[4][4][4][4]);
//For a 5-dimensional array of 5x4x3x2x2, do this:
var MyArray_5d = createNDimArray([5, 4, 3, 2, 2]);
MyArray_5d[4][3][2][1][1] = "MyArray 5d MyValue";
alert(MyArray_5d[4][3][2][1][1]);
MULTIDIMENSIONAL ARRAYS can be seen as EMBEDDED ARRAYS.
See if the following can help.
<script type="text/javascript">"use strict";
const arr = [
["D1","D2","D3"],
[
["T11","T12","T13"],
["T21","T22","T23"]
]
];
for(let k=0;k<arr[0].length;k++)console.log(arr[0][k]);
// D1
// D2
// D3
for(let k=0;k<arr[1].length;k++)console.log(arr[1][k]);
// Array(3) [ "T11", "T12", "T13" ]
// Array(3) [ "T21", "T22", "T23" ]
for(let k=0;k<arr[1].length;k++)console.log(arr[1][0][k]);
// T11
// T12
for(let k=0;k<arr[1].length;k++)console.log(arr[1][1][k]);
// T21
// T22
for(let k=0;k<arr[1][0].length;k++)console.log(arr[1][0][k]);
// T11
// T12
// T13
for(let k=0;k<arr[1][1].length;k++)console.log(arr[1][1][k]);
// T21
// T22
// T23
</script>
// // // // // // // // // //
// // // // // // // // // //
And from the same point of view, a MULTIDIMENSIONAL OBJECT!
<script type="text/javascript">"use strict";
const o = {
un:{u1:"U1",u2:"U2",u3:"U3"},
deux:{
trois : {d11:"D11",d12:"D12",d13:"D13"},
quatre: {t21:"T21",t22:"T22",t23:"T23"}
}
};
let ref = Object.keys(o);
for(let k=0;k<ref.length;k++)
console.log(ref[k] , ":" ,
Object.values(o)[k]);
// un : Object { u1: "U1", u2: "U2", u3: "U3" }
// deux : Object { trois: {…}, quatre: {…} }
// quatre: Object { t21: "T21", t22: "T22", t23: "T23" }
// trois : Object { d11: "D11", d12: "D12", d13: "D13" }
ref = Object.keys(o["un"]);
for(let k=0;k<ref.length;k++)
console.log(ref[k] , ":" ,
Object.values(o["un"])[k]);
// u1 : U1
// u2 : U2
// u3 : U3
ref = Object.keys(o["deux"]);
for(let k=0;k<ref.length;k++)
console.log(ref[k] , ":" ,
Object.values(o["deux"])[k]);
// trois : Object { d11: "D11", d12: "D12", d13: "D13" }
// quatre : Object { t21: "T21", t22: "T22", t23: "T23" }
ref = Object.keys(o["deux"]["trois"]);
for(let k=0;k<ref.length;k++)
console.log(ref[k] , ":" ,
Object.values(o["deux"]["trois"])[k]);
// d11 : D11
// d12 : D12
// d13 : D13
ref = Object.keys(o["deux"]["quatre"]);
for(let k=0;k<Object.keys(ref).length;k++)
console.log(ref[k] , ":" ,
Object.values(o["deux"]["quatre"])[k]);
// t21 : T21
// t22 : T22
// t23 : T23
ref = Object.keys(o["deux"]["trois"]);
console.log(ref[0] , ":" ,
Object.values(o["deux"]["trois"])[0]);
// d11 : D11
ref = Object.values(o["deux"]["quatre"]);
console.log(Object.keys(o["deux"]["quatre"])[ref.length-1] ,
":" , ref[ref.length-1] );
// t23 : T23
</script>
