Finding a Hamiltonian path with JavaScript. How to improve efficiency?

I'm trying to solve this kata:
Given an integer N (<1000), return an array of the integers 1..N arranged so that the sum of every 2 consecutive numbers is a perfect square. If that's not possible, return false.
For example, if N=15, the result should be this array: [9, 7, 2, 14, 11, 5, 4, 12, 13, 3, 6, 10, 15, 1, 8]. For any N below 15 there's no answer, so the function should return false.
I thought 'how hard can this be?' and I've spent long days in the rabbit hole since. I've been programming for just a few months and don't have a CS background, so I'll write down what I understand of the problem so far, trying to use the proper concepts; please feel free to tell me if any expression is not correct.
Apparently, the problem is very similar to a well-known problem in graph theory, the travelling salesman problem (TSP). In this case, two vertices are connected if their sum is a perfect square. Also, I don't have to look for a cycle, just find one Hamiltonian path, not all of them.
I understand that what I'm using is backtracking. I build an object that represents the graph and then try to find the path recursively. This is how I build the object:
function buildAdjacentsObject (limit) {
  const potentialSquares = getPotentialSquares(limit)
  const adjacents = {}
  for (let i = 0; i < (limit + 1); i++) {
    adjacents[i] = {}
    for (let j = 0; j < potentialSquares.length; j++) {
      if (potentialSquares[j] > i) {
        const dif = potentialSquares[j] - i
        if (dif <= limit) {
          adjacents[i][dif] = 1
        } else {
          break
        }
      }
    }
  }
  return adjacents
}
function getPotentialSquares (limit) {
  // The largest possible sum of two distinct numbers in 1..limit
  const maxSum = limit * 2 - 1
  let square = 4
  let i = 3
  const potentialSquares = []
  while (square <= maxSum) {
    potentialSquares.push(square)
    square = i * i
    i++
  }
  return potentialSquares
}
At first I was using a hash table with an array of adjacent nodes under each key. But when my algorithm had to delete vertices from the object, it had to search those arrays over and over, which took linear time each time. Making the adjacent vertices hashable (an object keyed by vertex) improved my execution time. Then I look for the path with this function:
function findSquarePathInRange (limit) {
  // Build the graph object
  const adjacents = buildAdjacentsObject(limit)
  // Deep copy the object before making any changes
  const adjacentsCopy = JSON.parse(JSON.stringify(adjacents))
  // Create empty path
  const solution = []
  // Recursively complete the path
  function getSolution (currentCandidates) {
    if (solution.length === limit) {
      return solution
    }
    // Sort the candidates to start with the ones with fewer adjacent vertices
    currentCandidates = currentCandidates.sort((a, b) => {
      return Object.keys(adjacentsCopy[a]).length -
        Object.keys(adjacentsCopy[b]).length
    })
    for (const candidate of currentCandidates) {
      // Add the candidate to the path
      solution.push(candidate)
      // and delete it from the object
      for (const candidateAdjacent in adjacents[candidate]) {
        delete adjacentsCopy[candidateAdjacent][candidate]
      }
      if (getSolution(Object.keys(adjacentsCopy[candidate]))) {
        return solution
      }
      // If no solution was found, delete the element from the path
      solution.pop()
      // and add it back to the object
      for (const candidateAdjacent in adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent][candidate] = 1
      }
    }
    return false
  }
  const endSolution = getSolution(
    Array.from(Array(limit).keys()).slice(1)
  )
  // Object keys are strings, so convert the path back to numbers
  return (endSolution) ? endSolution.map(x => parseInt(x, 10)) : false
}
My solution works 'fast', but it's not fast enough: I need to pass more than 200 tests in less than 12 seconds, and so far it's only passing 150. Probably both my algorithm and my usage of JS can be improved, so, my questions:
Can you see a bottleneck in the code? The sorting step should be the one taking the most time, but it also gets me to the solution faster. Also, I'm not sure whether I'm using the best data structure for this kind of problem. I tried classic loops instead of for..in and for..of, but it didn't change the performance.
Do you see any place where I could save previous calculations and look them up later?
Regarding the last question, I read that there is a dynamic programming solution to the problem, but everywhere I found one it looks for the minimum distance, the number of paths, or the existence of a path, not the path itself. I read this everywhere but I'm unable to apply it:
Also, a dynamic programming algorithm of Bellman, Held, and Karp can be used to solve the problem in time O(n² · 2ⁿ). In this method, one determines, for each set S of vertices and each vertex v in S, whether there is a path that covers exactly the vertices in S and ends at v. For each choice of S and v, a path exists for (S, v) if and only if v has a neighbor w such that a path exists for (S − v, w), which can be looked up from already-computed information in the dynamic program.
I just can't get the idea of how to implement that if I'm not looking for all the paths. I found an implementation of a similar problem in Python that uses a cache and some bitmasking; I could translate it from Python, but I'm not sure how to apply those concepts to my algorithm. As far as I understand it, the idea would look something like the sketch below.
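This is my rough sketch of how that DP could recover the path itself (assuming adjacents is a 1-based array of neighbor lists, like the array version in the edits below; only feasible for small N, since it stores a state per subset of vertices):

function heldKarpPath (adjacents, n) {
  // dp[mask] is a Set of vertices v (0-based) such that some path
  // visits exactly the vertices in `mask` and ends at v
  const full = (1 << n) - 1
  const dp = new Array(1 << n).fill(null)
  for (let v = 0; v < n; v++) {
    dp[1 << v] = new Set([v])
  }
  // Extending a path only ever adds bits, so increasing mask order is safe
  for (let mask = 1; mask <= full; mask++) {
    if (!dp[mask]) continue
    for (const v of dp[mask]) {
      for (const w of adjacents[v + 1]) { // neighbors are stored 1-based
        const bit = 1 << (w - 1)
        if (mask & bit) continue // w already visited
        const next = mask | bit
        if (!dp[next]) dp[next] = new Set()
        dp[next].add(w - 1)
      }
    }
  }
  if (!dp[full]) return false
  // Walk backwards from any valid end vertex to rebuild the path
  const path = []
  let mask = full
  let v = dp[full].values().next().value
  while (true) {
    path.push(v + 1)
    const prevMask = mask & ~(1 << v)
    if (!prevMask) break
    // The predecessor must end a path over prevMask and be adjacent to v
    v = [...dp[prevMask]].find(w => adjacents[w + 1].includes(v + 1))
    mask = prevMask
  }
  return path.reverse()
}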
I'm currently out of ideas so any hint of something to try would be super helpful.
EDIT 1:
After Photon's comment, I tried going back to using a hash table for the graph, storing the adjacent vertices as arrays, and I added a separate array of booleans to keep track of the remaining vertices.
That improved my efficiency a lot. With these changes I avoided converting object keys to arrays all the time, there was no need to copy the graph object since it was no longer modified, and no need to loop after adding a node to the path. The bad part was that I then needed to check that separate array when sorting, to see which adjacent vertices were still available, and I had to filter the arrays before passing them to the next recursion.
Yosef's approach from the first answer, storing the adjacent vertices in an array and accessing them by index, proved even more efficient. My code so far (no changes to the square-finding function):
function square_sums_row (limit) {
  const adjacents = buildAdjacentsObject(limit)
  const adjacentsCopy = JSON.parse(JSON.stringify(adjacents))
  const solution = []
  function getSolution (currentCandidates) {
    if (solution.length === limit) {
      return solution
    }
    currentCandidates = currentCandidates.sort((a, b) => {
      return adjacentsCopy[a].length - adjacentsCopy[b].length
    })
    for (const candidate of currentCandidates) {
      solution.push(candidate)
      for (const candidateAdjacent of adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent] = adjacentsCopy[candidateAdjacent]
          .filter(t => t !== candidate)
      }
      if (getSolution(adjacentsCopy[candidate])) {
        return solution
      }
      solution.pop()
      for (const candidateAdjacent of adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent].push(candidate)
      }
    }
    return false
  }
  return getSolution(Array.from(Array(limit + 1).keys()).slice(1))
}
function buildAdjacentsObject (limit) {
  const potentialSquares = getPotentialSquares(limit)
  const squaresLength = potentialSquares.length
  const adjacents = []
  for (let i = 1; i < (limit + 1); i++) {
    adjacents[i] = []
    for (let j = 0; j < squaresLength; j++) {
      if (potentialSquares[j] > i) {
        const dif = potentialSquares[j] - i
        if (dif <= limit) {
          adjacents[i].push(dif)
        } else {
          break
        }
      }
    }
  }
  return adjacents
}
EDIT 2:
The code performs fine in most cases, but my worst-case scenarios suck:
// time for 51: 30138.229ms
// time for 77: 145214.155ms
// time for 182: 22964.025ms
EDIT 3:
I accepted Yosef's answer, as it was super useful for improving the efficiency of my JS code. I then found a way to tweak the algorithm to avoid paths with dead ends, using some of the restrictions from the paper A Search Procedure for Hamilton Paths and Circuits.
Basically, before recursing, I check 2 things (a sketch of both checks follows this list):
If there is any node with no remaining edges that is not yet part of the path, while the path is still missing more than 1 node
If there are more than 2 nodes left with exactly 1 edge (one of them can be the following node, which had 2 edges before the edge to the current node was deleted, and another can be the last node of the path)
Both situations make it impossible to complete a Hamiltonian path with the remaining nodes and edges (if you draw the graph it becomes clear why). Following the same logic, there's a further improvement available if you check nodes with only 2 edges (one way in and one way out); I think you could use that to delete other edges in advance, but it wasn't necessary, at least for me.
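The checks look roughly like this (an illustrative sketch, not my exact code; it assumes adjacentsCopy holds the remaining neighbor arrays, inPath flags the vertices already used, and remaining counts the vertices still missing):

function hasDeadEnd (adjacentsCopy, inPath, remaining) {
  let degreeOneCount = 0
  for (let v = 1; v < adjacentsCopy.length; v++) {
    if (inPath[v]) continue
    const degree = adjacentsCopy[v].length
    // An unvisited node with no edges can never be reached,
    // unless it is the only node left to place
    if (degree === 0 && remaining > 1) return true
    // At most two degree-one nodes can be tolerated:
    // the next node and the final node of the path
    if (degree === 1 && ++degreeOneCount > 2) return true
  }
  return false
}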
With these checks the algorithm performs a bit worse in most cases, where just sorting by remaining edges was already good enough to predict the next node and the extra work is wasted, but it solves the worst cases in much better time. For example, limit = 77 is now solved in 15ms, while limit = 1000 went from 30ms to 100ms.
This is a really long post; if you have any edit suggestions, let me know. I don't think posting the final code is the best idea, taking into account that you can't check the solutions on the platform before solving the kata. But the accepted answer and this final edit should be good advice for thinking about this last part while still learning something. Hope it's useful.

By replacing the object with an array you save yourself from converting the object to an array every time you want to find its length (which you do a lot: at every step of the sort algorithm) or want the keys for the next candidates. In my tests the code below was a lot more effective in terms of execution time (0.102s vs 1.078s for limit = 4500 on my machine):
function buildAdjacentsObject (limit) {
  const potentialSquares = getPotentialSquares(limit)
  const adjacents = [];
  for (let i = 0; i < (limit + 1); i++) {
    adjacents[i] = [];
    for (let j = 0; j < potentialSquares.length; j++) {
      if (potentialSquares[j] > i) {
        const dif = potentialSquares[j] - i
        if (dif <= limit) {
          adjacents[i].push(dif)
        } else {
          break
        }
      }
    }
  }
  return adjacents
}

function getPotentialSquares (limit) {
  const maxSum = limit * 2 - 1
  let square = 4
  let i = 3
  const potentialSquares = []
  while (square <= maxSum) {
    potentialSquares.push(square)
    square = i * i
    i++
  }
  return potentialSquares
}
function findSquarePathInRange (limit) {
  // Build the graph object
  const adjacents = buildAdjacentsObject(limit)
  // Deep copy the object before making any changes
  const adjacentsCopy = JSON.parse(JSON.stringify(adjacents))
  // Create empty path
  const solution = [];
  // Recursively complete the path
  function getSolution (currentCandidates) {
    if (solution.length === limit) {
      return solution
    }
    // Sort the candidates to start with the ones with fewer adjacent vertices
    currentCandidates = currentCandidates.sort((a, b) => {
      return adjacentsCopy[a].length - adjacentsCopy[b].length
    });
    for (const candidate of currentCandidates) {
      // Add the candidate to the path
      solution.push(candidate)
      // and delete it from the object
      for (const candidateAdjacent of adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent] = adjacentsCopy[candidateAdjacent].filter(t => t !== candidate)
      }
      if (getSolution(adjacentsCopy[candidate])) {
        return solution
      }
      // If no solution was found, delete the element from the path
      solution.pop()
      // and add it back to the object
      for (const candidateAdjacent of adjacents[candidate]) {
        adjacentsCopy[candidateAdjacent].push(candidate);
      }
    }
    return false
  }
  const endSolution = getSolution(
    Array.from(Array(limit).keys()).slice(1)
  )
  // The elements are already numbers, so no conversion is needed
  return endSolution
}
var t = new Date().getTime();
var res = findSquarePathInRange(4500);
var t2 = new Date().getTime();
console.log(res, ((t2-t)/1000).toFixed(4)+'s');

Related

Max-Heap implementation: How to make first 3 nodes always the highest?

So I've been trying to implement a max-heap. The use I want to give it is that, at any one time, the first 3 elements of the heap (that is, the root and its two children) should always be the highest in the whole heap.
I thought the heap property would guarantee this, but not a single implementation example I have come across has been able to deal with the fact that sometimes there are elements at one level of the heap that are lower than an element at a deeper level.
This is the implementation I've been using, basically built from examples I found on the internet as reference:
let createHeap = function () {
  return {
    arr: [],
    size: 0,
    getParent: function (i) { return Math.floor((i - 1) / 2) },
    getLeft: function (i) { return (2 * i + 1) },
    getRight: function (i) { return (2 * i + 2) },
    insert: function (val) {
      let i = this.size
      this.size++
      this.arr[i] = val;
      if (i != 0) {
        for (let j = this.getParent(i); j >= 0; j--) {
          this.heapify(j)
        }
      }
    },
    heapify: function (i) {
      let largest = i;
      let leftIndex = this.getLeft(i)
      let rightIndex = this.getRight(i)
      if (leftIndex < this.size && this.arr[leftIndex] > this.arr[largest])
        largest = leftIndex
      if (rightIndex < this.size && this.arr[rightIndex] > this.arr[largest])
        largest = rightIndex;
      if (largest != i) {
        let temp = this.arr[largest];
        this.arr[largest] = this.arr[i]
        this.arr[i] = temp
        this.heapify(largest);
      }
    }
  }
}
Now, the problem is, when I insert the following values in this order: 1, 2, 3, 4, 5
The result I get for the heap is:
    5
   / \
  4   2
 / \
1   3
This is clear when you read what the code does, but it doesn't seem to preserve the heap property, as far as I've understood what the heap property means. The code is missing something to do what I want it to do, but I don't want to implement changes that would increase the complexity too badly, so I wanted to ask:
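To double-check myself, I verified that the array the code produces does satisfy the usual parent >= children rule, so my confusion may be about what that property actually guarantees (my own quick sketch):

// Quick sanity check: the parent >= children rule holds for [5, 4, 2, 1, 3],
// even though 3 (at depth 2) is larger than 2 (at depth 1)
function isMaxHeap (arr) {
  for (let i = 0; i < arr.length; i++) {
    const left = 2 * i + 1
    const right = 2 * i + 2
    if (left < arr.length && arr[left] > arr[i]) return false
    if (right < arr.length && arr[right] > arr[i]) return false
  }
  return true
}
console.log(isMaxHeap([5, 4, 2, 1, 3])) // true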
Does anyone know if the implementation is wrong, or if it was never meant to do what I want it to do (keep the first 3 elements of the heap always the 3 highest in the whole heap)?
What would be the best way of implementing this without increasing the complexity too much?

How can I efficiently search a string for occurrences of words?

Essentially, I have a set of about 250,000 words and want to be able to return a list of which of them are found in a given string.
E.g. if the input string is 'APPLEASEDITION', I want to return
[APP, APPLE, PLEA, PLEAS, PLEASE, PLEASED, LEA, LEAS, LEASE, LEASED, EA, EAS, EASE, EASED, AS, SEDITION, EDITION, IT, TI, ON]
I came up with this code, which works faster than the method mentioned above for shorter input strings (up to 15 characters), but doubles in execution time with each added letter:
const findWords = (instring, solutions = null) => {
  if (!solutions) solutions = new Set();
  if (!instring) {
    return new Set();
  }
  if (words[instring]) {
    solutions.add(instring);
  }
  const suffix = instring.slice(1);
  const prefix = instring.slice(0, instring.length - 1);
  if (!solutions.has(prefix))
    solutions = new Set([...solutions, ...findWords(prefix, solutions)]);
  if (!solutions.has(suffix))
    solutions = new Set([...solutions, ...findWords(suffix, solutions)]);
  return solutions;
};
Wondering if anyone can help me optimize the code?
Edit:
I made a different solution; it works much better:
const getAllSubstrings = (str) => {
  let result = [];
  for (let i = 0; i < str.length; i++) {
    for (let j = i + 1; j < str.length + 1; j++) {
      result.push(str.slice(i, j));
    }
  }
  return result;
}

const findWords = (instring) => {
  const solutions = []
  let subs = getAllSubstrings(instring)
  for (let sub of subs) {
    if (words[sub])
      solutions.push(sub)
  }
  return solutions
}
Still open to possible improvements, but this works well enough for my use case
As it stands your logic assumes your input starts or ends with the word, but doesn't consider words in the middle - you'll need to generate permutations.
Convert your dictionary to a hash where the words are keys - O(n) => O(1) - then you can check whether a candidate word is in the dictionary with dictionary[possibleWord].
You could convert your array of dictionary words into a binary search tree or a trie (a minimal trie sketch follows this list). There might be a performance benefit to converting your source text into a collection of BSTs/tries, where each one represents a possible word/permutation, and then comparing BSTs/tries rather than strings, but I'm not sure how that would beat string comparison at the moment.
You can limit the length of a given permutation to the maximum word length in your dictionary. You'll still end up with a lot of permutations, but possibly fewer than you have currently.
As the comments state, you may want to do this server-side for more power, in a language more efficient than JS, or using WASM.
Some javascript libraries that have binary search tree tools:
https://developers.google.com/closure/library/
https://www.npmjs.com/package/binary-search-tree
https://www.npmjs.com/package/trie-search
Alternatively, you might be able to create two hashes (one of permutations, one of dictionary words), or another data structure made for computing a "diff" or "overlap", and extract the keys that are in both sets.
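To make the trie suggestion concrete, here is a minimal sketch (my own illustration, not taken from the libraries above): insert every dictionary word once, then walk the trie from every start index of the input, collecting words as you go, so each candidate substring is checked in time proportional to its length.

// Build a plain-object trie from the dictionary
function buildTrie (words) {
  const root = {}
  for (const word of words) {
    let node = root
    for (const ch of word) {
      node = node[ch] || (node[ch] = {})
    }
    node.isWord = true // marks that a word ends at this node
  }
  return root
}

// Walk from every start index, collecting dictionary words found
function findWordsWithTrie (trie, input) {
  const found = new Set()
  for (let i = 0; i < input.length; i++) {
    let node = trie
    for (let j = i; j < input.length; j++) {
      node = node[input[j]]
      if (!node) break // no dictionary word continues with this prefix
      if (node.isWord) found.add(input.slice(i, j + 1))
    }
  }
  return [...found]
}

const trie = buildTrie(['APP', 'APPLE', 'PLEA', 'EASED'])
console.log(findWordsWithTrie(trie, 'APPLEASEDITION'))
// ['APP', 'APPLE', 'PLEA', 'EASED']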

Is it worth it to convert array into set to search in NodeJS

I would like to know whether it is worth converting an array into a Set in order to search it in NodeJS.
My use case is that this search is done lots of times, but not necessarily on big sets of data (the array can go up to ~2000 items from time to time).
I'm looking for a specific id in the list.
Which approach is better :
const isPresent = (myArray, id) => {
  return Boolean(myArray.some((arrayElement) => arrayElement.id === id));
}
or
const mySet = new Set(myArray.map((el) => el.id)) // a Set of the ids, not of the objects
const isPresent = (mySet, id) => {
  return mySet.has(id);
}
I know that theoretically the second approach is better, as it is O(1) versus O(n) for the first approach. But can the instantiation of the Set offset the gain on small arrays?
@jonrsharpe - particularly for your case, I found that converting an array of 2k items to a Set itself takes ~1.15ms. No doubt searching a Set is faster than an array, but in your case this additional conversion can be a little costly.
You can run the code below in your browser console to check; new Set(arr) takes almost ~1.2ms:
var arr = [], set = new Set(), n = 2000;
for (let i = 0; i < n; i++) {
  arr.push(i);
}
console.time('Set');
set = new Set(arr);
console.timeEnd('Set');
Adding an element to a Set is also costly. The code below shows the time required to insert n items into an array vs. a Set, and shows that array insertion is faster:
var arr = [], set = new Set(), n = 2000;
console.time('Array');
for (let i = 0; i < n; i++) {
  arr.push(i);
}
console.timeEnd('Array');
console.time('Set');
for (let i = 0; i < n; i++) {
  set.add(i);
}
console.timeEnd('Set');
I ran the following code to analyze the speed of locating an element in an array vs. a Set, and found that the Set is 8-10 times faster than the array. You can copy-paste this code into your browser to analyze further:
var arr = [], set = new Set(), n = 100000;
for (let i = 0; i < n; i++) {
  arr.push(i);
  set.add(i);
}
var result;
console.time('Array');
result = arr.indexOf(12313) !== -1;
console.timeEnd('Array');
console.time('Set');
result = set.has(12313);
console.timeEnd('Set');
So for your case array.some is better!
I will offer a different upside of using Set: your code becomes more semantic, so it's easier to see what it does.
Other than that, this post has a nice comparison - Javascript Set vs. Array performance - but make your own measurements if you really feel that this is your bottleneck. Don't optimise things that are not your bottleneck!
My own heuristic is an isPresent-like utility for nicer code, but if the check is done in a loop I always construct a Set beforehand, as in the sketch below.
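Concretely, that heuristic looks something like this (an illustrative sketch with made-up data):

// Build the Set once, outside the hot loop, so its O(n)
// construction cost is paid a single time
const myArray = [{ id: 1 }, { id: 2 }, { id: 3 }] // illustrative data
const idsToCheck = [2, 5, 3]

const ids = new Set(myArray.map((el) => el.id))
for (const id of idsToCheck) {
  console.log(id, ids.has(id)) // 2 true, 5 false, 3 true
}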

Compute every combination of 6 numbers

I'm more of a media developer and not the best coder, but I find myself needing to learn JavaScript better. I'm creating a math card game where the human player and the automated player are each dealt 6 cards. Each player must combine (concatenate) three of the cards to make a top number and the other three to make a bottom number. The two numbers are then subtracted. For the automated player, I have to go through every possible combination of the six cards, so that when the two numbers are subtracted the result gets as close as possible to a target number. I'm not very good with arrays, so I started testing every possible combination by hand and then comparing which one was closer (see the example below). This is a very inefficient way of coding this, but I'm just not sure how to do it otherwise. Any help would be greatly appreciated.
The variables have already been declared.
alienTopNum = "" + alienNum1 + alienNum2 + alienNum3;
alienBottomNum = "" + alienNum4 + alienNum5 + alienNum6;
oldDiff = targetNum - (alienTopNum - alienBottomNum);
player.SetVar("AC1R1", alienNum1);
player.SetVar("AC2R1", alienNum2);
player.SetVar("AC3R1", alienNum3);
player.SetVar("AC4R1", alienNum4);
player.SetVar("AC4R1", alienNum5);
player.SetVar("AC4R1", alienNum6);
player.SetVar("ATR1", alienTopNum - alienBottomNum);

alienTopNum = "" + alienNum1 + alienNum2 + alienNum3;
alienBottomNum = "" + alienNum4 + alienNum6 + alienNum5;
newDiff = targetNum - (alienTopNum - alienBottomNum);
if (Math.abs(newDiff) < Math.abs(oldDiff)) {
  oldDiff = newDiff;
  player.SetVar("AC1R1", alienNum1);
  player.SetVar("AC2R1", alienNum2);
  player.SetVar("AC3R1", alienNum3);
  player.SetVar("AC4R1", alienNum4);
  player.SetVar("AC4R1", alienNum6);
  player.SetVar("AC4R1", alienNum5);
  player.SetVar("ATR1", alienTopNum - alienBottomNum);
}
etc....
etc....
Store the dealt cards in an array rather than in individual variables, because that makes them a lot easier to handle when generating permutations. You don't say what values the cards can have, but as an example, given a "hand" of [1,2,3,4,5,6], if you get the permutations as an array of arrays:
[ [1,2,3,4,5,6], [1,2,3,4,6,5], [1,2,3,5,4,6], ...etc. ]
then you can loop through it to process each permutation: take the first three "cards" and the last three to get the current iteration's two numbers, subtract them, and see whether the result is closer to the target than previous iterations' results.
The following does that, making use of the array permutation function that I found in this answer to another question. I'm not going to explain that algorithm, because you can easily google up various permutation algorithms for yourself, but I have put comments in my bestPlay() function to explain how I process the permutations to figure out the best score for a hand.
I haven't tried to use your player or player.SetVar() method, but hopefully if you study this you can adapt it to use with your objects.
You didn't say what values the cards could have, so I've assumed a deck of twenty cards that repeats the numbers 0-9 twice.
function bestPlay(hand, target) {
  var perms = permutator(hand); // Get all permutations for hand
  var best = perms[0];          // Use the first as initial best
  var bestDiff = difference(best);
  for (var i = 1; i < perms.length; i++) { // Loop over the rest of the permutations
    var diff = difference(perms[i]);       // Get diff for current permutation
    if (Math.abs(target - diff) < Math.abs(target - bestDiff)) { // Check if
      best = perms[i];                     // current beats previous best
      bestDiff = diff;                     // and if so make it new best
    }
  }
  // Output the results for this hand:
  console.log(`Hand: ${hand.join(" ")}`);
  console.log(`Best Numbers: ${best.slice(0,3).join("")} ${best.slice(3).join("")}`);
  console.log(`Difference: ${bestDiff}`);
}

var hands = deal();
var target = 112;
console.log(`Target: ${target}`);
bestPlay(hands[1], target);
bestPlay(hands[2], target);

function difference(cards) {
  return Math.abs(cards.slice(0,3).join("") - cards.slice(3).join(""));
}

function deal() {
  var cards = [1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0];
  // shuffle
  cards.sort(function() { return Math.random() - 0.5; });
  // first hand is first six cards, second hand is next six
  return {
    1: cards.slice(0,6),
    2: cards.slice(6, 12)
  };
}

function permutator(inputArr) {
  var results = [];
  function permute(arr, memo) {
    var cur, memo = memo || [];
    for (var i = 0; i < arr.length; i++) {
      cur = arr.splice(i, 1);
      if (arr.length === 0) {
        results.push(memo.concat(cur));
      }
      permute(arr.slice(), memo.concat(cur));
      arr.splice(i, 0, cur[0]);
    }
    return results;
  }
  return permute(inputArr);
}
If you click the "Run Code Snippet" button several times you'll see that sometimes a given hand has a combination of numbers that exactly matches the target, sometimes it doesn't.

JavaScript; n-dimensional array creation

In the process of building a JavaScript interpreter for a simple language, I've run into the following problem.
After parsing, we get an array of indices that specifies the element of an n-dimensional array to be modified. For instance, after parsing this:
a[1, 1, 1]
we get the array [1, 1, 1]. The language I'm working on doesn't have variable declarations, so variables get initialized on their first use. My goal is to be able to create this n-dimensional array so that I can place it in the variable table (in the example above, we'd need to create a 3-dimensional array).
The short question: is there a way to create an n-dimensional array in JavaScript without using eval()?
Tested in Chrome:
function createNDimArray(dimensions) {
  if (dimensions.length > 0) {
    var dim = dimensions[0];
    var rest = dimensions.slice(1);
    var newArray = new Array();
    for (var i = 0; i < dim; i++) {
      newArray[i] = createNDimArray(rest);
    }
    return newArray;
  } else {
    return undefined;
  }
}
Then createNDimArray([3, 2, 5]) returns a 3x2x5 array.
You can use a similar recursive procedure to access an element whose index is in an array:
function getElement(array, indices) {
  if (indices.length == 0) {
    return array;
  } else {
    return getElement(array[indices[0]], indices.slice(1));
  }
}
Setting an element is similar, and left as an exercise for the reader.
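For reference, one possible sketch of that exercise (my illustration, not the answerer's code): recurse down to the last index and assign there.

function setElement(array, indices, value) {
  if (indices.length == 1) {
    array[indices[0]] = value; // last index: do the assignment
  } else {
    setElement(array[indices[0]], indices.slice(1), value);
  }
}

var arr = createNDimArray([3, 2, 5]);
setElement(arr, [2, 1, 4], "x");
getElement(arr, [2, 1, 4]); // "x"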
There's nothing built in, but it's pretty easy to create a function that would do the job:
var genArray = function () {
  var arr, len, i;
  if (arguments.length > 0) {
    len = [].slice.call(arguments, 0, 1)[0];
    arr = new Array(len);
    for (i = 0; i < len; i++) {
      arr[i] = genArray.apply(null, [].slice.call(arguments, 1));
    }
  } else {
    return null; //or whatever you want to initialize values to.
  }
  return arr;
};

var a = genArray(3, 2);    //is [[null, null],[null, null],[null, null]]
var b = genArray(3, 1, 1); //is [[[null]],[[null]],[[null]]]
a[0][1];    //is null
b[1][0][0]; //is null
b[1][0][0] = 3;
b[1][0][0]; //is 3;
b; //is [[[null]],[[3]],[[null]]]
Maybe that will help?
PS --
I know this might seem like more effort than is necessary. But unfortunately, JavaScript arrays are not really "arrays" (if by "array" you mean a contiguous, indexed, fixed-size memory block); they're more like "maps" in most languages. So there's a certain amount of effort involved in creating them. Most languages have no problem creating multi-dimensional arrays because they're just doing some simple multiplication followed by a malloc(). But with JavaScript, you really have to recursively generate your arrays if you want to have them pre-constructed. It's a pain, but it does demonstrate the effort required by the interpreter.
Go figure.
For creating an n-dimensional array:
function createNDimArray(dimensions) {
  var ret = undefined;
  if (dimensions.length == 1) {
    ret = new Array(dimensions[0]);
    for (var i = 0; i < dimensions[0]; i++)
      ret[i] = null; //or another value
    return ret;
  } else {
    //recursion
    var rest = dimensions.slice(1);
    ret = new Array(dimensions[0]);
    for (var i = 0; i < dimensions[0]; i++)
      ret[i] = createNDimArray(rest);
    return ret;
  }
}
EDIT: Since any recursive solution will have a limit on the size of the array you can create, I made another solution in my PJs @ GitHub library. This one runs at pseudo-instant speed and can create and manage a multidimensional array of any size, any structure, with any dimensions at any branch. It can also simulate prefilling and/or use a node object of custom design. Check it out here: https://github.com/PimpTrizkit/PJs/wiki/14.-Complex-Multidimensional-Object--(pCMO.js)
Using a modified version of jfabrizio's solution:
function createNDimArray(dimensions) {
  var t, i = 0, s = dimensions[0], arr = new Array(s);
  if (dimensions.length < 3) for (t = dimensions[1]; i < s;) arr[i++] = new Array(t);
  else for (t = dimensions.slice(1); i < s;) arr[i++] = createNDimArray(t);
  return arr;
}
Usages:
var arr = createNDimArray([3, 2, 3]);
// arr = [[[,,],[,,]],[[,,],[,,]],[[,,],[,,]]]
console.log(arr[2][1]); // in FF: Array [ <3 empty slots> ]
console.log("Falsy = " + (arr[2][1][0] ? true : false)); // Falsy = false
I found this to be quite a bit faster; I might stretch to say it could be the fastest way to generate an N-dimensional array in JavaScript. The refactoring above gave some good speed increases, but the best speed increase came from not prefilling, of course. This version doesn't prefill the array: it only returns a fully created N-dimensional array of the given lengths where the last level is just an empty array. I would hope that arr[x][y][z] ? arr[x][y][z] : null is sufficient if you really need the null value; it is for my uses. :)
If you need prefilling, use his original version.
And if you don't really care about what I did, then stop reading.
Want more geek talk? A little something about recursion for those learning out there. Alright, here are the tactics. When doing deep recursion, keep in mind the final level: it's where most of the work is done. In this case it's the Nth dimension, literally. This is your "payload"; the rest is logistics. In jfab's function, when dimensions.length gets to 1, it's at the last dimension, it's in the Nth dimension, and it performs the payload, which is to create the array of nulls (or, in my case, an empty array). Since the recursion gets so deep, each dimension is a factor of the last one; by the time you get to the Nth dimension you will have a lot of function calls, and logistics get cumbersome for the computer. At the Nth dimension you will call your base recursion function (createNDimArray in our case) more times for the payload than for logistics.
Now, as in jfab's original solution, putting the execution of the payload as the very first thing you do in the recursion (if possible) is usually a good thing, especially if it's simple. Here, the payload became building the final 2D array (instead of just a 1D array, i.e. returning a new Array() only). The excessive function calls then no longer have to happen at this level. Of course, if you want to prefill the array, this shortcut doesn't always help; but more to the point, prefilling the array would then be the appropriate payload. By not visiting every item in the Nth dimension we have effectively removed it: there is one less level of function calls, and the Nth dimension's payload is actually done in the (N-1)th dimension, so we never call the recursive function again just to deliver a new Array(). Unfortunately, the call to new Array(x) (in general) doesn't see it that way; its execution time does increase with a larger x. That is effectively still visiting every item in the Nth dimension, but now we do it only once, with native code, wrapped in a tight and light loop.
Now we require that createNDimArray only be called with N > 1, i.e. never used to create 1D arrays. Theoretically you could require a larger N and unroll even more dimensions at the end: the line with if (dimensions.length < 3) would read something like < 4 or < 5, and you would have to wrap that many more for loops around the one that's there, each needing its own set of vars. So I'm not sure how efficient it all might be, as you are trading excessive function calls and stack manipulation for a similar idea in nested for loops. But I suppose it could speed up some environments, if you know that N is always above a certain level or if it's only for the final dimensions. Here, I did it for the last two dimensions. If you unroll too much, then your payload itself becomes a bear; only testing will tell if it's worth it. Stack space does seem limited, and I think I remember having been able to make larger arrays with more unrolling. There is a limit to how big you can make an array, and recursive solutions that call themselves for each item at the Nth level had the lowest limit, if I recall correctly - much lower.
The next part of revising his solution is just the logistics: a simple refactor to get rid of excessive blocks and code. Join all the var work together and that's it. Since you need an arr to return once the looping is over, you might as well do all your var work on one line first, and luckily three of the four vars have the same initialization. Remember, JavaScript can optimize code when joining with , where possible. This also makes for smaller code.
PT
One more version of createNDimArray using map, apply and bind (note that the recursive call must use the same name the function is declared with):

function createNDimArray(dims) {
  return dims.length === 1
    ? new Array(dims[0])
    : Array.apply(null, Array(dims[0])).map(createNDimArray.bind(null, dims.slice(1)));
}
createNDimArray([3, 2, 5]); // returns 3x2x5 array
Creating an ND array requires cloning nested ND arrays, so you will need a proper Array.prototype.clone() method; the rest is easy. To my knowledge the following is the simplest and most efficient way in JS:

Array.prototype.clone = function () {
  return this.reduce((p, c, i) => (p[i] = Array.isArray(c) ? c.clone() : c, p), [])
}

function arrayND(...n) {
  return n.reduceRight((p, c) => (new Array(c)).fill().map(e => Array.isArray(p) ? p.clone() : p))
}

var NDarr = arrayND(4, 4, 4, 4, "."); // sizes of the dimensions, with the init value at the end
console.log(JSON.stringify(NDarr))
NDarr[0][1][2][3] = "kitty"; // access any location and change it
console.log(JSON.stringify(NDarr))
Reason for Answer
There are good answers here, but as JavaScript has changed, here is an additional method of tackling this problem with some of its updated features:
function nArray (dem, size = dem, fill = null, currDepth = 0) {
  const arr = new Array(size).fill(fill);
  return (currDepth + 1 === dem) ? arr : arr.map(i => nArray(dem, size, fill, currDepth + 1));
};
Notes
dem is the number of dimensions of the array.
size is the size of each dimension; by default it is the dem value.
fill is the value the array will be filled with by default.
currDepth is not to be used; it exists for the recursive nature of the function.
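For example, an illustrative call (not from the original answer):

const grid = nArray(3, 2, 0); // 3 dimensions, each of size 2, filled with 0
console.log(JSON.stringify(grid));
// [[[0,0],[0,0]],[[0,0],[0,0]]]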
Create an n-dimensional matrix array with default values:

function arr (arg, def = 0) {
  if (arg.length > 2) {
    // pass def down so deeper levels keep the chosen default value
    return Array(arg[0]).fill().map(() => arr(arg.slice(1), def));
  } else {
    return Array(arg[0]).fill().map(() => Array(arg[1]).fill(def));
  }
}

//simple usage -> fills with 0
var s = arr([3, 5, 8, 4]) // 4 dimensions
var t = arr([5, 7])       // 2 dimensions

//fill with null
var k = arr([4, 7, 9], null) // 3 dimensions
If you need to create a 4-dimensional array with indices from 0 to 4 in each dimension, just use this code:
function createNDimArray(dimensions) {
  if (dimensions.length > 0) {
    var dim = dimensions[0];
    var rest = dimensions.slice(1);
    var newArray = new Array();
    for (var i = 0; i < dim; i++) {
      newArray[i] = createNDimArray(rest);
    }
    return newArray;
  } else {
    return undefined;
  }
}

var MyArray = createNDimArray([5, 5, 5, 5]);
//returns a 5x5x5x5 array with indices from 0 to 4;
MyArray[4][4][4][4] = "MyArray 4d MyValue";
alert(MyArray[4][4][4][4]);

//For a 5-dimensional array of size 5x4x3x2x2, do this:
var MyArray_5d = createNDimArray([5, 4, 3, 2, 2]);
MyArray_5d[4][3][2][1][1] = "MyArray 5d MyValue";
alert(MyArray_5d[4][3][2][1][1]);
MULTIDIMENSIONAL ARRAYS can be seen as EMBEDDED ARRAYS.
See if the following can help.
<script type="text/javascript">"use strict";
const arr = [
  ["D1", "D2", "D3"],
  [
    ["T11", "T12", "T13"],
    ["T21", "T22", "T23"]
  ]
];

for (let k = 0; k < arr[0].length; k++) console.log(arr[0][k]);
// D1
// D2
// D3

for (let k = 0; k < arr[1].length; k++) console.log(arr[1][k]);
// Array(3) [ "T11", "T12", "T13" ]
// Array(3) [ "T21", "T22", "T23" ]

for (let k = 0; k < arr[1].length; k++) console.log(arr[1][0][k]);
// T11
// T12

for (let k = 0; k < arr[1].length; k++) console.log(arr[1][1][k]);
// T21
// T22

for (let k = 0; k < arr[1][0].length; k++) console.log(arr[1][0][k]);
// T11
// T12
// T13

for (let k = 0; k < arr[1][1].length; k++) console.log(arr[1][1][k]);
// T21
// T22
// T23
</script>
// // // // // // // // // //
// // // // // // // // // //
And from the same point of view, a MULTIDIMENSIONAL OBJECT!
<script type="text/javascript">"use strict";
const o = {
  un: { u1: "U1", u2: "U2", u3: "U3" },
  deux: {
    trois: { d11: "D11", d12: "D12", d13: "D13" },
    quatre: { t21: "T21", t22: "T22", t23: "T23" }
  }
};

let ref = Object.keys(o);
for (let k = 0; k < ref.length; k++)
  console.log(ref[k], ":", Object.values(o)[k]);
// un : Object { u1: "U1", u2: "U2", u3: "U3" }
// deux : Object { trois: {…}, quatre: {…} }
// quatre: Object { t21: "T21", t22: "T22", t23: "T23" }
// trois : Object { d11: "D11", d12: "D12", d13: "D13" }

ref = Object.keys(o["un"]);
for (let k = 0; k < ref.length; k++)
  console.log(ref[k], ":", Object.values(o["un"])[k]);
// u1 : U1
// u2 : U2
// u3 : U3

ref = Object.keys(o["deux"]);
for (let k = 0; k < ref.length; k++)
  console.log(ref[k], ":", Object.values(o["deux"])[k]);
// trois : Object { d11: "D11", d12: "D12", d13: "D13" }
// quatre : Object { t21: "T21", t22: "T22", t23: "T23" }

ref = Object.keys(o["deux"]["trois"]);
for (let k = 0; k < ref.length; k++)
  console.log(ref[k], ":", Object.values(o["deux"]["trois"])[k]);
// d11 : D11
// d12 : D12
// d13 : D13

ref = Object.keys(o["deux"]["quatre"]);
for (let k = 0; k < ref.length; k++)
  console.log(ref[k], ":", Object.values(o["deux"]["quatre"])[k]);
// t21 : T21
// t22 : T22
// t23 : T23

ref = Object.keys(o["deux"]["trois"]);
console.log(ref[0], ":", Object.values(o["deux"]["trois"])[0]);
// d11 : D11

ref = Object.values(o["deux"]["quatre"]);
console.log(Object.keys(o["deux"]["quatre"])[ref.length - 1], ":", ref[ref.length - 1]);
// t23 : T23
</script>
