Adding duplicated array values - JavaScript

I have an array of [food, quantity] pairs with duplicated values, and I want to add up the quantities of the same food, but I can't seem to find a way to do that.
I want to do this using JavaScript, and the array looks like this:
[
["Burger", 5],
["Pizza", 10],
["Coke", 13],
["Burger", 7],
["Soda", 10],
["Pizza", 4],
["Burger", 12]
]
and I want the result to look like this:
[
["Burger", 24],
["Pizza", 14],
["Coke", 13],
["Soda", 10]
]
And then I want to display the result in a table.

You could use reduce to group each food: create an accumulator with each food as key and the sum of its quantities as value. If the key has already been added, increment it; otherwise add the key with the quantity as its value. Then use Object.entries() to get a 2D array of food/total-quantity pairs.
const input = [["Burger", 5], ["Pizza", 10], ["Coke", 13], ["Burger", 7], ["Soda", 10], ["Pizza", 4], ["Burger", 12]];
const counter = input.reduce((acc, [food, value]) => {
  // if the food is already a key, add to it; otherwise start with this value
  acc[food] = acc[food] + value || value;
  return acc;
}, {});
const output = Object.entries(counter);
console.log(JSON.stringify(output));
This is what the accumulator/counter object will look like:
{
"Burger": 24,
"Pizza": 14,
"Coke": 13,
"Soda": 10
}
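Since the question also asks about displaying the result in a table, here is one minimal sketch of how the output pairs could be rendered as table rows (it assumes an empty <table id="result"></table> element exists on the page; the id is made up for the example):
const table = document.getElementById("result");
output.forEach(([food, total]) => {
  const row = table.insertRow();         // add a <tr>
  row.insertCell().textContent = food;   // first <td>: the food name
  row.insertCell().textContent = total;  // second <td>: the summed quantity
});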

You can try something like this. Pushing a copy of each pair (rather than the sub-array itself) keeps the source array from being mutated when quantities are added:
const arr = [["Burger", 5], ["Pizza", 10], ["Coke", 13], ["Burger", 7], ["Soda", 10], ["Pizza", 4], ["Burger", 12]];
let ans = [];
arr.forEach(([name, qty]) => {
  const found = ans.find((y) => y[0] === name);
  if (found) {
    found[1] += qty;
  } else {
    ans.push([name, qty]); // push a copy, not a reference to the original pair
  }
});
console.log(ans);

You may try something like this:
let array = [["Burger" , 5], ["Pizza" , 10], ["Coke" , 13], ["Burger" , 7], ["Soda" , 10], ["Pizza" , 4], ["Burger" , 12]];
let itemObj = {};
for (let item of array) {
  if (itemObj.hasOwnProperty(item[0]))
    itemObj[item[0]] += item[1];
  else
    itemObj[item[0]] = item[1];
}
console.log(itemObj);
let newArray = Object.keys(itemObj).map(function(key) {
  return [key, itemObj[key]];
});
console.log(newArray);
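If Object.entries() is available in your environment, the Object.keys().map() step above can be collapsed into a single call (an optional shortcut with the same result):
let newArray = Object.entries(itemObj);
console.log(newArray);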

Related

Max with condition in JavaScript

I have two arrays:
search array: [['#S!', 1, 1], ['#$#', 2, 5], ['#S!', 10, 12], ['#$#', 21, 5]]
and key array: ['#S!','#$#']
I want to look up entries in the search array based on each key array element and create a resultant array which looks like this:
[[key array element, max of the values at index 1 in the search array for that key, max of the values at index 2 in the search array for that key], [...]]
Here is my code for the same:
let resultant = [];
keys.forEach((ele, ind) => {
resultant[ind] = [
ele,
Math.max(searchArray.filter(element => element[0] === ele)),
Math.max(searchArray.filter(element => element[0] === ele))
];
});
Now I am confused about these statements:
Math.max(newSet.filter(element => element[0] === ele)),
Math.max(newSet.filter(element => element[0] === ele))
Because filter returns the entire matching sub-arrays, whereas in the first statement I want the max of the values at index 1, and in the second statement the max of the values at index 2, taken over the entries whose index-0 element is the same as the key I provided.
Here is one simple test case:
search Array: [["A",1,2],["A",12,23],["A",11,23],["A",14,42],["A",71,32],["B",113,42],["B",145,62],["C",91,32],["C",14,222],["C",111,2]]
keys Array: ["A","B","C"]
Output: [["A",71,42],["B",145,62],["C",111,222]]
As you can see, the maxima for each key are mapped onto that key's entry. Can someone help me with this? Is there a better or more optimized algorithm than the one I am using?
You could take a dynamic approach with an object for the wanted keys.
function max(search, keys) {
  const temp = search.reduce((r, [key, ...data]) => {
    if (!r[key]) r[key] = [key, ...data];
    else data.forEach((v, i) => { if (r[key][i + 1] < v) r[key][i + 1] = v; });
    return r;
  }, {});
  return keys.map(key => temp[key]);
}

console.log(max([['#S!', 1, 1], ['#$#', 2, 5], ['#S!', 10, 12], ['#$#', 21, 5]], ['#S!', '#$#']));
console.log(max([["A", 1, 2], ["A", 12, 23], ["A", 11, 23], ["A", 14, 42], ["A", 71, 32], ["B", 113, 42], ["B", 145, 62], ["C", 91, 32], ["C", 14, 222], ["C", 111, 2]], ["A", "B", "C"]));
You can group the entries with filter() and take the per-column maximum with Math.max(). (Flattening and sorting would mix the two numeric columns together and compare the numbers as strings, so each column is handled separately here.)
let searchArray = [["A", 1, 2], ["A", 12, 23], ["A", 11, 23], ["A", 14, 42], ["A", 71, 32], ["B", 113, 42], ["B", 145, 62], ["C", 91, 32], ["C", 14, 222], ["C", 111, 2]];
let keysArray = ["A", "B", "C"];
let output = [];
keysArray.forEach(item => {
  let group = searchArray.filter(f => f[0] === item);
  // maximum of each numeric column, taken separately
  let max1 = Math.max(...group.map(g => g[1]));
  let max2 = Math.max(...group.map(g => g[2]));
  output.push([item, max1, max2]);
});
console.log(output);

How to transpose an m*n matrix using recursion?

I'm trying to transpose a matrix using recursion. Now, I know that under normal circumstances this isn't a good idea and a nested loop/nested map, or a similar approach is superior, but I need to learn this for educational purposes.
To show that I did my homework, here is the nested loop approach:
const arrMatrix = [
[3, 6, 7, 34],
[6, 3, 5, 2],
[2, 6, 8, 3]
];
const transposedMatrix = [];
for (let i = 0; i < arrMatrix[0].length; i++) {
  const tempCol = [];
  for (let j = 0; j < arrMatrix.length; j++) {
    tempCol.push(arrMatrix[j][i]);
  }
  transposedMatrix.push(tempCol);
}
console.log(transposedMatrix);
Here's another approach using nested maps:
const arrMatrix = [
[3, 6, 7, 34],
[6, 3, 5, 2],
[2, 6, 8, 3]
];
const transposedMatrix = arrMatrix[0].map((_, i) =>
arrMatrix.map((_, j) => arrMatrix[j][i])
);
console.log(transposedMatrix);
Here are some resources that I went through but didn't manage to come up with a solution using them.
If possible, in addition to the algorithm/code, please give me some explanation and resources to learn more about it.
const map = ([head, ...tail], mapper) => tail.length ? [mapper(head), ...map(tail, mapper)] : [mapper(head)];
const transpose = matrix =>
matrix[0].length
? [map(matrix, row => row.shift()), ...transpose(matrix)]
: [];
How it works:
From a given matrix, we always take out the first column (map(matrix, row => row.shift())), then we continue recursively:
[[1, 1, 1], -> [[1, 1], -> [[1], -> [[],
[2, 2, 2], [2, 2], [2], [],
[3, 3, 3]] [3, 3]] [3]] []]
Then the base case is reached: the rows are empty (matrix[0].length is 0, which is falsy) and an empty array gets returned. Now at every step, the column taken out earlier gets added onto that array, and thus it becomes a row:
[[1, 2, 3], <- [[1, 2, 3], <- [[1, 2, 3]] <- []
[1, 2, 3], [1, 2, 3]]
[1, 2, 3]]
Note: This destroys the original array.
const transpose = matrix => {
const row = (x) => x >= matrix[0].length ? [] : [col(x, 0), ...row(x + 1)];
const col = (x, y) => y >= matrix.length ? [] : [matrix[y][x], ...col(x, y + 1)];
return row(0);
};
That version does not mutate the original array. You can take it even a step further so that it is purely functional, but that's a bit overkill:
const transpose = matrix => (
(row, col) => row(row)(col)(0)
)(
row => col => (x) => x >= matrix[0].length ? [] : [col(col)(x, 0), ...row(row)(col)(x + 1)],
col => (x, y) => y >= matrix.length ? [] : [matrix[y][x], ...col(col)(x, y + 1)]
);
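A quick usage sketch for these non-mutating variants (expected output worked out by hand, using the matrix from the question):
const matrix = [
  [3, 6, 7, 34],
  [6, 3, 5, 2],
  [2, 6, 8, 3]
];
console.log(transpose(matrix));
// [[3, 6, 2], [6, 3, 6], [7, 5, 8], [34, 2, 3]]
console.log(matrix); // still [[3, 6, 7, 34], [6, 3, 5, 2], [2, 6, 8, 3]]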
This is very similar to hitmands' solution, and would likely be less performant, but I think it's slightly cleaner to avoid working with column indices:
const head = xs => xs[0]
const tail = xs => xs.slice(1)
const transpose = (m) => head(m).length
? [m.map(head), ...transpose(m.map(tail))]
: []
const matrix = [
[3, 6, 7, 34],
[6, 3, 5, 2],
[2, 6, 8, 3]
]
console .log (
transpose (matrix)
)
This version transposes the first column into a row (via .map(head)) and then recurs on the remaining matrix (via .map(tail)), bottoming out when the first row is empty.
You can inline those helper functions if you choose, so that it looks like this:
const transpose = (m) => m[0].length
? [m.map(xs => xs[0]), ...transpose(m.map(xs => xs.slice(1)))]
: []
..but I wouldn't recommend it. The first version seems more readable, and head and tail are easily reusable.
Update
user633183 suggests an alternative escape condition. It's a good question whether or not it's a better result for ill-formed data, but it's certainly a useful possible variant:
const head = xs => xs[0]
const tail = xs => xs.slice(1)
const empty = xs => xs.length == 0
const transpose = (m) => m.some(empty)
? []
: [m.map(head), ...transpose(m.map(tail))]
(This could also be done with m.every(nonempty) by reversing the consequent and alternative in the conditional expression, but I think it would be slightly harder to read.)
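For reference, that every-based variant might look like this (nonempty being the assumed complement of empty):
const nonempty = xs => xs.length > 0
const transpose = (m) => m.every(nonempty)
  ? [m.map(head), ...transpose(m.map(tail))]
  : []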
I would write it like this,
assuming that all the rows inside the matrix have the same length:
check if there are still columns left to process
create a row from each column at the given index
increment the column index by 1
call transpose with the new index
const transpose = (m, ci = 0) => ci >= m[0].length
? []
: [m.map(r => r[ci]), ...transpose(m, ci + 1)]
;
const matrix = [
[3, 6, 7, 34],
[6, 3, 5, 2],
[2, 6, 8, 3]
];
console.log(
transpose(matrix),
);
I had an idea to write transpose using the Maybe monad. I'll start using functional operations and then refactor to clean up the code -
Dependencies -
const { Just, Nothing } =
require("data.maybe")
const safeHead = (a = []) =>
a.length
? Just(a[0])
: Nothing()
const tail = (a = []) =>
a.slice(1)
Without refactors -
const column = (matrix = []) =>
matrix.reduce
( (r, x) =>
r.chain(a => safeHead(x).map(x => [ ...a, x ]))
, Just([])
)
const transpose = (matrix = []) =>
column(matrix)
.map(col =>
[ col, ...transpose(matrix.map(tail)) ]
)
.getOrElse([])
Refactor column using generic append and lift2 -
const append = (a = [], x) =>
[ ...a, x ]
const lift2 = f =>
(mx, my) =>
mx.chain(x => my.map(y => f(x, y)))
const column = (matrix = []) =>
matrix.reduce
( (r, x) =>
lift2(append)(r, safeHead(x))
, Just([])
)
const transpose = (matrix = []) =>
column(matrix)
.map(col =>
[ col, ...transpose(matrix.map(tail)) ]
)
.getOrElse([])
Refactor column again using generic transducer mapReduce -
const mapReduce = (map, reduce) =>
(r, x) => reduce(r, map(x))
const column = (matrix = []) =>
matrix.reduce
( mapReduce(safeHead, lift2(append))
, Just([])
)
const transpose = (matrix = []) =>
column(matrix)
.map(col =>
[ col, ...transpose(matrix.map(tail)) ]
)
.getOrElse([])
transpose stays the same in each refactoring step. It produces the following outputs -
transpose
( [ [ 1, 2, 3, 4 ]
, [ 5, 6, 7, 8 ]
, [ 9, 10, 11, 12 ]
]
)
// [ [ 1, 5, 9 ]
// , [ 2, 6, 10 ]
// , [ 3, 7, 11 ]
// , [ 4, 8, 12 ]
// ]
transpose
( [ [ 1, 2, 3, 4 ]
, [ 5 ]
, [ 9, 10, 11, 12 ]
]
)
// [ [ 1, 5, 9 ] ]

Multidimensional default array

I've got two multidimensional arrays:
const defaultData = [
["ad", 0],
["ae", 0],
["af", 0]
]
const data = [
["az", 20],
["ad", 50]
]
The desired result is:
const expected = [
["ad", 50],
["ae", 0],
["af", 0]
]
Here az is ignored because it's not in defaultData, so it's not valid.
ad is valid, and therefore the 0 is overwritten with the value of 50.
How would I best produce this result? Thanks.
Please assume neither array is sorted. The expected result need not be sorted either.
Either vanilla JS or Lodash is fine.
Convert the data to an object (dataByKey) with _.fromPairs(). Map the defaultData, and check if the "key" (index 0) exists in dataByKey. If it does, create a new array with the value from dataByKey; if not, return the original array.
const defaultData = [
["ad", 0],
["ae", 0],
["af", 0]
]
const data = [
["az", 20],
["ad", 50]
]
const dataByKey = _.fromPairs(data)
const result = defaultData.map(o => o[0] in dataByKey ? [o[0], dataByKey[o[0]]] : o)
console.log(result)
<script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
Use map to iterate over the defaultData, and find to match each iterated array against data:
const defaultData = [['ad', 0],['ae', 0],['af', 0]];
const data = [['az', 20],['ad', 50]];
const expected = defaultData.map(arr => {
// Destructure the first element from `arr`
// and assign it to `code`
const [ code ] = arr;
// `find` the array in `data` where the first element
// matches `code`
const found = data.find(el => el[0] === code);
// If `found` is undefined return the whole array
// otherwise return a new array with the code and the value
// from the found array
return found ? [code, found[1]] : arr;
});
console.log(expected);
You could first create an object from data and then use the map method on defaultData to create a new array, taking the value from that object by key.
const defaultData = [
["ad", 0],
["ae", 0],
["af", 0]
]
const data = [
["az", 20],
["ad", 50]
].reduce((r, [key, value]) => {
r[key] = (r[key] || 0) + value;
return r;
}, {})
const expected = defaultData.map(([key, value]) => {
return [key, key in data ? data[key] : value]
})
console.log(expected)
Try this out. I am using map() to create a new array and find() to look up elements in the second array.
I compare the first element of each sub-array from the default array; if any item from the second array has the same element at its first index, I return it, and if none matches, nothing is returned.
If I have a matched item, I create a new array from its elements.
If I don't have a matched item, I create a new one from the elements of the default item.
const defaultData = [
["ad", 0],
["ae", 0],
["af", 0]
]
const data = [
["az", 20],
["ad", 50]
]
let newData = defaultData.map(eachDefault => {
  let found = data.find(eachData => {
    if (eachData[0] == eachDefault[0]) {
      return true
    }
  })
  if (found) {
    return [...found]
  }
  return [...eachDefault]
})
console.log(newData)
You can use map/reduce like this:
const defaultData = [
["ad", 0],
["ae", 0],
["af", 0]
]
const data = [
["az", 20],
["ad", 50]
]
const result = defaultData.map(arr => data.reduce(([dKey, dVal], [key, val]) => [ dKey, dKey === key ? val : dVal ], arr));
console.log(result);

Skipping comparison arrays when they are empty

I have a case where I have to compare some arrays and find the common elements between all of them. The following code works fine as long as all the arrays are loaded. But what if one (or even five) of the arrays is still empty and not loaded?
In case of having only two arrays I could do something like
if ((arr1.length > 0) && (arr2.length === 0)) {
  newArr = arr1;
}
but it is going to be a big conditional snippet to check all 6 arrays this way! How can I fix this so the code runs the comparison only against arrays that are loaded and skips the array(s) that are empty?
let newArr = [];
function common(arr1, arr2, arr3, arr4, arr5, arr6) {
  newArr = arr1.filter(function(e) {
    return arr2.indexOf(e) > -1 &&
      arr3.indexOf(e) > -1 &&
      arr4.indexOf(e) > -1 &&
      arr5.indexOf(e) > -1 &&
      arr6.indexOf(e) > -1;
  });
}
common( [1, 2, 6, 5, 9,8],
[1, 2, 3, 6, 5, 9,8],
[6, 5, 4, 5,8],
[8, 2, 1, 6, 4],
[8, 2, 1, 6, 4],
//[8]
[]
);
$('div').text(newArr);
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<p> Returns Nothing Because 6th Array is Empty</p>
<div></div>
You are doing it the hard way. Below is a much simpler approach, in the following steps:
Use rest parameters instead of arr1, arr2, ....
Remove the empty arrays using filter(), e.g. arrays.filter(x => x.length).
Create an object whose keys are the numbers and whose values are their counts.
Use forEach() on the array of arrays.
Increment the counts by applying forEach() on each array.
At last, keep those keys of the object whose count equals the number of non-empty arrays.
function common(...arrays) {
  let obj = {};
  let temp = arrays.filter(x => x.length);
  // the below line will check if all the arrays are empty
  if (!temp.length) console.log("All empty")
  temp.forEach(arr => {
    arr.forEach(x => {
      obj[x] = obj[x] + 1 || 1
    })
  })
  //console.log(temp)
  //console.log(Object.keys(obj))
  return Object.keys(obj).filter(a => obj[a] === temp.length).map(x => +x || x);
}
let newArr = common( [1, 2, 6, 5, 9,8],
[1, 2, 3, 6, 5, 9,8],
[6, 5, 4, 5,8,1],
[8, 2, 1, 6, 4],
[8, 2, 1, 6, 4],
//[8]
);
console.log(newArr)
Since your code is written in ES6, there is a very ES6-way to go about your problem.
What you basically want is to check all items in the first array against n number of arrays provided in the function's parameters/arguments and only return the item that is present in all arrays.
// Use ...arrs to store all subsequent arrays in the parameter list
function common(arr1, ...arrs) {
  return arr1.map(item => {
    const nonEmptyArrs = arrs.filter(arr => arr.length);
    // Go through all OTHER arrays and see if your item is present in them
    // (map rather than forEach, so we actually get back an array of booleans)
    const isItemFound = nonEmptyArrs.map(arr => {
      return arr.indexOf(item) > -1;
    });
    // Now filter the array to remove all `false` entries
    const itemFoundCount = isItemFound.filter(i => i).length;
    // Only return item if it is found in ALL arrays
    // i.e. itemFoundCount must match the number of arrays
    if (itemFoundCount === nonEmptyArrs.length) {
      return item;
    }
  })
  .filter(item => item); // Remove undefined entries
}
See proof-of-concept below:
function common(arr1, ...arrs) {
  return arr1.map(item => {
    const nonEmptyArrs = arrs.filter(arr => arr.length);
    const itemFoundCount = nonEmptyArrs
      .map(arr => arr.includes(item))
      .filter(i => i)
      .length;
    // Only return item if it is found in ALL arrays
    if (itemFoundCount === nonEmptyArrs.length) {
      return item;
    }
  }).filter(item => item);
}
const newArr = common([1, 2, 6, 5, 9, 8], [1, 2, 3, 6, 5, 9, 8], [6, 5, 4, 5, 8], [8, 2, 1, 6, 4], [8, 2, 1, 6, 4], []);
console.log(newArr);
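The same idea (ignore arrays that are still empty, then keep the values that appear in every remaining array) can also be written more compactly with every() and includes(); a possible sketch:
function common(...arrays) {
  // drop arrays that are still empty / not loaded
  const nonEmpty = arrays.filter(arr => arr.length > 0);
  if (nonEmpty.length === 0) return [];
  const [first, ...rest] = nonEmpty;
  // keep values of the first non-empty array that appear in every other one
  return first.filter(v => rest.every(arr => arr.includes(v)));
}
console.log(common([1, 2, 6, 5, 9, 8], [1, 2, 3, 6, 5, 9, 8], [6, 5, 4, 5, 8], [8, 2, 1, 6, 4], [8, 2, 1, 6, 4], []));
// [6, 8]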

How can I turn an array into an object with the name being the first value in the array and the properties an array of the subarrays?

What is the best way to take a multidimensional array with an unknown list of elements and group it into an object to remove repeated values in the first element of the subarray:
For example, I'd like to turn this:
const arr = [[a, 1, 4], [b, 3, 4], [c, 1, 7], [a, 2, 5], [c, 3, 5]]
Into this:
arrResult = {a:[[1, 4],[2, 5]], b:[[3, 4]], c:[[1, 7],[3, 5]]}
I thought about sorting this and then splitting it or running some kind of reduce operation but couldn't figure out exactly how to accomplish it.
You only need to use reduce (and slice), no need for sorting or splitting
var arr = [['a', 1, 4], ['b', 3, 4], ['c', 1, 7], ['a', 2, 5], ['c', 3, 5]];
var arrResult = arr.reduce((result, item) => {
var obj = result[item[0]] = result[item[0]] || [];
obj.push(item.slice(1));
return result;
}, {});
console.log(JSON.stringify(arrResult));
You can use reduce like this:
const arr = [["a", 1, 4], ["b", 3, 4], ["c", 1, 7], ["a", 2, 5], ["c", 3, 5]];
var result = arr.reduce((obj, sub) => {
var key = sub[0]; // key is the first item of the sub-array
if(obj[key]) obj[key].push(sub.slice(1)); // if the there is already an array for that key then push this sub-array (sliced from the index 1) to it
else obj[key] = [sub.slice(1)]; // otherwise create a new array that initially contain the sliced sub-array
return obj;
}, {});
console.log(result);
You could use reduce and destructuring like this:
const arr = [['a', 1, 4],['b', 3, 4],['c', 1, 7],['a', 2, 5],['c', 3, 5]]
function sub(arr) {
return arr.reduce((obj, [key, ...value]) => {
obj[key] ? obj[key].push(value) : obj[key] = [value]
return obj
}, {})
}
console.log(sub(arr));
I like this solution better because it abstracts away the collation but allows you to control how items are collated using a higher-order function.
Notice how we don't talk about the kind or structure of data at all in the collateBy function – this keeps our function generic and allows for it to work on data of any shape.
// generic collation procedure
const collateBy = f => g => xs => {
return xs.reduce((m,x) => {
let v = f(x)
return m.set(v, g(m.get(v), x))
}, new Map())
}
// generic head/tail functions
const head = ([x,...xs]) => x
const tail = ([x,...xs]) => xs
// collate by first element in an array
const collateByFirst = collateBy (head)
// your custom function, using the collateByFirst collator
// this works much like Array.prototype.reduce
// the first argument is your accumulator, the second argument is your array value
// note the acc=[] seed value used for the initial empty collation
const foo = collateByFirst ((acc=[], xs) => [...acc, tail(xs)])
const arr = [['a', 1, 4], ['b', 3, 4], ['c', 1, 7], ['a', 2, 5], ['c', 3, 5]]
let collation = foo(arr);
console.log(collation.get('a')) // [ [1,4], [2,5] ]
console.log(collation.get('b')) // [ [3,4] ]
console.log(collation.get('c')) // [ [1,7], [3,5] ]
Of course you could write it all in one line if you didn't want to give names to the intermediate functions
let collation = collateBy (head) ((acc=[], xs) => [...acc, tail(xs)]) (arr)
console.log(collation.get('a')) // [ [1,4], [2,5] ]
Lastly, if you want the object, simply convert the Map type to an Object
let obj = Array.from(collation).reduce((acc, [k,v]) =>
Object.assign(acc, { [k]: v }), {})
console.log(obj)
// { a: [ [1,4], [2,5] ],
// b: [ [3,4] ],
// c: [ [1,7], [3,5] ] }
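On newer runtimes, Object.fromEntries accepts the Map directly, so the same conversion can be written as a one-liner (an optional shortcut):
let obj = Object.fromEntries(collation)
console.log(obj)
// { a: [ [1,4], [2,5] ], b: [ [3,4] ], c: [ [1,7], [3,5] ] }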
Higher order functions demonstrate how powerful generic procedures like collateBy can be. Here's another example using the exact same collateBy procedure but performing a very different collation:
const collateBy = f => g => xs => {
return xs.reduce((m,x) => {
let v = f(x)
return m.set(v, g(m.get(v), x))
}, new Map())
}
const collateEvenOdd = collateBy (x => x % 2 === 0 ? 'even' : 'odd')
const sumEvenOdd = collateEvenOdd ((a=0, b) => a + b)
let data = [2,3,4,5,6,7]
let collation = sumEvenOdd (data)
let even = collation.get('even')
let odd = collation.get('odd')
console.log('even sum', even) // 2 + 4 + 6 === 12
console.log('odd sum', odd) // 3 + 5 + 7 === 15
