One of my Node.js libraries is returning some data I need in the wrong format, like this:
{"a":["1","2"],"b":["3","4"],"c":["5","6"]}
(note that the values don't matter)
but I need to loop over this data in a way that lets me find the b for the a that has a certain value (in this case e.g. for '2' I would need '4'). All other parts of my program so far use arrays like this:
[{"a":"1", "b":"3", "c":"5"}, {"a":"2", "b":"4", "c":"6"}]
so that structure would be my preferred approach.
Also note that the number of elements in a is always the same as in b and c, but that number is variable.
So what would be the "best" way to accomplish this in ES6/JS (before I start messing with for-loops)?
If you are looking to transform an object like
{"a":["1","2"],"b":["3","4"],"c":["5","6"]}
into an array like
[{"a":"1","b":"3","c":"5"},{"a":"2","b":"4","c":"6"}]
something like this is the simplest way I can think of:
function formatData (data) {
  return Object.keys(data).reduce((arr, key) => {
    data[key].forEach((value, i) => {
      const iObj = arr[i] || (arr[i] = {});
      iObj[key] = value;
    });
    return arr;
  }, []);
}
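With that in place, the original lookup ("find my b for the a that equals '2'") becomes a one-liner; the data below is the sample from the question:
// transform the column-wise object into row objects, then look up by `a`
const rows = formatData({"a": ["1", "2"], "b": ["3", "4"], "c": ["5", "6"]});
const match = rows.find(row => row.a === "2");
console.log(match && match.b); // "4"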
I am learning functional programming in JavaScript and using Ramda. I have this object:
var fieldvalues = {
  name: "hello there",
  mobile: "1234",
  meta: {status: "new"},
  comments: [
    {user: "john", comment: "hi"},
    {user: "ram", comment: "hello"}
  ]
};
to be converted like this:
{
  "comments.0.comment": "hi",
  "comments.0.user": "john",
  "comments.1.comment": "hello",
  "comments.1.user": "ram",
  "meta.status": "new",
  "mobile": "1234",
  "name": "hello there"
}
I have tried this code using Ramda, which works:
var _toDotted = function(acc, obj) {
  var key = obj[0], val = obj[1];
  if (typeof(val) != "object") {   // Matching name, mobile etc
    acc[key] = val;
    return acc;
  }
  if (!Array.isArray(val)) {       // Matching meta
    for (var k in val)
      acc[key + "." + k] = val[k];
    return acc;
  }
  // Matching comments
  for (var idx in val) {
    for (var k2 in val[idx]) {
      acc[key + "." + idx + "." + k2] = val[idx][k2];
    }
  }
  return acc;
};

// var toDotted = R.pipe(R.toPairs, R.reduce(_toDotted, {}));
var toDotted = R.pipe(R.toPairs, R.curry(function(obj) {
  return R.reduce(_toDotted, {}, obj);
}));
console.log(toDotted(fieldvalues));
However, I am not sure this is close to a functional programming style. It just seems to be imperative code wrapped in a few functional calls.
Any ideas or pointers on how I can write this in a more functional way?
UPDATE 1
Updated the code to fix a problem where old data was being carried along.
Thanks
A functional approach would
use recursion to deal with arbitrarily shaped data
use multiple tiny functions as building blocks
use pattern matching on the data to choose the computation on a case-by-case basis
Whether you pass through a mutable object as an accumulator (for performance) or copy properties around (for purity) doesn't really matter, as long as the end result (on your public API) is immutable. Actually there's a nice third way that you already used: association lists (key-value pairs), which will simplify dealing with the object structure in Ramda.
const primitive = (keys, val) => [R.pair(keys.join("."), val)];
const array = (keys, arr) => R.addIndex(R.chain)((v, i) => dot(R.append(i, keys), v), arr);
const object = (keys, obj) => R.chain(([k, v]) => dot(R.append(k, keys), v), R.toPairs(obj));
const dot = (keys, val) =>
  (Object(val) !== val
    ? primitive
    : Array.isArray(val)
      ? array
      : object
  )(keys, val);
const toDotted = x => R.fromPairs(dot([], x))
As an alternative to concatenating the keys and passing them down as arguments, you can also map R.prepend(key) over the result of each dot call.
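A minimal sketch of that alternative (my reading of the suggestion, not the author's exact code): each dot call returns pairs whose keys are arrays of path segments, the caller prepends its own segment with R.prepend, and the segments are only joined with "." at the very end.
// keys are built up as arrays of segments and joined only in toDotted
const dot = val =>
  Object(val) !== val
    ? [[[], val]]                 // primitive: empty path
    : Array.isArray(val)
      ? R.addIndex(R.chain)(
          (v, i) => R.map(([path, x]) => [R.prepend(i, path), x], dot(v)),
          val)
      : R.chain(
          ([k, v]) => R.map(([path, x]) => [R.prepend(k, path), x], dot(v)),
          R.toPairs(val));
const toDotted = x =>
  R.fromPairs(R.map(([path, v]) => [path.join("."), v], dot(x)));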
Your solution is hard-coded to have inherent knowledge of the data structure (the nested for loops). A better solution would know nothing about the input data and still give you the expected result.
Either way, this is a pretty weird problem, but I was particularly bored so I figured I'd give it a shot. I mostly find this a completely pointless exercise because I cannot picture a scenario where the expected output could ever be better than the input.
This isn't a Ramda solution because there's no reason for it to be. You should understand the solution as a simple recursive procedure. If you can understand it, converting it to a sugary Ramda solution is trivial.
// determine if input is an object
const isObject = x => Object(x) === x

// flatten object
const oflatten = (data) => {
  let loop = (namespace, acc, data) => {
    if (Array.isArray(data))
      data.forEach((v, k) =>
        loop(namespace.concat([k]), acc, v))
    else if (isObject(data))
      Object.keys(data).forEach(k =>
        loop(namespace.concat([k]), acc, data[k]))
    else
      Object.assign(acc, {[namespace.join('.')]: data})
    return acc
  }
  return loop([], {}, data)
}
// example data
var fieldvalues = {
  name: "hello there",
  mobile: "1234",
  meta: {status: "new"},
  comments: [
    {user: "john", comment: "hi"},
    {user: "ram", comment: "hello"}
  ]
}

// show me the money ...
console.log(oflatten(fieldvalues))
Total function
oflatten is reasonably robust and will work on any input, even when the input is an array, a primitive value, or undefined. You can be certain you will always get an object as output.
// array input example
console.log(oflatten(['a', 'b', 'c']))
// {
// "0": "a",
// "1": "b",
// "2": "c"
// }
// primitive value example
console.log(oflatten(5))
// {
// "": 5
// }
// undefined example
console.log(oflatten())
// {
// "": undefined
// }
How it works …
It takes an input of any kind, then …
It starts the loop with two state variables: namespace and acc. acc is your return value and is always initialized with an empty object, {}. namespace keeps track of the nesting keys and is always initialized with an empty array, [].
Notice I don't use a String for the namespace, because a root namespace of '' prepended to any key would always give you '.somekey'. That is not the case when you use a root namespace of [].
Using the same example, [].concat(['somekey']).join('.') will give you the proper key, 'somekey'.
Similarly, ['meta'].concat(['status']).join('.') will give you 'meta.status'. See? Using an array for the key computation will make this a lot easier.
The loop has a third parameter, data, the current value we are processing. In the first loop iteration, data will always be the original input.
We do a simple case analysis on data's type. This is necessary because JavaScript doesn't have pattern matching. Just because we're using an if/else doesn't mean it's not a functional style.
If data is an Array, we want to iterate through the array and recursively call loop on each of the child values. We pass along the value's key as namespace.concat([k]), which will become the new namespace for the nested call. Notice that nothing gets assigned to acc at this point. We only want to assign to acc when we have reached a value; until then, we're just building up the namespace.
If the data is an Object, we iterate through it just like we did with an Array. There's a separate case analysis for this because the looping syntax for objects is slightly different from arrays. Otherwise, it's doing the exact same thing.
If the data is neither an Array nor an Object, we've reached a value. At this point we can assign the data value to acc using the built-up namespace as the key. Because we're done building the namespace for this key, all we have to do to compute the final key is namespace.join('.'), and everything works out.
The resulting object will always have as many pairs as values that were found in the original object.
I'm playing with Immutable.js. I came across this problem and I wasn't able to find a nice solution: I have two lists, A and B, and I want to filter out some elements from the list A using a custom predicate function and add them to the list B. Both are immutable.
The obvious problem here is that the return value of A.filter(predicate) is a new updated instance and the removed elements are lost. I could first add those filtered elements:
B = B.concat(A.filterNot(predicate));
A = A.filter(predicate);
That would mean cycling over the original list twice. The only way around this is to add a side effect to the filtering function:
let tmp = [];
B = B.filter(el => {
  if (!predicate(el)) {
    tmp.push(el);
    return false;
  } else return true;
});
A = A.concat(tmp);
That however looks a bit hacky. I don't think the filter method is supposed to be used this way. Is there a better solution?
Assuming here that B is the list you want to filter and A gets the filtered-out elements concatenated to it (like your second code example), I think this is the best you can do:
A = A.withMutations((list) => {
  B = B.filter((el) => {
    if (!predicate(el)) {
      list.push(el);
      return false;
    } else return true;
  });
  return list;
});
or, arguably more readable:
A = A.withMutations((list) => {
  B = B.filter((el) => !predicate(el) ? !list.push(el) : true);
  return list;
});
If you find yourself moving items from one list to another often, it is probably best to write a method, transferTo, that does the above; see the sketch after the note below.
From withMutations:
Note: Not all methods can be used on a mutable collection or within withMutations! Only set, push, pop, shift, unshift and merge may be used mutatively.
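A sketch of such a transferTo helper (the name and signature are illustrative, not part of Immutable.js): it moves every element of source that fails predicate into target, touching each element once.
// illustrative helper, not an Immutable.js method
const transferTo = (source, target, predicate) => {
  let kept;
  const grown = target.withMutations(list => {
    kept = source.filter(el => {
      if (!predicate(el)) {
        list.push(el); // push is one of the methods allowed inside withMutations
        return false;
      }
      return true;
    });
  });
  return [kept, grown];
};
// usage with the names from the question:
// [B, A] = transferTo(B, A, predicate);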
The array is this:
[{name: 'name1', id: 'id1', val: '001'},
 ...
 {name: 'name10', id: 'id10', val: '010'}]
We want to store in the database as a long text string:
name1,id1,001;...;name10,id10,010
with values within a record separated by commas and records separated by semicolons.
Is there any convenient join or something else to achieve it? (I guess there must be something better than a for loop.)
function joinValues(arr, j1, j2) {
  return arr.map(function (o) {
    return Object.keys(o).map(function (k) {
      return o[k];
    }).join(j1);
  }).join(j2);
}

var obj = [{a: 1, b: 2}, {a: 'x', b: 'y'}];
joinValues(obj, ',', ';'); // => "1,2;x,y"
array.map(function (o) {
  var values = Object.keys(o).map(function (key) {
    return o[key];
  });
  return values.join(',');
}).reduce(function (a, b) {
  return a + ';' + b;
});
Beware there might be compat issues depending on your platform (map, reduce, Object.keys not available everywhere yet!)
Also, take into account that property order in regular objects is not guaranteed. With this approach, you could theoretically end up with name1,id1,001;...;id10,name10,010. That might be an issue when mapping the string back to objects.
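If that matters, a small variation of joinValues above can pin the key order explicitly (the key list here is an assumption based on the question's data):
// the caller supplies the key order, so the columns are stable regardless of object key order
function joinValuesOrdered(arr, keys, j1, j2) {
  return arr.map(function (o) {
    return keys.map(function (k) {
      return o[k];
    }).join(j1);
  }).join(j2);
}
// e.g. joinValuesOrdered(data, ['name', 'id', 'val'], ',', ';')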
I'm afraid there isn't a built-in language feature for that, but if you can use ES6, it can look quite elegant:
Object.values = obj => Object.keys(obj).map(key => obj[key]);
// so to convert your data
data.map(o => Object.values(o).join(',')).join(';');
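For example, with an abbreviated version of the question's data:
const data = [
  {name: 'name1', id: 'id1', val: '001'},
  {name: 'name10', id: 'id10', val: '010'}
];
data.map(o => Object.values(o).join(',')).join(';');
// => "name1,id1,001;name10,id10,010"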
Here is a first sample of code that works as intended. In the rest of the code this is used as a filter and will match two items from myids, the two whose objectId matches tWOsQhsP2Z or sStYrIU6lJ:
return myids.objectId === "tWOsQhsP2Z" || myids.objectId === "sStYrIU6lJ";
Because I need to pass an arbitrary number of ids from an array, I'm trying to refactor the code like so:
return myids.objectId === ("tWOsQhsP2Z" || "sStYrIU6lJ");
The problem with this new code is that the filter using this return value now returns only one item, the one whose objectId is tWOsQhsP2Z (the || expression evaluates to its first truthy operand, so only "tWOsQhsP2Z" is ever compared).
Do you know how I could refactor this second version so that I keep a single reference to myids.objectId but match ALL of the objectId values?
Sounds like you need something like underscore.js's contains() method; it would make things a lot simpler all round.
e.g.
return _.contains(arrayOfIds, myids.objectId);
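If underscore isn't already in the project, a plain ES2016+ sketch does the same thing (items and arrayOfIds are illustrative names, since the question doesn't show the surrounding filter call):
// assumed shape: items is the array being filtered, arrayOfIds holds the ids to match
const arrayOfIds = ["tWOsQhsP2Z", "sStYrIU6lJ"];
const matches = items.filter(myids => arrayOfIds.includes(myids.objectId));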
You can use a switch:
switch (myids.objectId) {
  case "tWOsQhsP2Z":
  case "sStYrIU6lJ":
    return true;
}
return false;
If you have an array of values to search, and the list is long and/or you're searching frequently, you can convert the list to an object and then do a property lookup. It's much more efficient than searching through an array.
For a simple constant case, your example would look like:
return myids.objectId in {"tWOsQhsP2Z": 1, "sStYrIU6lJ": 1};
If you start with an array that's server-generated or dynamic:
var knownIds = [ ... ];
then you can convert that to a map:
var idMap = knownIds.reduce(function(m, v) {
  m[v] = 1;
  return m;
}, {});
Now your lookup would be simply:
return myids.objectId in idMap;
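Putting it together inside the filter (records is an illustrative name for the array being filtered, which the question doesn't show):
var matching = records.filter(function (myids) {
  return myids.objectId in idMap;
});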
So I'm trying to think of a better way to do this with underscore:
state.attributes = _.reduce(list, function(memo, item) {
  memo['neighborhood'] = (memo['neighborhood'] || []);
  var isNew = true;
  _.each(memo['neighborhood'], function(hood) {
    if (hood.name === item.data.neighborhood) {
      hood.count++;
      isNew = false;
    }
  });
  if (isNew) {
    memo['neighborhood'].push({name: item.data.neighborhood, count: 1});
  }
  return memo;
});
I would like to combine the various names of the list into a list of unique names with a count of how many times each unique name occurs. It seems like exactly the kind of problem underscore was designed to solve, yet the best solution I could think of seems less than elegant.
I'm not an underscore.js user, but I guess _.groupBy() suits this scenario:
var attributes = _.groupBy(list, function (item) {
  return item.data.neighborhood;
});
It doesn't return an array in the exact shape you want, but it contains all the information you need. In attributes["foo"] you have all the items whose neighborhood property is "foo", and therefore attributes["foo"].length gives you their count.
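If you want the name-to-count map directly, one way to derive it from the grouped result is with _.mapObject (underscore 1.8+); this is a sketch, not the only option:
// turn { name: [items...] } into { name: count }
var counts = _.mapObject(_.groupBy(list, function (item) {
  return item.data.neighborhood;
}), function (items) {
  return items.length;
});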
Maybe there is a better underscore.js way, but you can already apply other optimizations:
Instead of using an array to keep track of the name and the count, use a name: count map. This can be easily done with an object:
state.attributes = _.reduce(list, function(memo, item) {
  var n = item.data.neighborhood;
  memo['neighborhood'] = (memo['neighborhood'] || {});
  memo['neighborhood'][n] = memo['neighborhood'][n] + 1 || 1;
  return memo;
}, {});
This works because, if item.data.neighborhood is not in the map yet, memo['neighborhood'][item.data.neighborhood] will be undefined, and undefined + 1 is NaN.
Since NaN is falsy, the expression NaN || 1 results in 1.
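A tiny worked example of that fallback:
var counts = {};
counts['foo'] = counts['foo'] + 1 || 1; // undefined + 1 -> NaN, NaN || 1 -> 1
counts['foo'] = counts['foo'] + 1 || 1; // 1 + 1 -> 2, so counts is { foo: 2 }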