JAWR i18n: unit testing JavaScript when using messages

Our application currently shares messages between the Java and JavaScript side. They are stored as resource bundles on the classpath, and we have a custom controller that returns all the messages as JSON. The client-side code looks like this:
// This calls the controller to get all the messages
var messages = MessageBundle();
var text = messages.get('my.message', 1);
This is great because we can mock "messages" in our unit tests.
I want to start using JAWR for this, since we already use it for other things. The problem is that JAWR generates a nested JavaScript object, which is used like this:
var text = messages.my.message(1);
This means the code cannot be unit tested anymore unless the unit tests also define a global "messages" variable with the right nested objects. Is there a way around this? Any idea how to extend JAWR to make this unit-testable?
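For reference, this is a hedged sketch of the kind of global stub each test would otherwise have to set up, mirroring the nested shape JAWR generates (the stub itself is hypothetical, not part of the application code):
// Hypothetical test-only stub of the JAWR-generated global object.
window.messages = {
  my: {
    message: function (n) { return 'stubbed my.message with ' + n; }
  }
};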
Currently my workaround is:
function messages() {
  var args = Array.prototype.slice.call(arguments);
  var messageId = args.shift();
  var messageFunc = window.messages;
  messageId.split('.').forEach(function (part) {
    messageFunc = messageFunc[part];
  });
  // Spread the remaining arguments to the JAWR-generated message function
  return messageFunc.apply(null, args);
}
// Same syntax as the old one, but uses the JAWR object behind the scenes
// This function is easy to mock for a unit test
var text = messages('my.message', 1);
Thanks for any ideas!

Maybe the following samples can help you.
1)
function messagesTester(funcPath, id) {
  var args = funcPath.split('.'), root = window.messages;
  for (var i = 0; i < args.length; i++) root = root[args[i]];
  return root(id);
  // or, if the message function takes more than one parameter, for example:
  // return root.apply(null, Array.prototype.slice.call(arguments, 1));
}
var text = messagesTester('my.message', 1);
2)
function messagesTester(funcPath) {
  var args = funcPath.split('.'), root = window.messages;
  for (var i = 0; i < args.length; i++) root = root[args[i]];
  return root;
}
// var text = messagesTester('my.message')( /* arguments list */ );
var text = messagesTester('my.message')(1);
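In a unit test the wrapper itself can then be replaced with a stub, so no JAWR-generated object is needed at all. A minimal sketch (not from the original post), assuming a Jasmine-style test runner:
describe('code that uses messages', function () {
  beforeEach(function () {
    // Replace the global wrapper; the real JAWR object is never touched.
    window.messagesTester = function (funcPath, id) {
      return '[' + funcPath + ':' + id + ']';
    };
  });
  it('renders the translated text', function () {
    expect(messagesTester('my.message', 1)).toBe('[my.message:1]');
  });
});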

Related

Issue with pulling JSON data in SuiteScript - NetSuite

I am trying to see if a value in the "apply" sublist for customer payment data has changed, and to take some action based on it.
My SuiteScript is as follows:
define(['N/record', 'N/https'],
  function (record, https) {
    function afterSubmit(context) {
      var oldRec = context.oldRecord;
      log.debug({ title: 'oldRec ', details: oldRec });
      // This log shows that the JSON has an attribute called "sublists",
      // which contains "apply", which has all the applied payments, e.g.:
      // {"id":"1234", "type":"customerpayment", "fields":{all the fields},
      //  "sublists": {"apply": {"line 1"...}}}
      var oldRecSublists = oldRec.sublists;
      log.debug({ title: 'oldRecApply ', details: oldRecSublists });
      // This returns empty or null even though there is data
    }
    return { afterSubmit: afterSubmit };
  });
What am I doing wrong here?
Basically, what I am trying to achieve is to compare context.oldRecord.sublists.apply and context.newRecord.sublists.apply to find out whether the amount has changed or not.
Is there a better way to do this in SuiteScript 2.0?
Thanks in advance!
Part of what is going on here is that you seem to be spelunking the NS data structure based on what you see in the log output; you are not using the NS API at all.
When you send the NS object to the log function, I believe it goes through a custom JSON.stringify process, so if you want to just inspect values you can do:
var oldRecObj = JSON.parse(JSON.stringify(oldRec));
Now oldRecObj can be inspected as though it were a plain object, but you won't be able to manipulate it at all.
You should be using the NS schema browser and referring to the help docs for operations on N/record.
A snippet I often use for dealing with sublists is:
function iter(rec, listName, cb) {
  var lim = rec.getLineCount({ sublistId: listName });
  var i = 0;
  var getV = function (fld) {
    return rec.getSublistValue({ sublistId: listName, fieldId: fld, line: i });
  };
  var setV = function (fld, val) {
    rec.setSublistValue({ sublistId: listName, fieldId: fld, line: i, value: val });
  };
  for (; i < lim; i++) {
    cb(i, getV, setV);
  }
}
and then:
iter(oldRec, 'apply', function (idx, getV, setV) {
  var oldApplied = getV('applied');
});
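To compare the old and new records as asked, one hedged sketch (not from the original answer) is to capture the old values first and then walk the new record. The field id 'amount' is an assumption here; confirm the exact id in the records browser.
function afterSubmit(context) {
  var oldAmounts = [];
  iter(context.oldRecord, 'apply', function (idx, getV) {
    oldAmounts[idx] = getV('amount'); // 'amount' is assumed; check the records browser
  });
  iter(context.newRecord, 'apply', function (idx, getV) {
    if (getV('amount') !== oldAmounts[idx]) {
      log.debug({ title: 'apply line ' + idx, details: 'amount changed' });
    }
  });
}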

flatbuffers is not defined

I'm writing a Node.js application that uses Google FlatBuffers.
I installed flatc on my MacBook Pro and compiled the following schema:
namespace MyAlcoholist;

table Drink {
  drink_type_name: string;
  drink_company_name: string;
  drink_brand_name: string;
  drink_flavor_type_name: string;
  liquid_color_type_name: string;
  liquid_color_is_transparent: bool;
  alcohol_vol: float;
  calories_for_100g: uint;
  global_image_id: ulong;
  drink_flavor_id: ulong;
}

table Drinks { drinks: [Drink]; }
root_type Drinks;
The schema file is called drink.fbs and it generated a JavaScript file called drink_generated.js.
I include this file in my Node.js application and add data to it using the following code. This is my flatBufferUtil.js utility file:
var flatbuffers = require('../js/flatbuffers').flatbuffers;
var builder = new flatbuffers.Builder();
var drinks = require('../fbs/drinks_generated').MyAlcoholist; // Generated by `flatc`.

function drinkArrayToBuffer(drinkArray) {
  var drinksVectArray = [];
  drinkArray.forEach(function (element, index, array) {
    var drinkObj = element;
    var drinkBrandName = builder.createString(drinkObj.drink_brand_name);
    var drinkCompanyName = builder.createString(drinkObj.drink_company_name);
    var drinkflavorTypeName = builder.createString(drinkObj.drink_flavor_type_name);
    var drinkTypeName = builder.createString(drinkObj.drink_type_name);
    var liquidColorTypeName = builder.createString(drinkObj.liquid_color_type_name);
    drinks.Drink.startDrink(builder);
    drinks.Drink.addAlcoholVol(builder, drinkObj.alcohol_vol);
    drinks.Drink.addCaloriesFor100g(builder, drinkObj.calories_for_100g);
    drinks.Drink.addDrinkBrandName(builder, drinkBrandName);
    drinks.Drink.addDrinkCompanyName(builder, drinkCompanyName);
    drinks.Drink.addDrinkFlavorId(builder, drinkObj.drink_flavor_id);
    drinks.Drink.addDrinkFlavorTypeName(builder, drinkflavorTypeName);
    drinks.Drink.addDrinkTypeName(builder, drinkTypeName);
    drinks.Drink.addGlobalImageId(builder, drinkObj.global_image_id);
    drinks.Drink.addLiquidColorIsTransparent(builder, drinkObj.is_transparent);
    drinks.Drink.addLiquidColorTypeName(builder, liquidColorTypeName);
    var drink = drinks.Drink.endDrink(builder);
    drinksVectArray.push(drink);
  });
  var drinksVect = drinks.createDrinksVector(builder, drinksVectArray);
  builder.finish(drinksVect);
  var buf = builder.dataBuffer();
  return buf;
}
module.exports.drinkArrayToBuffer = drinkArrayToBuffer;
Now, when I execute this function, it fails with the error "flatbuffers is not defined".
I debugged my code and saw that it fails on the following line:
drinks.Drink.addDrinkFlavorId(builder,drinkObj.drink_flavor_id);
If I step inside the addDrinkFlavorId function, I see this code in drinks_generated.js:
MyAlcoholist.Drink.addDrinkFlavorId = function (builder, drinkFlavorId) {
  builder.addFieldInt64(9, drinkFlavorId, flatbuffers.Long.ZERO);
};
As you can see, it uses flatbuffers.Long.ZERO, but flatbuffers is not defined in that file at all. The compilation did not produce any errors, so what am I missing?
It seems to me like a bug... The generated file appears to be meant to stand alone, independent of the flatbuffers require; however, for the custom flatbuffers.Long class, the default of flatbuffers.Long.ZERO bleeds into the generated file.
While this isn't a solution per se, one workaround is to manually add the flatbuffers require to the generated file. It's ugly, but it might be better than being blocked until a more appropriate answer (or fix) comes around.
// In `drinks_generated.js`
var flatbuffers = require('../js/flatbuffers').flatbuffers;
Note:
The drinks.Drink.addDrinkFlavorId() and drinks.Drink.addGlobalImageId() functions expect flatbuffers.Long values to be passed into them, because those fields were declared as ulong in the schema (.fbs file). So you will need to make sure you are not passing in a plain number.
For example:
var my_long = flatbuffers.Long(100, 0); // low = 100, high = 0
drinks.Drink.addDrinkFlavorId(builder, my_long);
As a result, another possible workaround is to change the datatype of those fields in the schema to avoid using ulong until it becomes clearer what is going on here.
P.S. I am pretty sure the drinks.createDrinksVector call near the end of your snippet should be drinks.Drinks.createDrinksVector.
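For what it's worth, the snippet also hands builder.finish() the bare vector rather than a Drinks root table, even though the schema declares root_type Drinks. A hedged sketch of how the end of drinkArrayToBuffer might look, assuming the standard accessor names flatc generates for the Drinks table (verify them in drinks_generated.js):
// Assumed flatc-generated names for the Drinks table.
var drinksVect = drinks.Drinks.createDrinksVector(builder, drinksVectArray);
drinks.Drinks.startDrinks(builder);
drinks.Drinks.addDrinks(builder, drinksVect);
var root = drinks.Drinks.endDrinks(builder);
builder.finish(root);
return builder.dataBuffer();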

Assemble paginated ajax data in a Bacon FRP stream

I'm learning FRP using Bacon.js, and would like to assemble data from a paginated API in a stream.
The module that uses the data has a consumption API like this:
// UI module, displays unicorns as they arrive
beautifulUnicorns.property.onValue(function(allUnicorns){
console.log("Got "+ allUnicorns.length +" Unicorns");
// ... some real display work
});
The module that assembles the data requests sequential pages from an API and pushes onto the stream every time it gets a new data set:
// beautifulUnicorns module
var curPage = 1
var stream = new Bacon.Bus()
var property = stream.toProperty()
// You have to add an empty subscriber, otherwise future onValues will not receive the initial value.
// https://github.com/baconjs/bacon.js/wiki/FAQ#why-isnt-my-property-updated
property.onValue(function(){})
var allUnicorns = [] // !!! stateful list of all unicorns ever received. Is this idiomatic for FRP?
var getNextPage = function() {
  /* get data for subsequent pages.
     Skipping for clarity */
}
var gotNextPage = function (resp) {
  Array.prototype.push.apply(allUnicorns, resp) // just appends the responses onto the existing array reference
  stream.push(allUnicorns)
  curPage++
  if (curPage <= pageLimit) { getNextPage() }
}
How do I subscribe to the stream in a way that provides me a full list of all unicorns ever received? Is this flatMap or similar? I don't think I need a new stream out of it, but I don't know. I'm sorry, I'm new to the FRP way of thinking. To be clear, assembling the array works, it just feels like I'm not doing the idiomatic thing.
I'm not using jQuery or another Ajax library for this, which is why I'm not using Bacon.fromPromise.
You may also wonder why my consuming module wants the whole set instead of just the incremental update. If it were just appending rows that would be OK, but in my case it's an infinite scroll, and it should draw data only if both (1) data is available and (2) the area is on screen.
This can be done with the .scan() method. You will also need a stream that emits the items of one page at a time; you can create it with Bacon.repeat().
Here is draft code (sorry, not tested):
var itemsPerPage = Bacon.repeat(function(index) {
  var pageNumber = index + 1;
  if (pageNumber < PAGE_LIMIT) {
    return Bacon.fromCallback(function(callback) {
      // your method that talks to the server
      getDataForAPage(pageNumber, callback);
    });
  } else {
    return false;
  }
});

var allItems = itemsPerPage.scan([], function(allItems, itemsFromAPage) {
  return allItems.concat(itemsFromAPage);
});

// Here you go
allItems.onValue(function(allUnicorns) {
  console.log("Got " + allUnicorns.length + " Unicorns");
  // ... some real display work
});
As you noticed, you also won't need the .onValue(function(){}) hack or the external curPage state.
Here is a solution using flatMap and fold. When dealing with the network you have to remember that the data can come back in a different order than you sent the requests in; that's why fold is combined with a sorting map.
var pages = Bacon.fromArray([1, 2, 3, 4, 5])

var requests = pages.flatMap(function(page) {
  return doAjax(page)
    .map(function(value) {
      return {
        page: page,
        value: value
      }
    })
}).log("Data received")

var allData = requests.fold([], function(arr, data) {
  return arr.concat([data])
}).map(function(arr) {
  // I would normally write this as a one-liner
  var sorted = _.sortBy(arr, "page")
  var onlyValues = _.pluck(sorted, "value")
  var inOneArray = _.flatten(onlyValues)
  return inOneArray
})

allData.log("All data")

function doAjax(page) {
  // This would actually be Bacon.fromPromise($.ajax...)
  // Math.random simulates the fact that requests can return out of order
  return Bacon.later(Math.random() * 3000, [
    "Page" + page + "Item1",
    "Page" + page + "Item2"])
}
http://jsbin.com/damevu/4/edit

Breeze Partial initializer

I have a Single Page Application that is working pretty well so far, but I have run into an issue I am unable to figure out. I am using Breeze to populate a list of projects to be displayed in a table. There is way more info than I actually need, so I am doing a projection on the data. I want to add a Knockout computed onto the entity, so to accomplish this I registered an entity constructor like so:
metadataStore.registerEntityTypeCtor(entityNames.project, function () { this.isPartial = false; }, initializeProject);
The initializeProject function uses some of the values in the project to determine what the values of the computed should be. For example, if Project.Type == "P" then rowClass should be "Red".
The problem I am having is that all the properties of Project are null except for ProjNum, which happens to be the key. I believe the issue is caused by the projection, because I have registered other initializers for other types and they work just fine. Is there a way to make this work?
EDIT: I thought I would add a little more detail for clarification. The values of all the properties are set to Knockout observables; when I interrogate the properties using the JavaScript debugger in Chrome, the _latestValue of any of the properties is null. The only property that is set is ProjNum, which is also the entity key.
EDIT2: Here is the client-side code that does the projection:
var getProjectPartials = function (projectObservable, username, forceRemote) {
  var p1 = new breeze.Predicate("ProjManager", "==", username);
  var p2 = new breeze.Predicate("ApprovalStatus", "!=", "X");
  var p3 = new breeze.Predicate("ApprovalStatus", "!=", "C");
  var select = 'ProjNum,Title,Type,ApprovalStatus,CurrentStep,StartDate,ProjTargetDate,CurTargDate';
  var isQaUser = cookies.getCookie("IsQaUser");
  if (isQaUser == "True") {
    p1 = new breeze.Predicate("QAManager", "==", username);
    select = select + ',QAManager';
  } else {
    select = select + ',ProjManager';
  }
  var query = entityQuery
    .from('Projects')
    .where(p1.and(p2).and(p3))
    .select(select);
  if (!forceRemote) {
    var p = getLocal(query);
    if (p.length > 1) {
      projectObservable(p);
      return Q.resolve();
    }
  }
  return manager.executeQuery(query).then(querySucceeded).fail(queryFailed);

  function querySucceeded(data) {
    var list = partialMapper.mapDtosToEntities(
      manager,
      data.results,
      model.entityNames.project,
      'ProjNum');
    if (projectObservable) {
      projectObservable(list);
    }
    log('Retrieved projects using breeze', data, true);
  }
};
And here is the code for the partialMapper.mapDtosToEntities function:
var defaultExtension = { isPartial: true };

function mapDtosToEntities(manager, dtos, entityName, keyName, extendWith) {
  return dtos.map(dtoToEntityMapper);

  function dtoToEntityMapper(dto) {
    var keyValue = dto[keyName];
    var entity = manager.getEntityByKey(entityName, keyValue);
    if (!entity) {
      extendWith = $.extend({}, extendWith || defaultExtension);
      extendWith[keyName] = keyValue;
      entity = manager.createEntity(entityName, extendWith);
    }
    mapToEntity(entity, dto);
    entity.entityAspect.setUnchanged();
    return entity;
  }

  function mapToEntity(entity, dto) {
    for (var prop in dto) {
      if (dto.hasOwnProperty(prop)) {
        entity[prop](dto[prop]);
      }
    }
    return entity;
  }
}
EDIT3: Looks like it was my mistake. I found the error when I looked closer at initializeProject. Below is what the function looked like before I fixed it:
function initializeProject(project) {
  project.rowClass = ko.computed(function () {
    if (project.Type == "R") {
      return "project-list-item info";
    } else if (project.Type == "P") {
      return "project-list-item error";
    }
    return "project-list-item";
  });
}
The issue was with project.Type: I should have used project.Type() since it is an observable. It is a silly mistake that I have made too many times since starting this project.
EDIT4: Inside initializeProject some parts are working and others aren't. When I try to access project.ProjTargetDate() I get null, same with project.StartDate(). Because of the null value I get an error thrown from the moment library, since I am working with these dates to determine when a project is late. When I removed the select from the client query and the call to the partial entity mapper, everything worked fine.
You seem to be getting closer. I think a few more guard clauses in your initializeProject method would help and, when working with Knockout, one is constantly battling the issue of parentheses.
Btw, I highly recommend the Knockout Context Debugger plugin for Chrome for diagnosing binding problems.
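For example, here is a hedged sketch of initializeProject with the parentheses fixed and a guard for the partial case. ko.unwrap is assumed to be available (it is ko.utils.unwrapObservable in older Knockout versions), and the class names are taken from the question:
function initializeProject(project) {
  project.rowClass = ko.computed(function () {
    // Guard: with a partial projection, Type may not be populated yet.
    var type = ko.unwrap(project.Type);
    if (type === "R") { return "project-list-item info"; }
    if (type === "P") { return "project-list-item error"; }
    return "project-list-item";
  });
}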
Try toType()
You're working very hard with your DTO mapping, following along with John's code from his course. Since then there's a new way to get projection data into an entity: add toType(...) to the end of the query like this:
var query = entityQuery
  .from('Projects')
  .where(p1.and(p2).and(p3))
  .select(select)
  .toType('Project'); // cast to Project
It won't solve everything but you may be able to do away with the dto mapping.
Consider DTOs on the server
I should have pointed this out first. If you're always cutting this data down to size, why not define the client-facing model to suit your client? Create DTO classes of the right shape(s) and project into them on the server before sending data over the wire.
You can also build metadata to match those DTOs so that Project on the client has exactly the properties it should have there ... and no more.
I'm writing about this now. Should have a page on it in a week or so.

MongoDB JavaScript Yield large set of results

I'm trying to query a large set of results from MongoDB via Python. I do this through JavaScript, because I want to get something like the grandchildren in a tree-like structure. My code looks like the following:
col = db.getCollection(...)
var res = new Array();
col.find({ "type": ["example"] }).forEach(
  function (entry) {
    v1 = col.find({ "_id": entry["..."] })
    // ... (walk through the structure) ...
    vn = ...
    res.push([v1["_id"], vn["data"]]);
  }
);
return res;
Now I'm having the problem that the resulting array becomes very (too) large and the memory is exceeded. Is there a way to yield the results instead of pushing them into an array?
Alright, I think I know what you mean. I created a structure like the following:
var bulksize = 1000;
var col = db.getCollection("..");
var queryRes = col.find({ ... });

process = function (entity) { ... };

nextEntries = function () {
  var res = new Array();
  for (var i = 0; i < bulksize; i++) {
    if (hasNext())
      res.push(process(queryRes.next()));
    else
      break;
  }
  return res;
};

hasNext = function () {
  return queryRes.hasNext();
};
The script separates the results into bulks of 1000 entries. From the Python side I eval the script above and then do the following:
while database.eval('hasNext()'):
    print "test"
    for res in database.eval('return nextEntries()'):
        doSth(res)
The interesting thing is that the console always prints:
test
test
test
test
test
test
Then I get the error:
pymongo.errors.OperationFailure: command SON([('$eval', Code('return nextEntries()', {})), ('args', ())]) failed: invoke failed: JS Error: ReferenceError: nextEntries is not defined nofile_a:0
This means that the first calls to nextEntries() work, but then the function is no longer there. Could it be that MongoDB does something like clearing the JavaScript cache? The problem does not depend on the bulk size (tested with 10, 100, 1000, 10000, always with the same result).
Alright, I found a line in the MongoDB source code that clears all JavaScript functions that have been used more than 10 times. So if no changes on the database server are wanted, it is necessary to query the database multiple times and send bulks to the client by selecting batches of items with the help of the skip() and limit() functions. This works surprisingly fast. Thanks for your help.
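A hedged sketch of that skip()/limit() paging, written as mongo-shell JavaScript against the same collection and query as above (the bulk size and the processing step are only placeholders):
var bulksize = 1000;
var page = 0;
var batch;
do {
  // Fetch one bulk at a time instead of materialising everything at once.
  batch = col.find({ "type": ["example"] })
             .skip(page * bulksize)
             .limit(bulksize)
             .toArray();
  batch.forEach(function (entry) {
    // walk the structure and process each entry here
  });
  page++;
} while (batch.length === bulksize);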
