I need to run a search over more than 6,000 elements and generate a PDF with their information. Everything works fine when there are few elements, but with large quantities I hit this problem. I am a newbie at this, and I am not sure whether the runtime limit can be increased or the search result can be split up somehow. Thanks in advance; here is my code:
// Load the saved search and run it paged, 1,000 results at a time
var objSearch = search.load({
    id: 'customsearcht_itemid_lp'
});
var paginado = objSearch.runPaged({
    pageSize: 1000
});
var arraypag = []; // accumulator for one plain object per search result
paginado.pageRanges.forEach(function (pageRange) {
    var myPage = paginado.fetch({index: pageRange.index});
    myPage.data.forEach(function (result) {
        var iditem = result.getValue({name: 'internalid'});
        var bprice = result.getValue({name: 'baseprice'});
        var dname = result.getValue({name: 'displayname'});
        // Buquedadeporcentaje and Ubicacion are custom helpers defined elsewhere
        var vinopuntos = Math.round(parseFloat(Buquedadeporcentaje(iditem, Ubicacion)) * parseFloat(bprice));
        if (!vinopuntos) { vinopuntos = 0; }
        log.audit({title: 'vino', details: vinopuntos});
        arraypag.push({
            iditem: iditem,
            basep: bprice,
            dname: dname,
            vinopuntos: vinopuntos
        });
    });
    log.audit({title: 'Page number', details: arraypag});
});
var renderData = {
    cantidad: arraypag.length,
    articulos: arraypag
};
var renderPdf = render.create();
renderPdf.setTemplateById(268);
renderPdf.addCustomDataSource({
    format: render.DataSource.OBJECT,
    alias: "ETIQUETAS",
    data: renderData
});
var transactionFile = renderPdf.renderAsPdf();
context.response.writeFile({
    file: transactionFile,
    isInline: true
});
Just for reference: the time limit for a Suitelet script is 5 minutes; please see https://netsuite.custhelp.com/app/answers/detail/a_id/45311 (SuiteAnswers) for the details on script time limits.
First, we need to identify what exactly causes the time limit to be hit: either running the search or generating the PDF. Can you please add a log message after the search code block and run your script for 6,000 records? If you do not see that log message (check the script's Execution Log), the problem is in the search. In that case, the saved search you use may have some complex joins or return data that you do not really need in your script. I would recommend setting up the search in code using the N/search module and including only the columns you really need.
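For example, a minimal sketch of a lean, code-defined search (the record type, the filter, and the column names here are assumptions; adjust them to match your saved search):

// Minimal sketch: search.Type.ITEM, the filter, and the columns are
// assumptions to adapt to your own data.
var leanSearch = search.create({
    type: search.Type.ITEM,
    filters: [
        ['isinactive', 'is', 'F'] // hypothetical filter; replace with your own criteria
    ],
    columns: ['internalid', 'baseprice', 'displayname']
});
var paged = leanSearch.runPaged({ pageSize: 1000 });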
Please let me know if it helps.
I am trying to use the "pi-camera" library, which is working and allowing me to record video in raw h264 format on my Raspberry Pi. However, the Node.js library "gifify" continuously gives me the error "RangeError: Maximum call stack size exceeded". Looking this error up, it seems to be related to calling many functions within functions many times, or something similar. However, my code only uses one function, which contains a simple command to take the video and then convert it.
const PiCamera = require('pi-camera');
var fs = require('fs');
var gifify = require('gifify');
var path = require('path');
var sleep = require('system-sleep');

// Record 5 seconds of 640x480 h264 video from the Pi camera
const myCamera = new PiCamera({
    mode: 'video',
    output: `/home/pi/Videos/video.h264`,
    width: 640,
    height: 480,
    time: 5000,
    nopreview: true,
    vflip: true,
});

var input = path.join('/home/pi/Videos', 'video.h264');
var output = path.join('/home/pi/Videos', 'daily.gif');
var gif = fs.createWriteStream(output);

var options = {
    speed: 5,
    text: 'Daily Plant GIF'
};

sleep(5000);
// Record and convert every 10 seconds
setInterval(vid, 10000);

function vid() {
    myCamera.record()
        .then((result) => {
            console.log('success');
            gifify(input, options).pipe(gif); // convert the recording to a GIF
        })
        .catch((error) => {
            console.log(error);
        });
}
Any information on what this error really means in this scenario, or how to fix it, would be much appreciated. Thank you!
An error can be related not only to your code but also to the libraries you are using.
I see at least a few issues reported against gifify about "maximum call stack size exceeded".
Here is an open one:
https://github.com/vvo/gifify/issues/94
I'm not sure if there is a workaround in your case; maybe you need to try different parameters or look for a different library.
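One thing worth ruling out on your side, separate from the library issue: the posted code creates the gif write stream once and pipes into it on every setInterval run, but a stream that has already finished cannot be piped to again. A small sketch of the loop with a fresh stream per run (an assumption to test, not a confirmed fix for the gifify error):

function vid() {
    myCamera.record()
        .then((result) => {
            console.log('success');
            // Open a new write stream for each conversion instead of reusing one
            var gif = fs.createWriteStream(output);
            gifify(input, options).pipe(gif);
        })
        .catch((error) => {
            console.log(error);
        });
}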
I'm using the CLI version of Google's Lighthouse performance-testing tool to measure certain attributes of a large list of websites. I'm passing the results as JSON to STDOUT and then on to a Node script that plucks the values I'm interested in out to a CSV file.
One of the measures I'm collecting is audits.mobile-friendly.rawValue, which I was expecting to be a flag for passing Google's mobile-friendly test. So the assumption is that the value would be true for a mobile-optimized site. I collected this value for ~2,000 websites, and all came back false.
Here's an example call that I am making to the command line:
lighthouse http://nytimes.com --disable-device-emulation --disable-network-throttling --chrome-flags="--headless" --output "json" --quiet --output-path "stdout" | node lighthouse_parser.js >> speed_log.csv
and here's the output of that command:
"data_url","data_score","data_total_byte_weight","data_consistently_interactive_time","data_first_interactive_time","data_is_on_https","data_redirects_http","data_mobile_friendly","timestamp"
"https://www.nytimes.com/",18.181818181818183,4211752,,18609.982,false,true,false,"2018-04-02T17:16:39-04:00"
Here's the code for my lighthouse_parser.js:
var moment = require('moment');
var getStdin = require('get-stdin');
var json2csv = require('json2csv');

var timestamp = moment().format();

// Read the Lighthouse JSON report from stdin and emit one CSV row
getStdin().then(str => {
    try {
        process_files(str);
    } catch (error) {
        console.error(error);
    }
});

function process_files(this_file) {
    var obj = JSON.parse(this_file);
    // Pluck the values of interest out of the report
    var data_url = obj.url;
    var data_score = obj.score;
    var data_total_byte_weight = obj.audits['total-byte-weight'].rawValue;
    var data_consistently_interactive_time = obj.audits['consistently-interactive'].rawValue;
    var data_first_interactive_time = obj.audits['first-interactive'].rawValue;
    var data_is_on_https = obj.audits['is-on-https'].rawValue;
    var data_redirects_http = obj.audits['redirects-http'].rawValue;
    var data_mobile_friendly = obj.audits['mobile-friendly'].rawValue;
    var the_result = {
        "data_url": data_url,
        "data_score": data_score,
        "data_total_byte_weight": data_total_byte_weight,
        "data_consistently_interactive_time": data_consistently_interactive_time,
        "data_first_interactive_time": data_first_interactive_time,
        "data_is_on_https": data_is_on_https,
        "data_redirects_http": data_redirects_http,
        "data_mobile_friendly": data_mobile_friendly,
        "timestamp": timestamp,
    };
    var return_this = json2csv({
        data: the_result,
        header: false
    });
    console.log(return_this);
}
I haven't been able to get one true value for audits.mobile-friendly.rawValue on ANY site.
Any thoughts on what I'm doing wrong?
The mobile-friendly audit result you're looking at is essentially a placeholder audit that tells you to use the dedicated Mobile-Friendly Test. So, indeed, its value will never change. ;)
The viewport, content-width, and (to some degree) font-size audits can be used to build a definition of mobile friendliness that is comparable with what the dedicated MFT returns.
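For example, in the parser above you could read those audits instead of mobile-friendly; a small sketch (the audit keys mirror the report format already used in the question, but treat them as assumptions to verify against your Lighthouse version):

// Inside process_files(), after JSON.parse: derive a rough
// mobile-friendliness flag from audits that actually vary per site.
// The audit keys are assumptions; verify them for your Lighthouse version.
var data_has_viewport = obj.audits['viewport'].rawValue;
var data_content_sized = obj.audits['content-width'].rawValue;
var data_mobile_friendly = data_has_viewport && data_content_sized;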
I'm following the Progressive Web App lab from Google, and it says that it's using localStorage for simplicity, but that we should change it to idb.
Basically, we want to store a list of cities to display their weather information.
I tried using plain idb following the info here, but I think I'm too new to this and couldn't get any of it to work. Am I supposed to do:
const dbPromise = idb.open('keyval-store', 1, upgradeDB => {
    upgradeDB.createObjectStore('keyval');
});
and would keyval be the name of my variable where I would use keyval.get() or keyval.set() to get and store values?
I decided to move on to the simpler idbKeyval. I'm doing:
app.saveSelectedCities = function() {
    var selectedCities = JSON.stringify(app.selectedCities);
    idbKeyval.set(selectedCities);
};
instead of the localStorage example:
app.saveSelectedCities = function() {
    var selectedCities = JSON.stringify(app.selectedCities);
    localStorage.selectedCities = selectedCities;
};
and
app.selectedCities = idbKeyval.keys().then(keys => console.log(keys)).catch(err => console.log('It failed!', err));
instead of the localStorage example:
app.selectedCities = localStorage.selectedCities;
But my app is not loading any data, and in the developer tools console, I get:
app.js:314 Uncaught ReferenceError: idbKeyval is not defined(…)
I'm sure I'm missing something trivial, but these are my first steps with JavaScript and the like, so please, any help with any of the points touched on here would be greatly appreciated!
Given the error you're seeing, it looks like you've forgotten to include the idb-keyval library.
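Once the library is loaded, something like this sketch should work (the script path is hypothetical, and note that idbKeyval.set() takes both a key and a value):

// Assumes idb-keyval's script is included before app.js, e.g.
// <script src="/scripts/idb-keyval.js"></script> (path is hypothetical).
app.saveSelectedCities = function() {
    idbKeyval.set('selectedCities', JSON.stringify(app.selectedCities));
};

idbKeyval.get('selectedCities').then(function(cities) {
    if (cities) {
        app.selectedCities = JSON.parse(cities);
    }
});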
I too was going through this and wanted it to work with localForage. It took a bit, because I'm new to it too, but here are the save and load functions I used, which made it all work.
// TODO add saveSelectedCities function here
// Save list of cities to localForage (stored as a JSON string)
app.saveSelectedCities = function() {
    var selectedCities = JSON.stringify(app.selectedCities);
    localforage.setItem('selectedCities', selectedCities);
    //localStorage.selectedCities = selectedCities;
};

// Load the saved cities, or fall back to the initial forecast
localforage.getItem('selectedCities').then(function(cityList) {
    app.selectedCities = JSON.parse(cityList); // parse the stored JSON string back into an array
    app.selectedCities.forEach(function(city) {
        app.getForecast(city.key, city.label);
    });
}).catch(function(err) {
    app.updateForecastCard(initialWeatherForecast);
    app.selectedCities = [
        {key: initialWeatherForecast.key, label: initialWeatherForecast.label}
    ];
    app.saveSelectedCities();
});
I'm trying to write a little script to make my coworkers' lives and mine easier. I am trying to append rows to a spreadsheet based on information entered into a custom form. The code posted below is just the doPost block, which should append to the Google spreadsheet.
function doPost(form) {
    var PN = form.PartNumber;
    var REV = form.Revision;
    var DATE = form.RevisionDate;
    var DESC = form.Description;
    var NOTE = form.PartNotes;
    var URL = form.myFile.getURL();
    var ss = SpreadsheetApp.openById("ID HERE"); // removed ID for sake of safety (let me be paranoid)
    var sheet = ss.getSheetName('Uploads');
    sheet.appendRow([PN,REV,DATE,DESC,NOTE,URL]);
}
I am unsure why it isn't writing to the spreadsheet, but it isn't throwing any errors. If you can offer any insight into what is wrong, I would greatly appreciate it; there are many guides online, but most seem to be based on deprecated functions/code/etc.
Thanks for your time.
Instead of using doPost, set up an "On form submit" trigger.
You need to get the namedValues to be able to pull specific values, taking the first element of each.
Also, it should be getSheetByName('Uploads').
As pointed out in the previous answer, it is unclear what you are trying to achieve with "form.myFile.getURL();". If you want the form URL, you might as well create it as a string, as it always stays the same.
Here is a working example of your code:
function doPost(form) {
    var formResponses = form.namedValues;
    var PN = formResponses.PartNumber[0];
    var REV = formResponses.Revision[0];
    var DATE = formResponses.RevisionDate[0];
    var DESC = formResponses.Description[0];
    var NOTE = formResponses.PartNotes[0];
    //var URL = form.myFile.getURL(); // Not sure what you are trying to get here, as the form URL will always be the same.
    var URL = "Your form's url"; // You can put the form URL in here so it will be pushed into every row.
    var ss = SpreadsheetApp.openById("ID HERE"); // removed ID for sake of safety (let me be paranoid)
    var sheet = ss.getSheetByName('Uploads');
    sheet.appendRow([PN, REV, DATE, DESC, NOTE, URL]);
}
The form fields are nested in a "parameter" property of the doPost argument.
So, you should access them using:
function doPost(form) {
    var actualForm = form.parameter;
    var PN = actualForm.PartNumber;
    //etc
To double-check all the parameters you're receiving and their names, you could append everything stringified to your sheet, like this:
sheet.appendRow([JSON.stringify(form)]);
Edit:
This form.myFile.getURL() also looks odd. I guess another good debugging trick is to wrap everything in a try-catch and email yourself any errors you get. For example:
function doPost(form) {
    try {
        //all your code
    } catch(err) {
        // Note: the Apps Script method is sendEmail, not sendMail
        MailApp.sendEmail('yourself#etc', 'doPost error', err + '\n\n' + JSON.stringify(form));
    }
}
On form submit
onFormSubmit works. "doPost" looks wrong.
Simple example:
function Initialize() {
    // Clear any existing project triggers, then create a fresh on-form-submit trigger
    var triggers = ScriptApp.getProjectTriggers();
    for (var i in triggers) {
        ScriptApp.deleteTrigger(triggers[i]);
    }
    ScriptApp.newTrigger("SendGoogleForm")
        .forSpreadsheet(SpreadsheetApp.getActiveSpreadsheet())
        .onFormSubmit()
        .create();
}
function SendGoogleForm(e)
{
try
{
Full example (scroll down to the code): http://www.labnol.org/internet/google-docs-email-form/20884/ (note: the example sends email)
Trigger docs: https://developers.google.com/apps-script/guides/triggers/events
Notes: I think the problem is doPost. Does it work with Google Forms? I have never seen it used with Google Forms.
First and foremost, thank you to everyone who has responded with information thus far. None of the solutions posted here worked for my particular implementation (my implementation is probably to blame; it is very crude), but they definitely set me down the path to a working version of my form, which we now use lightly. I have posted some of the code below:
function sheetFill(form, link) {
    try {
        var formResponses = form.namedValues;
        var toForm = ['', '', '', '', '', '', '']; // start from empty strings so the concatenation below doesn't pick up a leading 0
        for (var i = 0; i < formResponses.PartNumber.length; i++) {
            toForm[0] = toForm[0] + formResponses.PartNumber[i];
        }
        ... (several for loops later)
        var d = new Date();
        var ss = SpreadsheetApp.openById("IDHERE");
        var sheet = ss.getActiveSheet(); // getCurrentSheet is not a Spreadsheet method; get the sheet and append to it
        sheet.appendRow([toForm[0], toForm[1], toForm[2], toForm[3], toForm[4], toForm[5], toForm[6], link, d]);
    } catch(err) {
        MailApp.sendEmail('EMAIL', 'doPost error', err + '\n\n' + JSON.stringify(form));
    }
}
It is not very versatile, robust, or elegant, but it is a starting point.
I'm trying to query a large set of results from MongoDB via Python. I do this through JavaScript, because I want to get something like the grandchildren in a tree-like structure. My code looks like the following:
col = db.getCollection(...)
var res = new Array();
col.find({ "type": ["example"] }).forEach(
    function(entry) {
        v1 = col.find({ "_id": entry["..."] })
        ... (walk through the structure) ...
        vn = ...
        res.push([v1["_id"], vn["data"]]);
    }
);
return res;
Now I'm having the problem that the resulting array becomes very (too) large and memory is exceeded. Is there a way to yield the results instead of pushing them into an array?
Alright, I think I know what you mean. I created a structure like the following:
var bulksize = 1000;
var col = db.getCollection("..");
var queryRes = col.find({ ... })

process = function(entity) { ... }

// Return the next bulk of up to `bulksize` processed entries
nextEntries = function() {
    var res = new Array();
    for (var i = 0; i < bulksize; i++) {
        if (hasNext())
            res.push(process(queryRes.next()));
        else
            break;
    }
    return res;
}

hasNext = function() {
    return queryRes.hasNext();
}
The script splits the results into bulks of 1,000 entries. From the Python side, I eval the script above and then do the following:
while database.eval('hasNext()'):
    print "test"
    for res in database.eval('return nextEntries()'):
        doSth(res)
The interesting thing is that the console always says:
test
test
test
test
test
test
Then I get the error:
pymongo.errors.OperationFailure: command SON([('$eval', Code('return nextEntries()', {})), ('args', ())]) failed: invoke failed: JS Error: ReferenceError: nextEntries is not defined nofile_a:0
This means that the first calls to nextEntries() work, but then the function is no longer there. Could it be that MongoDB does something like clearing its JavaScript cache? The problem does not depend on the bulk size (tested with 10, 100, 1000, and 10000, always with the same result).
Alright, I found a line in the MongoDB source code that clears all JavaScript functions that have been used more than 10 times. So if no changes on the database server are wanted, it is necessary to query the database multiple times and send bulks to the client by selecting ranges of items with the skip() and limit() functions. This works surprisingly fast. Thanks for your help.
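For reference, a minimal sketch of that batching approach on the shell/JavaScript side (the collection name and filter are placeholders carried over from the question; sorting by _id is an added assumption to keep the skip/limit windows stable between queries):

var bulksize = 1000;
var col = db.getCollection("..");
var skipped = 0;
var bulk;
do {
    // Fetch one window of results; the sort keeps windows consistent across queries
    bulk = col.find({ "type": ["example"] }).sort({ _id: 1 }).skip(skipped).limit(bulksize).toArray();
    bulk.forEach(function(entry) {
        // ... walk the structure and process each entry on the client ...
    });
    skipped += bulksize;
} while (bulk.length === bulksize);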