I am trying to use the "pi-camera" library, which works and lets me record video in raw h264 format on my Raspberry Pi. However, the Node.js library "gifify" keeps giving me the error "RangeError: Maximum call stack size exceeded". Looking this error up, it seems to be related to deeply nested or repeatedly invoked function calls. However, my code uses only one function, which contains a simple command to take the video and then convert it.
const PiCamera = require('pi-camera');
var fs = require('fs');
var gifify = require('gifify');
var path = require('path');
var sleep = require('system-sleep');
const myCamera = new PiCamera({
  mode: 'video',
  output: `/home/pi/Videos/video.h264`,
  width: 640,
  height: 480,
  time: 5000,
  nopreview: true,
  vflip: true,
});
var input = path.join('/home/pi/Videos', 'video.h264');
var output = path.join('/home/pi/Videos', 'daily.gif');
var gif = fs.createWriteStream(output);
var options = {
  speed: 5,
  text: 'Daily Plant GIF'
};
sleep(5000);
setInterval(vid, 10000);
function vid() {
  myCamera.record()
    .then((result) => {
      console.log('success');
      gifify(input, options).pipe(gif);
    })
    .catch((error) => {
      console.log(error);
    });
}
Any information on what this error actually means in this scenario, or how to fix it, would be much appreciated. Thank you!
An error can be related not only to your code but also to the libraries you are using.
I see at least a few issues reported against gifify about "maximum call stack size exceeded"; here is an open one:
https://github.com/vvo/gifify/issues/94
I'm not sure there is a workaround in your case. You may need to try different parameters or look for a different library.
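If you do switch, one option is to call ffmpeg directly instead of going through gifify (gifify itself requires ffmpeg to be installed anyway). Note also that the code above pipes into the same gif write stream on every interval tick; a writable stream can only be finished once, so if you stay with gifify, create a fresh write stream inside vid(). The sketch below is untested and assumes ffmpeg is installed on the Pi; the fps and scale filter values are arbitrary examples:

const { execFile } = require('child_process');

function convertToGif(input, output, callback) {
  // fps=10 and scale=320:-1 shrink the GIF; tune to taste
  execFile('ffmpeg', ['-y', '-i', input, '-vf', 'fps=10,scale=320:-1', output], callback);
}

convertToGif('/home/pi/Videos/video.h264', '/home/pi/Videos/daily.gif', (err) => {
  if (err) {
    console.log(err);
  } else {
    console.log('GIF written');
  }
});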
I need to run a search over more than 6,000 elements and generate a PDF with their information. Everything works fine with few elements, but with large quantities I hit this problem. I am a newbie at this, and I am not sure whether the runtime limit can be increased or the search result can be split up somehow. Thanks in advance; here is my code:
var objSearch = search.load({
    id: 'customsearcht_itemid_lp'
});
var paginado = objSearch.runPaged({
    pageSize: 1000,
});
paginado.pageRanges.forEach(function (pageRange) {
    var myPage = paginado.fetch({ index: pageRange.index });
    myPage.data.forEach(function (result) {
        var iditem = result.getValue({ name: 'internalid' });
        var bprice = result.getValue({ name: 'baseprice' });
        var dname = result.getValue({ name: 'displayname' });
        var vinopuntos = Math.round(parseFloat(Buquedadeporcentaje(iditem, Ubicacion)) * parseFloat(bprice));
        if (!vinopuntos) { vinopuntos = 0; }
        log.audit({ title: 'vino', details: vinopuntos });
        result.datositem = {
            iditem: iditem,
            basep: bprice,
            dname: dname,
            vinopuntos: vinopuntos
        };
        arraypag.push(result.datositem);
    });
    log.audit({ title: 'Número de página', details: arraypag });
});
var renderData = {
    cantidad: arraypag.length,
    articulos: arraypag
};
var renderPdf = render.create();
renderPdf.setTemplateById(268);
renderPdf.addCustomDataSource({
    format: render.DataSource.OBJECT,
    alias: "ETIQUETAS",
    data: renderData
});
var transactionFile = renderPdf.renderAsPdf();
context.response.writeFile({
    file: transactionFile,
    isInline: true
});
Just for reference: the time limit for a Suitelet script is 5 minutes; please see https://netsuite.custhelp.com/app/answers/detail/a_id/45311 (SuiteAnswers) for details on script time limits.
First, we need to identify what exactly hits the time limit: running the search or generating the PDF. Please add a log message after the search code block and run your script for 6,000 records. If you do not see that log message in the script's Execution Log, the problem is in the search. In that case, maybe the Saved Search you use has some complex joins or returns data you do not really need in your script, and I would recommend setting up the search in code with the N/search module, including only those columns you really need.
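As an illustration only, a lean, code-defined search might look like the sketch below (SuiteScript 2.x, N/search). The search type and column names are assumptions, not taken from your account, so adjust them to your record and fields:

var objSearch = search.create({
    type: search.Type.ITEM, // assumption: adjust to your record type
    columns: ['internalid', 'baseprice', 'displayname'] // only the columns you actually use
});
var paginado = objSearch.runPaged({ pageSize: 1000 });
// ...then, right after your pageRanges.forEach block, log a marker so you can
// tell from the Execution Log whether the search phase completed:
log.audit({ title: 'Search finished', details: arraypag.length });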
Please let me know if this helps.
I have a Node.js server that is being used to create about 1,200 PDF forms that can be downloaded by a client later. They are created using pdfmake and then written to a server folder. When I execute the code as written, Node.js runs out of memory at about 350 documents. I know there must be a better way to save them, but I cannot seem to figure it out.
The method below is called by mapping over an array of data from a Mongoose query. The relevant code for creating and saving the form is as follows:
const whichForm = certList => {
  certList.map(cert => {
    if (cert.Cert_Details !== null) {
      switch (cert.GWMA) {
        case 'OA':
        case 'PC':
          // Don't provide reports for Feedlots
          if (cert.Cert_Details.cert_type !== null) {
            if (cert.Cert_Details.cert_type === 'Irrigation') {
              createOAReport(cert);
            }
          }
          break;
        case 'FA':
          // Don't provide reports for Feedlots
          if (cert.Cert_Details.cert_type === 'Irrigation') {
            createFAReport(cert);
          }
          break;
      }
    }
  });
};
Different File:
const PdfPrinter = require('pdfmake/src/printer');
const fs = require('fs');
const path = require('path');

const createOAReport = data => {
  console.log('PC or OA Cert ', data.Cert_ID);
  // console.log(data);
  let all_meters_maint = [];
  data.Flowmeters.map(flowmeter => {
    // Each flow meter
    // console.log(`Inside Flowmeter ${flowmeter}`);
    if (flowmeter.Active === true) {
      let fm_maint = [];
      fm_maint.push({
        text: `Meter Serial Number: ${flowmeter.Meter_Details.Serial_num}`
      });
      fm_maint.push({
        text: `Type of Meter: ${flowmeter.Meter_Details.Manufacturer}`
      });
      fm_maint.push({ text: `Units: ${flowmeter.Meter_Details.units}` });
      fm_maint.push({ text: `Factor: ${flowmeter.Meter_Details.factor}` });
      all_meters_maint.push(fm_maint);
    }
  });

  // docDefinition is assumed to be defined elsewhere in this file
  docDefinition.content.push({
    style: 'tableExample',
    table: {
      widths: [200, 200, '*', '*'],
      body: all_meters_maint
    },
    layout: 'noBorders'
  });

  const fonts = {
    Roboto: {
      normal: path.join(__dirname, '../', '/fonts/Roboto-Regular.ttf'),
      bold: path.join(__dirname, '../', '/fonts/Roboto-Medium.ttf'),
      italics: path.join(__dirname, '../', '/fonts/Roboto-Italic.ttf'),
      bolditalics: path.join(__dirname, '../', '/fonts/Roboto-MediumItalic.ttf')
    }
  };
  const printer = new PdfPrinter(fonts);
  const pdfDoc = printer.createPdfKitDocument(docDefinition);

  // Build file path
  const fullfilePath = path.join(
    __dirname,
    '../',
    '/public/pdffiles/',
    `${data.Cert_ID}.pdf`
  );
  pdfDoc.pipe(fs.createWriteStream(fullfilePath));
  pdfDoc.end();
};
Is there a different way to save the files that doesn't force them through a stream and won't keep them all in memory?
Before we get to the answer, I'm making one huge assumption based on the information in the question. The question states create about 1200 pdf forms, so I'm assuming that in the function whichForm the parameter certList is an array of 1,200 items, or rather 1,200 items that will end up calling the createOAReport method; you get the idea. I'm assuming the problem is that we are calling that method to create the PDFs 1,200 times within that Array.map call, which I believe makes sense given the question and the context of the code.
On to the answer. The major problem is that you aren't just trying to create 1,200 PDFs; you are trying to create 1,200 PDFs all at once, which of course puts a strain on the system, perhaps even more so on a single-threaded runtime like Node.js.
The easy, hacky solution is to just increase the memory available to Node.js with the --max-old-space-size flag, setting the memory size in MB when running your node command. You can find more information about this in this tutorial, but the short version is a command like node --max-old-space-size=8192 main.js, which raises Node.js's memory limit to 8192 MB, or 8 GB.
There are a few problems with that method. Mainly, it's not very scalable. What if someday you have 5,000 PDFs to create? You'd have to increase the memory size again, and maybe upgrade the machine it's being run on.
The second solution, which you could probably combine with the first, is to stop running all of this work concurrently. Depending on many factors and how optimized the current system is, chances are this will increase the amount of time it takes to create all of these PDFs.
Coding this is a two-step process. First, set up your createOAReport function to return a promise that indicates when it's done. Second, change your whichForm function to limit how many items can be running at any single point in time.
You will of course have to experiment with the system to determine how many items you can run at once without overloading it. Fine-tuning that number is not something I focused on here, and you could probably raise it by increasing the memory you give Node.js as well.
And of course, there are tons of different ways to do this. I have a few ideas for methods that are better than the one I'm going to show here, but they are a lot more complicated. The foundational idea of limiting how many items run at once remains the same, though. You can optimize it to fit your needs.
I've developed systems like this before, though I don't think the way I've done it is the best or cleanest way. At the end of this answer I've attached some sample code for your example to illustrate my point.
const _ = require('lodash');

const MAX_RUNNING_PROMISES = 10; // You will have to play with this number to get it right for your needs

const whichForm = async certList => {
  // If certList is ["a", "b", "c", "d"]
  // and we run the following function with MAX_RUNNING_PROMISES = 2,
  // certList would become [["a", "b"], ["c", "d"]]
  certList = _.chunk(certList, MAX_RUNNING_PROMISES);
  // Of course you can use something other than Lodash here, but I chose it because it's the first thing that came to mind

  for (let i = 0; i < certList.length; i++) {
    const certArray = certList[i];

    // The following line waits until all the promises have resolved or completed before moving on
    await Promise.all(certArray.map(cert => {
      if (cert.Cert_Details !== null) {
        switch (cert.GWMA) {
          case 'OA':
          case 'PC':
            // Don't provide reports for Feedlots
            if (cert.Cert_Details.cert_type !== null) {
              if (cert.Cert_Details.cert_type === 'Irrigation') {
                return createOAReport(cert);
              }
            }
            break;
          case 'FA':
            // Don't provide reports for Feedlots
            if (cert.Cert_Details.cert_type === 'Irrigation') {
              return createFAReport(cert);
            }
            break;
        }
      }
    }));
  }
};
Then, for your other file, we just have to convert it to return a promise.
const PdfPrinter = require('pdfmake/src/printer');
const fs = require('fs');
const path = require('path');

const createOAReport = data => {
  return new Promise((resolve, reject) => {
    console.log('PC or OA Cert ', data.Cert_ID);
    // console.log(data);
    let all_meters_maint = [];
    const flowmeter = data.Flowmeters[0];
    if (flowmeter.Active === true) {
      let fm_maint = [];
      fm_maint.push({
        text: `Meter Serial Number: ${flowmeter.Meter_Details.Serial_num}`
      });
      fm_maint.push({
        text: `Type of Meter: ${flowmeter.Meter_Details.Manufacturer}`
      });
      fm_maint.push({
        text: `Units: ${flowmeter.Meter_Details.units}`
      });
      fm_maint.push({
        text: `Factor: ${flowmeter.Meter_Details.factor}`
      });
      all_meters_maint.push(fm_maint);
    }

    docDefinition.content.push({
      style: 'tableExample',
      table: {
        widths: [200, 200, '*', '*'],
        body: all_meters_maint
      },
      layout: 'noBorders'
    });

    const fonts = {
      Roboto: {
        normal: path.join(__dirname, '../', '/fonts/Roboto-Regular.ttf'),
        bold: path.join(__dirname, '../', '/fonts/Roboto-Medium.ttf'),
        italics: path.join(__dirname, '../', '/fonts/Roboto-Italic.ttf'),
        bolditalics: path.join(__dirname, '../', '/fonts/Roboto-MediumItalic.ttf')
      }
    };
    const printer = new PdfPrinter(fonts);
    const pdfDoc = printer.createPdfKitDocument(docDefinition);

    // Build file path
    const fullfilePath = path.join(
      __dirname,
      '../',
      '/public/pdffiles/',
      `${data.Cert_ID}.pdf`
    );
    const outStream = fs.createWriteStream(fullfilePath);
    pdfDoc.pipe(outStream);
    // Resolve the promise once the file has actually been flushed to disk;
    // the 'finish' event fires on the write stream, not on pdfDoc itself
    outStream.on('finish', resolve);
    outStream.on('error', reject);
    pdfDoc.end();
  });
};
I realized after getting quite far into this answer that my original assumption is incorrect, since some of those PDFs might be created within the second function via the data.Flowmeters.map loop. Although I'm not going to demonstrate it, you will have to apply the same ideas from this answer to that system as well. For now, I have removed that section and am just using the first item in that array, since this is only an example.
You might want to restructure your code once you have a handle on this idea, so that one function handles creating the PDF and you don't have as many .map calls all over the place. Abstract the .map calls out and keep them separate from the PDF-creation process; that way it's easier to limit how many PDFs are being created at a single time.
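For instance, here's a minimal sketch of that abstraction using the p-limit package instead of manual chunking (an assumption on my part; any concurrency limiter works, and createPdf is a hypothetical stand-in for a promise-returning function like createOAReport above):

const pLimit = require('p-limit'); // assumes a CommonJS-compatible version of p-limit

const limit = pLimit(10); // at most 10 PDFs in flight at a time

// createPdf is hypothetical: any function that returns a promise per cert
const makeAllPdfs = certList =>
  Promise.all(certList.map(cert => limit(() => createPdf(cert))));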
It would also be a good idea to add some error handling around all of these processes.
NOTE: I didn't actually test this code, so there might be some bugs in it, but the overall ideas and principles still apply.
I'm using the CLI version of Google's Lighthouse performance-testing tool to measure certain attributes of a large list of websites. I'm passing the results as JSON to STDOUT and then on to a Node script that plucks the values I'm interested in out to a CSV file.
One of the measures I'm collecting is audits.mobile-friendly.rawValue, which I was expecting to be a flag for passing Google's Mobile-Friendly Test; the assumption was that the value would be true for a mobile-optimized site. I collected this value for ~2,000 websites, and all came back false.
Here's an example call that I am making to the command line:
lighthouse http://nytimes.com --disable-device-emulation --disable-network-throttling --chrome-flags="--headless" --output "json" --quiet --output-path "stdout" | node lighthouse_parser.js >> speed_log.csv
and here's the output of that command:
"data_url","data_score","data_total_byte_weight","data_consistently_interactive_time","data_first_interactive_time","data_is_on_https","data_redirects_http","data_mobile_friendly","timestamp"
"https://www.nytimes.com/",18.181818181818183,4211752,,18609.982,false,true,false,"2018-04-02T17:16:39-04:00"
Here's the code for my lighthouse_parser.js:
var moment = require('moment');
var getStdin = require('get-stdin');
var json2csv = require('json2csv');
var timestamp = moment().format();
getStdin().then(str => {
try {
process_files(str);
} catch (error) {
console.error(error);
}
});
function process_files(this_file) {
var obj = JSON.parse(this_file);
var data_url = obj.url;
var data_score = obj.score;
var data_total_byte_weight = obj.audits['total-byte-weight'].rawValue;
var data_consistently_interactive_time = obj.audits['consistently-interactive'].rawValue;
var data_first_interactive_time = obj.audits['first-interactive'].rawValue;
var data_is_on_https = obj.audits['is-on-https'].rawValue;
var data_redirects_http = obj.audits['redirects-http'].rawValue;
var data_mobile_friendly = obj.audits['mobile-friendly'].rawValue;
var the_result = {
"data_url": data_url,
"data_score": data_score,
"data_total_byte_weight": data_total_byte_weight,
"data_consistently_interactive_time": data_consistently_interactive_time,
"data_first_interactive_time": data_first_interactive_time,
"data_is_on_https": data_is_on_https,
"data_redirects_http": data_redirects_http,
"data_mobile_friendly": data_mobile_friendly,
"timestamp": timestamp,
};
var return_this = json2csv({
data: the_result,
header: false
});
console.log(return_this);
}
I haven't been able to get one true value for audits.mobile-friendly.rawValue on ANY site.
Any thoughts on what I'm doing wrong?
The mobile-friendly audit result you're looking at is essentially a placeholder audit that tells you to use the dedicated Mobile-Friendly Test. So, indeed, its value will never change. ;)
The viewport, content-width, and (to some degree) font-size audits can be used to build a working definition of mobile friendliness that is comparable with what the dedicated Mobile-Friendly Test returns.
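In your parser, that could look something like the sketch below. This assumes the viewport and content-width audit IDs exist in your Lighthouse version and expose a boolean rawValue; verify against your JSON output:

var data_has_viewport = obj.audits['viewport'].rawValue;
var data_content_width = obj.audits['content-width'].rawValue;
// A rough stand-in for "mobile friendly": the page sets a viewport
// and its content fits the viewport width
var data_mobile_friendly_approx = data_has_viewport && data_content_width;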
I am working with pngjs and many of its methods. Most of the time they work fine; however, as in the following example, I get the error "Stream is not writable":
var fs = require('fs'),
    PNG = require('pngjs').PNG;

var dst = new PNG({ width: 100, height: 50 });

fs.createReadStream('http://1.1m.yt/hry7Eby.png') // download this picture in order to examine the code
  .pipe(new PNG())
  .on('parsed', function(data) {
    console.log(data);
  });
This case is not singular: I get this error on one random PNG image about once a day, across all of pngjs's methods, and it obviously crashes my app.
(Note: you can't use the HTTP link above with a read stream directly; you will have to download and rename the file and then do something like:)
fs.createReadStream('1.png')
Thank you for your time and effort.
This seems to be a bug in the library, though I'm wary of saying so as I'm no expert in PNGs. The parser seems to complete while the stream is still writing: it encounters the IEND chunk and so calls this:
ParserAsync.prototype._finished = function() {
  if (this.errord) {
    return;
  }
  if (!this._inflate) {
    this.emit('error', 'No Inflate block');
  }
  else {
    // no more data to inflate
    this._inflate.end();
  }
  this.destroySoon();
};
If you comment out the this.destroySoon(); call, it finishes the image correctly instead of eventually calling this function:
ChunkStream.prototype.end = function(data, encoding) {
  if (data) {
    this.write(data, encoding);
  }
  this.writable = false;
  // already destroyed
  if (!this._buffers) {
    return;
  }
  // enqueue or handle end
  if (this._buffers.length === 0) {
    this._end();
  }
  else {
    this._buffers.push(null);
    this._process();
  }
};
...which would otherwise end up setting the stream's writable flag to false or, if you comment that out, pushing a null value into the _buffers array and breaking ChunkStream._processRead.
I'm fairly certain this is a race between the time the zlib parser takes to complete and the time the stream takes to complete, since if you do this synchronously it works fine:
var data = fs.readFileSync('pic.png');
var png = PNG.sync.read(data);
var buff = PNG.sync.write(png);
fs.writeFileSync('out2.png', buff);
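As a stopgap until this is fixed upstream, you can at least stop the error from crashing your app by attaching an 'error' handler to the parsing stream (a sketch; what you do in the handler is up to you):

fs.createReadStream('1.png')
  .pipe(new PNG())
  .on('parsed', function() {
    console.log('parsed OK');
  })
  .on('error', function(err) {
    // without a listener, a stream 'error' event is thrown and kills the process
    console.error('PNG parse failed:', err);
  });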
I'm new to Node.js and jQuery, and I'm trying to update a single HTML element from a script.
I am using a Raspberry Pi 2 and an ultrasonic sensor to measure distance. I want to measure continuously and update the HTML document with the real-time values.
When I try to run my code, it behaves like a server and not a client: everything I console.log() prints in the terminal and not in the browser's console. I currently run the code with "sudo node surveyor.js", but nothing happens in the HTML document, even though I have linked the script properly. I have also tried document.getElementsByTagName("h6").innerHTML = distance.toFixed(2), but that throws "document is not defined".
Is there any easy way to fix this?
My code so far is:
var statistics = require('math-statistics');
var usonic = require('r-pi-usonic');
var fs = require("fs");
var path = require("path");
var jsdom = require("jsdom");

var htmlSource = fs.readFileSync("../index.html", "utf8");

var init = function(config) {
    usonic.init(function (error) {
        if (error) {
            console.log('error');
        } else {
            var sensor = usonic.createSensor(config.echoPin, config.triggerPin, config.timeout);
            //console.log(config);
            var distances;

            (function measure() {
                if (!distances || distances.length === config.rate) {
                    if (distances) {
                        print(distances);
                    }
                    distances = [];
                }

                setTimeout(function() {
                    distances.push(sensor());
                    measure();
                }, config.delay);
            }());
        }
    });
};

var print = function(distances) {
    var distance = statistics.median(distances);

    process.stdout.clearLine();
    process.stdout.cursorTo(0);

    if (distance < 0) {
        process.stdout.write('Error: Measurement timeout.\n');
    } else {
        process.stdout.write('Distance: ' + distance.toFixed(2) + ' cm');
        call_jsdom(htmlSource, function (window) {
            var $ = window.$;
            $("h6").replaceWith(distance.toFixed(2));
            console.log(documentToSource(window.document));
        });
    }
};

function documentToSource(doc) {
    // The non-standard window.document.outerHTML also exists,
    // but currently does not preserve source code structure as well.
    // The following two operations are non-standard.
    return doc.doctype.toString() + doc.innerHTML;
}

function call_jsdom(source, callback) {
    jsdom.env(
        source,
        ['jquery-1.7.1.min.js'],
        function(errors, window) {
            process.nextTick(
                function () {
                    if (errors) {
                        throw new Error("There were errors: " + errors);
                    }
                    callback(window);
                }
            );
        }
    );
}

init({
    echoPin: 15, // Echo pin
    triggerPin: 14, // Trigger pin
    timeout: 1000, // Measurement timeout in µs
    delay: 60, // Measurement delay in ms
    rate: 5 // Measurements per sample
});
Node.js is a server-side implementation of JavaScript. It's fine to do all the sensor operations and calculations on the server side, but you need some mechanism to deliver the results to your clients. If they are going to use your application through a web browser, you must run an HTTP server, for example with Express.js, and create a route (something like http://localhost/surveyor or just http://localhost/) that calls a method implemented on the server side and does something with the result. One possible way to return the resulting data to clients is to render an HTML page that shows it; for that you would use a template engine.
Any DOM manipulation should be done on the client side (you could, for example, include a <script> tag inside your template HTML just to try it out and understand how it works, but doing this is not recommended in production environments).
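To make that concrete, here is a minimal sketch, assuming Express is installed (npm install express) and that the measurement loop updates a shared variable. The route name, port, and polling interval are arbitrary:

// server.js: hypothetical sketch, not your actual setup
var express = require('express');
var app = express();

var latestDistance = null; // update this from print() in the measurement loop

app.use(express.static('public')); // serves index.html and client.js

app.get('/distance', function (req, res) {
    res.json({ distance: latestDistance });
});

app.listen(3000);

On the client side, a small script included from the HTML page polls that route and updates the element in the browser, where document and jQuery actually exist:

// public/client.js
setInterval(function () {
    $.getJSON('/distance', function (data) {
        if (data.distance !== null) {
            $('h6').text(data.distance.toFixed(2));
        }
    });
}, 1000);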
Try searching Google for Node.js examples and tutorials and you will get it :)