Show webcam feed, capture images and save them locally in Node.js?

I am trying to build a facial recognition module to use in my project. This module will later be used with Electron to build a cross-platform application.
The basic idea is:
The user is presented with a webpage that shows his/her webcam feed. S/he can click on a capture button, which saves the image on the server side. This is repeated a number of times to collect training data for the facial recognition model. I implemented the image capture part using a third-party npm module called 'node-webcam':
const nodeWebCam = require('node-webcam');
const fs = require('fs');
const app = require('express')();
const path = require('path');

// specifying parameters for the pictures to be taken
var options = {
  width: 1280,
  height: 720,
  quality: 100,
  delay: 1,
  saveShots: true,
  output: "jpeg",
  device: false,
  callbackReturn: "location"
};

// create instance using the above options
var webcam = nodeWebCam.create(options);

// capture function that snaps <amount> images and saves them with the given name in a folder of the same name
var captureShot = (amount, i, name) => {
  var dir = `./images/${name}`;
  // create folder if and only if it does not exist
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir);
  }
  // capture the image
  webcam.capture(`${dir}/${name}${i}.${options.output}`, (err, data) => {
    if (!err) {
      console.log('Image created');
    } else {
      console.log(err);
    }
    i++;
    if (i <= amount) {
      captureShot(amount, i, name);
    }
  });
};

// call the capture function
captureShot(30, 1, 'robin');

app.get('/', (req, res) => {
  res.sendFile(path.join(__dirname, 'index.html'));
});

app.listen(3000, () => {
  console.log("Listening at port 3000....");
});
However, I am lost after this part. I don't know how to get the live feed displayed on the webpage which the user sees. Also, I realized later that this is server-side code, and there is no way to call the captureShot() function from the client side. Any help would be appreciated.

Turn captureShot into a promise, then render it in the route. We are going to set up a route that runs the function, then returns the path to the image in an HTML string. I do not know exactly what data your function returns for the image, but assuming it returns the exact path, you resolve that path in the callback.
You also need to serve a static directory with Express, so you can use http://localhost:3000/myimage.jpg
const nodeWebCam = require('node-webcam');
const fs = require('fs');
const express = require('express');
const app = express();
const path = require('path');

app.use(express.static('images')); // images folder to be served
// Now we can just say localhost:3000/image.jpg

// specifying parameters for the pictures to be taken
var options = {
  width: 1280,
  height: 720,
  quality: 100,
  delay: 1,
  saveShots: true,
  output: "jpeg",
  device: false,
  callbackReturn: "location"
};

// create instance using the above options
var webcam = nodeWebCam.create(options);

// capture function that snaps <amount> images and saves them with the given name in a folder of the same name
var captureShot = (amount, i, name) => {
  // Make sure this resolves with a real URL to an image.
  return new Promise((resolve, reject) => {
    var dir = `./images/${name}`;
    // create folder if and only if it does not exist
    if (!fs.existsSync(dir)) {
      fs.mkdirSync(dir);
    }
    // capture the image
    webcam.capture(`${dir}/${name}${i}.${options.output}`, (err, data) => {
      if (err) {
        console.log(err);
        return reject(err);
      }
      console.log('Image created');
      i++;
      if (i <= amount) {
        captureShot(amount, i, name);
      }
      resolve('/path/to/image.jpg');
    });
  });
};

// call the capture function from the route
app.get('/', (req, res) => {
  captureShot(30, 1, 'robin')
    .then((response) => {
      // Whatever we resolve in captureShot, that's what response will contain
      res.send(`<img src="${response}"/>`);
    });
});

app.listen(3000, () => {
  console.log("Listening at port 3000....");
});
If you are trying to design a page with specific dynamic content, use a templating engine with Express such as EJS (http://ejs.co). Then you can render the page with dynamic objects and set an <img src="<%= image %>"/> dynamically for the user after taking the picture.
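A minimal sketch of that EJS approach (the view name, route and image path are assumptions for illustration, not part of the original code):
const express = require('express');
const app = express();

app.set('view engine', 'ejs');      // npm install ejs; templates live in ./views
app.use(express.static('images'));  // captured images served at the site root

app.get('/capture', (req, res) => {
  // imagePath would come from your capture logic, e.g. the path resolved by node-webcam
  const imagePath = '/robin/robin1.jpeg';
  // views/capture.ejs contains: <img src="<%= image %>" />
  res.render('capture', { image: imagePath });
});

app.listen(3000);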
Below is an example of a promise, using then, together with a static directory served by Express. You should get the idea of what I am saying.
function create() {
  return new Promise((resolve, reject) => {
    if (true) {
      resolve('https://example.com/image.jpg');
    } else {
      reject('Error');
    }
  });
}

create()
  .then((response) => {
    console.log(`<img src="${response}"/>`);
  })
  .catch((error) => {
    // Error
    console.log(error);
  });

I'm working on a solution based on Puppeteer (headless Google Chrome) which is super portable and streams video acceptably fast (40 fps at 800x600). It is super easy to install and use, and I'm already using it to capture video, audio and desktop in desktop apps based on GTK, Cairo, OpenGL, and Qt without problems.
https://www.npmjs.com/package/camera-capture
I'm familiar with OpenCV, but libraries based on it are honestly hard for new users to install. My project doesn't require any native dependency or client-server communication, although it may not be lightweight enough for embedding in small devices (Puppeteer is around 80 MB). Feedback is welcome! Thanks

Related

Persistent file storage across Electron app updates with electron-builder / electron-updater

When I update an Electron app using the electron-builder autoUpdater, all the file storage I defined is overwritten. What settings do I need to make these files persist?
Here is an MCVE example (for the main process):
const { app, BrowserView, BrowserWindow, ipcMain } = require('electron');
const log = require("electron-log");
const { autoUpdater } = require("electron-updater");
const fs = require('fs');
const path = require('path');

let win
let rand = String(Math.floor(Math.random() * 10000));
console.log('Generated random number', rand)

app.whenReady().then(async () => {
  win = new BrowserWindow({
    fullscreen: false,
    webPreferences: {
      nodeIntegration: false,
      preload: path.join(__dirname, 'preload.js')
    }
  })
  win.loadFile('index.html');

  setInterval(function() {
    let a = autoUpdater.checkForUpdatesAndNotify();
  }, 60000);

  let exists = false;
  fs.stat('persistentFile', (err, stats) => {
    if (err == null) {
      exists = true
    }
    if (!exists) {
      fs.writeFile('persistentFile', rand, (err) => {
        if (err) throw err;
        win.webContents.send('console_message', 'Persistent file has been created!', rand);
        console.log('Persistent file has been created!', rand);
      })
    } else {
      fs.readFile('persistentFile', (err, data) => {
        if (err) throw err;
        win.webContents.send('console_message', 'Persistent already exists', data);
        console.log('Persistent already exists', data);
      })
    }
  })

  win.webContents.send('console_message', 'Random No is ' + String(rand));
})
In this example I'd like the file persistentFile to persist across updates, so that it contains the number generated from the first version. However, at the moment, the file is overwritten every update.
How would you ensure persistent file storage across electron-builder autoupdates?
I managed to do this after looking at Cameron Nokes' excellent blog:
cameronnokes.com/blog/how-to-store-user-data-in-electron:
Storing user data in the operating system's designated location for a user's app data is the idiomatic way for native apps to persist user data because:
when we auto-update the app, our source files may get moved or deleted
changing or adding to an app's internal files will invalidate the code signature
To do this I used const userDataPath = app.getPath('userData'); to get a path to the OS's app-data storage location, and then changed the references to 'persistentFile' to String(userDataPath) + '/persistentFile', as sketched below:
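A minimal sketch of that change, reusing the structure of the MCVE above (the rand variable is recreated here only so the sketch stands on its own):
const { app } = require('electron');
const path = require('path');
const fs = require('fs');

const rand = String(Math.floor(Math.random() * 10000)); // as in the MCVE above

// Resolve the file inside the OS's per-user app-data directory,
// so electron-builder auto-updates never touch it.
const persistentFilePath = path.join(app.getPath('userData'), 'persistentFile');

fs.stat(persistentFilePath, (err) => {
  if (err) {
    // First run: create the file with the generated number
    fs.writeFile(persistentFilePath, rand, (writeErr) => {
      if (writeErr) throw writeErr;
      console.log('Persistent file has been created!', rand);
    });
  } else {
    // Subsequent runs (including after updates): read the original value back
    fs.readFile(persistentFilePath, 'utf8', (readErr, data) => {
      if (readErr) throw readErr;
      console.log('Persistent file already exists', data);
    });
  }
});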

Is it possible to not rebuild the whole app but generate new static files only in Nextjs?

I've just started using Next.js getStaticProps, and static files generated at build time are neat. But my content doesn't stay unchanged; I need the static files to be updated, and rebuilding the whole app every time there's a modification is costly. Is there a way to generate only the new static files? getServerSideProps turned out to take a long time to first byte.
Depending on the type of your content, Incremental Static Regeneration could be a good solution. But in my experience it introduced other problems when rendering catalog/category pages. As Next.js has no idea whether one part of your data depends on another, it may render an old catalog page with a link to a post or product which no longer exists. This usually happens in combination with the 'fallback' feature for dynamic routes.
It also won't refresh your page right after you make changes, so you will see the result only after some time.
A possible workaround is to load posts/products on the category page dynamically via AJAX. You will lose a part of the UX and SEO, but on the other hand this solution is relatively easy to maintain.
There is also an option, or rather a hack, to rebuild parts of the saved content by directly accessing the cache in a custom server. Add 'purge=1' to the address of the page you want refreshed.
const { ServerResponse, createServer } = require('http')
const next = require('next')
const { promises } = require('fs')
const path = require('path')

const dev = process.env.NODE_ENV !== 'production'
const app = next({ dev })
const handle = app.getRequestHandler()

const purgeByPath = async url => {
  const pathname = url == '/' ? '/index' : url
  const fullPathname = `.next/server/pages${pathname}`
  const fullPathHTML = `${fullPathname}.html`
  const fullPathJSON = `${fullPathname}.json`
  try {
    await promises.unlink(fullPathHTML)
    await promises.unlink(fullPathJSON)
  } catch (err) {
    console.log(`Could not unlink cache files: ${err}`)
  }
  await app.incrementalCache.cache.del(pathname)
  let req = new Request(process.env.BASE_HOST + url)
  let res = new ServerResponse(req)
  await app.renderToHTML(req, res, url, { _nextDataReq: true })
}

app.prepare().then(() => {
  createServer(async (req, res) => {
    const url = new URL(req.url, "http://localhost:3000/")
    if (req.method == 'POST' && req.url == '/purge-cache') {
      let raw = ''
      req.on('data', chunk => raw += chunk)
      req.on('end', async () => {
        const data = JSON.parse(raw)
        if (!data || !data.urls) {
          res.statusCode = 400
          res.end()
          return
        }
        for (let url of data.urls) {
          console.log(`Cleaning cache on: ${url}`)
          await purgeByPath(url)
        }
        res.end()
      })
    } else if (url.searchParams.get('purge') == '1') {
      await purgeByPath(req.url)
      res.end()
    } else {
      handle(req, res)
    }
  }).listen(3000, (err) => {
    if (err) throw err
    console.log(`> Ready on http://localhost:3000/`)
  })
})
The static cache consists of two parts:
Static cache files located at .next/server/pages, where each route will probably have two files, .html and .json. Most likely you'll need to delete both.
The in-memory cache. Once your app has cached a page and saved it to memory, it won't go to the files on the hard drive but will instead load the content from memory. So you need to delete that as well.
The cache implementation is not documented and might be swapped or removed in future versions.
This won't work on Vercel and has some issues with scaling. You should also add some kind of security token to the purge routes, as sketched below.
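A minimal sketch of such a check for the custom server above (the header name and environment variable are assumptions, not part of the original code):
// Shared-secret check for the purge endpoints
const PURGE_TOKEN = process.env.PURGE_TOKEN; // assumed env var holding the secret

function isPurgeAuthorized(req) {
  // Only allow purges that present the expected token in a custom header
  return Boolean(PURGE_TOKEN) && req.headers['x-purge-token'] === PURGE_TOKEN;
}

// Inside the createServer handler, before calling purgeByPath:
// if (!isPurgeAuthorized(req)) { res.statusCode = 403; res.end(); return; }

module.exports = { isPurgeAuthorized };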
If I understand it correctly, you are looking for Incremental Static Regeneration.
To enable it, you need to add a revalidate time in getStaticProps. Once your content has changed and the revalidation time has been reached, a new static page will be generated and served by the server. Depending on how often your content changes, you can adjust the revalidation time accordingly.
export async function getStaticProps() {
  const res = await fetch('https://.../posts')
  const posts = await res.json()
  return {
    props: {
      posts,
    },
    // Next.js will attempt to re-generate the page:
    // - When a request comes in
    // - At most once every 10 seconds
    revalidate: 10, // In seconds
  }
}
Reference
https://nextjs.org/docs/basic-features/data-fetching#incremental-static-regeneration

Electron: How to securely inject global variable into BrowserWindow / BrowserView?

I want to load an external webpage in Electron using BrowserView. It has pretty much the same API as BrowserWindow.
const currentWindow = remote.getCurrentWindow();
const view = new remote.BrowserView({
  webPreferences: {
    // contextIsolation: true,
    partition: 'my-view-partition',
    enableRemoteModule: false,
    nodeIntegration: false,
    preload: `${__dirname}/preload.js`,
    sandbox: true,
  },
});
view.setAutoResize({ width: true, height: true });
view.webContents.loadURL('http://localhost:3000');
In my preload.js file, I simply attach a variable to the global object.
process.once('loaded', () => {
  global.baz = 'qux';
});
The app running on localhost:3000 is a React app which references the value like this:
const sharedString = global.baz || 'Not found';
The problem is I have to comment out the setting contextIsolation: true when creating the BrowserView. This exposes a security vulnerability.
Is it possible to (one way - from Electron to the webpage) inject variables into a BrowserView (or BrowserWindow) while still using contextIsolation to make the Electron environment isolated from any changes made to the global environment by the loaded content?
Update:
One possible approach could be intercepting the network protocol, but I'm not sure about this 🤔
app.on('ready', () => {
  const { protocol } = session.fromPartition('my-partition')
  protocol.interceptBufferProtocol('https', (req, callback) => {
    if (req.uploadData) {
      // How to handle file uploads?
      callback()
      return
    }
    // This is electron.net, docs: https://electronjs.org/docs/api/net
    net
      .request(req)
      .on('response', (res) => {
        const chunks = []
        res.on('data', (chunk) => {
          chunks.push(Buffer.from(chunk))
        })
        res.on('end', () => {
          const blob = Buffer.concat(chunks)
          const type = res.headers['content-type'] || []
          if (type.includes('text/html') && blob.includes('<head>')) {
            // FIXME?
            const pos = blob.indexOf('<head>')
            // inject contains the Buffer with the injected HTML script
            callback(Buffer.concat([blob.slice(0, pos), inject, blob.slice(pos)]))
          } else {
            callback(blob)
          }
        })
      })
      .on('error', (err) => {
        console.error('error', err)
        callback()
      })
      .end()
  })
})
After doing some digging, I found a few pull requests for Electron that detail the issue you are having. The first describes a reproducible example very similar to the problem you are describing.
Expected Behavior
https://electronjs.org/docs/tutorial/security#3-enable-context-isolation-for-remote-content
A preload script should be able to attach anything to the window or document with contextIsolation: true.
Actual behavior
Anything attached to the window in the preload.js just disappears in the renderer.
It seems the final comment explains that the expected behavior no longer works
It was actually possible until recently, a PR with Isolated Worlds has changed the behavior.
The second has a user suggest what they have found to be their solution:
After many days of research and fiddling with the IPC, I've concluded that the best way is to go the protocol route.
I looked at the docs for BrowserWindow and BrowserView as well as an example that shows the behavior that you desire, but these PRs suggest this is no longer possible (along this route).
Possible Solution
Looking into the documentation, the webContents object you get from view.webContents has the function executeJavaScript, so you could try the following to set the global variable.
...
view.setAutoResize({ width: true, height: true });
view.webContents.loadURL('http://localhost:3000');
view.webContents.executeJavaScript("global.baz = 'qux';");
...
Other answers are outdated; use contextBridge. Be sure to use sendToHost() instead of send():
// Preload (Isolated World)
const { contextBridge, ipcRenderer } = require('electron')

contextBridge.exposeInMainWorld(
  'electron',
  {
    doThing: () => ipcRenderer.sendToHost('do-a-thing')
  }
)

// Renderer (Main World)
window.electron.doThing()
So, executeJavaScript as suggested by Zapparatus ended up being part of the solution.
This is what's going on in renderer.js.
view.webContents.executeJavaScript(`
  window.communicator = {
    request: function(data) {
      const url = 'prefix://?data=' + encodeURIComponent(JSON.stringify(data))
      const req = new XMLHttpRequest()
      req.open('GET', url)
      req.send();
    },
    receive: function(data) {
      alert('got: ' + JSON.stringify(data))
    }
  };
`)

const setContent = data => view.webContents.executeJavaScript(
  `window.communicator.receive(${JSON.stringify(data)})`
)

ipcRenderer.on('communicator', (event, message) => {
  setContent(`Hello, ${message}!`)
})
We ended up setting up a custom protocol, similar to how it's been done here. In your main.js file, set up the following:
const { app, session, protocol } = require('electron')
const { appWindows } = require('./main/app-run')
const { URL } = require('url')

protocol.registerSchemesAsPrivileged([
  {
    scheme: 'prefix',
    privileges: {
      bypassCSP: true, // ignore CSP, we won't need to patch CSP
      secure: true // allow requests from https context
    }
  }
])

app.on('ready', () => {
  const sess = session.fromPartition('my-view-partition')
  // https://electronjs.org/docs/tutorial/security#4-handle-session-permission-requests-from-remote-content
  sess.setPermissionRequestHandler((webContents, permission, callback) => {
    // Denies the permissions request
    const decision = false
    return callback(decision)
  })
  sess.protocol.registerStringProtocol('prefix', (req, callback) => {
    const url = new URL(req.url)
    try {
      const data = JSON.parse(url.searchParams.get('data'))
      appWindows.main.webContents.send('prefix', data)
    } catch (e) {
      console.error('Could not parse prefix request!')
    }
    const response = {
      mimeType: 'text/plain',
      data: 'ok'
    }
    callback(response)
  })
})
No preload.js or postMessage needed.

Saving JSON in Electron

I am building an app using Electron. In this app, I am building a data structure using JSON. My data structure looks like this:
{
  items: [
    { id: 1, name: 'football' },
    { id: 2, name: 'soccer ball' },
    { id: 3, name: 'basketball' }
  ]
}
I want to save this JSON to a file called "data.json". I want to save it to a file because I want to load it the next time the application starts. My challenge is, I do not know how to save the data. In fact, I'm not sure where I should even save the file. Do I save it in the same directory as the app? Or is there some cross-platform approach I should use?
Currently, I have the following:
saveClick: function() {
  var json = JSON.stringify(this.data);
  // assume json matches the JSON provided above.
  // Now, I'm not sure how to actually save the file.
}
So, how / where do I save JSON to the local file system for use at a later time?
Electron lacks an easy way to persist and read user settings for your application. electron-json-storage implements an API somewhat similar to localStorage to write and read JSON objects to/from the operating system's application data directory, as defined by app.getPath('userData').
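A minimal sketch of how electron-json-storage is typically used (the key name 'data' and the sample object are just examples):
const storage = require('electron-json-storage');

// Persist the object under the key 'data' (stored as data.json in app.getPath('userData'))
storage.set('data', { items: [{ id: 1, name: 'football' }] }, (error) => {
  if (error) throw error;

  // Read it back later, e.g. on the next application start
  storage.get('data', (error, data) => {
    if (error) throw error;
    console.log(data.items);
  });
});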
Electron uses Node.js as its core. You can use the following:
var fs = require("fs");
read_file = function(path){
return fs.readFileSync(path, 'utf8');
}
write_file = function(path, output){
fs.writeFileSync(path, output);
}
For write_file(), you can pass "document.txt" as the path and it will write it to the same directory as the HTML file it was run from. You can also put in a full path like "C:/Users/usern/document.txt" and it will write to the specific location you want.
Also, you can choose any file extension you want (i.e. ".txt", ".js", ".json", etc.). You can even make up your own!
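A short usage sketch building on the helpers above, aimed at the question's data.json (assuming this runs in the main process, where app is available; the userData location is a choice, not a requirement):
const { app } = require('electron');
const path = require('path');

// Keep the file in the per-user data directory so it survives app updates
const dataPath = path.join(app.getPath('userData'), 'data.json');

const data = { items: [{ id: 1, name: 'football' }] }; // structure from the question

write_file(dataPath, JSON.stringify(data, null, 2)); // helper defined above
const reloaded = JSON.parse(read_file(dataPath));    // helper defined above
console.log(reloaded.items);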
I wrote a simple library that you can use with a simple interface. It also creates subdirectories and works with promises/callbacks.
It will save the data with app.getPath("appData") as the root folder.
https://github.com/ran-y/electron-storage
Installation
$ npm install --save electron-storage
Usage
const storage = require('electron-storage');
API
storage.get(filePath, (err, data) => {
  if (err) {
    console.error(err)
  } else {
    console.log(data);
  }
});

storage.get(filePath)
  .then(data => {
    console.log(data);
  })
  .catch(err => {
    console.error(err);
  });

storage.set(filePath, data, (err) => {
  if (err) {
    console.error(err)
  }
});

storage.set(filePath, data)
  .then(data => {
    console.log(data);
  })
  .catch(err => {
    console.error(err);
  });
const fs = require('fs');

let student = {
  name: 'Mike',
  age: 23,
  gender: 'Male',
  department: 'English',
  car: 'Honda'
};

let data = JSON.stringify(student, null, 2);

fs.writeFile('student-3.json', data, (err) => {
  if (err) throw err;
  console.log('Data written to file');
});

console.log('This is after the write call');
There are multiple steps:
Step 1: As of version 5, the default for nodeIntegration changed from true to false. You can enable it when creating the BrowserWindow:
const createWindow = () => {
  const win = new BrowserWindow({
    width: 1000,
    height: 800,
    webPreferences: {
      nodeIntegration: true,
      contextIsolation: false,
    }
  })
}
Step 2:
function writetofile() {
  let configsettings = {
    break: output.innerHTML,
    alwaysonoff: toggleoutput.innerHTML,
  };
  let settings_data = JSON.stringify(configsettings, null, 2);
  const fs = require("fs");
  fs.writeFileSync("assets/configs/settings.json", settings_data);
}

SVG + HTML into PDF in Ruby on Rails or JAVASCRIPT [duplicate]

I'm looking to create a printable PDF version of my website's webpages. Something like express.render(), only it renders the page as a PDF.
Does anyone know a Node module that does that?
If not, how would you go about implementing one? I've seen some methods talk about using a headless browser like PhantomJS, but I'm not sure what the flow is.
Extending upon Mustafa's answer.
A) Install http://phantomjs.org/ and then
B) Install the phantom node module https://github.com/amir20/phantomjs-node
C) Here is an example of rendering a PDF:
var phantom = require('phantom');

phantom.create().then(function(ph) {
  ph.createPage().then(function(page) {
    page.open("http://www.google.com").then(function(status) {
      page.render('google.pdf').then(function() {
        console.log('Page Rendered');
        ph.exit();
      });
    });
  });
});
EDIT: Silent printing that PDF
java -jar pdfbox-app-2.0.2.jar PrintPDF -silentPrint C:\print_mypdf.pdf
PhantomJS is a headless WebKit server that will load any web page and render it in memory. Although you might not be able to see it, there is a Screen Capture feature with which you can export the current view as PNG, PDF, JPEG or GIF. Have a look at the examples from the PhantomJS documentation; a minimal sketch is below.
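A minimal PhantomJS-side script in that spirit (run it with the phantomjs binary; the URL, paper size and output name are placeholders):
// rasterize-sketch.js - run as: phantomjs rasterize-sketch.js
var page = require('webpage').create();

// A4 paper size so the render comes out as a paginated PDF
page.paperSize = { format: 'A4', orientation: 'portrait', margin: '1cm' };

page.open('http://www.google.com', function (status) {
  if (status !== 'success') {
    console.log('Unable to load the page');
    phantom.exit(1);
  } else {
    page.render('output.pdf'); // the extension decides the format (pdf/png/jpeg/gif)
    phantom.exit();
  }
});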
If you want to export HTML to PDF, you have many options, even without Node.
Option 1: Have a button on your HTML page that calls the window.print() function and use the browser's native HTML-to-PDF. Use media queries to make your HTML page look good as a PDF, and you also have the beforeprint and afterprint events that you can use to make changes to your page before printing.
Option 2: html2canvas or rasterizeHTML. Convert your HTML to a canvas, then call toDataURL() on the canvas object to get the image, and use a JavaScript library like jsPDF to add that image to a PDF file (see the sketch after this list). The disadvantage of this approach is that the PDF doesn't become editable. If you want data extracted from the PDF, there are different ways for that.
Option 3: @Jozzhard's answer.
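A minimal browser-side sketch of Option 2 (it assumes html2canvas and jsPDF are already loaded on the page; the exact globals depend on how you include those libraries):
// Rasterize the page to a canvas, then embed the image in a PDF
html2canvas(document.body).then(function (canvas) {
  var imgData = canvas.toDataURL('image/png');

  // jsPDF: A4 portrait, measured in millimetres (210 x 297)
  var doc = new jsPDF('p', 'mm', 'a4');
  var width = 190;                                    // leave 10 mm margins
  var height = canvas.height * width / canvas.width;  // keep the aspect ratio
  doc.addImage(imgData, 'PNG', 10, 10, width, height);
  doc.save('page.pdf');
});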
Try using Puppeteer to create a PDF from HTML.
Example from here https://github.com/chuongtrh/html_to_pdf
Or https://github.com/GoogleChrome/puppeteer
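A minimal Puppeteer sketch (the URL and output path are placeholders):
const puppeteer = require('puppeteer');

(async () => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();

  // Load the page and wait until network activity settles
  await page.goto('https://example.com', { waitUntil: 'networkidle0' });

  // Render it to an A4 PDF, keeping background colors and images
  await page.pdf({ path: 'example.pdf', format: 'A4', printBackground: true });

  await browser.close();
})();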
The best solution I found is html-pdf. It's simple and works with big HTML.
https://www.npmjs.com/package/html-pdf
It's as simple as that:
var pdf = require('html-pdf');

pdf.create(html, options).toFile('./pdfname.pdf', function(err, res) {
  if (err) {
    console.log(err);
  }
});
NOTE:
This package has been deprecated
Author message: Please migrate your projects to a newer library like puppeteer
Package
I used html-pdf
It's easy to use and allows you not only to save a PDF as a file, but also to pipe the PDF content to a WriteStream (so I could stream it directly to Google Storage to save my reports there).
Using css + images
It takes CSS into account. The only problem I faced was that it ignored my images. The solution I found was to replace the URL in the src attribute value with base64, e.g.
<img src="data:image/png;base64,iVBOR...kSuQmCC">
You can do it in your code (see the sketch below) or use one of the online converters, e.g. https://www.base64-image.de/
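A small Node-side sketch of that replacement (the file name and the sample html fragment are placeholders for illustration):
const fs = require('fs');

// Read the image and build a data URI that can be inlined into the src attribute
const base64 = fs.readFileSync('./logo.png').toString('base64');
const dataUri = `data:image/png;base64,${base64}`;

// Replace the original URL in the html string before passing it to html-pdf
const html = '<img src="./logo.png">'; // your html fragment
const htmlWithInlineImages = html.replace('./logo.png', dataUri);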
Compile valid html code from html fragment + css
I had to get a fragment of my HTML document (I just applied the .html() method on a jQuery selector).
Then I read the content of the relevant CSS file.
Using these two values (stored in the variables html and css respectively) I compiled valid HTML code using a template string:
var htmlContent = `
<!DOCTYPE html>
<html>
  <head>
    <style>
      ${css}
    </style>
  </head>
  <body id=direct-sellers-bill>
    ${html}
  </body>
</html>`
and passed it to the create method of html-pdf, as sketched below.
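A minimal sketch of that final step (the options and output path are just examples):
var pdf = require('html-pdf');

// htmlContent is the template string compiled above
pdf.create(htmlContent, { format: 'A4' }).toFile('./report.pdf', function (err, res) {
  if (err) return console.log(err);
  console.log(res.filename); // path of the generated PDF
});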
Create PDF from External URL
Here's an adaptation of the previous answers which utilizes html-pdf, but also combines it with requestify so it works with an external URL:
Install your dependencies
npm i -S html-pdf requestify
Then, create the script:
//MakePDF.js
var pdf = require('html-pdf');
var requestify = require('requestify');

var externalURL = 'http://www.google.com';

requestify.get(externalURL).then(function (response) {
  // Get the raw HTML response body
  var html = response.body;
  var config = { format: 'A4' }; // or format: 'letter' - see https://github.com/marcbachmann/node-html-pdf#options
  // Create the PDF
  pdf.create(html, config).toFile('pathtooutput/generated.pdf', function (err, res) {
    if (err) return console.log(err);
    console.log(res); // { filename: '/pathtooutput/generated.pdf' }
  });
});
Then you just run from the command line:
node MakePDF.js
Watch your beautiful pixel-perfect PDF be created for you (for free!)
For those who don't want to install PhantomJS along with an instance of Chrome/Firefox on their server, or because the PhantomJS project is currently suspended, here's an alternative.
You can externalize the conversion to an API to do the job. Many exist and vary, but what you'll get is a reliable service with up-to-date features (I'm thinking CSS3, web fonts, SVG, and canvas compatibility).
For instance, with PDFShift (disclaimer, I'm the founder), you can do this simply by using the request package:
const request = require('request')

request.post(
  'https://api.pdfshift.io/v2/convert/',
  {
    'auth': {'user': 'your_api_key'},
    'json': {'source': 'https://www.google.com'},
    'encoding': null
  },
  (error, response, body) => {
    if (response === undefined) {
      // `reject` assumes this callback is wrapped in a Promise executor
      return reject({'message': 'Invalid response from the server.', 'code': 0, 'response': response})
    }
    if (response.statusCode == 200) {
      // Do what you want with `body`, which contains the binary PDF
      // Like returning it to the client - or saving it as a file locally or on AWS S3
      return true
    }
    // Handle any errors that might have occurred
  }
);
Use html-pdf
var fs = require('fs');
var pdf = require('html-pdf');

var html = fs.readFileSync('./test/businesscard.html', 'utf8');
var options = { format: 'Letter' };

pdf.create(html, options).toFile('./businesscard.pdf', function(err, res) {
  if (err) return console.log(err);
  console.log(res); // { filename: '/app/businesscard.pdf' }
});
const fs = require('fs')
const path = require('path')
const utils = require('util')
const puppeteer = require('puppeteer')
const hb = require('handlebars')

const readFile = utils.promisify(fs.readFile)

async function getTemplateHtml() {
  console.log("Loading template file in memory")
  try {
    const invoicePath = path.resolve("./invoice.html");
    return await readFile(invoicePath, 'utf8');
  } catch (err) {
    return Promise.reject("Could not load html template");
  }
}

async function generatePdf() {
  let data = {};
  getTemplateHtml()
    .then(async (res) => {
      // Now we have the html code of our template in the res object
      // you can check by logging it on the console
      // console.log(res)
      console.log("Compiling the template with handlebars")
      const template = hb.compile(res, { strict: true });
      // we have compiled our code with handlebars
      const result = template(data);
      // We can use this to add dynamic data to our handlebars template at run time from a database or API as needed. You can read the official docs to learn more: https://handlebarsjs.com/
      const html = result;
      // we are using headless mode
      const browser = await puppeteer.launch();
      const page = await browser.newPage()
      // We set the page content to the html generated by handlebars
      await page.setContent(html)
      // we use the pdf function to generate the pdf in the same folder as this file.
      await page.pdf({ path: 'invoice.pdf', format: 'A4' })
      await browser.close();
      console.log("PDF Generated")
    })
    .catch(err => {
      console.error(err)
    });
}

generatePdf();
In case you arrive here looking for a way to make PDF from view templates in Express, a colleague and I made express-template-to-pdf
which allows you to generate PDF from whatever templates you're using in Express - Pug, Nunjucks, whatever.
It depends on html-pdf and is written to be used in your routes just like you use res.render:
const pdfRenderer = require('#ministryofjustice/express-template-to-pdf')
app.set('views', path.join(__dirname, 'views'))
app.set('view engine', 'pug')
app.use(pdfRenderer())
If you've used res.render then using it should look obvious:
app.use('/pdf', (req, res) => {
  res.renderPDF('helloWorld', { message: 'Hello World!' });
})
You can pass options through to html-pdf to control the PDF document page size, etc.
Merely building on the excellent work of others.
In my view, the best way to do this is via an API so that you do not add a large and complex dependency into your app that runs unmanaged code and needs to be frequently updated.
Here is a simple way to do this, which is free for 800 requests/month:
var CloudmersiveConvertApiClient = require('cloudmersive-convert-api-client');
var defaultClient = CloudmersiveConvertApiClient.ApiClient.instance;

// Configure API key authorization: Apikey
var Apikey = defaultClient.authentications['Apikey'];
Apikey.apiKey = 'YOUR API KEY';

var apiInstance = new CloudmersiveConvertApiClient.ConvertWebApi();

var input = new CloudmersiveConvertApiClient.HtmlToPdfRequest(); // HtmlToPdfRequest | HTML to PDF request parameters
input.Html = "<b>Hello, world!</b>";

var callback = function(error, data, response) {
  if (error) {
    console.error(error);
  } else {
    console.log('API called successfully. Returned data: ' + data);
  }
};

apiInstance.convertWebHtmlToPdf(input, callback);
With the above approach you can also install the API on-premises or on your own infrastructure if you prefer.
In addition to @Jozzhart's answer, you can make a local HTML file, serve it with Express, and use Phantom to make a PDF from it; something like this:
const exp = require('express');
const app = exp();
const pth = require("path");
const phantom = require('phantom');
const ip = require("ip");

const PORT = 3000;
const PDF_SOURCE = "index"; //index.html
const PDF_OUTPUT = "out"; //out.pdf

const source = pth.join(__dirname, "", `${PDF_SOURCE}.html`);
const output = pth.join(__dirname, "", `${PDF_OUTPUT}.pdf`);

app.use("/" + PDF_SOURCE, exp.static(source));
app.use("/" + PDF_OUTPUT, exp.static(output));
app.listen(PORT);

let makePDF = async (fn) => {
  let local = `http://${ip.address()}:${PORT}/${PDF_SOURCE}`;
  phantom.create().then((ph) => {
    ph.createPage().then((page) => {
      page.open(local).then(() =>
        page.render(output).then(() => { ph.exit(); fn() })
      );
    });
  });
}

makePDF(() => {
  console.log("PDF Created From Local File");
  console.log("PDF is downloadable from link:");
  console.log(`http://${ip.address()}:${PORT}/${PDF_OUTPUT}`);
});
and index.html can be anything:
<h1>PDF HEAD</h1>
LINK
https://www.npmjs.com/package/dynamic-html-pdf
I use dynamic-html-pdf; it is simple and also able to pass dynamic variables to the HTML:
var fs = require('fs');
var pdf = require('dynamic-html-pdf');

var html = fs.readFileSync('./uploads/your-html-tpl.html', 'utf8');

var options = {
  format: "A4",
  orientation: "portrait"
  // border: "10mm"
};

var document = {
  type: 'file', // 'file' or 'buffer'
  template: html,
  context: {
    'your_key': 'your_values'
  },
  path: '/pdf/1.pdf' // pdf save path
};

pdf.create(document, options)
  .then(res => {
    console.log(res)
  }).catch(error => {
    console.error(error)
  });
In the HTML template you can use {{your_key}}, as sketched below.
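A tiny illustration of what the template and its rendered output might look like (the markup is hypothetical, shown as a string for illustration):
// Hypothetical contents of ./uploads/your-html-tpl.html:
const exampleTemplate = '<html><body><h1>{{your_key}}</h1></body></html>';

// With context { your_key: 'your_values' }, dynamic-html-pdf fills in the
// handlebars-style placeholder, so the PDF body contains: <h1>your_values</h1>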
I've written the hpdf lib for generating PDFs from HTML or a URL.
It supports a configurable pool of headless browsers (as resources) in the background.
import fs from 'fs';
import { PdfGenerator } from 'hpdf';

const start = async () => {
  const generator = new PdfGenerator({
    min: 3,
    max: 10,
  });

  const helloWorld = await generator.generatePDF('<html lang="html">Hello World!</html>');
  const github = await generator.generatePDF(new URL('https://github.com/frimuchkov/hpdf'));

  await fs.promises.writeFile('./helloWorld.pdf', helloWorld);
  await fs.promises.writeFile('./github.pdf', github);

  await generator.stop();
}

start();
I wanted to add to this since I did not see the option to create PDFs from Liquid templates yet, but the solution works with normal HTML or URLs as well.
Let's say this is our HTML template. It could be anything really, but notice that the code includes double curly braces. The key inside the braces will be looked up in the liquid_data parameter of the request and replaced by the value.
<html>
  <body>
    <h1>{{heading}}</h1>
    <img src="{{img_url}}"/>
  </body>
</html>
The corresponding liquid_data object looks like this:
{
  "heading": "Hi Stackoverflow!",
  "img_url": "https://stackoverflow.design/assets/img/logos/so/logo-stackoverflow.svg"
}
This is the example I want to create a PDF for. Using pdfEndpoint and the Playground, creating a PDF from the template above is very simple.
const axios = require("axios");
const options = {
method: "POST",
url: "https://api.pdfendpoint.com/v1/convert",
headers: {
"Content-Type": "application/json",
"Authorization": "Bearer SIGN-UP-FOR-KEY"
},
data: {
"delivery_mode": "json",
"page_size": "A4",
"margin_top": "1cm",
"margin_bottom": "1cm",
"margin_left": "1cm",
"margin_right": "1cm",
"orientation": "vertical",
"html": "<html><body> <h1>{{heading}}</h1> <img src=\"{{img_url}}\"/> </body>\</html>",
"parse_liquid": true,
"liquid_data": "{ \"heading\":\"Hi Stackoverflow!\", \"img_url\":\"https://stackoverflow.design/assets/img/logos/so/logo-stackoverflow.svg\"}"
}
};
axios.request(options).then(function (response) {
console.log(response.data);
}).catch(function (error) {
console.error(error);
});
The service will then return the rendered PDF.
You can also use the pdf-creator-node package.
Package URL: https://www.npmjs.com/package/pdf-creator-node
