NodeJS - How to download from a blob? - javascript

How can I return a file from a BLOB column using NodeJS?
I'm using the oracledb library to handle the database operations and I have the following code:
async function getFile(req, res) {
  let filename = req.params.filename;
  let file = await selectFileFromDb(filename);
  file = file.rows[0][0]; // Column that contains the BLOB content
  // I would like to return something like this
  res.download(file);
}
What should I do to read the BLOB content from the column and return as a download to the requester?
Thank you.

You have to send a content header with the type of file to be downloaded, then send the buffer (assuming what you got from the DB is a buffer) in the body, and finally end the response. Here is a sample:
async function getFile(req, res) {
  let filename = req.params.filename;
  let file = await selectFileFromDb(filename);
  file = file.rows[0][0]; // Column that contains the BLOB content
  res.setHeader('Content-Type', 'application/octet-stream');
  res.setHeader('Content-Disposition', `attachment; filename="${filename}"`);
  res.setHeader('Content-Length', file.length);
  res.write(file, 'binary');
  res.end();
}
HOW TO GET THE BLOB CONTENT AS A BUFFER
Do not forget to set the oracledb.fetchAsBuffer property:
const oracledb = require('oracledb');
oracledb.fetchAsBuffer = [oracledb.BLOB];
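The question does not show selectFileFromDb, so here is a minimal sketch of what it might look like with oracledb; the connection settings and the FILES table with NAME and CONTENT columns are assumptions for illustration, not something given above:
const oracledb = require('oracledb');
oracledb.fetchAsBuffer = [oracledb.BLOB];

// Hypothetical helper: table and column names are assumed, adjust to your schema.
async function selectFileFromDb(filename) {
  const connection = await oracledb.getConnection({
    user: 'user',
    password: 'password',
    connectString: 'localhost/XEPDB1'
  });
  try {
    // CONTENT is the BLOB column; fetchAsBuffer makes it come back as a Buffer
    return await connection.execute(
      'SELECT content FROM files WHERE name = :name',
      [filename]
    );
  } finally {
    await connection.close();
  }
}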

Related

How to read a large csv as a stream

I am using @aws-sdk/client-s3 to read a json file from S3, take the contents and dump it into dynamodb. This all currently works fine using:
const data = await (await new S3Client(region).send(new GetObjectCommand(bucketParams)));
And then deserialising the response body etc.
However, I'm looking to migrate to the jsonlines format, effectively csv, in the sense that it needs to be streamed in line by line, or in chunks of lines, and processed. I can't seem to find a way of doing this that doesn't load the entire file into memory (using response.text() etc).
Ideally, I would like to pipe the response into a createReadStream, and go from there.
I found this example with createReadStream() from the fs module in node.js:
import fs from 'fs';
function read() {
  let data = '';
  const readStream = fs.createReadStream('business_data.csv', 'utf-8');
  readStream.on('error', (error) => console.log(error.message));
  readStream.on('data', (chunk) => data += chunk);
  readStream.on('end', () => console.log('Reading complete'));
}
read();
You can modify it for your use. Hope this helps.
You can connect to your S3 bucket like this:
var s3 = new AWS.S3({apiVersion: '2006-03-01'});
var params = {Bucket: 'myBucket', Key: 'myImageFile.jpg'};
var file = require('fs').createWriteStream('/path/to/file.jpg');
s3.getObject(params).createReadStream().pipe(file);
see here
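With @aws-sdk/client-s3 (v3), the GetObjectCommand response Body is a Node readable stream, so one way to process a jsonlines file line by line without holding it all in memory is to feed it to readline. A rough sketch; the region, bucket, key and the DynamoDB write are placeholders, not taken from the question:
const { S3Client, GetObjectCommand } = require('@aws-sdk/client-s3');
const readline = require('readline');

async function processJsonLines(bucket, key) {
  const s3 = new S3Client({ region: 'us-east-1' }); // region is a placeholder
  const { Body } = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key }));

  // In Node.js, Body is a stream.Readable, so readline can consume it chunk by chunk
  const rl = readline.createInterface({ input: Body, crlfDelay: Infinity });

  for await (const line of rl) {
    if (!line.trim()) continue;
    const record = JSON.parse(line); // one JSON object per line (jsonlines)
    // ...write `record` to DynamoDB here
  }
}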

Downloading Image locally from GitHub Raw link using fs.writeFileSync() JS

Currently trying to download an image from a GitHub raw link locally. Everything seems to work, the fetch goes through with a 200 OK response, however, I don't understand how to store the image itself:
const rawGitLink = "https://raw.githubusercontent.com/cardano-foundation/CIPs/master/CIP-0001/CIP_Flow.png"
const folder = "/Folder"
const imageName = "/Test"
const imageResponse = await axios.get(rawGitLink)
fs.writeFileSync(___dirname + folder + imageName, imageResponse, (err) => {
//Error handling
}
)
Four problems had to be fixed:
The image name must include the .png extension in this case
The response must be requested in the correct format, as a buffer, for an image
You must write the response data, not the response object itself
__dirname only needs two underscores
const rawGitLink = "https://raw.githubusercontent.com/cardano-foundation/CIPs/master/CIP-0001/CIP_Flow.png"
const folder = "/Folder"
const imageName = "/Test.png"
const imageResponse = await axios.get(rawGitLink, { responseType: 'arraybuffer' });
fs.writeFileSync(__dirname + folder + imageName, imageResponse.data)
Axios returns a special object: https://github.com/axios/axios#response-schema
let {data} = await axios.get(...)
await fs.writeFile(filename, data) // you can use fs.promises instead of sync
As @Leau said, you should include the extension in the filename
Another suggestion is to use the path module to create the filename:
filename = path.join(__dirname, "/Folder", "Test.png")
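Putting those suggestions together, a sketch of the whole download might look like this; the folder and file names are just the ones from the question:
const axios = require('axios');
const fs = require('fs/promises');
const path = require('path');

async function downloadImage() {
  const rawGitLink = 'https://raw.githubusercontent.com/cardano-foundation/CIPs/master/CIP-0001/CIP_Flow.png';
  // Ask axios for a raw buffer instead of trying to parse the body
  const imageResponse = await axios.get(rawGitLink, { responseType: 'arraybuffer' });
  // Build the target path with the .png extension included
  const filename = path.join(__dirname, 'Folder', 'Test.png');
  await fs.writeFile(filename, imageResponse.data);
}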

Express.js and PDFKit: saving as file works, stream produces empty file

I am trying to create a PDF on a server with express.js and PDFKit. If I save the PDF as a file, it works as expected. However, if I try to send the content to the browser as a stream, I get a PDF, but it is empty (the 3 text blocks are missing).
function createPDF(req, res) {
  return new Promise(resolve => {
    res.setHeader('content-type', 'application/pdf');
    const doc = new PDFDocument;
    const stream = doc.pipe(res);
    //const stream = doc.pipe(fs.createWriteStream('sample.pdf'));
    // end res if pdf document finished
    stream.on('finish', () => {
      res.end();
      resolve(true);
    });
    // fill pdf
    doc.text('text');
    doc.text('text');
    doc.text('text');
    doc.end();
  });
}
app.use('/pdf-download', new PdfDownload(options, app), createPDF);
PdfDownload is my class, which gets the data for the PDF from my database. I'll later access that data via res.data.
Thanks for your help
Marc

not able to fetch text data from web url using javascript

I need to extract text data from web url (http://www.africau.edu/images/default/sample.pdf)
I used two node modules.
1) crawler-request
it('Read Pdf Data using crawler', function () {
  const crawler = require('crawler-request');
  function response_text_size(response) {
    response["size"] = response.text.length;
    return response;
  }
  crawler("http://www.africau.edu/images/default/sample.pdf", response_text_size).then(function (response) {
    // handle response
    console.log("Response = " + response.size);
  });
});
When I run this, it does not print anything to the console.
2) pfd2json/pdfparser
it('Read Data from url', function () {
  var request = require('request');
  var pdf = require('pfd2json/pdfparser');
  var fs = require('fs');
  var pdfUrl = "http://www.africau.edu/images/default/sample.pdf";
  let databuffer = fs.readFileSync(pdfUrl);
  pdf(databuffer).then(function (data) {
    var arr:Array<String> = data.text;
    var n = arr.includes('Thursday 02 May');
    console.log("Print Array " + n);
  });
});
Failed: ENOENT: no such file or directory, open 'http://www.africau.edu/images/default/sample.pdf'
I am able to access the data from a local path but not able to extract it from the URL.
The issue here is that you are using the fs module (File System) to read a file on a remote server.
You also mistyped the pdf2json module, which should give you an error?
You did require the request module. This module will make it possible to access that remote file. Here's one way to do this:
it('Read Data from url', function () {
  var request = require('request');
  var PDFParser = require('pdf2json');
  var pdfUrl = 'http://unec.edu.az/application/uploads/2014/12/pdf-sample.pdf';
  var pdfParser = new PDFParser(this, 1);
  // executed if the parser fails for any reason
  pdfParser.on("pdfParser_dataError", errData => console.error(errData.parserError));
  // executed when the parser finished
  pdfParser.on("pdfParser_dataReady", pdfData => console.log(pdfParser.getRawTextContent()));
  // request to get the pdf's file content then call the pdf parser on the retrieved buffer
  request({ url: pdfUrl, encoding: null }, (error, response, body) => pdfParser.parseBuffer(body));
});
This will make it possible to load the remote .pdf file in your program.
I'd recommend looking at the pdf2json documentation if you want to do more. This will simply output the textual content of the .pdf file when the parser has completed reading data.
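Since the request module is deprecated, the same idea also works with axios (already used elsewhere on this page) by asking for the raw bytes and handing them to parseBuffer. A rough equivalent, assuming axios is installed:
const axios = require('axios');
const PDFParser = require('pdf2json');

async function readPdfText(pdfUrl) {
  const pdfParser = new PDFParser(null, 1);

  // executed if the parser fails for any reason
  pdfParser.on('pdfParser_dataError', errData => console.error(errData.parserError));
  // executed when the parser has finished
  pdfParser.on('pdfParser_dataReady', () => console.log(pdfParser.getRawTextContent()));

  // Fetch the remote PDF as a Buffer, then feed it to the parser
  const response = await axios.get(pdfUrl, { responseType: 'arraybuffer' });
  pdfParser.parseBuffer(response.data);
}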

Upload a file stream to S3 without a file and from memory

I'm trying to create a csv from a string and upload it to my S3 bucket. I don't want to write a file. I want it all to be in memory.
I don't want to read from a file to get my stream. I would like to make a stream without a file. I would like to use something like createReadStream, but instead of a file, pass a string with my stream's contents.
var AWS = require('aws-sdk'),
    zlib = require('zlib'),
    fs = require('fs'),
    s3Stream = require('s3-upload-stream')(new AWS.S3());

// Set the client to be used for the upload.
AWS.config.loadFromPath('./config.json');

// Create the streams
var read = fs.createReadStream('/path/to/a/file');
var upload = s3Stream.upload({
  "Bucket": "bucket-name",
  "Key": "key-name"
});

// Handle errors.
upload.on('error', function (error) {
  console.log(error);
});

upload.on('part', function (details) {
  console.log(details);
});

upload.on('uploaded', function (details) {
  console.log(details);
});

read.pipe(upload);
You can create a Readable stream and push your string directly to it, which can then be consumed by your s3Stream instance.
const Readable = require('stream').Readable
let data = 'this is your data'
let read = new Readable()
read.push(data) // Push your data string
read.push(null) // Signal that you're done writing
// Create upload s3Stream instance and attach listeners go here
read.pipe(upload)
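If you don't need multipart streaming at all, the plain aws-sdk v2 client can also take the string (or a Buffer) directly as the Body, which skips the stream entirely. A minimal sketch, with the bucket and key names as placeholders:
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function uploadCsvString(csvString) {
  // Body can be a string or Buffer, so nothing ever touches the filesystem
  await s3.upload({
    Bucket: 'bucket-name',
    Key: 'key-name.csv',
    Body: csvString,
    ContentType: 'text/csv'
  }).promise();
}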
