Buffer not recognized as Buffer on Node.js with Sails.js - javascript

I'm trying to get the buffer from a blob being sent to my SailsJs server.
An example of what is being sent to the server is this:
Blob(3355) {size: 3355, type: "video/x-matroska;codecs=avc1,opus"}
Once on the server side, I do the following:
let body = new Array(0);
let buffer;
let readable = req.file('recordingPart');
readable.on('data', (chunk) => {
  body.push(new Buffer(chunk));
});
readable.on('end', () => {
  buffer = Buffer.concat(body);
  console.log('There will be no more data.', buffer.length, buffer);
});
When running this part of the code I get the error:
buffer.js:226
throw new errors.TypeError(
^
TypeError [ERR_INVALID_ARG_TYPE]: The first argument must be one of type string, buffer, arrayBuffer, array, or array-like object. Received type object
at Function.from (buffer.js:226:9)
at new Buffer (buffer.js:174:17)
...
In this case the error occurs at body.push(new Buffer(chunk)), specifically on the new Buffer(chunk) call.
My first approach was similar:
let body = [];
let buffer;
let readable = req.file('recordingPart');
readable.on('data', (chunk) => {
  body.push(chunk);
});
readable.on('end', () => {
  buffer = Buffer.concat(body);
  console.log('There will be no more data.', buffer.length, buffer);
});
but I got this error:
buffer.js:475
throw kConcatErr;
^
TypeError [ERR_INVALID_ARG_TYPE]: The "list" argument must be one of type array, buffer, or uint8Array
at buffer.js:450:20
In this one the error occurs at Buffer.concat(body);
I got some guidance from this answer: Node.js: How to read a stream into a buffer?
Can anyone help me get the buffer from that req.file?

You can get the uploaded files as below.
req.file('recordingPart').upload(function (err, uploadedFiles) {
  if (err) return res.serverError(err);
  // Logic with uploadedFiles goes here
});
You can get the file descriptor from uploadedFiles[0].fd and use it to read or stream the file as below.
fs.readFile(uploadedFiles[0].fd, 'utf8', function (err, data) {
  // Logic with data goes here
  // Note: for binary content such as the video blob, omit the 'utf8' encoding
  // so that data arrives as a Buffer instead of a string.
  var myBuffer = Buffer.from(data);
});
To use fs as above, require the module first:
var fs = require('fs');
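Putting both pieces together for the original question, here is a minimal sketch of a controller action that uploads the blob and reads it back into a Buffer; the action name is illustrative, and the encoding argument is omitted so fs.readFile returns a Buffer rather than a string:
var fs = require('fs');

module.exports = {
  saveRecording: function (req, res) {
    req.file('recordingPart').upload(function (err, uploadedFiles) {
      if (err) return res.serverError(err);
      if (uploadedFiles.length === 0) return res.badRequest('No file was uploaded');

      // No encoding argument: data arrives as a Buffer, which is what we
      // want for binary content such as the recorded video blob.
      fs.readFile(uploadedFiles[0].fd, function (err, buffer) {
        if (err) return res.serverError(err);
        console.log('Got buffer of length', buffer.length);
        return res.ok();
      });
    });
  }
};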

Your current upload approach will work, but there is also a newer way you might want to consider:
// Upload the image.
var info = await sails.uploadOne(photo, {
  maxBytes: 3000000
})
// Note: E_EXCEEDS_UPLOAD_LIMIT is the error code for exceeding
// `maxBytes` for both skipper-disk and skipper-s3.
.intercept('E_EXCEEDS_UPLOAD_LIMIT', 'tooBig')
.intercept((err) => new Error('The photo upload failed: ' + util.inspect(err)));
Full Example Here
Also check out the Sails.JS Platzi Course for video tutorials on this latest upload functionality using the example project Ration.
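For reference, a minimal sketch of how this newer helper might be used in a classic controller action, assuming sails.uploadOne accepts the upstream returned by req.file (the action name and error handling are illustrative):
module.exports = {
  saveRecording: async function (req, res) {
    var info = await sails.uploadOne(req.file('recordingPart'), {
      maxBytes: 3000000
    })
    .intercept('E_EXCEEDS_UPLOAD_LIMIT', () => new Error('Upload exceeds the size limit'));
    return res.json(info);
  }
};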

Related

create-react-app javascript convert file to a Uint8Array

I have a create-react-app that updates firmware of connected bluetooth devices.
In order to do this, I need to convert the firmware file (.zip) to a Uint8Array.
The firmware file is saved locally in my public/ folder
And so I try to extract those bytes with this function:
var fimware_zip = process.env.PUBLIC_URL + '/ZioV8_1.2.7.zip'
this.loadFile(fimware_zip)
With loadFile defined as:
// Load a file, set the bytes to firmware_byte_array
loadFile = async (my_file) => {
  console.log(my_file)
  var fr = new FileReader();
  fr.onload = (e) => {
    var arrayBuffer = e.target.result;
    var array = new Uint8Array(arrayBuffer);
    this.setState({ firmware_byte_array: array })
  }
  fr.readAsArrayBuffer(my_file);
}
However I get the following error:
Unhandled Rejection (TypeError): Failed to execute 'readAsArrayBuffer' on 'FileReader': parameter 1 is not of type 'Blob'.
I've searched high and low for how to convert a file into a Blob and I just can't do it.
I've also tried putting the .zip file in the src/ folder and importing it using
import fimware_zip from './ZioV8_1.2.7.zip'
But that also doesn't work.
Any help would be greatly appreciated.
You can only use readAsArrayBuffer on Blobs or File objects (such as those you get from input type="file" elements).
I assume there's some kind of server process involved in this app, in which case you can use fetch:
const loadFile = async (my_file) => {
  const response = await fetch(my_file);
  if (!response.ok) {
    throw new Error("HTTP error " + response.status);
  }
  const array = new Uint8Array(await response.arrayBuffer());
  // ...use or return `array` here...
};
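A hypothetical usage from inside an async component method, assuming loadFile is changed to return array at the end (the variable name is illustrative; the state key matches the original code):
// Fetch the firmware from the public/ folder and store the bytes in state.
const fw_url = process.env.PUBLIC_URL + '/ZioV8_1.2.7.zip';
const array = await loadFile(fw_url);
this.setState({ firmware_byte_array: array });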

Create image from ArrayBuffer in Nodejs

I'm trying to create an image file from chunks of ArrayBuffers.
all = fs.createWriteStream("out." + imgtype);
for (i = 0; i < end; i++) {
  all.write(picarray[i]);
}
all.end();
where picarray contains ArrayBuffer chunks. However, I get the error TypeError: Invalid non-string/buffer chunk.
How can I convert ArrayBuffer chunks into an image?
Have you tried first converting it into a Node.js Buffer? (Buffer is the native Node.js binary-data interface, whereas ArrayBuffer is the browser interface and is not fully supported by Node.js write operations.)
Something along the lines of this should help:
all = fs.createWriteStream("out." + imgtype);
for (i = 0; i < end; i++) {
  var buffer = Buffer.from(new Uint8Array(picarray[i]));
  all.write(buffer);
}
all.end();
After spending some time on this I got it working perfectly.
As mentioned by @Nick, you will have to convert the ArrayBuffer you received from the browser into a Node.js Buffer.
var readWriteFile = function (req) {
  var fs = require('fs');
  var data = Buffer.from(req);
  fs.writeFile('fileName.png', data, 'binary', function (err) {
    if (err) {
      console.log("There was an error writing the image");
    } else {
      console.log("The file was written successfully");
    }
  });
};
ArrayBuffer is a browser-side type that can't be written to a file directly; we need to convert it to Buffer, the native binary API of the Node.js runtime.
These few lines of code will create the image.
const fs = require('fs');
let data = arrayBuffer; // your image, stored in the arrayBuffer variable
data = Buffer.from(data);
fs.writeFile(`Assets/test.png`, data, err => { // Assets is a folder present in your root directory
  if (err) {
    console.log(err);
  } else {
    console.log('File created successfully!');
  }
});
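One detail worth keeping in mind: when Buffer.from is given an ArrayBuffer, the resulting Buffer shares memory with it rather than copying the bytes. If you need an independent copy (for example, because the ArrayBuffer will be reused), a minimal sketch:
// Passing a Uint8Array (a TypedArray) to Buffer.from copies the bytes,
// so `copy` is independent of the original ArrayBuffer.
const copy = Buffer.from(new Uint8Array(arrayBuffer));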

Node.js - Check if stream has error before piping response

In Node.js, say that I want to read a file from somewhere and stream the response (e.g., from the filesystem using fs.createReadStream()).
application.get('/files/:id', function (request, response) {
  var readStream = fs.createReadStream('/saved-files/' + request.params.id);
  var mimeType = getMimeTypeSomehow(request.params.id);
  if (mimeType === 'application/pdf') {
    response.set('Content-Range', ...);
    response.status(206);
  } else {
    response.status(200);
  }
  readStream.pipe(response);
});
However, I want to detect if there is an error with the stream before sending my response headers. How do I do that?
Pseudocode:
application.get('/files/:id', function (request, response) {
  var readStream = fs.createReadStream('/saved-files/' + request.params.id);
  readStream.on('ready', function () {
    var mimeType = getMimeTypeSomehow(request.params.id);
    if (mimeType === 'application/pdf') {
      response.set('Content-Range', ...);
      response.status(206);
    } else {
      response.status(200);
    }
    readStream.pipe(response);
  });
  readStream.on('error', function () {
    response.status(404).end();
  });
});
The write stream is ended when readStream ends or has an error. You can prevent this default behaviour by passing end: false during pipe and ending the write stream manually.
So even if an error occurs, your write stream is still open and you can do other things (e.g. sending a 404 status) with the write stream in the error callback.
var readStream = fs.createReadStream('/saved-files/' + request.params.id);
readStream.on('error', function () {
  res.status(404).end();
});
readStream.on('end', function () {
  res.end(); // end the write stream manually when the read stream ends
});
readStream.pipe(res, { end: false }); // prevent the default behaviour
Update 1: For file streams, you can listen for the open event to check if the file is ready to read:
readStream.on('open', function () {
  // set response headers and status
});
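Applied to the route from the question, a minimal sketch (getMimeTypeSomehow is the placeholder from the original post, and any extra headers such as Content-Range would be set where indicated):
application.get('/files/:id', function (request, response) {
  var readStream = fs.createReadStream('/saved-files/' + request.params.id);

  readStream.on('open', function () {
    // The file exists and is readable, so it is now safe to send headers.
    var mimeType = getMimeTypeSomehow(request.params.id);
    // ...set any extra headers (e.g. Content-Range) here...
    response.status(mimeType === 'application/pdf' ? 206 : 200);
    readStream.pipe(response);
  });

  readStream.on('error', function () {
    response.status(404).end();
  });
});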
Update 2: As the OP mentioned, there may be no open event for other streams. If the stream inherits from Node's stream module, we can use the following instead. The trick is to write the data manually rather than using the pipe() method; that way we can do some 'initialization' on the writable before writing the first byte.
So we bind once('data') first and then bind on('data'). The first handler will be called once, before any actual writing happens.
readStream
  .on('error', function (err) {
    res.status(404).end();
  })
  .once('data', function () {
    // will be called once, before the on('data') callback,
    // so it's safe to set headers here
    res.set('Content-Type', 'text/html');
  })
  .on('data', function (chunk) {
    // now start writing data
    res.write(chunk);
  })
  .on('end', res.end.bind(res)); // end the writable when the readable ends

node crypto decipher stream throws EVP_DecryptFinal_ex:wrong final block length if the stream is interrupted

I have a node.js client which downloads and decrypts an AES encrypted file from another host.
var base64 = require('base64-stream');
var crypto = require('crypto');
var aes = crypto.createDecipher('aes-256-cbc', crypto.createHash('sha256').update(pass).digest('hex'));

// file stream
var file = fs.createWriteStream(params.target);
var base64reader = base64.decode();

response.pipe(base64reader) // decode base64
  .pipe(aes)                // decrypt
  .pipe(file);              // write to file

// on last data chunk received: file load complete
aes.on('end', function (chunk) {
  if (typeof params.success !== 'undefined')
    params.success();
});
If the other host closes its connection unexpectedly before finishing the request, the code above throws this error:
TypeError: error:0606506D:digital envelope routines:EVP_DecryptFinal_ex:wrong final block length
at Decipher.Cipher._flush (crypto.js:262:27)
at Decipher.eval (_stream_transform.js:130:12)
at Decipher.g (events.js:187:16)
at Decipher.EventEmitter.emit (events.js:95:17)
at prefinish (_stream_writable.js:427:12)
at finishMaybe (_stream_writable.js:435:7)
at afterWrite (_stream_writable.js:317:3)
at onwrite (_stream_writable.js:307:7)
at WritableState.onwrite (_stream_writable.js:100:5)
at afterTransform (_stream_transform.js:99:5)
at TransformState.afterTransform (_stream_transform.js:74:12)
at Decipher.Cipher._transform (crypto.js:258:3)
at Decipher.Transform._read (_stream_transform.js:179:10)
at Decipher.Readable.read (_stream_readable.js:334:10)
at flow (_stream_readable.js:743:26)
at WriteStream.eval (_stream_readable.js:601:7)
I tried to add an aes.on('error', function () {...}); handler, but it is never called. I also tried adding
response.on('end', function() { aes.emit('close'); });
response.on('close', function() { aes.emit('close'); });
but then aes.on('end', ...); is not called. Adding aes.emit('end') to these statements makes no sense, because it would also fire in the error case, which leads to the error above.
response.on('end', function() { aes.emit('end'); aes.emit('close'); });
response.on('close', function() { aes.emit('end'); aes.emit('close'); });
Does anybody have an idea how I can catch this error?
Thanks a lot!!
It's a bug in Node.js v0.11.9 which was fixed in v0.11.13. With the fix, aes.on('error', ...) is called correctly.
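On a Node version that includes the fix, a minimal sketch of handling the failure; the params.error callback is an assumption mirroring params.success, not part of the original code:
aes.on('error', function (err) {
  // Stop writing the partially decrypted file and report the failure.
  file.end();
  if (typeof params.error !== 'undefined')
    params.error(err); // assumed optional error callback
});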

How to download and unzip a zip file in memory in NodeJs?

I want to download a zip file from the internet and unzip it in memory without saving to a temporary file. How can I do this?
Here is what I tried:
var url = 'http://bdn-ak.bloomberg.com/precanned/Comdty_Calendar_Spread_Option_20120428.txt.zip';
var request = require('request'), fs = require('fs'), zlib = require('zlib');

request.get(url, function(err, res, file) {
  if (err) throw err;
  zlib.unzip(file, function(err, txt) {
    if (err) throw err;
    console.log(txt.toString()); // outputs nothing
  });
});
[EDIT]
As suggested, I tried using the adm-zip library and I still cannot make this work:
var ZipEntry = require('adm-zip/zipEntry');
request.get(url, function(err, res, zipFile) {
  if (err) throw err;
  var zip = new ZipEntry();
  zip.setCompressedData(new Buffer(zipFile.toString('utf-8')));
  var text = zip.getData();
  console.log(text.toString()); // fails
});
You need a library that can handle buffers. The latest version of adm-zip will do:
npm install adm-zip
My solution uses the http.get method, since it returns Buffer chunks.
Code:
var file_url = 'http://notepad-plus-plus.org/repository/7.x/7.6/npp.7.6.bin.x64.zip';
var AdmZip = require('adm-zip');
var http = require('http');

http.get(file_url, function(res) {
  var data = [], dataLen = 0;
  res.on('data', function(chunk) {
    data.push(chunk);
    dataLen += chunk.length;
  }).on('end', function() {
    var buf = Buffer.alloc(dataLen);
    for (var i = 0, len = data.length, pos = 0; i < len; i++) {
      data[i].copy(buf, pos);
      pos += data[i].length;
    }
    var zip = new AdmZip(buf);
    var zipEntries = zip.getEntries();
    console.log(zipEntries.length);
    for (var i = 0; i < zipEntries.length; i++) {
      if (zipEntries[i].entryName.match(/readme/))
        console.log(zip.readAsText(zipEntries[i]));
    }
  });
});
The idea is to create an array of buffers and concatenate them into a new one at the end. This is due to the fact that buffers cannot be resized.
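The same joining step can also be written with Buffer.concat, which takes the array of chunks and an optional total length; a minimal equivalent of the manual copy loop above:
// Equivalent to allocating buf and copying each chunk by hand:
var buf = Buffer.concat(data, dataLen);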
Update
This is a simpler solution that uses the request module to obtain the response in a buffer, by setting encoding: null in the options. It also follows redirects and resolves http/https automatically.
var file_url = 'https://github.com/mihaifm/linq/releases/download/3.1.1/linq.js-3.1.1.zip';
var AdmZip = require('adm-zip');
var request = require('request');

request.get({ url: file_url, encoding: null }, (err, res, body) => {
  var zip = new AdmZip(body);
  var zipEntries = zip.getEntries();
  console.log(zipEntries.length);
  zipEntries.forEach((entry) => {
    if (entry.entryName.match(/readme/i))
      console.log(zip.readAsText(entry));
  });
});
The body of the response is a buffer that can be passed directly to AdmZip, simplifying the whole process.
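As a side note, the request module has since been deprecated; on Node 18+ the same idea can be sketched with the built-in fetch (the function name is illustrative):
const AdmZip = require('adm-zip');

async function downloadZip(file_url) {
  const res = await fetch(file_url);
  if (!res.ok) throw new Error('HTTP error ' + res.status);
  // arrayBuffer() buffers the whole response; Buffer.from wraps it for AdmZip.
  const zip = new AdmZip(Buffer.from(await res.arrayBuffer()));
  zip.getEntries().forEach((entry) => console.log(entry.entryName));
  return zip;
}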
Sadly you can't pipe the response stream straight into the unzip job the way Node's zlib library allows; you have to buffer it and wait for the end of the response. For big files I suggest piping the response to an fs stream instead, otherwise you will fill up your memory in a blink!
I don't completely understand what you are trying to do, but IMHO this is the best approach. You should keep your data in memory only for as long as you really need it, and then stream it to the CSV parser.
If you want to keep all your data in memory, you can replace the CSV parser method fromPath with from, which takes a buffer instead, and in getData return unzipped directly.
You can use adm-zip (as @mihai said) instead of node-zip; just pay attention because, at the time of writing, adm-zip was not yet published on npm, so you need:
$ npm install git://github.com/cthackers/adm-zip.git
N.B. Assumption: the zip file contains only one file
var request = require('request'),
    fs = require('fs'),
    csv = require('csv'),
    NodeZip = require('node-zip')

function getData(tmpFolder, url, callback) {
  var tempZipFilePath = tmpFolder + new Date().getTime() + Math.random()
  var tempZipFileStream = fs.createWriteStream(tempZipFilePath)
  request.get({
    url: url,
    encoding: null
  }).on('end', function() {
    fs.readFile(tempZipFilePath, 'base64', function (err, zipContent) {
      var zip = new NodeZip(zipContent, { base64: true })
      Object.keys(zip.files).forEach(function (filename) {
        var tempFilePath = tmpFolder + new Date().getTime() + Math.random()
        var unzipped = zip.files[filename].data
        fs.writeFile(tempFilePath, unzipped, function (err) {
          callback(err, tempFilePath)
        })
      })
    })
  }).pipe(tempZipFileStream)
}
getData('/tmp/', 'http://bdn-ak.bloomberg.com/precanned/Comdty_Calendar_Spread_Option_20120428.txt.zip', function (err, path) {
  if (err) {
    return console.error('error: %s', err.message)
  }
  var metadata = []
  csv().fromPath(path, {
    delimiter: '|',
    columns: true
  }).transform(function (data) {
    // do things with your data
    if (data.NAME[0] === '#') {
      metadata.push(data.NAME)
    } else {
      return data
    }
  }).on('data', function (data, index) {
    console.log('#%d %s', index, JSON.stringify(data, null, ' '))
  }).on('end', function (count) {
    console.log('Metadata: %s', JSON.stringify(metadata, null, ' '))
    console.log('Number of lines: %d', count)
  }).on('error', function (error) {
    console.error('csv parsing error: %s', error.message)
  })
})
If you're on macOS or Linux, you can use the unzip command to unzip from stdin.
In this example I'm reading the zip file from the filesystem into a Buffer object but it works
with a downloaded file as well:
// Get a Buffer with the zip content
var fs = require("fs")
  , zip = fs.readFileSync(__dirname + "/test.zip");

// Now the actual unzipping:
var spawn = require('child_process').spawn
  , fileToExtract = "test.js"
  // -p tells unzip to extract to stdout
  , unzip = spawn("unzip", ["-p", "/dev/stdin", fileToExtract])
  ;

// Write the Buffer to stdin
unzip.stdin.write(zip);

// Handle errors
unzip.stderr.on('data', function (data) {
  console.log("There has been an error: ", data.toString("utf-8"));
});

// Handle the unzipped stdout
unzip.stdout.on('data', function (data) {
  console.log("Unzipped file: ", data.toString("utf-8"));
});

unzip.stdin.end();
Which is actually just the node version of:
cat test.zip | unzip -p /dev/stdin test.js
EDIT: It's worth noting that this will not work if the input zip is too big to be read in one chunk from stdin. If you need to read bigger files, and your zip file contains only one file, you can use funzip instead of unzip:
var unzip = spawn("funzip");
If your zip file contains multiple files (and the file you want isn't the first one) I'm afraid to say you're out of luck. Unzip needs to seek in the .zip file since zip files are just a container, and unzip may just unzip the last file in it. In that case you have to save the file temporarily (node-temp comes in handy).
The module node-zip was released two days ago; it is a wrapper for the JavaScript-only zip implementation, JSZip.
var NodeZip = require('node-zip')
  , zip = new NodeZip(zipBuffer.toString("base64"), { base64: true })
  , unzipped = zip.files["your-text-file.txt"].data;
