I am having trouble uploading a file with Node.js and Angular.
I found solutions, but they all use Ajax, which I don't know. Is it possible to do this without it?
With the following code I get this error:
POST http://localhost:2000/database/sounds 413 (Payload Too Large)
Code:
HTML:
<div class="form-group">
    <label for="upload-input">This needs to be a .WAV file</label>
    <form enctype="multipart/form-data" action="/database/sounds" method="post">
        <input type="file" class="form-control" name="uploads[]" id="upload-input" multiple="multiple">
    </form>
    <button class="btn-primary" ng-click="uploadSound()">UPLOAD</button>
</div>
JavaScript:
$scope.uploadSound = function(){
    var x = document.getElementById("upload-input");
    if ('files' in x) {
        if (x.files.length == 0) {
            console.log("Select one or more files.");
        } else {
            var formData = new FormData();
            for (var i = 0; i < x.files.length; i++) {
                var file = x.files[i];
                if (file.type == "audio/wav") {
                    console.log("Importing:");
                    if ('name' in file) {
                        console.log("- name: " + file.name);
                    }
                    if ('size' in file) {
                        console.log("- size: " + file.size + " bytes");
                    }
                    formData.append('uploads[]', file, file.name);
                } else {
                    console.log("Error with '" + file.name + "': the type '" + file.type + "' is not supported.");
                }
            }
            $http.post('/database/sounds', formData).then(function(response){
                console.log("Upload:");
                console.log(response.data);
            });
        }
    }
};
Node.js:
//Upload a sound
app.post('/database/sounds', function(req, res){
    var form = new formidable.IncomingForm();

    // allow the user to upload multiple files in a single request
    form.multiples = true;

    // store all uploads in the /database/sounds directory
    form.uploadDir = path.join(__dirname, '/database/sounds');

    // every time a file has been uploaded successfully,
    // rename it to its original name
    form.on('file', function(field, file) {
        fs.rename(file.path, path.join(form.uploadDir, file.name), function(err) {
            if (err) console.log('Rename failed: ' + err);
        });
    });

    // log any errors that occur
    form.on('error', function(err) {
        console.log('An error has occurred: \n' + err);
    });

    // once all the files have been uploaded, send a response to the client
    form.on('end', function() {
        res.end('success');
    });

    // parse the incoming request containing the form data
    form.parse(req);
});
EDIT:
The error became
POST http://localhost:2000/database/sounds 400 (Bad Request)
If you are using body-parser:
app.use(bodyParser.urlencoded({limit: '100mb', extended: true}));
app.use(bodyParser.json({limit: '100mb'}));
This will allow you to upload files up to 100 MB.
For the json/urlencoded limit, it's recommended to configure them in server/config.json as follows:
{
  "remoting": {
    "json": {"limit": "50mb"},
    "urlencoded": {"limit": "50mb", "extended": true}
  }
}
Please note that the LoopBack REST API has its own express router with bodyParser.json/urlencoded middleware. When you add a global middleware, it has to come before the boot() call.
var loopback = require('loopback');
var boot = require('loopback-boot');
var app = module.exports = loopback();

// request limit 500 MB (524288000 bytes)
app.use(loopback.bodyParser.json({limit: 524288000}));
app.use(loopback.bodyParser.urlencoded({limit: 524288000, extended: true}));
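For completeness, a sketch of how the boot call would then follow the middleware above; boot(app, __dirname) is the conventional loopback-boot invocation and is my assumption here, since the snippet above stops before it:

// bootstrap models and the REST router only after the global body parsers above
boot(app, __dirname, function(err) {
    if (err) throw err;
});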
With regards to checking that the data is actually a WAV file, your best bet is to look at the contents of the file and determine if it looks like a WAV file or not.
The WAVE PCM soundfile format article goes into the details of the format.
To be absolutely sure that this is a proper WAV file, and that it's not broken in some way, you need to check that all of the fields defined there make sense. But a quick solution might be to just check that the first four bytes of the content are the letters 'RIFF'. It won't guard against corrupted files or malicious content, but it's a good place to start, I think.
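As a rough sketch of that quick check in Node (the file path, function name, and callback shape are illustrative, not from the original post):

var fs = require('fs');

// Read only the first four bytes and compare them against the RIFF magic number.
function looksLikeWav(filePath, callback) {
    fs.open(filePath, 'r', function(err, fd) {
        if (err) return callback(err);
        var header = Buffer.alloc(4);
        fs.read(fd, header, 0, 4, 0, function(readErr) {
            fs.close(fd, function() {});
            if (readErr) return callback(readErr);
            // a canonical WAV file starts with the ASCII letters 'RIFF'
            callback(null, header.toString('ascii') === 'RIFF');
        });
    });
}

looksLikeWav('upload.wav', function(err, isWav) {
    if (err) throw err;
    console.log(isWav ? 'Looks like a WAV file' : 'Not a RIFF/WAV header');
});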
I tried to change the object sent to URL params, as suggested in Very Simple AngularJS $http POST Results in '400 (Bad Request)' and 'Invalid HTTP status code 400':
$http.post({
    method: 'POST',
    url: '/upload',
    data: formData,
    headers: {'Content-Type': 'application/x-www-form-urlencoded'},
    transformRequest: function(obj) {
        var str = [];
        for (var p in obj)
            str.push(encodeURIComponent(p) + "=" + encodeURIComponent(obj[p]));
        return str.join("&");
    }
}).success(function(response){
    console.log("Uploaded :");
    console.log(response.data);
});
But I get a bad request error.
Why is there no data received? The console.log before this shows I have one file in my formData.
Error: $http:badreq
Bad Request Configuration
Http request configuration url must be a string or a $sce trusted
object. Received: {"method":"POST","url":"/upload","data":
{},"headers":{"Content-Type":"application/x-www-form-urlencoded"}}
I am uploading a CSV file using FormData and XMLHttpRequest. Here is the code for that.
I have a form wrapped around an HTML file input, and I execute this code in its onchange event. I have tried sending the form directly as well, and also reading the form element into the FormData object.
let formData = new FormData();
let file = e.target.files[0];
var blob = new Blob([file], {type: 'text/csv'});
formData.append("payoutUpload", blob, 'processed.csv');
let uri = encodeURI(`${window.serviceUri}${path}`);

var req = new XMLHttpRequest();
req.onload = (result) => {
    if (req.status === 500 && result && result.code === 'ECONNRESET') {
        console.log('Connection was reset, hence retry the sendRequest function');
    } else if (req.status === 200) {
        // success
    } else {
        console.log("Error while retrieving data");
    }
};
req.onerror = (e) => {
    console.log('There was an error while retrieving data from service', e);
};
req.open('POST', uri, true);
req.setRequestHeader('Content-Type', 'multipart/form-data');
req.setRequestHeader('Authorization', 'Bearer ' + token);
req.send(formData);
When I send the request, I can see that the file is being sent in the form of Request Payload.
On the Node.js backend, I am running Express and Formidable. I am not using body-parser; I am using Express's built-in json and urlencoded middleware.
Here is the formidable part.
const form = formidable({multiples: true});
form.parse(req, (err, fields, files) => {
    console.log(`error is ${JSON.stringify(err)}`);
    console.log(`fields is ${JSON.stringify(fields)}`);
    console.log(`files JSON: ${JSON.stringify(files)}`);
    console.log('file in request: ' + files.payoutUpload);
    console.log(`req.body: ${req.body}`);
    options.file = files.payoutUpload;
});
I get err, fields and files as empty. I have searched through all similar questions and set the request headers correctly (which is usually the issue). I can see that req.body still has the file payload on the server end, but formidable does not parse it. Can anyone tell me what I am doing wrong?
UPDATE: I have tried other packages for parsing the file, like multer and express-fileupload; all of them return files as empty. I have also tried the fetch API to send my request, but with no luck.
req.setRequestHeader('Content-Type', 'multipart/form-data')
When you send multipart/form-data you must include a boundary parameter in the Content-Type header, but you cannot know what value to set for it.
Don't set the Content-Type header at all: let XMLHttpRequest generate it automatically from the FormData object.
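Concretely, the request setup from the question would become (a sketch keeping the question's variable names):

// same request as above, minus the Content-Type line, so XMLHttpRequest
// derives multipart/form-data and its boundary from the FormData object
req.open('POST', uri, true);
req.setRequestHeader('Authorization', 'Bearer ' + token);
req.send(formData);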
I have two servers that communicate with each other. Server1 requests parts of a file from Server2 and stores the data received into one file. Server2 is supposed to receive each of these requests and create a stream that pipes the data over.
Suppose the files stored in Server2's directory are as follows:
bigfile.gz
bigfile.gz.part-0
bigfile.gz.part-1
bigfile.gz.part-2
......
So Server1 will send a request for part-0, then part-1, and so on, to Server2. Hence the use of the loop to make requests.
Server 1 (code snippet)
for (var i in requestInfo['blockName']) {
    var blockName = i;
    var IP = requestInfo['blockName'][i][0];
    var fileData = JSON.stringify({
        blockName: blockName,
        fileName: requestInfo['fileName']
    });
    makeRequest(fileData, IP);
    console.log(counter);
}

function makeRequest(fileData, IP) {
    var options = {
        host: IP,
        port: 5000,
        path: '/read',
        method: 'POST',
        headers: {
            'Content-Type': 'application/json'
        }
    };
    var req = http.request(options, function(res) {
        var data = '';
        res.on('data', function(chunk) {
            data += chunk;
        });
        res.on('end', function() {
            console.log(data.length);
            //fs.appendFileSync(fileName, data);
            var writeStream = fs.createWriteStream(fileName, { "flags": 'a' });
            writeStream.write(data);
            writeStream.end();
        });
    });
    req.write(fileData);
    req.end();
}
Server 2 (code snippet)
app.post('/read', function(req, res) {
    var dataBody = req.body;
    fs.createReadStream(dataBody.fileName + '/' + dataBody.blockName).pipe(res);
});
The one above works when I test it with a 100 MB .txt file, but it fails when I have a 1 GB .gz file; even when I test it with a .zip file, the final .zip generated on the Server1 side is the incorrect size.
I am not sure what I am doing wrong here, or what the alternative solution is.
EDIT:
Also, my Server1 crashes when dealing with the big 1 GB .gz file.
Your main problem here is that you are treating your data as a string by appending chunks to one.
Rewritten, it should be:
var req = http.request(options, function(res) {
    var data = [];
    res.on('data', function(chunk) {
        data.push(chunk);
    });
    res.on('end', function() {
        fs.writeFile(fileName, Buffer.concat(data), function() {
            console.log("write end");
        });
    });
});
That way we are creating a big array of binary chunks, and when the download is complete we write the concatenation of all the chunks to a file.
But notice the word big: if you stick with this implementation, you risk running out of memory, especially when dealing with large (>500 MB) files.
Streams to the rescue
var req = http.request(options, function(res) {
    res.pipe(fs.createWriteStream(fileName)).on("close", function() {
        console.log("write end");
    });
});
With the above implementation the memory footprint stays low, because the moment you receive a chunk of data from the download, you write it to the file. That way you never keep the whole file in the program's memory.
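On newer Node versions (10+), stream.pipeline does the same piping while also forwarding errors from both sides to a single callback, which a bare .pipe() does not; a sketch under that assumption, reusing options, fileName and fileData from the question:

var http = require('http');
var fs = require('fs');
var pipeline = require('stream').pipeline;

var req = http.request(options, function(res) {
    // pipeline forwards read- and write-side errors to one place
    pipeline(res, fs.createWriteStream(fileName), function(err) {
        if (err) console.log('download failed: ' + err);
        else console.log('write end');
    });
});
req.write(fileData);
req.end();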
I'm certain I'm missing something obvious, but the gist of the problem is I'm receiving a PNG from a Mapbox call with the intent of writing it to the file system and serving it to the client. I've successfully relayed the call, received a response of raw data and written a file. The problem is that my file ends up truncated no matter what path I take, and I've exhausted the answers I've found skirting the subject. I've dumped the raw response to the log, and it's robust, but any file I make tends to be about a chunk's worth of unreadable data.
Here's the code I've got at present for the file making. I tried this buffer move as a last ditch after several failed and comparably fruitless iterations. Any help would be greatly appreciated.
module.exports = function(req, res, cb) {
    var cartography = function() {
        return https.get({
            hostname: 'api.mapbox.com',
            path: '/v4/mapbox.wheatpaste/' + req.body[0] + ',' + req.body[1] + ',6/750x350.png?access_token=' + process.env.MAPBOX_API
        }, function(res) {
            var body = '';
            res.on('data', function(chunk) {
                body += chunk;
            });
            res.on('end', function() {
                var mapPath = 'map' + req.body[0] + req.body[1] + '.png';
                var map = new Buffer(body, 'base64');
                fs.writeFile(__dirname + '/client/images/maps/' + mapPath, map, 'base64', function(err) {
                    if (err) throw err;
                    cb(mapPath);
                });
            });
        });
    };
    cartography();
};
It is possible to rewrite your code as a more compact subroutine:
const fs = require('fs');
const https = require('https');

https.get(url, (response) => { // the request itself
    if (response) {
        let imageName = 'image.png'; // for this purpose I usually use crypto
        response.pipe( // pipe the response to a write stream (file)
            fs.createWriteStream( // create a write stream
                './public/' + imageName // create a file named image.png
            )
        );
        return imageName; // if the public folder is set as default in app.js
    } else {
        return false;
    }
});
You could get the original name and extension from the URL, but it's safer to generate a new name with crypto and take the file extension, as I said, from the URL or with the read-chunk and file-type modules.
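A sketch of that naming approach (crypto and path are Node built-ins; taking the extension straight from the URL path is the simple case and an assumption here):

const crypto = require('crypto');
const path = require('path');

// derive a collision-resistant file name, keeping the extension from the URL
function randomImageName(url) {
    const ext = path.extname(new URL(url).pathname) || '.png'; // fallback assumed
    return crypto.randomBytes(16).toString('hex') + ext;
}

console.log(randomImageName('https://example.com/photos/cat.png'));
// e.g. '9f2c0a57c4b1e8d2a6f03b7c5d9e1a4f.png'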
I am using the JavaScript version of the AWS SDK to upload a file to an Amazon S3 bucket.
Code:
AWS.config.update({
    accessKeyId: 'access-key',
    secretAccessKey: 'secret-key'
});
AWS.config.region = 'region';
var bucket = new AWS.S3({params: {Bucket: 'bucket-name'}});
//var fileChooser = document.getElementById('file');
var files = event.target.files;
$.each(files, function(i, file) {
    //console.log(file.name);
    if (file) {
        var params = {Key: file.name, ContentType: file.type, Body: file};
        bucket.upload(params).on('httpUploadProgress', function(evt) {
            console.log("Uploaded :: " + parseInt((evt.loaded * 100) / evt.total) + '%');
            if ("Uploaded :: " + parseInt((evt.loaded * 100) / evt.total) + '%' == 'Uploaded :: 20%') {
                console.log("abort upload");
                bucket.abort.bind(bucket);
            }
        }).send(function(err, data) {
            if (err != "null") {
                console.log(data);
                //alert("Upload Success \nETag:"+ data.ETag + "\nLocation:"+ data.Location);
                var filename = data.Location.substr(data.Location.lastIndexOf("/") + 1, data.Location.length - 1);
                console.log(filename);
                fileData = filename;
                filename = filename.replace("%20", " ");
                $('.aws-file-content').append('<i id="delete-aws-file' + i + '" class="delete-aws-file icon-remove-sign" data-filename=' + fileData + '></i><a href="' + data.Location + '" target=_blank >' + filename + '</a><br>');
            } else {
                console.log(err);
            }
        });
    }
});
While the file is uploading parts successfully and the upload is still in progress, I want to abort/stop it.
I tried:
bucket.abort(); // not working
bucket.abort.bind(bucket); // not working
Thanks for the help.
Found the solution:
// replaced bucket.upload() with bucket.putObject()
var params = {Key: file.name, ContentType: file.type, Body: file};
request = bucket.putObject(params);
Then, to abort the request:
abort: function() {
    request.abort();
}
You cannot call abort from the bucket, which is your S3 object; it must be called on the upload itself.
Change it to something like this:
var upload = bucket.upload(params)
upload.send(....)
so you can bind abort to the upload like
upload.abort.bind(upload);
You can call it within a timeout, as shown in the example:
// abort request in 1 second
setTimeout(upload.abort.bind(upload), 1000);
Calling abort() in the browser environment will not abort any requests that are already in flight. If a multipart upload was created, any parts not yet uploaded will not be sent, and the multipart upload will be cleaned up.
The default value for the part size is 5 * 1024 * 1024 (5 MB).
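Putting those pieces together, a sketch of a managed upload that is aborted mid-flight (the partSize/queueSize values are illustrative; partSize is shown at its 5 MB default):

// keep a handle to the managed upload so abort() can be called on it
var upload = bucket.upload(
    {Key: file.name, ContentType: file.type, Body: file},
    {partSize: 5 * 1024 * 1024, queueSize: 1}
);

upload.send(function(err, data) {
    // an aborted multipart upload surfaces here as an error
    if (err) console.log('upload ended: ' + (err.code || err));
    else console.log('uploaded to ' + data.Location);
});

// abort request in 1 second
setTimeout(upload.abort.bind(upload), 1000);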
Through dumb luck I've stumbled upon a way to do this for multipart uploads.
The accepted answer forces you to use the putObject method, which does not chunk uploads and send them using the multipart upload API.
The following solution uses the s3.upload method of the AWS S3 SDK for JavaScript in the Browser, and it seems to work just fine, even though the example from the official documentation doesn't work.
var bucket = new AWS.S3({params: {Bucket: 'bucket-name'}});
var params = {Key: file.name, ContentType: file.type, Body: file};
bucket.upload(params).send();
setTimeout(bucket.abort, 1000);
That's it. I just tried calling bucket.abort() and it just worked. Not sure why AWS hasn't documented this.
I'm having trouble issuing a POST command that downloads a file.
On the client side, I'm trying to POST to a specific URL, including a param that specifies the file to download.
var req = $.ajax({
    type: 'POST',
    url: '/click',
    data: { 'path': filename }
});
req.done(function(data) {
    // Download the file here?
});
The server eventually fires off a method which does this:
function downloadFile(req, res) {
    var dir = req.session.currentdir + req.body.path;
    var mimetype = (shell.exec("file --mime-type '" + dir + "'", {silent: true}).output);
    mimetype = mimetype.substring(mimetype.indexOf(": ") + 2, mimetype.length);
    var stat = fs.statSync(dir);
    res.writeHead(200, {'Content-Type': mimetype,
                        'Content-Length': stat.size});
    var fileStream = fs.createReadStream(dir);
    fileStream.pipe(res);
}
Now I can't seem to get the client side to accept the file I'm trying to pipe back; it just hangs for an incredibly long time before closing. What is the appropriate way to get the client to download the file I'm trying to send back?
Much thanks for taking the time to read.
1. Set a Content-Disposition header on the response so the browser treats it as a download:
resp.setHeader("Content-Disposition", "attachment; filename=\"xxxx.xxx\"");
2. It is better to use GET for this: the browser can then handle the response as a normal navigation and save the file.
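A sketch of that approach (the /download route, query parameter, and path handling are illustrative, adapted from the question's code, with no path validation shown):

// server side: stream the file over GET with a Content-Disposition header
var fs = require('fs');
var path = require('path');

app.get('/download', function(req, res) {
    var filePath = req.session.currentdir + req.query.path; // unvalidated, sketch only
    res.setHeader('Content-Disposition',
        'attachment; filename="' + path.basename(filePath) + '"');
    fs.createReadStream(filePath).pipe(res);
});

// client side: a plain navigation triggers the browser's native download
window.location = '/download?path=' + encodeURIComponent(filename);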