I've created a promise that uses the aws-sdk to upload to my S3 bucket. My application is a simple command-line script that adds images to S3 and updates the database. I have anywhere between 300 and 1,000 images to upload on every run.
The problem I am having is that it uploads 5-10 images but then seems to hang. I've confirmed this by placing a console.log(data) after the error checking in the promise below.
The first five images upload quickly, the sixth takes about a minute, the seventh takes a lot longer, and at that point it just hangs.
s3-upload-promise.js
'use strict'

let AWS = require('aws-sdk')
let s3 = new AWS.S3()

module.exports = function(params) {
  return new Promise(function(resolve, reject) {
    s3.upload(params, function(err, data) {
      if(err) return reject(Error(err))
      resolve(data)
    })
  })
}
And here is the code that calls the promise:
let s3UploadPromise = require('../src/s3-upload-promise/s3-upload-promise')
// Get all PNG files from given path and upload to bucket
listpng(process.argv[2]).then(function(files) {
  let promises = []
  files.forEach(function(el, i) {
    promises.push(s3UploadPromise({
      Bucket: process.env.S3_BUCKET,
      Key: 'templates/' + randomstring.generate() + '.png',
      Body: fs.createReadStream(process.argv[2] + el),
      ACL: 'public-read'
    }))
  });
  Promise.all(promises).then(function(values) {
    return console.log(values)
  })
})
Any ideas what I'm doing wrong? Does this have anything to do with me not closing createReadStream?
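For reference, one way to narrow down where it stalls is to time each upload around the existing promise. This is only a diagnostic sketch built on the module above, not a fix:

// Sketch: wrap the existing promise to log how long each upload takes.
function timedUpload(params) {
  let start = Date.now()
  return s3UploadPromise(params).then(function(data) {
    console.log('Uploaded ' + params.Key + ' in ' + (Date.now() - start) + 'ms')
    return data
  })
}
// Use timedUpload(...) in place of s3UploadPromise(...) in the forEach above.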
Edit
I've tried closing the stream within the s3.upload callback. It didn't make any difference and it still hung:
s3.upload(params, function(err, data) {
  params.Body.close()
  if(err) return reject(Error(err))
  resolve(data)
})
Edit 2
I added some error checking and I am getting the following error:
{
  message: 'Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.',
  code: 'RequestTimeout',
  region: null,
  time: 2016-06-19T13:14:39.223Z,
  requestId: 'F7E64E8F99E774F3',
  extendedRequestId: 'PW/mPy6t3w9U1uJc8xYKhUGi/KiSY+6yK6nq0RB21Ke1KqRmTWjjm3KXEp0qAEPDadypw+kiwCEP3upER1uecEP4Sl9Tk/lt',
  cfId: undefined,
  statusCode: 400,
  retryable: true
}
A comment on a GitHub issue mentions:
I've been noticing this a lot when using concurrency > 1, on several systems. Most of the time an uploaded folder will begin to get 400's due to timeouts after the first 10 or so requests.
Maybe adding a Content-Length somewhere will help. Will try.
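For reference, here is how a ContentLength could be passed alongside the stream in the existing upload params. This is a sketch using fs.statSync for brevity; whether it actually resolves the timeouts is unverified:

// Sketch: include ContentLength with each upload, per the Content-Length idea above.
files.forEach(function(el) {
  let filePath = process.argv[2] + el
  let stats = fs.statSync(filePath)
  promises.push(s3UploadPromise({
    Bucket: process.env.S3_BUCKET,
    Key: 'templates/' + randomstring.generate() + '.png',
    Body: fs.createReadStream(filePath),
    ContentLength: stats.size,
    ACL: 'public-read'
  }))
})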
Edit 3
I decided to try a different library called knox. I get the same problem! This is crazy. It must surely be an Amazon issue if two different libraries are facing the same problem?
s3-upload-promise.js
'use strict'

let knox = require('knox')
let process = require('process')

var client = knox.createClient({
  key: process.env.AWS_KEY,
  secret: process.env.AWS_SECRET,
  bucket: process.env.S3_BUCKET,
});

module.exports = function(params) {
  return new Promise(function(resolve, reject) {
    let headers = {
      'Content-Length': params.contentLength,
      'Content-Type': params.contentType,
      'x-amz-acl': params.permissions
    }
    client.putBuffer(params.buffer, params.key, headers, function(err, res){
      if(err) return reject(err)
      resolve(res)
    });
  })
}
Calling code ...
// Get all PNG files from given path
listpng(process.argv[2]).then(function(files) {
  let promises = []
  files.forEach(function(el, i) {
    let file = process.argv[2] + el;
    fs.readFile(file, function(err, data) {
      if(err) return console.log(err)
      fs.stat(process.argv[2] + el, function(err, stats) {
        if(err) return console.log(err)
        let key = process.env.S3_TEMPLATES + '/'
        let buffer = fs.createReadStream(process.argv[2] + el)
        let params = {
          buffer: data,
          key: key + randomstring.generate() + '.png',
          contentLength: stats.size,
          contentType: 'image/png',
          permissions: 'public-read',
        }
        promises.push(s3Uploader(params))
        Promise.all(promises).then(function(values) {
          return console.log(values)
        })
      })
    })
  })
})
Not sure what else to do now.
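If the concurrency observation from the GitHub comment above is the culprit, one way to test it would be to chain the uploads so only one runs at a time. This is a sketch against the original s3UploadPromise module, not a confirmed fix:

// Sketch: upload files strictly one at a time instead of starting them all at once.
listpng(process.argv[2]).then(function(files) {
  return files.reduce(function(chain, el) {
    return chain.then(function() {
      return s3UploadPromise({
        Bucket: process.env.S3_BUCKET,
        Key: 'templates/' + randomstring.generate() + '.png',
        Body: fs.createReadStream(process.argv[2] + el),
        ACL: 'public-read'
      }).then(function(data) {
        console.log('Uploaded', el)
      })
    })
  }, Promise.resolve())
})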
Related
I have been struggling with various FTP Node modules trying to get anything working in AWS Lambda. The best and most popular seems to be "basic-ftp", which also supports async/await. But I just cannot get it to download files when any code is added beneath the FTP function.
I don't want to add the fs functions within the FTP async function, as I need to work out what is causing the break when any code is added below it; I also have other bits of code to add later that work with the downloaded file and its contents:
FTP SUCCESS - when only the async function is used, with no fs code beneath it
FTP FAILURE - when the fs readdir/readFile functions or any other code is added below:
ERROR Error: ENOENT: no such file or directory, open '/tmp/document.txt'
https://github.com/patrickjuchli/basic-ftp
const ftp = require("basic-ftp");
const fs = require("fs");
var FileNameWithExtension = "document.txt";
var ftpTXT;
exports.handler = async (event, context, callback) => {
example();
async function example() {
const client = new ftp.Client();
//client.ftp.verbose = true;
try {
await client.access({
host: host,
user: user,
password: password,
//secure: true
});
console.log(await client.list());
await client.download(fs.createWriteStream('/tmp/' + FileNameWithExtension), FileNameWithExtension);
}
catch (err) {
console.log(err);
}
client.close();
}
// Read the content from the /tmp/ directory to check FTP was succesful
fs.readdir("/tmp/", function (err, data) {
if (err) {
return console.error("There was an error listing the /tmp/ contents.");
}
console.log('Contents of AWS Lambda /tmp/ directory: ', data);
});
// Read TXT file and convert into string format
fs.readFile('/tmp/' + FileNameWithExtension, 'utf8', function (err, data) {
if (err) throw err;
ftpTXT = data;
console.log(ftpTXT);
});
// Do other Node.js coding with the downloaded txt file and it's contents
};
The problem is that you are getting lost by creating an async function inside your handler. Since example() is async, it returns a Promise, but you don't await it, so the way it has been coded it's a fire-and-forget call. Also, your Lambda is terminated before your callbacks are triggered, so even if the download happened you would not be able to see it.
I suggest you wrap your callbacks in Promises so you can easily await on them from your handler function.
I have managed to make it work. I used https://dlptest.com/ftp-test/ for testing, so change it accordingly. Also note that I upload the file myself, so if you want to replicate this example, just create a readme.txt in the root of your project and let the handler upload it. If you already have this readme.txt file on your FTP server, just delete the line where it uploads the file.
Here's a working example:
const ftp = require("basic-ftp");
const fs = require("fs");
const FileNameWithExtension = "readme.txt";
module.exports.hello = async (event) => {
const client = new ftp.Client();
try {
await client.access({
host: 'ftp.dlptest.com',
user: 'dlpuser#dlptest.com',
password: 'puTeT3Yei1IJ4UYT7q0r'
});
console.log(await client.list());
await client.upload(fs.createReadStream(FileNameWithExtension), FileNameWithExtension)
await client.download(fs.createWriteStream('/tmp/' + FileNameWithExtension), FileNameWithExtension);
}
catch (err) {
console.log('logging err')
console.log(err);
}
client.close();
console.log(await readdir('/tmp/'))
console.log(await readfile('/tmp/', FileNameWithExtension))
return {
statusCode: 200,
body: JSON.stringify({message: 'File downloaded successfully'})
}
};
const readdir = dir => {
return new Promise((res, rej) => {
fs.readdir(dir, function (err, data) {
if (err) {
return rej(err);
}
return res(data)
});
})
}
const readfile = (dir, filename) => {
return new Promise((res, rej) => {
fs.readFile(dir + filename, 'utf8', function (err, data) {
if (err) {
return rej(err);
}
return res(data)
})
})
}
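As an aside, on Node 8+ the same wrappers can be generated with util.promisify instead of being written by hand. This is a sketch, not part of the original answer, and note the promisified readFile takes a full path plus encoding rather than the (dir, filename) pair used above:

const util = require('util');
const fs = require('fs');

// Promise-returning equivalents of the callback-style fs APIs.
const readdir = util.promisify(fs.readdir);
const readFile = util.promisify(fs.readFile);

// Usage inside the handler would then be:
// console.log(await readdir('/tmp/'));
// console.log(await readFile('/tmp/' + FileNameWithExtension, 'utf8'));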
The Lambda output and the complete CloudWatch logs confirm it worked: my file contains nothing but a 'hello' inside it, and you can see it in the logs.
Do keep in mind that, in Lambda functions, you have a 512 MB limit when downloading anything to /tmp. You can see the limits in the docs.
We're writing a serverless function in AWS Lambda that does the following:
Fetches JSON files from AWS S3 (Multiple files).
Merges JSON files into one single file.
Uploads the new file from (2) back to S3.
To handle this flow, we're trying to use the async library in Node.js within our Lambda. Here's the code we have so far:
'use strict';

const AWS = require('aws-sdk');
const async = require('async');

const BUCKET = 'sample-bucket';
const s3path = 'path/to/directory';

exports.handler = (event, context, callback) => {
  let filepaths = []; // Stores filepaths
  let all_data = []; // Array to store all data retrieved
  let s3Client = new AWS.S3({ params: { Bucket: BUCKET } }); // S3 bucket config

  // Runs functions in the series provided to ensure each function runs one after the other
  async.series([
    // Read file IDs from the event and saves in filepaths array
    function(callbackSeries) {
      console.log('Starting function...');
      event.forEach(id => {
        filepaths.push(`${id}.json`);
      });
      console.log('All filenames pushed.');
      callbackSeries();
    },
    // Fetches each file from S3 using filepaths
    // Then pushes objects from each file to the all_data array
    function(callbackSeries) {
      async.each(filepaths, (file, callback) => {
        let params = {
          'Bucket': BUCKET,
          'Key': s3path + file
        }
        s3Client.getObject(params, (err, data) => {
          if(err) {
            throw err;
          } else {
            if(data.Body.toString()) {
              console.log('Fetching data...');
              all_data.push(JSON.parse(data.Body.toString()));
              callback();
              console.log('Fetched.');
            }
          }
        });
      },
      (err) => {
        if (err) throw err;
        else callbackSeries();
      });
    },
    // NOTHING IS BEING EXECUTED AFTER THIS
    // Puts object back into bucket as one merged file
    function(callbackSeries) {
      console.log('Final function.')
      // Concatenates multiple arrays in all_data into a single array
      all_data = Array.prototype.concat(...all_data);
      let params = {
        'Bucket': BUCKET,
        'Body': JSON.stringify(all_data),
        'Key': 'obj.json'
      };
      s3Client.putObject(params, (err, data) => {
        if(err) throw err;
        else console.log('Success!');
      })
    }
  ], (err) => {
    if(err) throw err;
    else console.log('End.');
  })
}
The first two functions in our async.series run normally and all of their console.log statements are executed. But our third function, i.e.:
// Puts object back into bucket as one merged file
function(callbackSeries) {
  console.log('Final function.')
  // Concatenates multiple arrays in all_data into a single array
  all_data = Array.prototype.concat(...all_data);
  let params = {
    'Bucket': BUCKET,
    'Body': JSON.stringify(all_data),
    'Key': 'obj.json'
  };
  s3Client.putObject(params, (err, data) => {
    if(err) throw err;
    else console.log('Success!');
  })
}
is not being executed at all. We added console.log statements to it, but they never appear to run.
I've consulted both AWS Support and the async documentation but can't seem to figure out the problem.
Any assistance would be highly appreciated.
Many thanks.
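For context on how async.each signals completion (this is how the library works in general, not a verified diagnosis of the code above): the final callback only fires once every iterator has invoked its per-item callback. A minimal, self-contained illustration with hypothetical data:

const async = require('async');

async.each([1, 2, 3], (item, callback) => {
  setTimeout(() => {
    // The per-item callback must run on every code path;
    // if any item skips it, the final callback below never executes
    // and anything queued after this step in async.series stalls.
    callback(null);
  }, 10);
}, (err) => {
  if (err) return console.error(err);
  console.log('All items processed');
});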
I'm building an application with React Native. Now I'm trying to send an image from the mobile device to the server (Node.js). For this I'm using react-native-image-picker. The problem is that when I send the image, the server saves a file but it's empty; it doesn't contain the photo. I think the problem is probably that the server can't access the path of the image because it is on a different device, but I don't know how to solve this.
React-Native:
openImagePicker(){
  const options = {
    title: 'Select Avatar',
    storageOptions: {
      skipBackup: true,
      path: 'images'
    }
  }

  ImagePicker.showImagePicker(options, (imagen) =>{
    if (imagen.didCancel) {
      console.log('User cancelled image picker');
    }
    else if (imagen.error) {
      console.log('ImagePicker Error: ', imagen.error);
    }
    else if (imagen.customButton) {
      console.log('User tapped custom button: ', imagen.customButton);
    }
    else {
      let formdata = new FormData();
      formdata.append("file[name]", imagen.fileName);
      formdata.append("file[path]", imagen.path);
      formdata.append("file[type]", imagen.type);

      fetch('http://X/user/photo/58e137dd5d45090d0b000006', {
        method: 'PUT',
        headers: {
          'Content-Type': 'multipart/form-data'
        },
        body: formdata
      })
      .then(response => {
        console.log("ok");
      })
      .catch(function(err) {
        console.log(err);
      })
    }
  })
}
Node.js:
addPhotoUser = function (req, res) {
  User.findById(req.params.id, function(err, user) {
    fs.readFile(req.body.file.path, function (err, data) {
      var pwd = 'home/ubuntu/.../';
      var newPath = pwd + req.body.file.name;
      fs.writeFile(newPath, data, function (err) {
        imageUrl: URL + req.body.file.name;
        user.save(function(err) {
          if(!err) {
            console.log('Updated');
          } else {
            console.log('ERROR: ' + err);
          }
          res.send(user);
        });
      });
    });
  });
};
Yes, the problem is that the file path is on the local device and not the server. You want to send the actual data returned to you by react-native-image-picker, not the URI. It looks like that library encodes the data with base64, so you're going to want to send that to your server, not the URI returned from the library, because it won't be accessible on a remote server.
What this means is that you won't be reading any files on your server, but instead just decoding a base64 string from the request body and writing that to your filesystem.
For the client side:
let formdata = new FormData();
formdata.append("file[name]", imagen.fileName);
formdata.append("file[data]", imagen.data); // this is base64 encoded!
formdata.append("file[type]", imagen.type);

fetch('http://X/user/photo/58e137dd5d45090d0b000006', {
  method: 'PUT',
  headers: {
    'Content-Type': 'multipart/form-data'
  },
  body: formdata
})
On the server side, use atob to decode the base64 string before writing it to the filesystem:
let decoded = atob(req.body.file.data) // the file[data] field appended above
// now this is binary and can be written to the filesystem
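If atob isn't available in the Node version in use, Buffer provides the same decode; this is a sketch, not part of the original answer:

// Sketch: decode the base64 payload with Buffer instead of atob.
let decoded = Buffer.from(req.body.file.data, 'base64');
// decoded is a Buffer that fs.writeFile can persist directly.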
From there:
fs.writeFile(newPath, decoded, function (err) {
  imageUrl: newPath;
  user.save(function(err) {
    if(!err) {
      console.log('Updated');
    } else {
      console.log('ERROR: ' + err);
    }
    res.send(user);
  });
});
Note, you don't need the fs.readFile that's in your code, because you're decoding the image that was sent as a base64 string in your request.
There also seem to be some oddities with how you're using that user object. You seem to be passing only a function that handles errors and not any actual data. I don't know what ORM you're using, so it's hard to say how it should work. Maybe something like this?
user.save({imageUrl:uriReturnedByFsWrite}, (err, data)=>{...})
Good luck :)
Make an object, then send that object to the server. The object will consist of name, path and type, like this:
var imageData = {name: 'image1', path: uri, type: 'image/jpeg'}
The above is one way to send the image data. Another way is to convert it into a BLOB so that the server-side programmer doesn't have to do this task on their end. You can create a BLOB with react-native-fetch-blob.
One more way is to upload the images directly to the Amazon server (S3) and send the link to the backend.
Function that returns base64 string:
var RNFetchBlob = require('react-native-fetch-blob').default;

getImageAttachment: function(uri_attachment, mimetype_attachment) {
  return new Promise((RESOLVE, REJECT) => {

    // Fetch attachment
    RNFetchBlob.fetch('GET', config.apiRoot+'/app/'+uri_attachment)
      .then((response) => {

        let base64Str = response.data;
        var imageBase64 = 'data:'+mimetype_attachment+';base64,'+base64Str;

        // Return base64 image
        RESOLVE(imageBase64)
      })
  }).catch((error) => {
    // error handling
    console.log("Error: ", error)
  });
},
Cheers :)
I am trying to upload multiple files from an Angular 2 app to a Sails.js server. I want to place the files inside the public folder of the Sails.js app.
The file is uploaded from the Angular 2 app by taking the file from a change event. The code for single-file upload is as follows:
Angular 2 service:
fileChange(event: any): Promise<string> {
  let fileList: FileList = event.target.files;
  if(fileList.length > 0) {
    let file: File = fileList[0];
    let formData:FormData = new FormData();
    formData.append('myFile', file, file.name);

    let headers = new Headers();
    let cToken = this.cookie.getCookie("token");
    headers.append('Authorization', 'Bearer ' + cToken);
    headers.append('Content-Type', undefined);
    //headers.append('Content-Type', 'multipart/form-data');
    headers.append('Accept', 'application/json');
    let options: RequestOptionsArgs = { headers: headers, withCredentials: true }

    return new Promise((resolve, reject) => {
      this.http.post( this.apiEndpoint + "project/reffile/add/all", formData, options).toPromise()
        .then(response => {
          // The promise is resolved once the HTTP call is successful.
          let jsonData = response.json();
          if (jsonData.apiStatus == 1) {
            resolve(jsonData);
          }
          else reject(jsonData.message);
        })
        // The promise is rejected if there is an error with the HTTP call.
        // if we don't get any answers the proxy/api will probably be down
        .catch(reason => reject(reason.statusText));
    });
  }
}
Sails.js method:
/**
 * `FileController.upload()`
 *
 * Upload file(s) to the server's disk.
 */
addAll: function (req, res) {
  // e.g.
  // 0 => infinite
  // 240000 => 4 minutes (240,000 milliseconds)
  // etc.
  //
  // Node defaults to 2 minutes.
  res.setTimeout(0);

  console.log("req.param('filename')");
  console.log(req.param('filename'));

  req.file('myFile')
    .upload({
      // You can apply a file upload limit (in bytes)
      maxBytes: 1000000
    }, function whenDone(err, uploadedFiles) {
      if (err) return res.serverError(err);
      else return res.json({
        files: uploadedFiles,
        textParams: req.allParams()
      });
    });
},
After posting the form, I don't get the file in the HTTP response, and I'm also not able to console.log(req.param('filename')).
Please help me figure out what I am doing wrong here. I have also tried changing/removing the headers, but it's still not working.
Some experts say that Angular's Http can't currently upload files and that a native XHR request is needed for this; please see Thierry Templier's answer here.
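For reference, the XHR-based approach that answer describes looks roughly like the sketch below. It is written from the standard XMLHttpRequest API rather than copied from that answer, so treat the shape as an assumption:

// Sketch: post the FormData with a raw XMLHttpRequest and let the browser set the multipart boundary.
function uploadWithXhr(file, url, token) {
  return new Promise(function(resolve, reject) {
    let formData = new FormData();
    formData.append('myFile', file, file.name);

    let xhr = new XMLHttpRequest();
    xhr.open('POST', url, true);
    xhr.setRequestHeader('Authorization', 'Bearer ' + token);
    // No Content-Type header here; the browser adds multipart/form-data with the boundary itself.
    xhr.onload = function() {
      if (xhr.status >= 200 && xhr.status < 300) resolve(JSON.parse(xhr.responseText));
      else reject(xhr.statusText);
    };
    xhr.onerror = function() { reject(xhr.statusText); };
    xhr.send(formData);
  });
}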
Try specifying a directory for file upload:
req.file('file').upload({
  dirname: '../../assets/uploads'
},function (err, files) {
  if (err) return res.serverError(err);

  var fileNameArray = files[0].fd.split("/");
  var fileName = fileNameArray[fileNameArray.length - 1];
  console.log("fileName: ", fileName);
});
To access the uploaded file, you can append the fileName to the upload directory that you have specified; the file will then be accessible there.
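If the goal is to let the Angular client know where the file ended up, the upload callback can simply return the derived name. This is a sketch extending the snippet above; the exact public URL depends on how the uploads directory is served:

// Sketch: respond with the stored filename so the client can reference the file later.
req.file('file').upload({
  dirname: '../../assets/uploads'
}, function (err, files) {
  if (err) return res.serverError(err);
  if (!files.length) return res.badRequest('No file was uploaded');

  var fileName = files[0].fd.split('/').pop();
  return res.json({ fileName: fileName });
});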
So I've created this nice little Lambda, which runs great locally; not so much when actually out in the wild, however.
The Lambda takes an event with HTML in the event source, converts that HTML to a PDF (using the html-pdf Node module), passes that PDF to an S3 bucket, and then hands back a signed URL that expires in 60 seconds.
Or at least that is what ought to happen (again, works locally). When testing on Lambda, I get the following error:
{
  "errorMessage": "spawn EACCES",
  "errorType": "Error",
  "stackTrace": [
    "exports._errnoException (util.js:870:11)",
    "ChildProcess.spawn (internal/child_process.js:298:11)",
    "Object.exports.spawn (child_process.js:362:9)",
    "PDF.PdfExec [as exec] (/var/task/node_modules/html-pdf/lib/pdf.js:87:28)",
    "PDF.PdfToFile [as toFile] (/var/task/node_modules/html-pdf/lib/pdf.js:83:8)",
    "/var/task/index.js:72:43",
    "Promise._execute (/var/task/node_modules/bluebird/js/release/debuggability.js:272:9)",
    "Promise._resolveFromExecutor (/var/task/node_modules/bluebird/js/release/promise.js:473:18)",
    "new Promise (/var/task/node_modules/bluebird/js/release/promise.js:77:14)",
    "createPDF (/var/task/index.js:71:19)",
    "main (/var/task/index.js:50:5)"
  ]
}
Here's the code itself (not compiled, there's a handy gulp task for that)
if(typeof regeneratorRuntime === 'undefined') {
  require("babel/polyfill")
}

import fs from 'fs'
import pdf from 'html-pdf'
import md5 from 'md5'
import AWS from 'aws-sdk'
import Promise from 'bluebird'
import moment from 'moment'

const tempDir = '/tmp'
const config = require('./config')
const s3 = new AWS.S3()

export const main = (event, context) => {
  console.log("Got event: ", event)

  AWS.config.update({
    accessKeyId: config.awsKey,
    secretAccessKey: config.awsSecret,
    region: 'us-east-1'
  })

  const filename = md5(event.html) + ".pdf"

  createPDF(event.html, filename).then(function(result) {
    uploadToS3(filename, result.filename).then(function(result) {
      getOneTimeUrl(filename).then(function(result) {
        return context.succeed(result)
      }, function(err) {
        console.log(err)
        return context.fail(err)
      })
    }, function(err) {
      console.log(err)
      return context.fail(err)
    })
  }, function(err) {
    console.log(err)
    return context.fail(err)
  })
}

const createPDF = (html, filename) => {
  console.log("Creating PDF")
  var promise = new Promise(function(resolve, reject) {
    pdf.create(html).toFile(filename, function(err, res) {
      if (err) {
        reject(err)
      } else {
        resolve(res)
      }
    })
  })
  return promise
}

const uploadToS3 = (filename, filePath) => {
  console.log("Pushing to S3")
  var promise = new Promise(function(resolve, reject) {
    var fileToUpload = fs.createReadStream(filePath)
    var expiryDate = moment().add(1, 'm').toDate()
    var uploadParams = {
      Bucket: config.pdfBucket,
      Key: filename,
      Body: fileToUpload
    }
    s3.upload(uploadParams, function(err, data) {
      if(err) {
        reject(err)
      } else {
        resolve(data)
      }
    })
  })
  return promise
}

const getOneTimeUrl = (filename) => {
  var promise = new Promise(function(resolve, reject) {
    var params = {
      Bucket: config.pdfBucket,
      Key: filename,
      Expires: 60
    }
    s3.getSignedUrl('getObject', params, function(err, url) {
      if (err) {
        reject(err)
      } else {
        resolve(url)
      }
    })
  })
  return promise
}
Seems like a problem within html-pdf. I thought it might be a problem with PhantomJS (which html-pdf depends on) due to some reading I did here: https://engineering.fundingcircle.com/blog/2015/04/09/aws-lambda-for-great-victory/ ; however, since Lambda has bumped the max zip size to 50 MB, I don't have a problem uploading the binary.
Any thoughts?
html-pdf uses PhantomJS under the hood, which needs to compile some binaries when being installed. I guess your problem is that you are deploying those locally compiled binaries, but Lambda needs binaries compiled on Amazon Linux.
You can solve this problem by building your deploy package on an EC2 instance that is running Amazon Linux and then, for example, deploying it directly from there, as explained in this tutorial.
Also check out this answer on a similar problem.
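If rebuilding the whole package on Amazon Linux is inconvenient, another route people take is bundling a PhantomJS binary built for Linux x64 inside the deployment zip and pointing html-pdf at it. The option name below (phantomPath) and the binary location are assumptions to check against the html-pdf version in use; this is a sketch, not a verified fix:

// Sketch: use a bundled Linux PhantomJS binary instead of the locally compiled one.
import path from 'path'

const createPDF = (html, filename) => {
  const options = {
    // Hypothetical location of a PhantomJS binary shipped inside the deploy package.
    phantomPath: path.join(__dirname, 'bin', 'phantomjs')
  }
  return new Promise((resolve, reject) => {
    pdf.create(html, options).toFile(filename, (err, res) => {
      if (err) return reject(err)
      resolve(res)
    })
  })
}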