Uploading files with graphql to mongodb with mongoose - javascript

I want to upload a file to MongoDB with a GraphQL resolver.
In server.js I have this helper function to store the file, which is exported so it can be used in my resolver.
The function is based on what I found here: https://github.com/jaydenseric/graphql-upload/issues/8, but some things have changed since then, for example how the file object is destructured. I don't know what should end up in the path variable, or how I should use createReadStream (the function destructured from the file).
const mongoose = require('mongoose');
const Grid = require('gridfs-stream');
const fs = require('fs');
//...
// Connect to Mongo
mongoose
.connect(process.env.mongoURI, {
useNewUrlParser: true,
useCreateIndex: true,
useUnifiedTopology: true,
useFindAndModify: false
}) // Adding new mongo url parser
.then(() => console.log('MongoDB Connected...'))
.catch(err => console.log(err));
const storeFile = async (upload) => {
const { filename, createReadStream, mimetype } = await upload.then(result => result);
const bucket = new mongoose.mongo.GridFSBucket(mongoose.connection.db, { bucketName: 'files' });
const uploadStream = bucket.openUploadStream(filename, {
contentType: mimetype
});
createReadStream()
.pipe(uploadStream)
.on('error', console.log('error'))
.on('finish', console.log('finish'));
}
module.exports = { storeFile }
//...
My resolver (a minimal version here, because for now I only want to upload the file into my database; in one of my tries it even created the fs.files and fs.chunks collections, but without any data):
Mutation: {
uploadFile: async (_, { file }) => {
console.log(file);
const fileId = await storeFile(file);
return true;
}
}
I now get this error:
Unhandled Rejection (Error): GraphQL error: The "listener" argument must be of type function. Received undefined
and 'error' is printed in the terminal (from the .on('error', console.log('error')) statement).
I can also only upload small files (max 60 kB); larger ones just don't upload, and the errors show up on every try.
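For reference, the reason for that exact error: .on('error', console.log('error')) calls console.log immediately (which is why 'error' shows up in the terminal right away) and passes its return value, undefined, as the listener, which Node's EventEmitter rejects. Both listeners need to be functions, roughly like this (the full working version is in the answer below):
createReadStream()
.pipe(uploadStream)
.on('error', err => console.log('error', err))
.on('finish', () => console.log('finish'));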

OK, I managed to solve it.
The resolver mutation:
const { storeFile } = require('../../server');
//...
uploadFile: async (_, { file }) => {
const fileId = await storeFile(file).then(result => result);
return true;
// later I will return something more and create some object etc.
}
The supporting function from server.js:
const storeFile = async (upload) => {
const { filename, createReadStream, mimetype } = await upload.then(result => result);
const bucket = new mongoose.mongo.GridFSBucket(mongoose.connection.db, { bucketName: 'files' });
const uploadStream = bucket.openUploadStream(filename, {
contentType: mimetype
});
return new Promise((resolve, reject) => {
createReadStream()
.pipe(uploadStream)
.on('error', reject)
.on('finish', () => {
resolve(uploadStream.id)
})
})
}
module.exports = { storeFile }
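As a possible follow-up (not part of the original answer): once storeFile resolves with uploadStream.id, the file can be streamed back out of the same GridFS bucket with openDownloadStream. A minimal sketch, assuming an Express-style response object res:
const streamFile = (fileId, res) => {
const bucket = new mongoose.mongo.GridFSBucket(mongoose.connection.db, { bucketName: 'files' });
// fileId is the ObjectId that storeFile resolved with (uploadStream.id)
bucket.openDownloadStream(fileId)
.on('error', err => res.status(404).end(err.message))
.pipe(res);
};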

Related

Resizing images with sharp before uploading to google cloud storage

I tried to resize or compress an image before uploading it to Google Cloud Storage.
The upload works fine, but the resizing does not seem to work.
Here is my code:
const uploadImage = async (file) => new Promise((resolve, reject) => {
let { originalname, buffer } = file
sharp(buffer)
.resize(1800, 948)
.toFormat("jpeg")
.jpeg({ quality: 80 })
.toBuffer()
const blob = bucket.file(originalname.replace(/ /g, "_"))
const blobStream = blob.createWriteStream({
resumable: false
})
blobStream.on('finish', () => {
const publicUrl = format(
`https://storage.googleapis.com/${bucket.name}/${blob.name}`
)
resolve(publicUrl)
}).on('error', () => {
reject(`Unable to upload image, something went wrong`)
})
.end(buffer)
})
I ran into the same issue with a project I was working on. After lots of trial and error I found the following solution. It might not be the most elegant, but it worked for me.
In my upload route function I created a new thumbnail image object with the original file values and passed it as the file parameter to the uploadFile function for Google Cloud Storage.
Inside my upload image route function:
const file = req.file;
const thumbnail = {
fieldname: file.fieldname,
originalname: `thumbnail_${file.originalname}`,
encoding: file.encoding,
mimetype: file.mimetype,
buffer: await sharp(file.buffer).resize({ width: 150 }).toBuffer()
}
const uploadThumbnail = await uploadFile(thumbnail);
My google cloud storage upload file function:
const uploadFile = async (file) => new Promise((resolve, reject) => {
const gcsname = file.originalname;
const bucketFile = bucket.file(gcsname);
const stream = bucketFile.createWriteStream({
resumable: false,
metadata: {
contentType: file.mimetype
}
});
stream.on('error', (err) => {
reject(err);
});
stream.on('finish', (res) => {
resolve({
name: gcsname
});
});
stream.end(file.buffer);
});
I think the problem is with toFormat(). That function does not exist in the Docs. Can you try to remove it and check if it would work?
sharp(buffer)
.resize(1800, 948)
.jpeg({ quality: 80 })
.toBuffer()
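A side note on the snippet in the question: the result of toBuffer() is never used there, and the original buffer is what gets written with .end(buffer), so the upload always contains the unresized image. Whichever chain is used, the resized buffer has to be awaited and passed to the upload stream. A minimal sketch, assuming the same blobStream setup as in the question and an async context (or handling the promise with .then):
const resizedBuffer = await sharp(buffer)
.resize(1800, 948)
.jpeg({ quality: 80 })
.toBuffer();
// write the resized buffer, not the original one
blobStream.end(resizedBuffer);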
Modify the metadata once you have finished uploading the image.
import * as admin from "firebase-admin";
import * as functions from "firebase-functions";
import { log } from "firebase-functions/logger";
import * as sharp from "sharp";
export const uploadFile = functions.https.onCall(async (data, context) => {
const bytes = data.imageData;
const bucket = admin.storage().bucket();
const buffer = Buffer.from(bytes, "base64");
const bufferSharp = await sharp(buffer)
.png()
.resize({ width: 500 })
.toBuffer();
const nombre = "IMAGE_NAME.png";
const fileName = `img/${nombre}.png`;
const fileUpload = bucket.file(fileName);
const uploadStream = fileUpload.createWriteStream();
uploadStream.on("error", async (err) => {
log("Error uploading image", err);
throw new functions.https.HttpsError("unknown", "Error uploading image");
});
uploadStream.on("finish", async () => {
await fileUpload.setMetadata({ contentType: "image/png" });
log("Upload success");
});
uploadStream.end(bufferSharp);
});

What am I missing here to get data out of this spawned Node.js child process?

I'm trying to use a spawned command-line lzip process to expand an lzipped data stream, as I haven't found any good native JavaScript tools to do the job.
I can get this to work using files and file descriptors, but it seems stupid to have to write out, and read back in, a bunch of temporary scratch files. I want to do all of the work I can in memory.
So here's the code I'm trying to use:
import { requestBinary } from 'by-request';
import { spawn } from 'child_process';
import { min } from '#tubular/math';
export async function tarLzToZip(url: string): Promise<void> {
const lzData = await requestBinary(url, { headers: { 'User-Agent': 'curl/7.64.1' } });
const lzipProc = spawn('lzip', ['-d'], { stdio: ['pipe', 'pipe', process.stderr] });
let tarContent = Buffer.alloc(0);
lzipProc.stdout.on('data', data => {
tarContent = Buffer.concat([tarContent, data], tarContent.length + data.length);
});
for (let offset = 0; offset < lzData.length; offset += 4096) {
await new Promise<void>((resolve, reject) => {
lzipProc.stdin.write(lzData.slice(offset, min(offset + 4096, lzData.length)), err => {
if (err)
reject(err);
else
resolve();
});
});
}
await new Promise<void>((resolve, reject) => {
lzipProc.stdin.end((err: any) => {
if (err)
reject(err);
else
resolve();
});
});
console.log('data length:', tarContent.length);
}
When I step through with a debugger, everything seems to be going well with sending data into lzipProc.stdin. (I've tried both chunks like this and all the data in one go.) The lzipProc.stdout.on('data', ...) handler, however, never gets called. When I get to the end, tarContent is empty.
What's missing here? Do I need a different stdio config? Are there different stream objects I should be using? Do I need more goats to sacrifice under the light of a full moon?
UPDATE
My solution, based on Matt's excellent answer posted below, with all of the particulars for my use case:
import archiver from 'archiver';
import fs, { ReadStream } from 'fs';
import fsp from 'fs/promises';
import needle from 'needle';
import path from 'path';
import { spawn } from 'child_process';
import tar from 'tar-stream';
const baseUrl = 'https://data.iana.org/time-zones/releases/';
export async function codeAndDataToZip(version: string): Promise<ReadStream> {
return compressedTarToZip(`${baseUrl}tzdb-${version}.tar.lz`);
}
export async function codeToZip(version: string): Promise<ReadStream> {
return compressedTarToZip(`${baseUrl}tzcode${version}.tar.gz`);
}
export async function dataToZip(version: string): Promise<ReadStream> {
return compressedTarToZip(`${baseUrl}tzdata${version}.tar.gz`);
}
async function compressedTarToZip(url: string): Promise<ReadStream> {
const fileName = /([-a-z0-9]+)\.tar\.[lg]z$/i.exec(url)[1] + '.zip';
const filePath = path.join(process.env.TZE_ZIP_DIR || path.join(__dirname, 'tz-zip-cache'), fileName);
if (await fsp.stat(filePath).catch(() => false))
return fs.createReadStream(filePath);
const [command, args] = url.endsWith('.lz') ? ['lzip', ['-d']] : ['gzip', ['-dc']];
const originalArchive = needle.get(url, { headers: { 'User-Agent': 'curl/7.64.1' } });
const tarExtract = tar.extract({ allowUnknownFormat: true });
const zipPack = archiver('zip');
const writeFile = fs.createWriteStream(filePath);
const commandProc = spawn(command, args);
commandProc.stderr.on('data', msg => { throw new Error(`${command} error: ${msg}`); });
commandProc.stderr.on('error', err => { throw err; });
originalArchive.pipe(commandProc.stdin);
commandProc.stdout.pipe(tarExtract);
tarExtract.on('entry', (header, stream, next) => {
zipPack.append(stream, { name: header.name, date: header.mtime });
stream.on('end', next);
});
tarExtract.on('finish', () => zipPack.finalize());
zipPack.pipe(writeFile);
return new Promise<ReadStream>((resolve, reject) => {
const rejectWithError = (err: any): void =>
reject(err instanceof Error ? err : new Error(err.message || err.toString()));
writeFile.on('error', rejectWithError);
writeFile.on('finish', () => resolve(fs.createReadStream(filePath)));
tarExtract.on('error', err => {
// tar-stream has a problem with the format of a few of the tar files
// dealt with here, which nevertheless are valid archives.
if (/unexpected end of data|invalid tar header/i.test(err.message))
console.error('Archive %s: %s', url, err.message);
else
reject(err);
});
zipPack.on('error', rejectWithError);
zipPack.on('warning', rejectWithError);
commandProc.on('error', rejectWithError);
commandProc.on('exit', err => err && reject(new Error(`${command} error: ${err}`)));
originalArchive.on('error', rejectWithError);
});
}
I would leave the streaming to node or packages, unless you have specific processing that needs to be done. Just wrap the whole stream setup in a promise.
If you also stream the request/response, it can be piped into the decompressor. Then stdout from the decompressor can be piped to the archive stream handlers.
import fs from 'fs'
import { spawn } from 'child_process'
import needle from 'needle'
import tar from 'tar-stream'
import archiver from 'archiver'
export function tarLzToZip(url) {
return new Promise((resolve, reject) => {
// Setup streams
const res = needle.get(url)
const lzipProc = spawn('lzip', ['-dc'], { stdio: ['pipe','pipe',process.stderr] })
const tarExtract = tar.extract()
const zipPack = archiver('zip')
const writeFile = fs.createWriteStream('tardir.zip')
// Pipelines and processing
res.pipe(lzipProc.stdin)
lzipProc.stdout.pipe(tarExtract)
// tar -> zip (simple file name)
tarExtract.on('entry', function(header, stream, next) {
console.log('entry', header)
zipPack.append(stream, { name: header.name })
stream.on('end', () => next())
})
tarExtract.on('finish', function() {
zipPack.finalize()
})
zipPack.pipe(writeFile)
// Handle the things
writeFile.on('error', reject)
writeFile.on('close', () => console.log('write close'))
writeFile.on('finish', () => resolve(true))
tarExtract.on('error', reject)
zipPack.on('error', reject)
zipPack.on('warning', reject)
lzipProc.on('error', reject)
lzipProc.on('exit', code => {if (code !== 0) reject(new Error(`lzip ${code}`))})
res.on('error', reject)
res.on('done', ()=> console.log('request done', res.request.statusCode))
})
}
You might want to be more verbose about logging errors and stderr, as a single promise rejection can easily hide what actually happened across the multiple streams.
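For example, a small sketch of more verbose diagnostics for the decompressor (this assumes stderr is set to 'pipe' instead of being inherited as process.stderr, so the output can be captured):
const lzipProc = spawn('lzip', ['-dc'], { stdio: ['pipe', 'pipe', 'pipe'] })
// surface anything lzip writes to stderr, plus how the process ended
lzipProc.stderr.on('data', chunk => console.error('lzip stderr:', chunk.toString()))
lzipProc.on('exit', (code, signal) => console.log('lzip exited with', code, signal))
lzipProc.on('error', err => console.error('failed to start lzip:', err))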

multi file upload with skipper-better-s3 and sailjs returns the same key

As the title says, I am currently using Sails.js + skipper-better-s3 for S3 uploads. I started with uploading one file, which works great. Then a change request required uploading multiple files at once, so I added a for loop, but with the loop all the keys end up the same, and only one file is actually uploaded: the last uploaded file, but with the first upload's filename.
I read some articles where people say the problem is that the for loop is synchronous while the file upload is asynchronous, and that the fix is to use recursion. I tried that too, but no luck; the same thing happens.
My recursive code is below...
s3_upload_multi: async (req, res) => {
const generatePath = (rootPath, fieldName) => {
let path;
// this is just a switch statement here to check which fieldName is provided then value of path will depend on it
// as for the other two variable is just checking if upload content type is correct
return { path };
};
const processUpload = async ({
fieldName,
awsOp,
fileExtension,
rootPath,
fileName,
}) => {
return new Promise(function (resolve, reject) {
req.file(fieldName).upload(awsOp, async (err, filesUploaded) => {
if (err) reject(err);
const filesUploadedF = filesUploaded[0]; // F = first file
const response = {
status: true,
errCode: 200,
msg: 'OK',
response: {
url: filesUploadedF.extra.Location,
size: filesUploadedF.size,
type: fileExtension,
filename: filesUploadedF.filename,
key: filesUploadedF.extra.Key,
field: fieldName,
}
};
resolve(response);
});
});
}
const process_recur = async (files, fieldName) => {
if (files.length <= 0) return;
const fileUpload = files[0].stream;
const rootPath = `${sails.config.aws.upload.path.root}`;
const fileCType = fileUpload.headers['content-type'];
// console.log(fileCType, 'fileCType');
const { path } = generatePath(rootPath, fieldName);
const fileName = fileUpload.filename;
const fileExtension = fileUpload.filename.split('.').pop();
const genRan = await UtilsService.genRan(8);
const fullPath = `${path}${genRan}-${fileName}`;
const awsOp = {
adapter: require('skipper-better-s3'),
key: sails.config.aws.access_key,
secret: sails.config.aws.secret_key,
saveAs: fullPath,
bucket: sails.config.aws.bucket,
s3params: {
ACL: 'public-read'
},
};
const config = {
fieldName,
awsOp,
fileExtension,
rootPath,
fileName,
}
const procceed = await processUpload(config);
files.shift();
await process_recur(files, fieldName);
};
try {
const fieldName = req._fileparser.upstreams[0].fieldName;
const files = req.file(fieldName)._files;
await process_recur(files, fieldName);
} catch (e) {
console.log(e, 'inside UploadService');
return false;
}
}
Below is my code using the for loop, which is quite similar to the above:
s3_upload_multi: async (req, res) => {
const generatePath = (rootPath, fieldName) => {
let path;
// this is just a switch statement here to check which fieldName is provided then value of path will depend on it
// as for the other two variable is just checking if upload content type is correct
return { path };
};
const processUpload = async ({
fieldName,
awsOp,
fileExtension,
rootPath,
fileName,
}) => {
return new Promise(function (resolve, reject) {
req.file(fieldName).upload(awsOp, async (err, filesUploaded) => {
if (err) reject(err);
const filesUploadedF = filesUploaded[0]; // F = first file
const response = {
status: true,
errCode: 200,
msg: 'OK',
response: {
url: filesUploadedF.extra.Location,
size: filesUploadedF.size,
type: fileExtension,
filename: filesUploadedF.filename,
key: filesUploadedF.extra.Key,
field: fieldName,
}
};
resolve(response);
});
});
}
try {
const fieldName = req._fileparser.upstreams[0].fieldName;
const files = req.file(fieldName)._files;
for (const file of files) {
const fileUpload = file.stream;
const rootPath = `${sails.config.aws.upload.path.root}`;
const fileCType = fileUpload.headers['content-type'];
// console.log(fileCType, 'fileCType');
const fileName = fileUpload.filename;
const { path } = generatePath(rootPath, fieldName);
const fileExtension = fileUpload.filename.split('.').pop();
// using a variable here because if this is an image, a thumbnail will be created with the same name as the original one
const genRan = await UtilsService.genRan(8);
const fullPath = await `${path}${genRan}-${fileName}`;
const awsOp = {
adapter: require('skipper-better-s3'),
key: sails.config.aws.access_key,
secret: sails.config.aws.secret_key,
saveAs: fullPath,
bucket: sails.config.aws.bucket,
s3params: {
ACL: 'public-read'
},
};
const config = {
fieldName,
awsOp,
fileExtension,
rootPath,
fileName,
}
const procceed = await processUpload(config);
console.log(procceed, 'procceed');
}
} catch (e) {
console.log(e, 'inside UploadService');
return false;
}
}
Which part am I getting wrong that's causing this behavior? I checked my path and it's totally correct, with the correct filename too, when I console.log it.
Thanks in advance for any suggestions and help.
It took me quite a lot of time to figure this out ages ago.
Since you are using skipper-better-s3, which doesn't have documentation as detailed as skipper's, it helps to go back to the skipper documentation: the saveAs option doesn't only take a string but also a function, which you can use to get each file's filename and return the key you need. So you don't actually need recursion or a for loop at all.
For example, with some of your code:
const awsOp = {
adapter: require('skipper-better-s3'),
key: sails.config.aws.access_key,
secret: sails.config.aws.secret_key,
saveAs: (__newFileStream, next) => {
// generatePath is what you wrote
// __newFileStream.filename would the filename of each each before uploading
// the path is pretty much the s3 key which includes your filename too
const { path } = generatePath(rootPath, __newFileStream.filename, fieldName);
return next(undefined, path);
},
bucket: sails.config.aws.bucket,
s3params: {
ACL: 'public-read'
},
};
skipper documentation https://www.npmjs.com/package/skipper#customizing-at-rest-filenames-for-uploads

Google Cloud Function errors with ChildProcessError

This is my Cloud Function, which is supposed to generate a watermarked image and store it in Firebase Storage every time an image is uploaded.
exports.generateWatermark = functions.storage
.object()
.onFinalize(async object => {
try {
const fileBucket = object.bucket; // The Storage bucket that contains the file.
const filePath = object.name; // File path in the bucket.
const contentType = object.contentType; // File content type.
const metageneration = object.metageneration; // Number of times metadata has been generated. New objects have a value of 1.
// Exit if this is triggered on a file that is not an image.
if (!contentType.startsWith('image/')) {
return console.log('This is not an image.');
}
// Get the file name.
const fileName = path.basename(filePath);
// Exit if the image is already a watermarked image.
if (fileName.startsWith('watermark_')) {
return console.log('Already a Watermarked image.');
}
if (!filePath.startsWith('pets')) {
return console.log('Not a pet image: ', filePath);
}
// Download file from bucket.
const bucket = admin.storage().bucket(fileBucket);
const tempFilePath = path.join(os.tmpdir(), fileName);
const tempWatermarkPath = path.join(os.tmpdir(), 'watermark.png');
const metadata = {
contentType: contentType,
};
// Generate a watermarked image using Jimp
await bucket.file(filePath).download({destination: tempFilePath});
await bucket
.file('logo/cbs.png')
.download({destination: tempWatermarkPath});
console.log('Image downloaded locally to', tempFilePath, filePath);
await spawn('convert', [
tempFilePath,
'-gravity',
'NorthWest',
'-draw',
`"image Over 10,10,200,200 ${tempWatermarkPath}"`,
tempFilePath,
]);
console.log('Watermarked image created at', tempFilePath);
// We add a 'watermark_' prefix
const watermarkFileName = `watermark_${fileName}`;
const watermarkFilePath = path.join(
path.dirname(filePath),
watermarkFileName,
);
// Uploading the watermarked image.
await bucket.upload(tempFilePath, {
destination: watermarkFilePath,
metadata: metadata,
});
// Once the watermarked image has been uploaded delete the local file to free up disk space.
fs.unlinkSync(tempFilePath);
return fs.unlinkSync(tempWatermarkPath);
} catch (err) {
console.log('GENERATE WATERMARK ERROR: ', err);
throw err;
}
});
The part of the code that errors out is the ImageMagick part:
await spawn('convert', [
tempFilePath,
'-gravity',
'NorthWest',
'-draw',
`"image Over 10,10,200,200 ${tempWatermarkPath}"`,
tempFilePath,
]);
This is the error that I'm getting:
Is there a way I could get more info about the error? The error is not even reaching my catch block.
child_process.spawn uses the observer pattern.
The return value from invoking child_process.spawn is a ChildProcess object whose stdout and stderr are EventEmitters.
You'll need an extra step to promisify the existing interface before you can await it. For example,
const spawn = (command, args) => new Promise((resolve, reject) => {
const cp = require('child_process').spawn(command, args);
let err = '', out = '';
cp.stdout.on('data', data => out += data.toString());
cp.stderr.on('data', data => err += data.toString()); // collect error output from stderr
cp.on('error', data => err += data.toString());
cp.on('close', code => {
(code === 0) ? resolve(out) : reject(err)
});
})
child_process.execFile, on the other hand, uses callbacks. This makes it easy to promisify using the util.promisify function. For example:
const util = require('util');
const execFile = util.promisify(require('child_process').execFile);
exports.generateWatermark = functions.storage
.object()
.onFinalize(async object => {
try {
//...
await execFile('convert', [
tempFilePath,
'-gravity',
'NorthWest',
'-draw',
`"image Over 10,10,200,200 ${tempWatermarkPath}"`,
tempFilePath,
]);
//...
} catch (err) {
console.log('GENERATE WATERMARK ERROR: ', err);
throw err;
}
});
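One more thing that may be worth checking, and it applies to both the spawn and execFile variants: child_process passes each element of the args array to the program directly, without a shell, so the embedded double quotes around the -draw value become part of the argument itself and ImageMagick may reject it. Passing the value without the inner quotes is probably what's intended:
await execFile('convert', [
tempFilePath,
'-gravity',
'NorthWest',
'-draw',
// the template literal is already a single argument; no inner quotes needed
`image Over 10,10,200,200 ${tempWatermarkPath}`,
tempFilePath,
]);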

Upload entire directory tree to S3 using AWS sdk in node js

I currently upload single objects to S3 like so:
var options = {
Bucket: bucket,
Key: s3Path,
Body: body,
ACL: s3FilePermissions
};
S3.putObject(options,
function (err, data) {
//console.log(data);
});
But when I have a large resources folder for example, I use the AWS CLI tool.
I was wondering, is there a native way to do the same thing with the AWS SDK (upload entire folders to S3)?
Old-school recursive way I whipped up in a hurry. Only uses core Node modules and the standard AWS SDK.
var AWS = require('aws-sdk');
var path = require("path");
var fs = require('fs');
const uploadDir = function(s3Path, bucketName) {
let s3 = new AWS.S3();
function walkSync(currentDirPath, callback) {
fs.readdirSync(currentDirPath).forEach(function (name) {
var filePath = path.join(currentDirPath, name);
var stat = fs.statSync(filePath);
if (stat.isFile()) {
callback(filePath, stat);
} else if (stat.isDirectory()) {
walkSync(filePath, callback);
}
});
}
walkSync(s3Path, function(filePath, stat) {
let bucketPath = filePath.substring(s3Path.length+1);
let params = {Bucket: bucketName, Key: bucketPath, Body: fs.readFileSync(filePath) };
s3.putObject(params, function(err, data) {
if (err) {
console.log(err)
} else {
console.log('Successfully uploaded '+ bucketPath +' to ' + bucketName);
}
});
});
};
uploadDir("path to your folder", "your bucket name");
Special thanks to Ali from this post for helping get the filenames.
async/await + Typescript
If you need a solution that uses modern JavaScript syntax and is compatible with TypeScript, I came up with the following code. The recursive getFiles is borrowed from this answer (after all these years, recursion still gives me a headache, lol).
import { promises as fs, createReadStream } from 'fs';
import * as path from 'path';
import { S3 } from 'aws-sdk';
async function uploadDir(s3Path: string, bucketName: string) {
const s3 = new S3();
// Recursive getFiles from
// https://stackoverflow.com/a/45130990/831465
async function getFiles(dir: string): Promise<string | string[]> {
const dirents = await fs.readdir(dir, { withFileTypes: true });
const files = await Promise.all(
dirents.map((dirent) => {
const res = path.resolve(dir, dirent.name);
return dirent.isDirectory() ? getFiles(res) : res;
})
);
return Array.prototype.concat(...files);
}
const files = (await getFiles(s3Path)) as string[];
const uploads = files.map((filePath) =>
s3
.putObject({
Key: path.relative(s3Path, filePath),
Bucket: bucketName,
Body: createReadStream(filePath),
})
.promise()
);
return Promise.all(uploads);
}
await uploadDir(path.resolve('./my-path'), 'bucketname');
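One small caveat with this version: path.relative uses the platform separator, so on Windows the keys would contain backslashes. If that matters, the key can be normalized to forward slashes, for example:
const key = path.relative(s3Path, filePath).split(path.sep).join('/');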
Here is a cleaned-up/debugged/working version of #Jim's solution:
function uploadArtifactsToS3() {
const artifactFolder = `logs/${config.log}/test-results`;
const testResultsPath = './test-results';
const walkSync = (currentDirPath, callback) => {
fs.readdirSync(currentDirPath).forEach((name) => {
const filePath = path.join(currentDirPath, name);
const stat = fs.statSync(filePath);
if (stat.isFile()) {
callback(filePath, stat);
} else if (stat.isDirectory()) {
walkSync(filePath, callback);
}
});
};
walkSync(testResultsPath, async (filePath) => {
let bucketPath = filePath.substring(testResultsPath.length - 1);
let params = {
Bucket: process.env.SOURCE_BUCKET,
Key: `${artifactFolder}/${bucketPath}`,
Body: fs.readFileSync(filePath)
};
try {
await s3.putObject(params).promise();
console.log(`Successfully uploaded ${bucketPath} to s3 bucket`);
} catch (error) {
console.error(`error in uploading ${bucketPath} to s3 bucket`);
throw new Error(`error in uploading ${bucketPath} to s3 bucket`);
}
});
}
I was just contemplating this problem the other day, and was thinking something like this:
...
var async = require('async'),
fs = require('fs'),
path = require("path");
var directoryName = './test',
directoryPath = path.resolve(directoryName);
var files = fs.readdirSync(directoryPath);
async.map(files, function (f, cb) {
var filePath = path.join(directoryPath, f);
var options = {
Bucket: bucket,
Key: `${s3Path}/${f}`, // give each file its own key, using s3Path as the destination prefix
Body: fs.readFileSync(filePath),
ACL: s3FilePermissions
};
S3.putObject(options, cb);
}, function (err, results) {
if (err) console.error(err);
console.log(results);
});
Here's a version that contains a Promise on the upload method. This version allows you to perform an action when all uploads are complete Promise.all().then...
const path = require('path');
const fs = require('fs');
const AWS = require('aws-sdk');
const s3 = new AWS.S3();
const directoryToUpload = 'directory-name-here';
const bucketName = 'name-of-s3-bucket-here';
// get file paths
const filePaths = [];
const getFilePaths = (dir) => {
fs.readdirSync(dir).forEach(function (name) {
const filePath = path.join(dir, name);
const stat = fs.statSync(filePath);
if (stat.isFile()) {
filePaths.push(filePath);
} else if (stat.isDirectory()) {
getFilePaths(filePath);
}
});
};
getFilePaths(directoryToUpload);
// upload to S3
const uploadToS3 = (dir, path) => {
return new Promise((resolve, reject) => {
const key = path.split(`${dir}/`)[1];
const params = {
Bucket: bucketName,
Key: key,
Body: fs.readFileSync(path),
};
s3.putObject(params, (err) => {
if (err) {
reject(err);
} else {
console.log(`uploaded ${params.Key} to ${params.Bucket}`);
resolve(path);
}
});
});
};
const uploadPromises = filePaths.map((path) =>
uploadToS3(directoryToUpload, path)
);
Promise.all(uploadPromises)
.then((result) => {
console.log('uploads complete');
console.log(result);
})
.catch((err) => console.error(err));
You might try the node-s3-client.
UPDATE: Available on npm here
From the sync a directory to s3 docs:
UPDATE: Added client initialization code.
var s3 = require('s3'); // node-s3-client is published on npm as "s3"
var client = s3.createClient({
maxAsyncS3: 20, // this is the default
s3RetryCount: 3, // this is the default
s3RetryDelay: 1000, // this is the default
multipartUploadThreshold: 20971520, // this is the default (20 MB)
multipartUploadSize: 15728640, // this is the default (15 MB)
s3Options: {
accessKeyId: "YOUR ACCESS KEY",
secretAccessKey: "YOUR SECRET ACCESS KEY"
}
});
var params = {
localDir: "some/local/dir",
deleteRemoved: true, // default false, whether to remove s3 objects
// that have no corresponding local file.
s3Params: {
Bucket: "s3 bucket name",
Prefix: "some/remote/dir/",
// other options supported by putObject, except Body and ContentLength.
// See: http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property
},
};
var uploader = client.uploadDir(params);
uploader.on('error', function(err) {
console.error("unable to sync:", err.stack);
});
uploader.on('progress', function() {
console.log("progress", uploader.progressAmount, uploader.progressTotal);
});
uploader.on('end', function() {
console.log("done uploading");
});
This works for me (you'll need to add the walkSync package):
async function asyncForEach(array, callback) {
for (let index = 0; index < array.length; index++) {
await callback(array[index], index, array);
}
}
const syncS3Directory = async (s3Path, endpoint) => {
await asyncForEach(walkSync(s3Path, {directories: false}), async (file) => {
const filePath = Path.join(s3Path, file);
const fileContent = fs.readFileSync(filePath);
const params = {
Bucket: endpoint,
Key: file,
Body: fileContent,
ContentType: "text/html",
};
let s3Upload = await s3.upload(params).promise();
s3Upload ? undefined : Logger.error("Error synchronizing the bucket");
});
console.log("S3 bucket synchronized!");
};
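If the directory holds more than HTML files, the hard-coded ContentType above can be replaced with a per-file lookup; a small sketch assuming the mime-types package is installed:
const mime = require("mime-types");
// fall back to a generic binary type when the extension is unknown
const ContentType = mime.lookup(filePath) || "application/octet-stream";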
const AWS = require("aws-sdk");
const fs = require("fs");
const path = require("path");
const async = require("async");
const readdir = require("recursive-readdir");
// AWS CRED
const ID = "<accessKeyId>";
const SECRET = "<secretAccessKey>";
const rootFolder = path.resolve(__dirname, "../");
const uploadFolder = "./sources";
// The name of the bucket that you have created
const BUCKET_NAME = "<Bucket_Name>";
const s3 = new AWS.S3({
accessKeyId: ID,
secretAccessKey: SECRET
});
function getFiles(dirPath) {
return fs.existsSync(dirPath) ? readdir(dirPath) : [];
}
async function uploadToS3(uploadPath) {
const filesToUpload = await getFiles(path.resolve(rootFolder, uploadPath));
console.log(filesToUpload);
return new Promise((resolve, reject) => {
async.eachOfLimit(
filesToUpload,
10,
async.asyncify(async file => {
const Key = file.replace(`${rootFolder}/`, "");
console.log(`uploading: [${Key}]`);
return new Promise((res, rej) => {
s3.upload(
{
Key,
Bucket: BUCKET_NAME,
Body: fs.readFileSync(file)
},
err => {
if (err) {
return rej(new Error(err));
}
res({ result: true });
}
);
});
}),
err => {
if (err) {
return reject(new Error(err));
}
resolve({ result: true });
}
);
});
}
uploadToS3(uploadFolder)
.then(() => {
console.log("upload complete!");
process.exit(0);
})
.catch(err => {
console.error(err.message);
process.exit(1);
});
