I'm trying to look for files in a specific folder on Drive using the API. The reason is that my files can have copies with the same name in multiple folders. This is a snippet of what I wrote to check if the files exist, but the resulting query is undefined. Anyone got an idea?
function checkFile(filename, auth, folderId){
var service = google.drive('v3');
console.log("folderId: " + folderId);
var objectExists = false;
var fetchPage = function(pageToken, pageFn, callback) {
service.files.list({auth: auth,
resource: { parents: [ folderId ] },
q: "mimeType='application/pdf' and trashed=false",
fields: 'nextPageToken, files(id, name)',
spaces: 'drive',
pageToken: pageToken,
}, function(err, res) {
if(err) {
callback(err);
} else {
console.log(res)
if(res.files != undefined){
res.files.forEach(function(file) {
if(file.name == filename)
{
console.log('Found file: ', file.name, file.id);
objectExists = true;
}
});
if (res.nextPageToken) {
pageFn(res.nextPageToken, pageFn, callback);
} else {
callback();
}
}
else {
callback();
}
}
});
};
fetchPage(null, fetchPage, function(err) {
if (err) {
// Handle error
console.log(err);
} else {
if(!objectExists)
createFile(filename, auth, folderId)
}
});
}
Solved it by adding more data in fields:
fields: 'nextPageToken, files(id, name),files/parents',
Followed by adding an extra check-value:
if(file.name == filename && file.parents[0] == folderId)
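As an alternative, the folder restriction can also go straight into the query. This is only a sketch under the assumption that your googleapis version returns the file list the same way as in the code above; the 'in parents' operator is part of the Drive v3 search syntax:
// Sketch: scope the list call to one folder and one name via q, instead of checking parents client-side
service.files.list({
  auth: auth,
  q: "'" + folderId + "' in parents and name='" + filename + "' and mimeType='application/pdf' and trashed=false",
  fields: 'nextPageToken, files(id, name)',
  spaces: 'drive',
  pageToken: pageToken,
}, function(err, res) {
  // res.files (or res.data.files on newer googleapis releases) now only contains matches inside folderId
});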
Hi Akorna, I found your question while searching for the same thing. Would you mind if I ask for some beginner-level code for searching with the Google Drive API in Node.js?
I have a skeleton ready and have configured the drive for it through a service account.
const { google } = require('googleapis');
const readLine = require('readline');
const path = require('path');
const fs = require('fs');
const dotenv = require('dotenv');
const { file } = require('googleapis/build/src/apis/file');
dotenv.config();
const KEYFILEPATH = path.join(__dirname, '/credentials.json');
const SCOPES = ['https://www.googleapis.com/auth/drive', 'profile'];
const auth = new google.auth.GoogleAuth({
keyFile: KEYFILEPATH,
scopes: SCOPES
});
const drive = google.drive({version: 'v3', auth});
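For a basic search on top of that skeleton, something along these lines should work. This is only a sketch, assuming a recent googleapis version (where files.list returns a promise) and that the service account actually has access to the files; adjust the q string to whatever you need to match:
// Sketch: list PDF files whose name contains 'report', using the drive client built above
async function searchFiles() {
  const res = await drive.files.list({
    q: "mimeType='application/pdf' and name contains 'report' and trashed=false",
    fields: 'nextPageToken, files(id, name, parents)',
    spaces: 'drive',
    pageSize: 100,
  });
  const files = res.data.files || [];
  files.forEach(file => console.log(file.name, file.id));
  return files;
}
searchFiles().catch(console.error);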
I am writing a Lambda function to add hosts to an SQS queue for a rolling restart. The code I have written works individually, but not together, even when I hard-code values in the constructor. This doesn't appear to be a memory/CPU issue: I tried running the function with 1 GB of memory, even though it only uses about 80 MB. The average execution time for the individual functions is about 0.5 seconds (it shouldn't take more than about 1.5 seconds to execute in total). I did try running this function with a 30-second timeout, but it still timed out.
I work behind a corporate proxy and have to hand-jam the code. I don't have an IDE or IntelliSense on my internet-facing network. There may be typos here, but not in the actual code. I have omitted my module imports and variable declarations to save time; they aren't relevant to the issue at hand.
EDIT: I added the module imports and variable declarations to the first example to hopefully alleviate some confusion.
Here are just a few things I have tried. This does not work (timing out):
// Custom lambda layer
const { marklogic, aws } = require('nodejs-layer-lib');
const { HOSTS, DOMAIN, PORT, USERNAME, PASSWORD, RESTART_QUEUE_NAME } = process.env;
const params = [
'format=json'
];
const options = {
port: PORT,
params: params,
httpOptions: {
headers: {
'Authorization': `Basic ${Buffer.from(`${USERNAME}:${PASSWORD}`).toString('base64')}`
},
method: 'GET'
}
};
const taskServers = (HOSTS.split(',') || []).map(host => {
const _host = host.split(':');
return {
id: _host[0],
name: `http://${_host[1].toLowerCase()}.${DOMAIN}`
};
});
exports.handler = async () => {
let hosts, queueUrl, addToQueueResults;
try {
hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
} catch (e) { console.error('hosts', e); }
try {
queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
} catch (e) { console.error('queueUrl ', e); }
try {
addToQueueResults = await aws.sqs.addMessages(queueURL, hosts);
} catch (e) { console.error('addToQueueResults ', e); }
return {
status: 200,
body: addToQueueResults
};
}
This does not work (timing out):
// Modules imports and variable declarations here...
exports.handler = async () => {
const hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
const queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
const addToQueueResults = await aws.sqs.addMessages(queueURL, hosts);
return {
status: 200,
body: addToQueueResults
};
}
This does not work (timing out):
// Modules imports and variable declarations here...
exports.handler = async () => {
const hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
const queueUrl = await aws.sqs.getQueueUrlByName('my-queue-name');
const addToQueueResults = await aws.sqs.addMessages('http://queueurl.com', ['anything', 'in', 'here']); // Doesn't even need the queueUrl or hosts anymore
return {
status: 200,
body: addToQueueResults
};
}
This works. It will return the host objects I am expecting in the response:
// Modules imports and variable declarations here...
exports.handler = async () => {
const hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
return {
status: 200,
body: hosts
};
}
This works. It will get the queue url, then add messages to my SQS queue and return the SQS response:
// Modules imports and variable declarations here...
exports.handler = async () => {
const queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
const addToQueueResults = await aws.sqs.addMessages(queueUrl , ['anything', 'in', 'here']);
return {
status: 200,
body: addToQueueResults
};
}
I tried implementing the async handler as described in "AWS Lambda function handler in Node.js" and reviewed many AWS Lambda execution troubleshooting documents. The MarkLogic management API runs on port 8002 by default, and I think the aws-sdk module uses HTTP/HTTPS (80/443), so I don't think the ports are getting tied up.
What am I missing here?
EDIT 2: This has something to do with how promises are handled with AWS Lambda. I cannot find much information about this. Even following the instructions in AWS Lambda function handler in Node.js for "Async Handlers" I cannot get this to work. It works perfectly fine locally with or without my custom lambda layer.
Node.js runtime: 12.x (I didn't mention this before)
This also doesn't work (timing out):
// Modules imports and variable declarations here...
exports.handler = async function (event) {
const promise = function () {
return new Promise(async function (resolve, reject) {
try {
const hosts = await marklogic.hosts.getHosts(taskServers, options) || [];
const queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
const addToQueueResults = await aws.sqs.addMessages(queueUrl, hosts);
resolve({
status: 200,
body: addToQueueResults
});
} catch (error) {
reject({
status: 500,
error: error
});
}
});
};
return promise(); // Throws error without constructor despite the AWS doc example
}
Unless some AWS Lambda genius has run into a similar issue before with Node.js, I am just going to convert this into two Lambda functions and use Step Functions to process them.
There was a typo in queueUrl (I imagine that's not it, but worth a try!).
Please run:
// Custom lambda layer
const { marklogic, aws } = require('nodejs-layer-lib');
const { HOSTS, DOMAIN, PORT, USERNAME, PASSWORD, RESTART_QUEUE_NAME } = process.env;
const params = [
'format=json'
];
const options = {
port: PORT,
params,
httpOptions: {
headers: {
Authorization: `Basic ${Buffer.from(`${USERNAME}:${PASSWORD}`).toString('base64')}`
},
method: 'GET'
}
};
const taskServers = (HOSTS.split(',') || []).map(host => {
const _host = host.split(':');
return {
id: _host[0],
name: `http://${_host[1].toLowerCase()}.${DOMAIN}`
};
});
exports.handler = async () => {
let hosts, queueUrl, addToQueueResults;
try {
hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
} catch (e) { console.error('hosts', e); }
try {
queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
} catch (e) { console.error('queueUrl ', e); }
try {
addToQueueResults = await aws.sqs.addMessages(queueUrl, hosts);
} catch (e) { console.error('addToQueueResults ', e); }
return {
status: 200,
body: JSON.stringify(addToQueueResults)
};
};
// keeping the same format.. ^^
If no luck: what is on my mind is that, since aws-sdk is included out of the box in Lambda, it's not customary to require it separately via a layer. Although it may not look to be imported at the top level by marklogic, it may be bundled deep within marklogic, and then when you import AWS and change its config (in the layer) it overwrites it.
Let's find out:
Step 1:
So this, you say, should work if we ignore the AWS import and just import marklogic?
// Custom lambda layer
// const { marklogic, aws } = require('nodejs-layer-lib'); // ignoring AWS for now
const { marklogic } = require('nodejs-layer-lib');
const { HOSTS, DOMAIN, PORT, USERNAME, PASSWORD, RESTART_QUEUE_NAME } = process.env;
const params = [
'format=json'
];
const options = {
port: PORT,
params,
httpOptions: {
headers: {
Authorization: `Basic ${Buffer.from(`${USERNAME}:${PASSWORD}`).toString('base64')}`
},
method: 'GET'
}
};
const taskServers = (HOSTS.split(',') || []).map(host => {
const _host = host.split(':');
return {
id: _host[0],
name: `http://${_host[1].toLowerCase()}.${DOMAIN}`
};
});
exports.handler = async () => {
// let hosts, queueUrl, addToQueueResults;
let hosts;
try {
hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
console.log('hosts => ', hosts);
// queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
// addToQueueResults = await aws.sqs.addMessages(queueUrl, hosts);
return {
status: 200,
body: JSON.stringify(hosts)
};
} catch (error) {
console.log('error => ', error);
throw error;
}
};
OK, so if that works:
Step 2 (please set the region for SQS and also hard-code the queueUrl):
// Custom lambda layer
// const { marklogic, aws } = require('nodejs-layer-lib');
const { marklogic } = require('nodejs-layer-lib');
const AWS = require('aws-sdk');
AWS.config.update({ region: 'eu-west-1' }); // Please set region accordingly
const sqs = new AWS.SQS({ apiVersion: '2012-11-05' });
const { HOSTS, DOMAIN, PORT, USERNAME, PASSWORD, RESTART_QUEUE_NAME } = process.env;
const params = [
'format=json'
];
const options = {
port: PORT,
params,
httpOptions: {
headers: {
Authorization: `Basic ${Buffer.from(`${USERNAME}:${PASSWORD}`).toString('base64')}`
},
method: 'GET'
}
};
const taskServers = (HOSTS.split(',') || []).map(host => {
const _host = host.split(':');
return {
id: _host[0],
name: `http://${_host[1].toLowerCase()}.${DOMAIN}`
};
});
exports.handler = async () => {
let hosts, addToQueueResults;
try {
hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
console.log('hosts => ', hosts);
const queueUrl = 'Please hard code the queueUrl for now';
const sqsParams = {
MessageBody: JSON.stringify(hosts), // SQS MessageBody must be a string
QueueUrl: queueUrl
};
addToQueueResults = await sqs.sendMessage(sqsParams).promise();
console.log('addToQueueResults => ', addToQueueResults);
return {
status: 200,
body: JSON.stringify(addToQueueResults)
};
} catch (error) {
console.log('error => ', error);
throw error;
}
};
IF that doesn't work, then Step 3: move the require of marklogic to below the require of AWS and the region setting in this last example (so any deeply nested marklogic AWS logic we're unaware of now overwrites your AWS require). Re-run it, fingers crossed :-)
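For reference, Step 3 would just reorder the top of the Step 2 file like below (everything else stays the same; the region is still an assumption):
// Step 3 sketch: require and configure aws-sdk BEFORE requiring marklogic from the layer
const AWS = require('aws-sdk');
AWS.config.update({ region: 'eu-west-1' }); // set your region accordingly
const sqs = new AWS.SQS({ apiVersion: '2012-11-05' });
const { marklogic } = require('nodejs-layer-lib'); // moved below the AWS require and config
// ...the rest is identical to Step 2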
I created a custom middleware that makes the api & admin unreachable when I enable it.
The middleware is pretty simple, it adds a request id to incoming request on the server:
const { createNamespace } = require('cls-hooked');
const { v4: uuidv4 } = require('uuid');
const loggerNamespace = createNamespace('logger');
module.exports = (strapi) => {
return {
initialize() {
strapi.app.use((ctx, next) => {
const reqId = ctx.request.get('X-Request-Id') || uuidv4();
ctx.response.set('X-Request-Id', reqId);
loggerNamespace.run(() => {
loggerNamespace.set('requestId', reqId);
next();
});
});
},
};
};
It's enabled via the config file ./config/middleware.js:
module.exports = {
settings: {
addRequestId: {
enabled: true,
},
},
};
Then, when enabled, calling an API endpoint or trying to connect to admin results in 404 Not Found.
I use strapi 3.6.8 with node 14.18.1.
Any idea why?
PS: I suspected cls-hooked to be the culprit but removing it for testing with an anemic middleware doesn't work either.
@Salvino put me on the right track by suggesting that I should await the execution of next().
Digging into the code of cls-hooked, I found the method runPromise, which is similar to run but returns a Promise that can be awaited. It solved the issue.
Fixed code:
const { createNamespace } = require('cls-hooked');
const { v4: uuidv4 } = require('uuid');
const loggerNamespace = createNamespace('logger');
module.exports = (strapi) => {
return {
initialize() {
strapi.app.use(async (ctx, next) => {
const reqId = ctx.request.get('X-Request-Id') || uuidv4();
ctx.response.set('X-Request-Id', reqId);
await loggerNamespace.runPromise(async () => {
loggerNamespace.set('requestId', reqId);
await next();
});
});
},
};
};
I have come across the below code on the internet.
const { Octokit } = require("@octokit/rest");
const { Base64 } = require("js-base64");
const fs = require("fs");
require("dotenv").config();
const octokit = new Octokit({
auth: process.env.GITHUB_ACCESS_TOKEN,
});
const main = async () => {
try {
const content = fs.readFileSync("./input.txt", "utf-8");
const contentEncoded = Base64.encode(content);
const { data } = await octokit.repos.createOrUpdateFileContents({
// replace the owner and email with your own details
owner: "your-github-account",
repo: "octokit-create-file-example",
path: "OUTPUT.md",
message: "feat: Added OUTPUT.md programatically",
content: contentEncoded,
committer: {
name: `Octokit Bot`,
email: "your-email",
},
author: {
name: "Octokit Bot",
email: "your-email",
},
});
console.log(data);
} catch (err) {
console.error(err);
}
};
main();
I was successfully able to create a file in GitHub. Is there a way to update an existing file (like adding some data to the file) with Octokit js?
I believe you do the same thing but you have to provide a sha property in your call to createOrUpdateFileContents which contains the sha of the blob you are replacing:
https://docs.github.com/en/rest/reference/repos#create-or-update-file-contents
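For example, something like this (only a sketch building on the code above; the owner/repo/path values are the same placeholders, and it would live inside an async function such as main): fetch the current file's sha with repos.getContent and pass it to the update call:
// Sketch: get the existing file's sha, then include it when updating
const { data: existing } = await octokit.repos.getContent({
  owner: "your-github-account",
  repo: "octokit-create-file-example",
  path: "OUTPUT.md",
});
await octokit.repos.createOrUpdateFileContents({
  owner: "your-github-account",
  repo: "octokit-create-file-example",
  path: "OUTPUT.md",
  message: "chore: update OUTPUT.md",
  content: Base64.encode("updated file contents"),
  sha: existing.sha, // required when the file already exists
  committer: { name: "Octokit Bot", email: "your-email" },
  author: { name: "Octokit Bot", email: "your-email" },
});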
I am fairly new to Node.js and I am wondering how (or even if) I can read and write to a JSON file. I am trying to create an accessible punishment history.
Ideally I would want to be able to create something along the lines of this:
{
"punishments": {
"users": {
"<example user who has a punishment history>": {
"punishment-1567346": {
"punishment-id": "1567346",
"punishment-type": "mute",
"punishment-reason": "<reason>"
},
"punishment-1567347": {
"punishment-id": "1567347",
"punishment-type": "ban",
"punishment-reason": "<reason>"
}
}
}
}
}
Then I would have a way to access the formatted punishment history. I genuinely have no clue where to start.
You can use a NodeJS built-in library called fs to do read/write operations.
Step #1 - Import fs
const fs = require('fs');
Step #2 - Read the file
let rawdata = fs.readFileSync('punishmenthistory.json');
let punishments= JSON.parse(rawdata);
console.log(punishments);
Now you can use the punishments variable to check the data inside the JSON file. You can also change the data, but for now it only resides in the variable.
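For instance (a small sketch assuming the structure from the question), you could add a new entry to a user's history before writing it back in the next step:
// Sketch: mutate the parsed object; nothing touches the file until you write it back
const userId = "<example user who has a punishment history>";
punishments.punishments.users[userId]["punishment-1567348"] = {
  "punishment-id": "1567348",
  "punishment-type": "kick",
  "punishment-reason": "<reason>"
};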
Step #3 - Write to the File
let data = JSON.stringify(punishments);
fs.writeFileSync('punishmenthistory.json', data);
Full code:
const fs = require('fs');
let rawdata = fs.readFileSync('punishmenthistory.json');
let punishments= JSON.parse(rawdata);
console.log(punishments);
let data = JSON.stringify(punishments);
fs.writeFileSync('punishmenthistory.json', data);
References:
https://stackabuse.com/reading-and-writing-json-files-with-node-js/
Use NodeJS File System https://nodejs.org/dist/latest-v14.x/docs/api/fs.html.
Here I have used the writeFileSync API to write to the file and readFileSync to read from it. Also, when writing, don't forget to JSON.stringify(data), because you are writing the data to a JSON file.
const fs = require("fs");
const path = require("path");
// Write Data
const data = {
"punishments": {
"users": {
"<example user who has a punishment history>": {
"punishment-1567346": {
"punishment-id": "1567346",
"punishment-type": "mute",
"punishment-reason": "<reason>"
},
"punishment-1567347": {
"punishment-id": "1567347",
"punishment-type": "ban",
"punishment-reason": "<reason>"
}
}
}
}
};
fs.writeFileSync(path.join(__dirname, "outputfilepath", "outputfile.json"), JSON.stringify(data), "utf8");
// Read data
const rData = fs.readFileSync(path.join(__dirname, "outputfilepath", "outputfile.json"), "utf8");
const jsonData = JSON.parse(rData);
Here is a working example:
https://repl.it/repls/OutrageousInbornBruteforceprogramming#index.js
you can do something like this for reading:
const fs = require('fs')
function jsonReader(filePath, cb) {
fs.readFile(filePath, (err, fileData) => {
if (err) {
return cb && cb(err)
}
try {
const object = JSON.parse(fileData)
return cb && cb(null, object)
} catch(err) {
return cb && cb(err)
}
})
}
jsonReader('./customer.json', (err, customer) => {
if (err) {
console.log(err)
return
}
console.log(customer.address) // => "Infinity Loop Drive"
})
and like this for writing:
const fs = require('fs')
const customer = {
name: "Newbie Co.",
order_count: 0,
address: "Po Box City",
}
const jsonString = JSON.stringify(customer)
fs.writeFile('./newCustomer.json', jsonString, err => {
if (err) {
console.log('Error writing file', err)
} else {
console.log('Successfully wrote file')
}
})
I currently upload single objects to S3 like so:
var options = {
Bucket: bucket,
Key: s3Path,
Body: body,
ACL: s3FilePermissions
};
S3.putObject(options,
function (err, data) {
//console.log(data);
});
But when I have a large resources folder for example, I use the AWS CLI tool.
I was wondering, is there a native way to do the same thing with the aws sdk (upload entire folders to s3)?
Old-school recursive way I whipped up in a hurry. It only uses core Node modules and the standard AWS SDK.
var AWS = require('aws-sdk');
var path = require("path");
var fs = require('fs');
const uploadDir = function(s3Path, bucketName) {
let s3 = new AWS.S3();
function walkSync(currentDirPath, callback) {
fs.readdirSync(currentDirPath).forEach(function (name) {
var filePath = path.join(currentDirPath, name);
var stat = fs.statSync(filePath);
if (stat.isFile()) {
callback(filePath, stat);
} else if (stat.isDirectory()) {
walkSync(filePath, callback);
}
});
}
walkSync(s3Path, function(filePath, stat) {
let bucketPath = filePath.substring(s3Path.length+1);
let params = {Bucket: bucketName, Key: bucketPath, Body: fs.readFileSync(filePath) };
s3.putObject(params, function(err, data) {
if (err) {
console.log(err)
} else {
console.log('Successfully uploaded '+ bucketPath +' to ' + bucketName);
}
});
});
};
uploadDir("path to your folder", "your bucket name");
Special thanks to Ali from this post for helping get the filenames.
async/await + TypeScript
If you need a solution that uses modern JavaScript syntax and is compatible with TypeScript, I came up with the following code. The recursive getFiles is borrowed from this answer (after all these years, recursion still gives me a headache, lol).
import { promises as fs, createReadStream } from 'fs';
import * as path from 'path';
import { S3 } from 'aws-sdk';
async function uploadDir(s3Path: string, bucketName: string) {
const s3 = new S3();
// Recursive getFiles from
// https://stackoverflow.com/a/45130990/831465
async function getFiles(dir: string): Promise<string | string[]> {
const dirents = await fs.readdir(dir, { withFileTypes: true });
const files = await Promise.all(
dirents.map((dirent) => {
const res = path.resolve(dir, dirent.name);
return dirent.isDirectory() ? getFiles(res) : res;
})
);
return Array.prototype.concat(...files);
}
const files = (await getFiles(s3Path)) as string[];
const uploads = files.map((filePath) =>
s3
.putObject({
Key: path.relative(s3Path, filePath),
Bucket: bucketName,
Body: createReadStream(filePath),
})
.promise()
);
return Promise.all(uploads);
}
await uploadDir(path.resolve('./my-path'), 'bucketname');
Here is a cleaned-up/debugged/working version of @Jim's solution:
function uploadArtifactsToS3() {
const artifactFolder = `logs/${config.log}/test-results`;
const testResultsPath = './test-results';
const walkSync = (currentDirPath, callback) => {
fs.readdirSync(currentDirPath).forEach((name) => {
const filePath = path.join(currentDirPath, name);
const stat = fs.statSync(filePath);
if (stat.isFile()) {
callback(filePath, stat);
} else if (stat.isDirectory()) {
walkSync(filePath, callback);
}
});
};
walkSync(testResultsPath, async (filePath) => {
let bucketPath = filePath.substring(testResultsPath.length - 1);
let params = {
Bucket: process.env.SOURCE_BUCKET,
Key: `${artifactFolder}/${bucketPath}`,
Body: fs.readFileSync(filePath)
};
try {
await s3.putObject(params).promise();
console.log(`Successfully uploaded ${bucketPath} to s3 bucket`);
} catch (error) {
console.error(`error in uploading ${bucketPath} to s3 bucket`);
throw new Error(`error in uploading ${bucketPath} to s3 bucket`);
}
});
}
I was just contemplating this problem the other day, and was thinking something like this:
...
var async = require('async'),
fs = require('fs'),
path = require("path");
var directoryName = './test',
directoryPath = path.resolve(directoryName);
var files = fs.readdirSync(directoryPath);
async.map(files, function (f, cb) {
var filePath = path.join(directoryPath, f);
var options = {
Bucket: bucket,
Key: path.join(s3Path, f), // use the file name so each object gets its own key
Body: fs.readFileSync(filePath),
ACL: s3FilePermissions
};
S3.putObject(options, cb);
}, function (err, results) {
if (err) console.error(err);
console.log(results);
});
Here's a version that wraps the upload in a Promise. This lets you perform an action when all uploads are complete, via Promise.all().then(...).
const path = require('path');
const fs = require('fs');
const AWS = require('aws-sdk');
const s3 = new AWS.S3();
const directoryToUpload = 'directory-name-here';
const bucketName = 'name-of-s3-bucket-here';
// get file paths
const filePaths = [];
const getFilePaths = (dir) => {
fs.readdirSync(dir).forEach(function (name) {
const filePath = path.join(dir, name);
const stat = fs.statSync(filePath);
if (stat.isFile()) {
filePaths.push(filePath);
} else if (stat.isDirectory()) {
getFilePaths(filePath);
}
});
};
getFilePaths(directoryToUpload);
// upload to S3
const uploadToS3 = (dir, path) => {
return new Promise((resolve, reject) => {
const key = path.split(`${dir}/`)[1];
const params = {
Bucket: bucketName,
Key: key,
Body: fs.readFileSync(path),
};
s3.putObject(params, (err) => {
if (err) {
reject(err);
} else {
console.log(`uploaded ${params.Key} to ${params.Bucket}`);
resolve(path);
}
});
});
};
const uploadPromises = filePaths.map((path) =>
uploadToS3(directoryToUpload, path)
);
Promise.all(uploadPromises)
.then((result) => {
console.log('uploads complete');
console.log(result);
})
.catch((err) => console.error(err));
You might try the node-s3-client.
UPDATE: Available on npm here
From the "sync a directory to s3" docs:
UPDATE: Added client initialization code.
var client = s3.createClient({
maxAsyncS3: 20, // this is the default
s3RetryCount: 3, // this is the default
s3RetryDelay: 1000, // this is the default
multipartUploadThreshold: 20971520, // this is the default (20 MB)
multipartUploadSize: 15728640, // this is the default (15 MB)
s3Options: {
accessKeyId: "YOUR ACCESS KEY",
secretAccessKey: "YOUR SECRET ACCESS KEY"
}
});
var params = {
localDir: "some/local/dir",
deleteRemoved: true, // default false, whether to remove s3 objects
// that have no corresponding local file.
s3Params: {
Bucket: "s3 bucket name",
Prefix: "some/remote/dir/",
// other options supported by putObject, except Body and ContentLength.
// See: http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property
},
};
var uploader = client.uploadDir(params);
uploader.on('error', function(err) {
console.error("unable to sync:", err.stack);
});
uploader.on('progress', function() {
console.log("progress", uploader.progressAmount, uploader.progressTotal);
});
uploader.on('end', function() {
console.log("done uploading");
});
This works for me (you'll need to add the walk-sync package):
// Assumed setup for this snippet: fs, path, aws-sdk and the walk-sync npm package;
// Logger is assumed to be your own logging utility.
const fs = require("fs");
const Path = require("path");
const AWS = require("aws-sdk");
const walkSync = require("walk-sync");
const s3 = new AWS.S3();
async function asyncForEach(array, callback) {
for (let index = 0; index < array.length; index++) {
await callback(array[index], index, array);
}
}
const syncS3Directory = async (s3Path, endpoint) => {
await asyncForEach(walkSync(s3Path, {directories: false}), async (file) => {
const filePath = Path.join(s3Path, file);
const fileContent = fs.readFileSync(filePath);
const params = {
Bucket: endpoint,
Key: file,
Body: fileContent,
ContentType: "text/html",
};
let s3Upload = await s3.upload(params).promise();
s3Upload ? undefined : Logger.error("Error synchronizing the bucket");
});
console.log("S3 bucket synchronized!");
};
const AWS = require("aws-sdk");
const fs = require("fs");
const path = require("path");
const async = require("async");
const readdir = require("recursive-readdir");
// AWS CRED
const ID = "<accessKeyId>";
const SECRET = "<secretAccessKey>";
const rootFolder = path.resolve(__dirname, "../");
const uploadFolder = "./sources";
// The name of the bucket that you have created
const BUCKET_NAME = "<Bucket_Name>";
const s3 = new AWS.S3({
accessKeyId: ID,
secretAccessKey: SECRET
});
function getFiles(dirPath) {
return fs.existsSync(dirPath) ? readdir(dirPath) : [];
}
async function uploadToS3(uploadPath) {
const filesToUpload = await getFiles(path.resolve(rootFolder, uploadPath));
console.log(filesToUpload);
return new Promise((resolve, reject) => {
async.eachOfLimit(
filesToUpload,
10,
async.asyncify(async file => {
const Key = file.replace(`${rootFolder}/`, "");
console.log(`uploading: [${Key}]`);
return new Promise((res, rej) => {
s3.upload(
{
Key,
Bucket: BUCKET_NAME,
Body: fs.readFileSync(file)
},
err => {
if (err) {
return rej(new Error(err));
}
res({ result: true });
}
);
});
}),
err => {
if (err) {
return reject(new Error(err));
}
resolve({ result: true });
}
);
});
}
uploadToS3(uploadFolder)
.then(() => {
console.log("upload complete!");
process.exit(0);
})
.catch(err => {
console.error(err.message);
process.exit(1);
});