Unable to successfully upload images to S3 and then view them - javascript

I'm trying to upload images to an S3 bucket as part of the application.
index.js
function upImg(req) {
if(req.files.image) {
var img = req.files.image;
var name = Math.round(Math.random()*10000).toString(); // Returns a random number of up to 5 digits
if(myDB.uploadImg(img, name)) {
return name;
} else {
return "";
}
} else {
return "";
}
}
app.post('/newEV*', isLoggedIn, function(req, res) {
var myURL = req.path.replace('/newEV', '');
var imgPath = upImg(req);
fetch(myURL).then(function (events){
var myID;
var x = 0;
while(!myID) {
if(!events[x]) {
myID = x;
} else {
x++;
}
}
myDB.newEvent(myURL, req.body.name, req.body.desc, req.body.loc, imgPath, req.body.link, req.body.cap, req.body.date, req.body.time, myID, events);
res.redirect('/edit' + myURL);
});
});
myDB file
function signs3(file, name) {
devs3();
const s3 = new aws.S3();
const s3Params = {
Body: file,
Bucket: S3_BUCKET,
Key: name
};
s3.putObject(s3Params, function(err, data) {
if(err) {
throw err;
} else {
console.log("Data from putObject:" + JSON.stringify(data));
}
});
}
module.exports = {
uploadImg : function(file, name) {
var nName = "imgs/" + name;
console.log(nName);
signs3(file, nName);
return true;
}
}
I know that the signs3 function works because I use it in other parts of my application to upload JSON files. Whenever I post to the URL, oddly enough I can see the 'Data from putObject' output in the console, but what I can't see is the nName. I don't understand this, as the console.log(nName) line should run before the other one. When I go to look at the bucket, the image hasn't been uploaded (despite my getting an ETag in the console), and the page does not display it (I know this part also works because it can display images already uploaded to the bucket).

You want to do something like this, subscribing to events on the AWS.Request object that is created when you call putObject without a callback.
const req = s3.putObject(s3Params);
req.on('success', res => {
  console.log('upload complete!');
});
req.on('error', res => {
  console.error(res.error);
});
req.send();
Why does this appear to work differently for small files (JSON files) and large files (images)? Because the large files take longer to upload.
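If you prefer to keep the route code callback-free, another option is to have the upload helper return a promise so the caller can wait for the upload to finish before responding. This is only a minimal sketch, assuming the AWS SDK v2 and the same devs3()/S3_BUCKET setup shown above:
function uploadImg(file, name) {
  devs3();
  const s3 = new aws.S3();
  // putObject(...).promise() resolves once S3 has actually stored the object
  return s3.putObject({ Body: file, Bucket: S3_BUCKET, Key: 'imgs/' + name })
    .promise()
    .then(data => {
      console.log('Uploaded ' + name + ', ETag: ' + data.ETag);
      return name;
    });
}
// In the route you could then write, for example:
// const imgPath = await uploadImg(req.files.image, name);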

Upload byte array from axios to Node server

Background
The JavaScript library for Microsoft Office add-ins allows you to get the raw content of the DOCX file through the getFileAsync() API, which returns a slice of up to 4 MB in one go. You keep calling the function using a sliding-window approach until you have read the entire content. I need to upload these slices to the server and then join them back together to recreate the original DOCX file.
My attempt
I'm using axios on the client side and the busboy-based express-chunked-file-upload middleware on my Node server. As I fetch the slices recursively, I get a raw array of bytes that I then convert to a Blob and append to FormData before posting it to the Node server. The whole flow works and I receive the slice on the server. However, the chunk that gets written to disk on the server is much larger than the Blob I uploaded, typically around 3 times larger, so it is obviously not getting exactly what I sent.
My suspicion is that this may have to do with stream encoding, but the node middleware does not expose any options to set encoding.
Here is the current state of code:
Client-side
public sendActiveDocument(uploadAs: string, sliceSize: number): Promise<boolean> {
return new Promise<boolean>((resolve) => {
Office.context.document.getFileAsync(Office.FileType.Compressed,
{ sliceSize: sliceSize },
async (result) => {
if (result.status == Office.AsyncResultStatus.Succeeded) {
// Get the File object from the result.
const myFile = result.value;
const state = {
file: myFile,
filename: uploadAs,
counter: 0,
sliceCount: myFile.sliceCount,
chunkSize: sliceSize
} as getFileState;
console.log("Getting file of " + myFile.size + " bytes");
const hash = makeId(12)
this.getSlice(state, hash).then(() => resolve(true))
} else {
resolve(false)
}
})
})
}
private async getSlice(state: getFileState, fileHash: string): Promise<boolean> {
const result = await this.getSliceAsyncPromise(state.file, state.counter)
if (result.status == Office.AsyncResultStatus.Succeeded) {
const data = result.value.data;
if (data) {
const formData = new FormData();
formData.append("file", new Blob([data]), state.filename);
const boundary = makeId(12);
const start = state.counter * state.chunkSize
const end = (state.counter + 1) * state.chunkSize
const total = state.file.size
return await Axios.post('/upload', formData, {
headers: {
"Content-Type": `multipart/form-data; boundary=${boundary}`,
"file-chunk-id": fileHash,
"file-chunk-size": state.chunkSize,
"Content-Range": 'bytes ' + start + '-' + end + '/' + total,
},
}).then(async res => {
if (res.status === 200) {
state.counter++;
if (state.counter < state.sliceCount) {
return await this.getSlice(state, fileHash);
}
else {
this.closeFile(state);
return true
}
}
else {
return false
}
}).catch(err => {
console.log(err)
this.closeFile(state)
return false
})
} else {
return false
}
}
else {
console.log(result.status);
return false
}
}
private getSliceAsyncPromise(file: Office.File, sliceNumber: number): Promise<Office.AsyncResult<Office.Slice>> {
return new Promise(function (resolve) {
file.getSliceAsync(sliceNumber, result => resolve(result))
})
}
Server-side
This code is entirely from the npm package (linked above), so I'm not supposed to change anything in here, but I'm including it for reference:
makeMiddleware = () => {
return (req, res, next) => {
const busboy = new Busboy({ headers: req.headers });
busboy.on('file', (fieldName, file, filename, _0, _1) => {
if (this.fileField !== fieldName) { // Current field is not handled.
return next();
}
const chunkSize = req.headers[this.chunkSizeHeader] || 500000; // Default: 500Kb.
const chunkId = req.headers[this.chunkIdHeader] || 'unique-file-id'; // If not specified, will reuse same chunk id.
// NOTE: Using the same chunk id for multiple file uploads in parallel will corrupt the result.
const contentRangeHeader = req.headers['content-range'];
let contentRange;
const errorMessage = util.format(
'Invalid Content-Range header: %s', contentRangeHeader
);
try {
contentRange = parse(contentRangeHeader);
} catch (err) {
return next(new Error(errorMessage));
}
if (!contentRange) {
return next(new Error(errorMessage));
}
const part = contentRange.start / chunkSize;
const partFilename = util.format('%i.part', part);
const tmpDir = util.format('/tmp/%s', chunkId);
this._makeSureDirExists(tmpDir);
const partPath = path.join(tmpDir, partFilename);
const writableStream = fs.createWriteStream(partPath);
file.pipe(writableStream);
file.on('end', () => {
req.filePart = part;
if (this._isLastPart(contentRange)) {
req.isLastPart = true;
this._buildOriginalFile(chunkId, chunkSize, contentRange, filename).then(() => {
next();
}).catch(_ => {
const errorMessage = 'Failed merging parts.';
next(new Error(errorMessage));
});
} else {
req.isLastPart = false;
next();
}
});
});
req.pipe(busboy);
};
}
Update
So it looks like I have found the problem, at least. busboy appears to be writing my array of bytes as text in the output file. I get 80,75,3,4,20,0,6,0,8,0,0,0,33,0,44,25 (as text) when I upload the byte array [80,75,3,4,20,0,6,0,8,0,0,0,33,0,44,25]. Now I need to figure out how to force it to write it as a binary stream.
Figured it out. Just in case it helps anyone: there was no problem with busboy, office.js, or axios. I just had to convert the incoming chunk of data to a Uint8Array before creating a Blob from it. So instead of:
formData.append("file", new Blob([data]), state.filename);
I now do this:
const blob = new Blob([ new Uint8Array(data) ])
formData.append("file", blob, state.filename);
And it worked like a charm.
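For anyone curious why that matters: a plain JavaScript array passed to the Blob constructor is stringified, while a typed array is treated as raw bytes. A quick illustration you can run in a browser console (the byte values are just the start of a ZIP/DOCX header):
const bytes = [80, 75, 3, 4];                          // "PK\x03\x04"
const textBlob = new Blob([bytes]);                    // the array is converted to the string "80,75,3,4"
const binaryBlob = new Blob([new Uint8Array(bytes)]);  // raw bytes
console.log(textBlob.size, binaryBlob.size);           // 9 vs 4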

How to read multiple json file using fs and bulk request

I'm using the Elasticsearch search engine with my React app. I was reading one file on the backend, as you can see in the code, and it works perfectly, but now I want to read three different JSON files into three different indexes using the "fs" package and a bulk request. Can you please help me?
The code:
// Start reading the json file
fs.readFile("DocRes.json", { encoding: "utf-8" }, function (err, data) {
if (err) {
throw err;
}
// Build up a giant bulk request for elasticsearch.
bulk_request = data.split("\n").reduce(function (bulk_request, line) {
var obj, ncar;
try {
obj = JSON.parse(line);
} catch (e) {
console.log("Done reading 1");
return bulk_request;
}
// Rework the data slightly
ncar = {
id: obj.id,
name: obj.name,
summary: obj.summary,
image: obj.image,
approvetool: obj.approvetool,
num: obj.num,
date: obj.date,
};
bulk_request.push({
index: { _index: "ncar_index", _type: "ncar", _id: ncar.id },
});
bulk_request.push(ncar);
return bulk_request;
}, []);
// A little voodoo to simulate synchronous insert
var busy = false;
var callback = function (err, resp) {
if (err) {
console.log(err);
}
busy = false;
};
// Recursively whittle away at bulk_request, 1000 at a time.
var perhaps_insert = function () {
if (!busy) {
busy = true;
client.bulk(
{
body: bulk_request.slice(0, 1000),
},
callback
);
bulk_request = bulk_request.slice(1000);
console.log(bulk_request.length);
}
if (bulk_request.length > 0) {
setTimeout(perhaps_insert, 100);
} else {
console.log("Inserted all records.");
}
};
perhaps_insert();
});
You can create a promise for each file read and feed the results to the Elasticsearch bulk request.
const fsPromises = require('fs').promises;
const path = require('path');

const files = ['filename1', 'filename2', 'filename3'];

// Read a single file; make sure the path is correct for your layout.
const fetchFile = (filename) => fsPromises.readFile(path.join(__dirname, filename), 'utf-8');

Promise.all(files.map(fetchFile))
  .then(data => console.log(data)) // data is an array with one entry per file
  .catch(e => console.log(e));
Once you get the data from all the promises, pass it to Elasticsearch.
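To tie it back to the original bulk logic, here is a rough sketch of reading each file into its own index. It assumes the same client and the same newline-delimited JSON format as the code in the question; the file and index names other than DocRes.json/ncar_index are placeholders you would replace with your own:
const filesToIndexes = [
  { file: 'DocRes.json', index: 'ncar_index' },
  { file: 'file2.json', index: 'index2' },
  { file: 'file3.json', index: 'index3' },
];

Promise.all(filesToIndexes.map(async ({ file, index }) => {
  const data = await fsPromises.readFile(path.join(__dirname, file), 'utf-8');
  const body = data.split('\n').reduce((bulk, line) => {
    try {
      const obj = JSON.parse(line);
      bulk.push({ index: { _index: index, _id: obj.id } });
      bulk.push(obj);
    } catch (e) {
      // skip blank or malformed lines
    }
    return bulk;
  }, []);
  return client.bulk({ body });
}))
  .then(() => console.log('Inserted all records.'))
  .catch(e => console.log(e));
This sends each file as a single bulk request; for very large files you would still want to slice the body into batches (e.g. 1000 operations at a time) the way the original code does.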

multi file upload with skipper-better-s3 and sailjs returns the same key

As seen in the title, I am currently using Sails.js + skipper-better-s3 for S3 uploads. I started with uploading one file, which works great; then, because of a change request, I needed to support multi-file upload in one go, so I added a for loop. But by doing this, all the keys end up the same, and in the end only one file is uploaded: the last file uploaded, but with the first upload's filename.
I did read some articles where people say something like: the problem is that the for loop is synchronous while the file upload is asynchronous, and that the answer is to use recursion. I tried that too, but no luck; the same thing happens.
My recursive code is below...
s3_upload_multi: async (req, res) => {
const generatePath = (rootPath, fieldName) => {
let path;
// this is just a switch statement here to check which fieldName is provided then value of path will depend on it
// as for the other two variable is just checking if upload content type is correct
return { path };
};
const processUpload = async ({
fieldName,
awsOp,
fileExtension,
rootPath,
fileName,
}) => {
return new Promise(function (resolve, reject) {
req.file(fieldName).upload(awsOp, async (err, filesUploaded) => {
if (err) reject(err);
const filesUploadedF = filesUploaded[0]; // F = first file
const response = {
status: true,
errCode: 200,
msg: 'OK',
response: {
url: filesUploadedF.extra.Location,
size: filesUploadedF.size,
type: fileExtension,
filename: filesUploadedF.filename,
key: filesUploadedF.extra.Key,
field: fieldName,
}
};
resolve(response);
});
});
}
const process_recur = async (files, fieldName) => {
if (files.length <= 0) return;
const fileUpload = files[0].stream;
const rootPath = `${sails.config.aws.upload.path.root}`;
const fileCType = fileUpload.headers['content-type'];
// console.log(fileCType, 'fileCType');
const { path } = generatePath(rootPath, fieldName);
const fileName = fileUpload.filename;
const fileExtension = fileUpload.filename.split('.').pop();
const genRan = await UtilsService.genRan(8);
const fullPath = `${path}${genRan}-${fileName}`;
const awsOp = {
adapter: require('skipper-better-s3'),
key: sails.config.aws.access_key,
secret: sails.config.aws.secret_key,
saveAs: fullPath,
bucket: sails.config.aws.bucket,
s3params: {
ACL: 'public-read'
},
};
const config = {
fieldName,
awsOp,
fileExtension,
rootPath,
fileName,
}
const procceed = await processUpload(config);
files.shift();
await process_recur(files, fieldName);
};
try {
const fieldName = req._fileparser.upstreams[0].fieldName;
const files = req.file(fieldName)._files;
await process_recur(files, fieldName);
} catch (e) {
console.log(e, 'inside UploadService');
return false;
}
}
Below is my code using a for loop, which is quite similar to the above:
s3_upload_multi: async (req, res) => {
const generatePath = (rootPath, fieldName) => {
let path;
// this is just a switch statement here to check which fieldName is provided then value of path will depend on it
// as for the other two variable is just checking if upload content type is correct
return { path };
};
const processUpload = async ({
fieldName,
awsOp,
fileExtension,
rootPath,
fileName,
}) => {
return new Promise(function (resolve, reject) {
req.file(fieldName).upload(awsOp, async (err, filesUploaded) => {
if (err) reject(err);
const filesUploadedF = filesUploaded[0]; // F = first file
const response = {
status: true,
errCode: 200,
msg: 'OK',
response: {
url: filesUploadedF.extra.Location,
size: filesUploadedF.size,
type: fileExtension,
filename: filesUploadedF.filename,
key: filesUploadedF.extra.Key,
field: fieldName,
}
};
resolve(response);
});
});
}
try {
const fieldName = req._fileparser.upstreams[0].fieldName;
const files = req.file(fieldName)._files;
for (const file of files) {
const fileUpload = file.stream;
const rootPath = `${sails.config.aws.upload.path.root}`;
const fileCType = fileUpload.headers['content-type'];
// console.log(fileCType, 'fileCType');
const fileName = fileUpload.filename;
const { path } = generatePath(rootPath, fieldName);
const fileExtension = fileUpload.filename.split('.').pop();
// using a variable here because if this is an image, a thumbnail will be created with the same name as the original one
const genRan = await UtilsService.genRan(8);
const fullPath = await `${path}${genRan}-${fileName}`;
const awsOp = {
adapter: require('skipper-better-s3'),
key: sails.config.aws.access_key,
secret: sails.config.aws.secret_key,
saveAs: fullPath,
bucket: sails.config.aws.bucket,
s3params: {
ACL: 'public-read'
},
};
const config = {
fieldName,
awsOp,
fileExtension,
rootPath,
fileName,
}
const procceed = await processUpload(config);
console.log(procceed, 'procceed');
}
} catch (e) {
console.log(e, 'inside UploadService');
return false;
}
}
Which part am I getting wrong that causes this behavior? I checked my path and it's totally correct, with the correct filename too, when I console.log it.
Thanks in advance for any suggestions and help.
It took me quite a lot of time to figure this out ages ago.
Especially since you are using skipper-better-s3, which doesn't come with documentation as detailed as skipper's. Going back to the skipper documentation, the saveAs field doesn't only take a string but also a function, which you can use to get each file's filename and return the key you need. So you actually don't need recursion or a for loop at all.
For example, with some of your code:
const awsOp = {
adapter: require('skipper-better-s3'),
key: sails.config.aws.access_key,
secret: sails.config.aws.secret_key,
saveAs: (__newFileStream, next) => {
// generatePath is what you wrote
// __newFileStream.filename is the filename of each file before uploading
// the returned path is essentially the S3 key, which includes your filename too
const { path } = generatePath(rootPath, __newFileStream.filename, fieldName);
return next(undefined, path);
},
bucket: sails.config.aws.bucket,
s3params: {
ACL: 'public-read'
},
};
skipper documentation https://www.npmjs.com/package/skipper#customizing-at-rest-filenames-for-uploads
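With saveAs as a function, the controller no longer needs the recursion or the loop at all; one upload call handles every file in the upstream. A rough sketch, reusing the awsOp object above (res.ok / res.serverError are Sails' standard responses; adjust to however you normally reply):
s3_upload_multi: async (req, res) => {
  try {
    const fieldName = req._fileparser.upstreams[0].fieldName;
    req.file(fieldName).upload(awsOp, (err, filesUploaded) => {
      if (err) return res.serverError(err);
      // each entry gets its own key thanks to the saveAs function
      return res.ok(filesUploaded.map(f => ({
        url: f.extra.Location,
        key: f.extra.Key,
        filename: f.filename,
      })));
    });
  } catch (e) {
    return res.serverError(e);
  }
}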

How to unzip file to disk and save path to database

I'm trying to create an app where a user can upload a zipped file, the app will unzip the file and save it to disk, and a path to the file will be saved to MongoDB for later retrieval.
I'm having a hard time getting the upload from the form, unzipping it, saving it to disk, and uploading the path of the unzipped file to the database all in one function. I'm really new to this and am trying to learn about callbacks and such, and I can't find any working solution for what I'm trying to do.
This is what my functions currently looks like:
// Multer is a form handling middleware
var storage = multer.diskStorage({
destination: function (req, file, cb) {
console.log(file)
cb(null, './uploads/unzip')
},
filename: function (req, file, cb) {
cb(null, file.fieldname + '-' + Date.now() + path.extname(file.originalname))
},
})
const upload = multer({ storage }).single('file'); //this is the 1st func in the route
const unzipp = async (req, res, next) => { //second func in route
try {
const dir = 'uploads/unzipped/';
var stream = fs.createReadStream(req.file.path)
stream.pipe(unzip.Extract({path: dir}))
.on('entry', function (entry) {
var fileName = entry.path;
var type = entry.type;
var size = entry.size;
console.log(fileName, type, size)
if (type.isDirectory) {
postfile() //TRYING TO CALL POSTFILE() HERE
console.log('unzipped and path saved')
} else {
res.error('Failed unzipping')
}
fs.unlink(req.file.path, function (e) {
if (e) throw e;
console.log('successfully deleted '+req.file.path);
});
})
} catch (e) {
console.error(e)
}
next();
}
//Upload is a mongoDB cluster Schema
async function postfile () {
try{
let newUpload = new Upload(req.body); // new instance of Upload based on the model, populated from req.body
newUpload.title = req.body.title;
newUpload.description = req.body.description;
newUpload.labels = req.body.labels;
newUpload.filePath = fileName; //ASSIGN FILEPATH IN DB SCHEMA TO UNZIPPED FILE PATH
console.log("filePath saved")
newUpload.save()
.then(newUpload => {
res.status(200).json({file: "File added successfully"})
})
.catch(err => {
res.status(400).send('File upload failed to save to DB :(')
})
} catch (e) {
console.error(e);
}
}
As you can see, I'm trying to call the function that saves the Mongo document from inside the unzipp function. This is the post route, in a separate folder:
router.post('/upload', FileCtrl.upload, FileCtrl.unzipp)
I've also tried saving the entry path of the unzipped file as a global var (fileName) and assigning the path in the Schema as fileName, but it doesn't work either:
const unzipp = async (req, res, next) => {
try {
const dir = 'uploads/unzipped/';
var stream = fs.createReadStream(req.file.path)
stream.pipe(unzip.Extract({path: dir}))
.on('entry', function (entry) {
fileName = entry.path;
type = entry.type;
size = entry.size;
console.log(fileName, type, size)
// if (type.isDirectory) {
// console.log('unzipped and path saved')
// } else {
// res.error('Failed unzipping')
// }
result = {
file: fileName,
message:"File has been extracted"
};
//var file = req.file
fs.unlink(req.file.path, function (e) {
if (e) throw e;
console.log('successfully deleted '+req.file.path);
});
res.json(result);
})
} catch (e) {
console.error(e)
}
next();
}
const postfile = async (req, res) => {
try{
console.log("Posting to DB")
let newUpload = new Upload(req.body); // new instance of Upload based on the model, populated from req.body
newUpload.title = req.body.title;
newUpload.description = req.body.description;
newUpload.labels = req.body.labels;
newUpload.filePath = fileName;
console.log("Ok so far")
newUpload.save()
.then(newUpload => {
res.status(200).json({file: "File added successfully"})
})
.catch(err => {
res.status(400).send('File upload failed to save to DB :(')
})
} catch (e) {
console.error(e);
}
}
This gives the error "ReferenceError: fileName is not defined".
The new route looks like this:
router.post('/upload', FileCtrl.upload, FileCtrl.unzipp, FileCtrl.postfile)
I've been trying to solve this for a really long time and would really appreciate some advice.
EDIT:
For testing purposes I hardcoded the filepath and it saved to the DB perfectly...
const postfile = async (req, res) => {
try{
console.log("Posting to DB")
//var stream = fs.readdirSync('./uploads/unzipped/Nancy_Collins_118226967_v2')
let newUpload = new Upload(req.body); // new instance of Upload based on the model, populated from req.body
newUpload.title = req.body.title;
newUpload.description = req.body.description;
newUpload.labels = req.body.labels;
newUpload.filePath = './uploads/unzipped/Nancy_Collins_118226967_v2';
console.log("Ok so far")
newUpload.save()
.then(newUpload => {
res.status(200).json({file: "File added successfully"})
})
.catch(err => {
res.status(400).send('File upload failed to save to DB :(')
})
} catch (e) {
console.error(e);
}
}
Obviously this isn't practical or dynamic, but it's possible.
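One possible way to make that dynamic is to capture the extracted path during unzipping, stash it on req, and only call next() once extraction has finished, so postfile can read it. This is only a sketch built on the snippets above (it assumes the unzip stream emits 'entry' and 'close' events as in your code, and req.extractedPath is just an illustrative name):
const unzipp = async (req, res, next) => {
  const dir = 'uploads/unzipped/';
  fs.createReadStream(req.file.path)
    .pipe(unzip.Extract({ path: dir }))
    .on('entry', function (entry) {
      // remember the first extracted entry's path for the next middleware
      if (!req.extractedPath) req.extractedPath = path.join(dir, entry.path);
    })
    .on('close', function () {
      fs.unlink(req.file.path, function (e) {
        if (e) console.error(e);
      });
      next(); // only continue once extraction has finished
    })
    .on('error', next);
};

const postfile = async (req, res) => {
  let newUpload = new Upload(req.body);
  newUpload.filePath = req.extractedPath; // set during unzipp
  newUpload.save()
    .then(() => res.status(200).json({ file: "File added successfully" }))
    .catch(() => res.status(400).send('File upload failed to save to DB :('));
};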

How do I save image locally in electron app

I am building an Electron app with vue-cli-electron-builder.
A user submits a form with a name, email, and image (photo), which is stored in a local MySQL database and later synchronized to the cloud.
Only the image name will be stored in the database. The problems are:
where and how do I save the actual image locally?
how do I synchronize the image to the cloud?
In development mode it was stored inside the project, but the production (bundled) app doesn't work like that.
I tried multer.diskStorage to save the image to './server/uploads'. It works in development mode, but in production it doesn't.
You can store uploaded files in the app's userData folder. You can try something like the below:
function uploadFile() {
dialog.showOpenDialog({
properties: ['openFile','multiSelections'],
filters: [{
name: 'Images',
extensions: ['jpg', 'png', 'gif']
}]
},
uploadF, //define callback
)
}
function uploadF(filePaths) {
if (filePaths!=undefined) {
//multiple image upload
for (let i = 0; i < filePaths.length; i++) {
let fileName = path.basename(filePaths[i])
fileName = moment().unix()+'-'+fileName //rename file
let fileUploadPath = path.join(app.getPath('userData'), fileName);
move(filePaths[i],fileUploadPath,cb)
}
}
}
function cb(e) {
console.log("error in upload file",e);
}
function move(oldPath, newPath, callback) {
fs.rename(oldPath, newPath, function (err) {
if (err) {
if (err.code === 'EXDEV') {
copy();
} else {
console.log("err",err);
callback(err);
}
return;
}
callback();
});
function copy() {
var readStream = fs.createReadStream(oldPath);
var writeStream = fs.createWriteStream(newPath);
readStream.on('error', callback);
writeStream.on('error', callback);
readStream.on('close', function () {
let fileName=path.basename(newPath)
console.log(fileName," file uploaded")
//remove path from destination
//fs.unlink(oldPath, callback);
// do your stuff
});
readStream.pipe(writeStream);
}
}
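If you would rather keep the multer approach from the question, a minimal sketch that points diskStorage at the same userData directory (which is writable in packaged builds, unlike a folder inside the app bundle); only the generated file name then needs to go into MySQL. It assumes the server code runs where Electron's app module is available (e.g. the main process):
const { app } = require('electron');
const path = require('path');
const fs = require('fs');
const multer = require('multer');

// Per-user writable directory, available in both development and production builds.
const uploadDir = path.join(app.getPath('userData'), 'uploads');
fs.mkdirSync(uploadDir, { recursive: true });

const storage = multer.diskStorage({
  destination: (req, file, cb) => cb(null, uploadDir),
  filename: (req, file, cb) => cb(null, Date.now() + '-' + file.originalname),
});
const upload = multer({ storage }); // store only file.filename in MySQL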
