How do I save an image locally in an Electron app - javascript

I am building an Electron app with vue-cli-electron-builder.
The user submits a form with name, email and an image (photo), which is stored in a local MySQL database and later synchronized to the cloud.
Only the image name is stored in the database. The problems are:
where and how do I save the actual image locally?
how do I synchronize the image to the cloud?
In development mode the image was stored inside the project folder, but the production bundled app doesn't work like that.
I tried multer.diskStorage to save the image to './server/uploads'. It works in development mode but not in production.

You can store uploaded files in the app's userData folder. You can try something like the following:
const { app, dialog } = require('electron') // main process modules
const path = require('path')
const fs = require('fs')
const moment = require('moment')

function uploadFile() {
  dialog.showOpenDialog({
    properties: ['openFile', 'multiSelections'],
    filters: [{
      name: 'Images',
      extensions: ['jpg', 'png', 'gif']
    }]
  },
  uploadF // callback (newer Electron versions return a Promise instead of taking a callback)
  )
}

function uploadF(filePaths) {
  if (filePaths != undefined) {
    // multiple image upload
    for (let i = 0; i < filePaths.length; i++) {
      let fileName = path.basename(filePaths[i])
      fileName = moment().unix() + '-' + fileName // rename file with a timestamp prefix
      let fileUploadPath = path.join(app.getPath('userData'), fileName)
      move(filePaths[i], fileUploadPath, cb)
    }
  }
}

function cb(e) {
  console.log("error in upload file", e);
}

function move(oldPath, newPath, callback) {
  fs.rename(oldPath, newPath, function (err) {
    if (err) {
      if (err.code === 'EXDEV') {
        // rename fails across devices/partitions, so fall back to a stream copy
        copy();
      } else {
        console.log("err", err);
        callback(err);
      }
      return;
    }
    callback();
  });

  function copy() {
    var readStream = fs.createReadStream(oldPath);
    var writeStream = fs.createWriteStream(newPath);
    readStream.on('error', callback);
    writeStream.on('error', callback);
    readStream.on('close', function () {
      let fileName = path.basename(newPath)
      console.log(fileName, " file uploaded")
      // remove file from the source path
      // fs.unlink(oldPath, callback);
      // do your stuff
    });
    readStream.pipe(writeStream);
  }
}
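To keep uploads out of the root of userData, a small helper like the following could be used (a minimal sketch; the 'uploads' subfolder name is just an example, not something the answer above requires):

const { app } = require('electron')
const path = require('path')
const fs = require('fs')

// Build (and create, if needed) an uploads directory inside the app's userData path.
// 'uploads' is an arbitrary example name.
function getUploadsDir() {
  const dir = path.join(app.getPath('userData'), 'uploads')
  fs.mkdirSync(dir, { recursive: true })
  return dir
}

// Usage inside uploadF: let fileUploadPath = path.join(getUploadsDir(), fileName)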

Related

get list of file names from aws s3 using node js wildcard

I have written code to fetch file names from S3, but it only gives me a single file.
For example, I have files named 01-data-dict-2021-04-01.gz, 02-data-dict-2021-04-01.gz and so on,
but my code prints only 01-data-dict-2021-04-01.gz.
This is my code:
let items = [];
const listS3Objects = (marker) => {
  s3.listObjects({
    Bucket: 'JobPoy',
    Prefix: 'datacenter/01-data-dict-2021-04-01.gz',
    Marker: marker
  }, (err, s3Items) => {
    if (err) {
      console.log(err);
      process.exit();
    }
    s3Items.Contents.forEach(i => items.push(i.Key));
    if (s3Items.Contents.length === 1000) {
      console.log(`Fetching more files ${s3Items.Contents[999].Key}`);
      listS3Objects(s3Items.Contents[999].Key)
    } else {
      console.log("====>", items)
    }
  });
};
The output of items is [01-data-dict-2021-04-01.gz],
but I want to get [01-data-dict-2021-04-01.gz, 02-data-dict-2021-04-01.gz and so on........]
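The Prefix parameter is a literal prefix, not a wildcard, so one way to get every matching key is to list under the common prefix and filter client-side. A minimal sketch reusing the same s3 client (the 'datacenter/' prefix and the pattern below are assumptions about the naming scheme):

let items = [];
const listS3Objects = (marker) => {
  s3.listObjects({
    Bucket: 'JobPoy',
    Prefix: 'datacenter/',            // common prefix only; no wildcard support
    Marker: marker
  }, (err, s3Items) => {
    if (err) {
      console.log(err);
      process.exit();
    }
    // keep only the keys matching the "NN-data-dict-2021-04-01.gz" pattern (assumed naming scheme)
    s3Items.Contents
      .filter(i => /\d{2}-data-dict-2021-04-01\.gz$/.test(i.Key))
      .forEach(i => items.push(i.Key));
    if (s3Items.IsTruncated) {
      // continue from the last key returned in this page
      listS3Objects(s3Items.Contents[s3Items.Contents.length - 1].Key);
    } else {
      console.log("====>", items);
    }
  });
};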

nodeJS await to move files then compress images

I wrote this code that checks image file sizes in a folder; if a file is bigger than 30000 bytes, it is moved to a temporary folder called 'before-compress'. The compressImages() function iterates over the 'before-compress' folder and returns the compressed images to the original folder. My question is: how can I await the process of moving the oversized files and then call the compressImages() function? As you can see in the code, I currently handle this with a setTimeout once the forEach reaches the last item. Thanks in advance.
const fs = require('fs');
const path = require('path');
const imagemin = require("imagemin");
const imageminMozjpeg = require("imagemin-mozjpeg");
const imageminPngquant = require("imagemin-pngquant");
const imageminGifsicle = require('imagemin-gifsicle');

const directoryPath = path.join(__dirname, 'uploads');

fs.readdir(`${directoryPath}/products`, function (err, files) {
  if (err) {
    return console.log('Unable to scan directory: ' + err);
  }
  files.forEach(function (file) {
    console.log(`File: ${file} - Size: ${getFilesizeInBytes(file)} bytes`);
    if (getFilesizeInBytes(file) > 30000) {
      moveFile(file)
    }
    if (files.indexOf(file) == files.length - 1) {
      //console.log('last index');
      setTimeout(() => compressImages(), 4000);
    }
  });
});

function getFilesizeInBytes(fileName) {
  var stats = fs.statSync(`${directoryPath}/products/${fileName}`);
  var fileSizeInBytes = stats.size;
  return fileSizeInBytes;
}

function moveFile(file) {
  var oldPath = `${directoryPath}/products/${file}`;
  var newPath = `${directoryPath}/before-compress/${file}`;
  fs.rename(oldPath, newPath, function (err) {
    if (err) throw err;
    console.log(`File ${file} moved!`);
  })
}

function compressImages() {
  fs.readdir(`${directoryPath}/before-compress`, function (err, files) {
    if (err) {
      return console.log('Unable to scan directory: ' + err);
    }
    files.forEach(function (file) {
      console.log(`File to compress: ${file}`);
      let fileExt = file.split('.')[1];
      let compressPlugin = fileExt == 'jpg' || fileExt == 'jpeg' ? imageminMozjpeg({ quality: 40 }) :
        fileExt == 'png' ? imageminPngquant({ quality: [0.5, 0.6] }) :
          fileExt == 'gif' ? imageminGifsicle() : 0;
      (async () => {
        const files = await imagemin([`./uploads/before-compress/${file}`], {
          destination: './uploads/products',
          plugins: [compressPlugin]
        });
        fs.unlink(`${directoryPath}/before-compress/${file}`, err => err ? console.log(err) : 0);
      })();
    });
  });
}
This kind of code would become much more readable if you converted all the functions from using callbacks to using async/await (a sketch of that approach is shown at the end of this answer).
If you want to keep using callbacks, however, there are two options:
Make moveFile() use fs.renameSync() instead of fs.rename(). Normally I would advise against that, but since you are already using fs.statSync() and I suppose you run this as a script with nothing else in parallel, maybe that would be an acceptable solution.
Or make moveFile() accept a callback:
function moveFile(file, callback) {
  // [...]
  fs.rename(oldPath, newPath, callback)
}
Now you can use this callback to detect when the file has been moved, for example like this:
// [...]
var done = 0;
var error = false;
// run this after each file is handled; once every file is done, start compressing
function fileDone() {
  done++;
  if (done == files.length && !error) {
    compressImages();
  }
}
files.forEach(function (file) {
  if (error) return;
  if (getFilesizeInBytes(file) > 30000) {
    moveFile(file, function (err) {
      if (err) { console.log(err); error = true; return; }
      fileDone();
    });
  } else {
    fileDone();
  }
});
});
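For reference, a minimal sketch of the async/await rewrite mentioned above, assuming fs.promises is available (Node 10+) and reusing directoryPath, getFilesizeInBytes() and compressImages() from the question:

const fsp = require('fs').promises;

async function moveAndCompress() {
  const files = await fsp.readdir(`${directoryPath}/products`);
  // move every oversized file and wait for all renames to finish
  await Promise.all(
    files
      .filter(file => getFilesizeInBytes(file) > 30000)
      .map(file => fsp.rename(
        `${directoryPath}/products/${file}`,
        `${directoryPath}/before-compress/${file}`
      ))
  );
  compressImages();
}

moveAndCompress().catch(err => console.log(err));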

How to unzip file to disk and save path to database

I'm trying to create an app where a user can upload a zipped file; the app will unzip the file and save it to disk, and a path to the file will be saved to MongoDB for later retrieval.
I'm having a hard time getting the upload from the form, unzipping it, saving it to disk, and saving the path of the unzipped file to the database all in one function. I'm really new to this and am trying to learn about callbacks and such; I can't find any working solution for what I'm trying to do.
This is what my functions currently look like:
// Multer is a form handling middleware
var storage = multer.diskStorage({
  destination: function (req, file, cb) {
    console.log(file)
    cb(null, './uploads/unzip')
  },
  filename: function (req, file, cb) {
    cb(null, file.fieldname + '-' + Date.now() + path.extname(file.originalname))
  },
})

const upload = multer({ storage }).single('file'); // this is the 1st func in the route

const unzipp = async (req, res, next) => { // second func in route
  try {
    const dir = 'uploads/unzipped/';
    var stream = fs.createReadStream(req.file.path)
    stream.pipe(unzip.Extract({ path: dir }))
      .on('entry', function (entry) {
        var fileName = entry.path;
        var type = entry.type;
        var size = entry.size;
        console.log(fileName, type, size)
        if (type.isDirectory) {
          postfile() // TRYING TO CALL POSTFILE() HERE
          console.log('unzipped and path saved')
        } else {
          res.error('Failed unzipping')
        }
        fs.unlink(req.file.path, function (e) {
          if (e) throw e;
          console.log('successfully deleted ' + req.file.path);
        });
      })
  } catch (e) {
    console.error(e)
  }
  next();
}

// Upload is a mongoDB cluster Schema
async function postfile() {
  try {
    let newUpload = new Upload(req.body); // new instance of Upload based on the model, populated from req.body
    newUpload.title = req.body.title;
    newUpload.description = req.body.description;
    newUpload.labels = req.body.labels;
    newUpload.filePath = fileName; // ASSIGN FILEPATH IN DB SCHEMA TO UNZIPPED FILE PATH
    console.log("filePath saved")
    newUpload.save()
      .then(newUpload => {
        res.status(200).json({ file: "File added successfully" })
      })
      .catch(err => {
        res.status(400).send('File upload failed to save to DB :(')
      })
  } catch (e) {
    console.error(e);
  }
}
As you can see, I'm trying to call the function that saves the Mongo schema from within the unzipp function. This is the post route, in a separate folder:
router.post('/upload', FileCtrl.upload, FileCtrl.unzipp)
I've also tried saving the entry path of the unzipped file as a global variable (fileName) and assigning the path in the schema from fileName, but that doesn't work either:
const unzipp = async (req, res, next) => {
  try {
    const dir = 'uploads/unzipped/';
    var stream = fs.createReadStream(req.file.path)
    stream.pipe(unzip.Extract({ path: dir }))
      .on('entry', function (entry) {
        fileName = entry.path;
        type = entry.type;
        size = entry.size;
        console.log(fileName, type, size)
        // if (type.isDirectory) {
        //   console.log('unzipped and path saved')
        // } else {
        //   res.error('Failed unzipping')
        // }
        result = {
          file: fileName,
          message: "File has been extracted"
        };
        //var file = req.file
        fs.unlink(req.file.path, function (e) {
          if (e) throw e;
          console.log('successfully deleted ' + req.file.path);
        });
        res.json(result);
      })
  } catch (e) {
    console.error(e)
  }
  next();
}

const postfile = async (req, res) => {
  try {
    console.log("Posting to DB")
    let newUpload = new Upload(req.body); // new instance of Upload based on the model, populated from req.body
    newUpload.title = req.body.title;
    newUpload.description = req.body.description;
    newUpload.labels = req.body.labels;
    newUpload.filePath = fileName;
    console.log("Ok so far")
    newUpload.save()
      .then(newUpload => {
        res.status(200).json({ file: "File added successfully" })
      })
      .catch(err => {
        res.status(400).send('File upload failed to save to DB :(')
      })
  } catch (e) {
    console.error(e);
  }
}
This gives the error "ReferenceError: fileName is not defined".
The new route looks like this:
router.post('/upload', FileCtrl.upload, FileCtrl.unzipp, FileCtrl.postfile)
I've been trying to solve this for a really long time and would really appreciate some advice.
EDIT:
For testing purposes I hardcoded the filepath and it saved to the DB perfectly...
const postfile = async (req, res) => {
  try {
    console.log("Posting to DB")
    //var stream = fs.readdirSync('./uploads/unzipped/Nancy_Collins_118226967_v2')
    let newUpload = new Upload(req.body); // new instance of Upload based on the model, populated from req.body
    newUpload.title = req.body.title;
    newUpload.description = req.body.description;
    newUpload.labels = req.body.labels;
    newUpload.filePath = './uploads/unzipped/Nancy_Collins_118226967_v2';
    console.log("Ok so far")
    newUpload.save()
      .then(newUpload => {
        res.status(200).json({ file: "File added successfully" })
      })
      .catch(err => {
        res.status(400).send('File upload failed to save to DB :(')
      })
  } catch (e) {
    console.error(e);
  }
}
Obviously this isn't practical or dynamic, but it's possible.
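One way to make this dynamic, sketched under a couple of assumptions (that the unzip.Extract stream emits a 'close' event once extraction has finished, and that passing the extracted path along on res.locals is acceptable), is to wait for extraction to complete before calling next():

const unzipp = (req, res, next) => {
  const dir = 'uploads/unzipped/';
  let extractedPath;
  fs.createReadStream(req.file.path)
    .pipe(unzip.Extract({ path: dir }))
    .on('entry', entry => {
      // remember the path of the extracted entry (same pattern as in the question)
      extractedPath = path.join(dir, entry.path);
    })
    .on('close', () => { // assumption: fired once extraction has finished
      fs.unlink(req.file.path, () => {});
      res.locals.filePath = extractedPath; // hand the value to the next middleware
      next();
    })
    .on('error', next);
};

// in postfile, read the value back instead of using a global:
// newUpload.filePath = res.locals.filePath;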

Unable to successfully upload images to s3 and then view them

I'm trying to upload images to an S3 bucket as part of the application.
index.js
function upImg(req) {
  if (req.files.img) {
    var img = req.files.image;
    var name = Math.round(Math.random() * 10000).toString(); // Returns a random 5 digit number
    if (myDB.uploadImg(img, name)) {
      return name;
    } else {
      return "";
    }
  } else {
    return "";
  }
}

app.post('/newEV*', isLoggedIn, function (req, res) {
  var myURL = req.path.replace('/newEV', '');
  var imgPath = upImg(req);
  fetch(myURL).then(function (events) {
    var myID;
    var x = 0;
    while (!myID) {
      if (!events[x]) {
        myID = x;
      } else {
        x++;
      }
    }
    myDB.newEvent(myURL, req.body.name, req.body.desc, req.body.loc, imgPath, req.body.link, req.body.cap, req.body.date, req.body.time, myID, events);
    res.redirect('/edit' + myURL);
  });
});
myDB file
function signs3(file, name) {
  devs3();
  const s3 = new aws.S3();
  const s3Params = {
    Body: file,
    Bucket: S3_BUCKET,
    Key: name
  };
  s3.putObject(s3Params, function (err, data) {
    if (err) {
      throw err;
    } else {
      console.log("Data from putObject:" + JSON.stringify(data));
    }
  });
}

module.exports = {
  uploadImg: function (file, name) {
    var nName = "imgs/" + name;
    console.log(nName);
    signs3(file, nName);
    return true;
  }
}
I know that the signs3 function works because I can use it in other parts of my application to upload JSON files. Whenever I post to the URL, weirdly enough I can see the 'data from putObject' in the console; however, what I can't see is the nName. I don't understand this, as the console.log(nName) line should run before the other one. When I go to look at the bucket, the image hasn't uploaded (despite me getting an ETag in the console), and the page does not display it (I know this also works because it can display images already uploaded to the bucket).
You want to do something like this, subscribing to events on the Request object created when you call putObject.
const req = s3.putObject(s3Params);
req.on('success', res => {
  console.log('upload complete!');
});
req.on('error', res => {
  console.error(res.error);
});
req.send();
Why does this appear to work differently for small files (JSON files) and large files (images)? Because the large files take longer to upload.
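Alternatively (not part of the answer above, just a sketch reusing the same aws client and S3_BUCKET from the question), the AWS SDK v2 request can be turned into a promise with .promise() and awaited, which avoids wiring up events by hand:

// sketch: promise-based variant of signs3 from the question
async function signs3(file, name) {
  const s3 = new aws.S3();
  const s3Params = { Body: file, Bucket: S3_BUCKET, Key: name };
  try {
    const data = await s3.putObject(s3Params).promise(); // resolves once the upload has completed
    console.log('Data from putObject: ' + JSON.stringify(data));
  } catch (err) {
    console.error('upload failed', err);
  }
}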

Google Drive API Node.js Search files in specific location

I'm trying to look for files in a specific folder on Drive using the API. The reason for this is that my files can have copies with the same name in multiple folders. This is a snippet of what I wrote to check if the files exist, but the resulting query is undefined. Anyone got an idea?
function checkFile(filename, auth, folderId) {
  var service = google.drive('v3');
  console.log("folderId: " + folderId);
  var objectExists = false;
  var fetchPage = function (pageToken, pageFn, callback) {
    service.files.list({
      auth: auth,
      resource: { parents: [folderId] },
      q: "mimeType='application/pdf' and trashed=false",
      fields: 'nextPageToken, files(id, name)',
      spaces: 'drive',
      pageToken: pageToken,
    }, function (err, res) {
      if (err) {
        callback(err);
      } else {
        console.log(res)
        if (res.files != undefined) {
          res.files.forEach(function (file) {
            if (file.name == filename) {
              console.log('Found file: ', file.name, file.id);
              objectExists = true;
            }
          });
          if (res.nextPageToken) {
            pageFn(res.nextPageToken, pageFn, callback);
          } else {
            callback();
          }
        } else {
          callback();
        }
      }
    });
  };
  fetchPage(null, fetchPage, function (err) {
    if (err) {
      // Handle error
      console.log(err);
    } else {
      if (!objectExists)
        createFile(filename, auth, folderId)
    }
  });
}
Solved it by adding more data in fields:
fields: 'nextPageToken, files(id, name),files/parents',
Followed by adding an extra check-value:
if(file.name == filename && file.parents[0] == folderId)
Hi Akorna, I found your question while searching for something related. Would you mind if I ask for beginner-level code for searching with the Google Drive API in Node.js?
I have a skeleton code ready and have configured the drive for it through a service account.
const { google } = require('googleapis');
const readLine = require('readline');
const path = require('path');
const fs = require('fs');
const dotenv = require('dotenv');
const { file } = require('googleapis/build/src/apis/file');

dotenv.config();

const KEYFILEPATH = path.join(__dirname, '/credentials.json');
const SCOPES = ['https://www.googleapis.com/auth/drive', 'profile'];

const auth = new google.auth.GoogleAuth({
  keyFile: KEYFILEPATH,
  scopes: SCOPES
});

const drive = google.drive({ version: 'v3', auth });
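Building on that skeleton, a minimal sketch of a folder-scoped search (the folder ID and file name below are placeholders; the query uses the Drive v3 'folderId' in parents syntax to restrict results to one folder):

// Sketch: list files with a given name inside a specific folder (placeholder IDs).
async function findInFolder(folderId, filename) {
  const res = await drive.files.list({
    q: `'${folderId}' in parents and name = '${filename}' and trashed = false`,
    fields: 'nextPageToken, files(id, name, parents)',
    spaces: 'drive'
  });
  res.data.files.forEach(f => console.log('Found file:', f.name, f.id));
  return res.data.files;
}

findInFolder('FOLDER_ID_HERE', 'example.pdf').catch(console.error);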
