Azure Storage: avoid uploading a file if the name already exists - javascript

I am trying to upload a file to Azure Storage. I have it working using the multer-azure package, but if I upload a file with the same name as one already stored, the first one gets replaced.
From the docs it seems like I need to add an EtagMatch, but I am not sure where it should go.
https://azure.github.io/azure-storage-node/global.html#AccessConditions
My code:
var upload = multer({
  storage: multerAzure({
    account: STORAGE_ACCOUNT_NAME, // The name of the Azure storage account
    key: ACCOUNT_ACCESS_KEY, // A key listed under Access keys in the storage account pane
    container: 'demo', // Any container name; it will be created if it doesn't exist
    blobPathResolver: (_req, file, callback) => {
      let path;
      if (_req.body.pathToFile) {
        path = `${organization}/${_req.body.pathToFile}/${file.originalname}`;
      } else {
        path = `${organization}/${file.originalname}`;
      }
      // var blobPath = yourMagicLogic(req, file); // Calculate blobPath in your own way.
      callback(null, path);
    }
  })
}).single('file')

upload(req, res, function (err) {
  if (err instanceof multer.MulterError) {
    return res.status(500).json(err)
  } else if (err) {
    return res.status(500).json(err)
  }
  return res.status(200).send(req.file)
})

If the blob already exists on the service, it will be overwritten. You can use the doesBlobExist method of the SDK to check whether a blob exists at a given path in Node.js; only if it does not exist do you upload the file to Azure Storage.
var blobService = azure.createBlobService(storageAccount, accessKey);

blobService.doesBlobExist('azureblob', 'xxx/a.json', function (error, result) {
  if (!error) {
    if (result.exists) {
      console.log('Blob exists...');
    } else {
      console.log('Blob does not exist...');
    }
  }
});
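Note that a separate existence check is racy if two clients upload the same name at once. As the AccessConditions docs linked above suggest, you can instead have the service itself reject the write. A minimal sketch (the container, blob path, and local file name are placeholders, not taken from the question's code):

var azure = require('azure-storage');

var blobService = azure.createBlobService(STORAGE_ACCOUNT_NAME, ACCOUNT_ACCESS_KEY);

blobService.createBlockBlobFromLocalFile(
  'demo', // container
  'xxx/a.json', // blob path
  './a.json', // local file to upload
  // Only succeed if no blob with this name exists yet (If-None-Match: *)
  { accessConditions: azure.AccessCondition.generateIfNotExistsCondition() },
  function (error, result) {
    if (error) {
      // The service typically answers 409 Conflict (BlobAlreadyExists) here
      console.log('Upload skipped, blob already exists:', error.code);
    } else {
      console.log('Blob uploaded:', result.name);
    }
  }
);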

Related

Node.js upload multiple images - create dir when it doesn't exist

I'm using Node.js and multer to upload multiple images.
The first check is whether the directory exists; if not, it should be created.
Error: when the folder doesn't exist, every image passes the first fs.access condition with the message "Directory does not exist", but the directory is then created, so the second image gets the "Directory exists" branch instead.
var storage = multer.diskStorage({
  destination: (req, file, cb) => {
    const userId = encryptFolder(req.params["id"]);
    const dtrToCreate = "C:/Users/User/Pictures/" + userId;
    fs.access(dtrToCreate, (error) => {
      if (error) {
        console.log("Directory does not exist.", userId);
        fs.mkdirSync(dtrToCreate, (error, data) => {
          if (error) {
            throw error;
          }
          cb(null, "C:/Users/User/Pictures/");
        });
      } else {
        console.log("Directory exists.", userId);
        cb(null, "C:/Users/User/Pictures/");
      }
    });
  },
When the directory exists, images are uploaded successfully.
Working solution:
Since multiple files are uploaded, the directory check runs once per file, so it must tolerate the folder already existing. Creating the directory recursively handles every pass:
fs.mkdirSync(dtrToCreate, { recursive: true })
return cb(null, dtrToCreate)
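Putting it together, the whole destination callback reduces to something like this (a sketch reusing the question's encryptFolder helper and Windows path):

var storage = multer.diskStorage({
  destination: (req, file, cb) => {
    const userId = encryptFolder(req.params["id"]);
    const dtrToCreate = "C:/Users/User/Pictures/" + userId;
    // With { recursive: true } this is a no-op when the folder already
    // exists, so it is safe to run once per uploaded file
    fs.mkdirSync(dtrToCreate, { recursive: true });
    return cb(null, dtrToCreate);
  }
});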

Get Azure uploaded blob file url

I'm uploading a data stream to Azure Storage, and I would like to get the link to the blob file.
let insertFile = async function (blobName, stream) {
  const containerName = 'texttospeechudio';
  try {
    await blobService.createContainerIfNotExists(containerName, {
      publicAccessLevel: 'blob'
    }, (err, result, response) => {
      if (!err) {
        console.log(result);
      }
    });
    let resultstream = blobService.createWriteStreamToBlockBlob(containerName, blobName, (err, result, response) => {
      console.log(res)
    });
    stream.pipe(resultstream);
    stream.on('error', function (error) {
      console.log(error);
    });
    stream.once('end', function (end) {
      console.log(end)
      // OK
    });
  } catch (err) {
    console.log(err);
  }
}
I added the createWriteStreamToBlockBlob callback, but I'm not getting inside it.
I would like to find a way to get the uploaded file's URL.
There is no file URL returned in the response, according to the Put Blob REST spec.
An Azure Storage resource URL is commonly composed with the following pattern:
https://{myaccount}.blob.core.windows.net/{mycontainer}/{myblob}
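You can compose that string yourself from the account, container, and blob names, or let the SDK do it; a one-line sketch reusing the question's containerName and blobName variables:

// getUrl only builds the URL string, it makes no network call
let blobUrl = blobService.getUrl(containerName, blobName);
console.log(blobUrl); // https://{myaccount}.blob.core.windows.net/texttospeechudio/{blobName}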

Missing required parameter - public_id, and what to assign to imageId for Cloudinary

I'm building a website where users can upload insights and comments on reading novels, and users are free to attach an image of the novel or not.
If an image is chosen, the post schema has image & imageId attributes for Cloudinary uploads and future manipulation, such as changing (update route) or deleting (destroy route) it from the Cloudinary library.
If no image is chosen, then a default image takes its place.
Problem is, I managed to make the default image work, but I don't want multiple copies of the same default image uploaded to Cloudinary, so I put default_image.jpg in a local server folder (/public/images, to be exact). This default_image.jpg shouldn't be in the Cloudinary library, which should save a lot of capacity.
However, for these posts without a chosen image, I don't know what I should assign to their imageId property.
I tried undefined and null, and of course they didn't work, because then it won't be able to find a certain novel.imageId if they're all undefined or null.
// Schema for data
var fictionSchema = new mongoose.Schema({
  ...
  image: String,
  // for cloudinary API to track which image to delete or update
  imageId: String,
  ...
});
// CREATE route
router.post("/", middleware.isLoggedIn, upload.single('image'), (req, res) => {
  if (req.file) {
    // req.file.path comes from multer npm
    cloudinary.v2.uploader.upload(req.file.path, function (err, result) {
      if (err) {
        ...
      }
      // add cloudinary url for the image to the novel object under image property
      req.body.novel.image = result.secure_url;
      req.body.novel.imageId = result.public_id;
    });
  } else {
    // setup default image for new novels
    req.body.novel.image = "/images/noimage.jpg";
    // imageId here should be ... empty? Undefined? Null? A fixed ID that may be deleted when accessing the Destroy route?
    req.body.novel.imageId = undefined;
  }
  ...
  Novel.create(req.body.novel, function (err, newlyAddedNovel) {
    if (err) {
      req.flash('error', err.message);
      ...
    } else {
      req.flash("success", "Novel added successfully.");
      ...
    }
  });
});
// DESTROY route
router.delete("/:id", middleware.checkNovelOwnership, (req, res) => {
  // find and delete the correct novel along with the image on cloudinary
  Novel.findById(req.params.id, async (err, novel) => {
    if (err) {
      req.flash("error", err.message);
      return res.redirect("back");
    }
    try {
      // delete the image from cloudinary
      await cloudinary.v2.uploader.destroy(novel.imageId);
      // delete the novel found
      novel.remove();
      req.flash("success", "Novel deleted successfully.");
      ...
      ...
    } catch (err) {
      ...
    }
  });
});
Good news, I solved the problem! Hooray! Took me around 2 hours though...
In the end, everything worked just as I wanted, such finesse.
The code is below:
• CREATE route
For new posts, I set up every new novel with the default image first; then, if an image (req.file) is given, change it and upload it.
Afterwards, remember to create the novel data in the Mongo database (Novel.create() in my case).
router.post("/", middleware.isLoggedIn, upload.single('image'), async function (req, res) {
  // set every new novel with default image first
  req.body.novel.image = "https://res.cloudinary.com/dvfkbz6la/image/upload/v1565434656/noimage_ew1uri.jpg";
  req.body.novel.imageId = "noimage_ew1uri";
  req.body.novel.user = {
    id: req.user._id,
    username: req.user.username
  };
  if (req.file) {
    // req.file.path comes from multer npm
    await cloudinary.v2.uploader.upload(req.file.path, function (err, result) {
      if (err) {
        req.flash('error', err.message);
        return res.redirect('back');
      }
      // add cloudinary url for the image to the novel object under image property
      // add image's public_id (imageId in novel model) to novel object
      req.body.novel.image = result.secure_url;
      req.body.novel.imageId = result.public_id;
    });
  }
  Novel.create(req.body.novel, function (err, newlyAddedNovel) {
    if (err) {
      ...
    } else {
      ...
    }
  });
});
• UPDATE route
In the try block, if (novel.imageId = "DEFAULT_IMAGEID") { } else { } is added.
// UPDATE route
router.put("/:id", middleware.checkNovelOwnership, upload.single('image'), (req, res) => {
  // find the correct novel
  Novel.findById(req.params.id, async (err, novel) => {
    if (err) {
      ...
    } else {
      // if there's a req.file, we know the user is trying to upload a new image
      if (req.file) {
        try {
          // if imageId is default, await the result to be uploaded
          if (novel.imageId = "noimage_ew1uri") {
            var result = await cloudinary.v2.uploader.upload(req.file.path);
          } else {
            // if not default, find the old image using imageId and delete it
            await cloudinary.v2.uploader.destroy(novel.imageId);
            var result = await cloudinary.v2.uploader.upload(req.file.path);
          }
          novel.imageId = result.public_id;
          novel.image = result.secure_url;
        } catch (err) {
          ...
        }
      }
      novel.title = req.body.title;
      novel.author = req.body.author;
      novel.price = req.body.price;
      novel.description = req.body.description;
      // remember to save the changed novel
      novel.save();
      req.flash("success", "Successfully Updated!");
      res.redirect("/novels/" + novel._id);
    }
  });
});
• DESTROY route
In the try block, if (novel.imageId != "DEFAULT_IMAGEID") { } else { } is added.
// DESTROY route
router.delete("/:id", middleware.checkNovelOwnership, (req, res) => {
  // find and delete the correct novel along with the image on cloudinary
  Novel.findById(req.params.id, async (err, novel) => {
    if (err) {
      req.flash("error", err.message);
      return res.redirect("back");
    }
    try {
      // if novel.imageId isn't default, destroy it from Cloudinary
      if (novel.imageId != "noimage_ew1uri") {
        await cloudinary.v2.uploader.destroy(novel.imageId);
      }
      // delete the novel found
      novel.remove();
      ...
    } catch (err) {
      ...
    }
  });
});
The only problem left is, I don't know why, but when hitting the UPDATE route, when the imageId isn't the default one and the user changes the image, the old image is NOT destroyed and stays in the Cloudinary library. Such oddness.
I sure have this code set up:
Novel.findById(req.params.id, async (err, novel) => {
  if (err) {
    ...
  } else {
    if (req.file) {
      try {
        // if imageId is default, await the result to be uploaded
        if (novel.imageId = "noimage_ew1uri") {
          ...
        } else {
          // if not default, find the old image using imageId and delete it
          await cloudinary.v2.uploader.destroy(novel.imageId);
          var result = await cloudinary.v2.uploader.upload(req.file.path);
        }
        ...
        ...
I don't know why await cloudinary.v2.uploader.destroy(novel.imageId); isn't working as expected. Hmmm...
Images that were already delivered on your site are cached in one or more CDN edges. Therefore, even if you delete the image from your Cloudinary account, cached copies might still be available. By default, all delivered images are cached for 30 days.
When calling the destroy API with the invalidate parameter set to true, the image is deleted and also purged from the CDN, allowing the new image to be displayed.
Alternatively, when updating, you can accomplish the same result by setting the overwrite and invalidate parameters to true when uploading the new file (without needing to call the destroy method).
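In code, the two variants would look roughly like this (a sketch reusing the question's identifiers; invalidate, overwrite, and public_id are standard Cloudinary upload/destroy options):

// Option 1: purge the CDN copy when destroying the old image
await cloudinary.v2.uploader.destroy(novel.imageId, { invalidate: true });

// Option 2: overwrite in place on upload, no destroy call needed
var result = await cloudinary.v2.uploader.upload(req.file.path, {
  public_id: novel.imageId, // reuse the same public_id
  overwrite: true,
  invalidate: true
});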

How to run async.each within async.series in an AWS Lambda function?

We're writing a serverless function in AWS Lambda that does the following:
1. Fetches JSON files from AWS S3 (multiple files).
2. Merges the JSON files into one single file.
3. Uploads the new file from (2) back to S3.
To handle this flow, we're trying to use the async library in Node.js within our Lambda. Here's the code we have so far:
'use strict';
const AWS = require('aws-sdk');
const async = require('async');
const BUCKET = 'sample-bucket';
const s3path = 'path/to/directory';

exports.handler = (event, context, callback) => {
  let filepaths = []; // Stores filepaths
  let all_data = []; // Array to store all data retrieved
  let s3Client = new AWS.S3({ params: { Bucket: BUCKET } }); // S3 bucket config

  // Runs functions in the series provided to ensure each function runs one after the other
  async.series([
    // Reads file IDs from the event and saves them in the filepaths array
    function(callbackSeries) {
      console.log('Starting function...');
      event.forEach(id => {
        filepaths.push(`${id}.json`);
      });
      console.log('All filenames pushed.');
      callbackSeries();
    },
    // Fetches each file from S3 using filepaths,
    // then pushes the objects from each file to the all_data array
    function(callbackSeries) {
      async.each(filepaths, (file, callback) => {
        let params = {
          'Bucket': BUCKET,
          'Key': s3path + file
        }
        s3Client.getObject(params, (err, data) => {
          if (err) {
            throw err;
          } else {
            if (data.Body.toString()) {
              console.log('Fetching data...');
              all_data.push(JSON.parse(data.Body.toString()));
              callback();
              console.log('Fetched.');
            }
          }
        });
      },
      (err) => {
        if (err) throw err;
        else callbackSeries();
      });
    },
    // NOTHING IS BEING EXECUTED AFTER THIS
    // Puts the object back into the bucket as one merged file
    function(callbackSeries) {
      console.log('Final function.')
      // Concatenates the multiple arrays in all_data into a single array
      all_data = Array.prototype.concat(...all_data);
      let params = {
        'Bucket': BUCKET,
        'Body': JSON.stringify(all_data),
        'Key': 'obj.json'
      };
      s3Client.putObject(params, (err, data) => {
        if (err) throw err;
        else console.log('Success!');
      })
    }
  ], (err) => {
    if (err) throw err;
    else console.log('End.');
  })
}
The first two functions in our async.series run normally and all their console.log statements execute. But our third function, i.e.:
// Puts the object back into the bucket as one merged file
function(callbackSeries) {
  console.log('Final function.')
  // Concatenates the multiple arrays in all_data into a single array
  all_data = Array.prototype.concat(...all_data);
  let params = {
    'Bucket': BUCKET,
    'Body': JSON.stringify(all_data),
    'Key': 'obj.json'
  };
  s3Client.putObject(params, (err, data) => {
    if (err) throw err;
    else console.log('Success!');
  })
}
is not being executed at all. We added console.log statements, but none of them seem to execute.
I've consulted both AWS Support and async's documentation, but can't seem to figure out the problem.
Any assistance would be highly appreciated.
Many thanks.
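One observation, offered as an assumption rather than a verified diagnosis: in the second series task, the iteratee only calls callback() when data.Body.toString() is truthy, and it throws on error instead of passing the error along; in either case async.each never signals completion, so the third task and the final series callback never run. A sketch of that task with every path calling its callback exactly once:

// Replacement for the second async.series task (same variables as above)
function(callbackSeries) {
  async.each(filepaths, (file, callback) => {
    let params = {
      'Bucket': BUCKET,
      'Key': s3path + file
    };
    s3Client.getObject(params, (err, data) => {
      if (err) {
        return callback(err); // propagate instead of throwing
      }
      if (data.Body.toString()) {
        all_data.push(JSON.parse(data.Body.toString()));
      }
      callback(); // reached even when the body is empty
    });
  }, (err) => {
    callbackSeries(err); // hand control back to async.series
  });
},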

Save images on Google Storage in JavaScript

My goal is to store an image on S3 or Google Storage and save the link to the database. How can I do it? Is there a free solution?
Can someone link me a code sample for doing that?
I have never used Google Storage or S3 before.
I pick an image like that:
handleImage = e => {
  e.preventDefault();
  let reader = new FileReader();
  let file = e.target.files[0];
  reader.onloadend = () => {
    this.setState({
      file: file,
      image: reader.result
    });
  }
  reader.readAsDataURL(file);
  this.setState({ imgchange: true })
}
And then send it to server:
this.props.editUser({
  img: this.state.image,
})
My server is written with node.js
I've done this exact thing before, though I'm not claiming it's the most straightforward way.
1) Sent the image from the client side to the server as a base64 string, 2) created a buffer from it, 3) used ImageMagick to stream it to my Google Cloud bucket, and 4) stored the link to the Google Cloud bucket object in your database as the imgLink (or what have you) so it can show in your front-end application.
Some important things to require at the top:
var gm = require('gm').subClass({ imageMagick: true });
var gcs = require('@google-cloud/storage')({
  keyFilename: sails.config.gcloud.keyFileName,
  projectId: sails.config.gcloud.projectId
});
var bucket = gcs.bucket(sails.config.gcloud.bucketname);
Step 1 - Send the base64 image to the backend service and decode it
imageControllerFirstHitAtEndpoint: function (req, res) {
  PictureService.uploadPictureCDNReturnLink(req.body.picture, function (err, imageLink) {
    if (err) {
      // Handle error...
    }
    // Success, save that imageLink to whatever db object you want
  });
}
Step 2 and 3 - Create buffer with base64 data and Stream it to Google Cloud Bucket
// Step 2 create buffer with base64 data
uploadPictureCDNReturnLink: function(picDataInBase64, cb) {
var imageBuffer;
imageBuffer = PictureService.bufferFromBase64(picDataInBase64);
var file = bucket.file('cool-file-name');
// Step 3 stream it to where it needs to go
gm(imageBuffer)
.stream()
.pipe(file.createWriteStream())
.on('finish', function() {
file.getMetadata(function(err, metadata) {
if (err) {
console.log("Error getting metadata from google cloud", err);
return cb(err);
}
cb(null, metadata.mediaLink);
});
}).on('error', function(err) {
console.log("Got an error uploading to Cloud Storage:", err);
cb(err);
});
}
Step 4 - Save that imageLink wherever you want
Won't spell this one out totally for you; it's not that hard. Something like:
Organization.findOne(req.body.orgID).exec(function (err, organization) {
  if (!organization) {
    return res.json(400, { error: "No organization with id: " + req.param('id') });
  }
  if (err) {
    return res.json(400, err);
  }
  organization.pictureLink = imageLink;
  organization.save(function (err) {
    return res.json(organization);
  });
});
Hope that helps! It should give you an idea of one way to do it.
P.S. A lot of that code follows Sails-like Node.js conventions; Sails is my backend framework of choice.
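As a side note, with current releases of @google-cloud/storage (v5 or later, an assumption; the answer above predates them) you can skip gm/ImageMagick entirely and write the decoded buffer straight to the bucket, then store the public URL:

const { Storage } = require('@google-cloud/storage');

// keyFilename, projectId, and bucket name below are placeholders
const storage = new Storage({ keyFilename: 'key.json', projectId: 'my-project' });
const bucket = storage.bucket('my-bucket');

async function uploadBase64Image(base64Data, destName) {
  const buffer = Buffer.from(base64Data, 'base64');
  const file = bucket.file(destName);
  // save() uploads the whole buffer in a single call
  await file.save(buffer, { contentType: 'image/jpeg' });
  // publicUrl() composes https://storage.googleapis.com/{bucket}/{file}
  return file.publicUrl();
}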
