I am trying to upload an image to an S3 bucket using a presigned URL generated with boto3 in Python. I have been using the example Python code provided in the documentation, and it worked (the image got uploaded with the correct Content-Type). However, when trying to do the same in JavaScript for our frontend application, I am really struggling to get it to work.
Here's the example dictionary returned by the backend:
{
    "fields": {
        "AWSAccessKeyId": "AKIAYS3VM3EBIFL7FKE5",
        "key": "posts/623255a762fd9bdfbd13f91a",
        "policy": "<very long string>",
        "signature": "Qvc/sGBHk0uzirzIfR1YmE2kFlo="
    },
    "url": "https://hotspot-storage.s3.amazonaws.com/"
}
Here is the functioning Python code:
response = <json response object>
object_name = 'playground/example_profile_group.png'
response['fields']['Content-Type'] = "image/png"

# Demonstrate how another Python program can use the presigned URL to upload a file
with open(object_name, 'rb') as f:
    files = {'file': (object_name, f)}
    http_response = requests.post(response['url'], data=response['fields'], files=files)

# If successful, returns HTTP status code 204
print(http_response)
print(http_response.text)
Here is the non-functioning JavaScript code:
const data = response.data;
let payload = data.fields;
payload['Content-Type'] = 'image/jpeg';
const file = {
  uri: previewPath,
  name: previewPath,
  type: 'image/jpeg',
};
payload.file = file;
const url = data.url;
console.log(payload, "MY PAYLOAD")
axios({
  method: 'post',
  headers: {'Content-Type': 'multipart/form-data'},
  url: url,
  data: payload,
})
  .then(function (response) {
    console.log(response.data, 'uploaded');
    const data = response.data;
  })
  .catch(function (error) {
    console.log(
      'error uploading image',
      error.response.data,
    );
  });
})
.catch(function (error) {
  console.log(
    'error getting media link',
    error.response.data,
  );
});
This is the error that keeps getting returned:
error uploading image <?xml version="1.0" encoding="UTF-8"?>
<Error><Code>MalformedPOSTRequest</Code><Message>The body of your POST request is not well-formed multipart/form-data.</Message><RequestId>Q0ES6P4QP75YVVED</RequestId><HostId>eowLxSJQD1xP1EfHPnzGSJzXVGpPjurIMhkdwAD22JMvi9zRoFGg6Bq+mnUt/Lu7DNPY80iBDMc=</HostId></Error>
I have been stuck on this for an absurd amount of time, and cannot tell what I am doing wrong. Any help would be very much appreciated.
In order to send a multipart/form-data request body, you'll need to use a FormData instance instead of a JavaScript object.
For example
const { url, fields } = response.data;
const payload = new FormData();

// add all the presigned fields first
Object.entries(fields).forEach(([key, val]) => {
  payload.append(key, val);
});
payload.append("Content-Type", "image/jpeg");

// the file must be the last field: S3 ignores every field that comes after it
payload.append("file", file); // this is the file blob, e.g. from <input type="file">

// No need to manually set the Content-Type header, your browser knows what to do
const { data: result } = await axios.post(url, payload);
console.log("uploaded", result);
I'm new to JavaScript, and am trying to write some code that uses the Google Drive API (via the gapi client) to export an existing Slides presentation as a PDF document, upload it to a specific folder, and return the PDF file ID. This is all to be done in the browser, if possible.
I've already done this in Python for another use case, and the code looks something like this:
import googleapiclient.http as client_methods
from io import BytesIO
...
data = drive.files().export(fileId=slideId, mimeType='application/pdf').execute()
body = {'name': fileName, 'mimeType': 'application/pdf', 'parents': [folderId]}
# wrapping the binary data with the BytesIO class
fh = BytesIO(data)
# creating the MediaIoBaseUpload class for the file
media_body = client_methods.MediaIoBaseUpload(fh, mimetype='application/pdf')
pdfFileId = drive.files().create(body=body, media_body=media_body, supportsAllDrives=True, fields='id').execute()
I've tried to replicate the same steps using JavaScript and my limited knowledge, and can successfully upload a PDF file into the desired folder, but the file shows as empty (it doesn't even open in Drive).
I believe it might be due to the way I'm handling the binary data that I get from exporting the initial slide.
The last iteration of my JavaScript code is shown below (I have all the necessary permissions to use the gapi client):
async function createPdfFile() {
  gapi.client.load("drive", "v3", function () {
    // Set the MIME type for the exported file
    const mimeType = "application/pdf";
    // Set the file name for the exported PDF file
    const fileName = "Trial upload.pdf";
    // Export the Google Slides presentation as a PDF file
    gapi.client.drive.files.export({
      fileId,
      mimeType
    }).then(async function (response) {
      // Get the binary data of the PDF file
      const pdfData = await response.body;
      const blob = await new Blob([pdfData], { type: 'application/pdf' });
      const file = new File([blob], "presentation.pdf");
      // Create a new file in the specified Google Drive folder with the PDF data
      await gapi.client.drive.files.create({
        name: fileName,
        parents: [folderId],
        mimeType: mimeType,
        media: { mimeType: 'application/pdf', body: file },
        supportsAllDrives: true
      }).then(function (response) {
        // Get the ID of the created PDF file
        const pdfFileId = response.result.id;
        console.log("PDF file created with ID: " + pdfFileId);
      });
    });
  });
}
await createPdfFile()
As for the output, and as stated, it does create a PDF file and logs the PDF file ID, but the file itself is empty. I'd really appreciate it if someone could help me make sense of this (similar thread here, but can't replicate his success).
I believe your goal is as follows.
You want to convert Google Slides to PDF format using googleapis for JavaScript.
Your access token can be used for exporting the file and uploading it to Google Drive.
Issue and workaround:
When I tested your script, I found that response.body from gapi.client.drive.files.export is binary data that cannot be correctly converted to a blob this way. Also, at the current stage, it seems that file content cannot be uploaded using gapi.client.drive.files.create. I suspect these are the reasons for your issue.
Given this, I would like to propose a flow that achieves your goal using the fetch API. The modified script is as follows.
In this case, the access token is retrieved from the client with gapi.auth.getToken().access_token.
Modified script:
Please modify your script as follows.
From:
gapi.client.drive.files.export({
  fileId,
  mimeType
}).then(async function (response) {
  // Get the binary data of the PDF file
  const pdfData = await response.body;
  const blob = await new Blob([pdfData], { type: 'application/pdf' });
  const file = new File([blob], "presentation.pdf");
  // Create a new file in the specified Google Drive folder with the PDF data
  await gapi.client.drive.files.create({
    name: fileName,
    parents: [folderId],
    mimeType: mimeType,
    media: { mimeType: 'application/pdf', body: file },
    supportsAllDrives: true
  }).then(function (response) {
    // Get the ID of the created PDF file
    const pdfFileId = response.result.id;
    console.log("PDF file created with ID: " + pdfFileId);
  });
});
To:
gapi.client.drive.files.get({ fileId, fields: "exportLinks", supportsAllDrives: true }).then(function (response) {
  const obj = JSON.parse(response.body);
  if (Object.keys(obj).length == 0) throw new Error("This file cannot be converted to PDF format.");
  const url = obj.exportLinks["application/pdf"];
  if (!url) throw new Error("No exported URL.");
  const accessToken = gapi.auth.getToken().access_token;
  fetch(url, {
    method: 'GET',
    headers: { 'Authorization': 'Bearer ' + accessToken },
  })
    .then(res => res.blob())
    .then(blob => {
      const metadata = { name: fileName, parents: [folderId], mimeType };
      const form = new FormData();
      form.append('metadata', new Blob([JSON.stringify(metadata)], { type: 'application/json' }));
      form.append('file', blob);
      fetch('https://www.googleapis.com/upload/drive/v3/files?uploadType=multipart&supportsAllDrives=true', {
        method: 'POST',
        headers: { 'Authorization': 'Bearer ' + accessToken },
        body: form
      })
        .then(res => res.json())
        .then(obj => console.log("PDF file created with ID: " + obj.id));
    });
});
When this script is run, the PDF export URL is retrieved from the file ID; the PDF data is then downloaded and uploaded to Google Drive.
Note:
In your script, fileId is not declared. Please be careful about this.
If the file size is more than 5 MB, please use the resumable upload (a rough sketch follows after the reference below).
Reference:
Upload file data
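For the resumable-upload note above, here is a minimal sketch (not a tested drop-in; it assumes blob, fileName, folderId, mimeType and accessToken from the snippets above, and that the browser can read the Location header of the response): you first POST the metadata with uploadType=resumable, read the upload session URI from the Location header, then PUT the file bytes to that URI.

// Sketch only: resumable upload for files over 5 MB.
const initRes = await fetch('https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable&supportsAllDrives=true', {
  method: 'POST',
  headers: {
    'Authorization': 'Bearer ' + accessToken,
    'Content-Type': 'application/json; charset=UTF-8',
  },
  body: JSON.stringify({ name: fileName, parents: [folderId], mimeType }),
});
// The upload session URI comes back in the Location header.
const sessionUri = initRes.headers.get('Location');
const uploadRes = await fetch(sessionUri, { method: 'PUT', body: blob });
console.log('PDF file created with ID: ' + (await uploadRes.json()).id);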
Added:
From your following reply,
?uploadType=multipart also returns a 404 type error
I suspect that in your situation new FormData() might not be usable. If so, please test the following script, in which the multipart/related request body is constructed manually.
Modified script:
gapi.client.drive.files.get({ fileId, fields: "exportLinks", supportsAllDrives: true }).then(function (response) {
  const obj = JSON.parse(response.body);
  if (Object.keys(obj).length == 0) throw new Error("This file cannot be converted to PDF format.");
  const url = obj.exportLinks["application/pdf"];
  if (!url) throw new Error("No exported URL.");
  const accessToken = gapi.auth.getToken().access_token;
  fetch(url, {
    method: 'GET',
    headers: { 'Authorization': 'Bearer ' + accessToken },
  })
    .then(res => res.blob())
    .then(blob => {
      const metadata = { name: fileName, parents: [folderId], mimeType };
      const fr = new FileReader();
      fr.onload = e => {
        const data = e.target.result.split(",");
        const req = "--xxxxxxxx\r\n" +
          "Content-Type: application/json\r\n\r\n" +
          JSON.stringify(metadata) + "\r\n" +
          "--xxxxxxxx\r\n" +
          "Content-Transfer-Encoding: base64\r\n\r\n" +
          data[1] + "\r\n" +
          "--xxxxxxxx--";
        fetch('https://www.googleapis.com/upload/drive/v3/files?uploadType=multipart&supportsAllDrives=true', {
          method: 'POST',
          headers: { 'Authorization': 'Bearer ' + accessToken, "Content-Type": "multipart/related; boundary=xxxxxxxx" },
          body: req
        })
          .then(res => res.json())
          .then(obj => {
            console.log("PDF file created with ID: " + obj.id);
          });
      };
      fr.readAsDataURL(blob);
    });
});
When I tested this script, no errors occurred: the Google Slides file was converted to a PDF file and the PDF file was uploaded to the specified folder.
When attempting to upload a file to Amazon S3 using axios, I have been encountering a very strange issue. Normally, in a web browser, when FormData contains binary data, the Content-Type header is automatically set to multipart/form-data; boundary=<some random string>. However, I have been completely unable to achieve that in React Native (testing on an iOS device). The Content-Type is automatically set to application/json, so the body is not recognized as well-formed multipart data when uploading to Amazon S3. I have also tried specifying a blob in the file parameter of FormData instead of the URI to the file, to no avail. My code is appended below; any advice would be very much appreciated.
const uploadFileToS3 = (presignedPostData, file) => {
  // create a form obj
  const formData = new FormData();
  // append the fields in presignedPostData to formData
  Object.keys(presignedPostData.fields).forEach(key => {
    formData.append(key, presignedPostData.fields[key]);
  });
  // append the file and upload
  const getBlob = async () => {
    const img_url = previewPath;
    let result = await fetch(img_url);
    const blob = await result.blob();
    formData.append('Content-Type', 'image/jpeg');
    formData.append('file', {
      uri: previewPath,
      type: 'image/jpeg',
      name: 'test.jpeg',
    });
    console.log(formData, 'wild');
    // post the data to the s3 url
    axios
      .post(presignedPostData.url, formData)
      .then(function (response) {
        console.log(response);
      })
      .catch(function (error) {
        console.log(error.response);
      });
  };
  getBlob();
};
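For what it's worth, one commonly suggested workaround (a sketch, not verified against your axios version; some axios releases have been reported to serialize React Native's FormData as JSON instead of handing it to the native layer) is to post the same FormData with fetch, so the native networking stack builds the multipart body and boundary itself:

// Sketch: same presigned POST data, sent with fetch instead of axios.
// Do not set a Content-Type header; the native layer adds the boundary.
const res = await fetch(presignedPostData.url, {
  method: 'POST',
  body: formData,
});
console.log(res.status); // S3 returns 204 on success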
I have a component which processes and uploads images. Currently I process the image on my backend, send it to my frontend, and upload it from there. I would like to do everything on my backend. The only issue is that the upload endpoint requires a FormData object. I found the npm package form-data, which I'm now using on my backend, but I'm still getting an error.
This is how it currently works:
// frontend logic:
const data = await uploadImage(img);
const file = new File([Buffer.from(data)], `img-${i}.webp`, {
  type: "image/webp",
});
const formData = new FormData();
formData.append("path", "images");
formData.append("files", file, file.name);
await axios
  .post("http://localhost:1338/api/upload", formData, {
    headers: { authorization: `Bearer ${jwtToken}` },
  })
  .then(({ data }) => {
    console.log(data);
  })
  .catch(console.log);
//
//
// backend logic:
const data = await processImage(img.url);
return data;
This is what I'm trying to do:
// frontend logic:
const data = await uploadImage(img);
//
//
// backend logic:
const data = await processImage(img.url);
const formData = new FormData();
formData.append("path", "images");
formData.append("files", data, "file.name");
await axios
  .post("http://localhost:1338/api/upload", formData, {
    headers: { authorization: `Bearer ${process.env.JWT_TOKEN}` },
  })
  .then(({ data }) => {
    console.log(data);
  })
  .catch(console.log); // I get error: 413 Payload Too Large
I'm testing with the same image that works with the first method. Perhaps I need to create a new File(), but I couldn't find any npm package that worked for that. What should I do to get this working?
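One detail worth checking (an assumption on my part, since the server setup isn't shown): Node's form-data package doesn't integrate with axios the way the browser's FormData does, so you generally have to pass its generated multipart headers yourself and raise axios's default body-size limits for larger payloads. A sketch along those lines, reusing the names from the snippet above (note that a 413 can also come from the upload server's own size limit, which no client-side change will fix):

// Sketch: posting Node's form-data with axios.
const FormData = require("form-data");

const formData = new FormData();
formData.append("path", "images");
// form-data accepts Buffers/streams; filename and contentType help the server
formData.append("files", data, { filename: "img.webp", contentType: "image/webp" });

await axios.post("http://localhost:1338/api/upload", formData, {
  headers: {
    ...formData.getHeaders(), // sets multipart/form-data with the boundary
    authorization: `Bearer ${process.env.JWT_TOKEN}`,
  },
  maxBodyLength: Infinity, // axios's defaults can reject large request bodies
  maxContentLength: Infinity,
});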
I am currently trying to send an image captured via ngx-webcam directly to a Face Detection API through my Node.js backend, without saving it to the server first. The problem is that I keep getting a header error in my Node.js file. How can I resolve this issue?
I noticed that the image URL being passed is quite long. Could that be an issue?
Image URL:
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCAHgAoADASIAAhEBAxE..."
My error is:
TypeError [ERR_HTTP_INVALID_HEADER_VALUE]: Invalid value "undefined" for header "Content-Length"
at ClientRequest.setHeader (_http_outgoing.js:473:3)
at FormData.<anonymous> (C:\Users\Roger\Documents\GitHub\angular-face-recognition-app\back-end\node_modules\form-data\lib\form_data.js:321:13)
at C:\Users\Roger\Documents\GitHub\angular-face-recognition-app\back-end\node_modules\form-data\lib\form_data.js:265:7
at C:\Users\Roger\Documents\GitHub\angular-face-recognition-app\back-end\node_modules\form-data\node_modules\async\lib\async.js:251:17
at done (C:\Users\Roger\Documents\GitHub\angular-face-recognition-app\back-end\node_modules\form-data\node_modules\async\lib\async.js:126:15)
at C:\Users\Roger\Documents\GitHub\angular-face-recognition-app\back-end\node_modules\form-data\node_modules\async\lib\async.js:32:16
at C:\Users\Roger\Documents\GitHub\angular-face-recognition-app\back-end\node_modules\form-data\node_modules\async\lib\async.js:248:21
at C:\Users\Roger\Documents\GitHub\angular-face-recognition-app\back-end\node_modules\form-data\node_modules\async\lib\async.js:572:34
at C:\Users\Roger\Documents\GitHub\angular-face-recognition-app\back-end\node_modules\form-data\lib\form_data.js:105:13
at FSReqWrap.oncomplete (fs.js:153:21)
Front end: Angular
Component file:
// captures image function
public handleImage(webcamImage: WebcamImage): void {
  // stores it into the webcamImage variable
  this.webcamImage = webcamImage;
  // uses fda.sendImage to send webcamImage to the API via a service
  this.fda.sendImage(this.webcamImage.imageAsDataUrl).subscribe(res => {});
}
Service file:
sendImage(imgUrl) {
  console.log(imgUrl);
  const obj = {
    url: imgUrl
  };
  return this.http.post(`${this.uri}`, obj);
}
Backend: Node.js
Route file:
facedetAPIRoutes.route("/").post(function (req, res) {
  let imageUrl = req.body.url;
  myFaceDetAPI.recognizeImg(imageUrl).then(function (result) {
    // here is your response back
    res.json(result);
  });
});
Function file for the API call (uses a promise):
// I believe the problem lies somewhere here
this.recognizeImg = (url) => {
  let requestString = "https://lambda-face-recognition.p.rapidapi.com/recognize";
  let req = unirest("POST", requestString);
  let imgURL = url;
  let promise = new Promise(function (resolve, reject) {
    unirest.post(requestString)
      .header("X-RapidAPI-Key", API_KEY)
      .attach("files", fs.createReadStream(imgURL))
      .field("album", ALBUM_NAME)
      .field("albumkey", ALBUM_KEY)
      .end(result => {
        console.log("successfully recognized image");
        resolve(result.body); // giving the response back
      });
  });
  return promise;
};
You should try adding x-rapidapi-host and content-type headers.
.headers({
  "content-type": "application/x-www-form-urlencoded",
  "x-rapidapi-host": "lambda-face-recognition.p.rapidapi.com",
  "x-rapidapi-key": "",
  "useQueryString": true
})
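Separately, one thing stands out in the question's code (an observation beyond the headers fix, so treat this sketch as an assumption about the setup): fs.createReadStream expects a filesystem path, but the value passed in is a base64 data URL from ngx-webcam, which would also explain form-data failing to compute Content-Length. Decoding the data URL into a Buffer and appending that with the form-data package (already in your node_modules, per the stack trace) is one way around it:

// Sketch: decode "data:image/jpeg;base64,<payload>" into a Buffer and attach it.
const FormData = require("form-data");

const base64Data = url.split(",")[1]; // strip the data-URL prefix
const imgBuffer = Buffer.from(base64Data, "base64");

const form = new FormData();
// A Buffer has a known size, so form-data can compute Content-Length
form.append("files", imgBuffer, { filename: "capture.jpg", contentType: "image/jpeg" });
form.append("album", ALBUM_NAME);
form.append("albumkey", ALBUM_KEY);

form.submit({
  protocol: "https:",
  host: "lambda-face-recognition.p.rapidapi.com",
  path: "/recognize",
  headers: { "X-RapidAPI-Key": API_KEY },
}, (err, res) => {
  if (err) return console.error(err);
  console.log("status:", res.statusCode);
});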
I'm trying to upload files from the browser to Amazon S3. I've modified the bucket's CORS policy to allow POST, but I'm getting the error
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>InvalidArgument</Code><Message>Bucket POST must contain a field named 'key'. If it is specified, please check the order of the fields.</Message>
<ArgumentValue></ArgumentValue><ArgumentName>key</ArgumentName><RequestId>1E0A8DC78C0CEA9A</RequestId><HostId>XN38Qje9hUrGqHNIhtT8CtowX9tXlpyfEoaXb1UNxlsyLOWreh2mKqKVXg1zjLVl</HostId></Error>
Here are my request and response. I'm passing the key parameter in the right order but still getting this error.
Can anyone tell me what's wrong with it? I'm submitting the request using FormData.
Any help would be greatly appreciated.
Thanks
Edit: here is the code, please check:
var form_data = new FormData();
form_data.append('file', hdlr.file);
//form_data.append('crop_type', settings.get_cropped_type());
//form_data.append('attributes', JSON.stringify(file_attr));
$('input:hidden', $form).each(function () {
  form_data.append(this.name, this.value);
});
// finally post the file through AJAX
var xhr = new XMLHttpRequest();
xhr.open("POST", $form[0].action, true);
xhr.send(form_data);
It looks like your file form field appears first in the request. I can't tell for sure, since you have not included the entire request payload in your question, but it seems to appear just above the "key" field. AWS ignores all fields in the request after the file field, so all other fields must appear before the file.
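Applied to the snippet in the question, that just means moving the file append after the hidden-field loop (a sketch):

var form_data = new FormData();
// signed fields (key, policy, signature, ...) first
$('input:hidden', $form).each(function () {
  form_data.append(this.name, this.value);
});
// the file must come last: S3 ignores any field after it
form_data.append('file', hdlr.file);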
Thank you, Ray Nicholus. It works for me.
{
  "formAttributes": {
    "action": "https://**.s3.ap-southeast-1.amazonaws.com",
    "method": "POST",
    "enctype": "multipart/form-data"
  },
  "formInputs": {
    "acl": "public-read",
    "key": "users/2/images/drops-of-water-578897_640.jpg",
    "X-Amz-Credential": "**",
    "X-Amz-Algorithm": "AWS4-HMAC-SHA256",
    "X-Amz-Date": "**",
    "Policy": "**",
    "X-Amz-Signature": "**"
  }
}
function uploadFile(event) {
  event.preventDefault();
  getSignedPost().then(() => {
    const fileEl = document.getElementById('id-file');
    const file = fileEl.files[0];
    const formData = new FormData();
    Object.keys(uploadCredentials.formInputs).forEach((key) => {
      formData.append(key, uploadCredentials.formInputs[key]);
    });
    // update key to the file name
    const key = `users/2/images/${file.name}`;
    formData.set('key', key);
    uploadCredentials.formInputs.key = key;
    // update the data shown on the page
    const el = document.getElementById('id-upload-info');
    el.innerText = JSON.stringify(uploadCredentials, null, 2);
    // IMPORTANT: https://stackoverflow.com/a/15235866
    // AWS ignores all fields in the request after the file field, so all other fields must appear before the file.
    formData.append('file', file);
    fetch(uploadCredentials.formAttributes.action, {
      method: uploadCredentials.formAttributes.method,
      // headers: {
      //   'Content-Type': 'multipart/form-data',
      // },
      body: formData,
    })
      .then((res) => {
        if (res.status === 204) {
          console.log('Successfully uploaded file');
          console.log('-- 204 - no content');
          return `Successfully uploaded file: ${key}`;
        }
        if (res.ok) {
          return res.json();
        } else {
          return res.text();
        }
      })
      .then((res) => {
        alert(JSON.stringify(res, null, 2));
      })
      .catch((err) => {
        alert(err.message || err);
      });
  });
}