How to add a watermark to a PDF using Node.js? - javascript

I generated a PDF through Node.js and I want to add a watermark to it. I used the dynamic-html-pdf plugin in my code. Is there an option for adding a watermark in dynamic-html-pdf? Here is my sample code:
var path = require('path');
var pdf = require('dynamic-html-pdf');

var html = '<!DOCTYPE html><html><head><style>';
html = html + '</style>';
html = html + '</head>';
html = html + '<body>';
html = html + '<div class="divstyle1" id="content">A computer is a device that can be instructed to carry out sequences of arithmetic or logical operations automatically via computer programming. </div>';
html = html + '</body></html>';

var options = {
    format: "A4",
    orientation: "portrait",
    border: "10mm",
    base: 'file://' + path.resolve('./public/graph') + '/'
};

var document = {
    type: 'file',
    template: html,
    context: {
        img: './public/graph/logo.jpg'
    },
    path: "./public/graph/mypdf.pdf"
};

pdf.create(document, options)
    .then(res => {
        res.status(200).json({
            message: 'pdf created'
        });
    })
    .catch(error => {
        res.status(200).json({
            message: 'error'
        });
    });

After saving your PDF document, use the image-watermark module to append a watermark to the generated PDF:
var watermark = require('image-watermark');
watermark.embedWatermark('/path/to/your/generated/pdf', {'text' : 'sample watermark'});

After the PDF is created, use the pdf-watermark package to add a watermark to the file:
const PDFWatermark = require('pdf-watermark');

// PDFWatermark returns a promise, so call it from inside an async function.
(async () => {
    await PDFWatermark({
        pdf_path: "./newsletter.pdf",
        text: "Gentech", // optional
        image_path: "./everest.png",
    });
})();

Another solution for adding a text watermark to a PDF document is the Aspose.PDF Cloud SDK for Node.js. It is a commercial product but provides 150 free monthly API calls.
Currently, it supports processing PDF files from cloud storages: Aspose internal storage, Amazon S3, Dropbox, Google Drive, Google Cloud Storage, Windows Azure Storage and FTP storage. However, there is a plan to add support for processing PDF documents from the request body (stream).
P.S: I'm a developer evangelist at Aspose.
const { PdfApi } = require("asposepdfcloud");
const { TextStamp }= require("asposepdfcloud/src/models/textStamp");
const { TextState }= require("asposepdfcloud/src/models/textState");
const { HorizontalAlignment }= require("asposepdfcloud/src/models/horizontalAlignment");
const { VerticalAlignment }= require("asposepdfcloud/src/models/verticalAlignment");
const { Rotation }= require("asposepdfcloud/src/models/rotation");
// Get Client Id and Client Secret from https://dashboard.aspose.cloud/
const pdfApi = new PdfApi("xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxx");
const fs = require('fs');
const name = "Test.pdf";
const pageNumber = 1;
const remoteTempFolder = "Temp";
const localTestDataFolder = "C:\\Temp";
const path = remoteTempFolder + "\\" + name;
var data = fs.readFileSync(localTestDataFolder + "\\" + name);
// Upload File
pdfApi.uploadFile(path, data).then((result) => {
console.log("Uploaded File");
}).catch(function(err) {
// Deal with an error
console.log(err);
});
// Add Text Stamp
const textState = new TextState();
textState.fontSize = 14;
textState.font = 'Arial';
const stamp = new TextStamp();
stamp.background = true;
stamp.leftMargin = 1;
stamp.rightMargin = 2;
stamp.topMargin = 3;
stamp.bottomMargin = 4;
stamp.horizontalAlignment = HorizontalAlignment.Center;
stamp.verticalAlignment = VerticalAlignment.Center;
stamp.opacity = 1;
stamp.rotate = Rotation.None;
stamp.rotateAngle = 0;
stamp.xIndent = 0;
stamp.yIndent = 0;
stamp.zoom = 1;
stamp.textAlignment = HorizontalAlignment.Center;
stamp.value = "Aspose.PDF Cloud";
stamp.textState = textState;
pdfApi.postPageTextStamps(name, pageNumber,[stamp], null, remoteTempFolder).then((result) => {
console.log(result.body.code);
}).catch(function(err) {
// Deal with an error
console.log(err);
});
//Download file
const localPath = "C:/Temp/textstamp.pdf";
pdfApi.downloadFile(path).then((result) => {
console.log(result.response.statusCode);
console.log(result.body.byteLength);
fs.writeFileSync(localPath, result.body);
console.log("File Downloaded");
}).catch(function(err) {
// Deal with an error
console.log(err);
});

Related

(Firebase Functions) Can I exclude certain files when importing them from the Storage bucket?

I'm going to delete old images using a scheduled function. I have already created thumbnail images for them, and I want to delete only the original images, not the thumbnails.
The following is part of my code.
Scheduled function:
exports.scheduledDeleteFile = functions
.region("asia-northeast3")
.pubsub.schedule("every 5 minutes")
.onRun(async (context) => {
try {
const bucket = firebase.storage().bucket();
// get storage file
const [filesArray] = await bucket.getFiles({
prefix: "chat/files",
});
totalCount = filesArray.length;
// variables with our settings to be reused below
const now = Date.now();
const time_ago = Date.now() - 180000; // 3min test
const TIMESTAMP_AGO = new Date(time_ago); // change datetime
const DELETE_OPTIONS = { ignoreNotFound: true }; // ??
functions.logger.log(
`Found ${totalCount} files that need to be checked.`
);
const deleteOldFileResults = await Promise.all(
filesArray.map(async (file) => {
let metadata;
try {
// Get the metadata for the file
[metadata] = await file.getMetadata();
const { temporaryHold, eventBasedHold, timeCreated } = metadata;
const TIME_CREATED = new Date(timeCreated);
const dispose = TIME_CREATED < TIMESTAMP_AGO;
// delete
if (dispose) {
await file.delete(DELETE_OPTIONS);
functions.logger.log("delete");
disposedCount++;
// ===================
// Update the chat file documents in Firestore
// (handled by a separate trigger function)
}
const activeHold = temporaryHold || eventBasedHold; // derived from the metadata above
return { file, metadata, disposed: dispose, skipped: activeHold };
} catch (error) {}
})
);
} catch (error) {}
});
My thumbnail image is in the same path as the original file. Is there an option to exclude certain files when listing them? (For example, excluding files whose names start with "thumb_".)
await bucket.getFiles({
prefix: "chat/files",
});
The following is the create-thumbnail function. I referred to the example provided by Firebase:
https://github.com/firebase/functions-samples/tree/main/2nd-gen/thumbnails
// thumb image name size
const THUMB_MAX_HEIGHT = 200;
const THUMB_MAX_WIDTH = 200;
// thumb image name
const THUMB_PREFIX = "thumb_";
exports.generateThumbnail = functions
.region("asia-northeast3")
.storage.object()
.onFinalize(async (object) => {
// custom metadata
const userKey = object.metadata["userKey"];
const chatroomKey = object.metadata["chatroomKey"];
const type = object.metadata["type"];
// File and directory paths.
const filePath = object.name;
const contentType = object.contentType; // This is the image MIME type
const fileDir = path.dirname(filePath);
const fileName = path.basename(filePath);
const thumbFilePath = path.normalize(
// ! if change path, error!
path.join(fileDir, `${THUMB_PREFIX}${fileName}`)
);
const tempLocalFile = path.join(os.tmpdir(), filePath);
const tempLocalDir = path.dirname(tempLocalFile);
const tempLocalThumbFile = path.join(os.tmpdir(), thumbFilePath);
if (!contentType.startsWith("image/")) {
return functions.logger.log("This is not an image.");
}
if (fileName.startsWith(THUMB_PREFIX)) {
return functions.logger.log("Already a Thumbnail.");
}
// Cloud Storage files.
const bucket = admin.storage().bucket(object.bucket);
const file = bucket.file(filePath);
const thumbFile = bucket.file(thumbFilePath);
const metadata = {
contentType: contentType,
};
await mkdirp(tempLocalDir);
// Download file from bucket.
await file.download({ destination: tempLocalFile });
functions.logger.log("The file has been downloaded to", tempLocalFile);
// Generate a thumbnail using ImageMagick.
await spawn(
"convert",
[
tempLocalFile,
"-thumbnail",
`${THUMB_MAX_WIDTH}x${THUMB_MAX_HEIGHT}>`,
tempLocalThumbFile,
],
{ capture: ["stdout", "stderr"] }
);
functions.logger.log("Thumbnail created at", tempLocalThumbFile);
// Uploading the Thumbnail.
await bucket.upload(tempLocalThumbFile, {
destination: thumbFilePath,
metadata: metadata,
});
functions.logger.log("Thumbnail uploaded to Storage at", thumbFilePath);
fs.unlinkSync(tempLocalFile);
fs.unlinkSync(tempLocalThumbFile);
const results = await Promise.all([
thumbFile.getSignedUrl({
action: "read",
expires: "03-01-2500",
}),
file.getSignedUrl({
action: "read",
expires: "03-01-2500",
}),
]);
functions.logger.log("Got Signed URLs.");
const thumbResult = results[0];
const originalResult = results[1];
const thumbFileUrl = thumbResult[0];
const fileUrl = originalResult[0];
await file.delete().then((value) => {
functions.logger.log("원본 삭제 완료");
});
// Add the URLs to the Database
await admin
.database()
.ref("images")
.push({ path: fileUrl, thumbnail: thumbFileUrl });
return functions.logger.log("Thumbnail URLs saved to database.");
});
Is there an option to exclude certain files when listing them? (For example, excluding files whose names start with "thumb_".)
No. You can filter out the objects you don't want in the code that iterates the results, as sketched below.
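For example, a minimal sketch (reusing the bucket, prefix and "thumb_" naming from the question) that drops the thumbnails from the listing before the deletion loop runs:

const path = require("path");

// Inside the scheduled function, after listing the objects:
const [filesArray] = await bucket.getFiles({ prefix: "chat/files" });

// Keep only the originals: skip every object whose file name starts with "thumb_".
const originalsOnly = filesArray.filter(
  (file) => !path.basename(file.name).startsWith("thumb_")
);
// ...then map over originalsOnly instead of filesArray.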

Create xlsx file, save in tmp and attach it in firebase mail

I've created a Google Cloud Function which, when it is triggered, creates an xlsx file with exceljs and attaches it to an email sent with firebase-send-mail.
This is my code:
(data is dummy for test)
exports.onEventReservCreate = functions
.region("europe-west2")
.firestore.document(
"foodCourts/{foodCourtId}/events/{eventId}/eventReservations/{evtResId}"
)
.onCreate(async (snap, context) => {
try {
const excel = require("exceljs")
//Creating New Workbook
var workbook = new excel.Workbook()
//Creating Sheet for that particular WorkBook
var sheet = workbook.addWorksheet("Sheet1")
// set path for file
const tempFilePath = path.join(os.tmpdir(), "excel.xlsx")
sheet.columns = [
{ key: "name", header: "name" },
{ key: "age", header: "age" },
]
var data = [
{ name: "Eddy", age: 24 },
{ name: "Paul", age: 24 },
]
//adding each in sheet
data.forEach(el => sheet.addRow(el))
// get the user email from firestore db
const userRef = db.collection(`users`).doc(uid)
const user = (await userRef.get()).data()
workbook.xlsx
.writeFile(tempFilePath)
.then(res => {
// sending email to user
const emailData = {
to: [user.email],
template: {
name: "reportEvents",
data: {
attachmentPath: tempFilePath,
attachmentName: "nome allegato",
date: dateFnsTz.format(
new Date(),
"dd/MM/yyyy - HH:mm"
),
},
},
}
return db.collection("email").doc().set(emailData)
})
.catch(err => console.log("ERROR --> ", err))
} catch (err) {
console.log(
`Error while sending - Error: ${err}`
)
}
})
In the functions log I get this error:
Error when delivering message=email/[id]: Error: ENOENT: no such file or directory, open '/tmp/excel.xlsx'
Why doesn't the /tmp folder exist?
Thanks
The temp folder has limited and restrictive access, and I am not sure how your email script is actually reading the file; it may just be a root folder issue or an instance issue. Instead, I would upload the file to Storage with a unique download link and send that within your email.
SOLVED!
This is my code.
It requires some packages such as fs, os and path.
This code creates an xlsx file, stores it in your bucket, retrieves its URL, and adds a new document to the "email" collection with a "url" property that is the URL of the xlsx file in the bucket, so the user who receives the mail can download it.
const admin = require("firebase-admin")
const fs = require("fs")
const path = require("path")
const os = require("os")
const excel = require("exceljs")
const batch = db.batch()
const bucket = admin.storage().bucket("your_bucket's_name")
//Creating New Workbook
var workbook = new excel.Workbook()
//Creating Sheet for that particular WorkBook
var sheet = workbook.addWorksheet("Sheet1")
//Header must be in below format
sheet.columns = [
{ key: "name", header: "name", width: 30 },
// ...other columns
]
// Data must look like below; the keys must match the column keys above.
sheet.addRow({ name: "John" })
const tempFilePath = path.join(os.tmpdir(), "excel.xlsx")
await workbook.xlsx.writeFile(tempFilePath)
const destinationName = `[your_filename].xlsx`
const result = await bucket.upload(tempFilePath, {
destination: destinationName,
})
result[0].getSignedUrl({
action: "read",
expires: "03-17-2025", // choose a date
}).then(([url]) => { // getSignedUrl resolves with an array containing the URL
const emailData = {
to: ["user_email"],
template: {
name: "your_template's_name",
data: {
url,
},
},
}
const emailRef = db.collection("email").doc()
batch.set(emailRef, emailData)
return batch.commit()
}).catch(err => console.log(err))

How do I solve this error in node.js: ENOENT: no such file or directory

Being new to JavaScript and Node.js, I ran into an error.
I'm attempting to scrape newspaper articles from a French newspaper, logging in with a password and querying with search terms.
This is the code I'm using:
const fs = require("fs");
const request = require("request");
const cheerio = require("cheerio");
const jsonTab = []; // We create our table
function writeFile() {
// Will write the json file
fs.writeFile("output.json", JSON.stringify(jsonTab, null, 4), (err) => {
console.log("File successfully written!");
});
}
// The URL of the advanced search feature with our keywords
const url = 'http://www.lemonde.fr/recherche/?keywords="Rap+"+"hip-hop"+"hip%20hop"+"rappeur"+"rappeurs"+"raps"+"rappers"&page_num=1&operator=or&exclude_keywords=&qt=recherche_texte_title&author=&period=custom_date&start_day=01&start_month=01&start_year=1970&end_day=20&end_month=09&end_year=2017&sort=asc';
/* The first request call, our goal here is to get the number of results and then
to calculate the number of pages */
request(url, (error, response, html) => {
const $ = cheerio.load(html);
// All the variables we will use later
let number;
let description;
let date;
let title;
let link;
if (!error) {
$(".bg_gris_clair").filter(() => {
/* We want to select all the HTML
elements with the class ".bg_gris_clair" (and we already know there is
only one) */
const data = $(this);
const str = data.children("strong").text().trim();
number = parseInt(str.substring(0, str.indexOf("e")).replace(/\s/g, ""), 10);
});
}
let count = 1;
for (let i = 1; i <= number / 10; i++) {
const urlPerPage = 'http://www.lemonde.fr/recherche/?keywords="Rap+"+"hip-hop"+"hip%20hop"+"rappeur"+"rappeurs"+"raps"+"rappers"&page_num=' + i + "&operator=or&exclude_keywords=&qt=recherche_texte_title&author=&period=custom_date&start_day=01&start_month=01&start_year=1970&end_day=20&end_month=09&end_year=2017&sort=asc";
request(urlPerPage, (err, response2, html2) => {
if (!err) {
const $ = cheerio.load(html2);
$(".grid_11.omega.resultat").filter(() => {
const json = {
date: "",
title: "",
description: "",
url: ""
};
const data = $(this);
title = data.children("h3").children("a").text().trim();
link = "http://lemonde.fr" + data.children("h3").children("a").attr("href").trim();
description = data.children("p").text().trim();
const dateStr = data.children("span").text();
date = dateStr.replace(/.+?(?=\d)/, "");
json.title = title;
json.url = link;
json.description = description;
json.date = date;
jsonTab.push(json);
});
} else if (err) {
console.log(err);
}
count += 1;
// Write the file once we iterated through all the pages.
if (count === parseInt(number / 10, 10)) {
writeFile();
}
});
}
});
const fs = require("fs");
const request = require("request");
const cheerio = require("cheerio");
// Prepare all the variables needed later
let count = 0;
let timeout = 0;
const id = "myusername";
const mdp = "mypassword";
let obj;
// The URLs we will scrape from
const connexionUrl = "https://secure.lemonde.fr/sfuser/connexion";
// Will write an "output.json" file
function writeFile() {
fs.writeFile("output.json", JSON.stringify(obj, null, 4), (err) => {
console.log(
"File successfully written! - Check your project directory for the output.json file"
);
});
}
// creating a clean jar to store the cookies
const j = request.jar();
// First Get Request Call
request(
{
url: connexionUrl,
jar: j
},
(err, httpResponse, html) => {
const $ = cheerio.load(html);
// We use Cheerio to load the HTML and be able to find the connection__token
const token = $("#connection__token")[0].attribs.value; // here is the connection__token
// Construction of the form required in the POST request to login
const form = {
"connection[mail]": id,
"connection[password]": mdp,
"connection[stay_connected]": 1,
"connection[save]": "",
"connection[_token]": token
};
// POST REQUEST to Log IN. Same url with "request headers" and the complete form.
request.post(
{
url: connexionUrl,
jar: j,
headers: {
Accept:
"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "fr-FR,fr;q=0.8,en-US;q=0.6,en;q=0.4",
"Cache-Control": "no-cache",
"Content-Type": "application/x-www-form-urlencoded",
Origin: "http://secure.lemonde.fr/",
Host: "secure.lemonde.fr",
"Upgrade-Insecure-Requests": 1,
"User-Agents":
"Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0",
Connection: "keep-alive",
Pragma: "no-cache",
Referer: "https://secure.lemonde.fr/sfuser/connexion"
},
form: form
},
(error, response, body) => {
// WE ARE CONNECTED :D
/* Second GET request call : this time, we use the response of the POST
request to request the right URL */
request(
{
url: response.headers.location,
jar: j
},
(err, httpResponse, html2) => {
const json = fs.readFileSync("./firstStep.json"); // Load the JSON created in step one
obj = JSON.parse(json); // We create our JSON in a usable javascript object
// forEach loop to iterate through all the object and request each link
obj.forEach((e) => {
let articleUrl = e.url;
/* We use a setTimeout to be sure that all the requests are performed
one by one and not all at the same time */
setTimeout(() => {
request(
{
url: articleUrl,
jar: j
},
(error1, httpResponse, html3) => {
if (!error1) {
const $ = cheerio.load(html3); // load the HTML of the article page
$(".contenu_article.js_article_body").filter(() => {
const data = $(this);
// get the content, remove all the new lines (better for Excel)
let text = data
.text()
.trim()
.replace(/\n/g, "\t");
e.text = text; // push the content in the table
});
$(".txt3.description-article").filter(() => {
const data = $(this);
const description = data
.text()
.trim()
.replace(/\n/g, "\t");
e.description = description;
});
}
}
);
count += 1;
// Write a new JSON file once we get the content of all the articles
if (count === obj.length) {
writeFile();
}
}, timeout);
timeout += 50; // increase the timeout length each time
});
}
);
}
);
}
);
Running step 3 throws an error:
Error: ENOENT: no such file or directory, open './firstStep.json'
The tutorial can be found here: https://www.freecodecamp.org/news/how-i-scraped-7000-articles-from-a-newspaper-website-using-node-1309133a5070/
I am more used to R, so this is a new playing field for me.
I suspect the problem is straightforward and related to the directory.
Thank you
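One likely cause, noted here as a guess rather than part of the original post: fs.readFileSync resolves a relative path such as './firstStep.json' against the directory node was started from, not against the script's own directory. A minimal sketch, assuming firstStep.json sits next to the step-3 script:

const fs = require("fs");
const path = require("path");

// Build an absolute path anchored at this script's directory,
// so the lookup no longer depends on where `node` was launched from.
const jsonPath = path.join(__dirname, "firstStep.json");
const obj = JSON.parse(fs.readFileSync(jsonPath));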

Each then() should return a value or throw when I use Cloud Function

This is what I am trying to do with a Cloud Function:
When an audio file is uploaded to Firebase Storage it will be converted into mp3 format. After that, I want to get a transcript using Cloud Speech-to-Text.
But I keep getting this error message:
Each then() should return a value or throw
I am not familiar with JavaScript.
Here is my entire Cloud Function code:
'use strict';
const functions = require('firebase-functions');
const admin = require('firebase-admin');
const gcs = require('@google-cloud/storage')();
const speech = require('@google-cloud/speech');
const path = require('path');
const os = require('os');
const fs = require('fs');
const ffmpeg = require('fluent-ffmpeg');
const ffmpeg_static = require('ffmpeg-static');
admin.initializeApp(functions.config().firebase);
var db = admin.firestore();
function promisifyCommand(command) {
return new Promise((resolve, reject) => {
command
.on('end', () => {
resolve();
})
.on('error', (error) => {
reject(error);
})
.run();
});
}
/**
* When an audio is uploaded in the Storage bucket We generate a mono channel audio automatically using
* node-fluent-ffmpeg.
*/
exports.generateMonoAudio = functions.storage.object().onFinalize((object) => {
const fileBucket = object.bucket; // The Storage bucket that contains the file.
const filePath = object.name; // File path in the bucket.
const contentType = object.contentType; // File content type.
const metageneration = object.metageneration; // Number of times metadata has been generated. New objects have a value of 1.
// Exit if this is triggered on a file that is not an audio.
if (!contentType.startsWith('audio/')) {
console.log('This is not an audio.');
return null;
}
// Get the file name.
const fileName = path.basename(filePath);
// Exit if the audio is already converted.
if (fileName.endsWith('_output.mp3')) {
console.log('Already a converted audio.');
return null;
}
// Download file from bucket.
const bucket = gcs.bucket(fileBucket);
const tempFilePath = path.join(os.tmpdir(), fileName);
// We add a '_output.mp3' suffix to target audio file name. That's where we'll upload the converted audio.
const targetTempFileName = fileName.replace(/\.[^/.]+$/, '') + '_output.mp3';
const targetTempFilePath = path.join(os.tmpdir(), targetTempFileName);
const targetStorageFilePath = path.join(path.dirname(filePath), targetTempFileName);
return bucket.file(filePath).download({
destination: tempFilePath,
}).then(() => {
console.log('Audio downloaded locally to', tempFilePath);
// Convert the audio to mono channel using FFMPEG.
let command = ffmpeg(tempFilePath)
.setFfmpegPath(ffmpeg_static.path)
.audioChannels(2)
.audioFrequency(32000)
.format('mp3')
.output(targetTempFilePath);
command = promisifyCommand(command);
return command;
}).then(() => {
console.log('Output audio created at', targetTempFilePath);
// Uploading the audio.
return bucket.upload(targetTempFilePath, {destination: targetStorageFilePath});
}).then(() => {
console.log('Output audio uploaded to', targetStorageFilePath);
// Once the audio has been uploaded delete the local file to free up disk space.
fs.unlinkSync(tempFilePath);
fs.unlinkSync(targetTempFilePath);
getTextFromAudio(targetStorageFilePath) //#### HERE! ERROR
return console.log('Temporary files removed.', targetTempFilePath);
});
});
function getTextFromAudio(paramTargetStorageFilePath) {
// Creates a client
const client = new speech.SpeechClient();
// Reads a local audio file and converts it to base64
const file = fs.readFileSync(paramTargetStorageFilePath);
const audioBytes = file.toString('base64');
// The audio file's encoding, sample rate in hertz, and BCP-47 language code
const audio = {
content: audioBytes,
};
const config = {
encoding: 'LINEAR16',
sampleRateHertz: 16000,
languageCode: 'en-US',
};
const request = {
audio: audio,
config: config,
};
// Detects speech in the audio file
return client
.recognize(request)
.then(data => {
const response = data[0];
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
console.log(`Transcription: ${transcription}`);
}).catch(err => {
console.error('ERROR:', err);
});
}
function postTranscript(transcriptText) {
var docRef = db.collection('users').doc('alovelace');
var setAda = docRef.set({
first: transcriptText
});
}
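This message comes from the promise/always-return lint rule: every then() callback should return a value (usually the next promise) or throw. In the code above, the last .then() calls getTextFromAudio(targetStorageFilePath) without returning it. A hedged sketch of how that final callback could look (same names as above):

.then(() => {
  console.log('Output audio uploaded to', targetStorageFilePath);
  // Once the audio has been uploaded, delete the local files to free up disk space.
  fs.unlinkSync(tempFilePath);
  fs.unlinkSync(targetTempFilePath);
  console.log('Temporary files removed.', targetTempFilePath);
  // Return the transcription promise so the chain (and the linter) sees it.
  return getTextFromAudio(targetStorageFilePath);
});

Separately, note that getTextFromAudio reads a local file with fs.readFileSync, so passing it the Storage path targetStorageFilePath rather than a local temp path may be a follow-up issue.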

Convert image path to blob react native

Problem
I am trying to create an app with React Native and Firebase. One of the features I would like for this app is the ability to upload images. I am having some trouble uploading the images to Firebase Storage, though. I am using Expo's image picker to get the path of the image the user wants to upload, but once I have the path I don't know how to convert it to something I can upload to Firebase.
Can somebody help me convert the path of an image to something I can upload to Firebase Storage with React Native?
What I've tried
I tried using:
_pickImage = async () => {
let result = await ImagePicker.launchImageLibraryAsync({
MediaTypeOptions: 'Images',
quality: 0.4,
_uploadAsByteArray = async (pickerResultAsByteArray, progressCallback) => {
try {
var metadata = {
contentType: 'image/jpeg',
};
var storageRef = firebase.storage().ref();
var ref = storageRef.child('images/'+expoID+'/'+this.state.time)
let uploadTask = ref.put(pickerResultAsByteArray, metadata)
uploadTask.on('state_changed', function (snapshot) {
progressCallback && progressCallback(snapshot.bytesTransferred / snapshot.totalBytes)
var progress = (snapshot.bytesTransferred / snapshot.totalBytes) * 100;
console.log('Upload is ' + progress + '% done');
}, function (error) {
console.log("in _uploadAsByteArray ", error)
}, function () {
var downloadURL = uploadTask.snapshot.downloadURL;
console.log("_uploadAsByteArray ", uploadTask.snapshot.downloadURL)
this.setState({imageUploaded:true})
});
} catch (ee) {
console.log("when trying to load _uploadAsByteArray ", ee)
}
}
convertToByteArray = (input) => {
var binary_string = this.atob(input);
var len = binary_string.length;
var bytes = new Uint8Array(len);
for (var i = 0; i < len; i++) {
bytes[i] = binary_string.charCodeAt(i);
}
return bytes
}
atob = (input) => {
const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=';
let str = input.replace(/=+$/, '');
let output = '';
if (str.length % 4 == 1) {
throw new Error("'atob' failed: The string to be decoded is not correctly encoded.");
}
for (let bc = 0, bs = 0, buffer, i = 0;
buffer = str.charAt(i++);
~buffer && (bs = bc % 4 ? bs * 64 + buffer : buffer,
bc++ % 4) ? output += String.fromCharCode(255 & bs >> (-2 * bc & 6)) : 0
) {
buffer = chars.indexOf(buffer);
}
return output;
}
uploadImage(bsfdata){
this.setState({imageUploaded:false})
this._uploadAsByteArray(this.convertToByteArray(bsfdata), (progress) => {
this.setState({ progress:progress })
})
}
base64:true,
});
/* if (!result.cancelled) {
this.setState({ image: result.uri });
let formData = new FormData();
formData.append('photo', {
uri,
name: `photo.${fileType}`,
type: `image/${fileType}`,
});}*/
this.uploadImage(result.base64);
};
}
I've tried it with the commented code added, which doesn't upload anything, and I've tried it with the code as it is now, which gives me the error "Can currently only create a Blob from other Blobs", and the upload progress never gets above 0%.
If you are using Expo (>= 26), then you can do it easily with the following lines of code:
uploadImage = async(imageUri) => {
const response = await fetch(imageUri);
const blob = await response.blob();
var ref = firebase.storage().ref().child("image.jpg");
return ref.put(blob);
}
Reference: https://youtu.be/KkZckepfm2Q
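For reference, a hedged usage sketch of the function above with Expo's image picker (the picker call and result shape follow the original question; pickAndUpload is a hypothetical wrapper):

// Hypothetical caller: pick an image, then pass its local uri to uploadImage().
pickAndUpload = async () => {
  const result = await ImagePicker.launchImageLibraryAsync({ quality: 0.4 });
  if (!result.cancelled) {
    await this.uploadImage(result.uri);
    console.log('Upload finished');
  }
};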
Refer to this link: https://github.com/dailydrip/react-native-firebase-storage/blob/master/src/App.js#L43-L69
The following block of code is working fine:
uploadImage(uri, mime = 'application/octet-stream') {
return new Promise((resolve, reject) => {
const uploadUri = Platform.OS === 'ios' ? uri.replace('file://', '') : uri
let uploadBlob = null
const imageRef = FirebaseClient.storage().ref('images').child('image_001')
fs.readFile(uploadUri, 'base64')
.then((data) => {
return Blob.build(data, { type: `${mime};BASE64` })
})
.then((blob) => {
uploadBlob = blob
return imageRef.put(blob, { contentType: mime })
})
.then(() => {
uploadBlob.close()
return imageRef.getDownloadURL()
})
.then((url) => {
resolve(url)
})
.catch((error) => {
reject(error)
})
})
}
You need to install the rn-fetch-blob module:
npm install --save rn-fetch-blob
Then do the following:
import RNFetchBlob from 'rn-fetch-blob';
const Blob = RNFetchBlob.polyfill.Blob;
const fs = RNFetchBlob.fs;
window.XMLHttpRequest = RNFetchBlob.polyfill.XMLHttpRequest;
window.Blob = Blob;
function uploadImage(path) {
const imageFile = RNFetchBlob.wrap(path);
// 'path/to/image' is where you wish to put your image in
// the database, if you would like to put it in the folder
// 'subfolder' inside 'mainFolder' and name it 'myImage', just
// replace it with 'mainFolder/subfolder/myImage'
const ref = firebase.storage().ref('path/to/image');
var uploadBlob = null;
Blob.build(imageFile, { type: 'image/jpg;' })
.then((imageBlob) => {
uploadBlob = imageBlob;
return ref.put(imageBlob, { contentType: 'image/jpg' });
})
.then(() => {
uploadBlob.close();
return ref.getDownloadURL();
})
.then((url) => {
// do something with the url if you wish to
})
.catch(() => {
dispatch({
type: UPDATE_PROFILE_INFO_FAIL,
payload: 'Unable to upload profile picture, please try again'
});
});
}
Please do ask if there's any part of the code that you don't understand. To upload multiple images, simply wrap this code in a for loop; or, if you want to make sure that every image is uploaded without any error, use promises, as sketched below.
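A minimal sketch of that multi-image case, assuming the promise-returning uploadImage(uri, mime) from the earlier answer and an array of local image paths:

// Hypothetical helper: upload every image and resolve once all of them are done.
function uploadAllImages(uris) {
  return Promise.all(uris.map((uri) => uploadImage(uri, 'image/jpg')))
    .then((urls) => {
      // urls holds the download URL that uploadImage() resolves for each file.
      return urls;
    });
}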
Not sure who this might help, but if you're using MediaLibrary to load images from the gallery, the uri comes in the format uri = file:///storage/emulated/0/DCIM/Camera/filename.jpg
In this case, using fetch(uri) didn't help me get the blob.
But if you use fetch(uri.replace("file:///","file:/")) and then follow @sriteja Sugoor's answer, you'll be able to upload the file blob.
const Blob = RNFetchBlob.polyfill.Blob;
const fs = RNFetchBlob.fs;
let uploadBlob;
await fs
.readFile(params?.file.path, 'base64')
.then((data) => {
return Blob.build(data, {type: `BASE64`});
})
.then((blob) => {
uploadBlob = blob;
console.log(uploadBlob, 'uploadBlob');
});
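To actually push the blob built above to Storage, the put()/getDownloadURL() calls from the rn-fetch-blob answer earlier would follow. A hedged continuation sketch (the storage path is only an example):

// Hypothetical continuation: upload the blob and fetch its download URL.
const imageRef = firebase.storage().ref('images').child('image_001'); // example path
imageRef
  .put(uploadBlob, { contentType: 'image/jpg' })
  .then(() => {
    uploadBlob.close(); // release the polyfill blob, as in the answer above
    return imageRef.getDownloadURL();
  })
  .then((url) => console.log('Uploaded, download URL:', url))
  .catch((err) => console.log(err));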
