Unable to load a file into the Adobe PDF Embed SDK through my Blazor app.
I currently download the file as a MemoryStream from Azure Blob Storage, and I want to stream it through JS interop to the PDF Embed viewer.
Current implementation:
using var streamRef = new DotNetStreamReference(stream: file, leaveOpen: false);
await JSRuntime.InvokeVoidAsync("LoadViewer", streamRef);
The JS implementation:
async function LoadViewer(streamRef) {
    console.log(streamRef);
    const data = await streamRef.arrayBuffer();
    console.log(data);
    let promise = new Promise(function (resolve, reject) {
        resolve(data);
    });
    var adobeDCView = new AdobeDC.View({ clientId: "xyz", divId: "adobe-dc-view" });
    adobeDCView.previewFile({
        content: { promise: promise },
        metaData: { fileName: "Test.pdf" }
    }, { showAnnotationTools: true, showDownloadPDF: true, showPrintPDF: true });
}
I have two functions in separate files to split up the workflow.
const https = require("https");
const fs = require("fs");

const download = function (url) {
    const file = fs.createWriteStream("./test.png");
    const request = https.get(url, function (response) {
        response.pipe(file);
    });
};
This function in my fileHelper.js is supposed to take a URL pointing at an image and save it locally as test.png.
// Assumes the googleapis client and the authorize() helper from the
// Google Drive Node.js quickstart are available in this file.
const { google } = require("googleapis");

function uploadFile(filePath) {
    fs.readFile('credentials.json', (err, content) => {
        if (err) return console.log('Error loading client secret file:', err);
        // Authorize a client with credentials, then call the Google Drive API.
        authorize(JSON.parse(content), function (auth) {
            const drive = google.drive({ version: 'v3', auth });
            const fileMetadata = {
                'name': 'testphoto.png'
            };
            const media = {
                mimeType: 'image/png',
                body: fs.createReadStream(filePath)
            };
            drive.files.create({
                resource: fileMetadata,
                media: media,
                fields: 'id'
            }, (err, file) => {
                if (err) {
                    // Handle error
                    console.error(err);
                } else {
                    console.log('File Id: ', file.id);
                }
            });
        });
    });
}
This function in my googleDriveHelper.js is supposed to take the given filePath and upload that stream into my Google Drive. These two functions work on their own, but https.get is asynchronous: if I call googleDriveHelper.uploadFile(filePath) right after the download, the file hasn't finished writing yet, so a blank file gets uploaded to my Drive instead.
I want to find a way so that when fileHelper.download(url) is called, the downloaded file is automatically uploaded into my Drive.
I also don't know if there is a way to create a readStream directly from the download function to the upload function, so I can avoid having to save the file locally before uploading it.
I believe your goal is as follows.
You want to upload a file retrieved from a URL to Google Drive.
When you download the file from the URL, you want to upload it to Google Drive without creating a local file.
You want to achieve this using googleapis with Node.js.
You have already been able to upload a file using the Drive API.
For this, how about this answer?
Modification points:
In the download function, the retrieved buffer is converted to a stream, and the stream data is returned.
In the uploadFile function, the retrieved stream data is used for uploading.
When the file ID is retrieved from the response value of the Drive API, please use file.data.id instead of file.id.
With the above modifications, the file downloaded from the URL can be uploaded to Google Drive without creating a local file.
Modified script:
Please modify your script as follows.
download()
// Uses the `request` package (now deprecated) for the GET.
const request = require("request");
const stream = require("stream");

const download = function (url) {
    return new Promise(function (resolve, reject) {
        request(
            {
                method: "GET",
                url: url,
                encoding: null, // return the body as a raw Buffer
            },
            (err, res, body) => {
                if (err || res.statusCode != 200) { // reject on an error OR a non-200 status
                    reject(err);
                    return;
                }
                const bs = new stream.PassThrough();
                bs.end(body);
                resolve(bs);
            }
        );
    });
};
uploadFile()
function uploadFile(data) { // <--- Modified
    fs.readFile("drive_credentials.json", (err, content) => {
        if (err) return console.log("Error loading client secret file:", err);
        authorize(JSON.parse(content), function (auth) {
            const drive = google.drive({ version: "v3", auth });
            const fileMetadata = {
                name: "testphoto.png",
            };
            const media = {
                mimeType: "image/png",
                body: data, // <--- Modified
            };
            drive.files.create(
                {
                    resource: fileMetadata,
                    media: media,
                    fields: "id",
                },
                (err, file) => {
                    if (err) {
                        console.error(err);
                    } else {
                        console.log("File Id: ", file.data.id); // <--- Modified
                    }
                }
            );
        });
    });
}
For testing
For example, when the above scripts are tested, how about the following script?
// fileHelper and googleDriveHelper are the asker's own modules
// exporting download() and uploadFile().
async function run() {
    const url = "###";
    const data = await fileHelper.download(url);
    googleDriveHelper.uploadFile(data);
}
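The question also asked whether the download can be piped straight into the upload. As a minimal sketch (not part of the original answer; it assumes the same authorize() helper and googleapis setup as above), the HTTP response stream itself can be passed as media.body, since drive.files.create accepts a readable stream:

const https = require("https");
const { google } = require("googleapis");

// Hypothetical helper: pipes the HTTP response directly into the Drive upload,
// so the file body is never buffered in memory as a whole.
function downloadAndUpload(url, auth) {
    https.get(url, (response) => {
        const drive = google.drive({ version: "v3", auth });
        drive.files.create(
            {
                resource: { name: "testphoto.png" },
                media: { mimeType: "image/png", body: response }, // response is a readable stream
                fields: "id",
            },
            (err, file) => {
                if (err) return console.error(err);
                console.log("File Id: ", file.data.id);
            }
        );
    });
}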
References:
Class: stream.PassThrough
google-api-nodejs-client
I'm totally new to Azure, and I would like to create an Azure Function which reads the content of the file.json blob in an Azure Storage container.
Folder structure:
Storage account name: storageaccounttest
Container name: test
File name: file.json
File.json:
[
    {
        "name": "Kate",
        "age": "28"
    },
    {
        "name": "John",
        "age": "30"
    }
]
CORS on the storage account: GET enabled.
Environment variables added: process.env.AZURE_STORAGE_NAME, process.env.AZURE_STORAGE_KEY, and process.env.AZURE_CONNECTION_STRING.
I'm using Visual Studio Code to deploy the function.
I installed the dependencies locally:
"dependencies": {
"azure-storage": "^2.10.3",
"dotenv": "^8.1.0"
}
I chose the JavaScript -> HttpTrigger function -> anonymous options.
I'm using the getBlobToText function.
My index.js:
var storage = require('azure-storage');
var blobService = storage.createBlobService();
var containerName = 'test';
var blobName = 'file.json';
module.exports = blobService.getBlobToText(
    containerName,
    blobName,
    function(err, blobContent) {
        if (err) {
            console.error("Couldn't download blob");
            console.error(err);
        } else {
            console.log("Successfully downloaded blob");
            console.log(blobContent);
        }
    });
The function deploys successfully, but I'm not able to see any results.
After starting, the function finishes with status 500, Internal Server Error, and the console shows "No new trace in the past 1 min(s)".
What did I do wrong?
Just a summary to help others who run into the same issue.
I think you were missing the use of context.res to pass the blobContent value to the output response, as described in the official document Azure Functions JavaScript developer guide.
Here is my sample code, using a Promise, to solve it.
var azure = require('azure-storage');
// createBlobService() with no arguments reads the connection info from the
// AZURE_STORAGE_CONNECTION_STRING (or AZURE_STORAGE_ACCOUNT/AZURE_STORAGE_ACCESS_KEY)
// environment variables.
var blobService = azure.createBlobService();
var containerName = 'test';
var blobName = 'file.json';

// Wrap the callback-style getBlobToText in a Promise so it can be awaited.
async function getBlobContent(containerName, blobName) {
    return new Promise((resolve, reject) => {
        blobService.getBlobToText(containerName, blobName, function(err, blobContent) {
            if (err) {
                reject(err);
            } else {
                resolve(blobContent);
            }
        });
    });
}

module.exports = async function (context, req) {
    await getBlobContent(containerName, blobName).then(
        function(content) {
            context.res = {
                headers: { "Content-Type": "application/json" },
                body: content
            };
        }, function(error) {
            context.res = {
                status: 400,
                body: error
            };
        }
    );
};
It works, returning the JSON content of file.json in the HTTP response.
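For a quick check from Node.js, a request like this hedged sketch should print the JSON array from file.json (the function URL below is a placeholder, not from the original post):

const https = require('https');

// Replace the placeholder with your deployed function's endpoint.
https.get('https://<your-function-app>.azurewebsites.net/api/<function-name>', (res) => {
    let body = '';
    res.on('data', (chunk) => (body += chunk));
    res.on('end', () => console.log(JSON.parse(body)));
});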
I'm trying to capture audio from a user's microphone and send it to a server, where it will be sent to Google's Speech-to-Text API for transcription. I'm accessing the audio using navigator.mediaDevices.getUserMedia(), which I capture using a MediaRecorder object. When I run the following code I get an error from Google that says "INVALID_ARGUMENT: RecognitionAudio not set." I'm not sure how to set it, as the relevant page (https://cloud.google.com/speech-to-text/docs/reference/rest/v1/RecognitionAudio) doesn't say much about it.
Relevant client-side code that runs after the user presses stop:
mediaRecorder.onstop = function(e) {
    var blob = new Blob(chunks, { type: 'audio/flac' });
    var reader = new FileReader();
    reader.readAsBinaryString(blob);
    reader.onloadend = function() {
        base64data = reader.result;
        writeBinaryFile(base64data)
    }
    chunks = []; // array to store recording
}
// asynchronous binary file write
function writeBinaryFile(content) {
    $.ajax({
        type: "POST",
        url: "/voice_api",
        data: { content: content }
    }).done(function(data) {
        // TODO: display success status somewhere
    });
}
Server-side code running on Node.js:
app.post("/voice_api", (req, res) => {
const audioBytes = req.body;
// The audio file's encoding, sample rate in hertz, and BCP-47 language code
const audio = {
content: audioBytes,
};
const config = {
languageCode: 'en-US'
};
const request = {
audio: audio,
config: config
};
// Detects speech in the audio file
client
.recognize(request)
.then(data => {
const response = data[0];
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
console.log(`Transcription: ${transcription}`);
res.send(transcription);
})
.catch(err => {
console.error('ERROR:', err);
});
});
If I change the line const audioBytes = req.body; to const audioBytes = req.body.content;, I get an error message saying the encoding is bad. I'm not sure whether I'm encoding it properly on the client side or accessing it properly on the server side. Any help would be appreciated. Thanks!
Try specifying the audio format explicitly in the recognition config:

const config = {
    // "enableAutomaticPunctuation": true,
    encoding: "LINEAR16",
    languageCode: "en-US",
    model: "default",
    sampleRateHertz: 44100,
    audioChannelCount: 2,
    enableSeparateRecognitionPerChannel: true,
};
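On the client-side encoding: the REST RecognitionAudio.content field expects base64-encoded bytes, while the question's readAsBinaryString produces a raw binary string. A hedged sketch of the onstop handler using readAsDataURL instead (the endpoint and variable names follow the question's own code):

mediaRecorder.onstop = function(e) {
    var blob = new Blob(chunks, { type: 'audio/flac' });
    var reader = new FileReader();
    reader.onloadend = function() {
        // reader.result is a data URL; strip the "data:...;base64," prefix
        var base64data = reader.result.replace(/^.*,/, '');
        writeBinaryFile(base64data);
    };
    reader.readAsDataURL(blob);
    chunks = []; // reset the recording buffer
};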
Background
I want to record students speaking and then upload their recordings automatically to Google drive.
Current state of affairs
I have client-side code which can produce a blob containing an .ogg recording.
var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
I can send the blob to a Google Apps Script standalone script in a doPost() request. In this case I'm using axios with a promise.
axios.post("https://script.google.com/macros/s/SOME_ID/exec", blob)
    .then((response) => {
        console.log(response)
    }).catch(error => {
        console.log(error)
    })
The blob arrives at the standalone script.
The standalone Google Apps Script
function doPost(e) {
    var params = JSON.stringify(e.parameters);
    createNewSoundFile(params)
    return ContentService.createTextOutput(params);
}

function createNewSoundFile(blob) {
    var title = 'Was created from a recording'
    var folderId = 'SOME_FOLDER_ID'
    var resource = {
        title: title,
        parents: [
            {
                "id": folderId,
                "kind": "drive#fileLink"
            }
        ],
        mimeType: 'application/vnd.google-apps.audio',
    };
    try {
        var newfile = Drive.Files.insert(resource, blob).id
    } catch (e) {
        // Send error to Google sheet
        // Exception: The mediaData parameter only supports blob types for uploads.
    }
}
Problem
The above Apps Script code says the blob is not a supported media type.
Question
How can I create a new .ogg file in Google Drive from an .ogg blob created in the browser?
How about these modifications?
Modification points:
JavaScript side:
Encode the blob to base64, and send it as string data.
GAS side:
Decode the base64 to a blob, and save it as an .ogg file with the mimeType of audio/ogg.
Save the file as audio/ogg.
In my environment, it couldn't be converted from audio/ogg to application/vnd.google-apps.audio.
Modified script: JavaScript side
From:
axios.post("https://script.google.com/macros/s/SOME_ID/exec", blob)
    .then((response) => {
        console.log(response)
    }).catch(error => {
        console.log(error)
    })
To:
var reader = new FileReader();
reader.readAsDataURL(blob);
reader.onloadend = function() {
    base64 = reader.result.replace(/^.*,/, "");
    let data = new URLSearchParams();
    data.append('data', base64);
    axios.post(
        "https://script.google.com/macros/s/SOME_ID/exec",
        data,
        { headers: { 'Content-Type': 'application/x-www-form-urlencoded' } }
    ).then((response) => {
        console.log(response)
    }).catch(error => {
        console.log(error)
    });
}
Modified script: GAS side
To:
function createNewSoundFile(e) { // receives the whole doPost event object
    var data = Utilities.base64Decode(e.parameters.data); // Added
    var blob = Utilities.newBlob(data); // Added
    var title = 'Was created from a recording'
    var folderId = 'SOME_FOLDER_ID'
    var resource = {
        title: title,
        parents: [
            {
                "id": folderId,
                // "kind": "drive#fileLink" // I didn't know whether this is required.
            }
        ],
        mimeType: "audio/ogg", // Modified
    };
    try {
        var newfile = Drive.Files.insert(resource, blob).id
    } catch (err) {
        // Send error to Google sheet
        // Exception: The mediaData parameter only supports blob types for uploads.
    }
}
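Since createNewSoundFile now reads e.parameters.data itself, doPost presumably needs to hand over the whole event object rather than the stringified parameters. This adjustment is my assumption, as the original answer doesn't show the modified doPost:

function doPost(e) {
    createNewSoundFile(e); // pass the event object straight through
    return ContentService.createTextOutput(JSON.stringify(e.parameters));
}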
Note:
In my environment, {headers: {'Content-Type': 'application/x-www-form-urlencoded'}} was required on the JavaScript side. If it is not required in your environment, please remove it.
If I misunderstand your question, I'm sorry.
So I've created this nice little Lambda, which runs great locally but not so much out in the wild.
The Lambda takes an event with HTML in the event source, converts that HTML to a PDF (using the html-pdf node module), passes that PDF to an S3 bucket, and then hands back a signed URL that expires in 60 seconds.
Or at least that is what ought to happen (again, it works locally). When testing on Lambda, I get the following error:
{
    "errorMessage": "spawn EACCES",
    "errorType": "Error",
    "stackTrace": [
        "exports._errnoException (util.js:870:11)",
        "ChildProcess.spawn (internal/child_process.js:298:11)",
        "Object.exports.spawn (child_process.js:362:9)",
        "PDF.PdfExec [as exec] (/var/task/node_modules/html-pdf/lib/pdf.js:87:28)",
        "PDF.PdfToFile [as toFile] (/var/task/node_modules/html-pdf/lib/pdf.js:83:8)",
        "/var/task/index.js:72:43",
        "Promise._execute (/var/task/node_modules/bluebird/js/release/debuggability.js:272:9)",
        "Promise._resolveFromExecutor (/var/task/node_modules/bluebird/js/release/promise.js:473:18)",
        "new Promise (/var/task/node_modules/bluebird/js/release/promise.js:77:14)",
        "createPDF (/var/task/index.js:71:19)",
        "main (/var/task/index.js:50:5)"
    ]
}
Here's the code itself (not compiled; there's a handy gulp task for that):
if (typeof regeneratorRuntime === 'undefined') {
    require("babel/polyfill")
}

import fs from 'fs'
import pdf from 'html-pdf'
import md5 from 'md5'
import AWS from 'aws-sdk'
import Promise from 'bluebird'
import moment from 'moment'

const tempDir = '/tmp'
const config = require('./config')
const s3 = new AWS.S3()
export const main = (event, context) => {
    console.log("Got event: ", event)
    AWS.config.update({
        accessKeyId: config.awsKey,
        secretAccessKey: config.awsSecret,
        region: 'us-east-1'
    })
    const filename = md5(event.html) + ".pdf"
    createPDF(event.html, filename).then(function(result) {
        uploadToS3(filename, result.filename).then(function(result) {
            getOneTimeUrl(filename).then(function(result) {
                return context.succeed(result)
            }, function(err) {
                console.log(err)
                return context.fail(err)
            })
        }, function(err) {
            console.log(err)
            return context.fail(err)
        })
    }, function(err) {
        console.log(err)
        return context.fail(err)
    })
}
const createPDF = (html, filename) => {
    console.log("Creating PDF")
    var promise = new Promise(function(resolve, reject) {
        pdf.create(html).toFile(filename, function(err, res) {
            if (err) {
                reject(err)
            } else {
                resolve(res)
            }
        })
    })
    return promise
}
const uploadToS3 = (filename, filePath) => {
    console.log("Pushing to S3")
    var promise = new Promise(function(resolve, reject) {
        var fileToUpload = fs.createReadStream(filePath)
        var expiryDate = moment().add(1, 'm').toDate()
        var uploadParams = {
            Bucket: config.pdfBucket,
            Key: filename,
            Body: fileToUpload
        }
        s3.upload(uploadParams, function(err, data) {
            if (err) {
                reject(err)
            } else {
                resolve(data)
            }
        })
    })
    return promise
}
const getOneTimeUrl = (filename) => {
    var promise = new Promise(function(resolve, reject) {
        var params = {
            Bucket: config.pdfBucket,
            Key: filename,
            Expires: 60
        }
        s3.getSignedUrl('getObject', params, function(err, url) {
            if (err) {
                reject(err)
            } else {
                resolve(url)
            }
        })
    })
    return promise
}
Seems like a problem within html-pdf. I thought it might be a problem with PhantomJS (which html-pdf depends on), due to some reading I did here: https://engineering.fundingcircle.com/blog/2015/04/09/aws-lambda-for-great-victory/. However, since Lambda has bumped the max zip size to 50 MB, I don't have a problem uploading the binary.
Any thoughts?
html-pdf uses PhantomJS under the hood, which installs a platform-specific binary. I guess your problem is that you are deploying the binary fetched on your local OS, but Lambda needs a binary built for Amazon Linux.
You can solve this problem by building your deploy package on an EC2 instance running Amazon Linux and then, e.g., deploying it directly from there, as explained in this tutorial.
Also check out this answer on a similar problem.
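Once an Amazon Linux build of PhantomJS is bundled in the package, html-pdf can be pointed at it through its phantomPath option. A minimal sketch, assuming the binary ships at bin/phantomjs inside the deploy package (that path is my assumption, not from the original post):

import path from 'path'
import pdf from 'html-pdf'

const createPDF = (html, filename) =>
    new Promise((resolve, reject) => {
        // Point html-pdf at the bundled Amazon Linux build of PhantomJS.
        const options = { phantomPath: path.join(__dirname, 'bin', 'phantomjs') }
        // /tmp is the only writable location inside a Lambda container.
        pdf.create(html, options).toFile(path.join('/tmp', filename), (err, res) =>
            err ? reject(err) : resolve(res)
        )
    })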