Using Google Speech REST API from Node without helper module - javascript

I'm just getting started with a simple project in Node.js.
I'm trying to use Expo for the final app, but I get lots of dependency conflicts in the modules, so I was thinking of just calling the REST API via fetch. I have a test bed that works fine using the Google-supplied modules, but via REST I always get "RecognitionAudio is not supplied" as an error message. As you can see in the attached code, the input file, encoding etc. are all identical.
Any views?
async function getAudioTranscription() {
  const fetch = require("node-fetch");
  try {
    var filename = 'C:/Users/SteveRist/Downloads/brooklyn.flac';
    var encoding = 'FLAC';
    var sampleRateHertz = 16000;
    var languageCode = 'en-US';
    const fs = require('fs');
    const speech = require('@google-cloud/speech');
    const client = new speech.SpeechClient();
    console.log('Setting REST config');
    const config = {
      encoding: encoding,
      sampleRateHertz: sampleRateHertz,
      languageCode: languageCode,
    };
    console.log('opening ', filename);
    const audio = {
      content: fs.readFileSync(filename).toString('base64'),
    };
    const request = {
      config: config,
      audio: audio,
    };
    // Detects speech in the audio file. This creates a recognition job that you
    // can wait for now, or get its result later.
    const [operation] = await client.longRunningRecognize(request);
    // Get a Promise representation of the final result of the job
    const [response] = await operation.promise();
    const transcription = response.results
      .map(result => result.alternatives[0].transcript)
      .join('\n');
    console.log(`Transcription: ${transcription}`);
    const transcriptResponse = await fetch(
      'https://speech.googleapis.com/v1/speech:recognize?key=xxx8', {
        method: 'POST',
        request: request
      }
    );
    const data = await transcriptResponse.json();
    console.log('transcriptResponse Google returned', data);
    const userMessage = data.results && data.results[0].alternatives[0].transcript || "";
    console.log(userMessage);
  } catch (error) {
    console.log("There was an error", error);
  }
}
getAudioTranscription();
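An editor's note, not from the original thread: node-fetch has no request option, so the POST above goes out with an empty body, which would explain the "RecognitionAudio is not supplied" error. A minimal, untested sketch of the same call with the payload actually attached:

// Hypothetical fix: send the same request object as the JSON body.
const transcriptResponse = await fetch(
  'https://speech.googleapis.com/v1/speech:recognize?key=xxx8', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(request)
  }
);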

Related

How to Update JSON Data Stored in Network using Node.js, IPFS, and Web3

I'm building a simple Web3 app that generates JSON data and saves it on the IPFS network.
The JSON data saved on the IPFS network is then updated when new JSON data is generated.
Saving the data is easy.
The problem is I cannot figure out how to update the data already saved in the IPFS network.
Could you help me spot the problem?
I'm using 'ipfs-http-client' as my IPFS module.
The main code:
const { create } = require("ipfs-http-client");
let hashStorage;
async function ipfsClient() {
  const ipfs = await create({
    host: "localhost",
    port: 5011,
    protocol: "http"
  });
  return ipfs;
}
// saving the file
let fileHash = await saveFile(); // See the save function below
hashStorage = fileHash;
// updating the file
let updateHash = await updateFile(); // See the update function below
The data is saved onto IPFS using the function below:
async function saveFile() {
  let ipfs = await ipfsClient();
  let data = fs.readFileSync("./config.json");
  let options = {
    wrapWithDirectory: false,
    // progress: (prog) => console.log(`Saved: ${prog}`)
  };
  let result = await ipfs.add(data, options);
  // console.log(result.path)
  return result.path;
}
The IPFS data is updated using the function below:
async function updateFile() {
  let ipfs = await ipfsClient();
  let newData = fs.readFileSync("./newConfig.json");
  let options = {
    wrapWithDirectory: false,
    // progress: (prog) => console.log(`Saved: ${prog}`)
  };
  let result = await ipfs.add(newData, options);
  // console.log(result.path)
  return result.path;
}
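An editor's note on the underlying issue (not from the original thread): content added to IPFS is immutable, so ipfs.add on newConfig.json always yields a new CID rather than changing the old one. A mutable pointer to "the latest version" is usually done with IPNS. A rough sketch with ipfs-http-client, assuming the local node has its default key and IPNS enabled:

// Sketch only: republish the node's IPNS name to point at the newest CID,
// so readers can resolve /ipns/<peer-id> and always get the latest version.
async function publishLatest(cidPath) {
  const ipfs = await ipfsClient();
  const result = await ipfs.name.publish(`/ipfs/${cidPath}`);
  console.log(`Published /ipns/${result.name} -> ${result.value}`);
  return result;
}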

How can I serve an HTML file from Azure blob storage in a web app using NodeJS?

Apologies if this is straightforward - I'm very much not a software developer!
I have a web app using Node (Node 16.17.0, npm 8.13.2). I have HTML files that I upload to an Azure Blob Storage container.
I would like to serve the files directly from the storage container to a user.
In the past, I've typically used something like this:
app.get('/analysis/example', async (req, res) => {
  const a = path.join(__dirname + '/app/analysis/example_file.html');
  res.sendFile(a);
});
However, I'm struggling a bit to find appropriate documentation or examples for serving a file from blob storage.
I have managed to print the list of files, and the file itself, to the console, so I know for sure that I've gained access properly (the Azure documentation is quite good), but I'm just not sure how to get the file into an appropriate state to be served back.
I've tried this:
// GAIN ACCESS TO THE APPROPRIATE STORAGE ACCOUNT
const blobServiceClient = BlobServiceClient.fromConnectionString(
  process.env.AZURE_STORAGE_CONNECTION_STRING
);
const containerClient = blobServiceClient.getContainerClient(
  process.env.AZURE_STORAGE_CONTAINER
);
const blobClient = containerClient.getBlobClient(
  process.env.AZURE_STORAGE_BLOB
);
// A FUNCTION TO HELP TURN THE STREAM INTO TEXT (TURN THIS INTO SOMETHING ELSE?)
async function streamToText(readable) {
  readable.setEncoding('utf8');
  let data = '';
  for await (const chunk of readable) {
    data += chunk;
  }
  return data;
}
// SERVE THE HTML FILE
app.get('/analytics/example', async (req, res) => {
  // THIS CHUNK SUCCESSFULLY LISTS AVAILABLE FILES IN THE CONSOLE
  // for await (const blob of containerClient.listBlobsFlat()) {
  //   console.log("\t", blob.name);
  // };
  const blobDownload = await blobClient.download(0);
  const blob = await streamToText(blobDownload.readableStreamBody);
  res.sendFile(blob);
});
I've also tried the chunk below (I found an online resource mentioning that DOMParser wouldn't work with Node):
// SERVE THE HTML FILE
app.get('/analytics/example', async (req, res) => {
  // THIS CHUNK SUCCESSFULLY LISTS AVAILABLE FILES IN THE CONSOLE
  // for await (const blob of containerClient.listBlobsFlat()) {
  //   console.log("\t", blob.name);
  // };
  const blobDownload = await blobClient.download(0);
  const blob = await streamToText(blobDownload.readableStreamBody);
  var DOMParser = require('xmldom').DOMParser;
  let parser = new DOMParser();
  let doc = parser.parseFromString(blob, 'text/html');
  res.sendFile(doc.body);
});
Any help much appreciated.
I've just worked it out: it was simply the "res.sendFile" part, which should have been "res.send".
Below is the correct working code to read the file from Azure Storage and serve it back to the app.
// GAIN ACCESS TO THE APPROPRIATE STORAGE ACCOUNT
const blobServiceClient = BlobServiceClient.fromConnectionString(
  process.env.AZURE_STORAGE_CONNECTION_STRING
);
const containerClient = blobServiceClient.getContainerClient(
  process.env.AZURE_STORAGE_CONTAINER
);
const blobClient = containerClient.getBlobClient(
  process.env.AZURE_STORAGE_BLOB
);
// A FUNCTION TO HELP TURN THE STREAM INTO TEXT (TURN THIS INTO SOMETHING ELSE?)
async function streamToText(readable) {
  readable.setEncoding('utf8');
  let data = '';
  for await (const chunk of readable) {
    data += chunk;
  }
  return data;
}
// SERVE THE HTML FILE
app.get('/analytics/example', async (req, res) => {
  const blobDownload = await blobClient.download(0);
  const blob = await streamToText(blobDownload.readableStreamBody);
  res.send(blob);
});
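A possible refinement (an editor's sketch, not from the original post): for larger files you could skip buffering the whole blob into a string and instead pipe the download stream straight to the response, assuming the blob really is HTML:

// Sketch: stream the blob to the client instead of buffering it in memory.
app.get('/analytics/example', async (req, res) => {
  const blobDownload = await blobClient.download(0);
  res.set('Content-Type', 'text/html');
  // In Node.js, readableStreamBody is a readable stream, so it pipes directly.
  blobDownload.readableStreamBody.pipe(res);
});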

Google Text-to-speech - Loading text from individual lines of a txt file

I am using the Google TextToSpeech API in Node.js to generate speech from text. I was able to get an output file with the same name as the text that is generated for the speech. However, I need to tweak this a bit: I would like to generate multiple files at the same time. The point is that I have, for example, 5 words (or sentences) to generate, e.g. cat, dog, house, sky, sun, and I would like to generate each one to a separate file: cat.wav, dog.wav, etc.
I also want the application to be able to read these words from a *.txt file (each word/sentence on a separate line of the *.txt file).
Is there such a possibility? Below I am pasting the *.js file code and the *.json file code that I am using.
*.js
const textToSpeech = require('@google-cloud/text-to-speech');
const fs = require('fs');
const util = require('util');
const projectId = 'forward-dream-295509';
const keyFilename = 'myauth.json';
const client = new textToSpeech.TextToSpeechClient({ projectId, keyFilename });
const YourSetting = fs.readFileSync('setting.json');
async function Text2Speech(YourSetting) {
  const [response] = await client.synthesizeSpeech(JSON.parse(YourSetting));
  const writeFile = util.promisify(fs.writeFile);
  await writeFile(JSON.parse(YourSetting).input.text + '.wav', response.audioContent, 'binary');
  console.log(`Audio content written to file: ${JSON.parse(YourSetting).input.text}`);
}
Text2Speech(YourSetting);
*.json
{
  "audioConfig": {
    "audioEncoding": "LINEAR16",
    "pitch": -2,
    "speakingRate": 1
  },
  "input": {
    "text": "Text to Speech"
  },
  "voice": {
    "languageCode": "en-US",
    "name": "en-US-Wavenet-D"
  }
}
I'm not very good at programming. I found a tutorial on Google about how to do this and slightly modified it so that the name of the saved file is the same as the generated text.
I would be very grateful for your help.
Arek
Here ya go - I haven't tested it, but this should show how to read a text file, split it into lines, then run TTS over each line with a set concurrency. It uses the p-map and filenamify npm packages, which you'll need to add to your project. Note that Google may have API throttling or rate limits that I didn't take into account here - consider the p-throttle library if that's a concern.
// https://www.npmjs.com/package/p-map
const pMap = require('p-map');
// https://github.com/sindresorhus/filenamify
const filenamify = require('filenamify');
const textToSpeech = require('@google-cloud/text-to-speech');
const fs = require('fs');
const path = require('path');
const projectId = 'forward-dream-295509';
const keyFilename = 'myauth.json';
const client = new textToSpeech.TextToSpeechClient({ projectId, keyFilename });
const rawSettings = fs.readFileSync('setting.json', { encoding: 'utf8' });
// base data for all requests (voice, etc)
const yourSetting = JSON.parse(rawSettings);
// where wav files will be put
const outputDirectory = '.';

async function Text2Speech(text, outputPath) {
  // include the settings in setting.json, but change the text input
  const request = {
    ...yourSetting,
    input: { text }
  };
  const [response] = await client.synthesizeSpeech(request);
  await fs.promises.writeFile(outputPath, response.audioContent, 'binary');
  console.log(`Audio content written to file: ${text} = ${outputPath}`);
  // not really necessary, but you could return something if you wanted to
  return response;
}

// process a line of text - write to file and report result (success/error)
async function processLine(text, index) {
  // create output path based on text input (use library to ensure it's filename safe)
  const outputPath = path.join(outputDirectory, filenamify(text) + '.wav');
  const result = {
    text,
    lineNumber: index,
    path: outputPath,
    isSuccess: null,
    error: null
  };
  try {
    await Text2Speech(text, outputPath);
    result.isSuccess = true;
  } catch (error) {
    console.warn(`Failed: ${text}`, error);
    result.isSuccess = false;
    result.error = error;
  }
  return result;
}

async function processInputFile(filepath, concurrency = 3) {
  const rawText = fs.readFileSync(filepath, { encoding: 'utf8' });
  const lines = rawText
    // split into one item per line
    .split(/[\r\n]+/)
    // remove surrounding whitespace
    .map(s => s.trim())
    // remove empty lines
    .filter(Boolean);
  const results = await pMap(lines, processLine, { concurrency });
  console.log('Done!');
  console.table(results);
}

// create sample text file
const sampleText = `Hello World
cat
dog
another line of text`;
fs.writeFileSync('./my-text-lines.txt', sampleText);
// process each line in the text file, 3 at a time
processInputFile('./my-text-lines.txt', 3);
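If throttling does turn out to matter, here is a rough sketch of the p-throttle idea mentioned above (untested; assumes a CommonJS-compatible version such as v4, whose API takes an options object):

const pThrottle = require('p-throttle');
// Hypothetical numbers: at most 5 synthesizeSpeech calls per second.
const throttledText2Speech = pThrottle({ limit: 5, interval: 1000 })(Text2Speech);
// processLine would then call throttledText2Speech(text, outputPath) instead.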

Upload .vhd as Page-Blob to azure-blob-storage from Url

I have a bunch of VHD files stored on a private server, which are accessible through a URL.
I am trying to upload these VHD files directly to my Azure storage account using the Azure JavaScript npm libraries. The VHDs have to be uploaded as page blobs. I tried using the uploadPagesFromURL() method of the PageBlobClient, but with no success. My code looks roughly like this:
async function uploadVHD(accessToken, srcUrl) {
  try {
    // Get credentials from accessToken
    const creds = new StorageSharedKeyCredential(storageAccount.name, storageAccount.key);
    // Get blobServiceClient
    const blobServiceClient = new BlobServiceClient(`https://${storageAccount.name}.blob.core.windows.net`, creds);
    // Create container
    const containerClient = blobServiceClient.getContainerClient("vhd-images");
    await containerClient.createIfNotExists();
    const src = srcUrl.replace('https://', 'https://username:password@');
    // Upload to blob storage
    const pageBlobClient = containerClient.getPageBlobClient("Test.vhd");
    // Get fileSize of vhd
    const fileSize = (await axiosRequest(src, { method: "HEAD" })).headers["content-length"];
    const uploadResponse = await pageBlobClient.uploadPagesFromURL(src, 0, 0, fileSize);
    return uploadResponse;
  } catch (error) {
    return error;
  }
}
It is not possible to upload the page blob from your URL directly. You need to read the data from the URL first, then upload it using the uploadPages method.
axios.get(URL, {
  responseType: 'arraybuffer'
}).then((response) => {
  console.log(response.data);
  console.log(response.data.length);
  // upload page blob...
}).catch((error) => {
  // handle error
});
// uploadPages method
const uploadResponse = pageBlobClient.uploadPages(data, 0, dataLength);
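One caveat worth adding (editor's note, not in the original answer): a page blob has to be created at its final size before pages are written, and every page write must be 512-byte aligned (a valid .vhd already is). A fuller sketch along those lines, reusing the pageBlobClient and src from the question:

// Sketch: download the VHD, create the page blob at the right size, write pages.
const response = await axios.get(src, { responseType: 'arraybuffer' });
const data = Buffer.from(response.data);
await pageBlobClient.create(data.length);
// uploadPages accepts at most 4 MB per call, so large VHDs would need a loop
// over <=4 MB chunks; shown here as a single write for brevity.
await pageBlobClient.uploadPages(data, 0, data.length);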

How to build a readable stream in Node.js and TypeScript?

I connected the TypeScript function to Azure Blob Storage through the REST API, and this works fine for me. Now I want to get each blob and read its contents.
I tried this with the code below, but it returns an error:
const blobServiceClient = new BlobServiceClient(`https://${accountName}.blob.core.windows.net?${sasToken}`,
  pipeline);
const containerClient = blobServiceClient.getContainerClient(containerName);
console.log(containerClient);
if (!(await containerClient.exists())) {
  console.log("the container does not exist");
  await containerClient.create();
}
const client = containerClient.getBlockBlobClient(this.currentFile.name);
// name of uploaded blob
console.log(this.currentFile.name);
// metadata from the blob
console.log(client);
// List each blob in the container
for await (const blob of containerClient.listBlobsFlat()) {
  console.log('\t', blob.name);
  const blockBlobClient = containerClient.getBlockBlobClient(blob.name);
  const downloadBlockBlobResponse = await blockBlobClient.download(0);
  console.log('\nDownloaded blob content...');
  console.log('\t', await streamToString(downloadBlockBlobResponse.readableStreamBody));
  // end of loop
}
async function streamToString(readableStream) {
  return new Promise((resolve, reject) => {
    const chunks = [];
    readableStream.on("data", (data) => {
      chunks.push(data.toString());
    });
    readableStream.on("end", () => {
      resolve(chunks.join(""));
    });
    readableStream.on("error", reject);
  });
}
The error is:
ERROR Error: Uncaught (in promise): TypeError: Cannot read property 'on' of undefined TypeError: Cannot read property 'on' of undefined
So how to solve the problem?
Thanks
Download the official sample code.
It runs normally on my side. Check whether your local environment is missing dependencies, or whether the permissions in the storage account need to be set.
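An editor's aside, hedged: "Cannot read property 'on' of undefined" is what you would see if this code runs in a browser, because (as the sample below notes) the download response only exposes readableStreamBody in the Node.js runtime; browsers get blobBody instead. A browser-side sketch:

// Sketch for browser runtimes only: read the Blob that blobBody resolves to.
const downloadResponse = await blockBlobClient.download(0);
const downloadedText = await (await downloadResponse.blobBody).text();
console.log(downloadedText);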
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
/*
  Setup: Enter your storage account name and shared key in main()
*/
import {
  BlobServiceClient,
  StorageSharedKeyCredential,
  BlobDownloadResponseModel
} from "@azure/storage-blob";
// Load the .env file if it exists
import * as dotenv from "dotenv";
dotenv.config();

export async function main() {
  // Enter your storage account name and shared key
  const account = process.env.ACCOUNT_NAME || "pans*****age";
  const accountKey = process.env.ACCOUNT_KEY || "IHa48xxo+0anyKQ2GzQ2K*******ZBxgJ0VotCpGs/PMftkebb9UFqyg==";
  // Use StorageSharedKeyCredential with storage account and account key
  // StorageSharedKeyCredential is only available in Node.js runtime, not in browsers
  const sharedKeyCredential = new StorageSharedKeyCredential(account, accountKey);
  // ONLY AVAILABLE IN NODE.JS RUNTIME
  // DefaultAzureCredential will first look for Azure Active Directory (AAD)
  // client secret credentials in the following environment variables:
  //
  // - AZURE_TENANT_ID: The ID of your AAD tenant
  // - AZURE_CLIENT_ID: The ID of your AAD app registration (client)
  // - AZURE_CLIENT_SECRET: The client secret for your AAD app registration
  //
  // If those environment variables aren't found and your application is deployed
  // to an Azure VM or App Service instance, the managed service identity endpoint
  // will be used as a fallback authentication source.
  // const defaultAzureCredential = new DefaultAzureCredential();
  // You can find more TokenCredential implementations in the @azure/identity library
  // (https://www.npmjs.com/package/@azure/identity)
  // to use client secrets, certificates, or managed identities for authentication.
  // Use AnonymousCredential when url already includes a SAS signature
  // const anonymousCredential = new AnonymousCredential();

  // List containers
  const blobServiceClient = new BlobServiceClient(
    // When using AnonymousCredential, following url should include a valid SAS or support public access
    `https://${account}.blob.core.windows.net`,
    sharedKeyCredential
  );
  let i = 1;
  for await (const container of blobServiceClient.listContainers()) {
    console.log(`Container ${i++}: ${container.name}`);
  }

  // Create a container
  const containerName = `newcontainer${new Date().getTime()}`;
  const containerClient = blobServiceClient.getContainerClient(containerName);
  const createContainerResponse = await containerClient.create();
  console.log(`Create container ${containerName} successfully`, createContainerResponse.requestId);

  // Create a blob
  const content = "hello, 你好";
  const blobName = "newblob" + new Date().getTime();
  const blockBlobClient = containerClient.getBlockBlobClient(blobName);
  const uploadBlobResponse = await blockBlobClient.upload(content, Buffer.byteLength(content));
  console.log(`Upload block blob ${blobName} successfully`, uploadBlobResponse.requestId);

  // List blobs
  i = 1;
  for await (const blob of containerClient.listBlobsFlat()) {
    console.log(`Blob ${i++}: ${blob.name}`);
  }

  // Get blob content from position 0 to the end
  // In Node.js, get downloaded data by accessing downloadBlockBlobResponse.readableStreamBody
  // In browsers, get downloaded data by accessing downloadBlockBlobResponse.blobBody
  const downloadBlockBlobResponse: BlobDownloadResponseModel = await blockBlobClient.download(0);
  console.log(
    "Downloaded blob content",
    await streamToString(downloadBlockBlobResponse.readableStreamBody!)
  );

  // Delete container
  await containerClient.delete();
  console.log("deleted container");
}

// A helper method used to read a Node.js readable stream into string
async function streamToString(readableStream: NodeJS.ReadableStream) {
  return new Promise((resolve, reject) => {
    const chunks: string[] = [];
    readableStream.on("data", (data) => {
      chunks.push(data.toString());
    });
    readableStream.on("end", () => {
      resolve(chunks.join(""));
    });
    readableStream.on("error", reject);
  });
}

main().catch((err) => {
  console.error("Error running sample:", err.message);
});
