I have a cloud function that triggers when a new file is created in Firebase Storage. Inside this function I need to collect every other file located at the same path into an array, but I don't know how.
exports.testCloudFunc = functions.storage.object().onFinalize(async object => {
  const filePath = object.name;
  const { Logging } = require('@google-cloud/logging');
  console.log(`Logged: ${filePath}`);
  let obj = JSON.stringify(object);
  console.log(`Logged: ${obj}`);
});
After that I will try to merge all the PDFs into one new file and save it back to Firebase Storage at the same path. Any help is highly appreciated! Thank you in advance for your wisdom )
As per the documentation Doug Stevenson linked (2nd code sample for Node.js), you can list objects from a specified folder within your bucket using prefixes and delimiters.
Sample from the mentioned documentation:
/**
 * TODO(developer): Uncomment the following lines before running the sample.
 */
// const bucketName = 'Name of a bucket, e.g. my-bucket';
// const prefix = 'Prefix by which to filter, e.g. public/';
// const delimiter = 'Delimiter to use, e.g. /';

// Imports the Google Cloud client library
const {Storage} = require('@google-cloud/storage');

// Creates a client
const storage = new Storage();

async function listFilesByPrefix() {
  /**
   * This can be used to list all blobs in a "folder", e.g. "public/".
   *
   * The delimiter argument can be used to restrict the results to only the
   * "files" in the given "folder". Without the delimiter, the entire tree under
   * the prefix is returned. For example, given these blobs:
   *
   *   /a/1.txt
   *   /a/b/2.txt
   *
   * If you just specify prefix = '/a', you'll get back:
   *
   *   /a/1.txt
   *   /a/b/2.txt
   *
   * However, if you specify prefix='/a' and delimiter='/', you'll get back:
   *
   *   /a/1.txt
   */
  const options = {
    prefix: prefix,
  };

  if (delimiter) {
    options.delimiter = delimiter;
  }

  // Lists files in the bucket, filtered by a prefix
  const [files] = await storage.bucket(bucketName).getFiles(options);

  console.log('Files:');
  files.forEach(file => {
    console.log(file.name);
  });
}

listFilesByPrefix().catch(console.error);
Does that mean that all files will be returned first and then filtered by prefix?
As the code sample above shows, the array [files] will only contain the objects that already passed the filter: the prefix and delimiter options are sent with the request, so the filtering happens server-side rather than after all files have been returned:
const [files] = await storage.bucket(bucketName).getFiles(options);
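To tie this back to the original trigger: inside onFinalize you can derive the prefix from the path of the newly finalized object. A minimal sketch (assuming the "folder" is everything up to the last / in object.name, and that the siblings are what you want to collect):

const path = require('path');
const functions = require('firebase-functions');
const { Storage } = require('@google-cloud/storage');

const storage = new Storage();

exports.testCloudFunc = functions.storage.object().onFinalize(async object => {
  // e.g. object.name = 'uploads/reports/new.pdf' -> prefix = 'uploads/reports/'
  const prefix = `${path.dirname(object.name)}/`;

  // The delimiter keeps the listing to this "folder" only, without subfolders.
  const [files] = await storage
    .bucket(object.bucket)
    .getFiles({ prefix, delimiter: '/' });

  // Every other file at the same path, excluding the file that triggered us.
  const siblings = files.map(f => f.name).filter(name => name !== object.name);
  console.log('Files at the same path:', siblings);
});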
I have a JSON file that stores data which is displayed on my page using JavaScript. This JSON file and its key/value pairs are not visible or accessible in Chrome's DevTools. This component manages JSON files:
/**
 * Takes a filename and a JS object and initiates a download through the browser
 * @param {String} filename
 * @param {any} object JSON serializable object
 * @return {undefined}
 */
export const downloadJson = (filename, object) => {
  const content = JSON.stringify(object, null, 2);
  const el = document.createElement('a');
  el.setAttribute('href', `data:application/json;charset=utf-8,${encodeURIComponent(content)}`);
  el.setAttribute('download', filename);
  el.hidden = true;
  document.body.appendChild(el);
  el.click();
  document.body.removeChild(el);
};
/**
 * Gets the `target.result` property from an event, or returns null
 * if it fails at any point
 * @type {Function}
 * @param {Event} event load Event
 * @return {File}
 */
const getFileResult = propPathOr(null, ['target', 'result']);
/**
 * Takes a file and reads it as JSON, resolving the JSON-parsed
 * file contents
 * @param {File} file
 * @return {Promise<[Object]>} Returns Promise of Array of Archive Entries
 */
export const readFileAsJson = file => {
  const reader = new FileReader();
  const promise = new Promise((resolve, reject) => {
    reader.onload = compose(resolve, JSON.parse, getFileResult);
    reader.onerror = reject;
  });
  reader.readAsText(file);
  return promise;
};
export const readFileListAsJson = files =>
  Promise.all(
    Array.from(files)
      .map(readFileAsJson)
  )
  .catch(console.error);
This is the database component:
// DATABASE functions
import { get, set, keys } from 'idb-keyval';
import { sha1 } from './hash.js';

const getKey = key => get(key);

export const getAllEntries = async () =>
  await Promise.all((await keys()).map(getKey));

export const writeMultipleEntries = entries =>
  entries.forEach(writeSingleEntry);
/**
 * @typedef {Object} ArchiveEntry
 * @property {String} date
 * @property {String} passage
 * @property {String} question
 * @property {String} answer
 */

/**
 * Writes a single archive entry to idb
 * @param {ArchiveEntry} entry
 * @return {ArchiveEntry}
 */
export const writeSingleEntry = async ({ date, passage, question, answer }) => {
  const hash = await hashEntry({ date, passage, question });
  await set(hash, { date, passage, question, answer });
  return { date, passage, question, answer };
};

/**
 * Generates a hash of an entry to use as its idb key
 * @param {ArchiveEntry} entry
 * @return {string}
 */
const hashEntry = ({ date, passage, question }) =>
  sha1(`${date}-${passage}-${question}`);
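The hash.js module is not shown in the question; a plausible sketch of it using the Web Crypto API (purely an assumption about the author's actual implementation) could be:

// Hypothetical hash.js: SHA-1 digest of a string, hex-encoded.
export const sha1 = async str => {
  const buf = await crypto.subtle.digest('SHA-1', new TextEncoder().encode(str));
  return Array.from(new Uint8Array(buf))
    .map(b => b.toString(16).padStart(2, '0'))
    .join('');
};

This is consistent with writeSingleEntry awaiting hashEntry, since crypto.subtle.digest returns a Promise.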
Values are stored using this function:
const updateDb =
  ({ passage, question }) =>
  (answer) =>
    writeSingleEntry({ date: new Date(), answer, passage, question });
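A hypothetical usage of this curried form (the passage and question values here are made up): the first call captures the context, and the returned function can be handed around as a callback that only needs the answer:

// First call captures the context...
const saveAnswer = updateDb({ passage: 'Some passage', question: 'Some question' });

// ...later, e.g. from an event handler, supply only the answer.
saveAnswer('Some answer');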
Storage is handled by its own script:
export const storeOnInput = key => ({ target: { value } }) => writeValue(key, value);
export const readValue = key => localStorage.getItem(key);
export const writeValue = (key, val) => localStorage.setItem(key, val);
It is called in several components. Here to write and read the value of a text passage:
onActiveChanged(active) {
  this.passage = readValue('passage-input');
}

onKeyup(event) {
  writeValue('passage-input', event.target.value);
}
Here to write and read a question:
onActiveChanged(active) {
  this.question = readValue("question-input");
  this.passage = readValue("passage-input");
}

onKeyup(event) {
  writeValue("question-input", event.target.value);
}
Here to provide an answer and reset the form:
const answer = document.getElementById('answer');
const write = document.getElementById('write');
const question = document.getElementById('question');

const onAnswerSubmitted = ({ detail: answer }) => {
  writeValue('answer', answer);
};
onActiveChanged(active) {
  if (!active) return;
  this.answer = readValue('answer');
}

resetQuestion() {
  this.dispatchEvent(new CustomEvent('reset-question'));
  writeValue('question-input', '');
  writeValue('answer', '');
}

resetWrite() {
  this.resetQuestion();
  this.dispatchEvent(new CustomEvent('reset-passage'));
  writeValue('passage-input', '');
}
Here to get entries:
onActiveChanged(active) {
  if (active) this.getEntries();
}

async getEntries() {
  this.entries = await getAllEntries();
  this.entry = new URLSearchParams(location.search.substring(1)).get("date");
  console.log("here are the dates: \n", prettyDate(this.entries[0].date));
  console.log("here is an answer: \n", this.entries[0].answer);
}
Here to download and upload the JSON file:
async exportBackup() {
  downloadJson(`backup ${new Date()}.json`, await getAllEntries());
}

async importBackup({ target: { files } }) {
  return readFileListAsJson(files)
    .then(map(writeMultipleEntries));
}
Unlike this question, nothing is showing in Storage > Local Storage, and it is not a Chrome UI design flaw issue.
It is possible to confirm the values have been written and are accessible from the JSON file using functions like:
console.log(this.entries[0].date)
console.log(this.entries[0].answer)
but I would like to be able to debug by viewing the entire JSON file.
I had the same problem today while working on my web app: I could access some data I had stored in localStorage via the console (JSON.parse(localStorage["my-storage-key"])), but in the Chrome DevTools, in the Application tab, the https://localhost:4200 entry was totally empty, just like in the screen capture you provided.
What fixed the problem for me was to click "Restore defaults and reload" in the preferences of the Chrome DevTools, and I could see the entries in the table again.
It doesn't appear as though you have loaded the JSON file into local storage at any point. Perhaps there is driver code you can share so that your issue can be more easily debugged.
In the meantime, check out the documentation for localStorage on MDN. I think you may find the answer by reading up on how to set local storage.
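For reference, a minimal localStorage round trip looks like this (the key name is just an example); anything written this way shows up under Application > Storage > Local Storage in DevTools:

// Write a serialized value under a key.
localStorage.setItem('entries', JSON.stringify([{ date: '2019-01-01' }]));

// Read it back and parse it.
const entries = JSON.parse(localStorage.getItem('entries'));
console.log(entries);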
I have a function that searches for files in folders and recursively calls itself if a subfolder occurs.
I want to optimize the search algorithm so that I can store the returned data together with its corresponding parameters.
Then, if a new search is issued, I can check whether an equal search was made before and return the saved result instead of doing a new search.
My approach was to push the params into the resulting array, either first or last, but this has to happen only once in the whole recursion process.
This is my function:
/**
 * List all files that the matcher has hit
 * @param {String} start path from where to start the search
 * @param {Object} [options] search options
 * @param {RegExp} [options.matcher] expression to match while searching
 * @param {Boolean} [options.folders] search in subfolders, default = true
 * @returns {Array} files that the matcher has hit
 */
list(start, { matcher, folders = true } = {}) {
  if (!fs.existsSync(start)) throw new Error(`${start} doesn't exist.`)

  const dir = fs.readdirSync(start)
  const files = []

  for (let iCnt = 0; iCnt < dir.length; iCnt++) {
    const item = path.resolve(start, dir[iCnt])
    let stat = fs.statSync(item)

    switch (true) {
      case stat.isDirectory() && folders:
        files.push(...this.list(item, { matcher, folders }))
        break
      case matcher && matcher.test(item):
        files.push(item)
        break
      case !matcher:
        files.push(item)
        break
    }
  }
  return files
}
I have thought a lot about it but can't get my head around it.
Does anyone have an idea?
When the first call in a recursive sequence is special, I usually handle it by making the recursive part a worker function, and making the main function a wrapper for it that does the special part.
In your case, that would mean renaming your existing list (perhaps to listWorker) and making a wrapper list function that does the caching. Roughly:
function list(start, { matcher, folders = true } = {}) {
  let result = getFromCache(/*...*/);
  if (!result) {
    result = listWorker(start, {matcher, folders});
    putInCache(/*...*/, result);
  }
  return result;
}
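To make that concrete, here is a minimal sketch of the cache itself, assuming an in-memory Map and a key derived from the search parameters (both details are assumptions, not part of the pattern itself):

const cache = new Map();

// Derive a stable key from the parameters. RegExp objects don't
// JSON-serialize, so use the pattern source instead.
const cacheKey = (start, { matcher, folders = true } = {}) =>
  JSON.stringify([start, matcher ? matcher.source : null, folders]);

function list(start, options = {}) {
  const key = cacheKey(start, options);
  if (!cache.has(key)) {
    cache.set(key, listWorker(start, options));
  }
  return cache.get(key);
}

Note that the cached array is returned by reference, so callers should treat it as read-only (or the wrapper should return a copy).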
I am trying to reproduce the smart contract process of Ethereum by using Node.js and the VM.
I have resolved some questions, but it also raised a new one.
My goal is to retrieve the bytecode that I load and execute in a Node.js VM instance after its execution.
The initial state would be the smart contract code that I load as bytecode in the VM.
I also make two calls that are compiled to bytecode and executed.
How could I retrieve the new state in bytecode from that VM instance?
const vm = require('vm');
const v8 = require('v8');
v8.setFlagsFromString('--no-lazy');
if (Number.parseInt(process.versions.node.split('.')[0], 10) >= 12) {
  v8.setFlagsFromString('--no-flush-bytecode'); // Thanks to A-Parser (@a-parser)
}
/**
 * Generates v8 bytecode buffer.
 * Could be the compile step for a smart contract, compiling it to bytecode;
 * the bytecode will go on the blockchain so it can never change.
 * The input could be a file with all the JavaScript.
 * @param {string} javascriptCode JavaScript source that will represent a smart contract on the blockchain
 * @returns {Buffer} The generated bytecode.
 */
const compileCode = function (javascriptCode) {
  if (typeof javascriptCode !== 'string') {
    throw new Error(`javascriptCode must be string. ${typeof javascriptCode} was given.`)
  }

  let script = new vm.Script(javascriptCode)
  let bytecodeBuffer = script.createCachedData()

  return bytecodeBuffer;
}
/**
 * Runs v8 bytecode buffer and returns the result.
 * @param {Buffer} bytecodeBuffer The buffer object that was created using compileCode function.
 * @returns {any} The result of the very last statement executed in the script.
 */
const runBytecode = function (bytecodeBuffer) {
  if (!Buffer.isBuffer(bytecodeBuffer)) {
    throw new Error(`bytecodeBuffer must be a buffer object.`);
  }

  // fixBytecode(bytecodeBuffer);

  let length = readSourceHash(bytecodeBuffer);

  let dummyCode = "";
  if (length > 1) {
    dummyCode = '"' + "\u200b".repeat(length - 2) + '"'; // "\u200b" Zero width space
  }

  let script = new vm.Script(dummyCode, {
    cachedData: bytecodeBuffer
  });

  if (script.cachedDataRejected) {
    throw new Error('Invalid or incompatible cached data (cachedDataRejected)');
  }

  //console.log(bytecodeBuffer)

  return script.runInThisContext();
};
// TODO: rewrite this function
const readSourceHash = function (bytecodeBuffer) {
  if (!Buffer.isBuffer(bytecodeBuffer)) {
    throw new Error(`bytecodeBuffer must be a buffer object.`)
  }

  // Bytes 8-11 of the cached-data header hold the length of the original
  // source as a little-endian 32-bit integer; the reduce below decodes it.
  return bytecodeBuffer.slice(8, 12).reduce((sum, number, power) => sum += number * Math.pow(256, power), 0)
}
/*
This is an example of a smart contract in javascript.
It would get stored on the blockchain as bytecode.
It could be compiled from a file too.
*/
let smartContract = `
function setName(_name){
name = _name
console.log('name set')
}
function getName(){
console.log(name)
}
`
// we compile the smart contract into bytecode
// the contract will be loaded in the VM
let bytecode = compileCode(smartContract)
runBytecode(bytecode)
/*
This call will set the name in the contract that resides in the VM.
The name is set but not written anywhere yet. It is only set on this
instance of the VM. This changes the state, and we would need to recompile
the contract with the new data and update the blockchain.
*/
let call = "setName('Satoshi')"
let callBytecode = compileCode(call)
runBytecode(callBytecode)
/*
Execute the call in the contract in the VM.
The function just prints out the value that we passed to our javascript contract;
the state of the contract did not change.
*/
let rCall = "getName()"
let rcallBytecode = compileCode(rCall)
runBytecode(rcallBytecode)
//console.log(bytecode)
How can I retrieve the bytecode that was loaded in the VM and save it with the name still in it? Imagine an array of names: every time someone loads and calls that function, it adds a name to the array. I then want to save the new bytecode with that extra name in it and pass it to the next VM that wants to run it.
I want to delete a file from storage when a data node is deleted from the realtime database. The name of the file to be deleted is read before the node is deleted; it is saved in the node in a child named "imageTitle". The code works fine before implementing the file-delete code; I mean the nodes get deleted perfectly.
When I implement the file-delete code the rest doesn't work, but there are no errors. The code after the file delete doesn't run, and I don't know why.
This is for an academic final project.
There's a folder named images in the bucket, and the file I need to delete is in there. The file name is taken from the imageTitle child of the node that is to be deleted in the realtime database:
'use strict';

const functions = require('firebase-functions');
const admin = require('firebase-admin');
admin.initializeApp();

exports.changetime = functions.database.ref('/posts/{pushId}')
  .onCreate((snapshot, context) => {
    const editDate = Date.now()
    const datas = snapshot.val();
    return snapshot.ref.update({editDate})
  })
const CUT_OFF_TIME = 1 * 60 * 1000;

/**
 * This database triggered function will check for child nodes that are older than the
 * cut-off time. Each child needs to have a `timestamp` attribute.
 */
exports.deleteOldItems = functions.database.ref('/posts/{pushId}').onWrite(async (change, context) => {
  const ref = change.after.ref.parent; // reference to the parent
  const now = Date.now();
  const id = context.params.pushId;
  const cutoff = now - CUT_OFF_TIME;
  const oldItemsQuery = ref.orderByChild('createdDate').endAt(cutoff);
  const snapshot = await oldItemsQuery.once('value');

  const getImageTitle = admin.database().ref(`/posts/${id}/imageTitle`).once('value');
  return getImageTitle.then(imageTitle => {
    console.log('post author id', imageTitle.val());
    const imageName = imageTitle.val();
    const filePath = 'images/' + imageName;
    const path = `images/${imageName}`;
    const bucket = app.storage().bucket();

    return bucket.file(path).delete().then(() => {
      console.log(`File deleted successfully in path: ${imageName}`)

      /* The problem is here. The code doesn't run after the file.delete call; no errors, but it doesn't work.
      If I remove that piece of code the rest works fine: the realtime database nodes get deleted and updated perfectly. */
      const updates = {};
      snapshot.forEach(child => {
        updates[child.key] = null;
        if (updates[child.key] == null) {
          admin.database().ref(`/post-likes/${id}`).remove();
        }
      })
      return ref.update(updates);
    });
  });
});
I think this is the problem:
const filePath = 'images/' + imageName;
const path = `images/${imageName}`;
You should be using filePath, not path, in your bucket reference. const path = `images/${imageName}`; looks like the wild-card syntax used when querying, not when assigning variables... I think you stayed in 'Dart' mode here :-). I'm not sure what path contains, therefore, so console.log it to see.
Here is some code that I use to delete images from my storage bucket, which works perfectly. First, a couple of things to check and do (best done using console.log; never assume you know what is in a variable):
Ensure imageName, filePath, path and bucket contain what they should contain.
Include the .catch block in your code to check if there actually is an error
It isn't clear whether console.log(`File deleted successfully in path: ${imageName}`) is executing, but if it isn't then your file.delete must be throwing an error, which the catch block should trap.
The code snippet:
const oldProfileImagePath = "profileImages/" + authenticatedUserId + "/" + oldProfileImageName;

return bucket.file(oldProfileImagePath).delete()
  .then(function () {
    console.info("Old Profile Image: " + oldProfileImagePath + " deleted!");
  })
  .catch(function (error) {
    console.error("Remove Old Profile Image: " + oldProfileImagePath +
      " failed with " + error.message)
  });
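Applied to the question's code, the delete call with a catch block attached might look like the sketch below. Note that the question's const bucket = app.storage().bucket(); references app, which is never defined; admin.storage().bucket() is presumably what was intended:

const imageName = imageTitle.val();
const filePath = 'images/' + imageName;
const bucket = admin.storage().bucket(); // not app.storage()

return bucket.file(filePath).delete()
  .then(() => {
    console.log(`File deleted successfully in path: ${filePath}`);
    // ...continue with the database updates here...
  })
  .catch(error => {
    console.error(`Deleting ${filePath} failed with ${error.message}`);
  });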
I have a JSON file that looks like:
{
  "someRandomStuff": "myRandomStuff",
  "includeNodesFromFiles": {
    "item1": "item1.json",
    "item2": "item2.json",
    "item3": "item3.json"
  }
}
And now I want to replace item1, item2, and item3 with the JSON content of each respective file, so the destination file would look like:
{
  "someRandomStuff": "myRandomStuff",
  "includeNodesFromFiles": {
    "item1": {"whatever": "...whatever was in item1.json"},
    "item2": {"whatever": "...whatever was in item2.json"},
    "item3": {"whatever": "...whatever was in item3.json"}
  }
}
Or similarly for an array:
{
  "someRandomStuff": "myRandomStuff",
  "includeNodesFromFiles": [
    "item1.json",
    "item2.json",
    "item3.json"
  ]
}
To:
{
  "someRandomStuff": "myRandomStuff",
  "includeNodesFromFiles": [
    {"whatever": "...whatever was in item1.json"},
    {"whatever": "...whatever was in item2.json"},
    {"whatever": "...whatever was in item3.json"}
  ]
}
How could I do that with Grunt? I'm not finding a Grunt task that will do that out-of-the-box so far.
New to Grunt, so please bear with me.
Short answer: This is a very custom requirement and there are no existing grunt plugins which will achieve this that I'm aware of.
Solution:
You'll need to create your own grunt plugin to handle this type of requirement. The following steps describe how this can be achieved:
Firstly, create a plugin file as follows. Let's name the file json-replace.js:
json-replace.js
/**
 * Custom grunt plugin replaces JSON values (filepaths) with JSON file content.
 */
module.exports = function(grunt) {

  'use strict';

  var path = require('path');

  /**
   * Custom grunt multi task to replace values in JSON.
   */
  grunt.registerMultiTask('jsonReplace', 'Replace values in JSON', function() {

    // Read the configuration values.
    var src = this.data.src;
    var dest = this.data.dest;
    var keyName = this.data.key;
    var baseDir = path.dirname(src);

    // Default options
    var opts = this.options({
      indent: 2
    });

    /**
     * Determines whether the passed value is an Array.
     * @param {*} value - A reference to the value to check.
     * @returns {Boolean} - true if the value is an Array, otherwise false.
     */
    function isArray(value) {
      return Array.isArray(value);
    }

    /**
     * Determines whether the passed value is an Object.
     * @param {*} value - A reference to the value to check.
     * @returns {Boolean} - true if the value is an Object, otherwise false.
     */
    function isObject(value) {
      return Object.prototype.toString.call(value) === '[object Object]';
    }

    /**
     * Reads a file's contents, parsing the data as JSON.
     * @param {String} srcPath - The filepath to the JSON file to parse.
     * @returns {Object} - The parsed JSON data.
     */
    function readJson(srcPath) {
      return grunt.file.readJSON(srcPath);
    }

    /**
     * Writes JSON data to a file.
     * @param {String} destPath - A filepath for where to save the file.
     * @param {Object|Array} data - Value to convert to JSON and save to file.
     * @param {Number} [indent=2] - The no. of spaces to indent the JSON.
     */
    function writeJson(destPath, data, indent) {
      indent = (typeof indent !== 'undefined') ? indent : 2;
      grunt.file.write(destPath, JSON.stringify(data, null, indent));
      grunt.log.writeln('Saved \x1b[96m1\x1b[0m file');
    }

    /**
     * Checks whether a file exists and logs any missing files to console.
     * @param {String} filePath - The filepath to check for its existence.
     * @returns {Boolean} - true if the filepath exists, otherwise false.
     */
    function fileExists(filePath) {
      if (!grunt.file.exists(filePath)) {
        grunt.fail.warn('Unable to read \"' + filePath + '\"');
        return false;
      }
      return true;
    }

    /**
     * Checks whether type of value is a string and logs an error if not.
     * @param {*} value - The value to check
     * @returns {Boolean} - true if type of value is 'string', otherwise false.
     */
    function isString(value) {
      if (typeof value !== 'string') {
        grunt.fail.warn('Value type must be a string: found \"' + value + '\"');
        return false;
      }
      return true;
    }

    /**
     * Processes each Array item for a given key.
     * @param {Object} data - The parsed JSON data to process.
     * @param {String} keyName - Name of key whose Array values to process.
     * @param {String} baseDir - Base directory path of the source json file.
     */
    function processArrayItems(data, keyName, baseDir) {
      var replacement = [];
      data[keyName].forEach(function(item) {
        var fullPath = path.join(baseDir, item);
        if (isString(item) && fileExists(fullPath)) {
          replacement.push(readJson(fullPath));
        }
      });
      data[keyName] = replacement;
      writeJson(dest, data, opts.indent);
    }

    /**
     * Processes an Object's key/value pairs for a given key.
     * @param {Object} data - The parsed JSON data to process.
     * @param {String} keyName - Name of key whose property values to process.
     * @param {String} baseDir - Base directory path of the source json file.
     */
    function processObjectValues(data, keyName, baseDir) {
      var replacement = {};
      Object.keys(data[keyName]).forEach(function(key) {
        var accessor = data[keyName][key];
        var fullPath = path.join(baseDir, accessor);
        if (isString(accessor) && fileExists(fullPath)) {
          replacement[key] = readJson(fullPath);
        }
      });
      data[keyName] = replacement;
      writeJson(dest, data, opts.indent);
    }

    // Read the source JSON file
    var srcData = readJson(src);

    // Check if the `key` provided exists in source JSON.
    if (!srcData[keyName]) {
      grunt.fail.warn('Missing given key "' + keyName + '" in ' + src);
    }

    // Invoke the appropriate processing for key value.
    if (isArray(srcData[keyName])) {
      processArrayItems(srcData, keyName, baseDir);
    } else if (isObject(srcData[keyName])) {
      processObjectValues(srcData, keyName, baseDir);
    } else {
      grunt.fail.warn('Value for "' + keyName + '" must be object or array');
    }
  });
};
Save json-replace.js in a folder named custom-grunt-tasks which resides in your project's root directory (i.e. at the same level as Gruntfile.js and package.json). For instance:
.
├── Gruntfile.js
├── custom-grunt-tasks <---
│ └── json-replace.js <---
├── node_modules
│ └── ...
├── package.json
└── ...
Add the following Task to your Gruntfile.js:
Gruntfile.js
module.exports = function(grunt) {

  grunt.loadTasks('custom-grunt-tasks');

  grunt.initConfig({
    jsonReplace: { // <-- Task
      targetA: { // <-- Target
        src: 'path/to/source.json',
        dest: 'path/to/output/file.json',
        key: 'includeNodesFromFiles'
      }
    }
    // ...
  });

  grunt.registerTask('default', ['jsonReplace']);
}
Notes: (regarding configuration of Gruntfile.js above)
The line which reads grunt.loadTasks('custom-grunt-tasks'); loads the custom plugin (i.e. json-replace.js) from the directory named custom-grunt-tasks.
A Task named jsonReplace is added to grunt.initConfig({...}) which contains one Target arbitrarily named targetA.
The value for the src property should be replaced with a valid filepath which points to your source .json file.
The value for the dest property should be replaced with a filepath for where the new .json file should be saved.
The value for key should be replaced with a valid key name: the name of the key which holds either an Object or an Array of .json filepaths (for example, includeNodesFromFiles, as given in your two examples).
Additional info:
json-replace.js is multi-taskable which basically means you can configure multiple Targets inside the jsonReplace task if required. For example:
// ...
jsonReplace: { // <-- Task
  targetA: { // <-- Target
    src: 'path/to/source.json',
    dest: 'path/to/output/file.json',
    key: 'includeNodesFromFiles'
  },
  targetB: { // <-- Another Target
    src: 'path/to/another/source.json',
    dest: 'path/to/output/another/file.json',
    key: 'anotherKeyName'
  }
}
// ...
Multiple Targets may be useful if you want to process multiple .json files.
json-replace.js expects the value of the key (e.g. includeNodesFromFiles) to be either:
An Object with nested key/value pairs, (as per your first example), whereby each nested key has a filepath value for an existing .json file.
Or, an Array, (as per your second example), whereby each item of the Array is a filepath value for an existing .json file.
Note: An error will be logged to the console if the structure of the given JSON key does not match either of the two aforementioned structures.
json-replace.js indents the content of the resultant .json file(s) using two spaces by default. However, if you want to change this you can utilize the indent option. For example the following Task configuration will indent the resultant .json file by four spaces:
// ...
jsonReplace: {
  options: {
    indent: 4 // <-- Custom indent option
  },
  targetA: {
    src: 'path/to/source.json',
    dest: 'path/to/output/file.json',
    key: 'includeNodesFromFiles'
  }
}
// ...
Important: When defining the filepath values in the source .json file (e.g. item1.json, item2.json etc) this solution expects them to be relative to the source .json file itself.
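For example, given a (hypothetical) layout like the following, the item1.json, item2.json, and item3.json values inside source.json resolve to the files sitting next to it:

.
├── Gruntfile.js
├── path
│   └── to
│       ├── source.json   <-- src
│       ├── item1.json
│       ├── item2.json
│       └── item3.json
└── ...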
This is a pretty simple task, achieved by just loading the file and changing the values in the object (all file paths are relative to Gruntfile.js):
grunt.registerTask('mytask', 'My super task', () => {
  const fs = require('fs'); // needed for the synchronous file reads below

  // file with json with file paths
  // {"someRandomStuff":"myRandomStuff","includeNodesFromFiles":{"item1":"item1.json","item2":"item2.json","item3":"item3.json"}}
  let main = JSON.parse(fs.readFileSync('myjson.json', 'utf8'));

  Object.keys(main.includeNodesFromFiles).forEach((key) => {
    main.includeNodesFromFiles[key] = JSON.parse(fs.readFileSync(main.includeNodesFromFiles[key], 'utf8'));
  });

  //... do some stuff

  grunt.log.writeln(JSON.stringify(main)); //{"someRandomStuff":"myRandomStuff","includeNodesFromFiles":{"item1":{},"item2":{},"item3":{}}}
});
This is the solution I came up with. It recursively replaces filenames with file contents, if the files exist. It also accepts a base URL for the files to be included:
grunt.registerTask('buildJson', function(name) {
  let config = grunt.config.get('buildJson'); // Get the config options from gruntInitConfig for our task.
  let options = name ? (config[name] || config) : config; // If there isn't a config option that matches, use the object itself

  function genJson(path, baseUrl = '') {
    function checkIfFile(fPath) {
      try {
        if (fs.lstatSync(fPath).isFile()) {
          return true;
        } else {
          return false;
        }
      } catch (e) {
        if (e.code == 'ENOENT') {
          return false;
        } else if (e.code == 'ENAMETOOLONG') {
          return false;
        } else {
          console.error(e);
        }
      }
    }

    var json = JSON.parse(fs.readFileSync(path, {
      encoding: 'utf8'
    }));

    return JSON.stringify(json, function(key, value) {
      if (checkIfFile(baseUrl + value)) {
        return JSON.parse(genJson(baseUrl + value));
      } else {
        return value;
      }
    }, 2);
  }

  let files = grunt.file.expand(options.srcFiles);
  for (let file in files) {
    let srcPath = files[file];

    // Determine the output path to write our merged json file.
    let outputPath = path.join(options.destDir, path.basename(srcPath));

    let destJson = genJson(srcPath, options.baseUrl);
    grunt.file.write(outputPath, destJson); // Write to disk.
  }
});
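Since the task reads its settings from grunt.config.get('buildJson'), a matching configuration block is needed; a hypothetical example (the paths are placeholders):

grunt.initConfig({
  buildJson: {
    srcFiles: ['src/json/*.json'], // glob(s) passed to grunt.file.expand
    destDir: 'dist/json',          // where the merged files are written
    baseUrl: 'src/json/'           // prefix used to resolve included filenames
  }
});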