Grunt task to assemble JSON files from filenames in JSON - javascript

I have a JSON file that looks like:
{
"someRandomStuff": "myRandomStuff",
"includeNodesFromFiles": {
"item1": "item1.json",
"item2": "item2.json",
"item3": "item3.json"
}
}
And now I want to replace item1, item2, and item3 with JSON content from each respective file, so the destination file would look like:
{
"someRandomStuff": "myRandomStuff",
"includeNodesFromFiles": {
"item1": {"whatever": "...whatever was in item1.json"},
"item2": {"whatever": "...whatever was in item2.json"},
"item3": {"whatever": "...whatever was in item3.json"},
}
}
Or similarly for an array:
{
"someRandomStuff": "myRandomStuff",
"includeNodesFromFiles": [
"item1.json",
"item2.json",
"item3.json"
]
}
To:
{
"someRandomStuff": "myRandomStuff",
"includeNodesFromFiles": [
{"whatever": "...whatever was in item1.json"},
{"whatever": "...whatever was in item2.json"},
{"whatever": "...whatever was in item3.json"}
]
}
How could I do that with Grunt? I'm not finding a Grunt task that will do that out-of-the-box so far.
New to Grunt, so please bear with me.

Short answer: This is a very custom requirement and there are no existing grunt plugins which will achieve this that I'm aware of.
Solution:
You'll need to create your own grunt plugin to handle this type of requirement. The following steps describe how this can be achieved:
Firstly create a plugin file as follows. Lets name the file json-replace.js:
json-replace.js
/**
* Custom grunt plugin replaces JSON values (filepatha) with JSON file content.
*/
module.exports = function(grunt) {
'use strict';
var path = require('path');
/**
* Custom grunt multi task to replace values in JSON.
*/
grunt.registerMultiTask('jsonReplace', 'Replace values in JSON', function() {
// Read the configuration values.
var src = this.data.src;
var dest = this.data.dest;
var keyName = this.data.key;
var baseDir = path.dirname(src);
// Default options
var opts = this.options({
indent: 2
});
/**
* Determines whether the passed value is an Array.
* #param {*} value - A reference to the value to check.
* #returns {Boolean} - true if the value is an Array, otherwise false.
*/
function isArray(value) {
return Array.isArray(value);
}
/**
* Determines whether the passed value is an Object.
* #param {*} value - A reference to the value to check.
* #returns {Boolean} - true if the value is an Object, otherwise false.
*/
function isObject(value) {
return Object.prototype.toString.call(value) === '[object Object]';
}
/**
* Reads a file's contents, parsing the data as JSON.
* #param {String} srcPath - The filepath to the JSON file to parse.
* #returns {Object}- The parsed JSON data.
*/
function readJson(srcPath) {
return grunt.file.readJSON(srcPath);
}
/**
* Writes JSON data to a file.
* #param {String} destPath - A filepath for where to save the file.
* #param {Object|Array} data - Value to covert to JSON and saved to file.
* #param {Number} [indent=2] - The no. of spaces to indent the JSON.
*/
function writeJson(destPath, data, indent) {
indent = (typeof indent !== 'undefined') ? indent : 2;
grunt.file.write(destPath, JSON.stringify(data, null, indent));
grunt.log.writeln('Saved \x1b[96m1\x1b[0m file');
}
/**
* Checks whether a file exists and logs any missing files to console.
* #param {String} filePath - The filepath to check for its existence.
* #returns {Boolean} - true if the filepath exists, otherwise false.
*/
function fileExists(filePath) {
if (!grunt.file.exists(filePath)) {
grunt.fail.warn('Unable to read \"' + filePath + '\"');
return false;
}
return true;
}
/**
* Checks whether type of value is a string and logs an error if not.
* #param {*} value - The value to check
* #returns {Boolean} - true if type of value is 'string', otherwise false.
*/
function isString(value) {
if (typeof value !== 'string') {
grunt.fail.warn('Value type must be a string: found \"' + value + '\"');
return false;
}
return true;
}
/**
* Processes each Array item for a given key.
* #param {Object} data - The parsed JSON data to process.
* #param {String} keyName - Name of key whose Array values to process.
* #param {String} baseDir - Base directory path of the source json file.
*/
function processArrayItems(data, keyName, baseDir) {
var replacement = [];
data[keyName].forEach(function(item) {
var fullPath = path.join(baseDir, item);
if (isString(item) && fileExists(fullPath)) {
replacement.push(readJson(fullPath));
}
});
data[keyName] = replacement;
writeJson(dest, data, opts.indent);
}
/**
* Processes an Objects key/value pair for a given Object.
* #param {Object} data - The parsed JSON data to process.
* #param {String} keyName - Name of key whose property values to process.
* #param {String} baseDir - Base directory path of the source json file.
*/
function processObjectValues(data, keyName, baseDir) {
var replacement = {};
Object.keys(data[keyName]).forEach(function(key) {
var accessor = data[keyName][key];
var fullPath = path.join(baseDir, accessor);
if (isString(accessor) && fileExists(fullPath)) {
replacement[key] = readJson(fullPath);
}
});
data[keyName] = replacement;
writeJson(dest, data, opts.indent);
}
// Read the source JSON file
var srcData = readJson(src);
// Check if the `key` provided exists in source JSON.
if (!srcData[keyName]) {
grunt.fail.warn('Missing given key "' + keyName + '" in ' + src);
}
// Invoke the appropriate processing for key value.
if (isArray(srcData[keyName])) {
processArrayItems(srcData, keyName, baseDir);
} else if (isObject(srcData[keyName])) {
processObjectValues(srcData, keyName, baseDir);
} else {
grunt.fail.warn('Value for "' + keyName + '" must be object or array');
}
});
};
Save json-replace.js in a folder named custom-grunt-tasks which resides in your project's root directory (i.e. at the same level as Gruntfile.js and package.json). For instance:
.
├── Gruntfile.js
├── custom-grunt-tasks <---
│ └── json-replace.js <---
├── node_modules
│ └── ...
├── package.json
└── ...
Add the following Task to your Gruntfile.js:
Gruntfile.js
module.exports = function(grunt) {
grunt.loadTasks('custom-grunt-tasks');
grunt.initConfig({
jsonReplace: { // <-- Task
targetA: { // <-- Target
src: 'path/to/source.json',
dest: 'path/to/output/file.json',
key: 'includeNodesFromFiles'
}
}
// ...
});
grunt.registerTask('default', ['jsonReplace']);
}
Notes: (regarding configuration of Gruntfile.js above)
The line which reads grunt.loadTasks('custom-grunt-tasks'); loads the custom plugin (i.e. json-replace.js) from the directory named custom-grunt-tasks.
A Task named jsonReplace is added to grunt.initConfig({...}) which contains one Target arbitrarily named targetA.
The value for the src property should be replaced with a valid filepath which points to your source .json file.
The value for the dest property should be replaced with a filepath for where the new .json file should be saved.
The value for the key should be replaced with a valid keyname. The name of the key provided should be for the key which holds either an Object or Array of .json filepaths (For example; includeNodesFromFiles, as given in your two examples)
Additional info:
json-replace.js is multi-taskable which basically means you can configure multiple Targets inside the jsonReplace task if required. For example:
// ...
jsonReplace: { // <-- Task
targetA: { // <-- Target
src: 'path/to/source.json',
dest: 'path/to/output/file.json',
key: 'includeNodesFromFiles'
},
targetB: { // <-- Another Target
src: 'path/to/another/source.json',
dest: 'path/to/output/another/file.json',
key: 'anotherKeyName'
}
}
// ...
Multiple Targets may be useful if you want to process multiple .json files.
json-replace.js expects the value of the key (e.g. includeNodesFromFiles) to be either:
An Object with nested key/value pairs, (as per your first example), whereby each nested key has a filepath value for an existing .json file.
Or, an Array, (as per your second example), whereby each item of the Array is a filepath value for an existing .json file.
Note: An error will be logged to the console if the structure of the given JSON key does not match either of the two aforementioned structures.
json-replace.js indents the content of the resultant .json file(s) using two spaces by default. However, if you want to change this you can utilize the indent option. For example the following Task configuration will indent the resultant .json file by four spaces:
// ...
jsonReplace: {
options: {
indent: 4 // <-- Custom indent option
},
targetA: {
src: 'path/to/source.json',
dest: 'path/to/output/file.json',
key: 'includeNodesFromFiles'
}
}
// ...
Important: When defining the filepath values in the source .json file (e.g. item1.json, item2.json etc) this solution expects them to be relative to the source .json file itself.

This is a pretty simple task: just load the file and change the value in the object (all file paths are relative to gruntfile.js):
grunt.registerTask('mytask', 'My super task', () => {
  // Source json holding the file paths, e.g.:
  //{"someRandomStuff":"myRandomStuff","includeNodesFromFiles":{"item1":"item1.json","item2":"item2.json","item3":"item3.json"}}
  let main = JSON.parse(fs.readFileSync('myjson.json', 'utf8'));
  // Swap every filepath value for the parsed content of that file.
  for (const key of Object.keys(main.includeNodesFromFiles)) {
    const includePath = main.includeNodesFromFiles[key];
    main.includeNodesFromFiles[key] = JSON.parse(fs.readFileSync(includePath, 'utf8'));
  }
  //... do some stuff
  grunt.log.writeln(JSON.stringify(main)); // e.g. {"someRandomStuff":"myRandomStuff","includeNodesFromFiles":{"item1":{},"item2":{},"item3":{}}}
});

This is the solution I came up with. It recursively replaces filenames with files, if they exist. Also accepts a base URL for the files to be included:
/**
 * Builds json files by recursively replacing filename values with the parsed
 * content of those files, when they exist. An optional baseUrl is prepended
 * to each candidate path.
 * @param {string} [name] - optional sub-key of the `buildJson` grunt config.
 */
grunt.registerTask('buildJson', function(name) {
  let config = grunt.config.get('buildJson'); // Get the config options from gruntInitConfig for our task.
  let options = name ? (config[name] || config) : config; // If there isn't a config option that matches, use the object itself

  function genJson(jsonPath, baseUrl = '') {
    // Returns true when fPath names an existing regular file; missing or
    // overlong paths (and non-files) yield false. Unexpected fs errors are
    // logged and also treated as "not a file".
    function checkIfFile(fPath) {
      try {
        return fs.lstatSync(fPath).isFile();
      } catch (e) {
        // FIX: the original fell through with an implicit `undefined` on
        // unexpected errors; return false explicitly after logging.
        if (e.code !== 'ENOENT' && e.code !== 'ENAMETOOLONG') {
          console.error(e);
        }
        return false;
      }
    }
    var json = JSON.parse(fs.readFileSync(jsonPath, {
      encoding: 'utf8'
    }));
    // The replacer visits every value; any value that (with baseUrl
    // prepended) names an existing file is expanded into that file's JSON.
    return JSON.stringify(json, function(key, value) {
      if (checkIfFile(baseUrl + value)) {
        // NOTE: recursion matches the original — nested includes are
        // resolved without re-applying baseUrl.
        return JSON.parse(genJson(baseUrl + value));
      } else {
        return value;
      }
    }, 2);
  }

  let files = grunt.file.expand(options.srcFiles);
  // FIX: iterate values with for...of — the original used for...in, which
  // walks index keys (and any inherited enumerable properties) on an array.
  for (const srcPath of files) {
    // Determine the output path to write our merged json file.
    const outputPath = path.join(options.destDir, path.basename(srcPath));
    const destJson = genJson(srcPath, options.baseUrl);
    grunt.file.write(outputPath, destJson); // Write to disk.
  }
});

Related

Save params from the first call of a recursive function

I have a function that searches for files in folders and recursively calls itself if a subfolder occurs.
I want to optimize the search algo in that way that i can store the returned data and it's corresponding parameters.
So if a new search is issued. I can check if an equal search was made before and return the saved result instead of doing a new search.
My approach was to push the params into the resulting array at first or last. But this has to happen only one time in the whole recursion process.
This is my function:
/**
 * List all files that the matcher has hit.
 * @param {String} start path from where to start the search
 * @param {Object} [options] search options
 * @param {RegExp} [options.matcher] expression to match while searching
 * @param {Boolean} [options.folders] search in subfolders, default = true
 * @returns {Array} files that the matcher has hit
 */
list(start, { matcher, folders = true } = {}) {
  if (!fs.existsSync(start)) throw new Error(`${start} doesn't exists.`)
  const dir = fs.readdirSync(start)
  const files = []
  for (let iCnt = 0; iCnt < dir.length; iCnt++) {
    const item = path.resolve(start, dir[iCnt])
    let stat = fs.statSync(item)
    // `switch (true)` dispatch: the first truthy case wins, so a directory
    // is recursed into before the matcher is consulted.
    switch (true) {
      case stat.isDirectory() && folders:
        // NOTE(review): bare `list(...)` — if this is a class method, the
        // recursive call presumably needs to be `this.list(...)`; confirm.
        files.push(...list(item, { matcher, folders }))
        break
      case matcher && matcher.test(item):
        files.push(item)
        break
      case !matcher:
        // No matcher supplied: every entry is collected.
        files.push(item)
        break
    }
  }
  return files
}
I thought a lot about it. But can't get my head around.
Does anyone have an idea?
When the first call in a recursive sequence is special, I usually handle it by making the recursive part a worker function, and making the main function a wrapper for it that does the special part.
In your case, that would mean renaming your existing list (perhaps to listWorker) and making a wrapper list function that does the caching. Roughly:
function list(start, { matcher, folders = true } = {}) {
let result = getFromCache(/*...*/);
if (!result) {
result = listWorker(start, {matcher, folders});
putInCache(/*...*/, result);
}
return result;
}

Get siblings of created file in firebase cloud function triggered by onFinalize

I have a cloud function that triggers when new file is created in firebase storage. Inside this function logic I need to collect every other file located at the same path in array. But I don't know how.
exports.testCloudFunc = functions.storage.object().onFinalize(async object => {
const filePath = object.name;
const { Logging } = require('#google-cloud/logging');
console.log(`Logged: ${filePath}`);
let obj = JSON.stringify(object);
console.log(`Logged: ${obj}`);
});
After that I will try to merge all PDFs into one new file and save it back to firebase storage at the same path. Any help is highly appreciated! Thank you in advance for your wisdom )
As per the documentation Doug Stevenson linked (2nd code sample for Node.js), you can list objects from a specified folder within your bucket using prefixes and delimiters.
Sample from the mentioned documentation:
/**
 * TODO(developer): Uncomment the following lines before running the sample.
 */
// const bucketName = 'Name of a bucket, e.g. my-bucket';
// const prefix = 'Prefix by which to filter, e.g. public/';
// const delimiter = 'Delimiter to use, e.g. /';

// Imports the Google Cloud client library.
// FIX: the pasted require path used '#google-cloud/storage' (Stack Overflow
// mangles '@' into '#'); the installable package is '@google-cloud/storage'.
const {Storage} = require('@google-cloud/storage');

// Creates a client
const storage = new Storage();

async function listFilesByPrefix() {
  /**
   * This can be used to list all blobs in a "folder", e.g. "public/".
   *
   * The delimiter argument can be used to restrict the results to only the
   * "files" in the given "folder". Without the delimiter, the entire tree under
   * the prefix is returned. For example, given these blobs:
   *
   * /a/1.txt
   * /a/b/2.txt
   *
   * If you just specify prefix = '/a', you'll get back:
   *
   * /a/1.txt
   * /a/b/2.txt
   *
   * However, if you specify prefix='/a' and delimiter='/', you'll get back:
   *
   * /a/1.txt
   */
  const options = {
    prefix: prefix,
  };
  if (delimiter) {
    options.delimiter = delimiter;
  }
  // Lists files in the bucket, filtered by a prefix
  const [files] = await storage.bucket(bucketName).getFiles(options);
  console.log('Files:');
  files.forEach(file => {
    console.log(file.name);
  });
}

listFilesByPrefix().catch(console.error);
Does that mean that all files will be returned at first and then will
be filtered by prefix?
As I see in the code sample above, the array [files] will store the objects that already pass the filter requirements:
const [files] = await storage.bucket(bucketName).getFiles(options);

How to include js files in header of wordpress pages that are activated on-click

I am attempting to use wordpress to build a website that integrates google maps. I am doing some overlays with the maps and use the google developers API and Python to make the appropriate javascript. I have successfully written the js files and Python necessary to accomplish this.
My website is built in WordPress and I would like to add a page (not the home page) that has n links and each one would populate a box with the corresponding map. I can take care of the layout and design issues but I am at a loss on how to:
a) Include the javascript as a file that
b) gets called upon clicking the link and thus populates that map without calling a new page
That is, the javascript is HUGE because it may include thousands of lat/lon points. Therefore including n of these written into the header is unreasonable. I want to simply call it from filename.js when the link is clicked.
There is a plugin that allows me to include whatever I want in the header. So, if I can find out where to put the *.js files (or txt file) in the directory tree and how to have the corresponding file activated upon click I should be good. Thanks!
This Display different maps with onClick event - Google Maps V3. kind of helps with doing an on-click display but everyone's solution was to make one map. I cannot do that. I am overlaying vast amounts of data.
Here is a way you can get that done. (Jump down to the get started part of the script.)
For brevity, I've included a bunch of scripts in one 'file', but you'll want to break them in to individual files.
You may also need to try the html and js in jsbin js bin example, b/c SO may or may not allow the dynamic loading of js.
// Self-executing wrapper; `undefined` is shadowed as a parameter so it
// cannot have been reassigned by older environments.
(function(undefined) {
  /**
   * @author (@colecmc)
   * @method turn collection into an array
   * @param {object} collection - NodeList, HTMLCollection, etc. Should have an "item" method and/or a "length" property
   */
  // NOTE(review): assigned without var/let/const — this creates a global
  // (the IIFE is not in strict mode at this point); confirm that is intended.
  ToArray = collection => Array.prototype.slice.call(collection);
  /** \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ **/
  // Minimal publish/subscribe hub, exposed as a global singleton.
  Observer = (function(undefined) {
    /**
     * pub sub
     */
    'use strict';
    // Monotonically increasing id used to build subscription tokens.
    var subUid = -1;
    return {
      topics: {},
      subscribe: function(topic, func) {
        /**
         * @param {string} topic
         * @param {function} func
         * @returns {string} - a token such as '3'
         * @example Observer.subscribe('any-valid-string',function(name,resp){
             console.log(resp.prop);
           });
         */
        if (!Observer.topics[topic]) {
          Observer.topics[topic] = [];
        }
        var token = (++subUid).toString();
        Observer.topics[topic].push({
          token: token,
          func: func
        });
        return token;
      },
      publish: function publish(topic, args) {
        /**
         * @param {string} topic
         * @param {object} args
         * @returns {boolean} - true if topic is valid, false otherwise
         * @example Observer.publish('any-valid-string',{
             prop: 'this is a test'
           });
         */
        if (!Observer.topics[topic]) {
          return false;
        }
        // Deliver asynchronously (next tick) so publish never re-enters the
        // caller; subscribers are invoked in reverse subscription order.
        setTimeout(function() {
          var subscribers = Observer.topics[topic],
            len = subscribers ? subscribers.length : 0;
          while (len--) {
            subscribers[len].func(topic, args);
          }
        }, 0);
        return true;
      },
      unsubscribe: function unsubscribe(token) {
        /**
         * @param {string} token - value should be saved from the original subscription
         * @example Observer.unsubscribe('2');
         * @example Observer.unsubscribe(member); - where member is the value returned by Observer.subscribe();
         */
        var m,
          forEachTopic = function(i) {
            if (Observer.topics[m][i].token === token) {
              Observer.topics[m].splice(i, 1);
              return token;
            }
          };
        // Scan every topic for the token and remove the matching entry.
        for (m in Observer.topics) {
          if (Observer.topics.hasOwnProperty(m)) {
            Observer.topics[m].forEach(forEachTopic);
          }
        }
        return false;
      }
    };
  }(undefined));
  /** \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ **/
  SetAttributes = function(el, attrs) {
    /**
     * @author (@colecmc)
     * @method simple for in loop to help with creating elements programmatically
     * @param {object} el - HTMLElement attributes are getting added to
     * @param {object} attrs - object literal with key/values for desired attributes
     * @example SetAttributes(info,{
     *   'id' : 'utswFormInfo'
     *   'class' : 'my-class-name'
     * });
     */
    'use strict';
    var key;
    for (key in attrs) {
      if (attrs.hasOwnProperty(key)) {
        el.setAttribute(key, attrs[key]);
      }
    }
    return el;
  };
  /** \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ **/
  GetScript = function(url, fullPath) {
    /**
     * @author (@colecmc)
     * @version 1.0.4
     * @requires Swlxws.SetAttributes, Swlxws.Observer
     * @method dynamically add script tags to the page.
     * @param {string} url - relative path and file name - do not include extension
     * @param {string} fullPath - absolute path
     * @example GetScript('myLocalScript');
     * @example GetScript('','https://www.google-analytics.com/analytics.js');
     */
    'use strict';
    // Shared load/error handler: publishes the outcome on the Observer so
    // interested code can react once the script tag settles.
    function onLoad(event) {
      var result;
      if (event.type === 'load') {
        result = 1;
      } else {
        result = -1;
      }
      Observer.publish('get-script-onload-complete', {
        successful: result,
        eventData: event
      });
    }
    var JSPATH = '/js/',
      /* or where ever you keep js files */
      el = document.createElement('script'),
      attrs = {
        defer: true,
        src: null,
        type: 'text/javascript'
      };
    /** look for a string based, protocol agnostic, js file url */
    if (typeof fullPath === 'string' && fullPath.indexOf('http') === 0) {
      attrs.src = fullPath;
    }
    /** look for any string with at least 1 character and prefix our root js dir, then append extension */
    // NOTE: a non-empty `url` wins over `fullPath` because this assignment
    // runs second and overwrites attrs.src.
    if (typeof url === 'string' && url.length >= 1) {
      attrs.src = JSPATH + url + '.js';
    }
    SetAttributes(el, attrs);
    el.addEventListener('load', onLoad);
    el.addEventListener('error', onLoad);
    document.body.appendChild(el);
    return el;
  };
  /** \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ **/
  /**
   * Get Started
   */
  // Click handler: loads the script whose absolute URL is stored in the
  // element's data-namespace-url attribute.
  function onClick(event) {
    GetScript('', event.target.dataset.namespaceUrl);
  }
  Observer.subscribe('get-script-onload-complete', function(name, resp) {
    /** check to make resp is what you expect, ie: the correct script loaded */
    /** then it is safe to use */
  });
  // Wire every .load-scripts element up to the click handler.
  ToArray(document.querySelectorAll('.load-scripts')).map(script => script.addEventListener('click', onClick, false));
}(undefined));
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<title>How to include js files in header of wordpress pages that are activated on-click</title>
</head>
<body>
Load Google Analytics
</body>
</html>
You can use the function wp_enqueue_script() to load the necessary JS files on only the templates you want.
As for your large data set, I recommend that you cache it in an external .json file and use wp_enqueue_script() to load it only when necessary.
Well if the onclick event suggestion is pretty much what you want and you are just concerned about the large amount of data. Then there are a few ways to tackle it. I am not sure if the dataset is a js file or php/json files but i came across a similar issue on one of my projects, dont remember properly but i was doing something with maxmind's ip/location data set.
So I just split the large file into 3 smaller ones. Then I looped through each of the files and if the stuff that I was looking for was found in the file then I just broke out. And definitely, as Brian suggested, caching and using a CDN would help a lot.

How do I generalize this set of Javascript methods, including promises?

I have the following code setup to initialize a single field's autosuggest feature using jQuery and MagicSuggest. It's relatively straight forward. I have modularized a bit of it because I intend on using it to initialize other fields as well with MagicSuggest. One extraneous part is the canonical name conversion, but it's a necessary function for this particular data set I'm working with. (Problem I'm having trouble with explained below ...)
/**
 * Initialize Flights From autosuggest feature.
 * @return {void}
 */
function initFlightsFromAutosuggest() {
  // Flights From Typeahead *************************************
  var msField = $('#magicsuggest.direct_flights_from');
  var msConfig = {
    id : 'direct_flights_from',
    name : 'direct_flights_from',
    minChars : 1,
    highlight : false,
    valueField : 'id',
    displayField : 'name',
    placeholder : getMSPlaceholder(msField, 'City'),
    resultAsString: true,
    useTabKey : true,
    useCommaKey : true,
    useZebraStyle : true,
    hideTrigger : true,
    sortOrder : 'canonical_name',
    maxDropHeight : 500,
    data : '/api/v1/cities',
    defaultValues : msField.attr('data-default').split(','),
    renderer : function(data) { return convertCanonical(data.canonical_name) }
  };
  var ms = msField.magicSuggest(msConfig);
  // Once loaded, add pre-selected values if there are any.
  // (As in the original: addDefaults is invoked immediately and its return
  // value is what gets registered as the 'load' handler.)
  $(ms).on('load', addDefaults(ms, msField));
}
/**
 * Gets placeholder value for MagicSuggest instances.
 * @param {element} el DOM element
 * @param {string} defaultString Default string to use
 * @return {string} '' when the field already carries data-default values,
 *                  otherwise defaultString.
 */
function getMSPlaceholder(el, defaultString) {
  var hasDefaults = el.attr('data-default').length > 0;
  return hasDefaults ? '' : defaultString;
}
/**
 * Converts canonical name into a city, state string (dropping country,
 * fixing spacing).
 * @param {string} canonical_name Full canonical name
 * @return {string|undefined} Short name without country, or undefined when
 *                            no canonical name was supplied.
 */
function convertCanonical(canonical_name) {
  if (typeof canonical_name !== 'undefined') {
    canonical_name = canonical_name.replace(',United States', '');
    // FIX: space out ALL remaining commas — the original string form of
    // replace() only rewrote the first one, leaving e.g.
    // "Oxford, Oxfordshire,England" half-fixed.
    canonical_name = canonical_name.replace(/,/g, ', ');
    return canonical_name;
  }
  // No canonical name supplied: propagate undefined to the caller.
  return;
}
That all said, below is what I have to do to pre-populate this one field with data previously submitted.
/**
 * Adds pre-selected values (ids) loaded into the 'data-default' attribute into the input field.
 * @param {object} ms MagicSuggest instantiation
 * @param {element} msField DOM element used by MagicSuggest
 */
function addDefaults(ms, msField) {
  // Get the default attribute value as an array of id strings.
  var defaultIds = msField.attr('data-default').split(',');
  // Setup array of requests — one jqXHR promise per id.
  var requests = [];
  // Push all the requests into an array
  $.each(defaultIds, function(index, id) {
    requests.push($.getJSON('/api/v1/cities/' + id));
  });
  // Create a promise, and when all the requests are done (promises fulfilled)
  // send the args (json) to the .done callback.
  // NOTE(review): with more than one request each `arguments` entry is a
  // [data, statusText, jqXHR] triple, so `arg[0]` extracts the payload; with
  // exactly ONE request jQuery does NOT wrap the arguments this way, so the
  // indexing presumably misbehaves for a single id — confirm.
  var promise = $.when.apply($, requests).then(function () {
    var args = Array.prototype.slice.call(arguments);
    return args.map(function(arg) { return arg[0] });
  });
  // Setup the callback function for 'done'
  promise.done(function(json) {
    // Setup results array
    var results = [];
    // Got an auth error from the API, so return early. No results.
    if (typeof(json[0].auth) === 'object') {
      return false;
    }
    // For each item, add the proper structure to the results array.
    // NOTE(review): assumes each payload is an array whose first element
    // carries a `city` object — verify against the /api/v1/cities response.
    $.each(json, function (index, id) {
      results.push({
        value: json[index][0]['city']['id'],
        name: json[index][0]['city']['name']
      });
    });
    // `results` holds plain objects (not promises); $.when treats them as
    // already-resolved values and fires immediately.
    var resultPromise = $.when.apply($, results).then(function () {
      var args = Array.prototype.slice.call(arguments);
      return args.map(function(arg) { return arg });
    });
    resultPromise.done(function(results) {
      ms.setValue(results);
      ms.setDataUrlParams({});
      // NOTE(review): this selector result is unused — dead statement?
      $('.input')
    });
  });
}
There has to be a way to generalize this, but I'm new at promises and $.Deferred so I've been hitting a wall of understanding.
The other fields I'll be instantiating with MagicSuggest will be using different URLs for the $.getJSON() method (probably all using IDs though) (used for finding what the user had previously chosen, thus what to pre-populate the field with), and will obviously have different JSON responses for those calls. So, the trouble spots for me are how to get this all to work together and still DRY.
As soon as I start breaking apart addDefaults() I hit problems because ms is undefined in resultPromise.done, the URLs with the IDs in them, and the json structure inside the $.each command.
How would you refactor this to be more re-usable? Comments/explanations on promises and deferred are always helpful too.
With a fresh head after a few days' rest, and with the insight of this post, I realized I didn't need to do all this just to add default values. Thankfully, just adding the following to the init worked perfectly: value: msField.attr('data-default').split(','), (I'm adding the values into the HTML under the data-default attribute via PHP.)
Code: deleted.
Problem: solved.

package files by folder with gulp

I want this:
|-- guides
| |-- _guide.hbs
| |-- guide.hbs
| `-- index.hbs
|-- index.hbs
`-- noroute.hbs
turn into this:
|-- common.js
`-- guides.js
As you can see guides folder squashed into guides.js, and . folder squashed into common.js
Below is my ugly solution.
Inspired by this post
function getFolderMap(dir) {
var fs = require('fs');
var path = require('path');
var result = {};
fs.readdirSync(dir)
.filter(function(file) {
if( fs.statSync(path.join(dir, file)).isDirectory()) {
result[file] = file;
}
});
return result;
};
// Squash each templates/<folder> into <folder>.js, and the top-level
// templates into common.js, merging all the streams into one result.
gulp.task('build-dev-templates3', function() {
  var merged = require('merge-stream')();
  var templatePaths = getFolderMap(paths.src + '/templates');
  // The templates root itself becomes the "common" bundle.
  templatePaths['./'] = 'common';
  Object.keys(templatePaths).forEach(function(srcDir) {
    var bundleName = templatePaths[srcDir];
    merged.add(
      gulp.src([paths.src + '/templates/' + srcDir + '**/*.hbs'])
        .pipe($.process())
        .pipe($.concat(bundleName + '.js'))
        .pipe(gulp.dest(paths.dest + '/templates'))
    );
  });
  return merged;
});
What is the gulp way to achieve this? Please at least share some guideful tools.
Edit2
I need to get rid of getFolderMap function and solve this with pure
streams. The closest i can get is:
// Filter that keeps only directory entries from the stream.
var directoryFilter = $.filter(function (file) {
  return file.isDirectory();
});
// FIX: the original read `paths.src + + '/templates/**'` — the stray second
// `+` unary-coerced the glob string to NaN, producing a bogus source path.
gulp.src([paths.src + '/templates/**'])
  .pipe(directoryFilter)
  .pipe(
    //here I have list of directories
    //I need a gulp plugin to get the contents
    // so i can process them.
  );
This issue is related https://github.com/gulpjs/gulp/issues/386.
Final Solution
This is my final solution based on spiralx's answer.
https://gist.github.com/eguneys/2fdbe7ac83dfab04748a
Something like this is my first guess, I use the forEachModule function at work to execute a sub-task for every module and then combine the results - helps with some plugins that have issues with paths and includes (gulp-stylus IIRC):
var gulp = require('gulp');
var plugins = require('gulp-load-plugins')();
var es = require('event-stream');
var path = require('path');
var config = require('./build.config');
// ------------------------------------
/*
 * For each module execute subtaskFunc and get result stream, then
 * combine all of the streams and return that.
 *
 * @param [{Object}] modules array of module objects
 * @param {Function} subtaskFunc function(module) -> stream
 * @return {Object} a stream
 */
function forEachModule(modules, subtaskFunc) {
  // One stream per module (subtaskFunc receives the usual map arguments).
  var subtaskStreams = modules.map(subtaskFunc);
  // event-stream's concat merges them into a single combined stream.
  return es.concat.apply(null, subtaskStreams);
}
// ------------------------------------
// Compile each module's .hbs templates into <module>.js, then combine all
// of the per-module files into one templates.js.
gulp.task('templates', function() {
  var outDir = path.join(config.dest, 'templates');
  var perModule = function(module) {
    var pattern = path.join(config.src, 'templates', module, '**/*.hbs');
    // Compile .hbs files, wrap in a CommonJS module, combine into
    // `<module>.js` and write to dest.
    return gulp.src(pattern)
      .pipe(plugins.handlebars().on('error', plugins.util.log))
      .pipe(plugins.defineModule('plain'))
      .pipe(plugins.declare({
        namespace: 'app.templates.' + module
      }))
      .pipe(plugins.concat(module + '.js'))
      .pipe(gulp.dest(outDir));
  };
  // Combine all module template files into one?
  return forEachModule(config.modules, perModule)
    .pipe(plugins.concat('templates.js'))
    .pipe(gulp.dest(config.dest));
});
Edit:
This code is almost identical to what was there, but looks for directories under 'src/app' for modules instead of pre-defining the list, and doesn't generate any configuration outside of the gulp task function itself.
The reason it's like this to begin with was issues using gulp-stylus on a stream such as src/app/**/*.styl when a) using #include or anything referring to relative paths, or b) passing options to either Stylus itself, or a plugin such as Nib.
// Module list: the name of every directory directly under src/app.
// (Synchronous fs calls are fine here — this runs once at gulpfile load.)
var modules = fs.readdirSync('src/app').filter(function(name) {
  return fs.statSync('src/app/' + name).isDirectory()
});
// ------------------------------------
// Build one .css (plus minified .min.css) per module, then merge every
// module stream and concatenate the lot into app.css.
gulp.task('styles', function() {
  var perModule = function(name) {
    return gulp.src('src/app/' + name + '/**/*.styl')
      .pipe(plugins.stylus({
        use: [
          require('nib')(),
          require('bootstrap3-stylus')()
        ],
        paths: [
          'src/common'
        ]
      }))
      .pipe(plugins.concat(name + '.css'))
      .pipe(gulp.dest('dist/css'))
      .pipe(plugins.minifyCss())
      .pipe(plugins.rename({
        suffix: '.min'
      }))
      .pipe(gulp.dest('dist/css'));
  };
  var cssStreams = modules.map(perModule);
  return plugins.mergeStream(cssStreams)
    .pipe(plugins.concat('app.css'))
    .pipe(gulp.dest('dist/css'));
});

Categories

Resources