So I am writing up my gulpfile.js and I have come to a point where I need to avoid JavaScript's asynchronous behavior. In short, this is for file-system read/write.
The problem is that all the solutions I have found online so far create several sub-tasks, which is something I want to avoid so that I don't have to write confusing documentation about which tasks should and shouldn't be run from the command line, what order they need to be run in, and so on.
My Question: How can I make the below script run each part synchronously, without creating sub-tasks?
gulp.task('rebuild', function () {
    // Remove old build
    gulp.src('build/', {read: false})
        .pipe(rimraf());

    // Copy all of the non-generated files
    gulp.src('src/**/*')
        .pipe(gulp.dest('build/'));

    // Parse SASS/LESS and minify JS
    build_styles();
    build_scripts();
});
Well, if all else fails, you can always fall back to callback hell:
gulp.task('rebuild', function () {
    // Remove old build
    gulp.src('build/', {read: false})
        .pipe(rimraf())
        .on('end', function () {
            // Copy all of the non-generated files
            gulp.src('src/**/*')
                .pipe(gulp.dest('build/'))
                .on('end', function () {
                    // Parse SASS/LESS and minify JS
                    build_styles();
                    build_scripts();
                });
        });
});
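If sub-tasks are really off the table and you are able to use gulp 4, a hedged alternative is gulp.series with plain (unregistered) functions: only rebuild is exposed on the command line, yet every step still runs in order. A rough sketch reusing the pieces from the question (clean and copy are just local helper names introduced here; it also assumes build_styles and build_scripts return their streams so gulp knows when they finish):
function clean() {
    // Remove old build (allowEmpty avoids an error when build/ doesn't exist yet)
    return gulp.src('build/', {read: false, allowEmpty: true})
        .pipe(rimraf());
}

function copy() {
    // Copy all of the non-generated files
    return gulp.src('src/**/*')
        .pipe(gulp.dest('build/'));
}

// Only 'rebuild' appears in `gulp --tasks`; the helpers stay private.
gulp.task('rebuild', gulp.series(clean, copy, build_styles, build_scripts));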
Related
I've been trying to set up functions to download frameworks and then install them into my development workflow with gulp-download. Every time I run the gulp-download step in series, it effectively runs last: the install function tries to move the files before they have downloaded.
I tried using merge and running everything in a single function, and then I split it up and used a task in series to run the pieces. Neither way was successful.
// DOWNLOAD BOOTSTRAP
function downloadBootstrap(cb) {
    download('https://github.com/twbs/bootstrap/releases/download/v4.0.0/bootstrap-4.0.0-dist.zip')
        .pipe(unzip())
        .pipe(gulp.dest('downloads/bootstrap'));
    cb();
}

// INSTALL BOOTSTRAP
function installBootstrap(cb) {
    var css = gulp.src('downloads/bootstrap/css/*.{min.css,min.css.map}')
        .pipe(dest('_developer/2-css/0-frameworks/bootstrap'));
    var js = gulp.src('downloads/bootstrap/js/*.{min.js,min.js.map}')
        .pipe(dest('_developer/3-js/0-frameworks/bootstrap'));
    var clear = gulp.src('downloads/bootstrap', {read: false, allowEmpty: true})
        .pipe(clean());
    return merge(css, js, clear); // Combined src
    cb();
}

gulp.task('bootstrap', gulp.series('downloadBootstrap', 'installBootstrap'));
You need to make sure you only call the callback function when your task is complete. Inside your download function, the call to download(...) returning doesn't mean the download has finished. However, since you're using a pipeline here, you can eliminate the callback parameter altogether and return the stream instead. (On an unrelated note, I would avoid having two functions named download.)
// DOWNLOAD BOOTSTRAP
function downloadBootstrap() {
    return download('https://github.com/twbs/bootstrap/releases/download/v4.0.0/bootstrap-4.0.0-dist.zip')
        .pipe(unzip())
        .pipe(gulp.dest('downloads/bootstrap'));
}
Returning the stream will ensure that the stream has been fully processed before the next task continues. Also, you can just remove the callback from your install function since it will never actually be used. (Nothing runs after your function returns.)
I worked it out, I just had to add return to the download, lol. Now the download finishes before it moves on to the install, but now only the css is migrated; the js remains and the clean isn't running. I had these working before.
gulp.task("bootstrap", gulp.series(downloadBootstrap, installBootstrap));

function downloadBootstrap() {
    return download('https://github.com/twbs/bootstrap/releases/download/v4.0.0/bootstrap-4.0.0-dist.zip')
        .pipe(unzip())
        .pipe(gulp.dest('downloads/bootstrap'));
}

function installBootstrap() {
    return gulp.src('downloads/bootstrap/css/*.{min.css,min.css.map}')   // Gather [developer] css files
        .pipe(gulp.dest('_developer/2-css/0-frameworks/bootstrap'));     // Deliver css files to [build]
    return gulp.src('downloads/bootstrap/js/*.{min.js,min.js.map}')      // Gather [developer] js files
        .pipe(gulp.dest('_developer/3-js/0-frameworks/bootstrap'));      // Deliver js files to [build]
    return gulp.src('downloads/bootstrap', { allowEmpty: true })
        .pipe(clean());
}
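Only the first return in installBootstrap ever executes; the second and third return statements are unreachable, which is why the js copy and the clean never run. A minimal sketch of a fix, going back to the merge approach from the original question (assuming the same merge helper is still imported):
function installBootstrap() {
    // merge combines the three streams, so the task only finishes when all of them do
    var css = gulp.src('downloads/bootstrap/css/*.{min.css,min.css.map}')
        .pipe(gulp.dest('_developer/2-css/0-frameworks/bootstrap'));
    var js = gulp.src('downloads/bootstrap/js/*.{min.js,min.js.map}')
        .pipe(gulp.dest('_developer/3-js/0-frameworks/bootstrap'));
    var clear = gulp.src('downloads/bootstrap', { read: false, allowEmpty: true })
        .pipe(clean());
    // if the clean ever races the copies, move it into its own gulp.series step instead
    return merge(css, js, clear);
}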
So I'm coming back to a personal project after a (prolonged) break, and I'm getting back into the flow of things. One thing that smacks me in the face is that my watchers aren't working. If, for example, I call gulp watch-for-jshint, I expect JSHint to be run anytime I modify a JS file, whether a root one (gulp.js) or one of my client-app ones (./client-app/js/controllers/register.js). That's not happening. I can see that watch is starting, but it never calls JSHint when I change my files!
Anyone have any ideas what is going wrong or how to diagnose? As far as I can tell, I have the syntax right... And because (for testing purposes) I'm directly calling watch-for-jshint, I should be avoiding the usual pitfall of never actually calling my watcher.
Simplified gulp file:
var gulp = require('gulp'),
    clean = require('gulp-clean'),
    jshint = require('gulp-jshint'),
    notify = require('gulp-notify'),
    watch = require('gulp-watch');

gulp.task('jshint', function () {
    console.log("Running JSHint");
    return gulp.src(['*.js', 'client-app/**/*.js'])
        .pipe(jshint({"moz": true}))
        .pipe(jshint.reporter('default'))
        .pipe(jshint.reporter('fail'))
        .on('error', notify.onError({ sound: "Funk" }));
});

gulp.task('watch-for-jshint', function () {
    return watch(['*.js', './client-app/**/*.js'], ['jshint']);
});
I've truncated a lot of extraneous tasks, but what's above should be everything relevant.
And naturally, 5 minutes after posting I notice one difference between my syntax and what a lot of questions are using. Most people are using gulp.watch; I was just following the syntax found on the gulp-watch npm module page and using plain watch.
Not... entirely sure why one works and the other doesn't; odd. Seems like the new version shouldn't work, since it's not referencing the gulp-watch module.
Remove the following line from the gulp task, if it is not necessary.
.on('error', notify.onError({ sound: "Funk" }))
This line makes the gulp task stop and report the error, which in turn stops the watch as well.
Replace
return watch(['*.js', './client-app/**/*.js'], ['jshint']);
with
gulp.watch(['*.js', './client-app/**/*.js'], ['jshint']);
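Putting the suggested replacement in place, the watch task would look roughly like this (a sketch using the gulp 3.x task-array syntax already used above):
gulp.task('watch-for-jshint', function () {
    // gulp's built-in watcher, not the gulp-watch plugin
    gulp.watch(['*.js', './client-app/**/*.js'], ['jshint']);
});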
I’ve noticed that if I gulp.watch() a directory with ~400 Markdown files, my Gulp task takes a long time to initialize (~19s on my machine). If I remove this specific .watch() call, my task initialization time shrinks to less than 100ms.
gulp.task('my-task', function () {
    // these calls are very quick (< 100ms)
    gulp.watch('source/styl/*.styl', ['build-css']);
    gulp.watch('source/js/index/*.js', ['build-js']);
    gulp.watch(['app.js', 'modules/*.js', 'routes/*.js', 'views/*.jade'], [server.run]);
    gulp.watch(['source/js/index/*.js', 'app.js', 'modules/*.js', 'routes/*.js', 'gulpfile.js'], ['lint-js']);
    gulp.watch('source/img/**/*', ['compress-images']);

    // this call takes ~19s to complete
    gulp.watch('source/md/releases/*.md', ['build-releases']);
});
Is there anything I can do to resolve this performance issue, or is watching directories with hundreds of files not feasible in Gulp?
Update: I have switched to a callback function:
gulp.watch('source/md/releases/*.md', function (e) {
    // console.log(e.path);
});
and I’m still experiencing the same performance issue.
I spent the better part of last month beating my head against the wall before I came up with an easy way to dynamically load and chain together HTML canvas classes that are stored on the server but, obviously, initialized on the client (harder than it sounds when ordering matters in an asynchronous environment).
I was wondering if someone could help me find a way to load simple JavaScript scripts. Let's define a load('foo.js') function which instructs the client to load the script foo.js from the server and execute it as JavaScript code.
Given the three files, stored on the server:
A.js
a = 10;
B.js
load('A.js');
b = a + 10;
C.js
load('B.js');
c = b + 10;
If the client issues the command load('C.js');, what's the easiest/most reliable way to implement this? One idea I had was to scan the code server-side and return all the scripts at once. This requires the minimum number of PHP requests. However, if the client has already requested C.js before, the script should already exist client-side, and this would be inefficient, especially if C.js and all its dependent files are large. Another option I considered was to wrap each of these server-side scripts in an object, like so for C.js above:
{
    dependencies: ['B.js'],
    code: 'c = b + 10;'
}
I just don't know how to 'pause' execution of script C.js after the load('B.js') statement, and then resume it after B.js has been loaded.
EDIT Thanks to redsqaure for suggesting yepnope and requirejs. Unfortunately, I do not like them, for several reasons. For one, requirejs is difficult (I am sure I will come under criticism for this one). My main gripe is that if it is so difficult to learn, I might as well recreate it myself, learning it in the process AND gaining greater control over it. Second, it requires you to change your style of writing. Switching to Dojo and having to use dojo.declare("ClassName", [ParentA,ParentB], {...}); to declare classes is one thing, but wrapping every snippet of code in require(['A','B',...], function(){}); is another. Finally, I don't know how simple it will be to tell it where to look for files: I want the user to be able to define a 'PATH' variable server-side and have the search occur in each of the folders/subfolders of that 'PATH'.
It depends on how optimized you want it to be. You can either go the route of synchronous XHR or use a callback (asynchronous, and recommended). If you were to go the second route, your code would look something like:
// Say C.js is dependent on A.js and B.js...
load(["A.js", "B.js"], function() {
    // code goes here
});
EDIT
Taking a second look after your feedback, what you want is somewhat possible, but it would be brittle and hard to write in JavaScript. Below I have a sample/untested implementation of a dependency loader where each file can contain at most one load("file.js") call. Supporting multiple dependencies per file would be more complex. Also, I'm assuming these files are coming from the same domain.
// Usage: load("C.js")
// C.js -> B.js -> A.js (each file declares at most one dependency via load())
// Assumes jQuery is on the page and the scripts come from the same origin.
window.load = (function() {
    var loaded = {};
    return function(str, /* internally used */ callback) {
        if (!loaded[str]) {
            loaded[str] = true;
            $.get(str, function(data) {
                // look for a single load('dep.js') call inside the fetched source
                var matches = data.match(/load\(['"](.*)['"]\)/);
                if (matches && matches.length > 1) { // has a dependency
                    window.load(matches[1], function() {
                        window.eval(data);
                        if (callback) callback();
                    });
                } else { // no dependencies
                    window.eval(data);
                    if (callback) callback();
                }
            });
        }
        // note: if a script has already been requested, the callback is skipped
        // (part of why this approach is brittle)
    };
})();
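For the A.js/B.js/C.js files from the question, usage would be a single call (a sketch, assuming jQuery is already on the page since the loader uses $.get):
load('C.js'); // fetches C.js, sees load('B.js'), recurses down to A.js, then evaluates bottom-up
// once the chain finishes, a === 10, b === 20 and c === 30 are defined globally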
Why not look into a script loader like yepnope.js or require.js?
I started a JavaScript file in Sublime Text with a few lines and a few functions, but as the days passed the file kept growing, and now it has around 600 lines with about 40 functions.
So I keep scrolling up and down to write or read code, and I don't think that's a good workflow. How can I be more organized with JavaScript code? Is there a technique or tool that professionals use for this?
600 lines is not so much, yet. What you can do is namespace your code (separate it according to functionality). For example:
Let's say you have a js file with a bunch of functions:
function formatDate(date){ ... }
function calcAge(birthdate){ ... }
function removeAccents(string){ ... }
function resizeImage(img){ ... }
... and many more ...
You can go ahead and separate the functions by category; in this case we could group all the functions that deal with dates, all the ones that deal with strings, and all the ones that handle images.
// we create a global namespace, on main.js
var MyCoolProject = {};

// then we include string.js, and put all the string functions here
MyCoolProject.string = {
    removeAccents: function(string){ ... }
};

// on date.js, we put all date functions
MyCoolProject.date = {
    formatDate: function(date){ ... },
    calcAge: function(birthdate){ ... }
};

// so on with image.js
MyCoolProject.img = {
    resizeImage: function(img){ ... }
};
This way you have several smaller files that handle a specific kind of logic and you would call your functions like this:
function doSomethingAwesome(str){
    var awesomeString = MyCoolProject.string.removeAccents(str);
    alert(awesomeString);
}
You also benefit from more maintainable code and from avoiding collisions. Collisions happen when you include another script that happens to define a function with the same name as yours; when that happens, only the last included function is executed. Namespacing your code prevents this, as the small example below illustrates.
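A tiny illustration of the collision problem (the file names here are hypothetical):
// utils.js
function formatDate(d) { return d.toISOString(); }

// other-lib.js, loaded after utils.js
function formatDate(d) { return d.toLocaleDateString(); } // silently replaces the one above

// the namespaced version is safe from the global formatDate
MyCoolProject.date.formatDate(new Date());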
Keep in mind
You will have more files, which means more <script> tags in your HTML, and sometimes their order matters! You should eventually use a build tool like grunt or gulp to concatenate and minify all the js into one single scripts.js file (see the sketch after this paragraph). This way you have full control over your code during development, but once in production your site will make only one request for a js file, which should make it load faster.
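A minimal gulp sketch of that concatenate-and-minify step, assuming the gulp-concat and gulp-uglify plugins are installed and using placeholder paths:
var gulp = require('gulp'),
    concat = require('gulp-concat'),
    uglify = require('gulp-uglify');

gulp.task('scripts', function () {
    // order matters: main.js defines the MyCoolProject namespace first
    return gulp.src(['js/main.js', 'js/string.js', 'js/date.js', 'js/img.js'])
        .pipe(concat('scripts.js'))
        .pipe(uglify())
        .pipe(gulp.dest('dist/'));
});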
Also, the namespacing method used here is my personal preference, but in js you can achieve the same behavior through other patterns like prototypes, closures, CommonJS modules, etc., so you could research these and see which one fits your personal preference. No one of them is better than the others; pick the one that serves as a tool to help you build faster and better.