ESLint: how to lint only touched files - JavaScript

I have recently added ESLint, as a webpack loader, to a codebase that was never run through a linter before.
Obviously the number of errors triggered is endless: is there any chance to configure ESLint to parse only the touched files? I would like the linter to parse every file in which developers make changes, and only those.
This is the loader I am using so far (in case it is of interest), a very standard configuration:
{test: /\.(jsx|js)$/, loader: "eslint-loader?{cache: true}", exclude: /node_modules/}
Thank you

I accomplished it by using a watcher; here is the solution in detail.
Dependencies for the webpack configuration:
var logger = require('reliable-logger');
var watch = require('watch');
var CLIEngine = require('eslint').CLIEngine;
Watcher and linter configuration and startup; I am pasting it with all the TODOs, as it is:
var configureLinterAndWatchFiles = function() {
var changedFiles = [];
var formatter;
var report;
var SEPARATOR = "////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////";
// TODO I got the feeling that one of those settings is breaking the
// linter (probably the path resolving?)
var linter = new CLIEngine({
// TODO do I need this? Looks like I don't...
// envs: ["node"],
// TODO what is the default?
useEslintrc: true,
// TODO I find weird that I get no error with this: configFile: "../.eslintrc1111"
// make sure that the configuration file is correctly picked up
configFile: ".eslintrc",
// TODO useless if your root is src
// ignorePath: "node_modules"
// TODO probably both useless... the first I still don't get it,
// the second you are enforcing the filtering yourself by checks
// cache: false,
// extensions: [".js", ".jsx"]
});
var fileUpdatedFn = function(f) {
// TODO I would prefer much more to get the list of changed files from
// git status (how to?). Here I am building my own
// resetting the array only for debug purpose
// changedFiles = [];
if(/\.js$/.test(f) || /\.jsx$/.test(f)) {
changedFiles.push(f);
logger.info(SEPARATOR);
report = linter.executeOnFiles(changedFiles);
logger.info(formatter(report.results));
}
};
// get the default formatter
formatter = linter.getFormatter();
watch.watchTree('src', function(f, curr, prev) {
if (typeof f == "object" && prev === null && curr === null) {
// Finished walking the tree
} else if (prev === null) {
// f is a new file
} else if (curr.nlink === 0) {
// f was removed
} else {
// f was changed
fileUpdatedFn(f);
}
});
};
In module.exports, as the last line:
module.exports = function(callback, options){
// ... more code ...
configureLinterAndWatchFiles();
}
That should be it. As I pointed out in a comment:
I wonder, though, if the cache flag (eslint.org/docs/developer-guide/nodejs-api#cliengine) was the best fit for the problem. From here (github.com/adametry/gulp-eslint/issues/…): "--cache flag will skip over any files that had no problems in the previous run unless they have been modified": not sure if that is my case, but it is of interest.
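As a follow-up to the TODO above about reading the list of changed files from git instead of a watcher, here is a minimal sketch of that idea (my own addition, assuming the project is a git checkout; the lintChangedFiles name and the git diff invocation are illustrative, and linter/formatter are the same CLIEngine objects configured above):
var execSync = require('child_process').execSync;

var lintChangedFiles = function(linter, formatter) {
    // Ask git for the files modified since the last commit
    var output = execSync('git diff --name-only HEAD', { encoding: 'utf8' });
    var changedFiles = output.split('\n').filter(function(f) {
        return /\.jsx?$/.test(f);
    });
    if (changedFiles.length) {
        var report = linter.executeOnFiles(changedFiles);
        console.log(formatter(report.results));
    }
};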

Definitely I'm a little late to the party, but I faced the very same issue today and it seems there is still no common solution for it.
I ended up monkey patching webpack's devServer with this:
const { exec } = require('child_process');
// ...
devServer: {
hot: false,
inline: false,
publicPath: '/',
historyApiFallback: true,
disableHostCheck: true,
after: (app, server, compiler) => {
compiler.hooks.watchRun.tap(
'EsLint-upon-save',
() => {
// This should only work in dev environment
if (process.env.NODE_ENV !== 'development') {
return;
}
// Credits to:
// https://stackoverflow.com/a/43149576/9430588
const filesChanged = Object.keys(compiler.watchFileSystem.watcher.mtimes);
// Might be empty
if (!filesChanged.length) {
return;
}
filesChanged.forEach((changedFileAbsolutePath) => {
const extension = changedFileAbsolutePath.split('.').pop();
if (extension === 'js' || extension === 'jsx') {
exec(`npx eslint --fix --fix-type suggestion,layout ${changedFileAbsolutePath}`);
}
});
}
);
}
},
It's surely a quick & dirty type of solution, however it seems to work fine with eslint@7.7.0.
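If shelling out to npx feels too dirty, roughly the same thing can be done in-process through ESLint's Node API (a sketch assuming eslint >= 7, where the ESLint class supersedes CLIEngine; the lintAndFix helper is illustrative and would be called with the filesChanged array from above):
const { ESLint } = require('eslint');

const lintAndFix = async (files) => {
    const eslint = new ESLint({ fix: true });
    const results = await eslint.lintFiles(files);
    // Persist the autofixes to disk, then print a report
    await ESLint.outputFixes(results);
    const formatter = await eslint.loadFormatter('stylish');
    console.log(formatter.format(results));
};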

Related

Refactored watch task using gulp v4 doesn't work

I'm refactoring my gulpfile now that I'm using gulp v4, and am having an issue with gulp.watch not running my stylesCompileIncremental function. Any help or pointers would be much appreciated.
My refactoring includes:
Switching to using functions instead of gulp.task
Using series and parallel as per the docs
Exporting public tasks at the bottom of my gulpfile ie exports.stylesWatch = stylesWatch;
Adding callbacks in functions to tell Gulp the function is complete
The code for the affected tasks is as follows (directory paths are stored in package.json file hence pathConfig.ui... values):
// Compile only particular Sass file that has import of changed file
function stylesCompileIncremental(cb) {
sassCompile({
source: getResultedFilesList(changedFilePath),
dest: pathConfig.ui.core.sass.dest,
alsoSearchIn: [pathConfig.ui.lib.resources]
});
cb();
}
// Compile all Sass files and watch for changes
function stylesWatch(cb) {
createImportsGraph();
var watcher = gulp.watch(pathConfig.ui.core.sass.src + '**/*.scss', gulp.parallel(devServReloadStyles));
watcher.on('change', function(event) {
changedFilePath = event;
});
cb();
}
// reload css separated into own function. No callback needed as returning event stream
function reloadCss() {
return gulp.src(generateFilePath)
.pipe($.connect.reload()); // css only reload
}
function devServReloadStyles(cb) {
gulp.series(stylesCompileIncremental, reloadCss);
cb();
}
When I run gulp stylesWatch using my refactored code, the task output shows that the stylesCompileIncremental task is not run.
So my watch task is successfully running, but something goes wrong when devServReloadStyles runs, and stylesCompileIncremental never kicks in.
The original code before refactoring (when using gulp v3) is below:
// Compile only particular Sass file that has import of changed file
gulp.task('styles:compile:incremental', () => {
return sassCompile({
source: getResultedFilesList(changedFilePath),
dest: pathConfig.ui.core.sass.dest,
alsoSearchIn: [pathConfig.ui.lib.resources]
});
});
// Compile all Sass files and watch for changes
gulp.task('styles:watch', () => {
createImportsGraph();
gulp.watch(
pathConfig.ui.core.sass.src + '**/*.scss',
['devServ:reload:styles']
).on('change', event => changedFilePath = event.path);
});
// Reload the CSS links right after 'styles:compile:incremental' task is returned
gulp.task('devServ:reload:styles', ['styles:compile:incremental'], () => {
return gulp.src(generateFilePath) // css only reload
.pipe($.connect.reload());
});
The original task output when running styles:watch showed stylesCompileIncremental running as expected.
And this is the sassCompile variable used inside stylesCompileIncremental, which I currently haven't changed in any way:
/**
 * Configurable Sass compilation
 * @param {Object} config
 */
const sassCompile = config => {
const sass = require('gulp-sass');
const postcss = require('gulp-postcss');
const autoprefixer = require('autoprefixer');
const postProcessors = [
autoprefixer({
flexbox: 'no-2009'
})
];
return gulp.src(config.source)
.pipe($.sourcemaps.init({
loadMaps: true,
largeFile: true
}))
.pipe(sass({
includePaths: config.alsoSearchIn,
sourceMap: false,
outputStyle: 'compressed',
indentType: 'tab',
indentWidth: '1',
linefeed: 'lf',
precision: 10,
errLogToConsole: true
}))
.on('error', function (error) {
$.util.log('\x07');
$.util.log(error.message);
this.emit('end');
})
.pipe(postcss(postProcessors))
.pipe($.sourcemaps.write('.'))
.pipe(gulp.dest(config.dest));
};
UPDATE
This is due to an issue with my devServReloadStyles function, although I'm still unsure why. If I change my stylesWatch function to use the original devServ:reload:styles task, stylesCompileIncremental gets run.
// Compile all Sass files and watch for changes
function stylesWatch(cb) {
createImportsGraph();
var watcher = gulp.watch(pathConfig.ui.core.sass.src + '**/*.scss', gulp.parallel('devServ:reload:styles'));
watcher.on('change', function(event) {
changedFilePath = event;
});
cb();
}
It would still be good to not use the old task and have this as a function though.
Can anybody tell me why my refactored version doesn't work and have any suggestions as to how this should look?
I've fixed this now.
gulp.series and gulp.parallel return functions so there was no need to wrap stylesCompileIncremental and reloadCss inside another function ie. devServReloadStyles.
As per Blaine's comment here.
So my function:
function devServReloadStyles(cb) {
gulp.series(stylesCompileIncremental, reloadCss);
cb();
}
Can just be assigned to a variable:
const devServReloadStyles = gulp.series(stylesCompileIncremental, reloadCss);
And my stylesWatch task is already calling devServReloadStyles:
// Compile all Sass files and watch for changes
function stylesWatch(cb) {
createImportsGraph();
var watcher = gulp.watch(pathConfig.ui.core.sass.src + '**/*.scss', gulp.parallel(devServReloadStyles));
watcher.on('change', function(event) {
changedFilePath = event;
});
cb();
}
So running gulp stylesWatch now runs the stylesCompileIncremental job (notice how devServReloadStyles no longer shows up by name in the output, since it is now a composed series rather than a named function).

webpack js compiler issues with spread syntax from npm package

I have the following class:
class AnalyticsService {
/** Log an analytics event. */
log(options) {
return Promise.all([this.logGoogleAnalytics(options), this.logKenticoAnalytics(options)]);
}
/** Log an analytics event to GA. */
logGoogleAnalytics(options) {
if (!options || !window.ga) {
console.warn('Analytics: Failed to log event:', options);
return Promise.reject(false);
}
const { category, action, label, value } = options;
ga('send', 'event', category, action, label, value);
return Promise.resolve(true);
}
/** Log an analytics event to Kentico. */
logKenticoAnalytics(options) {
if (!options || !window.ga) {
console.warn('Analytics: Failed to log activity:', options);
return Promise.reject(false);
}
const data = {
...options,
referrer: options.referrer || document.referrer,
url: options.url || location.href,
};
return postJSON(`${URL_VIRTUAL_PATH}/activities`, data).then(
response => {
if (!response.ok) {
console.warn('Analytics: Failed to log activity:', response, options);
return Promise.reject(false);
}
return response.json();
},
error => {
console.warn('Analytics: Failed to log activity:', error, options);
return Promise.reject(false);
},
);
}
}
Which if I include in another js file with
import AnalyticsService from './AnalyticsService';
Will compile and work well. However, we are trying to get reuse out of our JS by publishing it to npm so we can install it into different projects.
This has all worked well, but now if I use
import AnalyticsService from '@jsrepo/analyticsservice/AnalyticsService';
I get a compile error for the spread syntax:
ERROR in ./~/@jsrepo/analyticsservice/AnalyticsService.js
Module parse failed: C:\Web\SiteFiles\src\node_modules\@jsrepo\analyticsservice\AnalyticsService.js Unexpected token (30:6)
You may need an appropriate loader to handle this file type.
|
| const data = {
| ...options,
| referrer: options.referrer || document.referrer,
| url: options.url || location.href,
 @ ./js/components/init-analytics.js 7:24-79
 @ ./js/components/index.js
 @ ./js/main.js
 @ multi webpack/hot/dev-server webpack-hot-middleware/client?reload=true sass/main.scss js/main
I thought it may be a dependency issue, so I have added
"babel-plugin-transform-object-rest-spread": "^6.23.0"
to the dependencies of the npm package and also added it to the options of the babel loader in webpack config:
use: {
// Use the babel-loader to transpile the JS to browser-compatible syntax.
loader: 'babel-loader',
options: {
plugins: [require('babel-plugin-transform-object-rest-spread')]
// have also tried adding babel-plugin-transform-es2015-spread
}
},
But I am unable to remove the error. Does anyone know how to get the spread syntax to work when importing an npm package, or how to rewrite the code so I don't need it? Mainly, I don't understand this line:
const { category, action, label, value } = options;
or how to assign whatever is in options to the data object without using the ... syntax.
Figured out that
const { category, action, label, value } = options;
is called object destructuring. From that I was able to determine what was in options, so I just added those properties to the data var separately in order to get rid of the ...:
const data = {
category: options.category,
action: options.action,
label: options.label,
value: options.value,
referrer: options.referrer || document.referrer,
url: options.url || location.href,
};
If anyone can answer how to get the JS loader to work properly, then I will not accept this answer and will accept theirs instead, as it would be better to use the shorthand.
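For what it's worth, the "Module parse failed ... You may need an appropriate loader" message usually means webpack read the package file without running it through babel-loader at all, typically because the rule excludes everything under node_modules. Below is a sketch of a rule that carves the scoped package out of the exclude so its spread syntax gets transpiled (offered as an assumption about the setup, not a verified fix):
{
    test: /\.jsx?$/,
    // Keep skipping node_modules, except the untranspiled scoped package
    exclude: /node_modules\/(?!@jsrepo\/)/,
    use: {
        loader: 'babel-loader',
        options: {
            plugins: [require('babel-plugin-transform-object-rest-spread')]
        }
    }
}
Alternatively, the package could be compiled to ES5 before being published to npm, so consumers don't need any special loader configuration.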

karma-test-shim.js calling TestBed.initTestEnvironment, but no effect. TestBed must be initialized again

I am currently trying to get unit testing working with Angular2 final and karma + jasmine.
I have the following problem:
I get TypeError: Cannot read property 'injector' of null if I don't add the following to my test:
TestBed.initTestEnvironment(BrowserDynamicTestingModule, platformBrowserDynamicTesting())
.configureTestingModule({
declarations: [],
providers: [Stuff],
imports: [Stuff]
});
But I can only call initTestEnvironment and configureTestingModule once, so more than one test is not possible. And I'd like to avoid having an init test.
Here is my karma-test-shim.js
// #docregion
// /*global jasmine, __karma__, window*/
Error.stackTraceLimit = 0; // "No stacktrace" is usually best for app testing.
// Uncomment to get full stacktrace output. Sometimes helpful, usually not.
// Error.stackTraceLimit = Infinity; //
jasmine.DEFAULT_TIMEOUT_INTERVAL = 1000;
var builtPath = '/base/app/';
__karma__.loaded = function () { };
function isJsFile(path) {
return path.slice(-3) == '.js';
}
function isSpecFile(path) {
return /\.spec\.(.*\.)?js$/.test(path);
}
function isBuiltFile(path) {
return isJsFile(path) && (path.substr(0, builtPath.length) == builtPath);
}
var allSpecFiles = Object.keys(window.__karma__.files)
.filter(isSpecFile)
.filter(isBuiltFile);
System.config({
baseURL: '/base',
// Extend usual application package list with test folder
packages: { 'testing': { main: 'index.js', defaultExtension: 'js' } },
// Assume npm: is set in `paths` in systemjs.config
// Map the angular testing umd bundles
map: {
'@angular/core/testing': 'npm:@angular/core/bundles/core-testing.umd.js',
'@angular/common/testing': 'npm:@angular/common/bundles/common-testing.umd.js',
'@angular/compiler/testing': 'npm:@angular/compiler/bundles/compiler-testing.umd.js',
'@angular/platform-browser/testing': 'npm:@angular/platform-browser/bundles/platform-browser-testing.umd.js',
'@angular/platform-browser-dynamic/testing': 'npm:@angular/platform-browser-dynamic/bundles/platform-browser-dynamic-testing.umd.js',
'@angular/http/testing': 'npm:@angular/http/bundles/http-testing.umd.js',
'@angular/router/testing': 'npm:@angular/router/bundles/router-testing.umd.js',
'@angular/forms/testing': 'npm:@angular/forms/bundles/forms-testing.umd.js',
},
});
System.import('systemjs.config.js')
.then(importSystemJsExtras)
.then(initTestBed)
.then(initTesting);
/** Optional SystemJS configuration extras. Keep going w/o it */
function importSystemJsExtras(){
return System.import('systemjs.config.extras.js')
.catch(function(reason) {
console.log(
'WARNING: System.import could not load "systemjs.config.extras.js"; continuing without it.'
);
console.log(reason);
});
}
function initTestBed(){
return Promise.all([
System.import('@angular/core/testing'),
System.import('@angular/platform-browser-dynamic/testing')
])
.then(function (providers) {
var coreTesting = providers[0];
var browserTesting = providers[1];
console.log("call initTestEnvironment")
coreTesting.TestBed.initTestEnvironment(
browserTesting.BrowserDynamicTestingModule,
browserTesting.platformBrowserDynamicTesting());
console.log("call configure teting module")
coreTesting.TestBed.configureTestingModule({
declarations: [],
providers: [],
imports: []
})
})
}
// Import all spec files and start karma
function initTesting () {
return Promise.all(
allSpecFiles.map(function (moduleName) {
return System.import(moduleName);
})
)
.then(__karma__.start, __karma__.error);
}
I thought calling initTestEnvironment in the test shim would be enough. I am surprised that the call in karma-test-shim.js seems to have no effect.
package.json and code are in a related question: AsyncTestCompleter Browserify Angular2 HTTP Mock Test
Thank you so much for your help.

Why browsersync does not reload page

The problem is with stream-based reloading of the page.
The plain reload method works correctly, but when I use browserSync.stream() (browserSync.reload({stream: true})) it does not work.
This is my BrowserSync init function:
function browserSyncInit(baseDir, browser) {
browser = browser === undefined ? 'default' : browser;
var routes = null;
if(baseDir === conf.paths.src || (util.isArray(baseDir) && baseDir.indexOf(conf.paths.src) !== -1)) {
routes = {
'/bower_components': 'bower_components'
};
}
var server = {
baseDir: baseDir,
routes: routes,
middleware: proxyMiddleware('http://0.0.0.0:8080')
};
var nodemonConfig = {
cwd: path.normalize(__dirname + '/../../'),
script: 'server/server.js',
ext: 'js json',
ignore: ['client/**/*.*'],
env: {'PORT': '8080'}
};
var serverStarted;
nodemon(nodemonConfig)
.on('start', function () {
if (serverStarted) return;
browserSync.init(null, {
startPath: '/',
open: false,
server: server,
browser: browser
});
serverStarted = true;
});
}
The proxied server is a LoopBack application (the problem may lie there).
These are the tasks for reloading styles and scripts:
gulp.task('styles-reload', ['styles'], function() {
return buildStyles()
.pipe(browserSync.stream());
});
gulp.task('scripts-reload', ['scripts'], function() {
return buildScripts()
.pipe(browserSync.stream());
});
Streams are for injecting scripts/CSS/etc. from a task's Gulp stream, which is why the documentation mentions placing it after gulp.dest.
If you're looking to manually reload the BrowserSync page, you can do that with .reload in your two functions; otherwise, you'll need to pass the files through into your reload tasks, since it looks like you're calling those tasks from elsewhere.
To add to this, I don't see a reason to separate the two tasks (styles/scripts and their respective -reload tasks). You should just pipe it after the dest, so that you don't have to mess with starting a new stream or merging between tasks.
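In other words, something along these lines (a sketch reusing the buildStyles helper from the question, assuming it returns the compiled CSS stream before it has been written; the dist/css path is just a placeholder):
gulp.task('styles', function() {
    return buildStyles()
        .pipe(gulp.dest('dist/css'))     // write the compiled CSS first ...
        .pipe(browserSync.stream());     // ... then let BrowserSync inject it
});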

How do I configure different environments in Angular.js?

How do you manage configuration variables/constants for different environments?
This could be an example:
My REST API is reachable on localhost:7080/myapi/, but my friend who works on the same code under Git version control has the API deployed on his Tomcat at localhost:8099/hisapi/.
Supposing that we have something like this :
angular
.module('app', ['ngResource'])
.constant('API_END_POINT','<local_end_point>')
.factory('User', function($resource, API_END_POINT) {
return $resource(API_END_POINT + 'user');
});
How do I dynamically inject the correct value of the API endpoint, depending on the environment?
In PHP I usually do this kind of thing with a config.username.xml file, merging the basic configuration file (config.xml) with a local environment configuration file recognised by the name of the user. But I don't know how to manage this kind of thing in JavaScript.
I'm a little late to the thread, but if you're using Grunt I've had great success with grunt-ng-constant.
The config section for ngconstant in my Gruntfile.js looks like
ngconstant: {
options: {
name: 'config',
wrap: '"use strict";\n\n{%= __ngModule %}',
space: ' '
},
development: {
options: {
dest: '<%= yeoman.app %>/scripts/config.js'
},
constants: {
ENV: 'development'
}
},
production: {
options: {
dest: '<%= yeoman.dist %>/scripts/config.js'
},
constants: {
ENV: 'production'
}
}
}
The tasks that use ngconstant look like
grunt.registerTask('server', function (target) {
if (target === 'dist') {
return grunt.task.run([
'build',
'open',
'connect:dist:keepalive'
]);
}
grunt.task.run([
'clean:server',
'ngconstant:development',
'concurrent:server',
'connect:livereload',
'open',
'watch'
]);
});
grunt.registerTask('build', [
'clean:dist',
'ngconstant:production',
'useminPrepare',
'concurrent:dist',
'concat',
'copy',
'cdnify',
'ngmin',
'cssmin',
'uglify',
'rev',
'usemin'
]);
So running grunt server will generate a config.js file in app/scripts/ that looks like
"use strict";
angular.module("config", []).constant("ENV", "development");
Finally, I declare the dependency on whatever modules need it:
// the 'config' dependency is generated via grunt
var app = angular.module('myApp', [ 'config' ]);
Now my constants can be dependency injected where needed. E.g.,
app.controller('MyController', ['ENV', function( ENV ) {
if( ENV === 'production' ) {
...
}
}]);
One cool solution might be separating all environment-specific values into some separate angular module, that all other modules depend on:
angular.module('configuration', [])
.constant('API_END_POINT','123456')
.constant('HOST','localhost');
Then your modules that need those entries can declare a dependency on it:
angular.module('services', ['configuration'])
.factory('User', ['$resource', 'API_END_POINT', function($resource, API_END_POINT) {
return $resource(API_END_POINT + 'user');
}]);
Now you could think about further cool stuff:
The module that contains the configuration can be separated into a configuration.js file that is included on your page.
This script can easily be edited by each of you, as long as you don't check this separate file into git; keeping the configuration in its own file makes that easier. Also, you could branch it locally.
Now, if you have a build system like ANT or Maven, a further step could be implementing placeholders for values such as API_END_POINT that are replaced at build time with your specific values (a sketch follows below).
Or you have your configuration_a.js and configuration_b.js and decide at the backend which to include.
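To illustrate the placeholder idea: configuration.js could be checked in as a template whose tokens the build replaces (the token syntax below is only an illustration, not tied to a particular ANT/Maven plugin):
// configuration.js, checked in as a template
angular.module('configuration', [])
    .constant('API_END_POINT', '@API_END_POINT@')   // replaced at build time
    .constant('HOST', '@HOST@');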
For Gulp users, gulp-ng-constant is also useful combined with gulp-concat, event-stream and yargs.
var concat = require('gulp-concat'),
es = require('event-stream'),
gulp = require('gulp'),
ngConstant = require('gulp-ng-constant'),
argv = require('yargs').argv;
var environment = argv.env || 'development';
gulp.task('config', function () {
var config = gulp.src('config/' + environment + '.json')
.pipe(ngConstant({name: 'app.config'}));
var scripts = gulp.src('js/*');
return es.merge(config, scripts)
.pipe(concat('app.js'))
.pipe(gulp.dest('app/dist'))
.on('error', function() { });
});
In my config folder I have these files:
ls -l config
total 8
-rw-r--r--+ 1 .. ci.json
-rw-r--r--+ 1 .. development.json
-rw-r--r--+ 1 .. production.json
Then you can run gulp config --env development and that will create something like this:
angular.module("app.config", [])
.constant("foo", "bar")
.constant("ngConstant", true);
I also have this spec:
beforeEach(module('app'));
it('loads the config', inject(function(config) {
expect(config).toBeTruthy();
}));
To achieve that, I suggest you use the AngularJS Environment Plugin: https://www.npmjs.com/package/angular-environment
Here's an example:
angular.module('yourApp', ['environment']).
config(function(envServiceProvider) {
// set the domains and variables for each environment
envServiceProvider.config({
domains: {
development: ['localhost', 'dev.local'],
production: ['acme.com', 'acme.net', 'acme.org']
// anotherStage: ['domain1', 'domain2'],
// anotherStage: ['domain1', 'domain2']
},
vars: {
development: {
apiUrl: '//localhost/api',
staticUrl: '//localhost/static'
// anotherCustomVar: 'lorem',
// anotherCustomVar: 'ipsum'
},
production: {
apiUrl: '//api.acme.com/v2',
staticUrl: '//static.acme.com'
// anotherCustomVar: 'lorem',
// anotherCustomVar: 'ipsum'
}
// anotherStage: {
// customVar: 'lorem',
// customVar: 'ipsum'
// }
}
});
// run the environment check, so the verification is made
// before controllers and services are built
envServiceProvider.check();
});
And then you can read the variables from your controllers like this:
envService.read('apiUrl');
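For example, injected into a controller it might look like this (a sketch based on the plugin usage above; MainCtrl is illustrative):
angular.module('yourApp')
    .controller('MainCtrl', ['envService', function(envService) {
        // 'apiUrl' resolves to the value configured for the current domain,
        // e.g. '//localhost/api' during development
        var apiUrl = envService.read('apiUrl');
    }]);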
Hope it helps.
You could use lvh.me:9000 to access your AngularJS app, (lvh.me just points to 127.0.0.1) and then specify a different endpoint if lvh.me is the host:
app.service("Configuration", function() {
if (window.location.host.match(/lvh\.me/)) {
return this.API = 'http://localhost\\:7080/myapi/';
} else {
return this.API = 'http://localhost\\:8099/hisapi/';
}
});
And then inject the Configuration service and use Configuration.API wherever you need to access the API:
$resource(Configuration.API + '/endpoint/:id', {
id: '@id'
});
A tad clunky, but works fine for me, albeit in a slightly different situation (API endpoints differ in production and development).
We could also do something like this.
(function(){
'use strict';
angular.module('app').service('env', function env() {
var _environments = {
local: {
host: 'localhost:3000',
config: {
apiroot: 'http://localhost:3000'
}
},
dev: {
host: 'dev.com',
config: {
apiroot: 'http://localhost:3000'
}
},
test: {
host: 'test.com',
config: {
apiroot: 'http://localhost:3000'
}
},
stage: {
host: 'stage.com',
config: {
apiroot: 'staging'
}
},
prod: {
host: 'production.com',
config: {
apiroot: 'production'
}
}
},
_environment;
return {
getEnvironment: function(){
var host = window.location.host;
if(_environment){
return _environment;
}
for(var environment in _environments){
if(_environments[environment].host && _environments[environment].host == host){
_environment = environment;
return _environment;
}
}
return null;
},
get: function(property){
return _environments[this.getEnvironment()].config[property];
}
}
});
})();
And in your controller/service, we can inject the dependency and call the get method with the property to be accessed.
(function() {
'use strict';
angular.module('app').service('apiService', apiService);
apiService.$inject = ['configurations', '$q', '$http', 'env'];
function apiService(config, $q, $http, env) {
var service = {};
/* **********APIs **************** */
service.get = function() {
return $http.get(env.get('apiroot') + '/api/yourservice');
};
return service;
}
})();
$http.get(env.get('apiroot')) would return the URL based on the host environment.
Good question!
One solution could be to continue using your config.xml file, and provide the API endpoint information from the backend to your generated HTML, like this (example in PHP):
<script type="text/javascript">
angular.module('YourApp').constant('API_END_POINT', '<?php echo $apiEndPointFromBackend; ?>');
</script>
Maybe not a pretty solution, but it would work.
Another solution could be to keep the API_END_POINT constant value as it should be in production, and only modify your hosts file to point that URL to your local API instead.
Or maybe a solution using localStorage for overrides, like this:
.factory('User', ['$resource', 'API_END_POINT', function($resource, API_END_POINT) {
var myApi = localStorage.getItem('myLocalApiOverride');
return $resource((myApi || API_END_POINT) + 'user');
}]);
Very late to the thread, but a technique I've used, pre-Angular, is to take advantage of JSON and the flexibility of JS to dynamically reference collection keys, and use inalienable facts of the environment (host server name, current browser language, etc.) as inputs to selectively discriminate/prefer suffixed key names within a JSON data structure.
This provides not merely deploy-environment context (per OP) but any arbitrary context (such as language) to provide i18n or any other variance required simultaneously, and (ideally) within a single configuration manifest, without duplication, and readably obvious.
IN ABOUT 10 LINES VANILLA JS
Overly-simplified but classic example: An API endpoint base URL in a JSON-formatted properties file that varies per environment where (natch) the host server will also vary:
...
'svcs': {
'VER': '2.3',
'API#localhost': 'http://localhost:9090/',
'API#www.uat.productionwebsite.com': 'https://www.uat.productionwebsite.com:9090/res/',
'API#www.productionwebsite.com': 'https://www.productionwebsite.com:9090/api/res/'
},
...
A key to the discrimination function is simply the server hostname in the request.
This, naturally, can be combined with an additional key based on the user's language settings:
...
'app': {
'NAME': 'Ferry Reservations',
'NAME#fr': 'Réservations de ferry',
'NAME#de': 'Fähren Reservierungen'
},
...
The scope of the discrimination/preference can be confined to individual keys (as above) where the "base" key is only overwritten if there's a matching key+suffix for the inputs to the function -- or an entire structure, and that structure itself recursively parsed for matching discrimination/preference suffixes:
'help': {
'BLURB': 'This pre-production environment is not supported. Contact Development Team with questions.',
'PHONE': '808-867-5309',
'EMAIL': 'coder.jen@lostnumber.com'
},
'help#www.productionwebsite.com': {
'BLURB': 'Please contact Customer Service Center',
'BLURB#fr': 'S\'il vous plaît communiquer avec notre Centre de service à la clientèle',
'BLURB#de': 'Bitte kontaktieren Sie unseren Kundendienst!!1!',
'PHONE': '1-800-CUS-TOMR',
'EMAIL': 'customer.service@productionwebsite.com'
},
SO, if a visiting user to the production website has German (de) language preference setting, the above configuration would collapse to:
'help': {
'BLURB': 'Bitte kontaktieren Sie unseren Kundendienst!!1!',
'PHONE': '1-800-CUS-TOMR',
'EMAIL': 'customer.service#productionwebsite.com'
},
What does such a magical preference/discrimination JSON-rewriting function look like? Not much:
// prefer(object,suffix|[suffixes]) by/par/durch storsoc
// prefer({ a: 'apple', a#env: 'banana', b: 'carrot' },'env') -> { a: 'banana', b: 'carrot' }
function prefer(o,sufs) {
for (var key in o) {
if (!o.hasOwnProperty(key)) continue; // skip non-instance props
if(key.split('#')[1]) { // suffixed!
// replace root prop with the suffixed prop if among prefs
if(o[key] && sufs.indexOf(key.split('#')[1]) > -1) o[key.split('#')[0]] = JSON.parse(JSON.stringify(o[key]));
// and nuke the suffixed prop to tidy up
delete o[key];
// continue with root key ...
key = key.split('#')[0];
}
// ... in case it's a collection itself, recurse it!
if(o[key] && typeof o[key] === 'object') prefer(o[key],sufs);
};
};
In our implementations, which include Angular and pre-Angular websites, we simply bootstrap the configuration well ahead of other resource calls by placing the JSON within a self-executing JS closure, including the prefer() function, and fed basic properties of hostname and language-code (and accepts any additional arbitrary suffixes you might need):
(function(prefs){ var props = {
'svcs': {
'VER': '2.3',
'API#localhost': 'http://localhost:9090/',
'API#www.uat.productionwebsite.com': 'https://www.uat.productionwebsite.com:9090/res/',
'API#www.productionwebsite.com': 'https://www.productionwebsite.com:9090/api/res/'
},
...
/* yadda yadda moar JSON und bisque */
function prefer(o,sufs) {
// body of prefer function, broken for e.g.
};
// convert string and comma-separated-string to array .. and process it
prefs = [].concat( ( prefs.split ? prefs.split(',') : prefs ) || []);
prefer(props,prefs);
window.app_props = JSON.parse(JSON.stringify(props));
})([location.hostname, ((window.navigator.userLanguage || window.navigator.language).split('-')[0]) ] );
A pre-Angular site would now have a collapsed (no # suffixed keys) window.app_props to refer to.
An Angular site, as a bootstrap/init step, simply copies the dead-dropped props object into $rootScope, and (optionally) destroys it from global/window scope
app.constant('props',angular.copy(window.app_props || {})).run( function ($rootScope,props) { $rootScope.props = props; delete window.app_props;} );
to be subsequently injected into controllers:
app.controller('CtrlApp',function($log,props){ ... } );
or referred to from bindings in views:
<span>{{ props.help.blurb }} {{ props.help.email }}</span>
Caveats? The # character is not valid in JS/JSON variable/key names, but so far it has been accepted. If that's a deal-breaker, substitute any convention you like, such as "__" (double underscore), as long as you stick to it.
The technique could be applied server-side, ported to Java or C# but your efficiency/compactness may vary.
Alternately, the function/convention could be part of your front-end compile script, so that the full gory all-environment/all-language JSON is never transmitted over the wire.
UPDATE
We've evolved usage of this technique to allow multiple suffixes to a key, to avoid being forced to use collections (you still can, as deeply as you want), and as well to honor the order of the preferred suffixes.
Example (also see working jsFiddle):
var o = { 'a':'apple', 'a#dev':'apple-dev', 'a#fr':'pomme',
'b':'banana', 'b#fr':'banane', 'b#dev&fr':'banane-dev',
'c':{ 'o':'c-dot-oh', 'o#fr':'c-point-oh' }, 'c#dev': { 'o':'c-dot-oh-dev', 'o#fr':'c-point-oh-dev' } };
/*1*/ prefer(o,'dev'); // { a:'apple-dev', b:'banana', c:{o:'c-dot-oh-dev'} }
/*2*/ prefer(o,'fr'); // { a:'pomme', b:'banane', c:{o:'c-point-oh'} }
/*3*/ prefer(o,'dev,fr'); // { a:'apple-dev', b:'banane-dev', c:{o:'c-point-oh-dev'} }
/*4*/ prefer(o,['fr','dev']); // { a:'pomme', b:'banane-dev', c:{o:'c-point-oh-dev'} }
/*5*/ prefer(o); // { a:'apple', b:'banana', c:{o:'c-dot-oh'} }
1/2 (basic usage) prefers '#dev' keys, discards all other suffixed keys
3 prefers '#dev' over '#fr', prefers '#dev&fr' over all others
4 (same as 3 but prefers '#fr' over '#dev')
5 no preferred suffixes, drops ALL suffixed properties
It accomplishes this by scoring each suffixed property and promoting the value of a suffixed property to the non-suffixed property when iterating over the properties and finding a higher-scored suffix.
Some efficiencies in this version, including removing dependence on JSON to deep-copy, and only recursing into objects that survive the scoring round at their depth:
function prefer(obj,suf) {
function pr(o,s) {
for (var p in o) {
if (!o.hasOwnProperty(p) || !p.split('#')[1] || p.split('##')[1] ) continue; // ignore: proto-prop OR not-suffixed OR temp prop score
var b = p.split('#')[0]; // base prop name
if(!!!o['##'+b]) o['##'+b] = 0; // +score placeholder
var ps = p.split('#')[1].split('&'); // array of property suffixes
var sc = 0; var v = 0; // reset (running)score and value
while(ps.length) {
// suffix value: index(of found suffix in prefs)^10
v = Math.floor(Math.pow(10,s.indexOf(ps.pop())));
if(!v) { sc = 0; break; } // found suf NOT in prefs, zero score (delete later)
sc += v;
}
if(sc > o['##'+b]) { o['##'+b] = sc; o[b] = o[p]; } // hi-score! promote to base prop
delete o[p];
}
for (var p in o) if(p.split('##')[1]) delete o[p]; // remove scores
for (var p in o) if(typeof o[p] === 'object') pr(o[p],s); // recurse surviving objs
}
if( typeof obj !== 'object' ) return; // validate
suf = ( (suf || suf === 0 ) && ( suf.length || suf === parseFloat(suf) ) ? suf.toString().split(',') : []); // array|string|number|comma-separated-string -> array-of-strings
pr(obj,suf.reverse());
}
If you're using Brunch, the plugin Constangular helps you to manage variables for different environments.
Have you seen this question and its answer?
You can set a globally valid value for your app like this:
app.value('key', 'value');
and then use it in your services. You could move this code to a config.js file and execute it on page load or another convenient moment.
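For example (a small sketch reusing the User factory from the question, with app.value standing in for the constant):
app.value('API_END_POINT', 'http://localhost:7080/myapi/');

app.factory('User', ['$resource', 'API_END_POINT', function($resource, API_END_POINT) {
    return $resource(API_END_POINT + 'user');
}]);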
