Why BrowserSync does not reload the page - javascript

The problem is with reloading the page via a stream.
The plain reload method works correctly, but when I use browserSync.stream() (i.e. browserSync.reload({stream: true})) it does not work.
This is my BrowserSync init function:
function browserSyncInit(baseDir, browser) {
  browser = browser === undefined ? 'default' : browser;

  var routes = null;
  if(baseDir === conf.paths.src || (util.isArray(baseDir) && baseDir.indexOf(conf.paths.src) !== -1)) {
    routes = {
      '/bower_components': 'bower_components'
    };
  }

  var server = {
    baseDir: baseDir,
    routes: routes,
    middleware: proxyMiddleware('http://0.0.0.0:8080')
  };

  var nodemonConfig = {
    cwd: path.normalize(__dirname + '/../../'),
    script: 'server/server.js',
    ext: 'js json',
    ignore: ['client/**/*.*'],
    env: {'PORT': '8080'}
  };

  var serverStarted;
  nodemon(nodemonConfig)
    .on('start', function () {
      if (serverStarted) return;
      browserSync.init(null, {
        startPath: '/',
        open: false,
        server: server,
        browser: browser
      });
      serverStarted = true;
    });
}
The proxied server is a Loopback application (maybe the problem is in this).
These are the tasks for reloading styles and scripts:
gulp.task('styles-reload', ['styles'], function() {
  return buildStyles()
    .pipe(browserSync.stream());
});

gulp.task('scripts-reload', ['scripts'], function() {
  return buildScripts()
    .pipe(browserSync.stream());
});

Streams are for injecting scripts/CSS/etc. from a task's Gulp stream, which is why the documentation says to place it after gulp.dest.
If you're looking to manually reload the BrowserSync page, you can do that with .reload in your two functions; otherwise you'll need to pass the files through into your reload tasks, since it looks like you're calling those tasks from elsewhere.
To add to this, I don't see a reason to separate the two tasks (styles/scripts with their respective -reload tasks). You should just pipe browserSync.stream() after the dest, so that you don't have to mess with starting a new stream or merging between tasks, as in the sketch below.
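A minimal sketch of that combined approach (assuming a buildStyles() helper like the one above that returns the compiled CSS stream; the destination path is illustrative):

// Hypothetical combined task: compile, write to disk, then inject the changed
// CSS into the open page via BrowserSync's stream, so no separate -reload task
// and no full page reload are needed.
gulp.task('styles', function () {
  return buildStyles()
    .pipe(gulp.dest(conf.paths.tmp + '/serve/styles')) // write the compiled CSS first
    .pipe(browserSync.stream());                       // then stream it to the browser
});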

Related

Refactored watch task using gulp v4 doesn't work

I'm refactoring my gulpfile now that I'm using gulp v4, and I'm having an issue with gulp watch not running my stylesCompileIncremental function. Any help or pointers would be much appreciated.
My refactoring includes:
Switching to using functions instead of gulp.task
Using series and parallel as per the docs
Exporting public tasks at the bottom of my gulpfile, i.e. exports.stylesWatch = stylesWatch;
Adding callbacks in functions to tell Gulp the function is complete
The code for the affected tasks is as follows (directory paths are stored in a package.json file, hence the pathConfig.ui... values):
// Compile only particular Sass file that has import of changed file
function stylesCompileIncremental(cb) {
  sassCompile({
    source: getResultedFilesList(changedFilePath),
    dest: pathConfig.ui.core.sass.dest,
    alsoSearchIn: [pathConfig.ui.lib.resources]
  });
  cb();
}

// Compile all Sass files and watch for changes
function stylesWatch(cb) {
  createImportsGraph();
  var watcher = gulp.watch(pathConfig.ui.core.sass.src + '**/*.scss', gulp.parallel(devServReloadStyles));
  watcher.on('change', function(event) {
    changedFilePath = event;
  });
  cb();
}

// reload css separated into own function. No callback needed as returning event stream
function reloadCss() {
  return gulp.src(generateFilePath)
    .pipe($.connect.reload()); // css only reload
}

function devServReloadStyles(cb) {
  gulp.series(stylesCompileIncremental, reloadCss);
  cb();
}
When I run gulp stylesWatch using my refactored code I get the output below (notice the stylesCompileIncremental task is not run):
So my watch task is successfully running, but something goes wrong when devServReloadStyles runs, because stylesCompileIncremental doesn't kick in.
The original code before refactoring (when using gulp v3) is below:
// Compile only particular Sass file that has import of changed file
gulp.task('styles:compile:incremental', () => {
  return sassCompile({
    source: getResultedFilesList(changedFilePath),
    dest: pathConfig.ui.core.sass.dest,
    alsoSearchIn: [pathConfig.ui.lib.resources]
  });
});

// Compile all Sass files and watch for changes
gulp.task('styles:watch', () => {
  createImportsGraph();
  gulp.watch(
    pathConfig.ui.core.sass.src + '**/*.scss',
    ['devServ:reload:styles']
  ).on('change', event => changedFilePath = event.path);
});

// Reload the CSS links right after 'styles:compile:incremental' task is returned
gulp.task('devServ:reload:styles', ['styles:compile:incremental'], () => {
  return gulp.src(generateFilePath) // css only reload
    .pipe($.connect.reload());
});
The original task output when running styles:watch is this:
And this is the sassCompile variable used inside stylesCompileIncremental, which I currently haven't changed in any way.
/**
 * Configurable Sass compilation
 * @param {Object} config
 */
const sassCompile = config => {
  const sass = require('gulp-sass');
  const postcss = require('gulp-postcss');
  const autoprefixer = require('autoprefixer');

  const postProcessors = [
    autoprefixer({
      flexbox: 'no-2009'
    })
  ];

  return gulp.src(config.source)
    .pipe($.sourcemaps.init({
      loadMaps: true,
      largeFile: true
    }))
    .pipe(sass({
      includePaths: config.alsoSearchIn,
      sourceMap: false,
      outputStyle: 'compressed',
      indentType: 'tab',
      indentWidth: '1',
      linefeed: 'lf',
      precision: 10,
      errLogToConsole: true
    }))
    .on('error', function (error) {
      $.util.log('\x07');
      $.util.log(error.message);
      this.emit('end');
    })
    .pipe(postcss(postProcessors))
    .pipe($.sourcemaps.write('.'))
    .pipe(gulp.dest(config.dest));
};
UPDATE
This is due to an issue with my devServReloadStyles function, although I'm still unsure why. If I change my stylesWatch function to use the original devServ:reload:styles task stylesCompileIncremental gets run.
// Compile all Sass files and watch for changes
function stylesWatch(cb) {
  createImportsGraph();
  var watcher = gulp.watch(pathConfig.ui.core.sass.src + '**/*.scss', gulp.parallel('devServ:reload:styles'));
  watcher.on('change', function(event) {
    changedFilePath = event;
  });
  cb();
}
It would still be good to not use the old task and have this as a function though.
Can anybody tell me why my refactored version doesn't work and have any suggestions as to how this should look?
I've fixed this now.
gulp.series and gulp.parallel return functions, so there was no need to wrap stylesCompileIncremental and reloadCss inside another function, i.e. devServReloadStyles.
As per Blaine's comment here.
So my function:
function devServReloadStyles(cb) {
  gulp.series(stylesCompileIncremental, reloadCss);
  cb();
}
Can just be assigned to a variable:
const devServReloadStyles = gulp.series(stylesCompileIncremental, reloadCss);
And my stylesWatch task is already calling devServReloadStyles:
// Compile all Sass files and watch for changes
function stylesWatch(cb) {
createImportsGraph();
var watcher = gulp.watch(pathConfig.ui.core.sass.src + '**/*.scss', gulp.parallel(devServReloadStyles));
watcher.on('change', function(event) {
changedFilePath = event;
});
cb();
}
So running gulp stylesWatch now runs the stylesCompileIncremental job (notice how devServReloadStyles no longer shows up by name, since it is now the anonymous composition returned by gulp.series rather than a named function).
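Pulling the pieces together, a minimal sketch of the corrected arrangement (the export follows the exports.stylesWatch = stylesWatch convention mentioned above; the other helpers are as in the question, and passing the composed function directly to gulp.watch works just as well as wrapping it in gulp.parallel):

// devServReloadStyles is now the composed task function returned by gulp.series,
// so gulp.watch can invoke it directly.
const devServReloadStyles = gulp.series(stylesCompileIncremental, reloadCss);

// Compile all Sass files and watch for changes
function stylesWatch(cb) {
  createImportsGraph();
  var watcher = gulp.watch(pathConfig.ui.core.sass.src + '**/*.scss', devServReloadStyles);
  watcher.on('change', function(event) {
    changedFilePath = event;
  });
  cb();
}

exports.stylesWatch = stylesWatch;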

Issue bundling javascript with gulp js

I'm bundling my JavaScript files using the task runner Gulp, and during the development of an application I'm noticing a certain issue.
When I add the new feature (reveal a password), its script refuses to work because of the form modal script, which can be seen below.
'use strict';

var modal__button = document.getElementById("enquiry-form-trigger");
var close__button = document.getElementById("close");

modal__button.addEventListener("click", function (){
  var modal = document.getElementById("modal-form");
  modal.classList.add("fadeIn");
  modal.style.visibility = "visible";
});

close__button.addEventListener("click", function (){
  var modal = document.getElementById("modal-form");
  modal.classList.remove("fadeIn");
  modal.style.visibility = "hidden";
});
When the above script is bundled together with this other script below:
"use strict"
document.getElementById("password-reveal-modal").addEventListener("click", function (){
var x = document.getElementById("password-modal");
if (x.type === "password") {
x.type = "text";
} else {
x.type = "password";
}
});
The password reveal feature doesn't work, but when I paste it into Chrome's dev tools it works perfectly.
I'm not sure why it won't work without pasting it into the dev tools; it's baffling me. I'm not sure if it's my setup or the custom JavaScript.
This is my gulpfile setup in case anyone wants to check it. I don't see an issue, but I've only been using Gulp for about 3 or 4 months.
var gulp = require("gulp"),
    sass = require("gulp-sass"),
    image = require("gulp-image"),
    concat = require("gulp-concat"),
    browserSync = require('browser-sync').create(),
    reload = browserSync.reload,
    minifyEjs = require("gulp-minify-ejs"),
    stripEJSComments = require('gulp-strip-comments'),
    nodemon = require('gulp-nodemon'),
    plumber = require("gulp-plumber"),
    ejs = require("ejs"),
    uglify = require("gulp-uglify");

//Build task
gulp.task("build", ["ejs", "styles", "images", "javascript", "routes", "models", "middleware"], function () {
  console.log("Build Success");
});

//start up sequence tasks
gulp.task('init', ["nodemon"], function () {
  browserSync.init({
    proxy: 'http://localhost:2000', //Index.js port number
    port: 2128, // The port browser sync runs on
    serveStatic: ['./public/', "./assets/"], // What files browser sync should have access to
    reloadOnRestart: true, // Enable auto reload
    ghostMode: false, //Stops session mirroring
    open: "external", //Opens up on an external address (xxx.xxx.x.xx:3128)
  });
});

//Starts the express server
gulp.task('nodemon', function (done) {
  var running = false; //Default State
  return nodemon({
    script: 'index.js', //Index file for the JS project
    watch: ["./assets/", "./public/"] //What nodemon has access to
  })
    .on('start', function () {
      if (!running) {
        done();
      }
      running = true;
    })
    //Minor Delay Of 500ms Upon Restart
    .on('restart', function () {
      setTimeout(function () {
        reload();
      }, 500);
    });
});

//SCSS Task
gulp.task("styles", function () {
  gulp.src("./assets/stylesheet/APP.scss")
    .pipe(plumber())
    .pipe(sass({
      outputStyle: 'compressed'
    }))
    .pipe(gulp.dest("./public/stylesheet/"))
    .pipe(browserSync.stream({ stream: true }));
});

//Compiles the express route/s
gulp.task("routes", function () {
  gulp.src([
    "./assets/routes/*.js"
  ])
    .pipe(plumber())
    .pipe(gulp.dest("./public/routes/"))
    .pipe(browserSync.stream({ stream: true }));
});

//Compiles the express route/s
gulp.task("models", function () {
  gulp.src("./assets/models/*.js")
    .pipe(plumber())
    .pipe(gulp.dest("./public/models/"))
    .pipe(browserSync.stream({ stream: true }));
});

//Image minification
gulp.task("images", function () {
  return gulp.src("./assets/images/*")
    .pipe(image())
    .pipe(gulp.dest("./public/images"))
    .pipe(browserSync.stream({ stream: true }));
});

//Client javascript
gulp.task("javascript", function () {
  gulp.src([
    "./node_modules/jquery/dist/jquery.js",
    "./node_modules/tether/dist/js/tether.js",
    "./node_modules/bootstrap/dist/js/bootstrap.js",
    "./assets/scripts/**/**/*.js"
  ])
    .pipe(plumber())
    .pipe(concat("main.js"))
    .pipe(gulp.dest("./public/scripts/"))
    .pipe(browserSync.stream({ stream: true }));
});

//Middleware task
gulp.task("middleware", function () {
  gulp.src("./assets/middleware/*.js")
    .pipe(plumber())
    .pipe(concat("index.js"))
    .pipe(gulp.dest("./public/middleware/"))
    .pipe(browserSync.stream({ stream: true }));
});

//EJS task
gulp.task("ejs", function () {
  gulp.src("./assets/views/**/*.ejs")
    .pipe(plumber())
    .pipe(stripEJSComments())
    .pipe(minifyEjs({}))
    .pipe(gulp.dest("./public/views"))
    .pipe(browserSync.stream({ stream: true }));
});

//Default task array
gulp.task("default", ["init", "build"], function (done) {
  gulp.watch("./assets/stylesheet/**/*.scss", ["styles"]);
  gulp.watch("./assets/scripts/*", ["javascript"]);
  gulp.watch("./assets/routes/*.js", ["routes"]);
  gulp.watch("./assets/models/*.js", ["models"]);
  gulp.watch("./assets/images/*", ["images"]);
  gulp.watch("./assets/views/**/*.ejs", ["ejs"]);
  browserSync.reload();
  done();
});
The two files above are the only ones causing the issue; I have tested each file and it's only these two, so something is making them clash.
If you want me to upload my project to GitHub just let me know and I will upload the latest version to my update branch.
I have exhausted all my knowledge on this problem and I am completely stuck now.
If anyone could help a fellow developer out it would be greatly appreciated.
Cheers,
Alex
I needed to ensure the DOM had fully loaded before the script could be executed.
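A minimal sketch of that fix, assuming the issue was the bundled script running before the modal elements existed in the DOM (element IDs as in the question):

'use strict';

// Wire up the modal only after the DOM has fully loaded, so getElementById
// finds the elements and addEventListener doesn't throw on null.
document.addEventListener("DOMContentLoaded", function () {
  var modal__button = document.getElementById("enquiry-form-trigger");
  var close__button = document.getElementById("close");

  modal__button.addEventListener("click", function () {
    var modal = document.getElementById("modal-form");
    modal.classList.add("fadeIn");
    modal.style.visibility = "visible";
  });

  close__button.addEventListener("click", function () {
    var modal = document.getElementById("modal-form");
    modal.classList.remove("fadeIn");
    modal.style.visibility = "hidden";
  });
});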

eslint: how to lint only touched files

I have recently added ESLint, as a webpack loader, to a codebase that was never parsed with a linter before.
Obviously the number of errors triggered is endless: is there any way to configure ESLint to parse only the touched files? I would like the linter to parse every file in which developers make changes, and those only.
This is the loader I am using so far (in case it is of interest), a very standard configuration:
{test: /\.(jsx|js)$/, loader: "eslint-loader?{cache: true}", exclude: /node_modules/}
Thank you
I accomplished it by using a watcher; this is the solution in detail.
Dependencies for the webpack configuration:
var logger = require('reliable-logger');
var watch = require('watch');
var CLIEngine = require('eslint').CLIEngine
Watcher and linter configuration and startup; I am pasting it with all the TODOs, as it is:
var configureLinterAndWatchFiles = function() {
  var changedFiles = [];
  var formatter;
  var report;
  var SEPARATOR = "////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////";

  // TODO I got the feeling that one of those settings is breaking the
  // linter (probably the path resolving?)
  var linter = new CLIEngine({
    // TODO do I need this? Looks like I don't...
    // envs: ["node"],
    // TODO what is the default?
    useEslintrc: true,
    // TODO I find weird that I get no error with this: configFile: "../.eslintrc1111"
    // make sure that the configuration file is correctly picked up
    configFile: ".eslintrc",
    // TODO useless if your root is src
    // ignorePath: "node_modules"
    // TODO probably both useless... the first I still don't get it,
    // the second you are enforcing the filtering yourself by checks
    // cache: false,
    // extensions: [".js", ".jsx"]
  });

  var fileUpdatedFn = function(f) {
    // TODO I would prefer much more to get the list of changed files from
    // git status (how to?). Here I am building my own
    // resetting the array only for debug purpose
    // changedFiles = [];
    if(/.js$/.test(f) || /.jsx$/.test(f)) {
      changedFiles.push(f);
      logger.info(SEPARATOR);
      report = linter.executeOnFiles(changedFiles);
      logger.info(formatter(report.results));
    }
  };

  // get the default formatter
  formatter = linter.getFormatter();

  watch.watchTree('src', function(f, curr, prev) {
    if (typeof f == "object" && prev === null && curr === null) {
      // Finished walking the tree
    } else if (prev === null) {
      // f is a new file
    } else if (curr.nlink === 0) {
      // f was removed
    } else {
      // f was changed
      fileUpdatedFn(f);
    }
  });
};
in module.exports, as last line:
module.exports = function(callback, options){
  // ... more code ...
  configureLinterAndWatchFiles();
}
That should be it. As I pointed out in a comment:
I wonder, though, if the cache flag (eslint.org/docs/developer-guide/nodejs-api#cliengine) was the best fit for the problem. From here (github.com/adametry/gulp-eslint/issues/…): "--cache flag will skip over any files that had no problems in the previous run unless they have been modified". Not sure if that is my case, but it is of interest.
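For reference, a minimal sketch of what enabling that cache could look like with the CLIEngine used above (the cacheLocation path is illustrative, and whether caching really matches the "only touched files" requirement is exactly the open question from the comment):

// Hedged sketch: CLIEngine with result caching enabled, so files that were
// clean and unmodified since the previous run are skipped.
var CLIEngine = require('eslint').CLIEngine;

var cachingLinter = new CLIEngine({
  useEslintrc: true,
  configFile: ".eslintrc",
  cache: true,                  // skip files that previously had no problems and are unchanged
  cacheLocation: ".eslintcache" // illustrative cache file path
});

var report = cachingLinter.executeOnFiles(['src/']);
console.log(cachingLinter.getFormatter()(report.results));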
Definitely I'm a little late to the party, but I faced the very same issue today and it seems like there is still no common solution for it.
I ended up monkey patching webpack's devServer with this:
const { exec } = require('child_process');
// ...
devServer: {
  hot: false,
  inline: false,
  publicPath: '/',
  historyApiFallback: true,
  disableHostCheck: true,
  after: (app, server, compiler) => {
    compiler.hooks.watchRun.tap(
      'EsLint-upon-save',
      () => {
        // This should only work in dev environment
        if (process.env.NODE_ENV !== 'development') {
          return;
        }
        // Credits to:
        // https://stackoverflow.com/a/43149576/9430588
        const filesChanged = Object.keys(compiler.watchFileSystem.watcher.mtimes);
        // Might be empty
        if (!filesChanged.length) {
          return;
        }
        filesChanged.forEach((changedFileAbsolutePath) => {
          const extension = changedFileAbsolutePath.split('.').pop();
          if (extension === 'js' || extension === 'jsx') {
            exec(`npx eslint --fix --fix-type suggestion,layout ${changedFileAbsolutePath}`);
          }
        });
      }
    );
  }
},
It's surely a quick-and-dirty kind of solution; however, it seems to work fine with eslint#7.7.0.

How to monitor HTTP calls using browsermob-proxy and nightwatch.js?

I am writing test cases using the Nightwatch.js framework for an SPA application. A requirement came in that we have to monitor HTTP calls and get the performance results for the site, something that could easily be achieved using JMeter.
With an automation testing tool, we can do it by using browsermob-proxy and Selenium.
Is it possible to do the same using Nightwatch.js and browsermob-proxy?
Also, what are the steps to do the same?
For using Nightwatchjs and browsermob-proxy together, check out this repo, which includes info on the NodeJS bindings for browsermob-proxy and programmatically generating HAR (HTTP Archive) files.
If you're content with just using Nightwatchjs, this repo has code in the tests directory for the following:
Custom command to get the requests made so far
Custom assertion for checking if a request, given a filter and query string params, exists.
You might have to brush up on how to add custom commands and assertions to your Nightwatch project, but after that you should be set to go!
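To illustrate the custom-command mechanism itself (this is not the code from those repos), a minimal sketch of a Nightwatch custom command file might look like the following; the file name, and using the browser's Performance API as the request source, are assumptions:

// Hypothetical custom command, e.g. commands/getRequestCount.js. Place it in the
// directory configured as custom_commands_path in nightwatch.json and call it as
// browser.getRequestCount(function (count) { ... }).
exports.command = function (callback) {
  var self = this;
  this.execute(
    function () {
      // runs in the browser: count the resource requests recorded so far
      return window.performance.getEntriesByType('resource').length;
    },
    [],
    function (result) {
      if (typeof callback === 'function') {
        callback.call(self, result.value);
      }
    }
  );
  return this;
};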
You can use browsermob-proxy-api.
Just download the browsermob-proxy server, then
install the bindings with the npm command: npm install browsermob-proxy-api --save-dev
Configure your Nightwatch like this in desiredCapabilities:
'test_settings': {
  'default': {
    'launch_url': 'http://localhost:3000',
    'screenshots': {
      'enabled': true, // if you want to keep screenshots
      'path': './screenshots' // save screenshots here
    },
    'globals': {
      'waitForConditionTimeout': 30000 // sometimes internet is slow so wait.
    },
    'desiredCapabilities': { // use Chrome as the default browser for tests
      'browserName': 'chrome',
      'proxy': {
        'proxyType': 'manual',
        'httpProxy': 'localhost:10800'
      },
      'acceptSslCerts': true,
      'javascriptEnabled': true, // turn off to test progressive enhancement
    }
  },
Then download index.js from here:
https://github.com/jmangs/node-browsermob-proxy-api
and add code from the example to your step definitions (if you use Gherkin) or to your describe step.
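A minimal sketch of what that could look like in practice, assuming the browsermob-proxy server is already running locally on port 8080 (the ports and HAR handling are illustrative; the same createHAR/getHAR calls are shown in more detail in the answer below):

// Hedged sketch using browsermob-proxy-api, with the proxy port matching the
// httpProxy setting in the desiredCapabilities above.
var MobProxy = require('browsermob-proxy-api');
var proxy = new MobProxy({ 'host': 'localhost', 'port': '8080' });

proxy.startPort(10800, function (err) {
  if (err) { return console.log(err); }
  proxy.createHAR(10800, { 'captureHeaders': 'true', 'captureContent': 'true' }, function (err) {
    if (err) { return console.log(err); }
    // ... drive the browser through the proxy here ...
    proxy.getHAR(10800, function (err, har) {
      if (err) { return console.log(err); }
      console.log(JSON.parse(har).log.entries.length + ' HTTP calls captured');
    });
  });
});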
A bit late to the dance. I managed to integrate browsermob into Nightwatch. Here are the detailed steps:
Download browsermob proxy https://bmp.lightbody.net/
Open your cmd, go to the bin folder and then start browsermob using "browsermob-proxy".
I am assuming you have a basic Nightwatch setup. You also need mobproxy; install it with "npm i browsermob-proxy-api".
Create a global hook in Nightwatch, say 'globalmodule.js', and give this file path in globals_path in nightwatch.json.
In globalmodule, create global hooks as described in http://nightwatchjs.org/guide#external-globals
In the beforeEach hook, add the code below: //if you are not under a corporate proxy and you don't need to chain to an upstream proxy
var MobProxy = require('browsermob-proxy-api');
var proxyObj = new MobProxy({'host': 'localhost', 'port': '8080'});
//assuming you started browsermob on port 8080. That is in step 2.
//if you are working under a corporate proxy, you might have to chain your request. This needs editing in the browsermob-proxy-api package. Follow the steps given at the end of this section.
Start the proxy on a new port:
proxyObj.startPort(port, function (err, data) {
  if (err) {
    console.log(err);
  } else {
    console.log('New port started')
  }
})
Once we have the new port, we have to start our Chrome browser on that port so that all browser requests are proxied through browsermob.
proxyObj.startPort(port, function (err, data) {
  if (err) {
    console.log(err);
  } else {
    console.log('New port started')
    var dataInJson = JSON.parse(data);
    //Step 8:
    this.test_settings.desiredCapabilities = {
      "browserName": "chrome",
      "proxyObj": proxyObj, //for future use
      "proxyport": dataInJson.port, //for future use
      "proxy": {
        "proxyType": "manual",
        "httpProxy": "127.0.0.1:" + dataInJson.port,
        "sslProxy": "127.0.0.1:" + dataInJson.port //important if you have an https site
      },
      "javascriptEnabled": true,
      "acceptSslCerts": true,
      "loggingPrefs": {
        "browser": "ALL"
      }
    }
  }
})
Try to run with the above settings; you can check the cmd window created in step 2 to confirm requests are going via the above port (there will be some activity).
For creating a HAR and retrieving it afterwards, browsermob-proxy-api gives an excellent API.
Add createHAR.js in any path and mention that path in nightwatch.json [custom_commands section]:
exports.command = function (callback) {
  var self = this;
  if (!self.options.desiredCapabilities.proxyObj) {
    console.error('No proxy setup - did you call setupProxy() ?');
  }
  this.options.desiredCapabilities.proxyObj.createHAR(this.options.desiredCapabilities.proxyport, {
    'captureHeaders': 'true',
    'captureContent': 'true',
    'captureBinaryContent': 'true',
    'initialPageRef': 'homepage'
  }, function (err, result) {
    if (err) {
      console.log(err)
    } else {
      console.log(result)
      if (typeof callback === "function") {
        console.log(this.options.desiredCapabilities.proxyObj);
        console.log(this.options.desiredCapabilities.proxyport);
        // console.log(result);
        callback.call(self, result);
      }
    }
  });
  return this;
};
Then, to get the HAR, add getHAR.js with the code below:
var parsedData;

exports.command = function(callback) {
  var self = this;
  if (!self.options.desiredCapabilities.proxy) {
    console.error('No proxy setup - did you call setupProxy() ?');
  }
  self.options.desiredCapabilities.proxyObj.getHAR(self.options.desiredCapabilities.proxyport, function (err, data) {
    console.log(self.options.desiredCapabilities.proxyObj);
    console.log(self.options.desiredCapabilities.proxyport);
    //console.log(result);
    if (err) {
      console.log(err)
    } else {
      parsedData = JSON.parse(data)
      console.log(parsedData.log.entries)
    }
    if (typeof callback === "function") {
      console.log(self.options.desiredCapabilities.proxyObj);
      console.log(self.options.desiredCapabilities.proxyport);
      callback.call(self, parsedData);
    }
  });
  return this;
};
At the start of the test, createHAR will not have proxyObj, so this step should be executed in sequence. Wrap that step with browser.perform():
browser.perform(function(){
  browser.createHAR()
})
////some navigation
browser.perform(function(){
  browser.getHAR()
})
Note: If you are working behind a corporate proxy, you might have to use the proxy-chaining piece which browsermob offers.
According to the browsermob proxy documentation (down in the API section), /proxy can take the request parameters "proxyUsername" and "proxyPassword".
In node_modules -> browsermob-proxy-api -> index.js, add the lines below after line 22:
this.proxyUsername = cfg.proxyUsername || '';
this.proxyPassword = cfg.proxyPassword || '';
this.queryString = cfg.queryString || 'httpProxy=yourupstreamProxy:8080'; //you will get this from pac file
Then at line 177, where the package makes the '/proxy' request to browsermob, replace
path: url
with
path: url + '?proxyUsername=' + this.proxyUsername + '&proxyPassword=' + this.proxyPassword + '&' + this.queryString

unable to load phantomjs webpage and system modules in karma

I am trying to set up my PhantomJS tests to work through Karma, but I am unable to load the PhantomJS 'webpage' and 'system' modules through RequireJS.
Here is part of the karma.config.js:
module.exports = function(config) {
  config.set({
    // base path that will be used to resolve all patterns (eg. files, exclude)
    basePath: '.',

    // frameworks to use
    // available frameworks: https://npmjs.org/browse/keyword/karma-adapter
    frameworks: ['requirejs'],

    // list of files / patterns to load in the browser
    files: [
      '../ext/ext-all.js',
      'test/jasmine/jasmine/jasmine.css',
      'test/jasmine/jasmine/jasmine.js',
      'test/jasmine/jasmine/jasmine-html.js',
      'resources/webclientLogin-all.css',
      'resources/webclient/webclient.css',
      'app/controller/login/main.js',
      'app/view/login/FormContainer.js',
      'app/view/login/MustChangePasswordForm.js',
      'app/view/MainView.js',
      'app/Application.js',
      'development.js',
      'app.js',
      'test/phantomjs/loginSuccess.js',
    ],
This is the PhantomJS test code:
var page = require('webpage').create(), testindex = 0, loadInProgress = false;
var clientURL = "http://localhost:7001/client/";

page.onConsoleMessage = function(msg) {
  console.log(msg);
};

page.onLoadStarted = function() {
  loadInProgress = true;
  console.log("load started");
};

page.onLoadFinished = function() {
  loadInProgress = false;
  console.log("load finished");
};

console.log('');
console.log("loginSuccess test BEGIN");
console.log('');

var steps = [
  function() {
    console.log("Load Page");
    page.open(clientURL);
  },
  function() {
    console.log("Populate Username and Password");
    page.evaluate(function() {
      var usernameField = Ext.ComponentQuery.query('textfield#login-username-textfield')[0];
      var passwordField = usernameField.next('textfield#login-password-textfield');
      usernameField.setValue('testuser');
      passwordField.setValue('testpassword');
    });
  },
  function() {
    console.log("Fire Submit Button");
    page.evaluate(function() {
      var button = Ext.ComponentQuery.query('#login-button')[0];
      button.fireEvent('click', button);
    });
  },
  function() {
    console.log("Verify redirect on successful login");
    page.evaluate(function() {
      console.log(location.href);
      console.log('');
      if (location.href.indexOf("/clientMB/") > -1) {
        console.log("loginSuccess test SUCCESSFUL");
      }
      else {
        console.log("loginSuccess test FAILURE");
      }
      console.log('');
    });
  }
];
This is the error I'm seeing:
PhantomJS 1.9.7 (Windows 7) ERROR
  Error: Module name "webpage" has not been loaded yet for context: _. Use require([])
  http://requirejs.org/docs/errors.html#notloaded
  at C:/Users/shining.sun/AppData/Roaming/npm/node_modules/requirejs/require.js?d84131f3e98422a49aa91d4f87cf96b245726d96:141
I'm using Karma v0.12.16.
"Require" is working only in the NodeJS environment, so it is unreachable from the Karma tests, because Karma launches a browser and directly injects your code into it, so no NodeJS stuff. Same issue is described here
