Copying .env Files to Multiple Host Machines with PM2 - javascript

I'm trying to deploy my Node.js script to multiple hosts using PM2's deploy process.
It's working fine using a single host, with the following ecosystem.config.js file:
require("dotenv").config({ path: `./envs/.production.env` });
const path = require("path");
module.exports = {
apps: [
{
name: process.env.APP_NAME,
interpreter: process.env.NODE_PATH,
cwd: process.env.PROJECT_PATH
script: "dist/index.js",
instances: process.env.INSTANCES || 0,
exec_mode: "cluster",
env: {
...process.env,
},
},
],
deploy: {
production: {
user: "harrison",
host: process.env.HOST,
key: "~/.ssh/id_rsa",
ref: "origin/master",
repo: process.env.GIT_REPO,
path: process.env.PROJECT_PATH,
// Copy keys to server
"pre-deploy-local": `scp -Cr envs harrison#${process.env.HOST}:${process.env.PROJECT_PATH}/current`,
// Build app and restart PM2 processes
"post-deploy": `yarn install --ignore-engines && \
pwd && \
yarn prod:build && \
yarn prod:serve`,
},
},
};
In order to deploy it to multiple hosts, the PM2 documentation says it's quite simple: just add multiple host names. OK, easy enough. Within my .env file, I'm using a series of IP addresses separated by commas, then splitting those into an array inside my config file, like this:
host: process.env.HOST.split(","),
However, copying over my .env files to the multiple hosts is not quite so easy.
How can I configure the "pre-deploy-local" portion of this ecosystem file to scp my .env files to every host machine?

I ended up running a bash script. The list of HOSTS is in my .env.production file, separated by commas, like this:
HOSTS=123.12.134.122,134.135.134.134
To pass them to the PM2 host field, split the string on commas; to pass them to the bash script as separate arguments, replace each comma with a space. Then execute that bash script on deploy.
My ecosystem file now looks like this:
// PM2 CONFIGURATION FOR PRODUCTION BUILDS
require("dotenv").config({ path: `./envs/.production.env` });
const path = require("path");

const hosts = process.env.HOSTS.replace(/,/g, " ");

module.exports = {
  apps: [
    {
      name: process.env.APP_NAME,
      args: ["--color"],
      interpreter: process.env.NODE_PATH,
      cwd: path.resolve(process.env.PROJECT_PATH, "current"), // Path holding the current version of our app (where post-deploy runs)
      script: "dist/index.js", // Location of the built app entry point
      instances: process.env.INSTANCES || 0,
      exec_mode: "cluster",
      env: {
        ...process.env,
      },
    },
  ],
  deploy: {
    production: {
      user: "harrison",
      host: process.env.HOSTS.split(","),
      key: "~/.ssh/id_rsa2",
      ref: "origin/master",
      repo: process.env.GIT_REPO,
      // Where to deploy on the server
      path: process.env.PROJECT_PATH,
      // Pass hosts as arguments to .env copy script
      "pre-deploy-local": `./deployEnvs.sh ${process.env.PROJECT_PATH} ${hosts}`,
      "post-deploy": `yarn install --ignore-engines && \
        yarn prod:build && \
        yarn prod:serve`,
    },
  },
};
And the bash script called deployEnvs.sh looks like this:
#!/bin/bash
PROJECT_PATH="${1}"
# every argument after the first is a host address
for HOST in "${@:2}"
do
  scp -Cr envs "harrison@${HOST}:${PROJECT_PATH}/current"
done
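For illustration, invoking the script by hand with hypothetical values (a made-up project path plus the two example IPs above) would look like this:
./deployEnvs.sh /var/www/myapp 123.12.134.122 134.135.134.134
Each host after the project path argument then receives its own scp copy of the envs directory.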

Related

What is the best way to add APM to a NuxtJS project

What is the right way to configure/enable an Elastic APM agent in a Nuxtjs project?
I referred to this documentation for a custom NodeJS app. The key takeaway was:
It's important that the agent is started before you require any other modules in your Node.js application - i.e. before http and before your router etc.
I added the following snippet in nuxt.config.js, but the APM agent is not started or working. I do not see any errors in the app logs.
var apm = require('elastic-apm-node').start({
  serviceName: 'nuxt-app',
  serverUrl: 'http://ELK_APM_SERVER:8200'
})
Is there any other way to do this?
We managed to get this working using a custom Nuxt module which explicitly requires the Node modules to instrument after it has initiated the APM module.
modules/elastic-apm.js:
const apm = require('elastic-apm-node');
const defu = require('defu');

module.exports = function() {
  this.nuxt.hook('ready', async (nuxt) => {
    const runtimeConfig = defu(nuxt.options.privateRuntimeConfig, nuxt.options.publicRuntimeConfig);
    const config = (runtimeConfig.elastic && runtimeConfig.elastic.apm) || {};
    if (!config.serverUrl) {
      return;
    }
    if (!apm.isStarted()) {
      await apm.start(config);
      // Now explicitly require the modules we want APM to hook into, as otherwise
      // they would not be instrumented.
      //
      // Docs: https://www.elastic.co/guide/en/apm/agent/nodejs/master/custom-stack.html
      // Modules: https://github.com/elastic/apm-agent-nodejs/tree/master/lib/instrumentation/modules
      require('http');
      require('http2');
      require('https');
    }
  });
}
nuxt.config.js:
module.exports = {
  // Must be in modules, not buildModules
  modules: ['~/modules/elastic-apm'],
  publicRuntimeConfig: {
    elastic: {
      apm: {
        serverUrl: process.env.ELASTIC_APM_SERVER_URL,
        serviceName: 'my-nuxt-app',
        usePathAsTransactionName: true // prevent "GET unknown route" transactions
      }
    }
  }
};
All the answers are outdated and were incorrect from the beginning (17.02.2022).
To make it work follow these steps:
1.) Create a nodeApm.js in your root dir with the following content:
const nodeApm = require('elastic-apm-node')
if (!nodeApm.isStarted()) {
  nodeApm.start()
}
2.) Use environment variables to store your config. For example:
ELASTIC_APM_SERVICE_NAME=NUXT_PRODUCTION
ELASTIC_APM_SECRET_TOKEN=yoursecrettokenhere
3.) Edit your package.json
"scripts": {
// if you want apm also on dev to test, add it also here
"dev": "node -r ./nodeApm.js node_modules/nuxt/bin/nuxt",
...
"start": "node -r ./nodeApm.js node_modules/nuxt/bin/nuxt start",
...
! Be aware that in ~2022 the node_modules bin folder lost the "." in its directory name (node_modules/nuxt/bin/nuxt, not node_modules/nuxt/.bin/nuxt)
! In all other answers people forget the start parameter at the end:
"start": "node -r ./nodeApm.js node_modules/nuxt/bin/nuxt start",
Based on what I've seen, it looks like there isn't a "right" way to do this with the stock nuxt command-line application. The problem seems to be that while nuxt.config.js is the first place a user has a chance to add some JavaScript, the nuxt command-line application bootstraps Node's HTTP frameworks before this config file is required. This means the elastic agent (or any APM agent) doesn't have a chance to hook into those modules.
The current recommendations from the Nuxt team appear to be:
Invoke nuxt manually via -r
{
  "scripts": {
    "start": "node -r elastic-apm-node node_modules/nuxt/.bin/nuxt"
  }
}
Skip nuxt and use NuxtJS programmatically as middleware in your framework of choice:
const { loadNuxt } = require('nuxt')
const nuxtPromise = loadNuxt('start')
app.use((req, res) => { nuxtPromise.then(nuxt => nuxt.render(req, res)) })
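For context, here is a minimal sketch of that second option wired into an Express server (Express and the port are assumptions here; any framework with connect-style middleware works the same way):
// server.js - minimal sketch; assumes nuxt (v2) and express are installed
const express = require('express')
const { loadNuxt } = require('nuxt')

const app = express()
// 'start' loads the already-built production bundle
const nuxtPromise = loadNuxt('start')

// hand every request to Nuxt once it has finished loading
app.use((req, res) => {
  nuxtPromise.then(nuxt => nuxt.render(req, res))
})

app.listen(3000)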
Based on Alan Storm's answer (from the Nuxt team) I made it work, but with a little modification:
I created a file named nodeApm.js with the following code:
const nodeApm = require('elastic-apm-node')
if (!nodeApm.isStarted()) { ... // configuration magic }
In the scripts section I added:
"start": "node -r ./nodeApm.js node_modules/nuxt/.bin/nuxt"

Unable to bundle a Web Worker to be imported like an NPM package

My goal is to be able to publish a Web Worker NPM package which can be imported normally (import MyPkg from 'my-pkg') without requiring the user to import it with worker-loader (inline or otherwise).
To accomplish this, I've tried using a Babel build script as well as Webpack with worker-loader.
In the following examples there are two projects: the Web Worker package ("Package") which is npm linked to a test application ("App").
The Package is split into two files: entry.webpack.js and index.worker.js. The entry, when built and moved to /dist, is designated as the main file in the package.json, and it currently looks like this:
entry.webpack.js
var MyPkg = require('worker-loader!./index.worker.js')
module.exports = MyPkg
index.worker.js
// This is just example code. It doesn't really matter
// what this code does so long as it ends up being run
// as a Web Worker.
var selfRef = self;
function ExampleWorker () {
  console.log('Running Worker...');
  setTimeout(function () {
    // wait 10 seconds then post a message
    selfRef.postMessage({foo: "bar"});
  }, 10000)
}
module.exports = ExampleWorker
I then bundle the Package with Webpack:
package.json
"build": "rm -rf dist/*.* && webpack --progress"
webpack.config.js
module.exports = {
  mode: 'production',
  devtool: 'source-map',
  entry: __dirname + '/src/entry.webpack.js',
  output: {
    filename: 'bundle.js',
    path: __dirname + '/dist'
  },
  optimization: {
    minimize: false
  }
}
This generates two files: bundle.js and a Web Worker file named with a hash, [hash].worker.js, containing the code we want evaluated. The key part, though, is that because we used worker-loader inline to import, the webpack-compiled output looks something like:
module.exports = function() {
  return new Worker(__webpack_require__.p + "53dc9610ebc22e0dddef.worker.js");
};
Finally, the App should be able to import it and use it like this:
App.js
import MyPkg from 'my-pkg'

// logging MyPkg here produces `{}`
const worker = new MyPkg()
// That throws an Error:
// Uncaught TypeError: _my_pkg__WEBPACK_IMPORTED_MODULE_4___default.a is not a constructor
worker.onmessage = event => {
  // this is where we'd receive our message from the web worker
}
However, you can get it to work if, in the App itself, you import the worker build like this:
import MyPkg from 'my-pkg/dist/53dc9610ebc22e0dddef.worker.js'
But it's a requirement of the package to:
A) NOT require applications using the package to explicitly install worker-loader, and
B) NOT require them to reference my-pkg/dist/[hash].worker.js explicitly.
I've also tried designating the built [hash].worker.js as the main in package.json, but that doesn't work either.
Edit 1: I forgot to mention that I'm basing all of this off of how react-pdf does it. If you take a look in /src/entry.webpack.js and follow how it works throughout the package you'll see a few similarities.
You could try worker-loader with the inline option:
{
  test: /\.worker\.js$/,
  use: {
    loader: 'worker-loader',
    options: {
      name: '[name].[hash:8].js',
      // notice here
      inline: true,
      fallback: false
    }
  }
},
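With inline: true, worker-loader embeds the worker code in the main bundle (loaded via a Blob URL at runtime), so consumers never need the separate [hash].worker.js file or a worker-loader install of their own. As a sketch, using the same hypothetical file names as above, the Package's entry can also request this per-import instead of via a rule (query syntax from worker-loader v1/v2):
// entry.webpack.js - inline request form of the rule above
var MyPkg = require('worker-loader?inline=true&fallback=false!./index.worker.js')
module.exports = MyPkg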

Webpack React: Conditionally load json config files

I have a Webpack React project which I'm testing on my "staging" server.
Now it's time to release it on the "production" server.
I'm using a server.json file which contains server info such as API keys, the API address, and so on.
What I want is to use a different server.json for "production" and "staging".
And when I use production-server.json, there should be no traces of staging-server.json in my bundle.
src
- config
-- config.js
-- production-server.json
-- staging-server.json
Maybe something like yarn build-staging and yarn build-production?
You should use environment variables and webpack's DefinePlugin. Additionally, you can use node-config to automatically load a json configuration file based on your NODE_ENV.
package.json
"scripts": {
"build:dev": "NODE_ENV=development start-something",
"build": "NODE_ENV=production start-something"
}
project config structure
config
  default.json
    { "api": "https://api.mysite.com/v1" }
  staging.json
    { "api": "http://localhost:8000/v1" }
webpack config
// node-config will load your staging.json or default.json file here
// depending on what NODE_ENV is
const config = require('config');

plugins: [
  // inject window.CONFIG into your app
  new webpack.DefinePlugin({
    CONFIG: JSON.stringify(config)
  })
]
Then in your react code you will have access to environment-specific config
componentDidMount() {
  // in prod: https://api.mysite.com/v1/user/some-user-id
  // in staging: http://localhost:8000/v1/user/some-user-id
  return axios(`${CONFIG.api}/user/${this.props.userId}`).then(whatever...)
}
If you're on Windows, use cross-env to set your environment variable.
Using node-config isn't the only way to do this, there are several, but I find it pretty easy, unless you're working with electron.
edit
Since node-config uses nodejs, it is typically used in front-end projects in conjunction with webpack. If you are unable to integrate it with webpack, you don't need to use node-config at all; I would do something like this:
project structure
config
  default.json
  development.json
  test.json
  index.js
src
  ...etc
config files
// default.json, typically used for production
{
  "api": "https://api.mysite.com/v1"
}

// development.json
{
  "api": "http://localhost:8000/v1"
}

// index.js
// get process.env via babel-plugin-transform-inline-environment-variables
import production from './default.json';
import development from './development.json';

const { NODE_ENV: env } = process.env;
const config = {
  production,
  development
};
export default config[env];
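Consuming the selected config is then a plain import. A minimal sketch (the file name, relative path, and fetch call are illustrative, not part of the original answer):
// src/api.js - hypothetical consumer of config/index.js
import config from '../config';

export const getUser = (id) =>
  fetch(`${config.api}/user/${id}`).then((res) => res.json());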

node dotenv won't work with pm2

I have an application where locally (without pm2) all the environment variables in the .env file work just fine using dotenv.
But on the server where I'm using pm2 to run the app, the environment variables remain undefined.
The pm2 commands I'm using to run the app on the server are:
pm2 start myapp/app.js
pm2 startup
pm2 save
dotenv will read the .env file located in the current working directory.
When you call pm2 start myapp/app.js it won't search for myapp/.env:
.env   // It will try to load this, which doesn't exist
myapp/
  app.js
So you have two solutions.
Use the path option:
const path = require('path');
require('dotenv').config({ path: path.join(__dirname, '.env') });
Or call your script from inside myapp/
pm2 start app.js
A good pattern here is to remove dotenv from your code and "require" it on the command line. This makes your code nicely transportable between any environment (including cloud-based) - which is one of the main features of environment variables.
Note: you will still need to install dotenv in your project via npm when running it on a server.
a) code up your .env file alongside your script (e.g. app.js)
b) to run your script without pm2:
node -r dotenv/config app.js
c) in pm2.config.js:
module.exports = {
  apps: [{
    name: 'My Application',
    script: 'app.js',
    node_args: '-r dotenv/config',
    ...
  }],
}
and then
pm2 start pm2.config.js
note: the use of dotenv/config on the command line is one of the best practices recommended by dotenv themselves
edit 2021: for completeness - as my answer has got some ticks, I wanted to add a 4th option to the list:
d) combined pm2/env config
module.exports = {
  apps: [{
    name: 'My Application',
    script: 'app.js',
    env: {
      PORT: 5010,
      DB_STRING: 'mongodb://localhost:27017',
      ...
    },
  }]
};
This will be useful if you are treating your pm2.config as environment configuration kept outside of git etc. It removes the need for a separate .env file, and for dotenv entirely, as pm2 injects the env variables into your script's process.
You have to kill your pm2 process first:
pm2 kill
Then restart pm2 using:
pm2 start app.js
I had the same problem, but it wasn't explained clearly, so here is the solution based on GitHub user vmarchaud's comment.
This also fixes the issue people had with @Andy Lorenz's solution.
In my case I wanted to create an ecosystem file for multiple apps, but I kept getting:
Error: Cannot find module 'dotenv/config'
The solution was easy.
You have to declare cwd, aka the project folder the dotenv/config will be read from.
module.exports = {
  apps: [{
    name: 'app1 name',
    script: 'app1.js',
    cwd: '/path/to/folder/',
    exec_mode: 'fork_mode',
    node_args: '-r dotenv/config',
  }, {
    name: 'app2 name',
    script: 'app2.js',
    cwd: '/path/to/folder/',
    instances: 'max',
    exec_mode: 'cluster',
    node_args: '-r dotenv/config',
  }],
};
You can parse .env using the dotenv lib and set the variables manually in ecosystem.config.js.
ecosystem.config.js:
const { calcPath, getEnvVariables } = require('./helpers');

module.exports = {
  apps: [
    {
      script: calcPath('../dist/app.js'),
      name: 'dev',
      env: getEnvVariables(),
    },
  ],
};
helpers.js:
const path = require('path');
const dotenv = require('dotenv');
const fs = require('fs');

function calcPath(relativePath) {
  return path.join(__dirname, relativePath);
}

// this function will parse the `.env` file but not set the variables on `process.env`
const getEnvVariables = () => {
  const envConfig = dotenv.parse(fs.readFileSync(calcPath('.env')));
  const requiredEnvVariables = ['MODE'];
  for (const envVariable of requiredEnvVariables) {
    if (!envConfig[envVariable]) {
      throw new Error(`Environment variable "${envVariable}" is not set`);
    }
  }
  return envConfig;
};

module.exports = { calcPath, getEnvVariables };
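For a quick sanity check before involving pm2, you can print what the apps will receive (a hypothetical one-off script; it assumes helpers.js exports getEnvVariables as above):
// check-env.js - hypothetical; prints the parsed variables
const { getEnvVariables } = require('./helpers');
console.log(getEnvVariables()); // e.g. { MODE: 'production', ... }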
None of this worked for me because I was using cluster mode.
I installed dotenv as a dev dependency at the root (I was using yarn workspaces too).
Then I did this:
require('dotenv').config({ path: 'path/to/your/.env' })
module.exports = {
  apps: [
    {
      name: 'app',
      script: 'server/dist/index.js',
      instances: 2,
      exec_mode: 'cluster',
      instance_var: 'APP_INSTANCE_SEQ',
      // listen_timeout: 10000,
      // restart_delay: 10000,
    }
  ]
}
I use a much simpler version of @Marcos' answer:
.env
app.js
For example, we need to store a token in the .env file and read it in app.js.
Inside .env:
token=value
Inside app.js:
require('dotenv').config();
console.log(process.env.token)
Also, don't forget: if you add the .env file to .gitignore and then git pull your repo on a VPS or similar, you need to copy the .env file over manually, otherwise your app won't work.
And in some cases it matters in what environment you are running your config, so make sure the NODE_ENV=production string is added to your .env file.
After all that, you can use pm2 start app.js right from your app's folder.
This was my project setup:
/src/app.ts
which was then compiled into the dist folder:
/dist/app.js
My .env file was outside the dist folder, so it wasn't accessible.
This is the command I tried:
pm2 start app.js --env=.env
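pm2's --env flag selects a named env block from an ecosystem file rather than loading a file, so that command won't pick up .env. A hedged fix for this layout, along the lines of the path-option answer above (assuming dotenv is installed and .env lives one level above dist/):
// at the top of src/app.ts (compiled to dist/app.js) - minimal sketch
// __dirname resolves to dist/ at runtime, so point dotenv one level up
const path = require('path');
require('dotenv').config({ path: path.join(__dirname, '..', '.env') });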

nightwatchjs parallel mode selenium hub docker compose

I'm trying to run tests written using nightwatchjs in parallel in Docker using Selenium Hub. I'm able to get the tests to run in parallel in Docker without Selenium Hub; however, some child processes will time out, causing multiple retries, and the results are very inconsistent. I'm hoping to use Selenium Hub or something similar to remove the timeouts and retries so the test results are more consistent and stable.
However, now when I run docker-compose run --rm nightwatch with the following code, the selenium server starts in parallel mode and multiple child processes are spawned, but only the first one executes. The other child processes get Error retrieving a new session from the selenium server. Connection refused! Is selenium server started? Am I missing something to get the nightwatchjs tests to run in parallel without timing out?
nightwatch.conf.js
module.exports = {
  src_folders: ['tests'],
  output_folder: 'reports',
  custom_commands_path: '',
  custom_assertions_path: '',
  page_objects_path: 'page_objects',
  test_workers: true,
  live_output: true,
  detailed_output: true,
  selenium: {
    start_process: true,
    server_path: './bin/selenium-server-standalone-3.0.1.jar',
    log_path: '',
    host: '127.0.0.1',
    port: 4444,
    cli_args: {
      'webdriver.chrome.driver': './node_modules/chromedriver/bin/chromedriver'
    }
  },
  test_settings: {
    default: {
      launch_url: 'https://example.com',
      selenium_port: 4444,
      selenium_host: 'hub',
      silent: true,
      screenshots: {
        'enabled': false,
        'path': ''
      },
      desiredCapabilities: {
        browserName: 'chrome',
        javascriptEnabled: true,
        acceptSslCerts: true,
        chromeOptions: {
          args: [
            '--window-size=1024,768',
            '--no-sandbox'
          ]
        }
      },
      globals: {
        waitForConditionTimeout: 20000,
        asyncHookTimeout: 70000
      }
    }
  }
};
docker-compose.yml
version: '2'
services:
  nightwatch:
    build:
      context: .
    command: /bin/sh -c "node ./node_modules/nightwatch/bin/nightwatch"
    links:
      - chrome
      - hub
    volumes:
      - .:/opt/nightwatch
  chrome:
    environment:
      VIRTUAL_HOST: node.chrome.docker
      HUB_PORT_4444_TCP_ADDR: hub
      HUB_PORT_4444_TCP_PORT: 4444
    image: selenium/node-chrome:3.1.0-astatine
    links:
      - hub
  hub:
    ports:
      - 4444:4444
    image: selenium/hub:3.1.0-astatine
Dockerfile
FROM java:8-jre
## Node.js setup
RUN curl -sL https://deb.nodesource.com/setup_6.x | bash -
RUN apt-get install -y nodejs
RUN npm config set spin false
WORKDIR /app
COPY . ./
RUN npm install
The Docker node images are configured to run only one browser instance by default. You can change this by overriding environment variables, like so:
chrome:
  environment:
    VIRTUAL_HOST: node.chrome.docker
    HUB_PORT_4444_TCP_ADDR: hub
    HUB_PORT_4444_TCP_PORT: 4444
    NODE_MAX_INSTANCES: 5
    NODE_MAX_SESSION: 5
  image: selenium/node-chrome:3.1.0-astatine
  links:
    - hub
In case you're interested, I discovered this by looking at the Dockerfile source.
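As a follow-up, it can also help to cap Nightwatch's parallelism so child processes don't race the grid for sessions. A sketch (the worker count is an assumption, chosen to match NODE_MAX_INSTANCES above):
// in nightwatch.conf.js - cap test workers to the node's capacity
test_workers: {
  enabled: true,
  workers: 5 // keep in sync with NODE_MAX_INSTANCES / NODE_MAX_SESSION
},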
