log4js-node in Node.js not logging to file

Not a node expert, and this is the first time I'm using log4js-node.
I am trying to get my ERROR logs and any of my console logs to write to a log_file.log file with log4js on a Node.js server running Express. Here is my config file:
{
  "replaceConsole": true,
  "appenders": [
    {
      "type": "file",
      "filename": "log_file.log",
      "maxLogSize": 20480,
      "backups": 3,
      "category": "relative-logger"
    },
    {
      "type": "logLevelFilter",
      "level": "ERROR",
      "appender": {
        "type": "file",
        "filename": "log_file.log"
      }
    },
    {
      "appender": {
        "type": "smtp",
        "recipients": "myemail@gmail.com",
        "sender": "myemailadd@gmail.com",
        "sendInterval": 60,
        "transport": "SMTP",
        "SMTP": {
          "host": "localhost",
          "port": 25
        }
      }
    }
  ]
}
And here is how I'm requiring it in my app.js file:
var log4js = require("log4js");
log4js.configure("log_config.json");
var logger = log4js.getLogger();
I'm sending manual errors to log4js with this (I can get it to log to the console fine; I just can't get the log file written):
logger.error('A mandrill error occurred: ' + e.name + ' - ' + e.message);
And I'm hoping log4js catches the application's normal ERROR messages.
How do I get log4js to log to log_file.log and then send me an email of that log? FYI, I have installed nodemailer 0.7 to handle SMTP.

Maybe you could remove "category": "relative-logger" from your file appender.
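For illustration, this is what the question's file appender would look like with the category field dropped; an untested sketch, everything else kept as in the original config:
{
  "replaceConsole": true,
  "appenders": [
    {
      "type": "file",
      "filename": "log_file.log",
      "maxLogSize": 20480,
      "backups": 3
    }
  ]
}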

Yes, remove "category": "relative-logger"; it somehow blocks the data transfer into your log file. Or try something like this:
// Setup Logging
log4js.configure({
  appenders: [
    { type: 'console' },
    { type: 'file', filename: '.\\logs\\PesaFastaArchiveData.log' }
  ]
});
The path is, of course, a Windows path here.
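Either way, once an appender is configured, a quick sanity check is to grab a logger and write an error; a minimal sketch reusing the calls already shown in the question:
var log4js = require('log4js');
log4js.configure('log_config.json'); // or the inline object above

var logger = log4js.getLogger(); // no category: uses the default
logger.error('test entry');      // should appear in the configured log file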

Related

stop or kill node media server

I am trying to implement a stop feature for live video streaming using node-media-server.
Basically, I want to stop node-media-server completely and restart it later.
const NodeMediaServer = require("node-media-server");

let config = {
  logType: 3,
  rtmp: {
    port: rtmpPort,
    chunk_size: 60000,
    gop_cache: true,
    ping: 60,
    ping_timeout: 30,
  },
  http: {
    port: httpPort,
    allow_origin: "*",
  },
  relay: {
    ffmpeg: "/usr/bin/ffmpeg",
    tasks: [
      {
        app: "cctv",
        mode: "static",
        edge: "rtsp://" + cameraUrl + "/h264_ulaw.sdp",
        name: "uterum",
        rtsp_transport: "udp",
      },
    ],
  },
};

let nms = new NodeMediaServer(config);
nms.run();
I see on the project's GitHub that there is a stop method. Have you tried using it?
https://github.com/illuspas/Node-Media-Server/blob/master/node_media_server.js
nms.stop();
Answer to the comment:
Go to the GitHub repo. The Express file is app.js. Inside it you see:
const NodeMediaServer = require('./');
...
let nms = new NodeMediaServer(config);
nms.run();
See the NodeMediaServer? There is another file in the same folder, node_media_server.js, which exports it with module.exports = NodeMediaServer. Just take a look at that file and you will see the stop method.
That's all.
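Putting the pieces together, a minimal stop-and-restart sketch built on the question's own config object (re-creating the instance before the second run() is a cautious assumption; depending on the node-media-server version, calling run() again on the same instance may also work):
// Stop the running server completely.
nms.stop();

// Later: restart with the same config.
nms = new NodeMediaServer(config);
nms.run();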

Google cloud 404 after verifying file exists

I have another project where this same code works successfully, so it may be some configuration option I've missed this time around. I'm using the Google Cloud API to access Firebase storage.
For clarity, the file does exist.
var storage = require('@google-cloud/storage')({
  keyFilename: 'serviceAccountKey.json',
  projectId: 'my-id'
});
var bucket = storage.bucket('my-id.appspot.com');
var file = bucket.file('directory/file.json'); // this exists!

file.exists(function(err, exists) {
  console.log("Checking for challenges file. Results:" + exists + ", err:" + err); // logs "Checking for challenges file. Results:true, err:null"
  if (exists) {
    console.log("File exists. Printing."); // prints "File exists. Printing."
    file.download().then(function(currentFileData) {
      console.log("This line is never reached.");
    }).catch(err => {
      console.error('ERROR:', err); // gives a 404 error
    });
  }
});
Instead of printing "this line is never reached.", it prints the following caught error:
ERROR: { ApiError: Not Found at Object.parseHttpRespMessage (/user_code/node_modules/@google-cloud/storage/node_modules/@google-cloud/common/src/util.js:156:33) at Object.handleResp ... ... The full error is colossal, so I won't post it here in its entirety unless required.
It's possible that the user trying to access the file only has access to the bucket but not to the file. Check the ACLs of both the bucket and the file in both projects and compare what you get:
myBucket.acl.get()
  .then(acls => console.log("Bucket ACLs:", acls));
myFile.acl.get()
  .then(acls => console.log("File ACLs:", acls));
You should see an output like this:
[ [ { entity: 'user-abenavides333@gmail.com', role: 'OWNER' },
    { entity: 'user-dwilches@gmail.com', role: 'OWNER' } ],
  { kind: 'storage#objectAccessControls',
    items: [ [Object], [Object] ] } ]
If there is no difference there, try the following more verbose versions of the same code:
myBucket.acl.get()
  .then(acls => console.log("Bucket ACLs:", JSON.stringify(acls, null, '\t')));
myFile.acl.get()
  .then(acls => console.log("File ACLs:", JSON.stringify(acls, null, '\t')));
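If the comparison shows that the account from serviceAccountKey.json has an entry on the bucket but not on the file, one option is to grant it read access on the file explicitly. A hedged sketch using the same library's ACL helpers; the service-account email is a made-up placeholder:
// Hypothetical service-account email; replace with the client_email from serviceAccountKey.json.
myFile.acl.readers.addUser('my-service-account@my-id.iam.gserviceaccount.com', function(err, aclObject) {
  if (err) {
    console.error('Could not update the file ACL:', err);
  } else {
    console.log('Granted READER on the file:', aclObject);
  }
});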

ssh2 node js sftp protocol Error Handshake failed

Hello, I have a little problem. I developed an SFTP client script with Node.js that connects to an SFTP server and grabs some files. I tested it with my local server and it works, but when I tried to use it with the production server I received this error:
Error: Handshake failed: no matching key exchange algorithm
I already generated the RSA key using ssh-keygen.
Here is the relevant part of the script:
var Client = require('ssh2').Client;
var fs = require('fs');
var path = require('path');

var args = process.argv.slice(2);
var connSettings = {
  host: args[0] || '127.0.0.1',
  port: args[1] || 22,
  username: args[2] || 'karim',
  password: args[3] || 'karimos',
  algorithms: {
    hmac: ['hmac-sha2-256', 'hmac-sha2-512', 'hmac-sha1', 'hmac-sha1-96']
  }
};
I also had the same problem and solved it by adding the following:
algorithms: {
  kex: [
    "diffie-hellman-group1-sha1",
    "ecdh-sha2-nistp256",
    "ecdh-sha2-nistp384",
    "ecdh-sha2-nistp521",
    "diffie-hellman-group-exchange-sha256",
    "diffie-hellman-group14-sha1"
  ],
  cipher: [
    "3des-cbc",
    "aes128-ctr",
    "aes192-ctr",
    "aes256-ctr",
    "aes128-gcm",
    "aes128-gcm@openssh.com",
    "aes256-gcm",
    "aes256-gcm@openssh.com"
  ],
  serverHostKey: [
    "ssh-rsa",
    "ecdsa-sha2-nistp256",
    "ecdsa-sha2-nistp384",
    "ecdsa-sha2-nistp521"
  ],
  hmac: [
    "hmac-sha2-256",
    "hmac-sha2-512",
    "hmac-sha1"
  ]
}
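For context, the algorithms block is just another property of the object passed to connect(); a minimal sketch reusing the question's connection settings (the host, credentials, and exact algorithm lists are placeholders to adapt):
var Client = require('ssh2').Client;

var conn = new Client();
conn.on('ready', function() {
  console.log('Client ready');
  conn.sftp(function(err, sftp) {
    if (err) throw err;
    // ...grab files over sftp here...
    conn.end();
  });
}).connect({
  host: '127.0.0.1',
  port: 22,
  username: 'karim',
  password: 'karimos',
  algorithms: {
    kex: ['diffie-hellman-group14-sha1', 'diffie-hellman-group-exchange-sha256']
    // ...plus cipher/serverHostKey/hmac lists as needed
  }
});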
For myself, I added debug: console.log to my config object. This output more details about the connection attempt:
{
  "port": 22,
  "host": "test.test.com",
  "user": "test",
  "password": "******",
  "debug": console.log
}
Handshake: (remote) KEX method: diffie-hellman-group14-sha1,diffie-hellman-group-exchange-sha1
Handshake: No matching key exchange algorithm
Based on this output I updated the algorithms in my config:
{
  "port": 22,
  "host": "test.test.com",
  "user": "test",
  "password": "******",
  "algorithms": {
    "kex": [
      "diffie-hellman-group14-sha1",
      "diffie-hellman-group-exchange-sha1"
    ]
  }
}
After adding this algorithm, the connection was successful on my machine.
You may edit the /etc/ssh/sshd_config file on your server in order to allow the key authentication method :)
My first suggestion would be to upgrade the ssh server on the server you're connecting to so that a more secure configuration can be had. This is the best/most secure solution.
If you cannot make changes on this server and you absolutely need to connect, then you can explicitly set the kex to a list of key exchange methods you want to support (valid algorithm names can be found in the ssh2-streams documentation). For example:
algorithms: {
  kex: [ ... ]
}
Have you tried changing your algorithms declaration to...?
algorithms: {
  serverHostKey: ['hmac-sha2-256', 'hmac-sha2-512', 'hmac-sha1', 'hmac-sha1-96'],
}

Strongloop app does not load local datasource

I want to use different environment-specific datasource configurations in a Strongloop app. I saw at https://docs.strongloop.com/display/public/LB/Environment-specific+configuration that the priority of configurations is:
1. Environment-specific configuration, based on the value of NODE_ENV; for example, server/config.staging.json.
2. Local configuration file; for example, server/config.local.json.
3. Default configuration file; for example, server/config.json.
I have declared three datasource conf files:
datasources.json:
{}
datasources.local.json:
{
  "db": {
    "name": "db",
    "connector": "loopback-connector-mongodb",
    "host": "127.0.0.1",
    "port": "27017",
    "database": "woowDev"
  }
}
and datasources.staging.js:
module.exports = {
  db: {
    connector: 'mongodb',
    hostname: process.env.OPENSHIFT_MONGODB_DB_HOST,
    port: process.env.OPENSHIFT_MONGODB_DB_PORT,
    user: process.env.OPENSHIFT_MONGODB_DB_USERNAME,
    password: process.env.OPENSHIFT_MONGODB_DB_PASSWORD,
    database: 'woow'
  }
};
Now, unless I put the configuration of datasources.local.json in datasources.json, it does not work. I keep getting the error: AssertionError: User is referencing a dataSource that does not exist: "db"
I also tried adding the local conf to the staging conf and defining the NODE_ENV variable, but it would not load datasources.staging.js either. I defined NODE_ENV by doing:
export NODE_ENV=staging
I used node-debug to track down the issue, and it led to this particular Strongloop source file:
node_modules/loopback-boot/lib/config-loader.js
The function:
function mergeDataSourceConfig(target, config, fileName) {
  for (var ds in target) {
    var err = applyCustomConfig(target[ds], config[ds]);
    if (err) {
      throw new Error('Cannot apply ' + fileName + ' to `' + ds + '`: ' + err);
    }
  }
}
will not merge configs if the "db" key is not defined in the master file, i.e. datasources.json.
So, I just modified the datasources.json to:
{
  "db": {}
}
and it worked!
Maybe it is my fault but the documentation is not clear enough.
The trick is to add all the datasources (memory/redis/mongo/postgres) in datasources.json and then override parameters in datasources.local.js, datasources.staging.js, or datasources.production.js.
Sample file configuration:
datasources.json
{
  "db": {
    "name": "db",
    "connector": "memory"
  },
  "redisDS": {
    "name": "redisDS",
    "connector": "redis"
  },
  "testPostgress": {
    "port": 5432,
    "name": "localPostgress",
    "user": "akumar",
    "connector": "postgresql"
  }
}
datasources.staging.js
module.exports = {
  db: {
    connector: 'memory'
  },
  redisDS: {
    connector: 'redis'
  },
  testPostgress: {
    database: 'stagingPostgress'
  }
};
LoopBack will override the database name in this case; similarly, you can override other datasource parameters like port and user.
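To confirm which values actually won after merging, you can print the resolved datasource settings once the app has booted. A small sketch assuming a standard LoopBack layout where server/server.js exports the app (the helper filename is hypothetical):
// check-datasources.js (hypothetical helper; run with: NODE_ENV=staging node check-datasources.js)
var app = require('./server/server');

// app.datasources holds the merged result of datasources.json
// plus whichever environment-specific overrides loopback-boot applied.
console.log('db settings:', app.datasources.db.settings);
console.log('redisDS settings:', app.datasources.redisDS.settings);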

AWS Lambda : Unable to import module 'mail': No module named mail

I'm following this little tutorial for sending an email when an object is uploaded to an S3 bucket. To use it with Lambda I created a .zip file with the following structure:
mail.js
/node_modules
mail.js has the following code:
// Assumed setup, not shown in the original snippet: the aws-sdk require, the S3
// client, and the exported handler that matches the "mail.handler" setting below.
var aws = require('aws-sdk');
var MailComposer = require('mailcomposer').MailComposer,
    mailcomposer = new MailComposer();
var s3 = new aws.S3();
var ses = new aws.SES({
  accessKeyId: 'xxxxxxxxxxxx',
  secretAccessKey: 'xxxxxxxxxxxx'
});

exports.handler = function(event, context) {
  // Bucket and key come from the S3 "Put" event that triggers the function.
  var bucket = event.Records[0].s3.bucket.name;
  var key = event.Records[0].s3.object.key;
  var params = { Bucket: bucket, Key: key };

  s3.getObject(params, function(err, data) {
    if (err) {
      // error handling
    } else {
      mailcomposer.setMessageOption({
        from: 'chirer@gmail.com',
        to: 'sjuif@gmail.com',
        subject: 'Test',
        body: 's3://' + bucket + '/' + key,
        html: 's3://' + bucket + '/' + key +
              '<br/><img src="cid:' + key + '" />'
      });
      var attachment = {
        contents: data.Body,
        contentType: 'image/png',
        cid: key
      };
      mailcomposer.addAttachment(attachment);
      mailcomposer.buildMessage(function(err, messageSource) {
        if (err) {
          // error handling
        } else {
          ses.sendRawEmail({ RawMessage: { Data: messageSource } }, function(err, data) {
            if (err) {
              // error handling
            } else {
              context.done(null, data);
            }
          });
        }
      });
    }
  });
};
When I create the Lambda function I do the following:
In the select blueprint menu I select "s3-get-object-python"
I choose my bucket
As event I choose "Put"
I click "next"
I give a name to the Lambda function and choose "upload a .zip file"
I upload the zip file with mail.js and the node_modules directory
As handler I fill in "mail.handler"
As role I choose "S3 execution role". The wizard now shows a new screen where I click "view policy document". I edit the document so that it looks like this:
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "logs:CreateLogGroup",
        "logs:CreateLogStream",
        "logs:PutLogEvents"
      ],
      "Resource": "arn:aws:logs:*:*:*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "ses:SendEmail",
        "ses:SendRawEmail"
      ],
      "Resource": [
        "*"
      ]
    }
  ]
}
I click "Allow" and go back to the previous screen.
Then I choose "next" and enable the Lambda function.
When I now upload a .png file I get the following error in my log:
START RequestId: a4401d96-c0ef-11e5-9ae4-8f38a4f750b6 Version: $LATEST
Unable to import module 'mail': No module named mail
END RequestId: a4401d96-c0ef-11e5-9ae4-8f38a4f750b6
REPORT RequestId: a4401d96-c0ef-11e5-9ae4-8f38a4f750b6 Duration: 0.35 ms Billed Duration: 100 ms Memory Size: 128 MB Max Memory Used: 9 MB
I don't know why, because I'm sure mail.js is in the root of my .zip file.
There are just so many gotchas you can run into while creating deployment packages for AWS Lambda (for Python). I spent hours and hours on debugging sessions until I found a formula that rarely fails.
I have created a script that automates the entire process and therefore makes it less error-prone. I have also written a tutorial that explains how everything works. You may want to check it out:
Hassle-Free Python Lambda Deployment [Tutorial + Script]
That error means that Lambda can't find the lib. It can't be in proj/lib/python2.7/site-packages or proj/lib64/python2.7/site-packages.
It MUST be inside proj/ itself. I ran into the same problem with MySQL-python and wrote a howto:
http://www.iheavy.com/2016/02/14/getting-errors-building-amazon-lambda-python-functions-help-howto/
HTH
-Sean
