Measure Node.js total inbound/outbound traffic generated through net.server - javascript

I got that nifty App I work on, now to calculate some scenarios I'd like to know
how much inbound / outbound traffic is generated per session by the app itself.
I don't want to use the browser for this but to gather that info from the server side.
nodejs net.server does not have any methods that fit the description,
I found only net.socket methods:
socket.bytesRead The amount of received bytes.
socket.bytesWritten The amount of bytes sent.
do they apply to the traffic generated through net.server?
any existing node_modules that gather that kind of stats?
Thanks in advance

Well, that's because net.server uses net.socket. To get the totals, you will have to add bytesRead and bytesWritten to the totals once the socket is closed. Example:
const net = require("net");

// Echo server that tracks cumulative inbound/outbound traffic.
// Each socket's bytesRead/bytesWritten are folded into the server-level
// totals when the socket closes.
const server = net.createServer(function (c) {
  c.on('close', function () {
    // add to the totals
    server.bytesSent += c.bytesWritten;
    server.bytesReceived += c.bytesRead;
  });
  c.write('Hello world!\r\n');
  c.pipe(c);
  c.end();
});
server.bytesReceived = 0;
server.bytesSent = 0;
server.listen(3000);

const time = process.hrtime();
setInterval(function () {
  // clear the terminal and move the cursor home
  process.stdout.write('\u001B[2J\u001B[0;0f');
  // take ONE hrtime snapshot so seconds and nanoseconds agree
  // (the original called process.hrtime(time) twice, mixing two instants)
  const elapsed = process.hrtime(time);
  const diff = elapsed[0] + elapsed[1] / 1e9;
  const bpsSent = Math.round(server.bytesSent / diff) || 0;
  const bpsReceived = Math.round(server.bytesReceived / diff) || 0;
  // server.connections is deprecated; getConnections is the supported,
  // callback-based replacement, so all output is printed inside it.
  server.getConnections(function (err, count) {
    console.log("Running node.js %s on %s-%s", process.version, process.platform, process.arch);
    console.log("Memory usage: %d bytes", process.memoryUsage().rss);
    console.log("Uptime: %ds", Math.round(process.uptime()));
    console.log("Open connections: %d", err ? 0 : count);
    console.log("In: %d bytes (%d bytes/s)", server.bytesReceived, bpsReceived);
    console.log("Out: %d bytes (%d bytes/s)", server.bytesSent, bpsSent);
  });
}, 100);
If you need to update the totals in real-time (when data is received/sent), you can instead add the length of the buffers directly to the totals when they are written/read. This is especially good when you have sockets that are open for a long time and transferring large amounts of data.
var server = net.createServer(function (c) {
  // Wrap write() so outgoing bytes are counted the moment they are sent.
  var originalWrite = c.write;
  c.write = function (d) {
    // Count string payloads with Buffer.byteLength (no allocation needed);
    // the original used the deprecated `new Buffer(d)` just to measure.
    server.bytesSent += Buffer.isBuffer(d) ? d.length : Buffer.byteLength(d);
    // Forward ALL arguments (data, encoding, callback) and preserve the
    // boolean backpressure result, both of which the original dropped.
    return originalWrite.apply(this, arguments);
  };
  // Inbound bytes are counted per 'data' chunk.
  c.on('data', function (d) {
    server.bytesReceived += d.length;
  });
  c.write('Hello world!\r\n');
  c.pipe(c);
  c.end();
});

Related

Intensive JavaScript job freezes the browser

I have a blob to construct: I received almost 100 parts (~500 KB each) to decrypt and assemble into a blob file.
It actually works fine, but my decryption is CPU-intensive and freezes my page while it runs.
I tried different approaches — jQuery deferreds, timeouts — but I always hit the same problem.
Is there a way to avoid freezing the UI thread?
// Order the received parts by their sequence number.
// NOTE: Array#sort mutates `blobs` in place; `parts` is the same array.
var parts = blobs.sort(function (a, b) {
  return a.part - b.part;
});
// Decrypted byte arrays, in part order.
var byteArrays = [];
// Iterate over `parts` (the array actually indexed); the original also
// leaked a stray implicit-global `i = 0;` before the loop.
for (var i = 0; i < parts.length; i++) {
  // CPU-intensive: this synchronous call blocks the UI thread.
  byteArrays.push(that.decryptBlob(parts[i].blob.b64, fileType));
}
// Assemble the final file from all decrypted chunks.
var blob = new Blob(byteArrays, { type: fileType });
The body inside the for(...) loop is synchronous, so the entire decryption process is synchronous — in simple words, decryption happens chunk after chunk. How about making it asynchronous, decrypting multiple chunks in parallel? In JavaScript terminology we can use Web Workers. These workers run in parallel, so if you spawn 5 workers, for example, the total time is reduced to roughly T / 5 (where T is the total time in synchronous mode).
Read more about worker threads here :
https://blog.logrocket.com/node-js-multithreading-what-are-worker-threads-and-why-do-they-matter-48ab102f8b10/
Thanks to Sebastian Simon,
I went with the worker approach, and it's working fine.
var chunks = [];
var decryptedChucnkFnc = function (args) {
  // My builder blob job here
};
// Cap the worker pool at 5, or fewer when there are fewer parts.
var maxWorker = 5;
if (totalParts < maxWorker) {
  maxWorker = totalParts;
}
// Keep the workers in an array instead of eval-created numbered variables
// (`eval` is unsafe, slow, and defeats tooling; an array does the same job).
var workers = [];
for (var iw = 0; iw < maxWorker; iw++) {
  var wo = new Worker("decryptfile.min.js");
  workers.push(wo);
  var item = blobs.pop();
  wo.postMessage(MyObjectPassToTheFile);
  wo.onmessage = decryptedChucnkFnc;
}

socket.io router, assign to one of multiple instances

I am trying to overcome a limitation placed on how many connections TCP protocol can have opened on a single port. So i thought of a way to create multiple instances of my server running on different ports for example:
instance 1 (3001) server_i1.js
instance 2 (3002) server_i2.js
instance 3 (3003) server_i3.js
instance 4 (3004) server_i4.js
then I could have one additional file, server_route.js, that would check how many connections are established on each instance and forward the user to the least populated instance. I tried building something using cluster, but it only seems to create new processes on the same port. How can it be done so that all users connect to, for example, http://example.com:3000 and are then forwarded to one of four possible ports [3001, 3002, 3003, 3004]?
current server approach:
var cluster = require('cluster');
var numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
  // Master process: spawn one worker per CPU core and log worker deaths.
  for (var n = 0; n < numCPUs; n++) {
    cluster.fork();
  }
  cluster.on('exit', function (worker, code, signal) {
    console.log('worker ' + worker.process.pid + ' died');
  });
} else {
  // Worker process: every worker shares the same listening port.
  var http = require('http');
  var _io = require('socket.io');
  var server = http.createServer();
  server.listen('3000', 'example.com');
  var io = _io.listen(server);

  // Per-worker bookkeeping: connect time per socket id, message counter.
  var connections = {};
  var msg_sent = 0;

  io.on('connection', function (socket) {
    connections[socket.id] = new Date().getTime();
    socket.on('client-request', function (msg) {
      msg_sent++;
    });
    socket.on('disconnect', function () {
      delete connections[socket.id];
    });
  });

  // Report this worker's live connection and message counts every second.
  setInterval(function () {
    console.log('Active connections: ', Object.keys(connections).length, 'Messages sent: ', msg_sent);
  }, 1000);
}
Maybe use round robin? A sample implementation would look like this:
// Ports the clients are redirected to. The original list contained
// " 3002" with a leading space, which would produce a broken URL
// ("http://localhost: 3002") on the client.
const ports = ["3001", "3002", "3003"];
var current = 0;
io.on('connection', function (socket) {
  // Tell the client which port to reconnect on, then advance round-robin.
  socket.emit("redirect", ports[current]);
  current = (current + 1) % ports.length;
});
On clientside one would do
// Connect to the router port first; when it answers with a "redirect",
// drop that connection and reconnect on the assigned port.
(function start(port) {
  const socket = io("http://localhost:" + port);
  socket.on("redirect", function (nextPort) {
    socket.close();
    start(nextPort);
  });
  //whatever
})(3000);

Node.js update client-accessible JSON file

A beginner's question as I am new to web programming. I am using the MEAN stack and writing a JSON file within the server in order to make some weather information available to any connected clients.
I am updating the JSON file every hour using the node-schedule library. Will the constant updating of the file from the server cause any concurrency issues if the clients happen to be attempting to access the file's data at the same time?
Code snippet below:
server.js
// Fetch the current weather and persist it to public/weather.json,
// where connected clients read it over HTTP.
function updateWeatherFile() {
  var weather = require('weather-js');
  weather.find({search: weatherSearch, degreeType: 'C'}, function(err, result) {
    if(err) {
      console.log(err);
      // Bail out: the original fell through and serialized/wrote an
      // undefined `result` even after a lookup failure.
      return;
    }
    var w = JSON.stringify(result, null, 2);
    fs.writeFile('public/weather.json', w, function(err) {
      if(err) {
        console.log(err);
      }
    });
  });
}
// Refresh the weather file hourly when scheduling is enabled;
// otherwise perform a single update at startup.
if(scheduleWeather) {
var schedule = require('node-schedule');
var sequence = '1 * * * *'; // cron string to specify first minute of every hour
var j = schedule.scheduleJob(sequence, function(){
updateWeatherFile();
console.log('weather is updated to public/weather.json at ' + new Date());
});
}
else {
updateWeatherFile();
}
client_sample.js
// get the current weather from the server
// get the current weather from the server
$http.get('weather.json').then(function (response) {
  // Hoist the deeply-nested record instead of repeating the path.
  var current = response.data[0].current;
  console.log(current);
  vm.weather = current.skytext;
  vm.temperature = current.temperature;
});
Node.js is a single-threaded environment.
However, to read and write files Node delegates the work to the system (via its internal thread pool), so the file can end up being read and written simultaneously. In that case the concurrency is not handled by Node but by the operating system.
If you think this concurrency may harm your program, consider using a lock file, as commented and explained here.

RabbitMQ for NodeJS with Express routing

My server is running NodeJS and uses the amqplib api to request data from another application. The NodeJS server is receiving the information successfully but there's a noticable delay and I'm trying to determine whether I am doing this in the most efficient manner. Specifically I'm concerned with the way that I open and close connections.
Project Layout
I have two controller files that handle receiving and requesting the data, request.img.server.controller.js and receive.img.server.controller.js. Finally the routes handle the controller methods when a button on the front end is pushed, oct.server.routes.js.
request.img.server.controller.js
'use strict';
var amqp = require('amqplib/callback_api');
var connReady = false;
var conn, ch;
amqp.connect('amqp://localhost:5672', function(err, connection) {
conn = connection;
connReady = true;
conn.createChannel(function(err, channel) {
ch = channel;
});
});
exports.sendRequest = function(message) {
console.log('sending request');
if(connReady) {
var ex = '';
var key = 'utils';
ch.publish(ex, key, new Buffer(message));
console.log(" [x] Sent %s: '%s'", key, message);
}
};
receive.img.server.controller.js
var amqp = require('amqplib/callback_api');
var fs = require('fs');
var wstream = fs.createWriteStream('C:\\Users\\yako\\desktop\\binarytest.txt');
var image, rows, cols;
exports.getResponse = function(resCallback) {
amqp.connect('amqp://localhost:5672', function(err, conn) {
conn.createChannel(function(err, ch) {
var ex = '';
ch.assertQueue('server', {}, function(err, q) {
console.log('waiting for images');
var d = new Date();
var n = d.getTime();
ch.consume(q.queue, function(msg) {
console.log(" [x] %s: '%s'", msg.fields.routingKey, msg.content.toJSON());
rows = msg.content.readInt16LE(0);
cols = msg.content.readInt16LE(2);
console.log("rows = %s", msg.content.readInt16LE(0));
console.log("cols = %s", msg.content.readInt16LE(2));
image = msg.content;
var currMax = 0;
for (var i = 4; i < image.length; i+=2) {
if (image.readInt16LE(i) > currMax) {
currMax = image.readInt16LE(i);
}
wstream.write(image.readInt16LE(i) + ',');
}
console.log('done writing max is', currMax);
//console.log(image);
resCallback(rows, cols, image);
}, {
noAck: true
});
});
});
});
};
oct.server.routes.js
'use strict';
module.exports = function(app) {
var request_img = require('../../app/controllers/image-tools/request.img.server.controller.js');
var receive_img = require('../../app/controllers/image-tools/receive.img.server.controller.js');
// oct routes
app.get('/load_slice', function(req, res) {
console.log('load slice hit');
receive_img.getResponse(function (rows, cols, image) {
res.end(image);
});
request_img.sendRequest('123:C:\\Users\\yako\\Documents\\Developer\\medicaldiag\\test_files\\RUS-01-035-09M-21.oct');
});
};
The way you're opening connections is bad, and is at least part of the performance problem.
Connections are expensive to open. They open a new TCP/IP connection on a TCP/IP port between the client and rabbitmq server. This takes time, and uses up a limited resource on both the client and server.
Because of this, a single connection to RabbitMQ should be created and used within each of your node.js processes. This one connection should be shared by all of the code in that process.
Whenever you need to do something with RabbitMQ, open a new channel on the shared connection and do your work. Channels are cheap and are meant to be opened and closed as needed, within a connection.
More specifically in your code, the receive.img.server.controller.js file is the major problem. This opens a new connection to RabbitMQ every time you call the getResponse method.
If you have 10 users hitting the site, you'll have 10 open RabbitMQ connections when 1 would be sufficient. If you have thousands of users hitting the site, you'll have thousands of open RabbitMQ connections when 1 would be sufficient. You also run the risk of exhausting your available TCP/IP connections on the RabbitMQ server or client.
Your receive.img.server.controller.js should look more like your request.img.server.controller.js - one connection open, and re-used all the time.
Also, FWIW - I recommend using the wascally library for RabbitMQ w/ node.js. This library sits on top of amqplib, but makes things significantly easier. It will manage your one connection for you, and make it easier for you to send and receive messages.
I also have some training material available for RabbitMQ and node.js that covers the basics of amqplib and then moves in to using wascally for real application development.

Node.js net tcp buffering memory leak

I'm writing a TCP game server in Node.js and am having issues with splitting the TCP stream into messages. As I want to read numbers and floats from the buffer, I cannot find a suitable module to outsource to, as all the ones I've found deal with simple strings ending with a newline delimiter. I decided to go with prefixing each message with its length in bytes. I did this and wrote a simple program to spam the server with random messages (well constructed, with a UInt16LE prefix giving the length of the message). I noticed that the longer I leave the programs running, the more memory my server keeps using. I tried using a debugging tool to trace the memory allocation, with no success, so I figured I'd post my code here and hope for a reply. So here is my code — any tips or pointers as to where I'm going wrong, or what I can do differently or more efficiently, would be amazing!
Thanks.
server.on("connection", function (socket) {
  var session = new sessionCS(socket);
  console.log("Connection from " + session.address);

  // Reassembly state. Messages are length-prefixed with a UInt16LE.
  // Buffer.alloc zero-fills; the deprecated `new Buffer(size)` returned
  // UNINITIALIZED memory, which is both a correctness and a security bug.
  var currentBuffer = Buffer.alloc(args.bufSize);
  var bufWrite = 0;   // next free position in currentBuffer
  var bufRead = 0;    // start of the unconsumed region
  var mSize = null;   // payload size of the in-flight message, if known yet

  socket.on("data", function (dataBuffer) {
    // If the incoming chunk would overflow, compact: shift the unread
    // region to the front of a fresh buffer in one copy() call instead
    // of the original byte-at-a-time loop.
    if (bufWrite + dataBuffer.length > args.bufSize - 1) {
      var newBuffer = Buffer.alloc(args.bufSize);
      currentBuffer.copy(newBuffer, 0, bufRead, bufWrite);
      currentBuffer = newBuffer;
      bufWrite = bufWrite - bufRead;
      bufRead = 0;
    }
    // Append the new chunk in one copy.
    dataBuffer.copy(currentBuffer, bufWrite);
    bufWrite += dataBuffer.length;

    // Read the length prefix once at least 2 bytes are buffered.
    if (mSize === null && (bufWrite - bufRead) >= 2) {
      mSize = currentBuffer.readUInt16LE(bufRead);
    }

    // Extract every complete message currently in the buffer
    // (+2 accounts for the length prefix itself). The explicit null
    // guard replaces the original's reliance on `null + 2 === 2`.
    while (mSize !== null && (bufWrite - bufRead) >= mSize + 2) {
      bufRead += 2;
      // Copy the payload out so later compaction cannot mutate it.
      var messageBuffer = Buffer.alloc(mSize);
      currentBuffer.copy(messageBuffer, 0, bufRead, bufRead + mSize);
      bufRead += mSize;
      // Hand the complete message to the router.
      router(session, messageBuffer);
      // Peek at the next length prefix, if one is already buffered.
      mSize = (bufWrite - bufRead) >= 2 ? currentBuffer.readUInt16LE(bufRead) : null;
    }
  });
});
Buffer Frame Serialization Protocol (BUFSP) https://github.com/teambition/bufsp
It may be what you want: it encodes messages into buffers, writes them to TCP, and splits the received TCP stream back into messages.

Categories

Resources