Node.js: Receiving too many UDP messages at a time, losing them

My Node server receives about 400 UDP messages in one second and handles them all fine; I am able to process all 400 of them.
However, when it starts receiving about 700 UDP messages in one second, I lose 2-20 of the messages, and they never get parsed :(
I have thought about some options here:
1. Create a queue of all the socket messages, then consume them one by one (although I can't figure out how to implement this).
2. Find a setting in Node / Express / the dgram socket where I can increase the memory or buffer size (I couldn't find any settings like this, though :( ).
3. Use a different UDP receiver instead of Node's built-in dgram socket (I didn't find any other receivers).
Here's what my UDP receiver looks like:
var dgram = require("dgram");
var udpserver = dgram.createSocket("udp4");
var seatStateStore = require("./SeatStateStore");

udpserver.on("message", function (msg, rinfo) {
  seatStateStore.parseMessage(msg.toString());
});
Anyone have any ideas? I couldn't figure out any of the 3 options :/ Can someone help me out?
Node v0.10.29
Express v3.14.0
===============================
UPDATE / SOLUTION
Here's the code I ended up using (slightly modified @RoyHB's solution):
var dgram = require("dgram");
var udpserver = dgram.createSocket("udp4");
var seatStateStore = require("./SeatStateStore");
var Dequeue = require('dequeue');
var FIFO = new Dequeue();

fetcher();

udpserver.on("message", function (msg, rinfo) {
  FIFO.push(msg.toString());
});
udpserver.bind(43278);

function fetcher() {
  while (FIFO.length > 0) {
    var msg = FIFO.shift();
    seatStateStore.parseMessage(msg);
  }
  setImmediate(fetcher); // make this function run continuously
}
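One caveat with the fetcher above: setImmediate reschedules it on every turn, so the process keeps the event loop busy even when no messages are waiting. A hedged, untested variant that only schedules a drain when a message actually arrives (same FIFO and seatStateStore as above):

var draining = false;
udpserver.on("message", function (msg) {
  FIFO.push(msg.toString());
  if (!draining) {
    draining = true;     // schedule exactly one drain pass
    setImmediate(drain);
  }
});
function drain() {
  while (FIFO.length > 0) {
    seatStateStore.parseMessage(FIFO.shift());
  }
  draining = false;      // idle until the next message arrives
}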

I know there is already an answer to this, but as of today, I found a way to increase the buffer on dgram in the official Node.js documentation:
socket.setRecvBufferSize(size)
Added in: v8.7.0
size <integer>
Sets the SO_RCVBUF socket option. Sets the maximum socket receive buffer in bytes.

socket.setSendBufferSize(size)
Added in: v8.7.0
size <integer>
Sets the SO_SNDBUF socket option. Sets the maximum socket send buffer in bytes.
Usage example:

var socket = dgram.createSocket('udp4');
socket.on("listening", () => {
  socket.setRecvBufferSize(100000000); // 100 MB
  socket.setSendBufferSize(100000000); // 100 MB
});

The default value is 65507.
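Note that these calls throw if the socket is not yet bound, which is why the example waits for the 'listening' event. The OS may also clamp the requested size, so a quick sanity check with getRecvBufferSize (also added in v8.7.0) is worthwhile. A hedged sketch, reusing the port from the question:

var dgram = require('dgram');
var socket = dgram.createSocket('udp4');
socket.on('listening', () => {
  socket.setRecvBufferSize(100000000);
  // on Linux the kernel caps SO_RCVBUF at net.core.rmem_max,
  // so the granted size may be smaller than requested
  console.log('receive buffer is', socket.getRecvBufferSize(), 'bytes');
});
socket.bind(43278);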

There is an NPM module called node-dequeue. I use it a lot for situations similar to yours.
Basically:
your program pushes received messages onto the end of the queue.
an interval timer periodically activates another method or function (a queue-fetcher) which checks whether there are messages on the queue and, if so, fetches one or more and processes them.
Alternatively (maybe better), no timer is used to schedule queue fetches; instead, node's process.nextTick method is used to continuously check the queue for messages.
Ideally, seatStateStore.parseMessage would create a new object to asynchronously process one message, so that parseMessage returns without delay while the actual message processing continues in the background (see the bottom of the example code).
I haven't tested the code below, it's meant to illustrate, not to run
var Dequeue = require('dequeue'); // dequeue exports a constructor
var FIFO = new Dequeue();
var seatStateStore = require("./SeatStateStore");
var dgram = require("dgram");

setInterval(fetcher, 1);

var udpserver = dgram.createSocket("udp4");
udpserver.on("message", function (msg, rinfo) {
  FIFO.push(msg);
});

function fetcher() {
  while (FIFO.length > 0) {
    var msg = FIFO.shift();
    seatStateStore.parseMessage(msg);
  }
}
** OR (maybe better) **
var Dequeue = require('dequeue');
var FIFO = new Dequeue();
var seatStateStore = require("./SeatStateStore");
var dgram = require("dgram");

fetcher();

var udpserver = dgram.createSocket("udp4");
udpserver.on("message", function (msg, rinfo) {
  FIFO.push(msg);
});

function fetcher() {
  while (FIFO.length > 0) {
    var msg = FIFO.shift();
    seatStateStore.parseMessage(msg);
  }
  // reschedule outside the while loop so fetcher keeps running even
  // when the queue empties (the accepted solution above uses
  // setImmediate here instead, which yields to pending I/O events)
  process.nextTick(fetcher);
}
Outline of seatStateStore.parseMessage:

seatStateStore.parseMessage = function (msg) {
  var proc = new asyncProcHandler(msg, function (err) {
    if (err) {
      // handle the error
    }
  });
}
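asyncProcHandler is left undefined in the answer; a minimal, untested stand-in that simply defers the heavy parsing to a later event-loop turn (so parseMessage returns immediately) could look like:

function asyncProcHandler(msg, done) {
  setImmediate(function () {
    try {
      // ...do the actual (potentially slow) parsing of msg here...
      done(null);
    } catch (err) {
      done(err);
    }
  });
}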

Related

IBM MQ How read one by one message, not all available in a queue manager at once?

Now, my app receives all available messages in a queue manager. I collect them locally and process them one by one.
Could I configure it to receive one message, do some work (which can take some time), delete the received message, and repeat? Is this behavior possible with IBM MQ?
Update: the code below was revised.
function listenToMQ() {
  const qMgr = inbound.queueManagerName;
  const qName = inbound.queueName;
  const connName = inbound.host;
  const cno = new mq.MQCNO();
  const sco = new mq.MQSCO();
  const csp = new mq.MQCSP();
  const cd = new mq.MQCD();
  cno.SecurityParms = csp;
  csp.UserId = inbound.userID;
  csp.authenticationType = 0;
  cno.Options |= MQC.MQCNO_CLIENT_BINDING;
  cd.ConnectionName = connName;
  cd.ChannelName = inbound.channelName;
  cd.SSLClientAuth = MQC.MQSCA_OPTIONAL;
  cd.MaxMsgLength = 104857600;
  cno.ClientConn = cd;
  cno.SSLConfig = sco;
  mq.setTuningParameters({ syncMQICompat: true });
  mq.Connx(qMgr, cno, function(err, hConn) {
    if (err) {
      logger.errorLogger().error(err.message);
    } else {
      const od = new mq.MQOD();
      od.ObjectName = qName;
      od.ObjectType = MQC.MQOT_Q;
      const openOptions = MQC.MQOO_BROWSE;
      mq.Open(hConn, od, openOptions, function(err, hObj) {
        queueHandle = hObj;
        if (err) {
          logger.errorLogger().error(err.message);
        } else {
          getMessages();
        }
      });
    }
  });
}

function getMessages() {
  const md = new mq.MQMD();
  const gmo = new mq.MQGMO();
  gmo.Options =
    MQC.MQGMO_NO_SYNCPOINT |
    MQC.MQGMO_MQWI_UNLIMITED |
    MQC.MQGMO_CONVERT |
    MQC.MQGMO_FAIL_IF_QUIESCING;
  gmo.Options |= MQC.MQGMO_BROWSE_FIRST;
  gmo.MatchOptions = MQC.MQMO_NONE;
  mq.setTuningParameters({ getLoopPollTimeMs: 500 });
  mq.Get(queueHandle, md, gmo, getCB);
}

function getCB(err, hObj, gmo, md, buf, hConn) {
  if (md.Format == "MQSTR") {
    console.log(md);
    const message = decoder.write(buf);
    updateDB(getMetaFeed(message));
  }
  mq.Cmit(hConn);
  gmo.Options &= ~MQC.MQGMO_BROWSE_FIRST;
  gmo.Options |= MQC.MQGMO_BROWSE_NEXT;
}
Yes, most certainly you can.
Your application can get one message, perhaps under syncpoint if it is a message that drives some work that needs to be done; do the work, and when the work is done, commit the get of the message, then go and get the next one. If the work is also transactional (e.g. updating a database), a global transaction could be used to commit both the MQ message and the update of the other transactional resource at the same time.
The code you show in your question appears to be doing a browse of messages (queue opened with MQOO_BROWSE and then messages read using MQGMO_BROWSE_FIRST and then MQGMO_BROWSE_NEXT). I'm not sure how or when your application currently removes the messages from the queue?
Your current code appears to be processing the messages one by one already, so the only changes needed would be to the get options (and to add a commit call).
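As a rough, untested sketch (not part of the original answer): a destructive get under syncpoint, reusing the question's queueHandle, logger, decoder, updateDB and getMetaFeed, might look something like this.

function getMessagesDestructive() {
  const md = new mq.MQMD();
  const gmo = new mq.MQGMO();
  gmo.Options =
    MQC.MQGMO_SYNCPOINT |    // the get is provisional until Cmit
    MQC.MQGMO_WAIT |
    MQC.MQGMO_CONVERT |
    MQC.MQGMO_FAIL_IF_QUIESCING;
  gmo.WaitInterval = MQC.MQWI_UNLIMITED;
  gmo.MatchOptions = MQC.MQMO_NONE;
  mq.Get(queueHandle, md, gmo, function(err, hObj, gmo, md, buf, hConn) {
    if (err) {
      logger.errorLogger().error(err.message);
      return;
    }
    if (md.Format == "MQSTR") {
      updateDB(getMetaFeed(decoder.write(buf))); // do the work first...
    }
    mq.Cmit(hConn); // ...then commit, which removes the message from the queue
  });
}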

How to improve websocket 'onmessage'

I'm learning websockets and wanted to make a websocket onmessage logger which writes the received data to MongoDB.
I'm starting my script with
node listner.js
listner.js:
'use strict';
let DBAbstract = require('./db-controller');
const WebSocket = require('ws');

// getting an instance of a mongodb connection
let mongoInstance = new DBAbstract();
const ws = new WebSocket('ws://ws-url');

ws.onopen = function() {
  console.log('Open');
};
ws.onmessage = function(d) {
  console.log(d.data);
  mongoInstance.insertOne(JSON.parse(d.data)); // Promise which adds the data
};
ws.onclose = function() {
  console.log('Close');
};
ws.onerror = function(e) {
  console.log(e.code);
};
I made this script so far and it works.
When there is an onmessage event, I get a small JSON like this:
{ "event": 2,
  "value": 12,
  "item": "Spoon" }
I was just wondering whether this is enough in terms of scalability of the received onmessage events.
I mean, there is no problem when I receive three small JSONs in 10 seconds.
But what will happen when I'm receiving 100 small JSONs in 10 seconds?
Where is the limit on receiving onmessage events as a client?
Will my listner.js crash because it can't handle the number of onmessage events? Or will my MongoDB crash because it can't handle the number of database writes?
Can I improve this code?

RabbitMQ for NodeJS with Express routing

My server is running NodeJS and uses the amqplib API to request data from another application. The NodeJS server receives the information successfully, but there's a noticeable delay, and I'm trying to determine whether I am doing this in the most efficient manner. Specifically, I'm concerned with the way I open and close connections.
Project Layout
I have two controller files that handle receiving and requesting the data, request.img.server.controller.js and receive.img.server.controller.js. Finally, the routes file, oct.server.routes.js, calls the controller methods when a button on the front end is pushed.
request.img.server.controller.js
'use strict';
var amqp = require('amqplib/callback_api');

var connReady = false;
var conn, ch;
amqp.connect('amqp://localhost:5672', function(err, connection) {
  conn = connection;
  connReady = true;
  conn.createChannel(function(err, channel) {
    ch = channel;
  });
});

exports.sendRequest = function(message) {
  console.log('sending request');
  if (connReady) {
    var ex = '';
    var key = 'utils';
    ch.publish(ex, key, new Buffer(message));
    console.log(" [x] Sent %s: '%s'", key, message);
  }
};
receive.img.server.controller.js
var amqp = require('amqplib/callback_api');
var fs = require('fs');

var wstream = fs.createWriteStream('C:\\Users\\yako\\desktop\\binarytest.txt');
var image, rows, cols;

exports.getResponse = function(resCallback) {
  amqp.connect('amqp://localhost:5672', function(err, conn) {
    conn.createChannel(function(err, ch) {
      var ex = '';
      ch.assertQueue('server', {}, function(err, q) {
        console.log('waiting for images');
        var d = new Date();
        var n = d.getTime();
        ch.consume(q.queue, function(msg) {
          console.log(" [x] %s: '%s'", msg.fields.routingKey, msg.content.toJSON());
          rows = msg.content.readInt16LE(0);
          cols = msg.content.readInt16LE(2);
          console.log("rows = %s", msg.content.readInt16LE(0));
          console.log("cols = %s", msg.content.readInt16LE(2));
          image = msg.content;
          var currMax = 0;
          for (var i = 4; i < image.length; i += 2) {
            if (image.readInt16LE(i) > currMax) {
              currMax = image.readInt16LE(i);
            }
            wstream.write(image.readInt16LE(i) + ',');
          }
          console.log('done writing, max is', currMax);
          //console.log(image);
          resCallback(rows, cols, image);
        }, {
          noAck: true
        });
      });
    });
  });
};
oct.server.routes.js
'use strict';
module.exports = function(app) {
  var request_img = require('../../app/controllers/image-tools/request.img.server.controller.js');
  var receive_img = require('../../app/controllers/image-tools/receive.img.server.controller.js');
  // oct routes
  app.get('/load_slice', function(req, res) {
    console.log('load slice hit');
    receive_img.getResponse(function(rows, cols, image) {
      res.end(image);
    });
    request_img.sendRequest('123:C:\\Users\\yako\\Documents\\Developer\\medicaldiag\\test_files\\RUS-01-035-09M-21.oct');
  });
};
The way you're opening connections is bad, and is at least part of the performance problem.
Connections are expensive to open. Each one opens a new TCP/IP connection on a TCP/IP port between the client and the RabbitMQ server. This takes time, and uses up a limited resource on both the client and the server.
Because of this, a single connection to RabbitMQ should be created and used within each of your node.js processes. This one connection should be shared by all of the code in that process.
Whenever you need to do something with RabbitMQ, open a new channel on the shared connection and do your work. Channels are cheap and are meant to be opened and closed as needed, within a connection.
More specifically in your code, the receive.img.server.controller.js file is the major problem. This opens a new connection to RabbitMQ every time you call the getResponse method.
If you have 10 users hitting the site, you'll have 10 open RabbitMQ connections when 1 would be sufficient. If you have thousands of users hitting the site, you'll have thousands of open RabbitMQ connections when 1 would be sufficient. You also run the risk of exhausting your available TCP/IP connections on the RabbitMQ server or client.
Your receive.img.server.controller.js should look more like your request.img.server.controller.js - one connection open, and re-used all the time.
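A hedged sketch of what that refactor might look like (illustrative only and untested; it keeps the question's 'server' queue and callback contract, with most error handling elided):

'use strict';
var amqp = require('amqplib/callback_api');

var conn = null;
amqp.connect('amqp://localhost:5672', function(err, connection) {
  if (err) throw err;
  conn = connection; // one connection per process, opened once
});

exports.getResponse = function(resCallback) {
  if (!conn) return; // connection not ready yet
  // channels are cheap: open one per operation, close it when done
  conn.createChannel(function(err, ch) {
    ch.assertQueue('server', {}, function(err, q) {
      ch.consume(q.queue, function(msg) {
        var rows = msg.content.readInt16LE(0);
        var cols = msg.content.readInt16LE(2);
        resCallback(rows, cols, msg.content);
        ch.close(); // done with this channel
      }, { noAck: true });
    });
  });
};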
Also, FWIW - I recommend using the wascally library for RabbitMQ w/ node.js. This library sits on top of amqplib, but makes things significantly easier. It will manage your one connection for you, and make it easier for you to send and receive messages.
I also have some training material available for RabbitMQ and node.js that covers the basics of amqplib and then moves in to using wascally for real application development.

Memory efficient message chunk processing using a XMLHttpRequest

I have an XMLHttpRequest with a progress event handler that is requesting a chunked page which continuously adds message chunks. If I do not set a responseType, I can access the response property of the XMLHttpRequest in each progress event and handle the additional message chunk. The problem with this approach is that the browser must keep the entire response in memory, and eventually the browser will crash due to this memory waste.
So I tried a responseType of arraybuffer, in the hope that I could slice the buffer to prevent the previous excessive memory waste. Unfortunately, the progress event handler is no longer capable of reading the response property of the XMLHttpRequest at this point. The event parameter of the progress event does not contain the buffer, either. Here is a short, self-contained example of my attempt at this (it is written for node.js):
var http = require('http');

// -- The server.
http.createServer(function(req, res) {
  if (req.url === '/stream') return serverStream(res);
  serverMain(res);
}).listen(3000);

// -- The server functions to send a HTML page with the client code, or a stream.
function serverMain(res) {
  res.writeHead(200, {'Content-Type': 'text/html'});
  res.write('<html><body>Hello World</body><script>');
  res.end(client.toString() + ';client();</script></html>');
}
function serverStream(res) {
  res.writeHead(200, {'Content-Type': 'text/html'});
  setInterval(function() {
    res.write('Hello World<br />\n');
  }, 1000);
}

// -- The client code which runs in the browser.
function client() {
  var xhr = new XMLHttpRequest();
  xhr.addEventListener('progress', function() {
    if (!xhr.response) return console.log('progress without response :-(');
    console.log('progress: ' + xhr.response.size);
  }, false);
  xhr.open('GET', '/stream', true);
  xhr.responseType = 'arraybuffer';
  xhr.send();
}
The progress event handler has no access to the response I wanted. How can I handle the message chunks in the browser in a memory-efficient way? Please do not suggest a WebSocket. I do not wish to use one just to process a read-only stream of message chunks.
XMLHttpRequest doesn't really seem designed for this kind of usage. The obvious solution is polling, which is a popular use of XMLHttpRequest, but I'm guessing you don't want to miss data from your stream that would slip between the calls.
To my question "Can the 'real' data chunks be identified in some way, or is it basically random data?", you answered: "With some effort, the chunks could be identified by adding an event-id of sorts on the server side."
Based on this premise, I propose:
The idea: cooperating concurrent listeners
1. Connect to the stream and set up the progress listener (referred to as listenerA()).
2. When a chunk arrives, process it and output it. Keep a reference to the ids of both the first and last chunk received by listenerA(). Count how many chunks listenerA() has received.
3. After listenerA() has received a certain amount of chunks, spawn another "thread" (connection + listener, listenerB()) doing steps 1 and 2 in parallel to the first one, but keep the processed data in a buffer instead of outputting it.
4. When listenerA() receives the chunk with the same id as the first chunk received by listenerB(), send a signal to listenerB(), drop the first connection and kill listenerA().
5. When listenerB() receives the termination signal from listenerA(), dump the buffer to the output and keep processing normally.
6. Have listenerB() spawn listenerC() under the same conditions as before.
7. Keep repeating with as many connections + listeners as necessary.
By using two overlapping connections, you can prevent the possible loss of chunks that would result from dropping a single connection and then reconnecting.
Notes
This assumes the data stream is the same for all connections and doesn't introduce some individualized settings.
Depending on the output rate of the stream and the connection delay, the buffer dump during the transition from one connection to another might be noticeable.
You could also measure the total response size rather than the chunks count to decide when to switch to a new connection.
It might be necessary to keep a complete list of chunks ids to compare against rather than just the first and last one because we can't guarantee the timing of the overlap.
The responseType of XMLHttpRequest must be set to its default value of "" or "text", to return text. Other datatypes will not return a partial response. See https://xhr.spec.whatwg.org/#the-response-attribute
Test server in node.js
The following code is a node.js server that outputs a consistent stream of elements for testing purposes. You can open multiple connections to it; the output will be the same across sessions, minus possible server lag.
http://localhost:5500/stream
will return data where id is an incremented number
http://localhost:5500/streamRandom
will return data where id is a random 40 characters long string. This is meant to test a scenario where the id can not be relied upon for ordering the data.
var crypto = require('crypto');

// init + update nodeId
var nodeId = 0;
var nodeIdRand = '0000000000000000000000000000000000000000';
setInterval(function() {
  // regular id
  ++nodeId;
  // random id
  nodeIdRand = crypto.createHash('sha1').update(nodeId.toString()).digest('hex');
}, 1000);

// create server (port 5500)
var http = require('http');
http.createServer(function(req, res) {
  if (req.url === '/stream') {
    return serverStream(res);
  }
  else if (req.url === '/streamRandom') {
    return serverStream(res, true);
  }
}).listen(5500);

// serve nodeId
function serverStream(res, rand) {
  // headers
  res.writeHead(200, {
    'Content-Type'                : 'text/plain',
    'Access-Control-Allow-Origin' : '*',
  });
  // remember last served id
  var last = null;
  // output interval
  setInterval(function() {
    // output on new node
    if (last != nodeId) {
      res.write('[node id="' + (rand ? nodeIdRand : nodeId) + '"]');
      last = nodeId;
    }
  }, 250);
}
Proof of concept, using aforementioned node.js server code
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<button id="stop">stop</button>
<div id="output"></div>
<script>
/*
Listening to a never ending page load (http stream) without running out of
memory by using concurrent overlapping connections to prevent loss of data,
using only xmlHttpRequest, under the condition that the data can be identified.

listen arguments
    url            url of the http stream
    chunkMax       number of chunks to receive before switching to new connection

listen properties
    output         a reference to a DOM element with id "output"
    queue          an array filled with non-duplicate received chunks and metadata
    lastFetcherId  an incrementing number used to assign an id to new fetchers
    fetchers       an array listing all active fetchers

listen methods
    fire           internal use    fire an event
    stop           external use    stop all connections
    fetch          internal use    starts a new connection
    fetchRun       internal use    initialize a new fetcher object

Usage
    var myListen = new listen('http://localhost:5500/streamRandom', 20);
        will listen to url "http://localhost:5500/streamRandom"
        will switch connections every 20 chunks
    myListen.stop()
        will stop all connections in myListen
*/
function listen(url, chunkMax) {
    // main ref
    var that = this;
    // output element
    that.output = document.getElementById('output');
    // main queue
    that.queue = [];
    // last fetcher id
    that.lastFetcherId = 0;
    // list of fetchers
    that.fetchers = [];

    //********************************************************* event dispatcher
    that.fire = function(name, data) {
        document.dispatchEvent(new CustomEvent(name, {'detail': data}));
    }

    //******************************************************** kill all fetchers
    that.stop = function() {
        that.fire('fetch-kill', -1);
    }

    //************************************************************** url fetcher
    that.fetch = function(fetchId, url, fetchRef) {
        //console.log('start fetcher #'+fetchId);
        var len = 0;
        var xhr = new XMLHttpRequest();
        var cb_progress;
        var cb_kill;

        // progress listener
        xhr.addEventListener('progress', cb_progress = function(e) {
            // extract chunk data
            var chunkData = xhr.response.substr(len);
            // chunk id
            var chunkId = chunkData.match(/id="([a-z0-9]+)"/)[1];
            // update response end point
            len = xhr.response.length;
            // signal end of chunk processing
            that.fire('chunk-ready', {
                'fetchId'   : fetchId,
                'fetchRef'  : fetchRef,
                'chunkId'   : chunkId,
                'chunkData' : chunkData,
            });
        }, false);

        // kill switch
        document.addEventListener('fetch-kill', cb_kill = function(e) {
            // kill this fetcher or all fetchers (-1)
            if(e.detail == fetchId || e.detail == -1) {
                //console.log('kill fetcher #'+fetchId);
                xhr.removeEventListener('progress', cb_progress);
                document.removeEventListener('fetch-kill', cb_kill);
                xhr.abort();
                that.fetchers.shift(); // remove oldest fetcher from list
                xhr = null;
            }
        }, false);

        // go
        xhr.open('GET', url, true);
        xhr.responseType = 'text';
        xhr.send();
    };

    //****************************************************** start a new fetcher
    that.fetchRun = function() {
        // new id
        var id = ++that.lastFetcherId;
        //console.log('create fetcher #'+id);
        // create fetcher with new id
        var fetchRef = {
            'id'           : id,    // self id
            'queue'        : [],    // internal queue
            'chunksIds'    : [],    // retrieved ids, also used to count
            'hasSuccessor' : false, // keep track of next fetcher spawn
            'ignoreId'     : null,  // when set, ignore chunks until this id is received (this id included)
        };
        that.fetchers.push(fetchRef);
        // run fetcher
        that.fetch(id, url, fetchRef);
    };

    //************************************************ a fetcher returns a chunk
    document.addEventListener('chunk-ready', function(e) {
        // shorthand
        var f = e.detail;

        // ignore flag is not set, process chunk
        if(f.fetchRef.ignoreId == null) {
            // store chunk id
            f.fetchRef.chunksIds.push(f.chunkId);
            // create queue item
            var queueItem = {'id': f.chunkId, 'data': f.chunkData};
            // chunk is received from oldest fetcher
            if(f.fetchId == that.fetchers[0].id) {
                // send to main queue
                that.queue.push(queueItem);
                // signal queue insertion
                that.fire('queue-new');
            }
            // not oldest fetcher
            else {
                // use fetcher internal queue
                f.fetchRef.queue.push(queueItem);
            }
        }
        // ignore flag is set, current chunk id is the one to ignore
        else if(f.fetchRef.ignoreId == f.chunkId) {
            // disable ignore flag
            f.fetchRef.ignoreId = null;
        }

        //******************** check chunks count for fetcher, threshold reached
        if(f.fetchRef.chunksIds.length >= chunkMax && !f.fetchRef.hasSuccessor) {
            // remember the spawn
            f.fetchRef.hasSuccessor = true;
            // spawn new fetcher
            that.fetchRun();
        }

        /***********************************************************************
        check if the first chunk of the second oldest fetcher exists in the
        oldest fetcher.
        If true, then they overlap and we can kill the oldest fetcher
        ***********************************************************************/
        if(
            // is this the oldest fetcher ?
            f.fetchId == that.fetchers[0].id
            // is there a successor ?
            && that.fetchers[1]
            // has oldest fetcher received the first chunk of its successor ?
            && that.fetchers[0].chunksIds.indexOf(
                that.fetchers[1].chunksIds[0]
            ) > -1
        ) {
            // get index of last chunk of the oldest fetcher within successor queue
            var lastChunkId = that.fetchers[0].chunksIds[that.fetchers[0].chunksIds.length - 1];
            var lastChunkIndex = that.fetchers[1].chunksIds.indexOf(lastChunkId);

            // successor has not reached its parent's last chunk
            if(lastChunkIndex < 0) {
                // discard whole queue
                that.fetchers[1].queue = [];
                that.fetchers[1].chunksIds = [];
                // set ignore id in successor to discard future duplicates
                that.fetchers[1].ignoreId = lastChunkId;
            }
            // there is overlap
            else {
                /**
                console.log('triming queue start: '+that.fetchers[1].queue.length
                    +" "+(lastChunkIndex+1)
                    +" "+(that.fetchers[1].queue.length-1)
                );
                /**/
                var trimStart = lastChunkIndex + 1;
                var trimEnd = that.fetchers[1].queue.length - 1;
                // trim queue
                that.fetchers[1].queue = that.fetchers[1].queue.splice(trimStart, trimEnd);
                that.fetchers[1].chunksIds = that.fetchers[1].chunksIds.splice(trimStart, trimEnd);
                //console.log('triming queue end: '+that.fetchers[1].queue.length);
            }

            // kill oldest fetcher
            that.fire('fetch-kill', that.fetchers[0].id);
        }
    }, false);

    //***************************************************** main queue processor
    document.addEventListener('queue-new', function(e) {
        // process chunks in queue
        while(that.queue.length > 0) {
            // get chunk and remove from queue
            var chunk = that.queue.shift();
            // output item to document
            if(that.output) {
                that.output.innerHTML += "<br />" + chunk.data;
            }
        }
    }, false);

    //****************************************************** start first fetcher
    that.fetchRun();
};

// run
var process = new listen('http://localhost:5500/streamRandom', 20);
// bind global kill switch to button
document.getElementById('stop').addEventListener('click', process.stop, false);
</script>
</body>
</html>

really strange behaviour on node.js using connect-form, socket.io and express

the following code:
req.form.on('progress', function(bytesReceived, bytesExpected) {
  var percent = (bytesReceived / bytesExpected * 100) | 0;
  // progressEvent.download(percent);
  io.sockets.on('connection', function(socket) {
    socket.emit('progress', { percent: percent });
    client = socket;
  });
});
written inside an http post handler (express.js), sends socket messages to the client JS, but it obviously creates a huge number of listeners; in fact it warns me, saying:
"(node) warning: possible EventEmitter memory leak detected. 11 listeners added. Use emitter.setMaxListeners() to increase limit."
on the other hand this code:
io.sockets.on('connection', function(socket) {
  progressEvent.on('progress', function(percentage) {
    console.log(percentage);
    socket.emit('progress', { percent: percentage });
  });
});
doesn't send any message back to the client. The ProgressEvent is:

var util = require('util'),
    events = require('events');

function ProgressEvent() {
  if (false === (this instanceof ProgressEvent)) {
    return new ProgressEvent();
  }
  events.EventEmitter.call(this);
}
util.inherits(ProgressEvent, events.EventEmitter);

ProgressEvent.prototype.download = function(percentage) {
  var self = this;
  self.emit('progress', percentage);
}

exports.ProgressEvent = ProgressEvent;
I've spent a good day on this strange problem and I can't really see why socket.io doesn't send the socket message to the client.
The whole project is here: https://github.com/aterreno/superuploader
Thanks for your attention & help.
You shouldn't listen for socket.io connections inside of the progress event. It looks like you're trying to get socket.io to connect when a user uploads a file, but it won't do that. Instead, it will add a new connection listener each time the progress event fires on the upload, which I'm guessing is pretty often; that's why you're getting the warning about too many listeners.
What you want to do instead is, on the client side, tell the server through socket.io when you initialize an upload. The server then links up that socket.io client with their upload through their session: http://www.danielbaulig.de/socket-ioexpress/
Something like this should do it:

io.sockets.on('connection', function(socket) {
  var session = socket.handshake.session;
  socket.on('initUpload', function() {
    session.socket = socket;
  });
  socket.on('disconnect', function() {
    session.socket = null;
  });
});
And then in your route:

req.form.on('progress', function(bytesReceived, bytesExpected) {
  var percent = (bytesReceived / bytesExpected * 100) | 0;
  if (req.session.socket) {
    req.session.socket.emit('progress', percent);
  }
});
This only works with one upload per session, but you get the idea.
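If you did need multiple simultaneous uploads per session, one hedged variation (untested; uploadId is an illustrative parameter, not from the original answer) is to use socket.io rooms instead of storing the socket on the session:

io.sockets.on('connection', function(socket) {
  socket.on('initUpload', function(uploadId) {
    // each client joins a room named after its upload
    socket.join('upload:' + uploadId);
  });
});

// and in the route, given the uploadId for this form post:
req.form.on('progress', function(bytesReceived, bytesExpected) {
  var percent = (bytesReceived / bytesExpected * 100) | 0;
  io.sockets.in('upload:' + uploadId).emit('progress', { percent: percent });
});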
