Memory-efficient message chunk processing using XMLHttpRequest - javascript

I have an XMLHttpRequest with a progress event handler that requests a chunked page which continuously sends additional message chunks. If I do not set a responseType, I can access the response property of the XMLHttpRequest in each progress event and handle the additional message chunk. The problem with this approach is that the browser must keep the entire response in memory, and eventually the browser will crash due to this memory waste.
So, I tried a responseType of arraybuffer in the hope that I could slice the buffer and avoid the excessive memory use. Unfortunately, the progress event handler is no longer able to read the response property of the XMLHttpRequest at that point. The event parameter of the progress event does not contain the buffer either. Here is a short, self-contained example of my attempt (written for node.js):
var http = require('http');
// -- The server.
http.createServer(function(req, res) {
  if (req.url === '/stream') return serverStream(res);
  serverMain(res);
}).listen(3000);
// -- The server functions to send a HTML page with the client code, or a stream.
function serverMain(res) {
  res.writeHead(200, {'Content-Type': 'text/html'});
  res.write('<html><body>Hello World</body><script>');
  res.end(client.toString() + ';client();</script></html>');
}
function serverStream(res) {
  res.writeHead(200, {'Content-Type': 'text/html'});
  setInterval(function() {
    res.write('Hello World<br />\n');
  }, 1000);
}
// -- The client code which runs in the browser.
function client() {
  var xhr = new XMLHttpRequest();
  xhr.addEventListener('progress', function() {
    if (!xhr.response) return console.log('progress without response :-(');
    console.log('progress: ' + xhr.response.size);
  }, false);
  xhr.open('GET', '/stream', true);
  xhr.responseType = 'arraybuffer';
  xhr.send();
}
The progress event handler has no access to the response I wanted. How can I handle the message chunks in the browser in a memory-efficient way? Please do not suggest a WebSocket. I do not wish to use one just to process a read-only stream of message chunks.

XMLHttpRequest doesn't really seem designed for this kind of usage. The obvious solution is polling, which is a popular use of XMLHttpRequest, but I'm guessing you don't want to miss data from your stream that would slip between the calls.
To my question "Can the 'real' data chunks be identified in some way, or is it basically random data?", you answered "With some effort, the chunks could be identified by adding an event-id of sorts on the server side".
Based on this premise, I propose:
The idea: cooperating concurrent listeners
Connect to the stream and set up the progress listener (referred to as listenerA()).
When a chunk arrives, process it and output it. Keep a reference to the ids of both the first and last chunk received by listenerA(). Count how many chunks listenerA() has received.
After listenerA() has received a certain number of chunks, spawn another "thread" (connection + listener, listenerB()) doing steps 1 and 2 in parallel with the first one, but keep the processed data in a buffer instead of outputting it.
When listenerA() receives the chunk with the same id as the first chunk received by listenerB(), send a signal to listenerB(), drop the first connection and kill listenerA().
When listenerB() receives the termination signal from listenerA(), dump the buffer to the output and keep processing normally.
Have listenerB() spawn listenerC() on the same conditions as before.
Keep repeating with as many connections + listeners as necessary.
By using two overlapping connections, you can prevent the possible loss of chunks that would result from dropping a single connection and then reconnecting.
Notes
This assumes the data stream is the same for all connections and doesn't introduce some individualized settings.
Depending on the output rate of the stream and the connection delay, the buffer dump during the transition from one connection to another might be noticeable.
You could also measure the total response size rather than the chunk count to decide when to switch to a new connection.
It might be necessary to keep a complete list of chunk ids to compare against, rather than just the first and last one, because we can't guarantee the timing of the overlap.
The responseType of the XMLHttpRequest must be left at its default value of "" or set to "text" in order to return text. Other response types will not return a partial response. See https://xhr.spec.whatwg.org/#the-response-attribute
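For reference, this is the mechanism the proof of concept below relies on: with the default or "text" responseType, each progress event can read just the newly arrived tail of the response. A minimal sketch, reusing the /stream endpoint from the question:
var xhr = new XMLHttpRequest();
var seenLength = 0; // how much of xhr.response has already been processed
xhr.addEventListener('progress', function() {
  var chunk = xhr.response.substring(seenLength); // only the newly arrived text
  seenLength = xhr.response.length;
  console.log('new chunk: ' + chunk);
}, false);
xhr.open('GET', '/stream', true);
xhr.responseType = 'text'; // '' (the default) would work as well
xhr.send();
Note that the full response string still accumulates in memory, which is why the scheme below rotates connections instead of keeping a single one open forever.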
Test server in node.js
The following code is a node.js server that outputs a consistent stream of elements for testing purposes. You can open multiple connections to it; the output will be the same across sessions, minus possible server lag.
http://localhost:5500/stream
will return data where id is an incrementing number
http://localhost:5500/streamRandom
will return data where id is a random 40-character string. This is meant to test a scenario where the id cannot be relied upon for ordering the data.
var crypto = require('crypto');
// init + update nodeId
var nodeId = 0;
var nodeIdRand = '0000000000000000000000000000000000000000';
setInterval(function() {
  // regular id
  ++nodeId;
  // random id
  nodeIdRand = crypto.createHash('sha1').update(nodeId.toString()).digest('hex');
}, 1000);
// create server (port 5500)
var http = require('http');
http.createServer(function(req, res) {
  if(req.url === '/stream') {
    return serverStream(res);
  }
  else if(req.url === '/streamRandom') {
    return serverStream(res, true);
  }
}).listen(5500);
// serve nodeId
function serverStream(res, rand) {
  // headers
  res.writeHead(200, {
    'Content-Type' : 'text/plain',
    'Access-Control-Allow-Origin' : '*',
  });
  // remember last served id
  var last = null;
  // output interval
  setInterval(function() {
    // output on new node
    if(last != nodeId) {
      res.write('[node id="'+(rand ? nodeIdRand : nodeId)+'"]');
      last = nodeId;
    }
  }, 250);
}
Proof of concept, using the aforementioned node.js server code
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<button id="stop">stop</button>
<div id="output"></div>
<script>
/*
Listening to a never ending page load (http stream) without running out of
memory by using concurrent overlapping connections to prevent loss of data,
using only xmlHttpRequest, under the condition that the data can be identified.
listen arguments:
  url           url of the http stream
  chunkMax      number of chunks to receive before switching to new connection
listen properties:
  output        a reference to a DOM element with id "output"
  queue         an array filled with non-duplicate received chunks and metadata
  lastFetcherId an incrementing number used to assign an id to new fetchers
  fetchers      an array listing all active fetchers
listen methods:
  fire          (internal use) fire an event
  stop          (external use) stop all connections
  fetch         (internal use) starts a new connection
  fetchRun      (internal use) initialize a new fetcher object
Usage
var myListen = new listen('http://localhost:5500/streamRandom', 20);
will listen to url "http://localhost:5500/streamRandom"
will switch connections every 20 chunks
myListen.stop()
will stop all connections in myListen
*/
function listen(url, chunkMax) {
// main ref
var that = this;
// output element
that.output = document.getElementById('output');
// main queue
that.queue = [];
// last fetcher id
that.lastFetcherId = 0;
// list of fetchers
that.fetchers = [];
//********************************************************* event dispatcher
that.fire = function(name, data) {
document.dispatchEvent(new CustomEvent(name, {'detail':data}));
}
//******************************************************** kill all fetchers
that.stop = function() {
that.fire('fetch-kill', -1);
}
//************************************************************** url fetcher
that.fetch = function(fetchId, url, fetchRef) {
//console.log('start fetcher #'+fetchId);
var len = 0;
var xhr = new XMLHttpRequest();
var cb_progress;
var cb_kill;
// progress listener
xhr.addEventListener('progress', cb_progress = function(e) {
// extract chunk data
var chunkData = xhr.response.substr(len);
// chunk id
var chunkId = chunkData.match(/id="([a-z0-9]+)"/)[1];
// update response end point
len = xhr.response.length;
// signal end of chunk processing
that.fire('chunk-ready', {
'fetchId' : fetchId,
'fetchRef' : fetchRef,
'chunkId' : chunkId,
'chunkData' : chunkData,
});
}, false);
// kill switch
document.addEventListener('fetch-kill', cb_kill = function(e) {
// kill this fetcher or all fetchers (-1)
if(e.detail == fetchId || e.detail == -1) {
//console.log('kill fetcher #'+fetchId);
xhr.removeEventListener('progress', cb_progress);
document.removeEventListener('fetch-kill', cb_kill);
xhr.abort();
that.fetchers.shift(); // remove oldest fetcher from list
xhr = null;
}
}, false);
// go
xhr.open('GET', url, true);
xhr.responseType = 'text';
xhr.send();
};
//****************************************************** start a new fetcher
that.fetchRun = function() {
// new id
var id = ++that.lastFetcherId;
//console.log('create fetcher #'+id);
// create fetcher with new id
var fetchRef = {
'id' : id, // self id
'queue' : [], // internal queue
'chunksIds' : [], // retrieved ids, also used to count
'hasSuccessor' : false, // keep track of next fetcher spawn
'ignoreId' : null, // when set, ignore chunks until this id is received (this id included)
};
that.fetchers.push(fetchRef);
// run fetcher
that.fetch(id, url, fetchRef);
};
//************************************************ a fetcher returns a chunk
document.addEventListener('chunk-ready', function(e) {
// shorthand
var f = e.detail;
// ignore flag is not set, process chunk
if(f.fetchRef.ignoreId == null) {
// store chunk id
f.fetchRef.chunksIds.push(f.chunkId);
// create queue item
var queueItem = {'id':f.chunkId, 'data':f.chunkData};
// chunk is received from oldest fetcher
if(f.fetchId == that.fetchers[0].id) {
// send to main queue
that.queue.push(queueItem);
// signal queue insertion
that.fire('queue-new');
}
// not oldest fetcher
else {
// use fetcher internal queue
f.fetchRef.queue.push(queueItem);
}
}
// ignore flag is set, current chunk id the one to ignore
else if(f.fetchRef.ignoreId == f.chunkId) {
// disable ignore flag
f.fetchRef.ignoreId = null;
}
//******************** check chunks count for fetcher, threshold reached
if(f.fetchRef.chunksIds.length >= chunkMax && !f.fetchRef.hasSuccessor) {
// remember the spawn
f.fetchRef.hasSuccessor = true;
// spawn new fetcher
that.fetchRun();
}
/***********************************************************************
check if the first chunk of the second oldest fetcher exists in the
oldest fetcher.
If true, then they overlap and we can kill the oldest fetcher
***********************************************************************/
if(
// is this the oldest fetcher ?
f.fetchId == that.fetchers[0].id
// is there a successor ?
&& that.fetchers[1]
// has oldest fetcher received the first chunk of its successor ?
&& that.fetchers[0].chunksIds.indexOf(
that.fetchers[1].chunksIds[0]
) > -1
) {
// get index of last chunk of the oldest fetcher within successor queue
var lastChunkId = that.fetchers[0].chunksIds[that.fetchers[0].chunksIds.length-1]
var lastChunkIndex = that.fetchers[1].chunksIds.indexOf(lastChunkId);
// successor has not reached its parent last chunk
if(lastChunkIndex < 0) {
// discard whole queue
that.fetchers[1].queue = [];
that.fetchers[1].chunksIds = [];
// set ignore id in successor to future discard duplicates
that.fetchers[1].ignoreId = lastChunkId;
}
// there is overlap
else {
/**
console.log('triming queue start: '+that.fetchers[1].queue.length
+" "+(lastChunkIndex+1)
+" "+(that.fetchers[1].queue.length-1)
);
/**/
var trimStart = lastChunkIndex+1;
var trimEnd = that.fetchers[1].queue.length-1;
// trim queue
that.fetchers[1].queue = that.fetchers[1].queue.splice(trimStart, trimEnd);
that.fetchers[1].chunksIds = that.fetchers[1].chunksIds.splice(trimStart, trimEnd);
//console.log('triming queue end: '+that.fetchers[1].queue.length);
}
// kill oldest fetcher
that.fire('fetch-kill', that.fetchers[0].id);
}
}, false);
//***************************************************** main queue processor
document.addEventListener('queue-new', function(e) {
// process chunks in queue
while(that.queue.length > 0) {
// get chunk and remove from queue
var chunk = that.queue.shift();
// output item to document
if(that.output) {
that.output.innerHTML += "<br />"+chunk.data;
}
}
}, false);
//****************************************************** start first fetcher
that.fetchRun();
};
// run
var process = new listen('http://localhost:5500/streamRandom', 20);
// bind global kill switch to button
document.getElementById('stop').addEventListener('click', process.stop, false);
</script>
</body>
</html>

Related

Asynchronous recursive functions in javascript

I am trying to stream mp3 data from my server to the client side. I am doing this using Ajax. The server sends 50 kilobytes per request. I wrote two functions: one that gets the mp3 data and one that plays it. The first function takes the 50 kilobytes, decodes them and stores the decoded data in an array, then calls itself recursively. The second function starts playing as soon as the first element in the array is filled with data. The problem is that this works for the first 50 kilobytes only, and then it fails. What I want to do is keep my get_buffer function running until the server tells it there is no more data to send, and keep my play() function playing data until there are no more elements in the array.
Here are my two functions:
function buffer_seg() {
// starts a new request
buff_req = new XMLHttpRequest();
// Request attributes
var method = 'GET';
var url = '/buffer.php?request=buffer&offset=' + offset;
var async = true;
// set attributes
buff_req.open(method, url, async);
buff_req.responseType = 'arraybuffer';
// keeps loading until something is received
if (!loaded) {
change_icon();
buffering = true;
}
buff_req.onload = function() {
segment = buff_req.response;
// if the whole file was already buffered
if (segment.byteLength == 4) {
return true;
} else if (segment.byteLength == 3) {
return false;
}
// sets the new offset
if (offset == -1) {
offset = BUFFER_SIZE;
} else {
offset += BUFFER_SIZE;
}
//decodes mp3 data and adds it to the array
audioContext.decodeAudioData(segment, function(decoded) {
buffer.push(decoded);
debugger;
if (index == 0) {
play();
}
});
}
buff_req.send();
buff_seg();
}
Second function:
function play() {
// checks if the end of buffer has been reached
if (index == buffer.length) {
loaded = false;
change_icon();
if (buffer_seg == false) {
stop();
change_icon();
return false;
}
}
loaded = true;
change_icon();
// new buffer source
var src = audioContext.createBufferSource();
src.buffer = buffer[index++];
// connects
src.connect(audioContext.destination);
src.start(time);
time += src.buffer.duration;
src.onended = function() {
src.disconnect(audioContext.destination);
play();
}
}
The recursive call to buffer_seg is in the main body of buffer_seg, not in the callback, so it happens immediately - not, as you seem to intend, after a response is received. Second, this also means that the recursive call is unconditional, when it should depend on whether the previous response indicated that more data is available. If this isn't simply crashing your browser, I'm not sure why not. It also means that chunks of streamed audio could be pushed into the buffer out of order.
So to start I'd look at moving the recursive call to the end of the onload handler, after the check for end of stream.
In the 2nd function, what do you intend if (buffer_seg == false) to do? This condition will never be met. Are you thinking this is a way to see the last return value from buffer_seg? That's not how it works. Perhaps you should have a variable that both functions can see, which buffer_seg can set and play can test, or something like that.
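As a rough, untested sketch of both suggestions (it reuses the globals offset, BUFFER_SIZE, buffer, index and audioContext from your code, and replaces the boolean return values with a shared streamEnded flag that both functions can see):
var streamEnded = false; // set by buffer_seg, tested by play

function buffer_seg() {
  var buff_req = new XMLHttpRequest();
  buff_req.open('GET', '/buffer.php?request=buffer&offset=' + offset, true);
  buff_req.responseType = 'arraybuffer';
  buff_req.onload = function() {
    var segment = buff_req.response;
    // tiny bodies are the end-of-stream / error markers from the original code
    if (segment.byteLength == 4 || segment.byteLength == 3) {
      streamEnded = true;
      return;
    }
    offset = (offset == -1) ? BUFFER_SIZE : offset + BUFFER_SIZE;
    audioContext.decodeAudioData(segment, function(decoded) {
      buffer.push(decoded);
      if (index == 0) play();
    });
    buffer_seg(); // request the next segment only after this one has arrived
  };
  buff_req.send();
}
In play(), the end-of-stream branch would then test if (streamEnded) instead of if (buffer_seg == false).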

Node.js process out of memory

I have written a service to download files from an external partner site. There are around 1000 files of 1 MB each. My process is going out of memory every time I reach around 800 files.
How should I identify the root cause?
var request = require('sync-request');
var fs = require('graceful-fs')
function find_starting_url(xyz_category){
feed_url = "<url>"
response = request("GET", feed_url).getBody().toString()
response = JSON.parse(response)
apiListings = response['apiGroups']['affiliate']['apiListings']
starting_url = apiListings[xyz_category]['availableVariants']['v0.1.0']['get']
return starting_url
}
function get_all_files(feed_category, count, next_url, retry_count){
var headers = {
'Id': '<my_header>',
'Token': '<my key>'
}
console.log(Date())
console.log(count)
if(next_url){
products_url = next_url
}
else{
products_url = find_starting_url(feed_category)
}
try{
var products = request("GET", products_url, {"headers": headers}).getBody().toString()
var parsed = JSON.parse(products)
var home = process.env.HOME
var fd = fs.openSync(home + "/data/abc/xyz/" + feed_category + "/" + count + ".json", 'w')
fs.writeSync(fd, products)
fs.closeSync(fd)
next_url = parsed['nextUrl']
count++;
if(next_url){
get_all_files(feed_category, count, next_url)
}
}catch(e){
if(retry_count >= 5){
console.log("TERRIBLE ENDING!!!", e)
}else{
retry_count++;
console.log("some error... retrying ..", e)
get_all_files(feed_category, count, next_url, retry_count)
}
}
}
var feed_category = process.argv[2]
get_all_files(feed_category, 1)
You're calling a synchronous function recursively, so every single request you make and all the data from each request are retained in memory in your local variables until all of the requests are done and all the recursive calls can unwind and finally free all the sets of local variables. This requires monster amounts of memory (as you have discovered).
It would be best to restructure your code so that the current request is processed and written to disk, and nothing from that request is retained when it moves on to the next request. The simplest way to do that is to use a while loop instead of a recursive call. In pseudo code:
initialize counter
while (more to do) {
process the next item
increment counter
}
I don't understand the details of what your code is trying to do well enough to propose a rewrite, but hopefully you can see how you can replace the recursion with the type of non-recursive structure above.
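For illustration, here is a minimal sketch of that restructuring (untested; it keeps the synchronous request calls, the file layout and find_starting_url from the question, and omits the retry logic for brevity):
function get_all_files(feed_category) {
  var headers = {'Id': '<my_header>', 'Token': '<my key>'};
  var count = 1;
  var products_url = find_starting_url(feed_category);
  while (products_url) {
    // fetch one page, write it to disk, then overwrite the variables,
    // so nothing from this iteration is retained by the next one
    var products = request("GET", products_url, {"headers": headers}).getBody().toString();
    var fd = fs.openSync(process.env.HOME + "/data/abc/xyz/" + feed_category + "/" + count + ".json", 'w');
    fs.writeSync(fd, products);
    fs.closeSync(fd);
    products_url = JSON.parse(products)['nextUrl'];
    count++;
  }
}
Each iteration overwrites products, so at most one page's body is held in memory at a time.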
It's because you are performing a recursive call to the get_all_files function, and it keeps the body variable in memory for every single execution, since every child execution needs to complete before the memory is released.

Assemble paginated ajax data in a Bacon FRP stream

I'm learning FRP using Bacon.js, and would like to assemble data from a paginated API in a stream.
The module that uses the data has a consumption API like this:
// UI module, displays unicorns as they arrive
beautifulUnicorns.property.onValue(function(allUnicorns){
console.log("Got "+ allUnicorns.length +" Unicorns");
// ... some real display work
});
The module that assembles the data requests sequential pages from an API and pushes onto the stream every time it gets a new data set:
// beautifulUnicorns module
var curPage = 1
var stream = new Bacon.Bus()
var property = stream.toProperty()
property.onValue(function(){}) // You have to add an empty subscriber, otherwise future onValues will not receive the initial value. https://github.com/baconjs/bacon.js/wiki/FAQ#why-isnt-my-property-updated
var allUnicorns = [] // !!! stateful list of all unicorns ever received. Is this idiomatic for FRP?
var getNextPage = function(){
/* get data for subsequent pages.
Skipping for clarity */
}
var gotNextPage = function (resp) {
Array.prototype.push.apply(allUnicorns, resp) // just adds the responses to the existing array reference
stream.push(allUnicorns)
curPage++
if (curPage <= pageLimit) { getNextPage() }
}
How do I subscribe to the stream in a way that provides me with a full list of all unicorns ever received? Is this flatMap or something similar? I don't think I need a new stream out of it, but I don't know; I'm sorry, I'm new to the FRP way of thinking. To be clear, assembling the array works; it just feels like I'm not doing the idiomatic thing.
I'm not using jQuery or another ajax library for this, so that's why I'm not using Bacon.fromPromise
You may also wonder why my consuming module wants the whole set instead of just the incremental update. If it were just appending rows, that could be OK, but in my case it's an infinite scroll and it should only draw data if both: 1. data is available, and 2. the area is on screen.
This can be done with the .scan() method. You will also need a stream that emits the items of one page, which you can create with .repeat().
Here is a draft (sorry, not tested):
var itemsPerPage = Bacon.repeat(function(index) {
var pageNumber = index + 1;
if (pageNumber < PAGE_LIMIT) {
return Bacon.fromCallback(function(callback) {
// your method that talks to the server
getDataForAPage(pageNumber, callback);
});
} else {
return false;
}
});
var allItems = itemsPerPage.scan([], function(allItems, itemsFromAPage) {
return allItems.concat(itemsFromAPage);
});
// Here you go
allItems.onValue(function(allUnicorns){
console.log("Got "+ allUnicorns.length +" Unicorns");
// ... some real display work
});
As you noticed, you also won't need the .onValue(function(){}) hack, or the curPage external state.
Here is a solution using flatMap and fold. When dealing with the network, you have to remember that the data can come back in a different order than you sent the requests - that's why the combination of fold and map.
var pages = Bacon.fromArray([1,2,3,4,5])
var requests = pages.flatMap(function(page) {
return doAjax(page)
.map(function(value) {
return {
page: page,
value: value
}
})
}).log("Data received")
var allData = requests.fold([], function(arr, data) {
return arr.concat([data])
}).map(function(arr) {
// I would normally write this as a oneliner
var sorted = _.sortBy(arr, "page")
var onlyValues = _.pluck(sorted, "value")
var inOneArray = _.flatten(onlyValues)
return inOneArray
})
allData.log("All data")
function doAjax(page) {
// This would actually be Bacon.fromPromise($.ajax...)
// Math random to simulate the fact that requests can return out
// of order
return Bacon.later(Math.random() * 3000, [
"Page"+page+"Item1",
"Page"+page+"Item2"])
}
http://jsbin.com/damevu/4/edit

Node.js: Receiving too many UDP messages at a time, losing them

My node server receives about 400 UDP messages in one second, and it all works, and I am able to process all 400 of them.
However, when I start to receive about 700 UDP messages in one second, I lose 2-20 of the messages, and they never get parsed :(
I have thought about some options here:
Create a queue of all the socket messages, then consume them one by one - although I can't figure out how to implement this.
Find a setting in Node / Express / the dgram socket where I can increase the memory size / buffer size, something like that - I couldn't find any settings like this, though :(
Use a different UDP receiver, i.e. stop using node's built-in UDP socket receiver - I didn't find other receivers.
Here's what my UDP receiver looks like:
var dgram = require("dgram");
var udpserver = dgram.createSocket("udp4");
var seatStateStore = require("./SeatStateStore");
udpserver.on("message",
function (msg, rinfo)
{
seatStateStore.parseMessage(msg.toString());
});
Anyone have any ideas? I couldn't figure out any of the 3 options :/ Can someone help me out?
Node v0.10.29
Express v3.14.0
===============================
UPDATE / SOLUTION
Here's the code I ended up using (a slightly modified version of @RoyHB's solution):
var dgram = require("dgram");
var udpserver = dgram.createSocket("udp4");
var seatStateStore = require("./SeatStateStore");
var Dequeue = require('dequeue');
var FIFO = new Dequeue();
fetcher();
udpserver.on("message",
function (msg, rinfo)
{
FIFO.push(msg.toString());
});
udpserver.bind(43278);
function fetcher () {
while (FIFO.length > 0)
{
var msg = FIFO.shift();
seatStateStore.parseMessage(msg);
}
setImmediate(fetcher); //make this function continuously run
}
I know there is already an answer to this, but as of today there is a way to increase the buffer on dgram, documented in the official documentation:
socket.setRecvBufferSize(size);
Added in: v8.7.0
size <integer>
Sets the SO_RCVBUF socket option. Sets the maximum socket receive buffer in bytes.
socket.setSendBufferSize(size)
Added in: v8.7.0
size <integer>
Sets the SO_SNDBUF socket option. Sets the maximum socket send buffer in bytes.
Usage example:
var socket = dgram.createSocket('udp4');
socket.on("listening", () => {
socket.setRecvBufferSize(100000000); // 100mb
socket.setSendBufferSize(100000000); // 100mb
});
The default value is 65507
There is a NPM module called node-dequeue. I use it a lot for similar situations to yours.
Basically:
your program pushes received messages onto the end of the queue.
an interval timer periodically activates another method or function (a queue-fetcher) which checks whether there are messages on the queue and, if so, fetches one or more and processes them.
Alternatively (and maybe preferably), instead of a timer you can use node's process.nextTick method to continuously check the queue for messages.
Ideally, seatStateStore.parseMessage would create a new object to asynchronously process one message, so that parseMessage returns without delay while the actual message processing continues in the background (see the bottom of the example code).
I haven't tested the code below, it's meant to illustrate, not to run
var FIFO = require ('dequeue');
var seatStateStore = require("./SeatStateStore");
var dgram = require("dgram");
setInterval(fetcher, 1);
var udpserver = dgram.createSocket("udp4");
udpserver.on("message",
function (msg, rinfo) {
FIFO.push(msg);
}
);
function fetcher () {
while (FIFO.length > 0) {
var msg = FIFO.shift();
seatStateStore.parseMessage(msg);
}
}
** OR (maybe better) **
var FIFO = require ('dequeue');
var seatStateStore = require("./SeatStateStore");
var dgram = require("dgram");
fetcher();
var udpserver = dgram.createSocket("udp4");
udpserver.on("message",
function (msg, rinfo) {
FIFO.push(msg);
}
);
function fetcher () {
while (FIFO.length > 0) {
var msg = FIFO.shift();
seatStateStore.parseMessage(msg);
}
// re-arm outside the loop so fetcher keeps running even when the queue is empty
process.nextTick(fetcher);
}
Outline of seatStateProcessor.parseMessage:
seatStateProcessor.parseMessage = function (msg) {
proc = new asyncProcHandler(msg, function (err) {
if (err) {
//handle the error
}
});
}

Loop with socket.io terminates at the beginning

I'm building this function to upload small tile images to the server.
The client builds the tileBuffer and then calls the fireTiles function.
Here I would like to build a loop based on tileBuffer.length. The server will handle the control. So, I emit StartAddTiles and I am immediately called back by the server with the AnotherTile event. The debugger shows me that I've been called by the server, and I see the code going into the socket.on('AnotherTile', ...) handler.
The problem is that when the code reaches the AddTile emit call, it stops there and nothing happens. The server does not receive the request and the loop terminates there.
Where is the error in my code?
function fireTiles (tileBuffer, mapSelected) {
var tiles = tileBuffer.length;
var tBx = 0;
try
{
var socket = io.connect('http://myweb:8080/');
socket.emit('StartAddTiles', tiles, mapSelected);
socket.on('AnotherTile', function (tlN){
if (tlN < tiles) {
var data = tileBuffer[tlN]; // use tlN so the server drives the loop
tBx++; // debug purpose
socket.emit('AddTile', mapSelected, data, tBx);
} else {
// something went wrong
alert('Error calculating tiles');
return;
}
});
}
catch(err)
{
document.getElementById('status').innerHTML = err.message;
}
}
Here is the server side:
io.sockets.on('connection', function(client) {
console.log('Connecting....');
// controls are limited, this is just a beginning
// Initiate loop
client.on('StartAddTiles', function(tiles, mapSelected) {
var mapId = mapSelected;
mapLoading[mapId] = { //Create a new Entry in The mapLoading Variable
tilesToLoad : tiles,
tilesLoaded : 0
}
console.log('Start loading '+mapLoading[mapId].tilesToLoad+' tiles.');
// Ask for the first tile
client.emit('AnotherTile', mapLoading[mapId].tilesLoaded);
//
});
// client add new Tile/Tiles
client.on('addTile', function(mapSelected, data, tBx) {
var mapId = mapSelected;
mapLoading[mapId].tilesLoaded = ++1;
console.log('Adding tile '+mapLoading[mapId].tilesLoaded+' of '+mapLoading[mapId].tilesToLoad+' tBx '+tBx);
// insert Tile
db_manager.add_tiles(tileBuffer, function(result) {
if (mapLoading[mapId].tilesLoaded == mapLoading[mapId].tilesToLoad) { // full map loaded
mapLoading[mapId] = ""; //reset the buffer
client.emit('TilesOk', mapLoading[mapId].tilesLoaded);
} else {
console.log('requesting tile num: '+mapLoading[mapId].tilesLoaded);
client.emit('AnotherTile', mapLoading[mapId].tilesLoaded);
}
//
});
});
Event names are case sensitive, so on the server side you should use AddTile (to match the client's emit), not addTile.
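That is, the server-side registration would become (sketch; handler body unchanged):
// must match the client's socket.emit('AddTile', mapSelected, data, tBx)
client.on('AddTile', function(mapSelected, data, tBx) {
  // ... existing handler body ...
});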
