Error handling over websockets: a design decision - javascript

I'm currently building a webapp that has two clear use cases:
1. Traditional: the client requests data from the server.
2. Streaming: the client requests a stream from the server, after which the server starts pushing data to the client.
Currently I'm implementing both 1 and 2 using JSON message passing over a websocket. However, this has proven hard, since I need to hand-code a lot of error handling because the client is not waiting for the response; it just sends the message hoping it will get a reply at some point.
I'm using JavaScript and React on the frontend and Clojure on the backend.
I have two questions regarding this:
1. Given the current design, what alternatives are there for error handling over a websocket?
2. Would it be smarter to split the two use cases, using REST for UC1 and websockets for UC2? Then I could use something like fetch on the frontend for the REST calls.
Update:
The current problem is that I don't know how to build an async send function over websockets that can match sent messages to their response messages.

Here's a scheme for doing request/response over socket.io. You could do this over a plain WebSocket, but you'd have to build a little more of the infrastructure yourself. The same code can be used on both client and server:
function initRequestResponseSocket(socket, requestHandler) {
    var cntr = 0;
    var openResponses = {};

    // send a request
    socket.sendRequestResponse = function(data, fn) {
        // put this data in a wrapper object that contains the request id
        // save the callback function for this id
        var id = cntr++;
        openResponses[id] = fn;
        socket.emit('requestMsg', {id: id, data: data});
    };

    // process a response message that comes back from a request
    socket.on('responseMsg', function(wrapper) {
        var id = wrapper.id, fn;
        if (typeof id === "number" && typeof openResponses[id] === "function") {
            fn = openResponses[id];
            delete openResponses[id];
            // call the saved callback with (err, data)
            fn(wrapper.err || null, wrapper.data);
        }
    });

    // process a requestMsg
    socket.on('requestMsg', function(wrapper) {
        // check the id explicitly so that a request id of 0 is not treated as missing
        if (requestHandler && typeof wrapper.id === "number") {
            requestHandler(wrapper.data, function(err, responseToSend) {
                socket.emit('responseMsg', {id: wrapper.id, err: err, data: responseToSend});
            });
        }
    });
}
This works by wrapping every message sent in a wrapper object that contains a unique id value. Then, when the other end sends its response, it includes that same id value. That id value can then be matched up with the callback response handler for that specific message. It works both ways, from client to server or from server to client.
You use this by calling initRequestResponseSocket(socket, requestHandler) once on a socket.io socket connection on each end. If you wish to receive requests, then you pass a requestHandler function which gets called each time there is a request. If you are only sending requests and receiving responses, then you don't have to pass in a requestHandler on that end of the connection.
To send a message and wait for a response, you do this:
socket.sendRequestResponse(data, function(err, response) {
    if (!err) {
        // response is here
    }
});
If you're receiving requests and sending back responses, then you do this:
initRequestResponseSocket(socket, function(data, respondCallback) {
    // process the data here

    // send response
    respondCallback(null, yourResponseData);
});
As for error handling, you can monitor for a loss of connection and you could build a timeout into this code so that if a response doesn't arrive in a certain amount of time, then you'd get an error back.
Here's an expanded version of the above code that implements a timeout for a response that does not come within some time period:
function initRequestResponseSocket(socket, requestHandler, timeout) {
    var cntr = 0;
    var openResponses = {};

    // send a request
    socket.sendRequestResponse = function(data, fn) {
        // put this data in a wrapper object that contains the request id
        // save the callback function for this id
        var id = cntr++;
        openResponses[id] = {fn: fn};
        socket.emit('requestMsg', {id: id, data: data});
        if (timeout) {
            openResponses[id].timer = setTimeout(function() {
                delete openResponses[id];
                if (fn) {
                    fn("timeout");
                }
            }, timeout);
        }
    };

    // process a response message that comes back from a request
    socket.on('responseMsg', function(wrapper) {
        var id = wrapper.id, requestInfo;
        if (typeof id === "number" && typeof openResponses[id] === "object") {
            requestInfo = openResponses[id];
            delete openResponses[id];
            if (requestInfo) {
                if (requestInfo.timer) {
                    clearTimeout(requestInfo.timer);
                }
                if (requestInfo.fn) {
                    requestInfo.fn(wrapper.err || null, wrapper.data);
                }
            }
        }
    });

    // process a requestMsg
    socket.on('requestMsg', function(wrapper) {
        // check the id explicitly so that a request id of 0 is not treated as missing
        if (requestHandler && typeof wrapper.id === "number") {
            requestHandler(wrapper.data, function(err, responseToSend) {
                socket.emit('responseMsg', {id: wrapper.id, err: err, data: responseToSend});
            });
        }
    });
}
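For example, with the timeout version above, a request that gets no reply within the window invokes its callback with the string "timeout" as the error. A minimal sketch (the payload and the 5-second window are just illustrative):
initRequestResponseSocket(socket, null, 5000);   // no incoming-request handler, 5 second timeout

socket.sendRequestResponse({query: "latest"}, function(err, response) {
    if (err === "timeout") {
        // no responseMsg arrived within 5 seconds
        console.log("request timed out");
        return;
    }
    // otherwise response holds the data the other end sent back
    console.log(response);
});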

There are a couple of interesting things in your question and your design. I prefer to ignore the implementation details and look at the high-level architecture.
You state that you want a client that requests data and a server that responds with some stream of data. Two things to note here:
HTTP 1.1 has options to send streaming responses (chunked transfer encoding). If your use case is only the sending of streaming responses, this might be a better fit for you. This does not hold when you e.g. want to push messages to the client that are not a response to some sort of request (sometimes referred to as Server-Sent Events).
Websockets, contrary to HTTP, do not natively implement a request/response cycle. You can use the protocol that way by implementing your own mechanism, which is something that e.g. the WAMP subprotocol does.
As you have found out, implementing your own mechanism comes with its pitfalls, and that is where HTTP has the clear advantage. Given the requirements stated in your question, I would opt for the HTTP streaming method instead of implementing your own request/response mechanism.
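If you go the HTTP streaming route, the React frontend can consume a chunked response incrementally with the fetch API's ReadableStream reader. A minimal sketch (the URL is a placeholder for whatever streaming endpoint the Clojure backend exposes):
// read a chunked/streaming HTTP response incrementally with fetch
async function consumeStream(url) {
    const response = await fetch(url);
    const reader = response.body.getReader();
    const decoder = new TextDecoder();

    while (true) {
        const { done, value } = await reader.read();
        if (done) break;                          // the server closed the stream
        console.log("chunk:", decoder.decode(value, { stream: true }));
    }
}

consumeStream('/api/stream');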

Related

SignalR with long process: send intermediate messages

I am using SignalR to start long computations on the server side and post a message to the client when the result is available.
The input binding is an HTTP request.
I would like to be able to send multiple messages back in order to notify the client of the different steps of the process (e.g., computation starts, computation ends, etc.).
I tried pushing different messages to context.bindings.signalRMessages, but I see that everything is sent together at the end of the whole process. Is there a way to send several messages at different times?
Another related issue is that my HTTP request on the client side remains stuck until the end of the process. I would like to be able to return a quick response early, since I get the actual result via a SignalR message.
Here is my server code:
module.exports = async function(context, req) {
    let ID = context.bindingData.invocationId;
    context.bindings.signalRMessages = [];
    const messageQueue = context.bindings.signalRMessages;

    var postMessage = (message) => {
        message.userId = req.query.userId;
        message.isPrivate = true;
        messageQueue.push(message);
    };

    let preProcessData = preProcess(req.body.input);
    let startMessage = {
        "target": "optimStart",
        "arguments": [{ preProcessData: preProcessData }]
    };
    postMessage(startMessage); // <<<< I want this one to be sent immediately

    try {
        let optimOutput = await computeOptim(req.body.input, ID); // that's the long process
        let response = {
            optimId: ID,
            optimOutput: optimOutput
        };
        let optimCompleteMessage = {
            "target": "optimComplete",
            "arguments": [response]
        };
        postMessage(optimCompleteMessage);
    } catch (err) {
        // ....
    }
};
Am I doing anything wrong, or is it just not possible?
Thanks!
This is not possible with a simple HTTP-triggered function, since bindings resolve only once the execution of the function completes.
For your scenario, Durable Functions would be the perfect choice.
You would still have an HTTP-triggered function (the client function) to start an orchestration and return immediately. In the orchestrator function, you would have separate activity functions for the processing and for sending updates to the client using the SignalR binding.
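A rough sketch of what the orchestrator could look like with the durable-functions JavaScript package; the activity names (NotifyClient, ComputeOptim) are hypothetical, each would be its own function, NotifyClient would use the SignalR output binding, and it assumes the user id is part of the orchestration input:
const df = require("durable-functions");

module.exports = df.orchestrator(function* (context) {
    const input = context.df.getInput();

    // tell the client right away that the computation has started
    yield context.df.callActivity("NotifyClient", { target: "optimStart", userId: input.userId });

    // run the long computation as its own activity
    const optimOutput = yield context.df.callActivity("ComputeOptim", input);

    // push the final result to the client
    yield context.df.callActivity("NotifyClient", { target: "optimComplete", userId: input.userId, output: optimOutput });

    return optimOutput;
});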

CometD data not reaching client

I have two client applications that use CometD to talk to a server. My server sends some data to my clients using the deliver() method of the ServerSession. The data is in the form of a string.
One of my applications is a JavaScript-based web application. I can access the data delivered by the server in the following manner:
function(theMsg) {
    alert(theMsg.data);
}
This works well as a callback for when I want to send data on a particular channel.
Unfortunately, my second application is a Java application whose callback does not seem able to get the data. The callback works as follows:
public void onMessage(ClientSessionChannel channel, Message message)
{
    String data = (String) message.getData();
    System.out.println("Data " + data);
}
The problem here is that getData() for some reason returns null in Java. I cannot seem to find any way to get at the data that I sent from the server!
Is there some kind of bug in the Java CometD API, or am I using the wrong method to get the data that I am sending from the server? How can I get at this data?
Someone please advise...
Addition 1: below is the first client, implemented in JavaScript, as requested by sbordet. This client works...
var cometD = $.cometd;
var isConnected = false;

var rcvHandshake = function(hndValue) {
    console.log("Received handshake. Success flag is " + hndValue.successful);
}

var amConnected = function(msgConnect) {
    if (cometD.isDisconnected())
    {
        isConnected = false;
        console.log("Server connection not established!");
    }
    else
    {
        var prevConnected = isConnected;
        // This checks whether or not the connection was actually successful
        isConnected = msgConnect.successful === true;
        if ((prevConnected == false) && (isConnected == true))
        {
            console.log("Connected to the server!");
            cometD.addListener("/service/output", updateOutput);
        }
        else if ((prevConnected == true) && (isConnected == false))
        {
            console.log("Connection to server has ended!")
        }
    }
}

var startUp = function() {
    console.log("Starting up...");
    var cometURL = $(location).attr('origin') + "/tester/cometd";
    cometD.configure({
        url: cometURL,
        logLevel: 'info'
    });
    cometD.addListener('/meta/handshake', rcvHandshake);
    cometD.addListener('/meta/connect', amConnected);
    cometD.handshake({
        "thehash.autohash": "foo-bar-baz-hash"
    });
}

var updateOutput = function(theOut) {
    alert(theOut.data);
}
I solved the problem.
I was looking over my Java code in order to format it for posting to this question when I noticed a typo in the channel names I was listening on. Because of the typo I was listening on a channel that I was actually using to publish requests to the server, so my listener fired on my own outgoing request (before any data had been returned), and as a result getData() was empty.
Listening on the correct channel solved the problem. My getData() call no longer returns null.
Sorry I bugged people about such a ridiculously amateurish mistake. I will try to avoid this sort of thing in future.
Special thanks to sbordet for requesting the full code, which caused me to re-examine it and find my typo...

Pooled connections - monitoring concurrent requests in Node.js Request library?

I'm sending out a lot of requests to another server, and want to limit them so as to not overload the server. My impression is that this can be done with the pool parameter in options, but I'm not sure if I'm doing so properly.
I'd like to be able to keep track of when the requests are sent out, as I'm trying to establish a duplex connection, and need to make sure the corresponding GET and POST requests are sent out at the same time.
Here's a simplified example of what I'm trying:
var request = require('request');

var options = {
    'url': 'http://www.google.com',
    'pool': {
        'maxSockets': 3
    }
};

for (var i = 0; i < 100; i++) {
    request.get(options, (function(j) {
        return function(err, res, body) {
            console.log(j);
        }
    })(i));
}
Is there an event emitted when the requests are actually sent out? Is there any way for me to track when, and in what order each request is being sent out?
I found this in the Node.js documentation:
Event: 'socket'
function (socket) { }
Emitted after a socket is assigned to this request.
You can use it to monitor connections as they are assigned sockets like this:
http.get(options, function(res) {
    // Do stuff
}).on("socket", function(socket) {
    socket.on("connect", function() {
        // record connection
    });
});
The same event is emitted with the request library, since it's just a wrapper around the builtin http module.
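To see the order in which requests actually go out, a rough sketch with the builtin http module is to tag each request with its index and log when it is assigned a socket; the 'socket' event fires once per request, in dispatch order (the host, the request count and the maxSockets value are just placeholders):
var http = require('http');

// limit concurrency the same way the pool option does
var agent = new http.Agent({ maxSockets: 3 });

for (var i = 0; i < 10; i++) {
    (function(j) {
        var req = http.get({ host: 'www.google.com', agent: agent }, function(res) {
            res.resume(); // drain the response so the socket is freed for the next queued request
            console.log('response ' + j + ' finished with status ' + res.statusCode);
        });
        // fires for every request, in the order the agent dispatches them
        req.on('socket', function(socket) {
            console.log('request ' + j + ' was assigned a socket at ' + Date.now());
        });
    })(i);
}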

Sequential execution in javascript over network

In my application, a recursive function calls a remote peer for some data and then calls itself again. Is there any way to wait for a response from the server and then continue the flow of execution?
I am using Simple-Peer for remote calls.
function foo() {
    data = getFromPeer();
    if (condition)
        foo();
    else
        return bar;
}
getFromPeer is a user-defined function which sends data to a remote peer using a SimplePeer connection. The remote peer responds back when it receives the request. There are no promises or callbacks defined as of now.
Looking at the documentation of https://github.com/feross/simple-peer, there does not seem to be a standard way of doing "Remote Procedure Calls" or receiving an ACKnowledgement of the fact that some data that was previously sent was actually received.
So in order to achieve what you want to achieve with simple-peer you need to add a custom "protocol" over the simple-peer channel. Since you have no guarantee over the order in which the remote peer will receive your messages, you need to give an ID to each message.
Something like this, in pseudo-code (assuming peer is a connected simple-peer instance):
pendingJobs = {}

function addJob(data, callback) {
    var id = uuid() // imagine a function giving a uuid
    pendingJobs[id] = callback
    peer.send({ id: id, payload: data })
}

peer.on('data', function(info) {
    var id = info.id
    var cb;
    if (pendingJobs[id] && info.payload) {
        cb = pendingJobs[id]
        cb(info.payload)
    }
})
Of course this pseudo-code solution is very rough: you would probably need to add some garbage collection of pendingJobs, and have error callbacks in case the remote peer never sends the ACK message.
You could also add a type to each message ('rpc', 'ack', ...).
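Here is a rough, promise-based variant of the same idea, so the recursive function from the question can simply await each round trip. It assumes both peers agree to exchange JSON strings over the data channel; the helper names, the timeout and the hasMore field are made up for illustration:
const pending = new Map();
let nextId = 0;

function sendRequest(peer, data) {
    return new Promise((resolve, reject) => {
        const id = nextId++;
        pending.set(id, { resolve, reject });
        peer.send(JSON.stringify({ id: id, payload: data }));
        // fail the request if no answer arrives in time
        setTimeout(() => {
            if (pending.delete(id)) reject(new Error('peer did not respond'));
        }, 10000);
    });
}

peer.on('data', raw => {
    const msg = JSON.parse(raw);
    const job = pending.get(msg.id);
    if (job) {
        pending.delete(msg.id);
        job.resolve(msg.payload);
    }
});

// the recursive function from the question then becomes:
async function foo() {
    const data = await sendRequest(peer, { want: 'next-chunk' });
    if (data.hasMore) return foo();
    return data;
}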

A design pattern for async requests to handle success, failure, retry ? (javascript)

I'm writing a mobile app with Appcelerator Titanium that makes a lot of different xhr requests. This is not really an Appcelerator Titanium specific question. But if you do write some code, I hope it's javascript.
The app needs to authenticate itself, the user must be logged in for some interactions, etc.
I've come to a point where any request might get any kind of response such as:
not authenticated
not logged
bad params
successful
...
The requests are wrapped in different model methods or helpers.
The thing is, I'm not familiar with this kind of app. I was wondering what are the best practices.
Some real questions for example would be:
If the app is not authenticated (token expired, first launch), should the app try to authenticate itself and then resend the request that was denied (transparently to the user)?
Should I send an authentication request each time the app launches and then "forget" about it?
The problem I'm facing is that the code quickly becomes big if I try to handle this for each request: full of nested callbacks, retry conditions, various event listeners to manage, etc. It just does not feel very "nice", and it's not DRY at all. What I really need is, for any request: check what went wrong, try to fix it (authenticate if needed, log in automatically if possible or show the login UI, etc.), then if that works retry the original request a couple of times, and abort if needed.
I've been looking at the promise pattern but only know theory and don't know if it could be what I need.
So I welcome any advice regarding this particular problem. I wonder how apps like "Facebook" handle this.
Thank you for your help
This question is not easily answered, but let me try to give you some ideas:
The most important thing, before coding anything in your app, is the API itself. It has to be reliable and adhere to standards. I will not go into too much detail here, but a well-written RESTful API can reduce the complexity of your HTTP client significantly. It has to respond with standard HTTP status codes to methods like POST, GET, PUT, DELETE...
A pretty good read is The REST API Design Handbook by George Reese.
My approach to httpClients with Titanium is a single module, which is loaded via require() wherever needed. I stick to one single client at a time, as I had massive problems with multiple parallel calls. Whenever a call is made, the client checks if there is already a call in progress and sends it to a queue if necessary.
Let me show you an example. I have left out lots of stuff for sake of brevity:
// lib/customClient.js
var xhrRequest;     // This will be our HTTPClient
var callQueue = []; // This will be our queue

// Register the request
// params are:
//   method (e.g. 'GET')
//   url (e.g. 'http://test.com/api/v1/user/1')
//   done (callback function)
function registerRequest(params) {
    if(!xhrRequest) {
        sendRequest(params);
    } else {
        queueRequest(params);
    }
}

// This simply pushes the request
// onto the callQueue
function queueRequest(params) {
    callQueue.push(params);
}

// Send the request with the params from register
// Please note that I do not hardcode error messages,
// I just do it here so it is easier to read
function sendRequest(params) {
    // Set callback if available and valid
    var callback = params.done && typeof(params.done) === "function" ? params.done : null;

    // Set method
    var method = params.method || 'GET';

    // Create the HTTP Client
    xhrRequest = Ti.Network.createHTTPClient({
        // Success
        onload: function() {
            // You can check for status codes in detail here
            // For brevity, I will just check if it is valid
            if (this.status >= 200 && this.status < 300) {
                if(this.responseText) {
                    // You might want to check if it can be parsed as JSON here
                    try {
                        var jsonData = JSON.parse(this.responseText);
                        if(callback) callback({ success: true, response: jsonData });
                    } catch(e) {
                        if(callback) callback({ success: false, errormessage: 'Could not parse JSON data' });
                    }
                    processQueue();
                } else {
                    if(callback) callback({ success: false, errormessage: 'No valid response received' });
                    processQueue();
                }
            } else {
                if(callback) callback({ success: false, errormessage: 'Call response is success but status is ' + this.status });
                processQueue();
            }
        },
        // Error
        onerror: function(e) {
            if(this.responseText) {
                try {
                    var jsonData = JSON.parse(this.responseText);
                    if(callback) callback({ success: false, response: jsonData });
                } catch(e) {}
            }
            processQueue();
        },
    });

    // Prepare and send request
    // A lot more can (and should) be configured here, check documentation!
    xhrRequest.setTimeout(10000);
    xhrRequest.open(method, params.url);
    xhrRequest.send();
}

// Checks if there is anything else in the queue
// and sends it
function processQueue() {
    xhrRequest = null;
    var nextInQueue = callQueue.shift();
    if(nextInQueue) sendRequest(nextInQueue);
}

// Our public API
var publicAPI = {
    sendRequest: function(params) {
        registerRequest(params);
    }
};
module.exports = publicAPI;
I can then send a call from any other controller/view:
var customClient = require('lib/customClient'); // omit 'lib' if you use alloy

// Send the request
customClient.sendRequest({
    method : 'GET',
    url    : 'http://test.com/api/v1/user/1',
    done   : function(response) {
        Ti.API.debug(JSON.stringify(response));
    }
});
Note that this is not complete and does not check for connectivity, has no real error handling etc., but it might help you to get an idea.
I think there is loads of stuff to talk about here, but I will stop here for now...
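Since the question mentions the promise pattern: a rough sketch of how a callback-style client like the module above could be wrapped in a Promise with a simple retry policy, assuming your Titanium/JavaScript runtime supports Promises (the retry count and parameters are illustrative, not part of Titanium):
// wrap the callback-based client in a Promise and retry a fixed number of times
function requestWithRetry(params, maxAttempts) {
    return new Promise(function(resolve, reject) {
        var attempt = 0;

        function tryOnce() {
            attempt++;
            customClient.sendRequest({
                method: params.method,
                url: params.url,
                done: function(result) {
                    if (result.success) {
                        resolve(result.response);
                    } else if (attempt < maxAttempts) {
                        tryOnce(); // retry the original request
                    } else {
                        reject(result.errormessage || 'request failed');
                    }
                }
            });
        }

        tryOnce();
    });
}

// usage: retry up to 3 times, then give up
requestWithRetry({ method: 'GET', url: 'http://test.com/api/v1/user/1' }, 3)
    .then(function(user) { Ti.API.debug(JSON.stringify(user)); })
    .catch(function(err) { Ti.API.error(err); });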
