I have a Node.js application which spawns a child process to run another application;
that application has a host and port:
// Spawn the target application as a child process and relay its output.
// NOTE: spawn() does not use a shell by default, so 'start app' must name an
// actual executable; any arguments should be passed as an array.
var exec = require('child_process').spawn;
var child = exec('start app');
console.log("Child Proc ID " + child.pid)
child.stdout.on('data', function(data) {
    console.log('stdout: ' + data);
});
child.stderr.on('data', function(data) {
    // Fixed label: this stream is stderr, not stdout (copy-paste bug).
    console.log('stderr: ' + data);
});
child.on('close', function(code) {
    console.log('closing code: ' + code);
});
Some applications start immediately, and some take 10–20 seconds until they are up.
Now I use node-http-proxy to route to the app, and the problem is that I get an error when the user tries to reach the app before it is up and running.
Any idea how I can solve this issue?
// Proxy error handler: ends the client response with a plain-text message
// instead of sending an HTTP error status (framework limitation noted below).
// NOTE(review): if this runs inside a per-request function, a new listener is
// added on every request — register it once at startup instead.
proxy.on('error', function (err, req, res) {
res.end('Cannot run app');
});
Btw, I cannot send a 500 response in the proxy error handler due to a limitation of our framework. Any other idea how I can track the application, maybe with some timeout, to see whether it sends a 200 response?
UPDATE - Sample of my logic
// Demo target: an HTTP server on port 5000 that the proxy forwards to.
// NOTE(review): `httpProxy` is assigned without var/let — an implicit global;
// presumably declared elsewhere in the real app. Confirm against the full file.
httpProxy = require('http-proxy');
var proxy = httpProxy.createProxyServer({});
http.createServer(function (req, res) {
console.log("App proxy new port is: " + 5000)
res.end("Request received on " + 5000);
}).listen(5000);
// Forward an incoming request to the target app on port 5000.
function proxyRequest(req, res) {
    var hostname = req.headers.host.split(":")[0];
    // Pass the error handler as the callback argument of proxy.web() instead
    // of calling proxy.on('error', ...) inside the request handler: the .on()
    // form adds one more listener on EVERY request (listener leak), and each
    // later error would invoke every previously registered handler.
    proxy.web(req, res, {
        target: 'http://' + hostname + ':' + 5000
    }, function (err) {
        res.end('Cannot run app');
    });
}
What you need is to listen for the first response on your proxy and look at its status code to determine whether your app started successfully or not. Here's how you do that:
proxy.on('proxyRes', function (proxyRes, req, res) {
    // Your target app is definitely up and running now because it just sent a
    // response. Read the status code from proxyRes — the *target's* response —
    // not from res, which is the not-yet-written response back to the client
    // (its statusCode is still the default at this point).
    var status = proxyRes.statusCode;
});
Hope this helps.
Not sure if it makes sense, but in your main app the experience should start with an HTML page, and each child process should have its own loader.
So basically you need an HTTP handler which holds the request until the child process is ready. Just make an Ajax call from the HTML and show a loading animation until the service is ready.
//Ajax call for each process and update the UI accordingly
//Ajax call for each process and update the UI accordingly
// jQuery's success callback receives the response *body* as its first
// argument (the jqXHR object is the third), so use it directly — the original
// read a non-existent `responseText` property off the body.
$.get('/services/status/100').then(function(resp) {
    $('#service-100').html(resp);
})
//server side code (express syntax)
//server side code (express syntax)
// Route path fixed: the original had a trailing space in ':id ' so
// GET /services/status/100 would never match this route.
app.get('/services/status/:id', function(req, res) {
    // Check if the service is ready
    serviceManager.isReady(req.params.id, function(err, serviceStats) {
        if (err) {
            // Error path: report it to the UI (which can show a failure state)
            res.send(err);
            return;
        }
        // Notify the UI that the service is ready to run, and hide the loader
        res.send(serviceStats);
    });
})
I am not sure I understand the question correctly, but you want to spin up a child process on request, and you want the request to wait for that child process and then be sent to it?
If that is so a simple solution will be to use something like this
var count = 0;              // polling attempts made so far
var maxDelay = 45;          // give up after this many attempts (~seconds)
var checkEverySeconds = 1;  // poll interval in seconds
var started = false;        // set once the app reports HTTP 200

async.whilst(
    function () {
        // Keep polling until the app is up or attempts run out.
        return !started && count < maxDelay;
    },
    function (callback) {
        count++;
        self.getAppStatus(apiKey, function (err, status) {
            if (status != 200) {
                // Not ready yet: retry after the polling interval.
                return setTimeout(callback, checkEverySeconds * 1000);
            }
            started = true;
            // The whilst callback must ALWAYS be invoked — the original
            // skipped it on success, leaving the loop (and its final
            // handler) hanging forever.
            callback();
        });
    },
    function (err) {
        if (err || !started) {
            // Internal error, or the app never came up within maxDelay
            // attempts. The original never produced an error here, so the
            // request would simply hang on timeout.
            return continueWithRequest(new Error('process cannot spin!'));
        }
        // App responded 200: forward the request to the child process.
        continueWithRequest();
    }
);
The function continueWithRequest() will forward the request to the child process and the getAppStatus will return 200 when the child process has started and some other code when it is not. The general idea is that whilst will check every second if the process is running and after 45 second if not it will return an error. Waiting time and check intervals can be easily adjusted. This is a bit crude, but will work for delaying the request and setTimeout will start a new stack and will not block. Hope this helps.
If you know (around) how much time the app takes to get up and running, simply add setTimeout(proxyRequest, <Time it takes the app to start in MS>)
(There are most likely more smart/complicated solutions, but this one is the easiest.)
Why not use event-emitter or messenger?
// Use Node's built-in EventEmitter. (The npm "event-emitter" package exports
// a *factory* that must be called — requiring it and using .emit directly on
// the module object, as the original did, does not work.)
var EventEmitter = require('events').EventEmitter;
var eventEmitter = new EventEmitter();
var childStart = require('./someChildProcess').start()
if (childStart !== true) {
    // NOTE(review): `data` is not defined in this snippet — presumably it
    // comes from the surrounding scope; confirm before use.
    eventEmitter.emit('progNotRun', {
        data: data
    })
}
function proxyRequest(req, res) {
    var hostname = req.headers.host.split(":")[0];
    proxy.web(req, res, {
        target: 'http://' + hostname + ':' + 5000
    });
    // once() so each request registers at most one listener (the original
    // .on() leaked a listener per request). Also note res.end(chunk, encoding)
    // takes an encoding as its second argument, not extra payload, so the
    // original `res.end('Cannot run app', data)` was misusing the API.
    eventEmitter.once('progNotRun', function(data) {
        res.end('Cannot run app')
    })
}
Related
I am exploring ways to abort client requests that are taking too long, thereby consuming server resources.
Having read some sources (see below), I tried a solution like the following (as suggested here):
const express = require('express');
const server = express();
// Attempted per-request timeout middleware. NOTE(review): req/res.setTimeout
// re-arm the underlying socket's *inactivity* timer, not a total-elapsed-time
// budget, and calling res.status(...).send(...) in the callback can throw if
// headers were already sent — confirm against the Node.js http docs.
server
.use((req, res, next) => {
req.setTimeout(5000, () => {
console.log('req timeout!');
res.status(400).send('Request timeout');
});
res.setTimeout(5000, () => {
console.log('res timeout!');
res.status(400).send('Request timeout');
});
next();
})
.use(...) // more stuff here, of course -- placeholder, not valid JS as written
.listen(3000);
However, it seems not to work: the callbacks are never called, and the request is not interrupted.
Yet, according to recent posts, it should.
Apart from setting the timeout globally (i.e. server.setTimeout(...)), which would not suit my use case,
I have seen many suggesting the connect-timeout middleware.
However, I read in its docs that
While the library will emit a ‘timeout’ event when requests exceed the given timeout, node will continue processing the slow request until it terminates.
Slow requests will continue to use CPU and memory, even if you are returning a HTTP response in the timeout callback.
For better control over CPU/memory, you may need to find the events that are taking a long time (3rd party HTTP requests, disk I/O, database calls)
and find a way to cancel them, and/or close the attached sockets.
It is not clear to me how to "find the events that are taking long time" and "a way to cancel them",
so I was wondering if someone could share their suggestions.
Is this even the right way to go, or is there a more modern, "standard" approach?
Specs:
Node 12.22
Ubuntu 18.04
Linux 5.4.0-87-generic
Sources:
Express.js Response Timeout
Express.js connect timeout vs. server timeout
Express issue 3330 on GitHub
Express issue 4664 on GitHub
Edit:
I have seen some answers and comments offering a way to setup a timeout on responses or "request handlers": in other words, the time taken by the middleware is measured, and aborted if it takes too long. However, I was seeking for a way to timeout requests, for example in the case of a client sending a large file over a slow connection. This happens probably even before the first handler in the express router, and that is why I suspect that there must be some kind of setting at the server level.
Before rejecting long request, I think, it's better to measure requests, find long ones, and optimize them. if it is possible.
How to measure requests?
Simplest way it is to measure time from start, till end of request. You'll get Request Time Taken = time in nodejs event loop + time in your nodejs code + wait for setTimeout time + wait for remote http/db/etc services
If you don't have many setTimeout's in code, then Request Time Taken is a good metric.
(But in high load situations, it becomes unreliable, it is greatly affected by time in event loop )
So you can try this measure and log solution
http://www.sheshbabu.com/posts/measuring-response-times-of-express-route-handlers/
How to abort long request
it all depends on your request handler
Handler does Heavy computing
In case of heavy computing, which blocks the main thread, there's nothing you can do without rewriting the handler.
if you set req.setTimeout(5000, ...) - it fires after res.send(), when main loop will be unlocked
// Illustration: a CPU-bound handler. The synchronous loop blocks the event
// loop, so any req.setTimeout callback cannot fire until after res.send().
function (req, res) {
for (let i = 0; i < 1000000000; i++) {
//blocking main thread loop
}
res.send("halted " + timeTakenMs(req));
}
So you can make your code async, by injecting setTimeout(, 0) some where;
or move computing to worker thread
Handler has many remote requests
I simulate remote requests with Promisfied setTimeout
// Illustration: a handler whose latency comes from sequential awaited remote
// calls, simulated here with a promisified setTimeout.
async function (req, res) {
let delayMs = 500;
await delay(delayMs); // maybe axios http call
await delay(delayMs); // maybe slow db query
await delay(delayMs);
await delay(delayMs);
res.send("long delayed" + timeTakenMs(req));
}
In this case you can inject some helpers to abort your request chain
blockLongRequest - throws error if request time is too big;
async function (req, res) {
let delayMs = 500;
await delay(delayMs); // mayby axios call
blockLongRequest(req);
await delay(delayMs); // maybe db slow query
blockLongRequest(req);
await delay(delayMs);
blockLongRequest(req);
await delay(delayMs);
res.send("long delayed" + timeTakenMs(req));
})
single remote request
function (req, res) {
let delayMs = 1000;
await delay(delayMs);
//blockLongRequest(req);
res.send("delayed " + timeTakenMks(req));
}
we don't use blockLongRequest because it's better to deliver answer instead of error.
Error may trigger client to retry, and you get your slow requests doubled.
Full example
(sorry for TypeScript, `yarn ts-node server.ts`)
import express, { Request, Response, NextFunction } from "express";

// Augment Express' Request with the start timestamp set by the first middleware.
declare global {
  namespace Express {
    export interface Request {
      start?: bigint;
    }
  }
}

const server = express();

// Record a high-resolution start time on every incoming request.
server.use((req, res, next) => {
  req["start"] = process.hrtime.bigint();
  next();
});

server.get("/", function (req, res) {
  res.send("pong " + timeTakenMks(req));
});

server.get("/halt", function (req, res) {
  for (let i = 0; i < 1000000000; i++) {
    //halting loop
  }
  res.send("halted " + timeTakenMks(req));
});

server.get(
  "/delay",
  expressAsyncHandler(async function (req, res) {
    let delayMs = 1000;
    await delay(delayMs);
    blockLongRequest(req); //actually no need for it
    res.send("delayed " + timeTakenMks(req));
  })
);

server.get(
  "/long_delay",
  expressAsyncHandler(async function (req, res) {
    let delayMs = 500;
    await delay(delayMs); // maybe axios call
    blockLongRequest(req);
    await delay(delayMs); // maybe slow db query
    blockLongRequest(req);
    await delay(delayMs);
    blockLongRequest(req);
    await delay(delayMs);
    res.send("long delayed" + timeTakenMks(req));
  })
);

// Error-handling middleware must be registered AFTER the routes it guards:
// Express only dispatches next(err) to error handlers that appear later in
// the stack, so the original placement (before the routes) meant route errors
// fell through to the default handler instead of this one.
server.use((err: any, req: Request, res: Response, next: NextFunction) => {
  console.error("Error captured:", err.stack);
  res.status(500).send(err.message);
});

server.listen(3000, () => {
  console.log("Ready on 3000");
});
/**
 * Resolve after the given number of milliseconds (promisified setTimeout).
 */
function delay(delayTs: number): Promise<void> {
  return new Promise((resolve) => setTimeout(resolve, delayTs));
}
/**
 * Microseconds elapsed since the request's start mark, or 0 when the timing
 * middleware did not run for this request.
 */
function timeTakenMks(req: Request) {
  if (!req.start) {
    return 0;
  }
  const elapsedNs = process.hrtime.bigint() - req.start;
  return elapsedNs / BigInt(1000);
}
function blockLongRequest(req: Request) {
const timeTaken = timeTakenMks(req);
if (timeTaken > 1000000) {
throw Error("slow request - aborting after " + timeTaken + " mks");
}
}
/**
 * Wrap an async route handler so a rejected promise is forwarded to next(),
 * letting Express' error middleware handle it.
 */
function expressAsyncHandler(
  fn: express.RequestHandler
): express.RequestHandler {
  return (...args) => {
    const next = args[args.length - 1] as any;
    return Promise.resolve(fn(...args)).catch(next);
  };
}
I hope, this approach helps you to find an acceptable solution
To provide context, here's the problem I'm attempting to solve:
I've made a giphy bot for a casual groupchat with friends of mine. By typing /giphy [terms] in a message, it will automatically post the top result for [terms]. My friends, being the rambunctious assholes that they are, quickly started abusing it to spam the groupchat. What I would like to do to prevent this is only allow my postMessage function to be called once per minute.
What I've tried:
Using setTimeout(), which doesn't do exactly what I'd like, since it will only call the function after the amount of time specified in the argument has passed. As far as I can tell, this will cause a delay in messages from the time the bot is called, but it won't actually prevent the bot from accepting new postMessage() calls in that time.
Using setInterval(), which just causes the function to be called forever at a certain interval.
What I think might work:
Right now, I'm working with two .js files.
Index.js
// HTTP entry point: routes POST / to the bot and GET / to a health ping
// using the `director` router.
// NOTE(review): `cool` is declared but never used in this snippet.
var http, director, cool, bot, router, server, port;
http = require('http');
director = require('director');
bot = require('./bot.js');
router = new director.http.Router({
'/' : {
post: bot.respond,
get: ping
}
});
server = http.createServer(function (req, res) {
// Buffer the raw request body chunks; bot.respond parses req.chunks[0].
req.chunks = [];
req.on('data', function (chunk) {
req.chunks.push(chunk.toString());
});
// Router error fallback. NOTE(review): err.status may be undefined for some
// errors, which would make writeHead throw — confirm director's error shape.
router.dispatch(req, res, function(err) {
res.writeHead(err.status, {"Content-Type": "text/plain"});
res.end(err.message);
});
});
port = Number(process.env.PORT || 5000);
server.listen(port);
// Health-check handler (director binds this.req/this.res).
function ping() {
this.res.writeHead(200);
this.res.end("This is my giphy side project!");
}
Bot.js
var HTTPS = require('https');
var botID = process.env.BOT_ID;
var giphy = require('giphy-api')();
// Handle an incoming GroupMe callback: if the message matches "/giphy <terms>",
// search Giphy and post one of the top 10 results back to the group.
// Fix: the original never closed respond() — postMessage was nested inside it
// and the file ended with unbalanced braces. postMessage is now a top-level
// sibling function, which also matches how exports.respond is declared.
function respond() {
    var request = JSON.parse(this.req.chunks[0]);
    var giphyRegex = /^\/giphy (.*)$/;
    var botMessage = giphyRegex.exec(request.text);
    // Random index into the top 10 results.
    var offset = Math.floor(Math.random() * 10);
    if (request.text && giphyRegex.test(request.text) && botMessage != null) {
        this.res.writeHead(200);
        giphy.search({
            q: botMessage[1],
            rating: 'pg-13'
        }, function (err, res) {
            try {
                postMessage(res.data[offset].images.downsized.url);
            } catch (err) {
                // Fewer than `offset` results (or none): stock fallback reply.
                postMessage("There is no gif of that.");
            }
        });
        this.res.end();
    } else {
        this.res.writeHead(200);
        this.res.end();
    }
}

// Post a message to the group through the GroupMe bot API.
function postMessage(phrase) {
    var botResponse, options, body, botReq;
    botResponse = phrase;
    options = {
        hostname: 'api.groupme.com',
        path: '/v3/bots/post',
        method: 'POST'
    };
    body = {
        "bot_id" : botID,
        "text" : botResponse
    };
    botReq = HTTPS.request(options, function(res) {
        if (res.statusCode == 202) {
            // Accepted — nothing to do.
        } else {
            console.log('Rejecting bad status code: ' + res.statusCode);
        }
    });
    botReq.on('error', function(err) {
        console.log('Error posting message: ' + JSON.stringify(err));
    });
    botReq.on('timeout', function(err) {
        console.log('Timeout posting message: ' + JSON.stringify(err));
    });
    botReq.end(JSON.stringify(body));
}

exports.respond = respond;
Basically, I'm wondering where would be the ideal place to implement the timer that I'm envisioning. It seems like I would want to have it only listen for /giphy [terms] after one minute, rather than waiting one minute to post.
My Question(s):
Would the best way to go about this be to set a timer on the response() function, since then it will only actually parse the incoming information once per minute? Is there a more elegant place to put this?
How should the timer work on that function? I don't think I can just run response() once every minute, since that seems to mean it'll only parse incoming json from the GroupMe API once per minute, so it could potentially miss incoming messages that I would want it to capture.
Store the time when a request is made and then use it to ignore subsequent requests if they are executed too fast.
// Simple rate limiter: remember the time of the last accepted request and
// reject anything arriving within waitTime of it.
var waitTime = 10*1000; // 10 s in millis
var lastRequestTime = null;
function respond() {
if(lastRequestTime){
var now = new Date();
if(now.getTime() - lastRequestTime.getTime() <= waitTime){
// Too soon: reply immediately and skip posting.
this.res.writeHead(200);
this.res.end("You have to wait "+waitTime/1000+" seconds.");
return;
}
}
lastRequestTime = new Date();
// NOTE(review): schematic — the real postMessage(phrase) takes the message
// text as an argument; wire the original parsing/search logic in here.
postMessage();
}
I have been building a youtube video conversion app which streams youtube videos using youtube-dl and saves them, everything was working fine until I attempted to stream a video that was over an hour long. When the task was anywhere between 50% - 100% complete or 40-80seconds in, would be when the entire block of code would get re-run resulting in multiple simultaneous streams occurring. The response can therefor never get sent as it waits for the pipe to finish. Adding next(); outside the stream function allowed the conversion to complete with out any interruption or reruns of the code block, however it resulted in the following error when attempting to send the response:
throw new Error('Can\'t set headers after they are sent.');
This is the Node.js code block in question:
// Download/convert a YouTube video with youtube-dl, streaming progress to the
// client over socket.io and responding with the saved filename when done.
// NOTE(review): no response is sent until the pipe finishes; Node's default
// 2-minute server timeout then drops the connection and the client retries,
// which is what re-runs this block (the problem described above).
app.post('/convert/', function (req, res, next){
var url = 'http://www.youtube.com/'+req.body.url;
var quality = req.body.quality;
var socketId = req.body.socketId;
// NOTE(review): `stream` is assigned without var/let — an implicit global,
// shared across concurrent requests; confirm this is intentional.
stream = youtubedl(url,
['-f ' + quality],
// // Additional options can be given for calling `child_process.execFile()`.
{ cwd: __dirname });
stream.on('info', function(info) {
console.log('Download started');
console.log('filename: ' + info._filename);
console.log('size: ' + info.size);
console.log('format: ' + info.format);
var fileName = info._filename;
var videoId = info.id;
var videoTitle = info.title;
// Strip characters that are unsafe in filenames.
videoTitle = videoTitle.replace(/[^a-zA-Z0-9\s]/g, '');
console.log(videoTitle);
var mp4 = 'downloads/'+videoTitle+'-'+info.format_id+'.mp4';
fs.writeFile(mp4, "file", function(err) {
if(err) {
return console.log(err);
}
console.log("The file was saved!");
var stat = fs.statSync(mp4);
// Progress-stream: emit percentage updates every 100 ms.
var str = progress({
length: info.size,
time: 100
});
str.on('progress', function(progress) {
io.to(global.socketid).emit('progressVideo',{progress: progress.percentage});
console.log(info.size);
console.log(progress.transferred);
console.log(progress.percentage);
console.log(progress.remaining);
});
// Pipe video data through the progress meter into the output file.
var pipe = stream.pipe(str).pipe(fs.createWriteStream(mp4));
pipe.on('finish', function () {
console.log("stream finished.");
// Only now is the HTTP response sent — see the timeout note above.
res.json(videoTitle+'-'+info.format_id+'.mp4');
});
});
});
// next();
});
Called by some Angular code.
// Sends youtube link to backend
$scope.getVideo = function(youtubeLink, resolution){
// NOTE(review): substring(24) assumes a fixed-length URL prefix
// (e.g. "https://www.youtube.com/") — confirm for all accepted link forms.
var cleanedLink = youtubeLink.substring(24);
var url = {url: cleanedLink, quality: resolution};
// .success() is AngularJS' legacy promise API for $http.
$http.post('/convert/', url).success(function (response){
// Do some stuff
});
}
Confused as to why it was getting run more then once, I slowly removed more and more code until I was left with this simple test.
// Minimal repro: a handler that never sends a response. Node's default
// 2-minute server timeout drops the connection and the client retries,
// which is why "hello!" is logged repeatedly.
app.post('/convert/', function (req, res, next){
console.log('hello!');
});
Which was called by an ng-click event and after waiting a minute or so the console also printed out two and then three hello! statements. I am completely lost as to why this happens. If anyone could shed some light on this for me it would be greatly appreciated.
So after just getting down to a very basic post route and logging out some text but not returning a response, I came to the conclusion the issue resides with node. I decided to record the length of time between the console.log statements which turned out to be every 2 minutes. With this I was able to find out that node has a default timeout of 2 minutes if a response is not sent back to the client.
I was able to set the response to never timeout with the following code:
res.connection.setTimeout(0);
I hope this helps anyone else that needs to hold connections for large periods of times for file conversions/transfers etc...
I have a Hubot plugin, that listens for JIRA webhooks, and announces in a room when new tickets are created:
# Hubot endpoint for JIRA webhooks: announce newly created issues in the room.
module.exports = (robot) ->
  robot.router.post '/hubot/tickets', (req, res) ->
    data = if req.body.payload? then JSON.parse req.body.payload else req.body
    # Use `is` (===) here: a single `=` is an assignment in CoffeeScript, so
    # the original condition was always truthy and fired for EVERY webhook
    # event, not just issue creation.
    if data.webhookEvent is 'jira:issue_created'
      console.dir("#{new Date()} New ticket created")
      shortened_summary = if data.issue.fields.summary.length >= 20 then data.issue.fields.summary.substring(0, 20) + ' ...' else data.issue.fields.summary
      shortened_description = if data.issue.fields.description.length >= 50 then data.issue.fields.description.substring(0, 50) + ' ...' else data.issue.fields.description
      console.log("New **#{data.issue.fields.priority.name.split ' ', 1}** created by #{data.user.name} (**#{data.issue.fields.customfield_10030.name}**) - #{shortened_summary} - #{shortened_description}")
      robot.messageRoom "glados-test", "New **#{data.issue.fields.priority.name.split ' ', 1}** | #{data.user.name} (**#{data.issue.fields.customfield_10030.name}**) | #{shortened_summary} | #{shortened_description}"
    res.send 'OK'
I'd like to extend this, to perform lookup against a remote API - basically, there's extra info I want to lookup, then add into the message I'm passing to room.messageRoom. I'm using request, because I need digest support.
So the following snippet works fine on its own.
# Standalone digest-auth request example.
# NOTE(review): request.get() returns the Request object itself, not the HTTP
# response — the parsed `contracts` value only exists inside the callback,
# which is exactly the access problem described below.
request = require('request')
company_lookup = request.get('https://example.com/clients/api/project?name=FOOBAR', (error, response, body) ->
contracts = JSON.parse(body)['contracts']
console.log contracts
).auth('johnsmith', 'johnspassword', false)
And this is where my JS/Node newbness comes out...lol.
I can process the response inside the callback — but I'm not really sure how to access it outside of that callback?
And how should I be integrating this into the webhook processing code - do I just move the snippet inside the if block, and assign it to a variable?
I'd use a middleware (supposing you are using Express with Node.js) so you can add the company_lookup response to the req and use it into any route where you add the middleware. http://expressjs.com/guide/using-middleware.html
For example:
server.js
var middlewares = require('./middlewares');
module.exports = function (robot) {
// Tell the route to execute the middleware before continue
return robot.router.post(middlewares.company_loop, '/hubot/tickets', function (req, res) {
// Now in the req you have also the middleware response attached to req.contracts
console.log(req.contracts);
return res.send('OK');
});
};
middlewares.js
var request = require('request');
// This is your middleware where you can attach your response to the req
exports.company_lookup = function (req, res, next) {
request.get('https://example.com/clients/api/project?name=FOOBAR', function (error, response, body) {
var contracts;
contracts = JSON.parse(body)['contracts'];
// Add the response to req
req.contracts = contracts;
// Tell it to continue
next();
}).auth('johnsmith', 'johnspassword', false);
};
I need help.
I've been trying to wrap my head around async programming with node.js and socket.io for a day now. I understand that I need some flow control but I don't seem to understand how to properly implement it.
I have a redis datastore that has modules stored in a set let's say 'moda','modb'
instances of those modules are 'moda:instances' and in 'modb:instances' the properties of those instances are stored in 'moda:instancea' and 'modb:instanceb' as a hash.
I am trying to get the following json:
"moda": {"instancea": {"property1": "value1", "property2", "value2"}}, "modb": {"instanceb": {"property1": "value1"}}
Could someone give me a little push in the right direction?
Here is my current code:
// Asker's code: build a nested modules -> instances JSON from Redis sets and
// relay pub/sub notifications to the socket.
var io = require('socket.io').listen(2000);
var redis = require('redis').createClient();
var http = require('http');
var async = require('async');
var step = require('step');
io.sockets.on('connection', function (socket) {
// Dedicated connection for subscribe mode (a subscribed client cannot run
// regular commands).
var notifications = require('redis').createClient();
notifications.subscribe("notification");
notifications.on("message", function (channel, message) {
socket.send(message);
console.log(channel + ':' + message);
});
socket.on('modules', function(params, callback) {
var response = {};
async.series([
function (callback) {
console.log('1>');
redis.smembers('modules', function (err, modules) {
async.forEachSeries(modules, function(module, moduleCallback) {
response[module] = {}
redis.smembers(module + ':instances', function(err, instances) {
async.forEachSeries(instances, function(instance, instanceCallback) {
response[module][instance] = {}
console.log('2>' + module + ':' +instance);
instanceCallback();
});
// BUG (diagnosed in the answer below): moduleCallback fires here, before
// the inner forEachSeries has finished — it belongs in forEachSeries'
// completion callback (its third argument).
moduleCallback();
});
});
// BUG: likewise, this callback fires before the outer forEachSeries is
// done, so the series finishes with `response` still mostly empty.
callback();
});
},
function (callback) {
console.log('3');
callback();
}
], function() {
console.log(JSON.stringify(response));
});
});
});
The output from this code is:
info - socket.io started
debug - client authorized
info - handshake authorized JMMn1I8aiOMGCMPOhC11
debug - setting request GET /socket.io/1/websocket/JMMn1I8aiOMGCMPOhC11
debug - set heartbeat interval for client JMMn1I8aiOMGCMPOhC11
debug - client authorized for
debug - websocket writing 1::
1>
3
{"moda":{}}
2>moda:instancea
2>moda:instanceb
2>modb:instancea
The problem comes from the fact forEachSeries requires an additional callback as a third parameter (to be called when the whole processing is complete). You cannot just put some code after forEachSeries hoping it will be called once it is complete.
Here is your code modified:
// Corrected version: each forEachSeries receives its completion callback as
// the THIRD argument, so the next level only proceeds when the whole
// iteration has finished.
var response = {};
async.series([
function (callback) {
console.log('1>');
redis.smembers('modules', function (err, modules) {
async.forEachSeries(modules, function(module, moduleCallback) {
response[module] = {}
redis.smembers(module + ':instances', function(err, instances) {
async.forEachSeries(instances, function(instance, instanceCallback) {
response[module][instance] = {}
console.log('2>' + module + ':' +instance);
instanceCallback();
}, moduleCallback ); // fire moduleCallback only after all instances are done
});
}, callback ); // fire the series callback only after all modules are done
});
},
function (callback) {
console.log('3');
callback();
}],
function() {
// Runs last: response is now fully populated.
console.log(JSON.stringify(response));
});
Note how the callback, and moduleCallback are used as a third parameter. The output is:
1>
2>moda:instancea
2>moda:instanceb
2>modb:instancea
3
{"moda":{"instancea":{},"instanceb":{}},"modb":{"instancea":{}}}
which I guess is what you expected.
Additional remark: forEachSeries will process everything in sequence, the next operation waiting for the previous one to complete. This will generate plenty of roundtrips to Redis. forEach should be much more efficient here to leverage pipelining.
Take a look also at promise concept, with promises you configure the flow much more readable way.
First you need to prepare promise versions of redis methods that you're using:
// Build a promise-returning variant of smembers on the redis client prototype.
// NOTE(review): monkey-patching RedisClient.prototype affects every client in
// the process — acceptable here, but worth knowing.
var promisify = require('deferred').promisify;
var RedisClient = require('redis').RedisClient;
RedisClient.prototype.psmembers = promisify(RedisClient.prototype.smembers);
Then you can construct your flow as:
// Promise-based flow using the deferred library: .map and .end here are
// deferred's promise combinators, not Array methods.
// NOTE(review): `response` is assumed declared in the surrounding scope.
console.log('1>');
redis.psmembers('modules').map(function (module) {
response[module] = {};
return redis.psmembers(module + ':instances').map(function (instance) {
response[module][instance] = {};
console.log('2>' + module + ':' +instance);
});
}).end(function () {
// All nested lookups have resolved.
console.log('3');
console.log(JSON.stringify(response));
});
Check: https://github.com/medikoo/deferred