Child process reply after 10 min to browser API - javascript

I am trying to run a Node.js child process on the backend that takes at least 10 minutes to complete, and I want to send the response only after it finishes. On the Node.js side I have increased the server timeout to 10 minutes and that part is fine, but the browser is still a problem: it reports a timeout error after about 2 minutes of waiting.
I am using the axios module from npm, which has a timeout option, but that is not working.
Could someone tell me how to keep the browser waiting for more than 10 minutes so it can receive the message from the server and then render the UI?
UI code (action creator):
export const executeFile = (fileName) => {
    return (dispatch) => {
        axios.post(`/execute`, { fileName }, { timeout: 1000000 })
            .then(res => dispatch({
                type: 'EXECUTE_FILE',
                payload: res
            }))
            .catch(err => console.log(err))
    }
}
Backend code:
app.post('/execute', (req, res) => {
    console.log(req.body.fileName);
    // handler_function.deleteFolderRecursive()
    exec('sleep 200 && ls', (err, stdout, stderr) => {
        if (err) {
            console.error(err);
            // respond on failure so the request is not left hanging with no answer
            return res.status(500).send(stderr || err.message);
        }
        console.log(stdout, "success");
        res.send(stdout)
    });
});
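One thing worth checking besides the axios timeout: Node's HTTP server has its own socket timeout (two minutes by default in older Node versions), which matches the cutoff described above. Below is a minimal sketch, assuming an Express app served by Node's built-in HTTP server; the port and durations are illustrative, and a reverse proxy in front of Node may still enforce its own limit, in which case the 202-plus-polling approach discussed further down this page is the more robust option.
const express = require('express');
const { exec } = require('child_process');
const app = express();

// Sketch only: raise the server-side timeouts so Node keeps the socket open
// while the 10-minute child process runs (values are illustrative).
const server = app.listen(3000);
server.setTimeout(11 * 60 * 1000);   // socket inactivity timeout, slightly above the job length

app.post('/execute', (req, res) => {
    req.setTimeout(0);               // disable the timeout for this long-running request only
    exec('sleep 200 && ls', (err, stdout) => {
        if (err) return res.status(500).send(err.message);
        res.send(stdout);
    });
});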

Related

Stop webpage from waiting for cancelled XHR requests

I'm working on a Django project and the Javascript for one of my pages makes some XHR requests that can take a while to complete. If the user goes to this page, they have to wait for these requests to finish before the site lets them go to another page.
I've been trying to have the page cancel these requests when the user clicks through to another page, but even after the requests are cancelled the page still waits quite a while before changing URLs. This is how my requests are structured.
const controller = new AbortController()
const signal = controller.signal

let get_salesperson_sales = async () => {
    return await fetch(`/reports/salesperson-sales/${fromDate && toDate ? `from_date=${fromDate}&to_date=${toDate}` : ''}`, { signal })
        .then(response => response.json())
        .then(response => {
            document.getElementById('total_rep_sales').innerText = '$' + Number(response.total_sales.toFixed(2)).toLocaleString()
            return response
        })
        .catch(error => {
            if (error.name === 'AbortError') console.log('Salesperson sales fetch was aborted.')
            else errorMessage.innerHTML += '<p>Error getting sales by rep: ' + (error.responseJSON ? error.responseJSON.details : 'Unexpected error') + '</p>'
        })
}
There are 4 of these functions in total that I call with Promise.allSettled(). At the bottom of the page, I bind a couple functions to abort the requests.
$('#date-filter').submit(() => {
    // Stop loading current report before submitting
    controller.abort()
    return true
})

$(window).bind('beforeunload', () => {
    controller.abort()
})
The requests cancel successfully (I can see it in the Network tab of my browser, and my console logs show up in the console), but for some reason the page continues to wait after the requests are cancelled.
The part that's really tripping me up is that, while this happens in my production build, the page seems to change immediately upon cancelling requests when I run the project on localhost. The only major configuration difference between the web app in both builds is that Django's DEBUG is True in the local build and False in the production build. I've tried 4 different methods of cancelling requests and 2 different kinds of request ($.ajax and fetch), each time successfully cancelling the requests and running into this problem.
What could possibly be causing this issue and how can I force my page to change immediately instead of waiting like this?
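For reference, here is a minimal sketch of how the four fetch functions described above might be driven together with Promise.allSettled() and aborted as a group; the three extra function names are hypothetical placeholders assumed to follow the same pattern as get_salesperson_sales.
// Hypothetical sketch: get_customer_sales, get_product_sales and get_monthly_sales
// are placeholders assumed to share the same structure and the same signal.
let loadReport = async () => {
    const results = await Promise.allSettled([
        get_salesperson_sales(),
        get_customer_sales(),
        get_product_sales(),
        get_monthly_sales()
    ])
    // When controller.abort() fires, each fetch rejects with an AbortError; the
    // .catch handlers above swallow it, so allSettled resolves promptly instead
    // of keeping the page waiting.
    console.log(results.map(r => r.status))
}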

Node.js kills the Sharp process after a 5-second timeout

I use sharp in an onSend hook to resize images on the fly in Fastify.
fastify.addHook('onSend', async (request, reply, payload) => {
    const newPayload = await sharp(payload.filename).resize(200, 100).toFormat('jpg').toBuffer();
    return newPayload
})
But when I run this code, there is a 5-second gap between the resize finishing and Node.js killing some process at the end. Only after that does the image arrive in the browser.
Can you explain this behaviour?

How can I increase the default timeout on an http request?

I am making a call from my React app to a Node server that needs to run a command on some physical hardware. The hardware takes about three minutes to complete its routine; however, the request always times out in my React app after only two. The hardware is connected over serial and will print "done" to the command line when finished, so I need some way to see when that happens in my Node server.
I've tried increasing the setTimeout on my Node server, but to my understanding that only impacts the requests it makes to other servers and not the requests made to it. I've also tried spawning a child process rather than running exec, but I'm not sure how to access it in a later API call if it is scoped inside the initial call's function.
I have a function run in my server that looks like this:
function run(command) {
    return new Promise((resolve, reject) => {
        exec(command, (err, stdout, stderr) => {
            if (err) {
                reject(stderr);
                return;
            }
            resolve(stdout);
        });
    });
}
My api endpoint exec uses run like so:
app.post('/api/exec', (req, res) => {
    req.setTimeout(0); // some commands take a long time, so we shouldn't time out the response
    if (req.body.command) {
        run(req.body.command)
            .then(data => {
                return res.status(201).send({
                    message: data
                });
            }).catch(err => {
                return res.status(500).send({
                    message: err
                });
            });
    } else {
        return res.status(400).send({
            message: "exec requires a command member"
        });
    }
});
Finally, I am calling the api from my react app with the following:
execute = (c) => {
    fetch(`/api/exec`, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json'
        },
        body: JSON.stringify({
            command: c
        })
    }).then(resp => {
        ... do stuff with my app ...
I expect the server to run my command until completion and then respond with the stdout from running the command; however, my client application times out roughly two minutes after calling the execute function.
Several questions already have good answers about how to decrease the timeout for an API call by wrapping it in a timeout promise, but this method does not let me exceed the two-minute default timeout.
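Since the hardware prints "done" when it finishes, one option is to spawn the process instead of exec-ing it and resolve as soon as "done" appears on stdout. The following is only a sketch under that assumption (the command, arguments and matching logic would need to fit the real hardware), and the server's own socket timeout still has to be raised, as discussed for the question at the top of this page.
// Sketch only: watch the child's stdout for the hardware's "done" marker.
const { spawn } = require('child_process');

function runUntilDone(command, args = []) {
    return new Promise((resolve, reject) => {
        const child = spawn(command, args);
        let output = '';
        child.stdout.on('data', chunk => {
            output += chunk.toString();
            if (output.includes('done')) resolve(output); // hardware signalled completion
        });
        child.stderr.on('data', chunk => console.error(chunk.toString()));
        child.on('error', reject);                        // e.g. command not found
        child.on('close', code => {
            if (code !== 0) reject(new Error(`exited with code ${code}`));
            else resolve(output);                         // settling twice is a harmless no-op
        });
    });
}
The endpoint can use runUntilDone() exactly like the run() helper above, so the response is only sent once the hardware reports completion.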

How to notify HTTP client of the completion of a long task

I have a Node.js system that uploads a large number of objects to MongoDB and creates folders in Dropbox for each object. This takes around 0.5 seconds per object, so in situations where I have many objects it could take up to around a minute. What I currently do is notify the client that the array of objects has been accepted, using a 202 response code. How do I then notify the client of completion a minute later?
app.post('/BulkAdd', function (req, res) {
    issues = []
    console.log(req.body)
    res.status(202).send({ response: "Processing" });
    api_functions.bulkAdd(req.body).then((failed, issues, success) => {
        console.log('done')
    })
});
bulkAdd: async function (req, callback) {
    let failed = []
    let issues = []
    let success = []
    i = 1
    await req.reduce((promise, audit) => {
        // return promise.then(_ => dropbox_functions.createFolder(audit.scanner_ui)
        let globalData;
        return promise.then(_ => this.add(audit)
            .then(
                (data) => { globalData = data; return dropbox_functions.createFolder(data.ui, data) },
                (error) => { failed.push({ audit: audit, error: 'There was an error adding this case to the database' }); console.log(error) })
            .then(
                (data) => { console.log(data, globalData); return dropbox_functions.checkScannerFolderExists(audit.scanner_ui) },
                (error) => { issues.push({ audit: globalData, error: 'There was an error creating the case folder in dropbox' }) })
            .then(
                (data) => { return dropbox_functions.moveFolder(audit.scanner_ui, globalData.ui) },
                (error) => { issues.push({ audit: globalData, error: 'No data folder was found so an empty one was created' }); return dropbox_functions.createDataFolder(globalData.ui) })
            .then(
                () => success.push({ audit: globalData }),
                () => issues.push({ audit: globalData, error: 'Scanner folder found but items not moved' }))
        );
    }, Promise.resolve()).catch(error => { console.log(error) });
    return (failed, issues, success)
},
Well, the problem with making the client request wait is that it will time out after a certain period, or sometimes show an error with no response received.
What you can do is:
- Make a client request to the server to initiate the task, return 200 OK straight away, and keep doing the task on the server.
- Write a status file on the server after every object is inserted.
- Read that status from the client every 5-10 seconds to check whether the server has finished creating the objects (a sketch of this polling loop follows below).
- While the task is not yet completed on the server, show the completion percentage or some animation.
Or simply implement webhooks or WebSockets to maintain communication.
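Here is a minimal sketch of the polling variant, assuming bulkAdd is extended to report progress through a callback; the jobs map, the jobId and the /BulkAdd/status route are made-up names for illustration (the bookkeeping could equally live in a file or in MongoDB, as suggested above).
// Server side: keep per-job progress in memory (hypothetical jobs map / jobId).
const jobs = {};

app.post('/BulkAdd', (req, res) => {
    const jobId = Date.now().toString();
    jobs[jobId] = { done: false, processed: 0, total: req.body.length };
    res.status(202).send({ response: 'Processing', jobId });
    api_functions.bulkAdd(req.body, n => { jobs[jobId].processed = n; })  // assumed progress callback
        .then(() => { jobs[jobId].done = true; });
});

app.get('/BulkAdd/status/:jobId', (req, res) => {
    res.send(jobs[req.params.jobId] || { error: 'unknown job' });
});

// Client side: poll every 5 seconds until the job reports completion.
const poll = setInterval(async () => {
    const status = await fetch(`/BulkAdd/status/${jobId}`).then(r => r.json());
    console.log(`${status.processed}/${status.total} processed`);
    if (status.done) clearInterval(poll);
}, 5000);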

How to handle the Storage Queue using WebJobs

I just started to use Azure for my mobile development as well as my web development.
I am using Node.js for the Azure backend, with Mobile Services and Web Apps in Azure.
Here is the situation: I am using Azure Storage Queues, and a WebJob from my Web App to handle them. The messages in the queue are sent out to specific users via Notification Hubs (push notifications).
The queues can hold 50,000 or more messages, and each of them is pushed out to a user one by one. I tried to handle the queue with a WebJob scheduled at a 2-minute interval. I know a WebJob won't run two instances while a scheduled run is still in progress.
Initially I wanted to use a continuous WebJob, but it goes to pending restart once the script finishes. My assumption was that a continuous WebJob would run the script in an endless loop over and over again until it hit an exception or something went wrong. That assumption was wrong: it restarts by itself once the whole script succeeds. I know the restart delay can be adjusted to less than 60 seconds, but I am not sure whether that helps, as I also have a lot of async operations.
My script loops over the 50,000 or more user messages, sends out each push message via the Azure Node.js package, and on return deletes the message so that it no longer appears in the queue. So there is some async operation in every iteration of the loop.
Everything works, but the WebJob only executes for a maximum of 5 minutes and then runs again on the next schedule; it runs for at most 5 minutes regardless of the operation. With 1,000 messages from the queue everything works fine, but when the messages go up to 5,000 and above, the time is not sufficient. Some of the async operations therefore never complete, so those messages are not deleted.
Is there a way to extend the 5-minute execution time, or a better way to handle the Storage Queues? I looked into the WebJobs SDK, but it is limited to C# and Visual Studio; I am on Mac OS X and JavaScript, so I cannot use it.
Please advise; I have spent a lot of time figuring out the best way to handle the Storage Queue with WebJobs, but it does not seem to serve the purpose once the messages grow bigger and the async operations have to fit in only 5 minutes of execution time. I do not have any VMs at the moment; I only use PaaS in Azure.
According to your description:
each of them is pushed out to a user one by one
My script loops over the 50,000 or more user messages
So your requirement is to send each message in the queue to a user, and right now you retrieve all the messages from the queue at once, even when there are more than 50,000 of them, and loop over them for further operations?
If there is any misunderstanding, feel free to let me know.
In my opinion, you could get the top message of the queue one at a time and send it to your user; that would remarkably reduce the processing time, and it can be done in a continuous WebJob. You can refer to How To: Peek at the Next Message to see how to peek at the message at the front of a queue without removing it from the queue.
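For reference, a minimal sketch of peeking with the azure-storage Node package; the queue name is a placeholder, and peeking leaves the message in the queue (it still has to be retrieved and deleted separately, as in the snippets below).
var azureStorage = require('azure-storage');
var queueSvc = azureStorage.createQueueService('<accountName>', '<accountKey>');

// Peek at the message at the front of the queue without removing it.
queueSvc.peekMessages('myqueue', { numOfMessages: 1 }, function(error, result, response) {
    if (!error) {
        console.log(result[0].messagetext); // message text; the message stays queued
    }
});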
update
As I noticed you mentioned that you also have a Web App in Node.js in your whole project architecture, I considered whether you could leverage a continuous WebJob in that Web App to get one message and send it to the Notification Hub at a time.
And here is my test code snippet:
var azureStorage = require('azure-storage'),
    azure = require('azure'),
    accountName = '<accountName>',
    accountKey = '<accountKey>';

var queueSvc = azureStorage.createQueueService(accountName, accountKey);
var notificationHubService = azure.createNotificationHubService('<notificationhub-name>', '<connectionstring>');

queueSvc.getMessages('myqueue', { numOfMessages: 1 }, function(error, result, response) {
    if (!error) {
        // Message text is in result[0].messagetext
        var message = result[0];
        console.log(message.messagetext);
        var payload = {
            data: {
                msg: message.messagetext
            }
        };
        notificationHubService.gcm.send(null, payload, function(error) {
            if (!error) {
                // notification sent
                console.log('notification sent');
                queueSvc.deleteMessage('myqueue', message.messageid, message.popreceipt, function(error, response) {
                    if (!error) {
                        // Message deleted
                        console.log(response);
                    } else {
                        console.log(error);
                    }
                });
            }
        });
    }
});
For details, refer to How to use Notification Hubs from Node.js and https://github.com/Azure/azure-storage-node/blob/master/lib/services/queue/queueservice.js#L727
update2
After getting the idea from the Service Bus demo on GitHub, I modified the code above, which greatly improves the efficiency.
Here is the code snippet, for your information:
var queueName = 'myqueue';

function checkForMessages(queueSvc, queueName, callback) {
    queueSvc.getMessages(queueName, function(err, message) {
        if (err) {
            if (err === 'No messages to receive') {
                console.log('No messages');
            } else {
                console.log(err);
                // callback(err);
            }
        } else {
            callback(null, message[0]);
            console.log(message);
        }
    });
}

function processMessage(queueSvc, err, lockedMsg) {
    if (err) {
        console.log('Error on Rx: ', err);
    } else {
        console.log('Rx: ', lockedMsg);
        var payload = {
            data: {
                msg: lockedMsg.messagetext
            }
        };
        notificationHubService.gcm.send(null, payload, function(error) {
            if (!error) {
                // notification sent
                console.log('notification sent');
                console.log(lockedMsg)
                console.log(lockedMsg.popreceipt)
                queueSvc.deleteMessage(queueName, lockedMsg.messageid, lockedMsg.popreceipt, function(err2) {
                    if (err2) {
                        console.log('Failed to delete message: ', err2);
                    } else {
                        console.log('Deleted message.');
                    }
                })
            }
        });
    }
}

var t = setInterval(checkForMessages.bind(null, queueSvc, queueName, processMessage.bind(null, queueSvc)), 100);
I set the loop interval to 100 ms in setInterval; now it can process almost 600 messages per minute in my test.
The various configuration settings for WebJobs are explained on this wiki page. In your case you should increase the WEBJOBS_IDLE_TIMEOUT value, which is the time in seconds after which a triggered job times out if it hasn't produced any output in that period. The WEBJOBS_IDLE_TIMEOUT setting needs to be configured in the portal app settings, not via the app.config file.
