I've been having this issue for a couple of months now and still can't figure out how to fix it. I'm seeing a high number of connections to our database, and I assume it's because our connections aren't closing properly, which causes them to hang for long periods of time. In turn this creates a lot of overhead, which occasionally crashes our web application. The application currently uses the promise-mysql npm package to create a connection and query the database, and our web application uses Socket.IO to request these connections to our MySQL database.
I'm working with existing code that was here before me, so I did not set it up this way. That makes this issue harder for me to debug, because I'm not familiar with how the connections get closed after a successful or unsuccessful query.
When logging errors from our server I'm getting messages like this:
db error { Error: Connection lost: The server closed the connection.
at Protocol.end (/home/ec2-user/myapp/node_modules/mysql/lib/protocol/Protocol.js:113:13)
at Socket.<anonymous> (/home/ec2-user/myapp/node_modules/mysql/lib/Connection.js:109:28)
at Socket.emit (events.js:185:15)
at Socket.emit (domain.js:422:20)
at endReadableNT (_stream_readable.js:1106:12)
at process._tickCallback (internal/process/next_tick.js:178:19) fatal: true, code: 'PROTOCOL_CONNECTION_LOST' }
(Not sure if that has anything to do with the high number of connections I'm seeing or not)
I recently changed wait_timeout and interactive_timeout in MySQL to 5000, which is way lower than the default 28800, and setting them to this stopped the application from crashing so often.
This is the code for creating the database connection:
database.js file
import mysql from 'promise-mysql';
import env from '../../../env.config.json';

const db = async (sql, descriptor, serializedParameters = []) => {
    return new Promise(async (resolve, reject) => {
        try {
            const connection = await mysql.createConnection({
            //const connection = mysql.createPool({
                host: env.DB.HOST,
                user: env.DB.USER,
                password: env.DB.PASSWORD,
                database: env.DB.NAME,
                port: env.DB.PORT
            })
            if (connection && env.ENV === "development") {
                //console.log("There is a connection to the db for: ", descriptor);
            }
            let result;
            if (serializedParameters.length > 0) {
                result = await connection.query(sql, serializedParameters)
            } else {
                result = await connection.query(sql);
            }
            connection.end();
            resolve(result);
        } catch (e) {
            console.log("ERROR pool.db: " + e);
            reject(e);
        }
    });
}

export default db;
And this is an example of what the sockets look like:
sockets.js file
socket.on('updateTimeEntry', async (time, notes, TimeEntryID, callback) => {
    try {
        const results = await updateTimeEntry(time, notes, TimeEntryID);
        callback(true);
        //socket.emit("refreshJobPage", false, "");
    } catch (error) {
        callback(false);
    }
});

socket.on('selectDatesFromTimeEntry', (afterDate, beforeDate, callback) => {
    const results = selectDatesFromTimeEntry(afterDate, beforeDate).then((results) => {
        //console.log('selectLastTimeEntry: ', results);
        callback(results);
    })
});
And this is an example of the methods the sockets call to query the database:
timeEntry.js file
import db from './database';
export const updateTimeEntry = (time, notes, TimeEntryID) => {
    return new Promise(async (resolve, reject) => {
        try {
            const updateTimeEntry = `UPDATE mytable SET PunchOut = NOW(), WorkTimeTotal = '${time}', Notes = "${notes}" WHERE TimeEntryID = '${TimeEntryID}';`
            const response = await db(updateTimeEntry, "updateTimeEntry");
            resolve(response[0]);
        } catch (e) {
            console.log("ERROR TimeEntry.updateTimeEntry: " + e);
            reject(e);
        }
    });
};

//Gets a List for Assigned Jobs
export const selectDatesFromTimeEntry = (afterDate, beforeDate) => {
    return new Promise(async (resolve, reject) => {
        try {
            const selectDatesFromTimeEntry = `SELECT * FROM mytable.TimeEntry WHERE PunchIn >= '${afterDate}' && PunchIn < '${beforeDate}';`
            //console.log("Call: " + selectDatesFromTimeEntry);
            const response = await db(selectDatesFromTimeEntry, "selectDatesFromTimeEntry");
            //console.log("Response: " + response);
            resolve(response);
        } catch (e) {
            console.log("ERROR TimeEntry.selectDatesFromTimeEntry: " + e);
            reject(e);
        }
    });
};
I just really want to figure out why I'm seeing so much overhead from my database connections and what I can do to resolve it. I don't want to keep restarting my server each time it crashes, so hopefully I can find some answers. If anyone has suggestions or knows what I can change in my code to solve this issue, that would help me out a lot. Thanks!
EDIT 1
These are the errors I'm getting from MySQL:
2020-04-30T11:12:40.214381Z 766844 [Note] Aborted connection 766844 to db: 'mydb' user: 'xxx' host: 'XXXXXX' (Got timeout reading communication packets)
2020-04-30T11:12:48.155598Z 766845 [Note] Aborted connection 766845 to db: 'mydb' user: 'xxx' host: 'XXXXXX' (Got timeout reading communication packets)
2020-04-30T11:15:53.167160Z 766848 [Note] Aborted connection 766848 to db: 'mydb' user: 'xxx' host: 'XXXXXX' (Got timeout reading communication packets)
EDIT 2
Is there a way I can see why some of these connections would be hanging or going idle?
EDIT 3
I've been looking into using a pool instead, as it seems to be a more scalable and appropriate solution for my application. How can I achieve this with the existing code that I have?
You are opening a new connection for each and every query. Opening a connection is slow, there is a lot of overhead in doing so, and your server certainly does not allow an unlimited number of connections. The Node.js mysql package provides a pooling mechanism which would be a lot more efficient for you.
The goal is to reuse the connections as much as possible instead of always disposing of them right after the first query.
In your db.js, create a pool on startup and use it:
var pool = mysql.createPool({
    connectionLimit: 10, // Number of connections to create.
    host: env.DB.HOST,
    user: env.DB.USER,
    password: env.DB.PASSWORD,
    database: env.DB.NAME,
    port: env.DB.PORT
});
To execute your query, you would then simply await the pool and query it (keep the resolved value, since in promise-mysql the pool may itself be behind a promise):
const p = await pool;
return p.query(sql, serializedParameters);
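To tie this into the existing db.js (per EDIT 3), here is a minimal sketch. It assumes a version of promise-mysql in which createPool returns a promise that resolves to the pool; the connectionLimit of 10 is illustrative:

import mysql from 'promise-mysql';
import env from '../../../env.config.json';

// Create the pool once at module load; its connections are reused across queries.
const poolPromise = mysql.createPool({
    connectionLimit: 10, // illustrative; tune to your workload
    host: env.DB.HOST,
    user: env.DB.USER,
    password: env.DB.PASSWORD,
    database: env.DB.NAME,
    port: env.DB.PORT
});

const db = async (sql, descriptor, serializedParameters = []) => {
    const pool = await poolPromise;
    // pool.query() borrows a connection, runs the query, and releases the
    // connection back to the pool automatically; no manual end() is needed.
    return serializedParameters.length > 0
        ? pool.query(sql, serializedParameters)
        : pool.query(sql);
};

export default db;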
Related
I have an app with divided code (client / server). On the client side, I'd like Socket.IO to attempt multiple URLs (one at a time) until it connects successfully.
Here's my code:
const BAD_HOST = "http://localhost:8081";
const LOCAL_HOST = "http://localhost:8080";

const SOCKET_CONFIG = {
    upgrade: false,
    transports: ["websocket"],
    auth: { ... }, // Trimmed for brevity
    extraHeaders: { ... }, // Trimmed for brevity
};

let socket = io(BAD_HOST, SOCKET_CONFIG); // This connects fine when I use LOCAL_HOST

socket.on("connect_error", (err) => {
    console.log(err);
    socket = io(LOCAL_HOST, SOCKET_CONFIG); // DOES NOT WORK
});

socket.on("connect", () => { ... }); // Trimmed for brevity
In short, when I try to reassign the value of socket to a new io connection, it seems to retain the old, failed connection. My browser keeps throwing 'connect_error' messages from the bad URL:
WebSocket connection to 'ws://localhost:8081/socket.io/?EIO=4&transport=websocket' failed:
I checked but couldn't find any official documentation on this question.
I think an approach is already discussed here:
https://stackoverflow.com/a/22722710/656708
Essentially you have an array of URLs, which in your case would be:
const socketServerURLs = ["http://localhost:8081","http://localhost:8080"];
and then iterate over them, trying to initiate a socket connection, like this:
// something along these lines
socketServerURLs.forEach((url) => {
    // ...
    socket.connect(url, socketConfiguration, (client) => {});
    // ...
});
Then again, I don't know what a BAD_HOST entails. Assuming that you mean that a connection to that host failed, how would you know that without actually trying to connect to it?
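A more concrete sketch of that idea, assuming the standard socket.io-client API (connectToFirstAvailable is a hypothetical helper; reconnection is disabled so a dead host fails fast instead of retrying forever):

const { io } = require("socket.io-client");

const socketServerURLs = ["http://localhost:8081", "http://localhost:8080"];

// Hypothetical helper: resolve with the first socket that connects,
// trying each URL in order.
function connectToFirstAvailable(urls, config, index = 0) {
    return new Promise((resolve, reject) => {
        if (index >= urls.length) {
            return reject(new Error("No reachable socket server"));
        }
        const socket = io(urls[index], { ...config, reconnection: false });
        socket.once("connect", () => resolve(socket));
        socket.once("connect_error", () => {
            socket.close(); // drop the failed connection before trying the next URL
            resolve(connectToFirstAvailable(urls, config, index + 1));
        });
    });
}

// Usage: const socket = await connectToFirstAvailable(socketServerURLs, SOCKET_CONFIG);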
I have the following method to connect to MongoDB:
import { Db, MongoClient } from 'mongodb';
let cachedConnection: { client: MongoClient; db: Db } | null = null;

export async function connectToDatabase(mongoUri?: string, database?: string) {
    if (!mongoUri) {
        throw new Error(
            'Please define the MONGO_URI environment variable inside .env.local'
        );
    }
    if (!database) {
        throw new Error(
            'Please define the DATABASE environment variable inside .env.local'
        );
    }
    if (cachedConnection) return cachedConnection;
    cachedConnection = await MongoClient.connect(mongoUri, {
        useNewUrlParser: true,
        useUnifiedTopology: true,
    }).then((client) => ({
        client,
        db: client.db(database),
    }));
    return cachedConnection!;
}
And I use it in the following way:
const { db, client } = await connectToDatabase(
    config.URI,
    config.USERS_DATABASE
);
const user = await db
    .collection(config.USERS_COLLECTION)
    .findOne({ _id: new ObjectId(userId) });
It seems to be OK, but it is not: this method never closes the connections. For example, I have a cluster on Atlas, and the connection count keeps growing until it hits 500; after that the cluster stops serving, requests time out, and my backend crashes.
To solve this I tried calling client.close() just before returning the response to the frontend.
It throws an error saying MongoError: Topology is closed, please connect. Does it close the connection before the query finishes? Is that right? It happens even if I call it after the DB has responded.
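For illustration, a hypothetical handler mirroring the usage above shows the pattern: connectToDatabase caches a single MongoClient across calls, so closing it in one handler leaves the cache pointing at a dead client for every later caller:

const { db, client } = await connectToDatabase(config.URI, config.USERS_DATABASE);
const user = await db
    .collection(config.USERS_COLLECTION)
    .findOne({ _id: new ObjectId(userId) });
// Closing here invalidates cachedConnection; the next request receives the
// cached-but-closed client and fails with "Topology is closed".
await client.close();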
Do you think there is a way to solve this, or do I just have to repeat the entire procedure in each file where I need to connect to Mongo? Do you also think I did something wrong?
I'm fairly new to JavaScript and am trying to wrap my head around async, promises, etc.
I have an application running a TCP API (non-HTTP) on the localhost. I'm building an Electron app to interact with this API. I need to send a single request to the API every second and retrieve a single JSON object it returns.
I'm able to do this successfully (for a while) by running something like this:
const net = require('net');

function apiCall() {
    if (running) {
        setTimeout(() => {
            // Send the request
            request = '{"id":1,"jsonrpc":"2.0","method":"getdetails"}'
            socketClient = net.connect({host: 'localhost', port: 8888}, () => {
                socketClient.write(request + '\r\n');
            });
            // Listen for the response
            var response;
            socketClient.on('data', (data) => {
                response = JSON.parse(data).result;
                updateUI(response);
                socketClient.end();
            });
            // On disconnect
            socketClient.on('end', () => {
                console.log('Disconnected from API');
            });
            apiCall();
        }, refreshRate)
    }
}
After running this for an extended amount of time, it appears that the API server is crashing:
Error: connect ECONNREFUSED 127.0.0.1:8888
at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1146)
Unfortunately, I have no control over the API server or its source code. I'd like some clarification on whether my client might be causing the API server to crash by sending requests this way.
Should I be opening and closing the connection for each request or keep it open and send requests only every second?
If I should be keeping the connection open, how can I do this, and do I need to worry about keep-alive?
It looks like every time you call apiCall you create a new socket client and never remove the old socket client instances. This is a memory leak, and it will cause the application to crash after running for some time.
You can keep a persistent connection instead, like below:
const net = require("net");
const { once } = require("events");
let socketClient;
function apiCall() {
if (running) {
setTimeout(async () => {
const request = '{"id":1,"jsonrpc":"2.0","method":"getdetails"}';
// Create the socket client if it was not already created
if (!socketClient) {
socketClient = net.connect({ host: "localhost", port: 8888 });
// On disconnect
socketClient.on("end", () => {
console.log("Disconnected from API");
socketClient.destroy();
socketClient = null;
});
// Wait until connection is established
await once(socketClient, "connect");
}
// Send the request
socketClient.write(request + "\r\n");
// Listen for the response
const data = await once(socketClient, "data");
const response = JSON.parse(data).result;
updateUI(response);
apiCall();
}, refreshRate);
}
}
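For completeness, running, refreshRate, and updateUI are assumed from the question's surrounding code; a hypothetical driver might look like this:

// Hypothetical driver (these names are assumed from the question's code):
let running = true;
const refreshRate = 1000; // ms between requests

function updateUI(result) {
    console.log("Latest details:", result);
}

apiCall(); // start polling; set running = false to stop after the next tick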
Right now I'm writing a small web application using Node.js with my partner. In the application, we need to query some data from the database using Mongoose and then send the data to the client. To do this, I'm using the code below:
io.sockets.on('connection', function (socket) {
    var id = socket.id;
    socket.on('request', function (data) {
        mongoose.createConnection(dbConfig.url);
        var ClassX = require("./models/" + data.request);
        var class_query = ClassX.findOne({}, function (err, result) {
            if (err) {
                console.log("There is an error: " + err);
                return handleError(err);
            }
            if (!result) {
                console.log("No result!");
            }
            io.sockets.to(id).emit("response", result);
        });
    });
});
But it doesn't work.
And when the code is running, the log file of the database shows some information like below:
2017-02-23T15:40:48.426+0800 I NETWORK [thread1] connection accepted from 127.0.0.1:5015 #3 (3 connections now open)
2017-02-23T15:40:48.428+0800 I NETWORK [conn3] received client metadata from 127.0.0.1:5015 conn3: { driver: { name: "nodejs", version: "2.2.24" }, os: { type: "Windows_NT", name: "win32", architecture: "x64", version: "10.0.14393" }, platform: "Node.js v6.9.5, LE, mongodb-core: 2.1.8" }
The "[conn3] received client metadata ..." line seems weird. And, from many attempts, I believe the callback function of ClassX.findOne() is never executed, though I have no idea why.
What is the best way to have a single list of users shared between several processes? The processes are started using PM2.
The processes need access to the list in order to add and remove users and to check whether a user already exists in it.
The easiest way is to use Redis (or Memcached, or even MongoDB) to store the user list.
Otherwise you will have to handle some very complex IPC, since PM2 uses the Node cluster module, which is based on child_process.
You can use an in-memory data store like Redis.
Redis runs as a separate process and serves requests on a TCP port (by default 6379). It is a key-value data store and can be used by all your Node processes.
Here's how you can do it:
1. Install Redis (https://redis.io/).
2. Install the Node client for Redis: npm install --save redis
Now you can use Redis to store your application state data and share it across processes.
Refer to this link for a code example.
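As a minimal sketch of the idea, assuming the callback-style redis client and a Redis set named online_users (both names are illustrative):

const redis = require("redis");
const client = redis.createClient(); // assumes Redis on localhost:6379

// A Redis set acts as the shared user list; every PM2 worker sees the same data.
function addUser(userId, callback) {
    client.sadd("online_users", userId, callback);
}

function removeUser(userId, callback) {
    client.srem("online_users", userId, callback);
}

function userExists(userId, callback) {
    // SISMEMBER replies 1 if the member exists, 0 otherwise
    client.sismember("online_users", userId, (err, reply) => {
        if (err) return callback(err);
        callback(null, reply === 1);
    });
}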
I just wrote a job-tracking logger for a large web crawler system with up to 1,200 instances, using Redis.
Ok! Let's do it!
First you will need to define the client:
const redis = require("redis");
const client_redis = redis.createClient({
retry_strategy: function(options) {
if (options.error && options.error.code === "ECONNREFUSED") {
// End reconnecting on a specific error and flush all commands with
// a individual error
return new Error("The server refused the connection");
}
if (options.total_retry_time > 1000 * 60 * 60) {
// End reconnecting after a specific timeout and flush all commands
// with a individual error
return new Error("Retry time exhausted");
}
if (options.attempt > 10) {
// End reconnecting with built in error
return undefined;
}
// reconnect after
return Math.min(options.attempt * 100, 3000);
},
});
This function creates and updates a log entry:
const md5 = require("md5");       // required for hashing the key
const moment = require("moment"); // required for the timestamp

function create_and_update_log(productName2, url2, proc, msg) {
    var data_value = {
        id: 'BESTBUY::DATA_LOG::' + md5(productName2 + url2),
        totalrv: 'WAIT',
        product: productName2,
        url: url2,
        process: proc,
        task: msg,
        timestamp: moment().format('DD/MM/YYYY HH:mm:ss')
    };
    client_redis.set('BESTBUY::DATA_LOG::' + md5(productName2 + url2), JSON.stringify(data_value));
}
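For example, a crawler worker might record progress like this (the product name, URL, and message are illustrative):

create_and_update_log(
    "Some Product",
    "https://www.bestbuy.com/site/some-product",
    process.pid,
    "Scraping reviews... Total reviews left: 120"
);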
This function queries all of the log data:
async function get_log_redis() {
    return new Promise(function (resolve, reject) {
        try {
            var logger_data = {
                logger: []
            };
            client_redis.multi()
                .keys('BESTBUY::DATA_LOG::*', function (err, replies) {
                    replies.forEach(function (reply, index) {
                        client_redis.get(reply, function (err, data) {
                            if (!data.includes("Total reviews left: 0")) {
                                logger_data.logger.push(JSON.parse(data));
                            }
                            if (index == replies.length - 1) {
                                resolve(logger_data);
                            }
                        });
                    });
                })
                .exec(function (err, replies) {});
        } catch (err) {
            console.log(err);
        }
    });
}
Remember to replace BESTBUY::DATA_LOG:: with whatever key prefix you want to define.
And finally, here is how to fetch all logs whose key names begin with "BESTBUY::DATA_LOG::":
(async () => {
    var log_obj_data = await get_log_redis();
    // `response` here is the HTTP response object from the enclosing request handler
    response.writeHead(200, {
        "Content-Type": "application/json"
    });
    response.end(JSON.stringify(log_obj_data));
})();