I have built an Express server like this:
var mysql = require('mysql2');
var express = require('express');
var app = express();
var PORT = 3000;

app.get('/getDataFromDatabase', function(req, res) {
  console.log("Called");
  var con = mysql.createConnection({
    host: "localhost",
    user: "root",
    password: "", // password omitted here
    database: "casesdb"
  });
  con.connect(function(err) {
    if (err) throw err;
    con.query("SELECT * FROM cases", function(err, result, fields) {
      if (err) throw err;
      res.status(200).send(result);
      console.log("Test");
    });
  });
});

app.listen(PORT, () =>
  console.log(`Example app listening on port ${PORT}!`)
);
My goal is to call /getDataFromDatabase from client-side JavaScript and then use that data. How would I go about that?
Try the following on the client side:
<script>
  fetch('http://localhost:3000/getDataFromDatabase')
    .then(response => response.json())
    .then(data => console.log(data))
    .catch(err => console.error(err));
</script>
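On the server side, note that the route opens a brand-new MySQL connection on every request and never closes it. Here is a minimal sketch of the same route using a mysql2 connection pool instead (same casesdb schema as above; the pool size is an arbitrary choice):
var mysql = require('mysql2');
var express = require('express');
var app = express();
var PORT = 3000;

// one pool for the whole process; connections are reused across requests
var pool = mysql.createPool({
  host: "localhost",
  user: "root",
  password: "", // fill in your password
  database: "casesdb",
  connectionLimit: 10 // arbitrary; tune for your workload
});

app.get('/getDataFromDatabase', function(req, res) {
  // pool.query checks out a connection, runs the query, and returns it to the pool
  pool.query("SELECT * FROM cases", function(err, rows) {
    if (err) return res.status(500).send(err.message);
    res.status(200).json(rows);
  });
});

app.listen(PORT);
Also note that if the page issuing the fetch is not itself served from http://localhost:3000, the browser will block the request unless the server sends CORS headers (for example via the cors middleware).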
I'm trying to confirm my app has successfully connected to my DB. I set up a test to run once I am connected, but I am not seeing its results.
Right now I have a server.js file and a db.js file. When I connect, I do see the "Server is running on port 8080" message that comes from server.js. However, I am not seeing anything that is supposed to be shown from db.js.
I don't really know if this is working properly, or if I have done something that is cancelling out the messages that db.js is supposed to show.
server.js
const express = require("express");
const app = express();
const port = process.env.PORT || 8080;
const db = require("./db");

db.startTest();

app.listen(port, () => {
  console.log(`Server is running on port ${port}`);
});
db.js
require("dotenv").config({ path: __dirname + "/.env" });
const MongoClient = require("mongodb").MongoClient;
const uri = process.env.MONGODB_URI;
const client = new MongoClient(uri, { useNewUrlParser: true });
function startTest() {
client.connect((err) => {
if (err) {
console.log("Unable to connect to MongoDB");
} else {
console.log("Connected to DB");
const db = client.db("test");
const sampleCollection = db.collection("sample");
sampleCollection.insertOne({ name: "Sample User" }, (error, result) => {
if (error) {
console.log("Unable to insert document into the collection");
} else {
console.log("Document inserted into the collection");
}
});
}
});
}
module.exports = {
startTest: startTest,
};
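One thing worth checking: the MongoDB Node.js driver removed the callback API in v5, so on a recent driver client.connect((err) => ...) just returns a promise and your callback is never invoked, which would produce exactly the silence you describe. Here is a minimal promise-based sketch of the same test, assuming the same test database and sample collection (useNewUrlParser is a no-op on modern drivers and is dropped here):
// db.js, promise-based variant
require("dotenv").config({ path: __dirname + "/.env" });
const { MongoClient } = require("mongodb");

const client = new MongoClient(process.env.MONGODB_URI);

async function startTest() {
  try {
    await client.connect();
    console.log("Connected to DB");
    const result = await client
      .db("test")
      .collection("sample")
      .insertOne({ name: "Sample User" });
    console.log("Document inserted into the collection", result.insertedId);
  } catch (err) {
    console.log("Unable to connect to MongoDB", err);
  }
}

module.exports = { startTest: startTest };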
I am trying to connect my application to the database using the connection-pool method. It's connecting fine, and data insertion happens without any issues, but other queries in the same file are slowing down.
I have also tried the release() method, but it's not working properly.
How can I release the pooled client to the next query once the current query has executed?
Below is my dbpool.js file, where I am writing a common, generalized database connection:
var pg = require('pg');

var PGUSER = 'postgres';
var PGDATABASE = 'test_database';

var config = {
  user: PGUSER, // name of the user account
  host: 'localhost',
  database: PGDATABASE, // name of the database
  password: 'password#AWS',
  port: 5432,
  max: 10,
  idleTimeoutMillis: 10000
};

const pool = new pg.Pool(config);

const DB = {
  query: function(query, callback) {
    pool.connect((err, client, done) => {
      if (err) { return callback(err); }
      client.query(query, (err, results) => {
        // done();
        client.release();
        // if (err) { console.error("ERROR: ", err) }
        if (err) { return callback(err); }
        callback(null, results.rows);
      });
    });
  }
};

module.exports = DB;
I tried both the done() and the client.release() methods, but no luck. If I use both, I get an error message saying the client has already been released.
Below is my socket.js file:
var express = require('express');
const connection = require('./dbpool.js');

if (arData == '0022') {
  const queryText = "INSERT INTO alert(alert_data) VALUES('" + arData + "')";
  connection.query(queryText, (err, res) => {
    if (err) {
      console.log(err.stack);
    }
  });
}

if (arData == '0011') {
  const queryText = "INSERT INTO table2(alert_data) VALUES('" + arData + "')";
  connection.query(queryText, (err, res) => {
    if (err) {
      console.log(err.stack);
    }
  });
}
function ReverseCommunication() {
  const select1 = "SELECT * FROM alert WHERE action = '0' ORDER BY alert_id ASC LIMIT 1";
  connection.query(select1, (err, res) => {
    if (err) {
      console.log("Error1");
      res.json({ "error": true });
    } else {
      console.log("res==", res);
    }
  });
}

setInterval(function() {
  ReverseCommunication();
}, 2000);
With a pool you shouldn't need to close the connection yourself. The pool reuses connections for subsequent requests, so you don't have to connect to the DB each time.
(I'm not a PG expert here; I'm sure others could expand on this far better than I can.)
What works for us is to set up the dbpool file you have like this:
const { Pool } = require('pg');

const pool = new Pool({
  user: process.env.POSTGRES_USER,
  host: process.env.POSTGRES_URL,
  database: process.env.POSTGRES_DATABASE,
  password: process.env.POSTGRES_PASSWORD,
  port: process.env.POSTGRES_PORT,
  keepAlive: true,
  connectionTimeoutMillis: 10000, // 10 seconds
  max: 10
});

// connectivity check at startup; release the checked-out client right away
pool.connect()
  .then(client => {
    console.log('pg connected');
    client.release();
  })
  .catch(err => console.error(err));

module.exports = pool;
Then use pool.query directly, instead of the pool.connect pattern you have now.
Also, just a side note: what lib are you using for PG? I noticed your queries are built dynamically; you may want to adjust those to use parameterized queries to prevent possible SQL injection.
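For example, with the pool exported as above, here is a hedged rewrite of one of the inserts from your socket.js as a parameterized query (pg binds $1 server-side, so arData is never spliced into the SQL text):
const pool = require('./dbpool.js');

if (arData == '0022') {
  // the value travels separately from the SQL text, so no injection is possible
  pool.query('INSERT INTO alert(alert_data) VALUES($1)', [arData], (err) => {
    if (err) console.error(err.stack);
  });
}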
I'm pretty new to Node, so I apologize if this is a simple question. I'm trying to read the contents of a directory, ./resources, then display links to the files on the webpage. The catch is that the files in the directory are dynamic, so I'm using fs.readdir in Node.
But the <h1> is not showing on the index.html page; any ideas why?
const resDir = "resources/";
const resDirFiles = [];

const app = http.createServer((req, res) => {
  ...
  fs.readFile(filePath, (err, content) => {
    if (err) {
      // To be implemented
    } else {
      res.writeHead(200, {
        "Content-type": contentType
      });
      res.end(content, "utf8", callback(req, res));
    }
  });
});

function callback(req, res) {
  if (req.url == "/" || req.url == "/index") {
    fs.readdir(resDir, (err, files) => {
      files.forEach(file => {
        resDirFiles.push(file);
        res.end("<h1>Ok</h1>"); // placeholder
      });
    });
  }
}
This solution makes use of the http module and lists all files in the directory resDir. It also provides links to the files, but it doesn't work on all browsers/servers due to security concerns: you should avoid giving a free gateway between the user and the server. For something more robust you should use a proper file server.
var path = require('path');
var http = require('http');
var find = require('find');

var resDir = __dirname;

http.createServer(function (req, res) {
  res.setHeader('content-type', 'text/html');
  find.file(resDir, function (files) {
    res.write('<ul>');
    for (let i = 0; i < files.length; i++) {
      let fileRelative = path.relative(resDir, files[i]);
      res.write('<li><a href="' + fileRelative + '">' + fileRelative + '</a></li>');
    }
    res.write('</ul>');
    res.end();
  });
}).listen(3000, function () {
  console.log('server started at port 3000');
});
Now you can access it at http://localhost:3000.
You can also use one of the many ready-made npm projects for this straight away, like http-server.
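If you'd rather keep the file server inside your own app than run a separate one, a minimal sketch with Express plus the serve-index package would look like this (both packages are assumptions; neither appears in your original code):
const express = require('express');
const serveIndex = require('serve-index');

const app = express();
const resDir = __dirname + '/resources';

// express.static serves the files; serve-index renders a clickable listing of the directory
app.use('/resources', express.static(resDir), serveIndex(resDir, { icons: true }));

app.listen(3000, () => console.log('listening on port 3000'));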
I'm new to JavaScript and Node.js. Can someone answer the following questions?
1. How do I split the PostgreSQL part properly into another file?
2. What is the best practice for using pg pools?
3. How do I improve this code for production?
const express = require('express');
const app = express();
const pg = require('pg');

const pool = new pg.Pool({
  user: 'admin',
  password: 'test123!',
  host: '127.0.0.1',
  port: '5432',
  database: 'test_db'
});
app.get('/api/recipes', function(req, res) {
  pool.connect(function(err, client, done) {
    if (err) {
      console.log('Connection failed ' + err);
      res.status(400).send(err);
    }
    client.query('SELECT * FROM recipes;', function(err, result) {
      done();
      if (err) {
        console.log('Error with query! ERROR code: ' + err.code);
        res.status(400).send(err);
      } else {
        res.status(200).send(result.rows);
      }
    });
  });
});
app.get('/api/recipes/:id', function(req, res) {
  var id = req.params.id;
  pool.connect(function(err, client, done) {
    if (err) {
      console.log('Connection failed ' + err);
      res.status(400).send(err);
    } else {
      client.query('SELECT * FROM recipes WHERE recipes_id = $1;', [id], function(err, result) {
        done();
        if (err) {
          console.log('Error with query! ERROR code: ' + err.code);
          res.status(400).send(err);
        } else {
          res.status(200).send(result.rows);
        }
      });
    }
  });
});

app.listen(3000, function() {
  console.log('Server listening on port 3000');
});
There are a lot of ways folks split up code like you've described. I'll take it piece by piece.
First, pull any configurable variables out and set up one file that gets them from the environment (possibly with dev defaults in place; your choice on that). You can use a library like commander or convict, but honestly I prefer to just write a simple file that pulls them in myself:
// ./config.js
module.exports = {
  pool: {
    user: process.env.DB_USER || 'admin',
    password: process.env.DB_PW || 'test123!',
    host: process.env.DB_HOST || '127.0.0.1',
    port: process.env.DB_PORT || '5432',
    database: process.env.DB_NAME || 'test_db'
  }
};
As for your database calls, some folks like to use ORM-like tools such as sequelize, but again I tend to start simple and add things as needed. In your case, think about what boilerplate you can factor into common code, then wrap it in simple modules that expose only what the calling code really needs. For example, you'll notice that most of your routes connect to the pool, test for an error, run a query if nothing errored out, and finally render either the error or the query results, right? All of that can be wrapped into a fairly simple query function that handles the boilerplate internally and works with just a query expression and a callback, for example:
// ./db/index.js
const pg = require('pg');
const config = require('../config');

const pool = new pg.Pool(config.pool);

function query(sql, params, callback) {
  // maybe check for valid inputs here or something, but at least normalize in case folks don't pass params
  if (arguments.length < 3) {
    callback = params;
    params = null;
  }
  pool.connect((err, client, done) => {
    // just exit here and let the calling code know there was a problem
    if (err) return callback(err);
    // I haven't tested this w/ the pg library recently; you might have to do two of these if it doesn't like null as a second argument
    client.query(sql, params, (err, result) => {
      done(); // release the client back to the pool whether or not the query erred
      if (err) return callback(err);
      // calling code probably doesn't care about anything but rows, but you can do other stuff here if you prefer
      return callback(null, result.rows);
    });
  });
}

// You can also add additional functions if you want shorthand for things like querying by ID or with params, or similar
module.exports = { query };
I also think it can be helpful to store the SQL strings somewhere central, or on model objects, just so the routing code doesn't have to care about them. For a super simple example using your two routes, I might do something like this:
// ./db/queries.js
module.exports = {
  RECIPES: {
    LIST: 'SELECT * FROM recipes;',
    FIND_BY_ID: 'SELECT * FROM recipes WHERE recipes_id = $1;'
  }
};
OK, so now your routing code can be quite simple: you just get the db module and run the query, letting the route worry only about what it has to do with the request and response. Another option folks like is to create a module for each model in your app (e.g. a Recipe) that wraps the above two files in a set of static functions, so that your routes don't even know they're querying specifically. The calls in that case would look like Recipe.list(cb) or Recipe.findById(id, cb); there's a sketch of that after the routes example below. This style was made popular by Ruby on Rails a few years ago, and it has mixed acceptance in the Node community, but I'm mentioning it for completeness.
// ./routes/recipes.js
const router = require('express').Router();
const db = require('../db');
const queries = require('../db/queries');

router.get('/api/recipes', (req, res, next) => {
  db.query(queries.RECIPES.LIST, (err, rows) => {
    if (err) return next(err);
    return res.send(rows); // status 200 is the default here
  });
});

router.get('/api/recipes/:id', (req, res, next) => {
  const id = req.params.id;
  db.query(queries.RECIPES.FIND_BY_ID, [id], (err, rows) => {
    if (err) return next(err);
    return res.send(rows);
  });
});

module.exports = router;
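And for completeness, here is a hedged sketch of the model-style wrapper mentioned above; Recipe is a hypothetical module layered on the same db and queries files:
// ./models/recipe.js (hypothetical)
const db = require('../db');
const queries = require('../db/queries');

module.exports = {
  // routes can now call Recipe.list(cb) without knowing a SQL query is involved
  list: (cb) => db.query(queries.RECIPES.LIST, cb),
  findById: (id, cb) => db.query(queries.RECIPES.FIND_BY_ID, [id], cb)
};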
Finally, in your main Express setup file:
// ./app.js
const express = require('express');
const app = express();
const recipeRoutes = require('./routes/recipes'); // note: an index.js file gets imported just by requiring its folder, so that's a way to group features as well

app.use(recipeRoutes);

// I'm a big fan of error-handling middleware. There's a more complex approach I did in praeter that gives you
// http-verb-based errors you can catch and send the appropriate status with, but that's more complex than you might need here.
app.use((err, req, res, next) => {
  // this can be as simple or as complex as you like
  // note: it's a best practice to send only "clean" messages to the client, so you don't give away
  // that you're using a Postgres db or other details that make hacking easier
  console.error(err);
  res.status(500).send('Oops! Something went wrong!!');
});

app.listen(3000); // start the server as before
Obviously, there are a lot of ways to skin this cat, so I'd recommend mostly just looking for where you're repeating yourself, and then refactoring to repeat less. Also, if you're interested in making more production-ready apps in general, the 12-factor app is a must-read.
To answer number 1:
dbPool.js
const pg = require('pg');

exports.pool = new pg.Pool({
  user: 'admin',
  password: 'test123!',
  host: '127.0.0.1',
  port: '5432',
  database: 'test_db'
});
app.js
const express = require('express');
const app = express();
const { pool } = require('./dbPool');
....
You should create a config file and require that file in app.js:
--config
----config.js
--app.js
var config = {
  production: {
    pool: {
      user: 'admin',
      password: 'test123!',
      host: '127.0.0.1',
      port: '5432',
      database: 'test_db'
    }
  },
  development: {
    pool: {
      user: 'admin',
      password: 'test123!',
      host: '127.0.0.1',
      port: '5432',
      database: 'test_db'
    }
  }
};

exports.get = function get(env) {
  return config[env] || config.development;
};
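Usage in app.js would then look something like this (a sketch, assuming the layout above and that NODE_ENV selects the environment):
const pg = require('pg');
const config = require('./config/config').get(process.env.NODE_ENV);
const pool = new pg.Pool(config.pool);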
I am currently developing a Node.js backend for a mobile app with potentially many users; however, it's my first time developing with Node.js. I was following a tutorial on how to connect to a MySQL database via MySQL pools.
I am able to create a single MySQL connection and run queries via my routes.
The problem arises once I establish the file structure mentioned in the tutorial:
dbConnect
-[models]
--users.js
-db.js
-server.js
I am not getting an error message regarding the connection to the MySQL database, even if I enter a wrong password.
// server.js
///////////////////////////// basic setup ///////////////////////////////////
var restify = require('restify');
var bodyParser = require('body-parser');
var mysql = require('mysql');
var db = require('./db');
var users = require('./models/users');

///////////////////////////// initialisation of the server ///////////////////////////////////
var server = restify.createServer({
  name: 'testUsers',
});
server.use(restify.bodyParser({ mapParams: true }));

///////////////////////////// clean up the URL //////////////////////////////////////////
server.pre(restify.pre.sanitizePath());

///////////////////////////// start the MySQL instance //////////////////////////////////////////
db.connect(db.MODE_PRODUCTION, function (err) {
  if (err) {
    console.log('Unable to connect to MySQL.');
    process.exit(1);
  } else {
    server.listen(8080, function () {
      console.log('Listening on port 8080 ...');
    });
  }
});

///////////////////////////// implementation of the routes ///////////////////////////////////
function send(req, res, next) {
  var test = users.getAll();
  res.json({ test: 'Hello ' + req.params.name });
  return next();
}
My db.js file looks like the following:
var mysql = require('mysql'),
    sync = require('async');

var PRODUCTION_DB = 'userDB',
    TEST_DB = 'userDB';

exports.MODE_TEST = 'mode_test';
exports.MODE_PRODUCTION = 'mode_production';

var state = {
  pool: null,
  mode: null,
};

exports.connect = function (mode, done) {
  state.pool = mysql.createPool({
    connectionLimit: 50,
    host: 'localhost',
    user: 'user',
    password: 'password',
    database: 'userDB' // test
    // mode === exports.MODE_PRODUCTION ? PRODUCTION_DB : TEST_DB
  });
  state.mode = mode;
  done();
};

exports.get = function () {
  return state.pool;
};
Could it be that the tutorial left out an essential part of utilizing MySQL pools with Node.js?
Thanks in advance for at least trying to answer this question.
Are there better methods (sequelize?) available to create performant connections to a MySQL database?
It looks like creating the pool object does not actually connect to the database. A big clue is that the createPool function is not asynchronous, which is what you would expect if it were actually connecting at that moment.
You have to make use of the returned pool object to perform a query, which IS asynchronous.
From the documentation:
var mysql = require('mysql');
var pool = mysql.createPool({
  connectionLimit : 10,
  host            : 'example.org',
  user            : 'bob',
  password        : 'secret',
  database        : 'my_db'
});

pool.query('SELECT 1 + 1 AS solution', function (err, rows, fields) {
  if (err) throw err;
  console.log('The solution is: ', rows[0].solution);
});
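Applied to your db.js, that means exports.connect can issue a trivial query before calling done, so that wrong credentials fail fast instead of silently. A hedged tweak of the function from your question:
exports.connect = function (mode, done) {
  state.pool = mysql.createPool({
    connectionLimit: 50,
    host: 'localhost',
    user: 'user',
    password: 'password',
    database: mode === exports.MODE_PRODUCTION ? PRODUCTION_DB : TEST_DB
  });
  state.mode = mode;
  // force a real connection attempt; a wrong password now surfaces as err
  state.pool.query('SELECT 1', function (err) {
    done(err);
  });
};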