I am trying to check whether the OAuth token currently associated with the user's account is still valid before running an OAuth refresh, to reduce the number of refresh calls sent into the system. The only issue is that, with my current setup, this check isn't completing before the code that follows it runs, and that code requires a valid OAuth token to execute properly, causing uncaught errors. How can I make my Node.js server wait for this process to complete (and properly check/refresh the token) before the async function continues?
Tech used:
Google Firebase
Node.js
Express
app.post('[redacted]', async (req, res) => {
  if (req.body.token != '[redacted]') {
    res.status(500).end()
    return
  }

  let globalConfig;
  let oAuthToken;

  const cityRef = db.collection('orgsAndConfigs').doc(`${req.body.orgId}`);
  const doc = await cityRef.get();
  if (!doc.exists) {
    console.log('No such document!');
  } else {
    globalConfig = doc.data();
  }

  async function checkOAuthValid() {
    try {
      if (!globalConfig.oAuthToken || globalConfig.oAuthToken_expires.toDate() < Date() || !globalConfig.refresh_token) {
        return true
      } else {
        return false
      }
    } catch (e) {
      return true
    }
  }

  async function checkoAuth() {
    const runoAuth = await checkOAuthValid()
    if (runoAuth) {
      try {
        const params = new url.URLSearchParams({ client_id: '[redacted]', client_secret: '[redacted]', refresh_token: globalConfig.refresh_token, grant_type: 'refresh_token' });
        axios.post('https://driftapi.com/oauth2/token', params.toString())
          .then(async function (response) {
            oAuthToken = response.data.access_token;
            const orgDoc = db.collection('orgsAndConfigs').doc(`${req.body.orgId}`);
            var oAuthExpires = new Date();
            oAuthExpires.setSeconds(oAuthExpires.getSeconds() + 7200);
            const resolution = await orgDoc.update({ refresh_token: response.data.refresh_token, oAuthToken: oAuthToken, oAuthToken_expires: Timestamp(oAuthExpires) });
          })
      } catch (e) {
        const orgDoc = db.collection('orgsAndConfigs').doc(`${req.body.orgId}`);
        const resolution = await orgDoc.update({ refresh_token: null });
      }
    } else {
      oAuthToken = globalConfig.oAuthToken;
    }
  }

  const tokenAquired = await checkoAuth();
[remainder of the code]
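The root cause is that checkoAuth never awaits the axios.post(...) promise: the .then chain is detached, so await checkoAuth() resolves before the refresh request has finished, and the surrounding try/catch can never see a rejection from that detached chain. Awaiting the request directly forces the handler to wait. Here is a minimal sketch of checkoAuth rewritten that way, with the same collection and field names as above; it assumes axios, url, db, and Timestamp are in scope as in the original, notes that Date() without new returns a string (so the expiry comparison never behaved as intended), and assumes the Firestore timestamp should be built with Timestamp.fromDate().

async function checkoAuth() {
  // Compare against a real Date object; Date() without `new` returns a string.
  const needsRefresh = !globalConfig.oAuthToken
    || !globalConfig.refresh_token
    || globalConfig.oAuthToken_expires.toDate() < new Date();

  if (!needsRefresh) {
    oAuthToken = globalConfig.oAuthToken;
    return oAuthToken;
  }

  try {
    const params = new url.URLSearchParams({
      client_id: '[redacted]',
      client_secret: '[redacted]',
      refresh_token: globalConfig.refresh_token,
      grant_type: 'refresh_token',
    });
    // Awaiting here is the fix: the handler cannot continue until the
    // refresh call resolves or throws.
    const response = await axios.post('https://driftapi.com/oauth2/token', params.toString());
    oAuthToken = response.data.access_token;

    const oAuthExpires = new Date();
    oAuthExpires.setSeconds(oAuthExpires.getSeconds() + 7200);
    await db.collection('orgsAndConfigs').doc(`${req.body.orgId}`).update({
      refresh_token: response.data.refresh_token,
      oAuthToken: oAuthToken,
      // Assumption: Firestore Timestamps are normally built via Timestamp.fromDate().
      oAuthToken_expires: Timestamp.fromDate(oAuthExpires),
    });
    return oAuthToken;
  } catch (e) {
    // The catch now actually observes a failed refresh.
    await db.collection('orgsAndConfigs').doc(`${req.body.orgId}`).update({ refresh_token: null });
    throw e;
  }
}

With this shape, const tokenAquired = await checkoAuth(); genuinely blocks until the token is stored, and the remainder of the code can rely on oAuthToken being set (or handle the thrown error).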
I am new to GTM + GA. I am trying to display Google Analytics (GA4) reports on my webpage. I created an OAuth client ID in the Google Cloud console and completed the other settings there. Through JavaScript code I am trying to get an access token from the Google API, and I am getting the below exception.
After successful authentication I will integrate the GA reports with my web page. Below is my JavaScript code for getting the access token.
function main(propertyId = 'YOUR-GA4-PROPERTY-ID') {
  propertyId = '347415282';

  const { OAuth2Client } = require('google-auth-library');
  const { grpc } = require('google-gax');
  const { BetaAnalyticsDataClient } = require('@google-analytics/data');
  const http = require('http');
  const url = require('url');
  const open = require('open');
  const destroyer = require('server-destroy');

  const keys = require('./oauth2.keys.json');
  const SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'];

  function getAnalyticsDataClient(authClient) {
    const sslCreds = grpc.credentials.createSsl();
    const credentials = grpc.credentials.combineChannelCredentials(
      sslCreds,
      grpc.credentials.createFromGoogleCredential(authClient));
    return new BetaAnalyticsDataClient({
      sslCreds: credentials,
    });
  }

  function getOAuth2Client() {
    return new Promise((resolve, reject) => {
      const oAuth2Client = new OAuth2Client(
        keys.web.client_id,
        keys.web.client_secret,
        'http://localhost:3000/oauth2callback');
      const authorizeUrl = oAuth2Client.generateAuthUrl({
        access_type: 'offline',
        scope: SCOPES.join(' '),
      });
      const server = http
        .createServer(async (req, res) => {
          try {
            if (req.url.indexOf('/oauth2callback') > -1) {
              const qs = new url.URL(req.url, 'http://localhost:3000')
                .searchParams;
              const code = qs.get('code');
              console.log(`Code is ${code}`);
              res.end('Authentication successful! Please return to the console.');
              server.destroy();
              const r = await oAuth2Client.getToken(code);
              oAuth2Client.setCredentials(r.tokens);
              console.info('Tokens acquired.');
              resolve(oAuth2Client);
            }
          } catch (e) {
            reject(e);
          }
        })
        .listen(3000, () => {
          console.info(`Opening the browser with URL: ${authorizeUrl}`);
          open(authorizeUrl, { wait: false }).then(cp => cp.unref());
        });
      destroyer(server);
    });
  }

  async function runReport() {
    const oAuth2Client = await getOAuth2Client();
    const analyticsDataClient = getAnalyticsDataClient(oAuth2Client);
    const [response] = await analyticsDataClient.runReport({
      property: `properties/${propertyId}`,
      dateRanges: [
        {
          startDate: '2020-03-31',
          endDate: 'today',
        },
      ],
      dimensions: [
        {
          name: 'city',
        },
      ],
      metrics: [
        {
          name: 'activeUsers',
        },
      ],
    });
    console.log('Report result:');
    response.rows.forEach(row => {
      console.log(row.dimensionValues[0], row.metricValues[0]);
    });
  }

  runReport();
}

process.on('unhandledRejection', err => {
  console.error(err.message);
  process.exitCode = 1;
});

main(...process.argv.slice(2));
Please let me know how to get rid of this issue.
Regards,
Prabhash
I want to populate my database with some random data. I have used Faker.js to generate that data. I'm using MongoDB on my localhost, and all the data properly follows the validation rules from the schema. The problem I'm having is with closing the connection after the data is inserted: I want to close the connection as soon as the data is populated. I'm using async functions to keep track of everything, but something is not going right.
Here is my code for seeds.js, which is the script I'm using to populate the database:
const path = require("path");
require("dotenv").config({ path: path.resolve(__dirname, "../.env") });
var mongoose = require("mongoose");
mongoose.connect(process.env.MONGODB_URI);
require("../models/User");
require("../models/Item");
require("../models/Comment");
var Item = mongoose.model("Item");
var Comment = mongoose.model("Comment");
var User = mongoose.model("User");
const ItemData = require("../data/item.json");
const CommentData = require("../data/comment.json");
const UserData = require("../data/user.json");
async function InsertData() {
  ItemData.forEach(async (item) => {
    item.seller = item.seller.$oid;
    const oldItem = await Item.find({ title: item.title });
    if (!oldItem.length) {
      var newItem = new Item(item);
      await newItem.save();
    } else {
      console.log(item.slug);
    }
  });
  UserData.forEach(async (user) => {
    const oldUser = await User.find({ username: user.username });
    if (!oldUser.length) {
      var user = new User(user);
      await user.save();
    } else {
      console.log(user.username);
    }
  });
  CommentData.forEach(async (comment) => {
    comment.item = comment.item.$oid;
    comment.seller = comment.seller.$oid;
    var newComment = new Comment(comment);
    const oldComment = await Comment.find({ _id: newComment.id });
    if (!oldComment.length) {
      await newComment.save();
    } else {
      console.log(comment.body);
    }
  });
}

async function cleanup() {
  await Item.deleteMany({}, () => console.log("Data Cleared Item"));
  await Comment.deleteMany({}, () => console.log("Data Cleared Comment"));
  await User.deleteMany({}, () => console.log("Data Cleared User"));
}

async function main() {
  InsertData().then(async () => {
    console.debug('Data Inserted. Closing connection.');
    await mongoose.connection.close();
  });
}

main();
Here is the stack trace of the error
/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/connection/pool.js:841
cb(new MongoError('pool destroyed'));
^
MongoError: pool destroyed
at Pool.write (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/connection/pool.js:841:8)
at _command (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/wireprotocol/command.js:120:10)
at command (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/wireprotocol/command.js:28:5)
at Object.query (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/wireprotocol/query.js:66:3)
at Server.query (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/topologies/server.js:644:16)
at FindOperation.execute (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/operations/find.js:38:12)
at /Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/operations/execute_operation.js:144:17
at Server.selectServer (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/topologies/server.js:832:3)
at Server.selectServer (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/topologies/topology_base.js:342:32)
at executeWithServerSelection (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/operations/execute_operation.js:131:12)
at /Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/operations/execute_operation.js:70:9
at maybePromise (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/utils.js:685:3)
at executeOperation (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/operations/execute_operation.js:34:10)
at Cursor._initializeCursor (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/cursor.js:534:7)
at Cursor._initializeCursor (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/cursor.js:186:11)
at nextFunction (/Users/karnikkanojia/Desktop/Anythink-Market-21cto/backend/node_modules/mongodb/lib/core/cursor.js:737:10)
error Command failed with exit code 1.
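The culprit is Array.prototype.forEach: it does not wait for async callbacks, so InsertData() resolves as soon as the three loops have merely started their saves, and mongoose.connection.close() then destroys the pool while those saves are still in flight, which is exactly the MongoError: pool destroyed above. A minimal sketch of InsertData and main using sequential for...of loops instead (same models and JSON data as above; the user variable is also renamed, since var user = new User(user) shadows the data it is built from):

async function InsertData() {
  for (const item of ItemData) {
    item.seller = item.seller.$oid;
    const oldItem = await Item.find({ title: item.title });
    if (!oldItem.length) {
      await new Item(item).save();
    } else {
      console.log(item.slug);
    }
  }

  for (const userData of UserData) {
    const oldUser = await User.find({ username: userData.username });
    if (!oldUser.length) {
      await new User(userData).save();
    } else {
      console.log(userData.username);
    }
  }

  for (const comment of CommentData) {
    comment.item = comment.item.$oid;
    comment.seller = comment.seller.$oid;
    const newComment = new Comment(comment);
    const oldComment = await Comment.find({ _id: newComment.id });
    if (!oldComment.length) {
      await newComment.save();
    } else {
      console.log(comment.body);
    }
  }
}

async function main() {
  // Nothing closes until every save above has completed.
  await InsertData();
  console.debug('Data Inserted. Closing connection.');
  await mongoose.connection.close();
}

(Promise.all over a .map would also work here if insertion order does not matter.)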
I am writing a Lambda function to add hosts to an SQS queue for a rolling restart. The code I have written works individually, but not together, even when I hard-code values in the constructor. This doesn't appear to be a memory/CPU issue. I tried running the function with 1 GB of memory, even though it only uses about 80 MB. The average execution time for the individual functions is about 0.5 seconds (it shouldn't take more than about 1.5 seconds to execute in total). I did try running this function with a 30-second timeout, but it still timed out.
I work behind a corporate proxy and have to hand-jam the code. I don't have an IDE or IntelliSense on my internet-facing network. There may be typos here, but not in the actual code. I have omitted my module imports and variable declarations to save time; they aren't relevant to the issue at hand.
EDIT: I added the module imports and variable declarations to the first example to hopefully alleviate some confusion.
Here are just a few things I have tried. This does not work (timing out):
// Custom lambda layer
const { marklogic, aws } = require('nodejs-layer-lib');

const { HOSTS, DOMAIN, PORT, USERNAME, PASSWORD, RESTART_QUEUE_NAME } = process.env;

const params = [
  'format=json'
];

const options = {
  port: PORT,
  params: params,
  httpOptions: {
    headers: {
      'Authorization': `Basic ${Buffer.from(`${USERNAME}:${PASSWORD}`).toString('base64')}`
    },
    method: 'GET'
  }
};

const taskServers = (HOSTS.split(',') || []).map(host => {
  const _host = host.split(':');
  return {
    id: _host[0],
    name: `http://${_host[1].toLowerCase()}.${DOMAIN}`
  };
});

exports.handler = async () => {
  let hosts, queueUrl, addToQueueResults;

  try {
    hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
  } catch (e) { console.error('hosts', e); }

  try {
    queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
  } catch (e) { console.error('queueUrl ', e); }

  try {
    addToQueueResults = await aws.sqs.addMessages(queueURL, hosts);
  } catch (e) { console.error('addToQueueResults ', e); }

  return {
    status: 200,
    body: addToQueueResults
  };
}
This does not work (timing out):
// Modules imports and variable declarations here...
exports.handler = async () => {
  const hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
  const queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
  const addToQueueResults = await aws.sqs.addMessages(queueURL, hosts);

  return {
    status: 200,
    body: addToQueueResults
  };
}
This does not work (timing out):
// Modules imports and variable declarations here...
exports.handler = async () => {
  const hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
  const queueUrl = await aws.sqs.getQueueUrlByName('my-queue-name');
  const addToQueueResults = await aws.sqs.addMessages('http://queueurl.com', ['anything', 'in', 'here']); // Doesn't even need the queueUrl or hosts anymore

  return {
    status: 200,
    body: addToQueueResults
  };
}
This works. It will return the host objects I am expecting in the response:
// Modules imports and variable declarations here...
exports.handler = async () => {
  const hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);

  return {
    status: 200,
    body: hosts
  };
}
This works. It will get the queue url, then add messages to my SQS queue and return the SQS response:
// Modules imports and variable declarations here...
exports.handler = async () => {
  const queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
  const addToQueueResults = await aws.sqs.addMessages(queueUrl, ['anything', 'in', 'here']);

  return {
    status: 200,
    body: addToQueueResults
  };
}
I tried implementing the async handler pattern from AWS Lambda function handler in Node.js and reviewed many AWS Lambda execution troubleshooting documents. The MarkLogic management API runs on port 8002 by default, and I think the aws-sdk module uses http/https (80/443), so I don't think the ports are getting tied up.
What am I missing here?
EDIT 2: This has something to do with how promises are handled with AWS Lambda. I cannot find much information about this. Even following the instructions in AWS Lambda function handler in Node.js for "Async Handlers" I cannot get this to work. It works perfectly fine locally with or without my custom lambda layer.
Node.js runtime: 12.x (I didn't mention this before)
This also doesn't work (timing out):
// Modules imports and variable declarations here...
exports.handler = async function (event) {
  const promise = function () {
    return new Promise(async function (resolve, reject) {
      try {
        const hosts = await marklogic.hosts.getHosts(taskServers, options) || [];
        const queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
        const addToQueueResults = await aws.sqs.addMessages(queueUrl, hosts);
        resolve({
          status: 200,
          body: addToQueueResults
        });
      } catch (error) {
        reject({
          status: 500,
          error: error
        });
      }
    });
  };
  return promise(); // Throws error without constructor despite the AWS doc example
}
Unless some AWS Lambda genius has run into a similar issue before with Node.js, I am just going to convert it into 2 Lambda functions and use Step Functions to process them.
There was a typo in queueUrl (I imagine it's not that, but worth a try!).
Please run:
// Custom lambda layer
const { marklogic, aws } = require('nodejs-layer-lib');

const { HOSTS, DOMAIN, PORT, USERNAME, PASSWORD, RESTART_QUEUE_NAME } = process.env;

const params = [
  'format=json'
];

const options = {
  port: PORT,
  params,
  httpOptions: {
    headers: {
      Authorization: `Basic ${Buffer.from(`${USERNAME}:${PASSWORD}`).toString('base64')}`
    },
    method: 'GET'
  }
};

const taskServers = (HOSTS.split(',') || []).map(host => {
  const _host = host.split(':');
  return {
    id: _host[0],
    name: `http://${_host[1].toLowerCase()}.${DOMAIN}`
  };
});

exports.handler = async () => {
  let hosts, queueUrl, addToQueueResults;

  try {
    hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
  } catch (e) { console.error('hosts', e); }

  try {
    queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
  } catch (e) { console.error('queueUrl ', e); }

  try {
    addToQueueResults = await aws.sqs.addMessages(queueUrl, hosts);
  } catch (e) { console.error('addToQueueResults ', e); }

  return {
    status: 200,
    body: JSON.stringify(addToQueueResults)
  };
};
// keeping the same format.. ^^
If no luck, here is what is on my mind: as aws-sdk is included out of the box in Lambda, it's not customary to require it extraneously via a layer, and although it may not look to be imported at top level by marklogic, it may be bundled deep within marklogic; then, when you import AWS and change config (in the layer), it overwrites it.
Let's find out:
Step 1:
So, this you say should work, if we ignore the AWS import and just import marklogic?
// Custom lambda layer
// const { marklogic, aws } = require('nodejs-layer-lib'); // ignoring AWS for now
const { marklogic } = require('nodejs-layer-lib');

const { HOSTS, DOMAIN, PORT, USERNAME, PASSWORD, RESTART_QUEUE_NAME } = process.env;

const params = [
  'format=json'
];

const options = {
  port: PORT,
  params,
  httpOptions: {
    headers: {
      Authorization: `Basic ${Buffer.from(`${USERNAME}:${PASSWORD}`).toString('base64')}`
    },
    method: 'GET'
  }
};

const taskServers = (HOSTS.split(',') || []).map(host => {
  const _host = host.split(':');
  return {
    id: _host[0],
    name: `http://${_host[1].toLowerCase()}.${DOMAIN}`
  };
});

exports.handler = async () => {
  // let hosts, queueUrl, addToQueueResults;
  let hosts;

  try {
    hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
    console.log('hosts => ', hosts);
    // queueUrl = await aws.sqs.getQueueUrlByName(RESTART_QUEUE_NAME);
    // addToQueueResults = await aws.sqs.addMessages(queueUrl, hosts);
    return {
      status: 200,
      body: JSON.stringify(hosts)
    };
  } catch (error) {
    console.log('error => ', error);
    throw error;
  }
};
OK, so if that works:
Step 2 (please set the region for SQS and also hard-code the queueUrl):
// Custom lambda layer
// const { marklogic, aws } = require('nodejs-layer-lib');
const { marklogic } = require('nodejs-layer-lib');
const AWS = require('aws-sdk');

AWS.config.update({ region: 'eu-west-1' }); // Please set region accordingly

const sqs = new AWS.SQS({ apiVersion: '2012-11-05' });

const { HOSTS, DOMAIN, PORT, USERNAME, PASSWORD, RESTART_QUEUE_NAME } = process.env;

const params = [
  'format=json'
];

const options = {
  port: PORT,
  params,
  httpOptions: {
    headers: {
      Authorization: `Basic ${Buffer.from(`${USERNAME}:${PASSWORD}`).toString('base64')}`
    },
    method: 'GET'
  }
};

const taskServers = (HOSTS.split(',') || []).map(host => {
  const _host = host.split(':');
  return {
    id: _host[0],
    name: `http://${_host[1].toLowerCase()}.${DOMAIN}`
  };
});

exports.handler = async () => {
  let hosts, addToQueueResults;

  try {
    hosts = (await marklogic.hosts.getHosts(taskServers, options) || []);
    console.log('hosts => ', hosts);

    const queueUrl = 'Please hard code the queueUrl for now';

    const sqsParams = {
      MessageBody: JSON.stringify(hosts), // SQS message bodies must be strings
      QueueUrl: queueUrl
    };

    addToQueueResults = await sqs.sendMessage(sqsParams).promise();
    console.log('addToQueueResults => ', addToQueueResults);

    return {
      status: 200,
      body: JSON.stringify(addToQueueResults)
    };
  } catch (error) {
    console.log('error => ', error);
    throw error;
  }
};
IF that doesn't work, then Step 3: move the require of marklogic to below the require of AWS, setting the region as in this last example (so any deeply nested marklogic AWS logic we're unaware of now overwrites your AWS require), and re-run it. Fingers crossed :-)
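For reference, a short sketch of the ordering Step 3 describes, keeping the same layer and hard-coded region as Step 2:

// Step 3 sketch: aws-sdk is required and configured first, then marklogic,
// so any AWS config bundled deep inside marklogic loads afterwards
// (per the hypothesis above).
const AWS = require('aws-sdk');
AWS.config.update({ region: 'eu-west-1' }); // set your region accordingly
const sqs = new AWS.SQS({ apiVersion: '2012-11-05' });

// marklogic is now required below the AWS setup
const { marklogic } = require('nodejs-layer-lib');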
With the Google OAuth2 library, I can successfully authenticate a user on their first pass through, get their refresh token and first access token. Until the token expires, everything works as expected.
However, when the access token expires, I need to get a new access token and store these tokens in my data store using the existing refresh token. I am aware the documentation states tokens should re-fetch themselves when they expire, but as I am creating a new client for each call (to ensure tokens are not re-used between users), I think the client gets torn down before a token gets a chance to refresh itself.
Inspecting what the library does when calling the actual Google API, I should be able to get new access tokens by calling the client.refreshAccessToken() method, but the response from this call gives me the invalid_grant Bad Request error. I have compared the actual API request this method makes to the one on the Google OAuth2 playground, and the two calls are identical, although their call for refreshing their token works and mine does not.
Attached is my code as it currently stands. Please send help; I don't have any hair left to pull out!
const { google } = require('googleapis')

const scopes = [
  'https://www.googleapis.com/auth/spreadsheets.readonly',
  'https://www.googleapis.com/auth/userinfo.email',
  'https://www.googleapis.com/auth/drive.readonly'
]

module.exports = (env, mongo) => {
  const getBaseClient = () => {
    const { OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET, OAUTH_CALLBACK_URL } = env.credentials
    return new google.auth.OAuth2(
      OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET, OAUTH_CALLBACK_URL
    )
  }

  const getNewAccessTokens = async (authId, refreshToken) => {
    const { tokens } = await getBaseClient().getToken(refreshToken)
    await mongo.setAccessTokensForAuthUser(authId, { ...tokens, refresh_token: refreshToken })
    return tokens
  }

  const getAuthedClient = async (authId) => {
    let tokens = await mongo.getAccessTokensForAuthUser(authId)
    if (!tokens.access_token) {
      tokens = await getNewAccessTokens(authId, tokens.refresh_token)
    }
    const client = getBaseClient()
    client.setCredentials(tokens)
    if (client.isTokenExpiring()) {
      const { credentials } = await client.refreshAccessToken()
      tokens = { ...credentials, refresh_token: tokens.refreshToken }
      await mongo.setAccessTokensForAuthUser(authId, tokens)
      client.setCredentials(tokens)
    }
    return client
  }

  const generateAuthUrl = (userId) => {
    return getBaseClient().generateAuthUrl({
      access_type: 'offline',
      scope: scopes,
      state: `userId=${userId}`
    })
  }

  const getUserInfo = async (authId) => {
    const auth = await getAuthedClient(authId)
    return google.oauth2({ version: 'v2', auth }).userinfo.get({})
  }

  const listSheets = async (authId) => {
    const auth = await getAuthedClient(authId)
    let nextPageToken = null
    let results = []
    do {
      const { data } = await google
        .drive({ version: 'v3', auth })
        .files.list({
          q: 'mimeType = \'application/vnd.google-apps.spreadsheet\'',
          includeItemsFromAllDrives: true,
          supportsAllDrives: true,
          corpora: 'user',
          orderBy: 'name',
          pageToken: nextPageToken
        })
      nextPageToken = data.nextPageToken
      results = results.concat(data.files)
    } while (nextPageToken)
    return results
  }

  return {
    generateAuthUrl,
    getUserInfo,
    listSheets
  }
}
I solved my own problem.
I was conflating access_codes with refresh_tokens: I believed the code you receive from the auth url was the refresh_token, so I was storing it and attempting to reuse it to get more access_tokens. This is wrong. Don't do this.
You get the access_code from the authentication url, and the first time you use that with the client.getToken(code) method, you receive the refresh_token and access_token.
I've attached updated and working code should anyone wish to use it.
I should also mention that I added prompt: 'consent' to the auth url, so that you always receive an access_code you can use to get a refresh_token when someone re-authenticates (if you don't, a call to client.getToken() does not return a refresh_token, which was part of what was confusing me in the first place).
const { google } = require('googleapis')

const scopes = [
  'https://www.googleapis.com/auth/spreadsheets.readonly',
  'https://www.googleapis.com/auth/userinfo.email',
  'https://www.googleapis.com/auth/drive.readonly'
]

module.exports = (env, mongo) => {
  const getBaseClient = () => {
    const { OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET, OAUTH_CALLBACK_URL } = env.credentials
    return new google.auth.OAuth2(
      OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET, OAUTH_CALLBACK_URL
    )
  }

  const getAuthedClient = async (authId) => {
    let tokens = await mongo.getAccessTokensForAuthUser(authId)
    const client = getBaseClient()
    client.setCredentials(tokens)
    if (client.isTokenExpiring()) {
      const { credentials } = await client.refreshAccessToken()
      tokens = { ...credentials, refresh_token: tokens.refresh_token }
      await mongo.setAccessTokensForAuthUser(authId, tokens)
      client.setCredentials(tokens)
    }
    return client
  }

  const generateAuthUrl = (userId) => {
    return getBaseClient().generateAuthUrl({
      access_type: 'offline',
      prompt: 'consent',
      scope: scopes,
      state: `userId=${userId}`
    })
  }

  const getUserInfo = async (accessCode) => {
    const auth = getBaseClient()
    const { tokens } = await auth.getToken(accessCode)
    auth.setCredentials(tokens)
    const { data } = await google.oauth2({ version: 'v2', auth }).userinfo.get({})
    return { ...data, tokens }
  }

  const listSheets = async (authId) => {
    const auth = await getAuthedClient(authId)
    let nextPageToken = null
    let results = []
    do {
      const { data } = await google
        .drive({ version: 'v3', auth })
        .files.list({
          q: 'mimeType = \'application/vnd.google-apps.spreadsheet\'',
          includeItemsFromAllDrives: true,
          supportsAllDrives: true,
          corpora: 'user',
          orderBy: 'name',
          pageToken: nextPageToken
        })
      nextPageToken = data.nextPageToken
      results = results.concat(data.files)
    } while (nextPageToken)
    return results
  }

  return {
    generateAuthUrl,
    getUserInfo,
    listSheets
  }
}
FIXED: USE storageEngine: "wiredTiger"
I use Mocha / Chai / Supertest and mongodb-memory-server to test my app, but I received this error: Transaction numbers are only allowed on storage engines that support document-level locking.
With the real database, tested via Postman, it works well.
My code:
In database.js
const mongoose = require('mongoose')
const { MongoMemoryReplSet } = require('mongodb-memory-server')

mongoose.set('useFindAndModify', false);

const connect = async () => {
  try {
    let url = process.env.MONGO_URL
    let options = {
      //Something
    }
    if (process.env.NODE_ENV === 'test') {
      const replSet = new MongoMemoryReplSet();
      await replSet.waitUntilRunning();
      const uri = await replSet.getUri();
      await mongoose.connect(uri, options)
      //log connected
    } else {
      await mongoose.connect(url, options)
      //log connected
    }
  } catch (error) {
    //error
  }
}
I have two models: Company and User. I made a function that adds a member to a company using a transaction. My code:
const addMember = async (req, res, next) => {
  const { companyId } = req.params
  const { userId } = req.body
  const session = await mongoose.startSession()
  try {
    await session.withTransaction(async () => {
      const [company, user] = await Promise.all([
        Company.findOneAndUpdate(
          //Something
        ).session(session),
        User.findByIdAndUpdate(
          //Something
        ).session(session)
      ])
      //Something if... else
      return res.json({
        message: `Add member successfully!`,
      })
    })
  } catch (error) {
    //error
  }
}
Here's the router:
router.post('/:companyId/add-member',
  authentication.required,
  company.addMember
)
Test file:
const expect = require('chai').expect
const request = require('supertest')
const app = require('../app')

describe('POST /company/:companyId/add-member', () => {
  it('OK, add member', done => {
    request(app).post(`/company/${companyIdEdited}/add-member`)
      .set({ "x-access-token": signedUserTokenKey })
      .send({ userId: memberId })
      .then(res => {
        console.log(res.body)
        expect(res.statusCode).to.equals(200)
        done()
      })
      .catch((error) => done(error))
  })
})
And I received the error: 'Transaction numbers are only allowed on storage engines that support document-level locking'.
How can I fix this?
Add retryWrites=false to your database uri. Example below:
mongodb://xx:xx#xyz.com:PORT,zz.com:33427/database-name?replicaSet=rs-xx&ssl=true&retryWrites=false
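Alternatively, per the FIXED note at the top of the question: run mongodb-memory-server's replica set on the wiredTiger storage engine, since the default in-memory engine does not support the document-level locking that transactions require. A minimal sketch of the test branch of connect with that option (the exact option shape varies by mongodb-memory-server version, so treat it as an assumption):

const mongoose = require('mongoose')
const { MongoMemoryReplSet } = require('mongodb-memory-server')

const connectForTest = async (options) => {
  // Assumption: replSet options accept storageEngine; wiredTiger supports
  // the document-level locking that transactions need.
  const replSet = new MongoMemoryReplSet({
    replSet: { count: 1, storageEngine: 'wiredTiger' },
  })
  await replSet.waitUntilRunning()
  const uri = await replSet.getUri()
  await mongoose.connect(uri, options)
}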