node: managing SQL files with variable replacement - javascript

I have large SQL statements that I would like to store as separate files (with syntax highlighting etc.). I like the accepted solution proposed here.
In my use case, I write SQL in JavaScript template literal syntax for variable replacement, so my file looks like:
-- some_file.sql
select col1, col2, col3
from my_table
where col1 = '${val1}'
and col2 between ${val2} and ${val3}
In fact, this way of writing queries started with plain template literals, before the queries grew and demanded their own files.
The question is how to achieve template-literal-style evaluation for a query string read using fs.readFileSync, without having to do the dreaded eval. I looked into es6-template-render, but that implementation is not suited to variables in the execution context; i.e. it requires a separate context parameter rather than implicitly using the variables (global/local) available in the environment at runtime.
Any pointers?

Apologies if my assumption is incorrect, but the quotes around '${val1}' suggest you're planning to use string substitution rather than parameterized queries. Don't do that. :-) Never use string substitution to put values into SQL queries. Let me introduce you to my friend Bobby (of xkcd's "Little Bobby Tables" fame):
Use parameterized queries instead.
For instance, you might use a format very much like you have, just without any quotes around ${val1}:
select col1, col2, col3
from my_table
where col1 = ${val1}
and col2 between ${val2} and ${val3}
Then your code could convert that into a query appropriate to your DB API. Many of them use ? placeholders, so for instance (here I'm using node-mysql2 as the DB API, but the specific API isn't the point):
const rexParam = /(?<!\$)\$\{([^}]+)\}/g;
function doQuery(sql, params) {
    return new Promise((resolve, reject) => {
        const values = [];
        const preppedSql = sql.replace(rexParam, (_, paramName) => {
            const value = params[paramName];
            if (value === undefined) { // Or do an `in` check if you want to allow `undefined`
                throw new Error(`Missing parameter ${paramName}`);
            }
            values.push(value);
            return "?";
        });
        return connection.execute(
            preppedSql,
            values,
            function(err, results, fields) {
                if (err) {
                    reject(err);
                } else {
                    resolve({results, fields});
                }
            }
        );
    });
}
That spins through the string, replacing the ${val1} and such tokens with ? and at the same time filling in an array of values to pass to the parameterized query function.
(Note the negative lookbehind so that $${...} isn't expanded, just like in template literals. The regex is a bit primitive, but should suffice for SQL I'd think...)
Live example just dumping out the string and values:
const sql =
    "select col1, col2, col3\n" +
    "from my_table\n" +
    "where col1 = ${val1}\n" +
    "and col2 between ${val2} and ${val3}";
const rexParam = /(?<!\$)\$\{([^}]+)\}/g;
function doQuery(sql, params) {
    const values = [];
    const preppedSql = sql.replace(rexParam, (_, paramName) => {
        const value = params[paramName];
        if (value === undefined) { // Or do an `in` check if you want to allow `undefined`
            throw new Error(`Missing parameter '${paramName}'`);
        }
        values.push(value);
        return "?";
    });
    console.log(preppedSql);
    console.log(values);
}
doQuery(sql, {val1: "one", val2: 2, val3: 20});
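To tie this back to the file-per-query setup from the question, you'd read the file's contents and hand them to doQuery. A minimal sketch (assuming some_file.sql contains the ${...} placeholders shown earlier):
const fs = require("fs");
// Read the raw SQL, placeholders and all...
const sqlFromFile = fs.readFileSync("./some_file.sql", "utf8");
// ...and let doQuery swap them for `?` placeholders and a values array.
doQuery(sqlFromFile, { val1: "one", val2: 2, val3: 20 });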

Getting JSON data from snowflake stored procedure parameter and inserting it in target table

I have a requirement to receive JSON data in a stored procedure parameter and insert it into the Snowflake target table (user_json_feedback). The JSON data has three key elements (User, EntityID, and Entity Type), whereas the target table has five columns (User, ID, Entity Type, Region, and Date). The region will have a default value of "NA," and the date will be the current date.
If the inserts are successful, it returns true; otherwise, it returns false.
I am struggling with the syntax and parsing issues here, as I am very new to writing procedures.
Here is what I have been trying to do; it obviously gives errors, but it shows the algorithm I intend:
CREATE OR REPLACE SP_UPDATE_JSON_DATA (JSON_DATA VARIANT)
RETURNS BOOLEAN
LANGUAGE JAVASCRIPT
EXECUTE AS OWNER
AS
$$
//Declare variables
var REGION = 'NA'
var V_DATE = `select current_date;`;
var DATE_STMT= snowflake.createStatement({sqlText: V_DATE });
var curr_date = DATE_STMT.execute();
var src_json = JSON.parse(JSON_DATA);
var sql_command =
    `INSERT INTO user_json_feedback (user,id,etype,region ,date)//
     select src_json:USER,src_json:ENTITY_ID,src_json:ENTITY_TYPE,REGION,curr_date;`;
try {
    snowflake.execute(
        {sqlText: sql_command}
    );
    return "Succeeded."; // Return a success/error indicator.
}
catch (err) {
    return "Failed: " + err; // Return a success/error indicator.
}
$$;
The procedure call with parameters will look like this:
call SP_UPDATE_JSON_DATA ('[{"USER":"XYZ","ENTITY_ID":"BMT0001","ENTITY_TYPE":"BMT"},{"USER":"ABC","ENTITY_ID":"BMT0002","ENTITY_TYPE":"BMT"}]');
Thanks in advance for the help!
There are a few things here.
First, the step to get the current date: curr_date is a result-set object. To extract the value and use it later, you need to read the first row with .next(), then getColumnValue() to read the column content. To pass it along later as a well-formatted string, you'll want to convert it with .toISOString().
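In isolation, that pattern looks like this (a snippet of the updated procedure below):
var DATE_STMT_RES = snowflake.createStatement({sqlText: `select current_date;`}).execute();
DATE_STMT_RES.next();                                          // advance to the first (and only) row
var curr_date = DATE_STMT_RES.getColumnValue(1).toISOString(); // read column 1 as an ISO string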
Second, the parsed JSON in this case is an array, so you'll need to iterate over it to insert the individual records. Since it's not known ahead of time whether the variant will contain an array, you're best off checking whether the parsed JSON is an array and handling each case accordingly.
The last tweak was altering the return type, so you get the verbose feedback you're expecting from your return statements.
Updated code:
CREATE OR REPLACE TEMPORARY TABLE user_json_feedback
(
    user VARCHAR(100)
    ,id VARCHAR(100)
    ,etype VARCHAR(100)
    ,region VARCHAR(100)
    ,date TIMESTAMP_NTZ
);
CREATE OR REPLACE TEMPORARY PROCEDURE SP_UPDATE_JSON_DATA(JSON_DATA VARIANT)
RETURNS STRING
LANGUAGE JAVASCRIPT
EXECUTE AS OWNER
AS
$$
//Declare variables
var REGION = 'NA';
var V_DATE = `select current_date;`;
var DATE_STMT = snowflake.createStatement({sqlText: V_DATE});
var DATE_STMT_RES = DATE_STMT.execute();
DATE_STMT_RES.next();
var curr_date = DATE_STMT_RES.getColumnValue(1).toISOString();
var src_json = JSON.parse(JSON_DATA);
try {
    if (Array.isArray(src_json)) {
        for (var key in src_json) {
            var sql_command =
                `INSERT INTO user_json_feedback (user,id,etype,region,date)
                 VALUES(:1,:2,:3,:4,:5)`;
            snowflake.execute(
                {
                    sqlText: sql_command,
                    binds: [src_json[key].USER, src_json[key].ENTITY_ID, src_json[key].ENTITY_TYPE, REGION, curr_date]
                }
            );
        }
    }
    else {
        var sql_command =
            `INSERT INTO user_json_feedback (user,id,etype,region,date)
             VALUES(:1,:2,:3,:4,:5)`;
        snowflake.execute(
            {
                sqlText: sql_command,
                binds: [src_json.USER, src_json.ENTITY_ID, src_json.ENTITY_TYPE, REGION, curr_date]
            }
        );
    }
    return "Succeeded."; // Return a success/error indicator.
}
catch (err) {
    return "Failed: " + err; // Return a success/error indicator.
}
$$;
-- Need to cast the string argument as a variant.
--ARRAY example
call SP_UPDATE_JSON_DATA ('[{"USER":"XYZ","ENTITY_ID":"BMT0001","ENTITY_TYPE":"BMT"},{"USER":"ABC","ENTITY_ID":"BMT0002","ENTITY_TYPE":"BMT"}]'::VARIANT);
--Single object example
call SP_UPDATE_JSON_DATA ('{"USER":"JST","ENTITY_ID":"BMT0003","ENTITY_TYPE":"BMT"}'::VARIANT);
SELECT *
FROM user_json_feedback;
Result set: the three inserted rows, each with region 'NA' and the current date.
While all this works, you may well be better served just inserting the whole variant into a table and relying on Snowflake's significant semi-structured data querying capabilities. Certainly for large payloads you'll find much better performance from bulk loading into a variant column in a table and then parsing in a view.
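For illustration, a hedged sketch of that alternative from inside a JavaScript procedure (raw_feedback and user_feedback_v are hypothetical names; src_json is the parsed input from the procedure above):
// Store the whole payload once in a VARIANT column. When binding,
// PARSE_JSON has to go through a SELECT rather than a VALUES list.
snowflake.execute({
    sqlText: `INSERT INTO raw_feedback (payload) SELECT PARSE_JSON(:1);`,
    binds: [JSON.stringify(src_json)]
});
// Then parse lazily in a view using Snowflake's semi-structured syntax.
snowflake.execute({
    sqlText: `CREATE OR REPLACE VIEW user_feedback_v AS
              SELECT f.value:USER::string        AS user,
                     f.value:ENTITY_ID::string   AS id,
                     f.value:ENTITY_TYPE::string AS etype
              FROM raw_feedback,
                   LATERAL FLATTEN(input => payload) f`
});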

How to return a MAP like structure from bigquery javascript UDF so that I can generate a key-value column dynamically at runtime?

I'm trying to return a MAP-like structure from a JavaScript UDF in BigQuery, so that I can convert that structure directly into relational columns without knowing the column names beforehand.
In the approach below, I stringify the JSON and then use the json_extract_scalar function to create columns.
CREATE TEMP FUNCTION main(json_str STRING)
RETURNS STRING
LANGUAGE js AS
r"""
var row = JSON.parse(json_str);
return JSON.stringify(row);
""";
with temp_table as (
    select "ram" name, "ram@gmail.com" email
),
Rule_result as (
    SELECT main(TO_JSON_STRING(STRUCT(t.name, t.email))) result FROM temp_table as t
)
SELECT json_extract_scalar(result, '$.name') name,
       json_extract_scalar(result, '$.email') email
FROM Rule_result as r;
In this approach, I'm returning the struct, knowing the column names beforehand.
CREATE TEMP FUNCTION main(json_str STRING)
RETURNS STRUCT<name STRING, email STRING>
LANGUAGE js AS
r"""
var row = JSON.parse(json_str);
return row;
""";
with temp_table as (
    select "ram" name, "ram@gmail.com" email
),
Rule_result as (
    SELECT main(TO_JSON_STRING(STRUCT(t.name, t.email))) result FROM temp_table as t
)
SELECT r.result.* FROM Rule_result as r;
Both approaches work fine, but neither solves the problem, because I still need to know the column names.
BigQuery supports a STRUCT return type, but that doesn't fit my use case, since I don't know the column names beforehand.
How can I dynamically create columns from the data returned by the JavaScript UDF without knowing the column names?
The data is a flat JSON object:
{
    "name": "ram",
    "email": "ram@gmail.com"
}
I somehow need to convert this JSON object into table columns, like:
name    email
ram     ram@gmail.com
Consider below
create temp function extract_keys(input string) returns array<string> language js as """
return Object.keys(JSON.parse(input));
""";
create temp function extract_values(input string) returns array<string> language js as """
return Object.values(JSON.parse(input));
""";
create temp table tmp as
select id, key, value
from your_table,
unnest(extract_keys(json)) key with offset
join unnest(extract_values(json)) value with offset
using(offset);
execute immediate (select
'''select * from tmp
pivot (any_value(value) for key in (''' || string_agg(distinct "'" || key || "'") || '''))
'''
from tmp
);
If applied to sample data like that in your question's example, the output is a pivoted table with one column per JSON key (here, name and email).

delete user from json table in js

So I'm a beginner at JS, and I have a table of users in a JSON file. I'm making an account-delete feature. I have a find set up to locate the user, and it works fine, but I can't figure out how to delete the user from the file. Any help would be appreciated!
JSON:
{
    "users": [
        {
            "name": "ImBattleDash",
            "Id": "780748c5d4504446bbba3114ce48f6e9",
            "discordId": "471621420162744342",
            "dateAdded": 1548295371
        }
    ]
}
JS:
function findJson() {
    fs.readFile('./linkedusers.json', 'utf-8', function (err, data) {
        if (err) message.channel.send('Invalid Code.')
        var arrayOfObjects = JSON.parse(data)
        let findEntry = arrayOfObjects.users.find(entry => entry.discordId == myCode)
        let linkEmbed = new Discord.RichEmbed()
            .setTitle('Account unlinked!')
            .setDescription('Link your account by friending "BattleDash Bot" on Fortnite and then input the code you get messaged by typing "!link <code>"!')
            .setColor('#a900ff');
        message.channel.send({embed: linkEmbed});
    })
}
EDIT: Not sure if it's an array or a table; I don't know a lot about JSON.
You need to use:
Array#find to find a given user by some given criteria.
Array#indexOf to get the index of the found user in users
Array#splice to drop one element starting from the index given by Array#indexOf:
const input = {
    "users": [
        {
            "name": "ImBattleDash",
            "Id": "780748c5d4504446bbba3114ce48f6e9",
            "discordId": "471621420162744342",
            "dateAdded": 1548295371
        }
    ]
}
const removeUser = (criteria, users) =>
    users.splice(users.indexOf(users.find(criteria)), 1)
removeUser(
    ({ Id, discordId }) =>
        Id == '780748c5d4504446bbba3114ce48f6e9'
        && discordId == '471621420162744342',
    input.users
)
// Output: 0 <-- User has been removed!
console.log(input.users.length)
As for persisting the change, it's just a matter of calling JSON.stringify(input) and then writing the contents to the desired output file. See this other Q&A: Writing files in Node.js.
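A minimal sketch of that persistence step (reusing the input object from the snippet above):
const fs = require('fs');
// Serialize the updated object and overwrite the JSON file.
fs.writeFile('./linkedusers.json', JSON.stringify(input, null, 4), 'utf-8', err => {
    if (err) throw err;
});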
With great help from Cat and Matias I came up with this code that works!
function findJson() {
    fs.readFile('./linkedusers.json', 'utf-8', function (err, data) {
        if (err) message.channel.send('Invalid Code.')
        var arrayOfObjects = JSON.parse(data)
        let findEntry = arrayOfObjects.users.find(entry => entry.discordId == myCode)
        const input = arrayOfObjects;
        const removeUser = (criteria, users) =>
            users.splice(users.indexOf(users.find(criteria)), 1)
        removeUser(
            ({ Id, discordId }) =>
                Id == findEntry.Id
                && discordId == findEntry.discordId,
            input.users
        )
        console.log('unlinked')
        fs.writeFile('./linkedusers.json', JSON.stringify(arrayOfObjects, null, 4), 'utf-8', function(err) {
            if (err) throw err
            console.log('Done!')
        })
        let linkEmbed = new Discord.RichEmbed()
            .setTitle('Account unlinked!')
            .setDescription('Link your account by friending "BattleDash Bot" on Fortnite and then input the code you get messaged by typing "!link <code>"!')
            .setColor('#a900ff');
        message.channel.send({embed: linkEmbed});
    })
}
Here's a quick tutorial for you:
"Users" would be either an array (using []) or a javascript object (using {}), your choice. There won't be any actual tables unless you use a database instead of a JSON file (although if your JSON expression is as simple as your example, you could almost think of it as a table.) -- And actually, a third option would be to use the javascript Map type, which is like a beefed-up object, but I won't address that here.
While using an array would make it a bit easier to retrieve a list of data for all users (because arrays are simpler to iterate through), using an object would make it considerably easier to retrieve data for a single user (since you can directly specify the user you want by its key instead of needing to loop through the whole array until you find the one you want.) I'll show you an example that uses an object.
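The lookup difference in a nutshell (a sketch; usersArray and usersObject are hypothetical stand-ins for the two shapes):
// Array shape: scan the array until a match turns up.
const userFromArray = usersArray.find(u => u.Id === "780748c5d4504446bbba3114ce48f6e9");
// Object shape: direct access by key, no loop needed.
const userFromObject = usersObject["780748c5d4504446bbba3114ce48f6e9"];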
The individual user in your sample code is an example of a JavaScript object. JSON lets you convert an object to a string (for storage, I/O, and human readability) and back to an object (so JavaScript can understand it). You use the JSON.stringify() and JSON.parse() methods, respectively, for these conversions. The string has to be JSON-formatted or this won't work, and your example is almost in JSON format.
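For instance, a tiny round trip looks like this:
const str = JSON.stringify({ name: "ImBattleDash" }); // object -> JSON-formatted string
const obj = JSON.parse(str);                          // string -> object again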
To comply with JSON formatting, you could structure a Users object as follows. (Of course we're looking at the stringified version, because mere humans can't easily read an "actual" JavaScript object.)
"Users": { // Each individual user is a property of your users object
"780748c5d4504446bbba3114ce48f6e9": // The Id is the key in the "key/value pair"
{ // The individual user object itself is the value in the key/value pair
// Id is duplicated inside user for convenience (not necessarily the best way to do it)
"id": "780748c5d4504446bbba3114ce48f6e9",
"name": "ImBattleDash", // Each property of the user is also a key/value pair
"discordId": "471621420162744342", //Commas separate the properties of an object
"dateAdded": "1548295371" // All property values need double quotes for JSON compatibility
}, // Commas separate the properties (ie the individual users) of the users object
"446bbba3114ce48f6e9780748c5d4504": // This string is the second user's key
{ // This object is the second user's value
"id": "446bbba3114ce48f6e9780748c5d4504",
"name": "Wigwam",
"discordId": "162744342471621420",
"dateAdded": "1548295999"
}
}
Once you retrieve the string from storage, you convert it to an object and delete a user as follows (broken down into more steps than necessary, for clarity):
let usersObject = JSON.parse(stringRetrievedFromFile);
let userId = "780748c5d4504446bbba3114ce48f6e9";
let userToModifyOrDelete = usersObject[userId]; // a reference, handy for modifying the user
delete usersObject[userId]; // removes the user's key/value pair from the object
To change the user's discordId instead, you would do:
let discordId = userToModifyOrDelete.discordId; // Not necessary, just shows how to retrieve value
let newDiscordId = "whateverId";
userToModifyOrDelete.discordId = newDiscordId;
And you'd convert the object back into a string to store in your file with:
JSON.stringify(usersObject);
Hopefully that's almost all you need to know about JSON!

node-mssql Transaction insert - Returning the inserted id..?

I'm using node-mssql 3.2.0 and I need to INSERT INTO a table and return the id of the inserted record.
I can successfully use sql.Transaction() to insert data, but the only parameters given to callbacks (request.query() and transaction.commit()) are:
const request = new sql.Request();
request.query('...', (err, recordset, affected) => {});
const transaction = new sql.Transaction();
transaction.commit((err) => {});
So recordset is undefined for INSERT, UPDATE and DELETE statements, and affected is the number of rows affected, in my case 1.
Does anyone know a good way to obtain an inserted record's id (just a primary key id) after a transaction.commit() using node-mssql?
Instead of just doing an INSERT INTO... statement, you can add a SELECT... statement as well:
INSERT INTO table (...) VALUES (...); SELECT SCOPE_IDENTITY() AS id;
The SCOPE_IDENTITY() function returns the inserted identity column, which means recordset now contains the id:
const request = new sql.Request();
request.query('...', (err, recordset, affected) => {});
I don't think request.multiple = true is required, because although this includes multiple statements, only one of them is a SELECT, so only one result set comes back.
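For instance, a sketch of the whole flow using node-mssql's callback API (the people table and name column are made up for illustration):
const request = new sql.Request();
request.input('name', sql.NVarChar, 'Alice'); // bound as @name, no string substitution
request.query(
    'INSERT INTO people (name) VALUES (@name); SELECT SCOPE_IDENTITY() AS id;',
    (err, recordset) => {
        if (err) { throw err; }
        console.log(recordset[0].id); // identity value of the row just inserted
    }
);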
So the answer was SQL-related and is not specific to node-mssql.
I know this question has an accepted answer. I made it work the following way:
let pool = await sql.connect(config);
let insertItem = await pool.request()
    .input('ItemId', sql.NVarChar, 'itemId1234')
    .input('ItemDesc', sql.NVarChar, 'nice item')
    .query(`insert into itemTable (Id, ItemId, ItemDesc)
            OUTPUT INSERTED.ID
            values (NEWID(), @ItemId, @ItemDesc);`);
var insertedItemId = insertItem.recordset[0].ID;
This adds a unique identifier to the data saved to the DB (if the table is created accordingly):
create table itemTable (
    Id UNIQUEIDENTIFIER primary key default NEWID(),
    ItemId nvarchar(25),
    ItemDesc nvarchar(25)
)

Convert XQuery Search API bucket generation to JSearch in MarkLogic

I wanted to "convert" one of my old XQuery examples that uses buckets (bucketed search) to JSearch:
import module namespace search =
"http://marklogic.com/appservices/search"
at "/MarkLogic/appservices/search/search.xqy";
declare variable $options := <options xmlns="http://marklogic.com/appservices/search">
<constraint name="height">
<range type="xs:double" facet="true">
<bucket ge="1.9" name="supertall">1.90m + </bucket>
<bucket lt="1.9" ge="1.7" name="tall">1.70m - 1.90m</bucket>
<bucket lt="1.7" ge="1.2" name="normalish">1.20m - 1.70m</bucket>
<bucket lt="1.2" name="short">0m - 1.20m</bucket>
<facet-option>limit=20</facet-option>
<json-property>height</json-property>
</range>
</constraint>
</options>;
let $results := search:search("height:short", $options)
for $facet in $results/search:facet
return $results;
The above allows the definition of buckets as well as the use of 'height' as part of the search grammar, meaning that a search such as search:search('height:short') works just fine.
Unfortunately I couldn't get the JSearch version working. This is what I tried:
var jsearch = require('/MarkLogic/jsearch');
jsearch.documents(
    jsearch.facet('Height', 'height').groupInto([
        jsearch.bucketName('short'), 1.60,
        jsearch.bucketName('normal'), 1.90,
        jsearch.bucketName('tall'), 4.00
    ]))
    .where(cts.parse('height:short'))
    .result();
The above code returns:
{
    "results": null,
    "estimate": 0
}
I have also tried to add a reference to the JSON property 'height' but that didn't work either:
var jsearch = require('/MarkLogic/jsearch');
var reference = { height: cts.jsonPropertyReference('height') };
jsearch.documents(
    jsearch.facet('Height', 'height').groupInto([
        jsearch.bucketName('short'), 1.60,
        jsearch.bucketName('normal'), 1.90,
        jsearch.bucketName('tall'), 4.00
    ]))
    .where(cts.parse('height:short', reference))
    .result();
However, when I remove the .where() constraint, my buckets are generated just fine. Any suggestions?
I believe I have found a solution to this. The values of height are numbers, and in my JSearch query I was trying to match the string 'short' against those numbers. To overcome this, I had to use a callback function, as documented at http://docs.marklogic.com/cts.parse.
Essentially, the solution was to create my own query using a set of cts query constructors (cts.andQuery and cts.jsonPropertyRangeQuery). The solution now looks like this:
var jsearch = require('/MarkLogic/jsearch');
var short = 1.60;
var normal = 1.80;
var tall = 1.90;
var refCallback = function(operator, values, options) {
    if (values === 'short') {
        return cts.jsonPropertyRangeQuery('height', '<=', short);
    } else if (values === 'normal') {
        return cts.andQuery([
            cts.jsonPropertyRangeQuery('height', '>=', normal),
            cts.jsonPropertyRangeQuery('height', '<', tall)
        ]);
    } else {
        return cts.jsonPropertyRangeQuery('height', '>=', tall);
    }
};
var reference = { height: refCallback };
jsearch.documents(
    jsearch.facet('height').groupInto([
        jsearch.bucketName('short'), short,
        jsearch.bucketName('normal'), normal,
        jsearch.bucketName('tall'), tall
    ]))
    .where(cts.parse('height:tall', reference))
    .result();
Note that I also had to externalise the variable declarations, so that I can reuse them within the bucket definitions as well as in the callback function.
