I can verify an ECDSA / SHA-256 signature using the standard JavaScript Web Crypto API (window.crypto.subtle.verify), but not using the jsrsasign library (KJUR.crypto). I have also tried the 'KJUR.crypto.ECDSA' class directly, but no luck either.
See below both script methods, which don't produce the same result. Could someone advise on the issue(s)?
//function to convert a hex string to a Uint8Array of bytes
function hexStringToUint8Array(hexString) {
if (hexString.length % 2 != 0)
throw "Invalid hexString";
var arrayBuffer = new Uint8Array(hexString.length / 2);
for (var i = 0; i < hexString.length; i += 2) {
var byteValue = parseInt(hexString.substr(i, 2), 16);
if (Number.isNaN(byteValue)) // note: byteValue == NaN is always false
throw "Invalid hexString";
arrayBuffer[i / 2] = byteValue;
}
return arrayBuffer;
}
//function to convert Base64 to hex (two hex digits per byte)
function base64ToHex(str) {
const raw = atob(str);
let result = '';
for (let i = 0; i < raw.length; i++) {
const hex = raw.charCodeAt(i).toString(16);
result += (hex.length === 2 ? hex : '0' + hex);
}
return result;
}
//convert Base64 URL to Base64
function base64urlToBase64(base64url) {
base64url = base64url.toString();
return base64url
.replace(/\-/g, "+")
.replace(/_/g, "/");
}
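(A side note on the helper above: base64url strings usually omit the trailing '=' padding, and some stricter base64 decoders insist on it. If that ever becomes a problem, the padding can be restored first; this helper is my addition, not part of the original question.)
function padBase64(b64) {
// restore '=' padding so strict base64 decoders accept the string
while (b64.length % 4 !== 0) b64 += '=';
return b64;
}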
//Define values
Base64URL_coordX = '2uYQAsY-bvzz7r7SL-tK2C0eySfYEf1blv91cnd_1G4';
Base64URL_coordY = 'S3j1vy2sbkExAYXumb3w1HMVH-4ztoHclVTwQd45Reg';
signature = 'ed0c2b2e56731511ce2cea1d7320cdbc39dbabca7f525ec5d646b7c11cb35d5846a1cb70c2a1d8480f5ef88b46d401ca78b18ccae9ae4e3934a6b8fe412f7b11';
dataHex = '48656c6c6f20386777696669'; // ='Hello 8gwifi'
////////////Verifying Method using standard javascript
var dataToVerify = hexStringToUint8Array(dataHex);
var SignatureToVerify = hexStringToUint8Array(signature);
window.crypto.subtle.importKey(
"jwk", //can be "jwk" (public or private), "spki" (public only), or "pkcs8" (private only)
{ //this is an example jwk key, other key types are Uint8Array objects
kty: "EC",
crv: "P-256",
x: Base64URL_coordX, // expects x and y to be «base64url» encoded
y: Base64URL_coordY,
ext: true,
},
{ //these are the algorithm options
name: "ECDSA",
namedCurve: "P-256", //can be "P-256", "P-384", or "P-521"
},
false, //whether the key is extractable (i.e. can be used in exportKey)
["verify"] //"verify" for public key import, "sign" for private key imports
)
.then(function(publicKey){
window.crypto.subtle.verify(
{
name: "ECDSA",
hash: {name: "SHA-256"}, //can be "SHA-1", "SHA-256", "SHA-384", or "SHA-512"
},
publicKey, //from generateKey or importKey above
SignatureToVerify, //ArrayBuffer of the signature
dataToVerify //ArrayBuffer of the data
)
.then(function(isvalid){
console.log('Signature valid1: ', isvalid);
})
.catch(function(err){
console.error(err);
});
});
////////////Verifying Method using KJUR
Hex_coordX = base64ToHex(base64urlToBase64(Base64URL_coordX));
Hex_coordY = base64ToHex(base64urlToBase64(Base64URL_coordY));
var XY = Hex_coordX.toString(16) + Hex_coordY.toString(16);
var sig = new KJUR.crypto.Signature({"alg": "SHA256withECDSA", "prov": "cryptojs/jsrsa"});
sig.init({xy: XY, curve: "secp256r1"});
sig.updateHex(dataHex);
var result = sig.verify(signature);
//Printing Verification
console.log('Signature valid2: ', result);
The library's description says that it is JCA style. This probably means that the signature generation / verification functions use an ASN.1 / DER encoded signature as input / output.
That consists of an ASN.1 SEQUENCE (tag 0x30) whose length covers the two INTEGERs inside. Each INTEGER has tag 0x02 and a length equal to the size of the integer value of the r or s component of the signature. These are big-endian, signed integers (which means stripping leading 0x00 bytes, or prepending a 0x00 byte if the top byte is 0x80 or higher).
In your case that would be:
r = ed0c2b2e56731511ce2cea1d7320cdbc39dbabca7f525ec5d646b7c11cb35d58
s = 46a1cb70c2a1d8480f5ef88b46d401ca78b18ccae9ae4e3934a6b8fe412f7b11
Now converting these to DER ASN.1:
ri = 02 21 00 ed0c2b2e56731511ce2cea1d7320cdbc39dbabca7f525ec5d646b7c11cb35d58
si = 02 20 46a1cb70c2a1d8480f5ef88b46d401ca78b18ccae9ae4e3934a6b8fe412f7b11
and finally adding the sequence and adding the concatenation of above:
sig = 30 45 02 21 00 ed0c2b2e56731511ce2cea1d7320cdbc39dbabca7f525ec5d646b7c11cb35d58
02 20 46a1cb70c2a1d8480f5ef88b46d401ca78b18ccae9ae4e3934a6b8fe412f7b11
and checking the result e.g. here.
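As a sketch, the same conversion as a JavaScript helper (my own code, not from jsrsasign; it assumes the DER body stays under 128 bytes, which holds for P-256 signatures):
// Convert a raw r||s hex signature into DER-encoded ASN.1 hex.
function rawSigToDerHex(rawHex) {
function derInt(hex) {
// strip leading 0x00 bytes
while (hex.length > 2 && hex.slice(0, 2) === '00') hex = hex.slice(2);
// prepend 0x00 if the top bit is set, to keep the integer positive
if (parseInt(hex.slice(0, 2), 16) >= 0x80) hex = '00' + hex;
return '02' + (hex.length / 2).toString(16).padStart(2, '0') + hex;
}
var half = rawHex.length / 2;
var body = derInt(rawHex.slice(0, half)) + derInt(rawHex.slice(half));
return '30' + (body.length / 2).toString(16).padStart(2, '0') + body;
}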
But I guess in your case just calling the function concatSigToASN1Sig would be faster :P
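For reference, a sketch of that fix (hedged: concatSigToASN1Sig is a static helper on KJUR.crypto.ECDSA, and the jsrsasign examples pass the public key as an uncompressed point, i.e. '04' + X + Y):
// convert the raw r||s signature to DER first, then verify as before
var derSig = KJUR.crypto.ECDSA.concatSigToASN1Sig(signature);
var sig2 = new KJUR.crypto.Signature({alg: "SHA256withECDSA"});
sig2.init({xy: "04" + XY, curve: "secp256r1"}); // "04" prefix per the library's examples
sig2.updateHex(dataHex);
console.log('Signature valid2: ', sig2.verify(derSig));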
Related
Yes, there are 100 topics about it.
Yes, most of them are snippets/parts of code that answer a specific problem and don't actually help.
So perhaps this topic can provide a "complete" solution for symmetric encryption, and maybe, if someone is willing to help, an asymmetric private/public key example.
So here are the prerequisites.
JavaScript:
npm install crypto
C++:
https://github.com/QuasarApp/Qt-AES/tree/master
and Qt.
Now, in order to do encryption, the tutorial on this page works quite well:
Example 2 > https://www.geeksforgeeks.org/node-js-crypto-createdecipheriv-method/?ref=lbp
Now, as far as I can tell, say we create our key from a password:
const password = 'mySuperFancyPassword';
// Defining key
export const key = crypto.scryptSync(password, 'salt', 32);
This key is not the same as the one we would make in C++ using:
QAESEncryption encryption(QAESEncryption::AES_256, QAESEncryption::CBC,QAESEncryption::PKCS7);
QString key("mySuperFancyPassword");
QByteArray hashKey = QCryptographicHash::hash(key.toLocal8Bit(), QCryptographicHash::Sha256);
QByteArray decodeText = encryption.decode(jsByteArray, hashKey , jsIv);
Because Qt-AES takes a hash rather than whatever crypto.scryptSync() produces.
I suppose the question is... how can I match these two keys?
If I were to pass the JavaScript key as hex to C++ and convert it to a byte array (auto key = QByteArray::fromHex(hexByte)), the C++ library will decrypt the string properly, and with PKCS7 padding it will match JavaScript.
Now I know that I should use OpenSSL as that is the standard, but every time I look at it I want to cry.
So this library here seems to be very dummy-friendly so far...
However, if anyone is interested in OpenSSL, there is this interesting "file" > https://github.com/soroush/qtz-security/blob/dev/lib/src/crypto.cpp
That shows how to do it in OpenSSL, but I get error 0 in
error_code = EVP_DecryptFinal_ex(ctx, plaintext + len, &len);
in decryptRawData(const QByteArray& input, const QByteArray& rawKey, const QByteArray& rawIV)
So, same issue, black magic! I did match my EVP_aes_256_cbc settings between JS and C++ in the second library.
Bottom line, can anyone help me convert the KEY to properly match between C++ and JavaScript?
Or help with the second lib, OpenSSL? But I take it it's the same issue of salt/key generation...
UPDATE!
Big thanks to @absolute.madness for his solution!
Also, I found another way of... "partially" solving the problem.
I found out that crypto has PKCS5_PBKDF2_HMAC support too! So here is a proposed workflow for that one. However, even though I can send from JavaScript > C++, I can't send C++ > JavaScript using the QAESEncryption library, due to (I think) incorrect padding...? I crash at
decrypted = Buffer.concat([decrypted, decipher.final()]);
at the .final() statement, I think.
Here is the JavaScript & C++ code that I got working up to 50%.
JS:
// Defining password
const password: string = process.env.KEY_LICENSE_GENERIC! as string
// Defining key
var key: Buffer
crypto.pbkdf2(password, 'salt_', 10000, 32,
'sha256', (err, derivedKey) => {
if (err) {
throw new Error();
}
key = derivedKey
})
const iv = crypto.randomBytes(16);
export function encrypt2(text: string) {
// Creating Cipheriv with its parameter
let cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(key), iv);
// Updating text
let encrypted = cipher.update(text);
// Concatenating iv + ciphertext + final block (which adds the padding)
encrypted = Buffer.concat([iv, encrypted, cipher.final()]);
return encrypted.toString('hex')
}
// A decrypt function
export function decrypt2(text: string) {
let rawData = Buffer.from(text, 'hex');
if (rawData.length > 16) {
let iv = rawData.subarray(0, 16) // We put IV as 1st 16 bytes.
let encr = rawData.subarray(16, rawData.length)
// Creating Decipher
let decipher = crypto.createDecipheriv(
'aes-256-cbc', Buffer.from(key), iv);
// Updating encrypted text
let decrypted = decipher.update(encr);
decrypted = Buffer.concat([decrypted, decipher.final()]);
return decrypted.toString()
}
return ""
}
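(A note on the snippet above, my observation rather than part of the original post: crypto.pbkdf2 is asynchronous, so key may still be undefined when encrypt2/decrypt2 first run. The synchronous variant sidesteps that ordering problem:)
// derive the key synchronously so it is ready before any encrypt/decrypt call
const key = crypto.pbkdf2Sync(password, 'salt_', 10000, 32, 'sha256');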
C++:
#include <openssl/rand.h>
#include <openssl/hmac.h>
#include <openssl/evp.h>
QByteArray generateKey(const QByteArray &phrase, bool encode, const int iterations) {
const int length = 32;
QByteArray salt("salt_");
unsigned char key[length];
PKCS5_PBKDF2_HMAC(
phrase.data(), phrase.size(),
(const unsigned char *) (salt.data()), salt.size(),
iterations, EVP_sha256(),
length, key
);
return encode ? QByteArray((const char *) (key), length).toBase64(QByteArray::Base64UrlEncoding) : QByteArray((const char *) (key), length);
}
QByteArray randomBytes(int size) {
QByteArray bytes(size, char(0));
if (RAND_bytes((unsigned char *) (bytes.data()), bytes.size()) != 1) {
QRandomGenerator::securelySeeded().fillRange((quint32 *) (bytes.data()), bytes.size() / sizeof(quint32));
}
return bytes;
}
void decrypt(){
QByteArray hexEnc = reply.readAll(); // QNetworkReply*
QByteArray enc = QByteArray::fromHex(hexEnc); // hexEnc is already a QByteArray
auto iv = enc.mid(0, 16);
enc = enc.mid(16, enc.size());
QAESEncryption encryption(QAESEncryption::AES_256,
QAESEncryption::CBC,QAESEncryption::PKCS7);
QByteArray decodeText = encryption.decode(enc, generateKey("Fancy password", false, 10000), iv);
/// Remove padding, I think this is missing when we encrypt.
QString decodedString = QString(encryption.removePadding(decodeText ));
}
void encrypt(){
QAESEncryption encryption(QAESEncryption::AES_256,
QAESEncryption::CBC, QAESEncryption::PKCS7);
auto iv = randomBytes(16);
auto encrypted = encryption.encode("Hello test code",
generateKey("Fancy password", false, 10000), iv); // bad encrypt, js will crash.
}
You cannot just use SHA-256 to match the scrypt key derivation algorithm, obviously. Scrypt is defined in RFC 7914 and is not (as of yet) implemented in Qt via its interfaces. OpenSSL (used by Qt), on the other hand, supports it. I added two implementations of the Node.js example 1 which you reference: the first one uses OpenSSL & Qt-AES, the second uses pure OpenSSL. Initially, I got an error from EVP_DecryptFinal_ex similar to what you described. When I started to debug, it turned out that EVP_DecodeBlock was returning an incorrect size when decoding from base64. After using EVP_DecodeInit/EVP_DecodeUpdate/EVP_DecodeFinal to handle base64 instead of EVP_DecodeBlock, as was suggested here, the error was gone.
I include the C++ code, which roughly translates the js code from example 1 to C++ (I used OpenSSL 1.1.1q for testing):
#include <QDebug>
#include <openssl/aes.h>
#include <openssl/evp.h>
#include <openssl/kdf.h>
#include "qaesencryption.h"
void error(const char *msg)
{
qCritical(msg);
}
#define ERROR(msg) \
{ \
qCritical(msg); \
return; \
}
// scrypt key derivation function/algorithm, see also
// https://www.openssl.org/docs/man1.1.1/man7/scrypt.html
// returns -1 on error and 1 on success
int scrypt_kdf(unsigned char *key, size_t *keylen,
const unsigned char *pass, size_t passlen,
const unsigned char *salt, size_t saltlen,
uint64_t N = 16384, uint64_t r = 8, uint64_t p = 1)
{
// Note, default values for N, r, p are taken from
// https://nodejs.org/api/crypto.html#cryptoscryptsyncpassword-salt-keylen-options
EVP_PKEY_CTX *kctx;
int ret = 1;
kctx = EVP_PKEY_CTX_new_id(EVP_PKEY_SCRYPT, NULL);
if(EVP_PKEY_derive_init(kctx) <= 0)
{
error("EVP_PKEY_derive_init failed");
ret = -1;
}
if(1 == ret && EVP_PKEY_CTX_set1_pbe_pass(kctx, pass, passlen) <= 0)
{
error("EVP_PKEY_CTX_set1_pbe_pass failed");
ret = -1;
}
if(1 == ret && EVP_PKEY_CTX_set1_scrypt_salt(kctx, salt, saltlen) <= 0)
{
error("EVP_PKEY_CTX_set1_scrypt_salt failed");
ret = -1;
}
if(1 == ret && EVP_PKEY_CTX_set_scrypt_N(kctx, N) <= 0)
{
error("EVP_PKEY_CTX_set_scrypt_N failed");
ret = -1;
}
if (1 == ret && EVP_PKEY_CTX_set_scrypt_r(kctx, r) <= 0)
{
error("EVP_PKEY_CTX_set_scrypt_r failed");
ret = -1;
}
if (1 == ret && EVP_PKEY_CTX_set_scrypt_p(kctx, p) <= 0)
{
error("EVP_PKEY_CTX_set_scrypt_p failed");
ret = -1;
}
if (1 == ret && EVP_PKEY_derive(kctx, key, keylen) <= 0)
{
error("EVP_PKEY_derive failed");
ret = -1;
}
EVP_PKEY_CTX_free(kctx);
return ret;
}
// we use OpenSSL for scrypt key derivation algorithm and Qt/Qt-AES for decryption
void example1_openssl_and_qt(void)
{
unsigned char key[24];
size_t sz_key = sizeof(key);
const char password[] = "bncaskdbvasbvlaslslasfhj";
const char salt[] = "GfG";
QByteArray iv(16, char(0));
QByteArray encrypted = QByteArray::fromBase64("MfHwhG/WPv+TIbG/qM78qA==");
// you can also try
// encrypted = QByteArray::fromBase64(
// "j9QsjAFxuIAK0zvi5Iq2Z2+mo44RRpR2VMnJTNS7Ey0IkPjsGSJ+A+OPuvAqGO77Ww"
// "S2rI0dnJVREkFz0v8hug==");
if(scrypt_kdf(
key, &sz_key, reinterpret_cast<const unsigned char*>(password),
sizeof(password)-1, reinterpret_cast<const unsigned char*>(salt),
sizeof(salt)-1) <= 0)
{
ERROR("Key derivation failed");
}
OPENSSL_assert(sz_key == sizeof(key));
QAESEncryption encryption(QAESEncryption::AES_192, QAESEncryption::CBC,
QAESEncryption::PKCS7);
QByteArray decrypted = encryption.decode(
encrypted, QByteArray(reinterpret_cast<char*>(key), sizeof(key)), iv);
qDebug() << decrypted;
}
// pure OpenSSL; qt is only used for debug output
void example1_pure_openssl(void)
{
int len; // general purpose length variable, used in EVP_*Update/EVP_*Final
EVP_ENCODE_CTX *b64ctx;
unsigned char key[24];
size_t sz_key = sizeof(key);
EVP_CIPHER_CTX *dctx;
const char password[] = "bncaskdbvasbvlaslslasfhj";
const char salt[] = "GfG";
unsigned char iv[16] = { 0 }; // 16 zero bytes
char encrypted_b64[] = "MfHwhG/WPv+TIbG/qM78qA==";
// you can also try
// char encrypted_b64[] = "j9QsjAFxuIAK0zvi5Iq2Z2+mo44RRpR2VMnJTNS7Ey0IkPjsG"
// "SJ+A+OPuvAqGO77WwS2rI0dnJVREkFz0v8hug==";
// Note, base64 encoding is supposed to be b64size = (size + 2) / 3 * 4
// characters long, where size is the size of the encoded string, therefore
// the following assert checks that the size is correct and thus the size
// of the maximum decoded string size can be calculated as
// max_size = 3 * b64size / 4
// https://stackoverflow.com/questions/13378815/base64-length-calculation
OPENSSL_assert((sizeof(encrypted_b64) - 1) % 4 == 0);
unsigned char encrypted[3 * (sizeof(encrypted_b64) - 1) / 4];
unsigned char decrypted[sizeof(encrypted) + 1]; // +1 for terminating 0
int sz_decoded, sz_decrypted;
// Note, do not use EVP_DecodeBlock for decoding from base64 as it returns
// wrong decoded length and ignores padding, see
// https://github.com/openssl/openssl/issues/17197
b64ctx = EVP_ENCODE_CTX_new();
EVP_DecodeInit(b64ctx);
if(EVP_DecodeUpdate(b64ctx, encrypted, &sz_decoded,
(const unsigned char*)encrypted_b64,
sizeof (encrypted_b64) - 1) < 0)
{
EVP_ENCODE_CTX_free(b64ctx);
ERROR("EVP_DecodeUpdate failed");
}
if(EVP_DecodeFinal(b64ctx, encrypted + sz_decoded, &len) <= 0)
{
EVP_ENCODE_CTX_free(b64ctx);
ERROR("EVP_DecodeFinal failed");
}
sz_decoded += len;
EVP_ENCODE_CTX_free(b64ctx);
OPENSSL_assert(sz_decoded <= sizeof(encrypted));
if(scrypt_kdf(
key, &sz_key, (const unsigned char*)password, sizeof(password)-1,
(const unsigned char*)salt, sizeof(salt)-1) <= 0)
{
ERROR("Key derivation failed");
}
OPENSSL_assert(sz_key == sizeof(key));
dctx = EVP_CIPHER_CTX_new();
if (EVP_DecryptInit_ex(dctx, EVP_aes_192_cbc(), NULL, key, iv) <= 0)
{
EVP_CIPHER_CTX_free(dctx);
ERROR("EVP_DecryptInit_ex failed");
}
if(EVP_CIPHER_CTX_set_key_length(dctx, 24) <= 0)
{
EVP_CIPHER_CTX_free(dctx);
ERROR("EVP_CIPHER_CTX_set_key_length failed");
}
if(EVP_DecryptUpdate(dctx, decrypted, &sz_decrypted,
encrypted, sz_decoded) <= 0)
{
EVP_CIPHER_CTX_free(dctx);
ERROR("EVP_DecryptUpdate failed");
}
if(EVP_DecryptFinal_ex(dctx, decrypted + sz_decrypted, &len) <= 0)
{
EVP_CIPHER_CTX_free(dctx);
ERROR("EVP_DecryptFinal_ex failed");
}
EVP_CIPHER_CTX_free(dctx);
sz_decrypted += len;
// do not forget the null terminator
decrypted[sz_decrypted] = 0;
qDebug() << (const char*)decrypted;
}
int main(void)
{
qDebug() << "example1_openssl_and_qt decryption:";
example1_openssl_and_qt();
qDebug() << "example1_pure_openssl decryption:";
example1_pure_openssl();
return 0;
}
I also attach the code I used to generate the additional encrypted data:
const crypto = require('crypto');
const algorithm = 'aes-192-cbc';
const password = 'bncaskdbvasbvlaslslasfhj';
const plaintext = 'Lorem ipsum dolor sit amet, consectetur adipiscing';
const key = crypto.scryptSync(password, 'GfG', 24);
const iv = Buffer.alloc(16, 0);
const cipher = crypto.createCipheriv(algorithm, key, iv);
const encrypted = Buffer.concat([cipher.update(plaintext), cipher.final()]);
console.log(encrypted.toString('base64'));
UPD
C++
void pbkdf2withsha256_pure_openssl(void)
{
int len; // general purpose length variable, used in EVP_*Update/EVP_*Final
EVP_ENCODE_CTX *b64ctx;
const int sz_key = 32;
unsigned char key[sz_key];
// Note, base64 encoding size is supposed to be b64size = (size + 2) / 3 * 4
// characters long, where size is the size of the source string; EVP_EncodeFinal
// additionally appends a trailing newline and a NUL terminator, hence the +2
// https://stackoverflow.com/questions/13378815/base64-length-calculation
unsigned char key_b64[(sz_key + 2) / 3 * 4 + 2];
int sz_key_b64;
const char password[] = "myPassw0rd";
const unsigned char salt[] = "mySalt";
if(PKCS5_PBKDF2_HMAC(password, sizeof(password) - 1, salt, sizeof(salt) - 1,
10000, EVP_sha256(), sz_key, key) < 1)
{
ERROR("PKCS5_PBKDF2_HMAC failed");
}
b64ctx = EVP_ENCODE_CTX_new();
EVP_EncodeInit(b64ctx);
if(EVP_EncodeUpdate(b64ctx, key_b64, &sz_key_b64, key, sz_key) < 1)
{
EVP_ENCODE_CTX_free(b64ctx);
ERROR("EVP_EncodeUpdate failed");
}
EVP_EncodeFinal(b64ctx, key_b64 + sz_key_b64, &len);
sz_key_b64 += len;
EVP_ENCODE_CTX_free(b64ctx);
qDebug() << (const char*)key_b64;
}
JS
const crypto = require('crypto');
crypto.pbkdf2('myPassw0rd', 'mySalt', 10000, 32,
'sha256', (err, key) => {
if (err) throw new Error();
console.log(key.toString('base64'))
})
I have found 3 methods to convert a Uint8Array to a BigInt, and all of them give different results for some reason. Could you please tell me which one is correct and which one I should use?
Using the bigint-conversion library. We can use the bigintConversion.bufToBigint() function to get a BigInt. The implementation is as follows:
export function bufToBigint (buf: ArrayBuffer|TypedArray|Buffer): bigint {
let bits = 8n
if (ArrayBuffer.isView(buf)) bits = BigInt(buf.BYTES_PER_ELEMENT * 8)
else buf = new Uint8Array(buf)
let ret = 0n
for (const i of (buf as TypedArray|Buffer).values()) {
const bi = BigInt(i)
ret = (ret << bits) + bi
}
return ret
}
Using DataView:
let view = new DataView(arr.buffer, 0);
let result = view.getBigUint64(0, true);
Using a FOR loop:
let result = BigInt(0);
for (let i = arr.length - 1; i >= 0; i++) {
result = result * BigInt(256) + BigInt(arr[i]);
}
I'm honestly confused about which one is right, since all of them give different results, but they do give results.
I'm fine with either BE or LE, but I'd just like to know why these 3 methods give different results.
One reason for the different results is that they use different endianness.
Let's turn your snippets into a form where we can execute and compare them:
let source_array = new Uint8Array([
0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88,
0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11]);
let buffer = source_array.buffer;
function method1(buf) {
let bits = 8n
if (ArrayBuffer.isView(buf)) {
bits = BigInt(buf.BYTES_PER_ELEMENT * 8)
} else {
buf = new Uint8Array(buf)
}
let ret = 0n
for (const i of buf.values()) {
const bi = BigInt(i)
ret = (ret << bits) + bi
}
return ret
}
function method2(buf) {
let view = new DataView(buf, 0);
return view.getBigUint64(0, true);
}
function method3(buf) {
let arr = new Uint8Array(buf);
let result = BigInt(0);
for (let i = arr.length - 1; i >= 0; i--) {
result = result * BigInt(256) + BigInt(arr[i]);
}
return result;
}
console.log(method1(buffer).toString(16));
console.log(method2(buffer).toString(16));
console.log(method3(buffer).toString(16));
Note that this includes a bug fix for method3: where you wrote for (let i = arr.length - 1; i >= 0; i++), you clearly meant i-- at the end.
For "method1" this prints: ffeeddccbbaa998877665544332211
Because method1 is a big-endian conversion (first byte of the array is most-significant part of the result) without size limit.
For "method2" this prints: 8899aabbccddeeff
Because method2 is a little-endian conversion (first byte of the array is least significant part of the result) limited to 64 bits.
If you switch the second getBigUint64 argument from true to false, you get big-endian behavior: ffeeddccbbaa9988.
To eliminate the size limitation, you'd have to add a loop: using getBigUint64 you can get 64-bit chunks, which you can assemble using shifts similar to method1 and method3.
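For illustration, such a loop could look like this (my own helper, not from the question; big-endian, arbitrary length):
function bufferToBigIntBE(buf) {
let view = new DataView(buf);
let result = 0n;
let i = 0;
// consume 64-bit chunks while enough bytes remain
for (; i + 8 <= view.byteLength; i += 8) {
result = (result << 64n) | view.getBigUint64(i, false);
}
// consume the remaining tail byte by byte
for (; i < view.byteLength; i++) {
result = (result << 8n) | BigInt(view.getUint8(i));
}
return result;
}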
For "method3" this prints: 112233445566778899aabbccddeeff
Because method3 is a little-endian conversion without size limit. If you reverse the for-loop's direction, you'll get the same big-endian behavior as method1: result * 256n gives the same value as result << 8n; the latter is a bit faster.
(Side note: BigInt(0) and BigInt(256) are needlessly verbose, just write 0n and 256n instead. Additional benefit: 123456789123456789n does what you'd expect, BigInt(123456789123456789) does not.)
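For example:
console.log(123456789123456789n); // 123456789123456789n
console.log(BigInt(123456789123456789)); // 123456789123456784n: the literal was already rounded as a double before BigInt ever saw it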
So which method should you use? That depends on:
(1) Do your incoming arrays assume BE or LE encoding?
(2) Are your BigInts limited to 64 bits or arbitrarily large?
(3) Is this performance-critical code, or are all approaches "fast enough"?
Taking a step back: if you control both parts of the overall process (converting BigInts to Uint8Array, then transmitting/storing them, then converting back to BigInt), consider simply using hexadecimal strings instead: that'll be easier to code, easier to debug, and significantly faster. Something like:
function serialize(bigint) {
return "0x" + bigint.toString(16);
}
function deserialize(serialized_bigint) {
return BigInt(serialized_bigint);
}
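A quick round-trip check:
const n = 123456789123456789n;
console.log(deserialize(serialize(n)) === n); // true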
If you need to store really big integers that aren't bound to 64 or 128 bits, and also keep negative numbers, then this is a solution for you...
function encode(n) {
let hex, bytes
// shift all numbers 1 step to the left and xor if less than 0
n = (n << 1n) ^ (n < 0n ? -1n : 0n)
// convert to hex
hex = n.toString(16)
// pad if necessary
if (hex.length % 2) hex = '0' + hex
// convert hex to bytes
bytes = hex.match(/.{1,2}/g).map(byte => parseInt(byte, 16))
return bytes
}
function decode(bytes) {
let hex, n
// convert bytes back into hex
hex = bytes.map(e => e.toString(16).padStart(2, 0)).join('')
// Convert hex to BigInt
n = BigInt(`0x`+hex)
// shift right and xor with -1 if the lowest (sign) bit was set
n = (n >> 1n) ^ (n & 1n ? -1n : 0n)
return n
}
const input = document.querySelector('input')
input.oninput = () => {
console.clear()
const bytes = encode(BigInt(input.value))
// TODO: Save or transmit this bytes
// new Uint8Array(bytes)
console.log(bytes.join(','))
const n = decode(bytes)
console.log(n.toString(10)+'n') // cuz SO can't render bigints...
}
input.oninput()
<input type="number" value="-39287498324798237498237498273323423" style="width: 100%">
I'm converting Rijndael decryption from C# to NodeJS.
The Key (or Passphrase) used is 13 characters long. The IV used is 17 characters long.
Note: I have no control over the length choice
Below is the Rijndael decryption in C#
using System;
using System.IO;
using System.Security.Cryptography;
using System.Text;
public class Program
{
public class CryptoProvider
{
private ICryptoTransform encryptor = (ICryptoTransform)null;
private ICryptoTransform decryptor = (ICryptoTransform)null;
private int minSaltLen = -1;
private int maxSaltLen = -1;
public CryptoProvider(string passPhrase, string initVector)
: this(passPhrase, initVector, -1, -1, -1, (string)null, (string)null, 3)
{
}
public CryptoProvider(
string passPhrase,
string initVector,
int minSaltLen,
int maxSaltLen,
int keySize,
string hashAlgorithm,
string saltValue,
int passwordIterations)
{
this.minSaltLen = 4;
this.maxSaltLen = 8;
keySize = 256;
hashAlgorithm = "SHA512";
byte[] rgbIV = Encoding.ASCII.GetBytes(initVector);
byte[] rgbSalt = new byte[0];
byte[] bytes = new PasswordDeriveBytes(passPhrase, rgbSalt, hashAlgorithm, passwordIterations).GetBytes(keySize / 8);
RijndaelManaged rijndaelManaged = new RijndaelManaged();
if (rgbIV.Length == 0)
rijndaelManaged.Mode = CipherMode.ECB;
else
rijndaelManaged.Mode = CipherMode.CBC;
this.encryptor = rijndaelManaged.CreateEncryptor(bytes, rgbIV);
this.decryptor = rijndaelManaged.CreateDecryptor(bytes, rgbIV);
}
public string Decrypt(string cipherText) {
return this.Decrypt(Convert.FromBase64String(cipherText));
}
public string Decrypt(byte[] cipherTextBytes) {
return Encoding.UTF8.GetString(this.DecryptToBytes(cipherTextBytes));
}
public byte[] DecryptToBytes(string cipherText) {
return this.DecryptToBytes(Convert.FromBase64String(cipherText));
}
public byte[] DecryptToBytes(byte[] cipherTextBytes)
{
int num = 0;
int sourceIndex = 0;
MemoryStream memoryStream = new MemoryStream(cipherTextBytes);
byte[] numArray = new byte[cipherTextBytes.Length];
lock (this)
{
CryptoStream cryptoStream = new CryptoStream((Stream)memoryStream, this.decryptor, CryptoStreamMode.Read);
num = cryptoStream.Read(numArray, 0, numArray.Length);
memoryStream.Close();
cryptoStream.Close();
}
if (this.maxSaltLen > 0 && this.maxSaltLen >= this.minSaltLen)
sourceIndex = (int)numArray[0] & 3 | (int)numArray[1] & 12 | (int)numArray[2] & 48 | (int)numArray[3] & 192;
byte[] destinationArray = new byte[num - sourceIndex];
Array.Copy((Array)numArray, sourceIndex, (Array)destinationArray, 0, num - sourceIndex);
return destinationArray;
}
}
public static void Main()
{
string Key = "";
string IV = "";
string encryptedUserData = "u7uENpFfpQhMXiTThL/ajA==";
string decryptedUserData;
CryptoProvider crypto = new CryptoProvider(Key, IV);
decryptedUserData = crypto.Decrypt(encryptedUserData.Trim());
Console.WriteLine(decryptedUserData);
}
}
which, for some reason, I can decrypt in dotnetfiddle but not in Visual Studio (it returns the error 'Specified initialization vector (IV) does not match the block size for this algorithm. (Parameter 'rgbIV')').
Below is my attempt to convert in NodeJS using the rijndael-js library:
const Rijndael = require("rijndael-js");
const key = "";
const iv = "";
const cipher = new Rijndael(key, "cbc");
const ciphertext = "u7uENpFfpQhMXiTThL/ajA==";
const plaintext = Buffer.from(cipher.decrypt(ciphertext, 256, iv));
which returns an error of Unsupported key size: 104 bit
All errors point to the same thing: invalid Key/IV lengths.
Would there be a workaround where I can force NodeJS to accept the Key and IV as valid lengths? Is there something I am missing, doing incorrectly, or have misconfigured?
Edit:
I was able to find a PasswordDeriveBytes implementation for NodeJS, compared the results with C#, and they are equal.
I updated my NodeJS implementation (see sandbox) and noticed a few things:
All resulting ciphertexts are the same. I am guessing this stems from the salts.
I tried decrypting a ciphertext generated from C#, but there seem to be a few extra characters to the left of the resulting value.
Example: C# encrypted string: zAqv5w/gwT0sFYXZEx+Awg==, NodeJS decrypted string: ���&��4423
When I try to decrypt a ciphertext generated in NodeJS in C#, it throws System.Security.Cryptography.CryptographicException: Padding is invalid and cannot be removed.
Edit:
C# code (executable with .NET Framework 4.7.2):
using System;
using System.IO;
using System.Security.Cryptography;
using System.Text;
namespace ProgramEncrypt
{
public class CryptoProvider
{
private ICryptoTransform encryptor = (ICryptoTransform)null;
private ICryptoTransform decryptor = (ICryptoTransform)null;
private int minSaltLen = -1;
private int maxSaltLen = -1;
public CryptoProvider(string passPhrase, string initVector) : this(passPhrase, initVector, -1, -1, -1, (string)null, (string)null, 3) { }
public CryptoProvider(
string passPhrase,
string initVector,
int minSaltLen,
int maxSaltLen,
int keySize,
string hashAlgorithm,
string saltValue,
int passwordIterations)
{
this.minSaltLen = 4;
this.maxSaltLen = 8;
keySize = 256;
hashAlgorithm = "SHA512";
byte[] rgbIV = Encoding.ASCII.GetBytes(initVector);
byte[] rgbSalt = new byte[0];
byte[] bytes = new PasswordDeriveBytes(passPhrase, rgbSalt, hashAlgorithm, passwordIterations).GetBytes(keySize / 8);
RijndaelManaged rijndaelManaged = new RijndaelManaged();
if (rgbIV.Length == 0)
rijndaelManaged.Mode = CipherMode.ECB;
else
rijndaelManaged.Mode = CipherMode.CBC;
this.encryptor = rijndaelManaged.CreateEncryptor(bytes, rgbIV);
this.decryptor = rijndaelManaged.CreateDecryptor(bytes, rgbIV);
}
public string Encrypt(string plainText) => this.Encrypt(Encoding.UTF8.GetBytes(plainText));
public string Encrypt(byte[] plainTextBytes) => Convert.ToBase64String(this.EncryptToBytes(plainTextBytes));
public byte[] EncryptToBytes(string plainText) => this.EncryptToBytes(Encoding.UTF8.GetBytes(plainText));
public byte[] EncryptToBytes(byte[] plainTextBytes)
{
byte[] buffer = this.AddSalt(plainTextBytes);
MemoryStream memoryStream = new MemoryStream();
lock (this)
{
CryptoStream cryptoStream = new CryptoStream((Stream)memoryStream, this.encryptor, CryptoStreamMode.Write);
cryptoStream.Write(buffer, 0, buffer.Length);
cryptoStream.FlushFinalBlock();
byte[] array = memoryStream.ToArray();
memoryStream.Close();
cryptoStream.Close();
return array;
}
}
public string Decrypt(string cipherText) => this.Decrypt(Convert.FromBase64String(cipherText));
public string Decrypt(byte[] cipherTextBytes) => Encoding.UTF8.GetString(this.DecryptToBytes(cipherTextBytes));
public byte[] DecryptToBytes(string cipherText) => this.DecryptToBytes(Convert.FromBase64String(cipherText));
public byte[] DecryptToBytes(byte[] cipherTextBytes)
{
int num = 0;
int sourceIndex = 0;
MemoryStream memoryStream = new MemoryStream(cipherTextBytes);
byte[] numArray = new byte[cipherTextBytes.Length];
lock (this)
{
CryptoStream cryptoStream = new CryptoStream((Stream)memoryStream, this.decryptor, CryptoStreamMode.Read);
num = cryptoStream.Read(numArray, 0, numArray.Length);
memoryStream.Close();
cryptoStream.Close();
}
if (this.maxSaltLen > 0 && this.maxSaltLen >= this.minSaltLen)
sourceIndex = (int)numArray[0] & 3 | (int)numArray[1] & 12 | (int)numArray[2] & 48 | (int)numArray[3] & 192;
byte[] destinationArray = new byte[num - sourceIndex];
Array.Copy((Array)numArray, sourceIndex, (Array)destinationArray, 0, num - sourceIndex);
return destinationArray;
}
private byte[] AddSalt(byte[] plainTextBytes)
{
if (this.maxSaltLen == 0 || this.maxSaltLen < this.minSaltLen)
return plainTextBytes;
byte[] salt = this.GenerateSalt();
byte[] destinationArray = new byte[plainTextBytes.Length + salt.Length];
Array.Copy((Array)salt, (Array)destinationArray, salt.Length);
Array.Copy((Array)plainTextBytes, 0, (Array)destinationArray, salt.Length, plainTextBytes.Length);
return destinationArray;
}
private byte[] GenerateSalt()
{
int length = this.minSaltLen != this.maxSaltLen ? this.GenerateRandomNumber(this.minSaltLen, this.maxSaltLen) : this.minSaltLen;
byte[] data = new byte[length];
new RNGCryptoServiceProvider().GetNonZeroBytes(data);
data[0] = (byte)((int)data[0] & 252 | length & 3);
data[1] = (byte)((int)data[1] & 243 | length & 12);
data[2] = (byte)((int)data[2] & 207 | length & 48);
data[3] = (byte)((int)data[3] & 63 | length & 192);
return data;
}
private int GenerateRandomNumber(int minValue, int maxValue)
{
byte[] data = new byte[4];
new RNGCryptoServiceProvider().GetBytes(data);
return new Random(((int)data[0] & (int)sbyte.MaxValue) << 24 | (int)data[1] << 16 | (int)data[2] << 8 | (int)data[3]).Next(minValue, maxValue + 1);
}
public static void Main()
{
string Key = "HelL!oWoRL3ds";
string IV = "HElL!o#wOrld!##%$";
string toEncrypt = "1234";
string encryptedData, decryptedData;
CryptoProvider crypto = new CryptoProvider(Key, IV);
encryptedData = crypto.Encrypt(toEncrypt.Trim());
decryptedData = crypto.Decrypt(encryptedData.Trim());
Console.WriteLine("ENCRYPTED: " + encryptedData);
Console.WriteLine("DECRYPTED: " + decryptedData);
}
}
}
NodeJS code (codesandbox.io):
import { deriveBytesFromPassword } from "./deriveBytesFromPassword";
const Rijndael = require("rijndael-js");
const dataToEncrypt = "1234";
const SECRET_KEY = "HelL!oWoRL3ds"; // 13 chars
const SECRET_IV = "HElL!o#wOrld!##%$"; // 17 chars
const keySize = 256;
const hashAlgorithm = "SHA512";
// Use only the first 16 bytes of the IV
const rgbIV = Buffer.from(SECRET_IV, "ascii").slice(0, 16); // #ref https://stackoverflow.com/a/57147116/12278028
const rgbSalt = Buffer.from([]);
const derivedPasswordBytes = deriveBytesFromPassword(
SECRET_KEY,
rgbSalt,
3,
hashAlgorithm,
keySize / 8
);
const dataToEncryptInBytes = Buffer.from(dataToEncrypt, "utf8");
const cipher = new Rijndael(derivedPasswordBytes, "cbc");
const encrypted = Buffer.from(cipher.encrypt(dataToEncryptInBytes, 16, rgbIV));
console.log(encrypted.toString("base64"));
// Use this if you only have the Base64 string
// Note: The Base64 string in Line 34 is from C#
// const decrypted = Buffer.from(
// cipher.decrypt(Buffer.from("zAqv5w/gwT0sFYXZEx+Awg==", "base64"), 16, rgbIV)
// );
const decrypted = Buffer.from(cipher.decrypt(encrypted, 16, rgbIV));
console.log(decrypted.toString());
A possible NodeJS implementation based on your sandbox code that is compatible with the C# code is:
const crypto = require("crypto");
const Rijndael = require("rijndael-js");
const pkcs7 = require('pkcs7-padding');
const SECRET_KEY = "HelL!oWoRL3ds"; // 13 chars
const SECRET_IV = "HElL!o#wOrld!##%$"; // 17 chars
const rgbIV = Buffer.from(SECRET_IV, "ascii").slice(0, 16);
const rgbSalt = Buffer.from([]);
const keySize = 256;
const hashAlgorithm = "SHA512";
const minSaltLen = 4;
const maxSaltLen = 8;
function encrypt(plaintextStr) {
var derivedPasswordBytes = deriveBytesFromPassword(SECRET_KEY, rgbSalt, 3, hashAlgorithm, keySize/8);
var cipher = new Rijndael(derivedPasswordBytes, "cbc");
var plaintext = Buffer.from(plaintextStr, "utf8");
var salt = generateSalt();
var saltPlaintext = Buffer.concat([salt, plaintext])
var saltPlaintextPadded = pkcs7.pad(saltPlaintext, 16)
var ciphertext = Buffer.from(cipher.encrypt(saltPlaintextPadded, 128, rgbIV));
return ciphertext.toString("base64");
}
function decrypt(ciphertextB64) {
var derivedPasswordBytes = deriveBytesFromPassword(SECRET_KEY, rgbSalt, 3, hashAlgorithm, keySize/8);
var cipher = new Rijndael(derivedPasswordBytes, "cbc");
var ciphertext = Buffer.from(ciphertextB64, 'base64');
var saltPlaintextPadded = Buffer.from(cipher.decrypt(ciphertext, 128, rgbIV));
var sourceIndex = saltPlaintextPadded[0] & 3 | saltPlaintextPadded[1] & 12 | saltPlaintextPadded[2] & 48 | saltPlaintextPadded[3] & 192
var plaintextPadded = saltPlaintextPadded.subarray(sourceIndex)
var plaintext = pkcs7.unpad(plaintextPadded)
return plaintext;
}
function generateSalt() {
var length = minSaltLen != maxSaltLen ? crypto.randomInt(minSaltLen, maxSaltLen + 1) : minSaltLen;
var data = crypto.randomBytes(length);
data[0] = data[0] & 252 | length & 3;
data[1] = data[1] & 243 | length & 12;
data[2] = data[2] & 207 | length & 48;
data[3] = data[3] & 63 | length & 192;
return data;
}
var plaintext = "1234";
var ciphertextB64 = encrypt(plaintext);
var plaintext = decrypt(ciphertextB64);
console.log(ciphertextB64);
console.log(plaintext.toString('hex'))
using the key derivation from the linked post.
Ciphertexts generated with this code can be decrypted with the C# code, and vice versa, ciphertexts generated with the C# code can be decrypted with this code.
Explanation:
The linked C# code can process a 17-byte IV under .NET Framework (tested with 4.7.2). However, only the first 16 bytes are taken into account. With the addition rijndaelManaged.IV = rgbIV (as in the MS examples) an exception is thrown. Under .NET Core (tested with 3.0+) an exception is always thrown. This indicates that the .NET Framework's acceptance of an oversized IV is most likely a bug. Anyway, in the NodeJS code also only the first 16 bytes of the IV have to be considered.
The C# code uses the proprietary key derivation PasswordDeriveBytes. The same key derivation must be applied in the NodeJS code. In the code above, the implementation linked by the OP is used.
The rijndael-js library involved applies Zero padding, but the C# code uses PKCS#7 padding. Therefore, in the NodeJS code, the plaintext (or the concatenation of salt and plaintext) must be padded with PKCS#7 before encryption (this satisfies the length criterion, so Zero padding is no longer applied). Accordingly, the padding must be removed after decryption. A possible library is pkcs7-padding. Alternatively, instead of rijndael-js, another library could be used which applies PKCS#7 padding by default.
The C# code uses two salts: One is the empty (!) rgbSalt, which is applied in the key derivation. The other is a second salt, which is randomly generated with respect to both length and content during encryption, is prepended to the plaintext, and contains the information about the salt length, which is determined during decryption. This logic must be implemented in the NodeJS code for both codes to be compatible.
The GenerateRandomNumber() method cannot be ported because its result depends on the internal details of the Random() implementation (which, by the way, is not a CSPRNG). The method is supposed to generate a random integer; for this purpose crypto.randomInt() is used. For RNGCryptoServiceProvider#GetNonZeroBytes(), crypto.randomBytes() is applied. This NodeJS function can also return 0x00 bytes, which could be optimized if needed.
Security:
The proprietary key derivation PasswordDeriveBytes is deprecated and insecure. Instead, Rfc2898DeriveBytes should be used in the C# code and PBKDF2 in the NodeJS code.
The missing salt in the key derivation is insecure and allows attacks e.g. via rainbow tables. Instead, a salt of sufficient size (at least 8 bytes) should be randomly generated for each encryption. This salt is not secret and is therefore usually concatenated with the ciphertext.
The C# implementation uses a static IV, which is insecure as well. Although the random second salt provides a different ciphertext for identical plaintexts and identical IVs, a best practice should be applied instead of a user defined construct. A proven way is a randomly generated IV, analogous to the salt used for key derivation (randomly generated for each encryption, concatenated with the ciphertext).
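For completeness, a minimal sketch of that recommended construction on the NodeJS side (my code; the iteration count and sizes are illustrative, not prescribed):
const crypto = require('crypto');
function encryptModern(plaintext, password) {
// fresh random salt and IV per encryption, both prepended to the ciphertext
const salt = crypto.randomBytes(16);
const iv = crypto.randomBytes(16);
const key = crypto.pbkdf2Sync(password, salt, 100000, 32, 'sha256');
const cipher = crypto.createCipheriv('aes-256-cbc', key, iv);
const ct = Buffer.concat([cipher.update(plaintext, 'utf8'), cipher.final()]);
return Buffer.concat([salt, iv, ct]).toString('base64');
}
function decryptModern(payloadB64, password) {
const payload = Buffer.from(payloadB64, 'base64');
const salt = payload.subarray(0, 16);
const iv = payload.subarray(16, 32);
const ct = payload.subarray(32);
const key = crypto.pbkdf2Sync(password, salt, 100000, 32, 'sha256');
const decipher = crypto.createDecipheriv('aes-256-cbc', key, iv);
return Buffer.concat([decipher.update(ct), decipher.final()]).toString('utf8');
}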
I'm learning about Blockchain and wanted to create an example of creating an address, purely for educational purposes - WOULD NOT BE DONE ANYWHERE NEAR PRODUCTION.
Task: create 160 random bits, convert it to hex, convert that to base 58, then to test correctness by reversing the process.
It kind of works; however, I get intermittent 'false' on comparing the before and after binary. The hexStringToBinary function returns strings of varying lengths:
const bs58 = require('bs58');
//20 * 8 = 160
function generate20Bytes () {
let byteArray = [];
let bytes = 0;
while (bytes < 20) {
let byte = '';
while (byte.length < 8) {
byte += Math.floor(Math.random() * 2);
}
byteArray.push(byte);
bytes++;
}
return byteArray;
}
//the issue is probably from here
function hexStringToBinary (string) {
return string.match(/.{1,2}/g)
.map(hex => parseInt(hex, 16).toString(2).padStart(8, '0'));
}
const addressArray = generate20Bytes();
const binaryAddress = addressArray.join('');
const hex = addressArray.map(byte => parseInt(byte, 2).toString(16)).join('');
console.log(hex);
// then lets convert it to base 58
const base58 = bs58.encode(Buffer.from(hex));
console.log('base 58');
console.log(base58);
// lets see if we can reverse the process
const destructuredHex = bs58.decode(base58).toString();
console.log('hex is the same');
console.log(hex === destructuredHex);
// lets convert back to a binary string
const destructuredAddress = hexStringToBinary(destructuredHex).join('');
console.log('destructured address');
console.log(destructuredAddress);
console.log('binaryAddress address');
console.log(binaryAddress);
//intermittent false/true
console.log(destructuredAddress === binaryAddress);
Got round to refactoring with TDD. Realised it wasn't zero-filling hex values < 16. My playground repo
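For reference, the fix amounts to zero-padding each byte to two hex digits, and encoding raw bytes rather than the ASCII of the hex string (a sketch against the same bs58 API; note decode may return a Uint8Array depending on the library version):
const hex = addressArray
.map(byte => parseInt(byte, 2).toString(16).padStart(2, '0'))
.join('');
// encode the raw bytes, not the ASCII of the hex string
const base58 = bs58.encode(Buffer.from(hex, 'hex'));
// and reverse accordingly
const destructuredHex = Buffer.from(bs58.decode(base58)).toString('hex');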
BigQuery uses Javascript for its user-defined functions. Input and outputs that are BYTES in BigQuery are mapped to and from base64-encoded strings in Javascript.
BigQuery doesn't have the browser window object, so atob and btoa are missing. Is there an easy way to encode and decode in the Bigquery JS environment, or do you have to include a library for doing the mapping?
You'll need to include a library, but it's fairly straightforward once you get the JavaScript onto Cloud Storage, and you can use this approach for other common libraries that you want to import. I found an implementation in a StackOverflow post, and I put these contents in a file named btoa_atob.js:
(function () {
var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=';
function InvalidCharacterError(message) {
this.message = message;
}
InvalidCharacterError.prototype = new Error;
InvalidCharacterError.prototype.name = 'InvalidCharacterError';
// encoder
// [https://gist.github.com/999166] by [https://github.com/nignag]
btoa = function (input) {
var str = String(input);
for (
// initialize result and counter
var block, charCode, idx = 0, map = chars, output = '';
// if the next str index does not exist:
// change the mapping table to "="
// check if d has no fractional digits
str.charAt(idx | 0) || (map = '=', idx % 1);
// "8 - idx % 1 * 8" generates the sequence 2, 4, 6, 8
output += map.charAt(63 & block >> 8 - idx % 1 * 8)
) {
charCode = str.charCodeAt(idx += 3/4);
if (charCode > 0xFF) {
throw new InvalidCharacterError("'btoa' failed: The string to be encoded contains characters outside of the Latin1 range.");
}
block = block << 8 | charCode;
}
return output;
};
// decoder
// [https://gist.github.com/1020396] by [https://github.com/atk]
atob = function (input) {
var str = String(input).replace(/[=]+$/, ''); // #31: ExtendScript bad parse of /=
if (str.length % 4 == 1) {
throw new InvalidCharacterError("'atob' failed: The string to be decoded is not correctly encoded.");
}
for (
// initialize result and counters
var bc = 0, bs, buffer, idx = 0, output = '';
// get next character
buffer = str.charAt(idx++);
// character found in table? initialize bit storage and add its ascii value;
~buffer && (bs = bc % 4 ? bs * 64 + buffer : buffer,
// and if not first of each 4 characters,
// convert the first 8 bits to one ascii character
bc++ % 4) ? output += String.fromCharCode(255 & bs >> (-2 * bc & 6)) : 0
) {
// try to find character in table (0-63, not found => -1)
buffer = chars.indexOf(buffer);
}
return output;
};
}());
Then I copied the file to my Cloud Storage:
gsutil cp btoa_atob.js gs://my-bucket/
Then I wrote a dummy function that uses it:
#standardSQL
CREATE TEMP FUNCTION Foo(b BYTES) RETURNS STRING LANGUAGE js AS """
var result = atob(b);
// ... process result of atob.
return result;
"""
OPTIONS (library='gs://my-bucket/btoa_atob.js');
SELECT Foo(b'\xa0b1\xff\xee');
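One caveat worth noting: atob returns a binary string, not an array of bytes. If the UDF needs the individual byte values, a small helper like the following (my addition, not part of the polyfill above) converts it:
// turn atob's binary string into an array of byte values (0-255)
function binaryStringToBytes(s) {
var bytes = [];
for (var i = 0; i < s.length; i++) {
bytes.push(s.charCodeAt(i));
}
return bytes;
}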