How to store 256 bit Hex in javascript - javascript

how can i handle a 64 character hexadecimal (256 bit) in Javascript.
> b = 0x1936c240636390dc823e3a728e94b208eb53c6756d81da57ec3425e05d43ac10
// 1.1404571630774433e+76
> b.toString(16)
// '1936c24063639100000000000000000000000000000000000000000000000000'
When I do that, I lose accuracy.
Edited:
Unfortunately, in the following scenario JS transforms the input parameter:
function uint256(hexNumber){
// NOTE(review): when this is called with a numeric literal (as in the call
// below), the value has already been rounded to an IEEE-754 double (53
// significant bits) before this function runs — the precision is lost at the
// call site, not here. Pass the hex as a string ("0x19…") so BigInt() can
// parse it exactly.
let bigNumber = BigInt(hexNumber);
// NOTE(review): `logger` is not defined anywhere in this snippet — confirm it
// is provided by the surrounding application.
logger.info('Hex:'+ bigNumber.toString(16)+' to bigInt '+bigNumber);
return bigNumber.toString(16);
}
so that the call gives me:
uint256(0x1936c240636390dc823e3a728e94b208eb53c6756d81da57ec3425e05d43ac10)
//Hex:1936c24063639100000000000000000000000000000000000000000000000000 to bigInt 11404571630774432649143590157348217225799776391499391730068078364228824596480
For context, I'm trying to transcribe a large Solidity function which looks like this, only with a 520-element array allocation instead of six.
vk.a = Pairing.G1Point(uint256(0x1936c240636390dc823e3a728e94b208eb53c6756d81da57ec3425e05d43ac10), uint256(0x2d70ff78e8216bf29d58923a686d9738278b8ce2fd822e197c85b09286d15566));
vk.b = Pairing.G2Point([uint256(0x2b4daf047abe2e7f0b311118c1b963b63695dc0d769cea78849604434de055bf), uint256(0x29c13ecb6f33dbc4b3b8a02e2e255511ce4c26a8a2f299efcc94caf2de4fce00)], [uint256(0x1da9020008df7f549751f8a251af3b2dc4a2ad3e0870de54acaedd9fc1b47e17), uint256(0x25ea0d7e2b29de431b86a943db30dbf4d98f68df9ca8a9628d14d1591e817d90)]);
vk.gamma = Pairing.G2Point([uint256(0x011016e22ae045444f50fb80f246ec486c7e02af09132cd38c4fcf484983e4f2), uint256(0x00e83c788c2878d1d5eba3ed49b0d81e4c0487dedc3e4d1c2baab5833785b62f)], [uint256(0x05eb89e741ed5b5d611cebf92d1ed02cd6f3311089f0d400df7d9ced5a48fd41), uint256(0x132a90a3b0d369ccd66e2a5ba04a935e44d8ad5dca93a76bba592a578130a911)]);
vk.delta = Pairing.G2Point([uint256(0x065f6a3323a2abffd621fc263f348eb914904b68d5897729ae34a6b9d33f0852), uint256(0x0c3b60f59d3bd50328a04c0ff6d979199685d0526f89f6ac29d6174ce24707a2)], [uint256(0x26e7ebce2b44efef6b6315938e33f0a8ecc82dbad635c9efa681ed85bbb59982), uint256(0x12e0f3721230a0f38f6c9913048d5230fd2615ef3ff7f6ee4b20dfe0bdea1a86)]);
//vk.gamma_abc = new Pairing.G1Point[](520);
vk.gamma_abc[0] = Pairing.G1Point(uint256(0x196af2b7293f4166f93674ac9a094d1d280285ee936d12c0f009d8c0ceb7275c), uint256(0x2d536bc8c9d330f9e10ab4db1210b14c5caa794064a34aeeb92d35a2142e1003));
vk.gamma_abc[1] = Pairing.G1Point(uint256(0x17519a69d3ab06c7376e8b441eaf8abf240bfba37fa92d203a57ee4e7ebcc09a), uint256(0x0afafa533a58968132a300d25f3e2e31efb50713c60dd5e62093eb9ac68947cd));
vk.gamma_abc[2] = Pairing.G1Point(uint256(0x09c5badd7ac47ceadcdc87260b470e1bc173a2e281d0ee2bbe1e998761906a1c), uint256(0x10225903a96deb54a81d5b2fa1f0e19c080ae66e219038c421e61b55abc6e8c2));
vk.gamma_abc[3] = Pairing.G1Point(uint256(0x0d1cb6242e59537351ba89b729849ccf46909b6f4de62d661541dcee62d4ab91), uint256(0x086240d4e29a207011e2ef99d35db84fcc0d85f805e0102ddbbee0d1ca5c814a));
vk.gamma_abc[4] = Pairing.G1Point(uint256(0x004b8d16937ab63656090b905a085f8b4209c1122d12173ed1d8681b4e2e9c2f), uint256(0x194925508ed037169cf95c94716551dbce6aad6c65a44fcccd44280461816e95));
vk.gamma_abc[5] = Pairing.G1Point(uint256(0x0e8d97d12590c0c3854e5a4ba14c802d4a66e6cfda02b950584014341ec261dc), uint256(0x2e8d13c1c8ee7a46f50ad7b74de98eaf4cae13360a537ef3f55998a914255045));
vk.gamma_abc[6] = Pairing.G1Point(uint256(0x16f625a3746225f60ae1d9891196b2baa06f8be90dd738ea1dbeefffb4c62d0c), uint256(0x2de7e064393312d12c372c87ade6b533e2c01ae7e45c0b94b892cf608e52f910));
I'm hoping to find an easy way to get this key into a JSON format, so i was thinking to add the following to the upper part:
// Minimal JS mirrors of the Solidity structs, so the verifying key can be
// assembled as a plain object and serialized to JSON.
const Pairing = {
  // Affine G1 point: two field elements.
  G1Point: (a, b) => ({ x: a, y: b }),
  // G2 point: each coordinate is a 2-element array of field elements.
  G2Point: (a, b) => ({ x: a, y: b }),
};

let vk = {
  a: {},
  b: {},
  gamma: {},      // FIX: was missing, although vk.gamma is assigned below
  gamma_abc: [],
  delta: {},
};

/**
 * Normalizes a Solidity uint256 literal to a lowercase hex string.
 *
 * IMPORTANT: pass the value as a *string* ("0x19…") or a BigInt. A numeric
 * literal has already been rounded to a 64-bit float before this function
 * runs, which is exactly the precision loss described above.
 *
 * @param {string|bigint} hexNumber - hex string (or BigInt) of the value
 * @returns {string} hex representation without the 0x prefix
 */
function uint256(hexNumber) {
  const bigNumber = BigInt(hexNumber);
  // FIX: `logger` is not defined in this snippet; calling it unconditionally
  // throws a ReferenceError. Log only when a logger is actually present.
  if (typeof logger !== 'undefined') {
    logger.info('Hex:' + bigNumber.toString(16) + ' to bigInt ' + bigNumber);
  }
  return bigNumber.toString(16);
}

You need to convert it to BigInt. If you want to store that value in a database, for example, then you need to store it as a string.
// Keep the 256-bit value as a string so it never passes through Number's
// 53-bit float precision; BigInt parses the hex string exactly.
const b = "0x1936c240636390dc823e3a728e94b208eb53c6756d81da57ec3425e05d43ac10";
const bigNumb = BigInt(b);
console.log(bigNumb.toString(16)); // round-trips the original hex digits
console.log(bigNumb.toString(10)); // same value in decimal

Related

Increment/decrement numbers and letters by 'n'

I have a function encryptString(string, shift).
If encryptString('c', 1), then c becomes d.
if the arguments are encryptString('c', -1), then c becomes b.
If shift is greater than 26, or less than 0, the letter/number wraps back around. Meaning in encryptString('a', 29), a becomes 'c'.
I believe this could be solved using the charCode() functions, but I started using reduce, and would like to solve it with what I have.
My attempt below can shift letters and numbers, but only if they're within the array length. If shift is greater than 26, it doesn't work.
I'd appreciate any input I can get.
/**
 * Caesar-shifts every letter/digit in `str` by `shift` positions, wrapping
 * within its alphabet (A-Z, a-z as mod 26; 0-9 as mod 10). Case is preserved.
 *
 * FIXES over the original:
 *  - wraps correctly for any shift (e.g. 29 or -266) via a sign-safe modulo;
 *    the old `|| caps[shift]` fallback broke outside the array bounds.
 *  - digit alphabet now includes '0' so digits wrap mod 10, matching the
 *    expected outputs ('z956qau', 29 => 'c845tdx').
 *
 * @param {string} str - input containing only [a-zA-Z0-9]
 * @param {number} shift - any integer, positive or negative
 * @returns {string} shifted string
 * @throws {Error} when str contains characters outside [a-zA-Z0-9]
 */
const encryptString = (str, shift) => {
  if (!str.match(/^[a-z0-9]+$/i)) { throw new Error('encryptString expects only letters and digits'); }
  const caps = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
  const lower = 'abcdefghijklmnopqrstuvwxyz';
  const digits = '0123456789';
  // Shift `ch` within `alphabet`; null when ch is not in this alphabet.
  const rotate = (alphabet, ch) => {
    const i = alphabet.indexOf(ch);
    if (i === -1) return null;
    const n = alphabet.length;
    // ((x % n) + n) % n keeps the index in [0, n) even for negative shifts.
    return alphabet[(((i + shift) % n) + n) % n];
  };
  return [...str]
    .map((ch) => rotate(caps, ch) ?? rotate(lower, ch) ?? rotate(digits, ch) ?? ch)
    .join('');
};
Some additional cases:
encryptString('JavaScript', 3) => 'MdydVfulsw' // passes
encryptString('mqblhw', -1) => 'lpakgv' // passes
encryptString('z956qau', 29) => 'c845tdx' //code above fails
encryptString('password123', -266) => 'jummqilx567' // code above fails

Javascript binary string, to hex, to base58, back to hex, back to string, random error

I'm learning about Blockchain and wanted to create an example of creating an address, purely for educational purposes - WOULD NOT BE DONE ANYWHERE NEAR PRODUCTION.
Task: create 160 random bits, convert it to hex, convert that to base 58, then to test correctness by reversing the process.
It kind of works, however I get intermittent 'false' on comparison of before and after binary. The hexStringToBinary function returns strings with varying lengths:
const bs58 = require('bs58');
//20 * 8 = 160
// Produces 20 random "bytes", each represented as an 8-character string of
// '0'/'1' characters (20 * 8 = 160 bits total).
function generate20Bytes () {
  const bytes = [];
  while (bytes.length < 20) {
    let bits = '';
    for (let i = 0; i < 8; i += 1) {
      bits += Math.floor(Math.random() * 2);
    }
    bytes.push(bits);
  }
  return bytes;
}
//the issue is probably from here
// Converts a hex string into an array of 8-bit binary strings, one entry per
// byte (2 hex chars), left-padded with zeros so every entry is 8 chars long.
function hexStringToBinary (string) {
  const bytePairs = string.match(/.{1,2}/g);
  return bytePairs.map((pair) => {
    const bits = parseInt(pair, 16).toString(2);
    return bits.padStart(8, '0');
  });
}
const addressArray = generate20Bytes();
const binaryAddress = addressArray.join('');
// NOTE(review): toString(16) emits a single digit for byte values < 16, so
// this hex string has a varying length — the root cause of the intermittent
// mismatch at the end (confirmed by the author's follow-up about zero
// filling). Each byte needs .padStart(2, '0').
const hex = addressArray.map(byte => parseInt(byte, 2).toString(16)).join('');
console.log(hex);
// then lets convert it to base 58
// NOTE(review): Buffer.from(hex) encodes the hex *text* (UTF-8 by default);
// Buffer.from(hex, 'hex') was probably intended if the goal is to base58-
// encode the underlying bytes — verify which behavior is wanted.
const base58 = bs58.encode(Buffer.from(hex));
console.log('base 58');
console.log(base58);
// lets see if we can reverse the process
const destructuredHex = bs58.decode(base58).toString();
console.log('hex is the same');
console.log(hex === destructuredHex);
// lets convert back to a binary string
// hexStringToBinary pads every byte to 8 bits, so any leading zero dropped
// when `hex` was built shifts this comparison out of alignment.
const destructuredAddress = hexStringToBinary(destructuredHex).join('');
console.log('destructured address');
console.log(destructuredAddress);
console.log('binaryAddress address');
console.log(binaryAddress);
//intermittent false/true
console.log(destructuredAddress === binaryAddress);
Got round to refactoring with tdd. Realised it wasn't zero filling hex < 16. My playground repo

Can't get Lotka-Volterra equations to oscillate stable with math.js

I'm trying to implement a simple Lotka-Volterra system in JavaScript, but get different result from what I see in academic papers and slides. This is my equations:
sim2.eval("dxdt(x, y) = (2 * x) - (x * y)");
sim2.eval("dydt(x, y) = (-0.25 * y) + (x * y)");
using coefficients a = 2, b = 1, c = 0.25 and d = 1. Yet, my result looks like this:
when I expected a stable oscillation as seen in these PDF slides:
Could it be the implementation of ndsolve that causes this? Or a machine error in JavaScript due to floating-point arithmetic?
Disregard, the error was simply using a too big evaluation step (dt = 0.1, must be 0.01 at least). The numerical method used is known for this problem.
For serious purposes use a higher order method, the minimum is fixed step classical Runge-Kutta. Then you can also use dt=0.1, it is stable for multiple periods, I tried tfinal=300 without problems. However you will see the step size in the graph as it is visibly piecewise linear. This is much reduced with half the step size, dt=0.05.
/**
 * Fixed-step classical Runge-Kutta (RK4) integrator for math.js.
 *
 * @param f    matrix of right-hand-side functions f_j(x_1, ..., x_m)
 * @param x0   math.js vector of initial values (cloned, not mutated)
 * @param dt   time step
 * @param tmax end time; nsteps = tmax / dt steps are taken
 * @returns math.js matrix whose rows are the state after each step
 */
function odesolveRK4(f, x0, dt, tmax) {
  var x = x0.clone();                       // current state vector
  var xh = [];                              // scratch state for intermediate stages
  var k1 = [], k2 = [], k3 = [], k4 = [];   // the four RK4 stage derivatives
  var result = [];                          // solution trajectory
  var nsteps = math.divide(tmax, dt);       // number of time steps
  // FIX: dt2 and dt6 were implicit globals in the original (no declaration),
  // leaking into the enclosing scope.
  var dt2 = math.divide(dt, 2);
  var dt6 = math.divide(dt, 6);
  for (var i = 0; i < nsteps; i++) {
    // compute the 4 stages of the classical order-4 Runge-Kutta method
    k1 = f.map(function (fj) { return fj.apply(null, x.toArray()); });
    xh = math.add(x, math.multiply(k1, dt2));
    k2 = f.map(function (fj) { return fj.apply(null, xh.toArray()); });
    xh = math.add(x, math.multiply(k2, dt2));
    k3 = f.map(function (fj) { return fj.apply(null, xh.toArray()); });
    xh = math.add(x, math.multiply(k3, dt));
    k4 = f.map(function (fj) { return fj.apply(null, xh.toArray()); });
    // x += dt/6 * (k1 + 2*k2 + 2*k3 + k4)
    x = math.add(x, math.multiply(math.add(math.add(k1, k4), math.multiply(math.add(k2, k3), 2)), dt6));
    // progress trace every 50 steps
    if (i % 50 === 0) console.log("%3d %o %o", i, dt, x.toString());
    result.push(x.clone());
  }
  return math.matrix(result);
}
math.import({odesolveRK4: odesolveRK4});

JavaScript Pattern Comparison

I'm working on a small machine learning theoretical algorithm using nodeJs.
My goal is to compare many array patterns to one source pattern then return how
similar they are, represented as a percent. For example, pattern1 may be 80% similar to the source pattern.
What can be the best method for determining percent similarity for one array to another?
What I've done so far..
//source
var soureSequence = [0.53,0.55,0.50,0.40,0.50,0.52,0.58,0.60]
//patterns to compare
var sequence1 = [0.53,0.54,0.49,0.40,0.50,0.52,0.58,0.60]
var sequence2 = [0.53,0.55,0.50,0.42,0.50,0.53,0.57,0.62]
Since I've chosen a percent based outcome , I figured I should base my source pattern off percentage change from first value to second value in array .
/**
 * Percent change from firstVal to secondVal.
 * Returns a tiny positive sentinel (0.00000001) instead of 0, NaN or
 * ±Infinity so downstream truthiness checks never drop a data point.
 *
 * @param {number|string} firstVal - baseline value
 * @param {number|string} secondVal - new value
 * @returns {number} percent change, or 0.00000001 when not finite / zero
 */
var percentChange = (firstVal, secondVal) => {
  var pChange = ((parseFloat(secondVal) - firstVal) /
    Math.abs(firstVal)) * 100.00;
  // FIX: the original `!pChange` check caught NaN and 0 but let ±Infinity
  // through (firstVal === 0 divides by zero), despite the comment saying
  // Infinity should be avoided. Number.isFinite covers all three cases.
  if (!Number.isFinite(pChange) || pChange === 0) {
    return 0.00000001;
  }
  return pChange;
};
Here I will generate my source pattern from my source sequence
/**
 * Builds a pattern of successive percent changes for a sequence:
 * one entry per adjacent pair of values.
 *
 * @param {number[]} sequence - input values
 * @returns {number[]} percent change between each consecutive pair
 */
var storePattern = function (sequence) {
  var pattern = [];
  // FIX: stop at length - 1. The original looped to sequence.length and
  // computed percentChange(last, undefined), which pushed a bogus trailing
  // 0.00000001 entry (the old `i != sequence.length` guard never fired
  // inside the loop).
  for (var i = 0; i < sequence.length - 1; i++) {
    var change = percentChange(sequence[i], sequence[i + 1]);
    if (change) {
      pattern.push(change);
    }
  }
  return pattern;
};
var sourcePattern = storePattern(soureSequence);
Now I will create more patterns to be compared
var testPattern1 = storePattern(sequence1);
var testPattern2 = storePattern(sequence2);
Below is my comparison function
/**
 * Rates how similar `target` is to `source` (both are percent-change
 * patterns). 100 means identical; lower values mean larger deviations.
 *
 * @param {number[]} source - reference pattern
 * @param {number[]} target - candidate pattern
 * @returns {number} average similarity rating (0 when nothing to compare)
 */
var processPattern = function (source, target) {
  var similarityArray = [];
  // FIX: walk only indices present in both patterns; the original iterated
  // target.length and read undefined from `source` on a length mismatch.
  var len = Math.min(source.length, target.length);
  for (var i = 0; i < len; i++) {
    // Compare percent change at index of target to source at the same index.
    var change = Math.abs(percentChange(target[i], source[i]));
    similarityArray.push(100.00 - change);
  }
  if (len === 0) {
    return 0; // nothing to compare
  }
  // Seeding reduce with 0 also makes it safe on short inputs.
  var rating = similarityArray.reduce(function (a, b) { return a + b; }, 0);
  // Percent rating is the average of the per-index similarities.
  return rating / len;
};
Now I can try to estimate the similarity
var similarityOfTest1 = processPattern(sourcePattern , testPattern1)
My problem is that this only works on sequences within the same range of value .. for example 0.50 , 0.52 .. the percent change in these values would not be the same for 0.20 , 0.22 but the value difference is the same ie -> 0.02
I thought about a difference in value based pattern but at this point I'm lost.
All answers will be considered . Thanks for the help!
Used reduce to get the difference, then the average.
// patterns to compare
const sequence1 = [0.53, 0.54, 0.49, 0.40, 0.50, 0.52, 0.58, 0.60];
const sequence2 = [0.53, 0.55, 0.50, 0.42, 0.50, 0.53, 0.57, 0.62];

/**
 * Compares `sequence` against a fixed reference sequence.
 * @returns {{delta: number[], average: number}} element-wise differences
 *   (reference minus candidate, first element skipped) and their mean.
 */
function diff(sequence) {
  const referenceSequence = [0.53, 0.55, 0.50, 0.40, 0.50, 0.52, 0.58, 0.60];
  const delta = [];
  // Index 0 is skipped, matching the original `i && r.push(...)` guard.
  for (let i = 1; i < referenceSequence.length; i += 1) {
    delta.push(referenceSequence[i] - sequence[i]);
  }
  const total = delta.reduce((a, b) => a + b);
  return { delta: delta, average: total / delta.length };
}
console.log('sequence1', diff(sequence1));
console.log('sequence2', diff(sequence2));
In my experience, the similarity of two vectors (arrays) is measured using the dot product, for example. Like it says in that link, you multiply each corresponding element of the arrays, add those up, then divide by the magnitude of each array (square root of the sum of the squares of each component). Rosetta Code has an example of the dot product in JavaScript, copied here:
// dotProduct :: [Int] -> [Int] -> Int
// Sum of element-wise products; undefined when the lengths differ.
const dotProduct = (xs, ys) => {
  if (xs.length !== ys.length) {
    return undefined;
  }
  let total = 0;
  for (let i = 0; i < xs.length; i += 1) {
    total += xs[i] * ys[i];
  }
  return total;
};
// zipWith :: (a -> b -> c) -> [a] -> [b] -> [c]
// Pairs xs with ys through f, truncating to the shorter length.
const zipWith = (f, xs, ys) => {
  const limit = Math.min(xs.length, ys.length);
  const out = [];
  for (let i = 0; i < limit; i += 1) {
    out.push(f(xs[i], ys[i]));
  }
  return out;
};
So, you would call
const score1 = dotProduct(sourceSequence, sequence1);
const score2 = dotProduct(sourceSequence, sequence2);
And whichever is bigger is the closer sequence to sourceSequence.
I'm not sure you need machine learning for this. You have a source pattern and you have some inputs and you basically want to perform a diff of the patterns.
Machine learning could be used to find the patterns, assuming you have some heuristic for measuring the error (if you're using unsupervised learning techniques) or you have sample sets to train the network.
But if you are simply wanting to measure the differences between one pattern and another pattern then just perform a diff operation. What you'll need to do is decide what differences your measuring and how to normalize the result.
I can't tell how exactly you would like to measure the similarity. I go by calculating the difference of corresponding items and accumulating these differences to see how much deviation it would result from the sum of the source array. You can play with the calculation the way you like.
// Accumulates the reference total and the absolute element-wise deviation,
// then expresses the deviation as a similarity percentage of the reference
// sum. Recurses over both arrays head-first.
function check([x, ...xs], [y, ...ys], state = { sumSource: 0, sumDiff: 0 }) {
  state.sumSource += x;
  state.sumDiff += Math.abs(x - y);
  if (xs.length) {
    return check(xs, ys, state);
  }
  const pct = 100 - (100 * state.sumDiff) / state.sumSource;
  return pct.toFixed(4) + "% similarity";
}
const soureSequence = [0.53, 0.55, 0.50, 0.40, 0.50, 0.52, 0.58, 0.60];
const sequence1 = [0.53, 0.54, 0.49, 0.40, 0.50, 0.52, 0.58, 0.60];
const sequence2 = [0.53, 0.55, 0.50, 0.42, 0.50, 0.53, 0.57, 0.62];
console.log(check(soureSequence, sequence1));
console.log(check(soureSequence, sequence2));

Inverting a binary value of a number

I would like first to convert a number to binary, then invert it bitwise.
Like this:
Number is 793 = 1100011001
then convert the binary value into: 0011100110
In JavaScript I can do the following:
var x = 793;
document.write(x.toString(2)); // gives 0011100110
This will give me the binary value of the number.
But how do I invert the binary bitwise?
I tried the ~ operator, but it is not working properly. The output is: -1100011010
MooGoo's answer is correct.
Here is some information about what is happening.... Lets assume this is a 64 bit integer.
793 = 1100011001
~793 = -794 = 1111111111111111111111111111111111111111111111111111110011100110
0x3ff = 1111111111
(~793 & 0x3ff) = 11100110
So you could do this to solve for all cases with this code:
// Invert only the significant bits of x by building a mask of the same width
// as x's binary representation.
var x = 793; // input value
var binary = x.toString(2);
var mask = Math.pow(2, binary.length) - 1; // e.g. 1111111111 for 10 bits
var result = ~x & mask;
document.write(result.toString(2) + "<br/>");
You need to use a bitmask.
(~793 & 0x3ff).toString(2) //11100110
Or with XOR
793 ^ 0x3ff
You want to XOR the binary value with 111111111 - however many 1s as there are digits in the original. So:
var x = 793;
// Build a run of '1' bits as long as x's binary form, then XOR to flip them.
var ones = parseInt("1".repeat(x.toString(2).length), 2);
var result = x ^ ones;
(Code for str_repeat taken from PHP.JS)
Revisiting years later, try:
var x = 793;
// Flip each binary digit arithmetically (1 - digit), then parse back to a
// number; note this drops any leading zeros of the flipped string.
var flipped = x.toString(2).replace(/[01]/g, function (n) { return 1 - n; });
var result = parseInt(flipped, 2);
I believe this will be more efficient... probably. Could be completely wrong. Oh well.
Not the shortest code but more readable. My technique is similar to #Ivo Wetzel:
// Returns the bitwise complement of N's binary string representation:
// 793 ("1100011001") becomes "0011100110". Leading zeros are kept because
// the result stays a string.
const bitwiseComplement = (N) => {
  let flipped = '';
  for (const bit of N.toString(2)) {
    flipped += bit == 1 ? 0 : 1;
  }
  return flipped;
};
console.log(bitwiseComplement(793));
One-liner javascript solution. Regex /[0-1]/g means match a single character present in the list below [0-1].
// One-liner variant: regex-replace each binary digit with its complement.
// The replacement value is coerced to a string by String.prototype.replace.
const bitwiseComplement = (N) =>
  N.toString(2).replace(/[0-1]/g, (v) => (v == 1 ? 0 : 1));
console.log(bitwiseComplement(793));
I just do this
Let's say x = -11.3 and it's a 16 bit integer from somewhere.
My results will go into variable r.
// Mask x to the low 15 bits, flip them, and add 1 — a two's-complement-style
// negation within the mask width.
// NOTE(review): 0x7FFF covers only 15 bits, although the text above says the
// value is a 16-bit integer — confirm the intended width (0xFFFF for 16 bits).
var r = ((x & 0x7FFF) ^ 0x7FFF)+1;
It's the kiss principle.
Update
It's unclear to me whether you want a string of the inverted value, if so you can do this:
// Flips '0' <-> '1' in x's binary string form; any other character (for
// instance the leading '-' of a negative number) passes through unchanged.
function invert(x){
  const out = [];
  for (const ch of x.toString(2)) {
    if (ch === '0') {
      out.push('1');
    } else if (ch === '1') {
      out.push('0');
    } else {
      out.push(ch);
    }
  }
  return out.join('');
}
invert(793); // '0011100110'
invert(-793); // '-0011100110'
This will also preserve leading zeros.

Categories

Resources