JavaScript - turning on bits

I have some understanding of bits and bytes and the concept of shifting, but no actual experience with it.
So:
I need to turn an array of true/false values into a buffer of 1344 bits (which I send using UDP packets).
The other side will evaluate the buffer bit by bit.
Since I'm new to Node.js, feel free to add tips or point me in new directions.
var arrBinary = new Array(1344);
for(i=0;i<1344;i++)arrBinary[i]=0;
// some code here, which will turn some of the array's elements to 1
var arrForBuffer = new Array(168);
for(i=0;i<168;i++)arrForBuffer[i]=0;
var x = Buffer.from(arrForBuffer);
/****** the question ******/
// How to change and set arrForBuffer so it will represent the arrBinary Bits state?

You can use some bitshifting as you said:
// arrForBuffer must be initialized with 0s
for(let i = 0; i < 1344; i++)
arrForBuffer[ Math.floor(i / 8) ] += arrBinary[i] << (7 - (i % 8));
For example, the first bit of arrBinary will be shifted left by 7 and added to the first byte, the second will be shifted left by 6, and so on. The ninth bit will be shifted left by 7 again and added to the second byte.
It might be more readable (and possibly more performant) if it were written as:
for (let byte = 0; byte < 168; byte++) {
  arrForBuffer[byte] =
    arrBinary[byte * 8 + 0] << 7 |
    arrBinary[byte * 8 + 1] << 6 |
    arrBinary[byte * 8 + 2] << 5 |
    arrBinary[byte * 8 + 3] << 4 |
    arrBinary[byte * 8 + 4] << 3 |
    arrBinary[byte * 8 + 5] << 2 |
    arrBinary[byte * 8 + 6] << 1 |
    arrBinary[byte * 8 + 7];
}
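Once arrForBuffer holds the 168 packed bytes, it can be turned into a Node.js Buffer and sent over UDP. A minimal sketch, assuming Node's built-in dgram module and placeholder HOST/PORT values for the receiver:
const dgram = require('dgram');
const HOST = '127.0.0.1'; // placeholder address of the receiving side
const PORT = 41234;       // placeholder port
const payload = Buffer.from(arrForBuffer); // 168 bytes = 1344 bits
const socket = dgram.createSocket('udp4');
socket.send(payload, PORT, HOST, (err) => {
  if (err) console.error(err);
  socket.close();
});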

JavaScript supports bit operations like every other major language. You can use the | and << operators to achieve this transformation:
const size = 16;
const packsize = 8;
const arrBinary = new Array(size).fill(false);
arrBinary[2] = true;
arrBinary[6] = true;
arrBinary[8] = true;
let arrForBuffer = new Array(size / packsize);
let acc = 0;
let byteCounter = 0;
for (let i = 0; i < arrBinary.length; i++) {
  if (arrBinary[i]) {
    acc |= 1 << (i % packsize);
  }
  if (i % packsize == packsize - 1) {
    arrForBuffer[byteCounter] = acc;
    byteCounter++;
    acc = 0;
  }
}
for (let i = 0; i < arrForBuffer.length; i++) {
  console.log(`${i}: ${arrForBuffer[i]}`);
}


Slow performance when unpacking bits for a tensor layer in tensorflow

I'm working with data that comes over a WebSocket connection with a StarCraft 2 client to obtain image data from a game that is in progress. In some cases, the image data may be set with a format of 1 bit per pixel. When this happens I need to "unpack" the bits from each byte in the response (1 byte => 8 bits). This is done in the code below:
function unpackbits(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte
let offset
for (let i = 0; i < uint8data.length; i++) {
byte = uint8data[i]
offset = (8 * i)
results[offset + 7] = ((byte & (1 << 0)) >> 0)
results[offset + 6] = ((byte & (1 << 1)) >> 1)
results[offset + 5] = ((byte & (1 << 2)) >> 2)
results[offset + 4] = ((byte & (1 << 3)) >> 3)
results[offset + 3] = ((byte & (1 << 4)) >> 4)
results[offset + 2] = ((byte & (1 << 5)) >> 5)
results[offset + 1] = ((byte & (1 << 6)) >> 6)
results[offset + 0] = ((byte & (1 << 7)) >> 7)
}
return results
}
This gets fed into a tensor like so:
static unpack_layer(plane) {
//Return a correctly shaped tensor given the feature layer bytes.//
const size = point.Point.build(plane.getSize()) // { x, y }
if (plane.getBitsPerPixel() === 1) {
data = unpackbits(data)
if (data.length !== (size.x * size.y)) {
// This could happen if the correct length isn't a multiple of 8, leading
// to some padding bits at the end of the string which are incorrectly
// interpreted as data.
data = data.slice(0, size.x * size.y)
}
}
data = tf.tensor(data, [size.y, size.x], 'int32')
return data
}
In one of my tests, this code gets run 1900 times and takes 0.0737s to execute.
This is very slow.
For comparison, the equivalent functionality in Python takes 0.0209s to run 1900 times. The Python code looks like this:
def unpack_layer(plane):
    """Return a correctly shaped numpy array given the feature layer bytes."""
    size = point.Point.build(plane.size)  # {x, y}
    data = np.frombuffer(plane.data, dtype=Feature.dtypes[plane.bits_per_pixel])
    if plane.bits_per_pixel == 1:
        data = np.unpackbits(data)
        if data.shape[0] != size.x * size.y:
            # This could happen if the correct length isn't a multiple of 8, leading
            # to some padding bits at the end of the string which are incorrectly
            # interpreted as data.
            data = data[:size.x * size.y]
    return data.reshape(size.y, size.x)
In short, it takes the javascript version roughly 4x as long as the python version.
I'll be looking at the numpy unpackbits documentation as that seems to be doing something much more efficient than my own approach -
However, I was wondering if anyone had any thoughts as to how I could better optimize my own unpackbits function or better yet a way to have TensorFlow do that for me?
Not sure if this helps, but am kicking myself as I got hung up on the need for bitwise operators in tensorflow in order to convert a byte stream into a bit stream, per the original question. Simple use of integer division and modulus can do the trick too!
In short, the algorithm by example is thus. Given byte stream of [ 92 ]...
Divide and mod by 16, resulting in 2 bytes, namely [ 5 ] and [ 12 ] respectively.
Interleave these results into a tensor [ 5, 12 ].
Take each of those values, and divide and mod by 4, resulting in [ 1, 3 ] and [ 1, 0 ].
Interleave these results into a tensor [ 1, 1, 3, 0 ].
Divide and mod by 2, resulting in [ 0, 0, 1, 0 ] and [ 1, 1, 1, 0 ].
Interleave into [ 0, 1, 0, 1, 1, 1, 0, 0 ] which is binary for 92.
Below are two versions of the same algorithm. One in tensorflow and one in pure javascript.
function tfDaC( stream ) {
const stream8bit = tf.tensor( stream, undefined, 'int32' );
console.time('in-tf');
const stream4bitHi = tf.div(stream8bit, tf.scalar(16, 'int32' ));
const stream4bitLo = tf.mod(stream8bit, tf.scalar(16, 'int32' ));
const stream4bit = tf.stack([stream4bitHi, stream4bitLo],1).flatten();
const stream2bitHi = tf.div( stream4bit, tf.scalar(4, 'int32' ));
const stream2bitLo = tf.mod(stream4bit, tf.scalar(4, 'int32' ));
const stream2bit = tf.stack([stream2bitHi, stream2bitLo],1).flatten();
const stream1bitHi = tf.div(stream2bit, tf.scalar(2, 'int32' ));
const stream1bitLo = tf.mod(stream2bit, tf.scalar(2, 'int32' ));
const stream1bit = tf.stack([stream1bitHi, stream1bitLo],1).flatten().toBool();
console.timeEnd('in-tf');
return stream1bit.dataSync().buffer;
}
function jsDaC( stream ) {
let result = new ArrayBuffer( stream.byteLength * 8 );
let buffer32 = new Uint32Array( result ); // Pointer to every 4 bytes!
for ( let i = 0; i < stream.byteLength; i++ ) {
let byte = stream[ i ];
buffer32[ (i * 2) |0 ] = ( byte / 16) |0;
buffer32[ (i * 2 + 1) |0 ] = ( byte % 16 ) |0;
}
let buffer16 = new Uint16Array( result ); // Pointer to every 2 bytes!
for ( let i = 0; i < buffer32.length; i++ ) {
let byte = buffer32[ i ];
buffer16[ (i * 2) |0 ] = ( byte / 4) |0;
buffer16[ (i * 2 + 1) |0 ] = ( byte % 4 ) |0;
}
let buffer8 = new Uint8Array( result ); // Pointer to every byte!
for ( let i = 0; i < buffer16.length; i++ ) {
let byte = buffer16[ i ];
buffer8[ (i * 2) |0 ] = ( byte / 2 ) |0;
buffer8[ (i * 2 + 1) |0 ] = ( byte % 2 ) |0;
}
return result;
}
console.log( 'Generating array of 1M bytes' );
let buffer = new ArrayBuffer( 1000000 );
let testArray = new Uint8Array( buffer );
for ( let i = 0; i < testArray.length; i++ ) {
testArray[ i ] = Math.floor( 256 * Math.random() );
}
let result;
console.log( 'Begin tensorflow divide & conquer test with 1M bytes.' );
console.time( 'tf' );
result = tfDaC( testArray );
console.timeEnd( 'tf' );
console.log( `End tensorflow test with 1M bytes resulting in array of ${result.byteLength} bytes` );
console.log( 'Begin javascript divide & conquer test with 1M bytes.' );
console.time( 'js' );
result = jsDaC( testArray );
console.timeEnd( 'js' );
console.log( `End javascript test with 1M bytes resulting in array of ${result.byteLength} bytes` );
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@2.0.1/dist/tf.min.js"></script>
The tensorflow performance was terrible on my workstation. I had to reduce the byte stream down to 1M bytes as my GPU was throwing memory errors at my previous test levels of a 10M byte stream. And even then at only 1M bytes, a handful of tests ranged from 1236ms to 1414ms. Not sure why it was so slow. Could possibly be the coercion of the numbers to int32, which might be adding a lot of overhead, as my understanding is that GPUs are generally built primarily for floating point operations. And marshalling the data onto and off of the GPU consumes some time too. Maybe it's worthwhile to try to convert this function to a floating-point-only function rather than int32...?! Maybe I grabbed a poor version of tensorflow.js...?! I'd be interested to hear how it runs in your Node.js configuration...
On the other hand, the JavaScript version for 1M bytes ranged from 30ms to 42ms, well over an order of magnitude(!) faster than the GPU. But still, when extrapolating these results to 10M bytes, this algorithm is still slower than all the other previous algorithms...
So not sure if this helps. It might simply help eliminate tensorflow as an option, although it might still be worthwhile trying floats rather than int32, but am not very hopeful...
It looks like tensorflow.js does not have a bitwise AND function, so I suspect doing the work within tensorflow.js will require some coding gymnastics...
One suggestion, though, is to create an array of 256 Uint8Array's of size 8, and pre-populate it with the complete list of 8-byte translations. This greatly reduces the repeated calculations for a byte stream that will likely have repeated values in the range of 0 - 255. E.g., the first entry in the precomputed array represents the unpacking of byte 0, and is therefore a Uint8Array of size 8 populated with 0's; the next entry is another Uint8Array of size 8 populated with 00000001; etc., all the way to the entry representing byte 255, which is a Uint8Array of size 8 populated with all 1's.
Then, when unpacking, simply make use of the typed array .set method to copy the precomputed unpacked representation into the results Uint8Array...
Hope this helps.
EDIT Created a number of variants of the unpacking algorithm to test the performance of inline calculations vs memory lookup and was surprised at the results using Chrome. Some of the optimizations of the V8 compiler are non-intuitive...
The differences in the versions...
unpackbits [FAST]: From the original question; this is the bar by which the other variations are compared.
unpackbits1 [FAST]: Modified by...
Specifying "|0" after every integer.
Using the increment unary op ( "++" ) rather than adding increments to the offset index of the result array.
Replacing the calculation of bit masks with the actual value. (Ie, rather than 1 << 5, the function used 32.)
unpackbits1a [FAST]: The same as unpackbits1, except...
Kept the calculation of bit masks rather than integer values. (Ie, using 1 << 5 rather than 32, as in the original question.) Counterintuitively, this produces a slightly faster result!
unpackbits1b [SLOWER]: The same as unpackbits1a, except...
The offset is not recomputed every time inside the loop. Ie, offset = 0|0 is initially set, and thereafter offset is only incremented within the loop. So, offset = ( (8|0) * i ) is no longer calculated for every byte. Counterintuitively, this produces a slower result!
unpackbits2 [SLOWEST]: This is the memory lookup option that I recommended above. Counterintuitively, this implies that typed array memory operations are much slower than calculating the results as in unpackbits!
unpackbits3 [SLOWER]: This is the memory lookup option that I recommended above, with the following change.
Rather than using the typed array .set method, this version sets the eight bytes one by one. Counterintuitively, this implies that the typed array .set method is slower (at least for eight bytes) than individually setting the values!
unpackbits4 [SLOWER]: This variant was on par with the original, and is another take on the memory lookup option. But, rather than 256 individual Uint8Array's, this combined all the pre-calculated results into a single Uint8Array of length 256 * 8. And it did not make use of the typed array .set method.
unpackbits5 [SLOWER]: Same as unpackbits4, except...
Rather than using the unary "++" on the index into the lookup table, it calculated the index for each of the 8 bytes being copied. As expected, calculating the index every time was slower than using the unary "++" operator.
Here are the tests. BEWARE that this builds an initial array of 10M random bytes, and then runs each unpack algorithm on this same data. On my workstation, the test runs in less than 5 seconds.
var lookupTable = initializeLookupTable();
function initializeLookupTable() {
let lookup = new Array( 256 );
let v = new Uint8Array( 1 );
for ( let i = 0; i < 256; i++ ) {
v[ 0 ] = i;
lookup[ i ] = unpackbits( v );
}
return lookup;
}
var lookupTable4 = initializeLookupTable4();
function initializeLookupTable4() {
let lookup = new Uint8Array( 256 * 8 );
let v = new Uint8Array( 1 );
for ( let i = 0; i < 256; i++ ) {
v[ 0 ] = i;
let temp = unpackbits( v );
lookup.set( temp, i * 8 );
}
return lookup;
}
function unpackbits(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte
let offset
for (let i = 0; i < uint8data.length; i++) {
byte = uint8data[i]
offset = (8 * i);
results[offset + 7] = ((byte & (1 << 0)) >> 0)
results[offset + 6] = ((byte & (1 << 1)) >> 1)
results[offset + 5] = ((byte & (1 << 2)) >> 2)
results[offset + 4] = ((byte & (1 << 3)) >> 3)
results[offset + 3] = ((byte & (1 << 4)) >> 4)
results[offset + 2] = ((byte & (1 << 5)) >> 5)
results[offset + 1] = ((byte & (1 << 6)) >> 6)
results[offset + 0] = ((byte & (1 << 7)) >> 7)
}
return results
}
function unpackbits1(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte;
let offset;
for (let i = 0|0, n = uint8data.length; i < n; i++) {
byte = uint8data[i]|0
offset = (8 * i)|0;
results[offset++] = ((byte & 128)>>7)|0;
results[offset++] = ((byte & 64)>>6)|0;
results[offset++] = ((byte & 32)>>5)|0;
results[offset++] = ((byte & 16)>>4)|0;
results[offset++] = ((byte & 8)>>3)|0;
results[offset++] = ((byte & 4)>>2)|0;
results[offset++] = ((byte & 2)>>1)|0;
results[offset++] = ((byte & 1)>>0)|0;
}
return results
}
function unpackbits1a(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte;
let offset;
for (let i = 0|0, n = uint8data.length; i < n; i++) {
byte = uint8data[i]|0;
offset = (8 * i)|0;
results[offset++] = ((byte & (1 << 7))>>7)|0;
results[offset++] = ((byte & (1 << 6))>>6)|0;
results[offset++] = ((byte & (1 << 5))>>5)|0;
results[offset++] = ((byte & (1 << 4))>>4)|0;
results[offset++] = ((byte & (1 << 3))>>3)|0;
results[offset++] = ((byte & (1 << 2))>>2)|0;
results[offset++] = ((byte & (1 << 1))>>1)|0;
results[offset++] = (byte & 1)|0;
}
return results
}
function unpackbits1b(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte;
let offset = 0|0;
for (let i = 0|0, n = uint8data.length; i < n; i++) {
byte = uint8data[i]|0;
results[offset++] = ((byte & (1 << 7))>>7)|0;
results[offset++] = ((byte & (1 << 6))>>6)|0;
results[offset++] = ((byte & (1 << 5))>>5)|0;
results[offset++] = ((byte & (1 << 4))>>4)|0;
results[offset++] = ((byte & (1 << 3))>>3)|0;
results[offset++] = ((byte & (1 << 2))>>2)|0;
results[offset++] = ((byte & (1 << 1))>>1)|0;
results[offset++] = (byte & 1)|0;
}
return results
}
function unpackbits2( uint8data ) {
const result = new Uint8Array( 8 * uint8data.length );
for ( let i = 0|0, ri = 0|0, n = uint8data.length; i < n; i++, ri += 8 ) {
result.set( lookupTable[ uint8data[ i ] ], ri );
}
return result;
}
function unpackbits3( uint8data ) {
const result = new Uint8Array( 8 * uint8data.length );
let ri = 0|0;
for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
//result.set( lookupTable[ uint8data[ i ] ], ri );
let lv = lookupTable[ uint8data[ i ] ];
result[ ri++ ] = lv [ 0|0 ];
result[ ri++ ] = lv [ 1|0 ];
result[ ri++ ] = lv [ 2|0 ];
result[ ri++ ] = lv [ 3|0 ];
result[ ri++ ] = lv [ 4|0 ];
result[ ri++ ] = lv [ 5|0 ];
result[ ri++ ] = lv [ 6|0 ];
result[ ri++ ] = lv [ 7|0 ];
}
return result;
}
function unpackbits4( uint8data ) {
const result = new Uint8Array( 8 * uint8data.length );
let ri = 0|0;
for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
let li = (uint8data[ i ] * 8)|0;
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
}
return result;
}
function unpackbits5( uint8data ) {
const result = new Uint8Array( 8 * uint8data.length );
let ri = 0|0;
for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
let li = (uint8data[ i ] * 8)|0;
result[ ri++ ] = lookupTable4[ li ];
result[ ri++ ] = lookupTable4[ li+1 ];
result[ ri++ ] = lookupTable4[ li+2 ];
result[ ri++ ] = lookupTable4[ li+3 ];
result[ ri++ ] = lookupTable4[ li+4 ];
result[ ri++ ] = lookupTable4[ li+5 ];
result[ ri++ ] = lookupTable4[ li+6 ];
result[ ri++ ] = lookupTable4[ li+7 ];
}
return result;
}
// Test
console.log( 'Building array of 10,000,000 test values.' );
let buffer = new ArrayBuffer( 10000000 );
let testArray = new Uint8Array( buffer );
for ( let i = 0; i < testArray.length; i++ ) {
testArray[ i ] = Math.floor( 256 * Math.random() );
}
console.log( 'Finished building test values.' );
console.log( 'Starting unpackbits.' );
console.time('u');
let u = unpackbits( testArray );
console.timeEnd('u');
console.log( 'Finished unpackbits.' );
console.log( 'Starting unpackbits1.' );
console.time('u1');
u = unpackbits1( testArray );
console.timeEnd('u1');
console.log( 'Finished unpackbits1.' );
console.log( 'Starting unpackbits1a.' );
console.time('u1a');
u = unpackbits1a( testArray );
console.timeEnd('u1a');
console.log( 'Finished unpackbits1a.' );
console.log( 'Starting unpackbits1b.' );
console.time('u1b');
u = unpackbits1b(testArray );
console.timeEnd('u1b');
console.log( 'Finished unpackbits1b.' );
console.log( 'Starting unpackbits2.' );
console.time('u2');
u = unpackbits2( testArray );
console.timeEnd('u2');
console.log( 'Finished unpackbits2.' );
console.log( 'Starting unpackbits3.' );
console.time('u3');
u = unpackbits3( testArray );
console.timeEnd('u3');
console.log( 'Finished unpackbits3.' );
console.log( 'Starting unpackbits4.' );
console.time('u4');
u = unpackbits4( testArray );
console.timeEnd('u4');
console.log( 'Finished unpackbits4.' );
console.log( 'Starting unpackbits5.' );
console.time('u5');
u = unpackbits5( testArray );
console.timeEnd('u5');
console.log( 'Finished unpackbits5.' );
This response is a continuation of the comment chain under @Jon Trent's answer.
EDIT: Include TensorFlow comparison for the reshaping portion.
I'm profiling the performance of two bit-unpacking methods: unpackbits1a and unpackbits (original). I am also profiling the different methods for reshaping the data to an NxM grid, where N is probably the same as M. Here's what I got:
function unpackbits1a(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte;
let offset;
for (let i = 0|0, n = uint8data.length; i < n; i++) {
byte = uint8data[i]
offset = ((8|0) * i); // The "|0" on this line cuts the time almost in half!
results[offset++] = (byte & ((1|0) << (7|0)))>>7|0;
results[offset++] = (byte & ((1|0) << (6|0)))>>6|0;
results[offset++] = (byte & ((1|0) << (5|0)))>>5|0;
results[offset++] = (byte & ((1|0) << (4|0)))>>4|0;
results[offset++] = (byte & ((1|0) << (3|0)))>>3|0;
results[offset++] = (byte & ((1|0) << (2|0)))>>2|0;
results[offset++] = (byte & ((1|0) << (1|0)))>>1|0;
results[offset++] = (byte & (1|0));
}
return results
}
function unpackbits(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte
let offset
for (let i = 0; i < uint8data.length; i++) {
byte = uint8data[i]
offset = 8 * i
results[offset + 7] = ((byte & (1 << 0)) >> 0)
results[offset + 6] = ((byte & (1 << 1)) >> 1)
results[offset + 5] = ((byte & (1 << 2)) >> 2)
results[offset + 4] = ((byte & (1 << 3)) >> 3)
results[offset + 3] = ((byte & (1 << 4)) >> 4)
results[offset + 2] = ((byte & (1 << 5)) >> 5)
results[offset + 1] = ((byte & (1 << 6)) >> 6)
results[offset + 0] = ((byte & (1 << 7)) >> 7)
}
return results
}
function unpackbitsToShape1(uint8data, shape = [1, 1]) {
var data = unpackbits(uint8data)
const dims = [shape[0] | 0, shape[1] | 0]
const result = new Array(dims[0])
let temp
const width = 0 | dims[1]
for (let i = 0 | 0; i < dims[0]; i++) {
temp = new Array(dims[1])
for (let j = 0| 0; j < dims[1]; j++) {
temp[j] = data[i * width + j]
}
result[i] = temp
}
return result
}
function unpackbitsToShape2(uint8data, shape = [1, 1]) {
var data = unpackbits(uint8data)
const dims = [shape[0] | 0, shape[1] | 0]
const result = new Array(dims[0])
const width = dims[1]
let offset
for (let i = 0 | 0; i < dims[0]; i++) {
offset = (width * i)
result[i] = data.slice(offset, offset + width)
}
return result
}
function unpackbitsToShape3(uint8data, shape = [1, 1]) {
const dims = [0 | shape[0], 0 | shape[1]]
const result = new Array(dims[0])
let position = 0 | 0
const smallCount = 0 | (uint8data.length % dims[0])
const bigCount = 0 | (uint8data.length - smallCount)
const bigByteChunk = 0 | (bigCount / dims[0])
const bigBitWidth = 0 | 8 * bigByteChunk
const smallByteChunk = 0 | (smallCount / dims[0])
const smallBitWidth = 0 | 8 * smallByteChunk
if (smallCount) {
let big
let small
let odd
let temp
for (let i = 0 | 0; i < dims[0]; i++) {
temp = new Uint8Array(dims[1])
odd = i % 2
big = unpackbits(uint8data.subarray(position, position + bigByteChunk))
position += bigByteChunk
if (odd) {
temp.set(small.subarray(smallBitWidth, 8), 0)
temp.set(big, smallBitWidth)
result[i] = temp
} else {
temp.set(big, 0)
small = unpackbits(uint8data.subarray(position, position + 1))
position++
temp.set(small.subarray(0, smallBitWidth), bigBitWidth)
result[i] = temp
}
}
return result
}
for (let i = 0 | 0; i < dims[0]; i++) {
// console.log('unpacking: ', uint8data.subarray(position, position + bigByteChunk))
result[i] = unpackbits(uint8data.subarray(position, position + bigByteChunk))
position += bigByteChunk
}
return result
}
var tf = require('@tensorflow/tfjs')
tf = require('@tensorflow/tfjs-node')
function unpackBitsToShapeTensorflow(uint8data, shape) {
return tf.tensor(unpackbits(uint8data), shape, 'int32')
}
var test64by64 = new Uint8Array(512)
for (let i = 0; i < test64by64.length; i++) {
test64by64[ i ] = Math.floor(256 * Math.random());
}
var test84by84 = new Uint8Array(882)
for (let i = 0; i < test84by84.length; i++) {
test84by84[ i ] = Math.floor(256 * Math.random());
}
var test100by100 = new Uint8Array(1250)
for (let i = 0; i < test100by100.length; i++) {
test100by100[ i ] = Math.floor(256 * Math.random());
}
function assert(condition, errMsg) {
if (!condition) {
console.error(errMsg)
}
}
console.log('********* 64 x 64 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
var foo = unpackbits1a(test64by64);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test64by64);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');
console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test64by64, [64, 64])
console.timeEnd('u1');
assert(
foo.length === 64 && foo[0].length === 64,
'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape1.');
console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test64by64, [64, 64])
console.timeEnd('u2');
assert(
foo.length === 64 && foo[0].length === 64,
'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape2.');
console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test64by64, [64, 64])
console.timeEnd('u3');
assert(
foo.length === 64 && foo[0].length === 64,
'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape3.');
console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test64by64, [64, 64])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');
console.log('\n\n********* 84 x 84 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
foo = unpackbits1a(test84by84);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test84by84);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');
console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test84by84, [84, 84])
console.timeEnd('u1');
assert(
foo.length === 84 && foo[0].length === 84,
'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape1.');
console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test84by84, [84, 84])
console.timeEnd('u2');
assert(
foo.length === 84 && foo[0].length === 84,
'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape2.');
console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test84by84, [84, 84])
console.timeEnd('u3');
assert(
foo.length === 84 && foo[0].length === 84,
'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape3.');
console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test84by84, [84, 84])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');
console.log('\n\n********* 100 x 100 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
foo = unpackbits1a(test100by100);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test100by100);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');
console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test100by100, [100, 100])
console.timeEnd('u1');
assert(
foo.length === 100 && foo[0].length === 100,
'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape1.');
console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test100by100, [100, 100])
console.timeEnd('u2');
assert(
foo.length === 100 && foo[0].length === 100,
'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape2.');
console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test100by100, [100, 100])
console.timeEnd('u3');
assert(
foo.length === 100 && foo[0].length === 100,
'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape3.');
console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test100by100, [100, 100])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');
I don't know how different the browser's execution environment is from Node's, but results seem more stable in Node. Here's what I get:
********* 64 x 64 *********
Starting unpackbits1a.
u1a: 0.513ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.189ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 0.434ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.365ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.590ms
Finished unpackbitsToShape3.
Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.508ms
Finished unpackBitsToShapeTensorflow.
********* 84 x 84 *********
Starting unpackbits1a.
u1a: 0.222ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.425ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 0.622ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.303ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.388ms
Finished unpackbitsToShape3.
Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.175ms
Finished unpackBitsToShapeTensorflow.
********* 100 x 100 *********
Starting unpackbits1a.
u1a: 1.502ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.018ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 1.631ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.072ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.159ms
Finished unpackbitsToShape3.
Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.052ms
Finished unpackBitsToShapeTensorflow.

Javascript get single bit

I have an integer and I want to check if a single bit is 0 or 1.
What is the best practice for doing that?
An example of what i'm doing at this moment:
const myInt = 8; // Binary in 32 Bit integer = 00000000000000000000000000001000
const myBit = myInt << 28 >>> 31; // 00000000000000000000000000000001
if (myBit === 1) {
//do something
}
But I think this isn't the best method for doing this.
Do you have any better idea?
EDIT:
It is always the same bit I want to check, but the integer is different.
myInt = 8+4; // 1100
n = 3;
(myInt >> n) & 0x1; // 1
n = 2;
(myInt >> n) & 0x1; // 1
n = 1;
(myInt >> n) & 0x1; // 0
n = 0;
(myInt >> n) & 0x1; // 0
The general solution shifts your number right by N bits and applies a bitmask that keeps only the last bit; all others are set to 0.
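Wrapped in a small helper (the function name is just for illustration):
function getBit(value, n) {
  return (value >> n) & 1; // 0 or 1: the n-th bit, counting from the least significant bit
}
console.log(getBit(12, 3)); // 1 (12 = 0b1100)
console.log(getBit(12, 1)); // 0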
I think you can use the bitwise AND
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Bitwise_Operators
my32Bit = 123414123;
twoBy7 = 128; // 1 << 7
// check bit 7 (the 8th bit)
if (my32Bit & twoBy7) {
// this branch runs when bit 7 is set; the & yields 128 (truthy), not 1
}
You could take a left shift << together with the bitwise AND & operator.
var value = 10,
bit;
for (bit = 0; bit < 4; bit++) {
console.log(bit, !!(value & (1 << bit)));
}

Can't get BBP formula to work in nodejs

I've been trying to make a little program that can compute the n-th digit of pi.
After a few searches I've found that the most common formula is the BBP formula, which is: n-th digit = 16^-n * [4/(8n + 1) - 2/(8n + 4) - 1/(8n + 5) - 1/(8n + 6)].
The output is in base 16.
My code is the following:
function run(n) {
return Math.pow(16, -n) * (4 / (8 * n + 1) - 2 / (8 * n + 4) - 1 / (8 * n + 5) - 1 / (8 * n + 6));
}
function convertFromBaseToBase(str, fromBase, toBase) {
var num = parseInt(str, fromBase);
return num.toString(toBase);
}
for (var i = 0; i < 10; i++) {
var a = run(i);
console.log(convertFromBaseToBase(a, 16, 10));
}
So far, my output is the following:
1:3
2:0
3:0
4:0
5:1
6:7
7:3
8:1
9:7
10:3
Obviously, these are not the first 10 digits of pi.
My understanding is that values get rounded too often, and that causes huge inaccuracy in the final result.
However, I could be wrong; that's why I'm here to ask whether I did anything wrong or whether it's Node.js's fault. I would love it if one of you has the answer to my problem!
Thanks!!
Unfortunately, 4/(8n + 1) - 2/(8n + 4) - 1/(8n + 5) - 1/(8n + 6) does not directly return the Nth hexadecimal digit of pi. I don't blame you, I made the same assumption at first. Although all the terms do indeed sum to pi, each individual term does not represent an individual hexadecimal digit. As seen here, the algorithm must be rewritten slightly in order to function correctly as a "digit spigot". Here is what your new run implementation ought to look like:
/**
Bailey-Borwein-Plouffe digit-extraction algorithm for pi
<https://en.wikipedia.org/wiki/Bailey%E2%80%93Borwein%E2%80%93Plouffe_formula#BBP_digit-extraction_algorithm_for_.CF.80>
*/
function run(n) {
var partial = function(d, c) {
var sum = 0;
// Left sum
var k;
for (k = 0; k <= d - 1; k++) {
sum += (Math.pow(16, d - 1 - k) % (8 * k + c)) / (8 * k + c);
}
// Right sum. This converges fast...
var prev = undefined;
for(k = d; sum !== prev; k++) {
prev = sum;
sum += Math.pow(16, d - 1 - k) / (8 * k + c);
}
return sum;
};
/**
JavaScript's modulus operator gives the wrong
result for negative numbers. E.g. `-2.9 % 1`
returns -0.9, the correct result is 0.1.
*/
var mod1 = function(x) {
return x < 0 ? 1 - (-x % 1) : x % 1;
};
var s = 0;
s += 4 * partial(n, 1);
s += -2 * partial(n, 4);
s += -1 * partial(n, 5);
s += -1 * partial(n, 6);
s = mod1(s);
return Math.floor(s * 16);
}
// Pi in hex is 3.243f6a8885a308d313198a2e037073...
console.log(run(0) === 3); // 0th hexadecimal digit of pi is the leading 3
console.log(run(1) === 2);
console.log(run(2) === 4);
console.log(run(3) === 3);
console.log(run(4) === 15); // i.e. "F"
Additionally, your convertFromBaseToBase function is more complicated than it needs to be. You have written it to accept a string in a specific base, but it is already being passed a number (which has no specific base). All you should really need is:
for (var i = 0; i < 10; i++) {
var a = run(i);
console.log(a.toString(16));
}
Output:
3
2
4
3
f
6
a
8
8
8
I have tested this code for the first 30 hexadecimal digits of pi, but it might start to return inaccurate results once Math.pow(16, d - 1 - k) grows beyond Number.MAX_SAFE_INTEGER, or maybe earlier for other reasons. At that point you may need to implement the modular exponentiation technique suggested in the Wikipedia article.
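For reference, here is a minimal sketch of that modular exponentiation (the powMod name is mine, not from the article). It computes 16^e mod m without ever forming the huge power, so the left sum stays accurate for larger n:
function powMod(base, exp, mod) {
  let result = 1;
  base %= mod;
  while (exp > 0) {
    if (exp % 2 === 1) result = (result * base) % mod; // fold in the current bit of exp
    base = (base * base) % mod;
    exp = Math.floor(exp / 2);
  }
  return result;
}
// The left-sum term in partial() would then become:
// sum += powMod(16, d - 1 - k, 8 * k + c) / (8 * k + c);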

Guidance to understand Base64 encoding algorithm

I found this algorithm on the net but I'm having a bit of trouble understanding exactly how it works. It encodes a Uint8Array to Base64. I would like to understand especially the sections under the comments "Combine the three bytes into a single integer" and "Use bitmasks to extract 6-bit segments from the triplet". I understood the concept of bit shifting used there, but I can't understand what its purpose is in those two sections.
function base64ArrayBuffer(bytes) {
var base64 = ''
var encodings = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
var byteLength = bytes.byteLength
var byteRemainder = byteLength % 3
var mainLength = byteLength - byteRemainder
var a, b, c, d
var chunk
// Main loop deals with bytes in chunks of 3
for (var i = 0; i < mainLength; i = i + 3) {
// Combine the three bytes into a single integer
chunk = (bytes[i] << 16) | (bytes[i + 1] << 8) | bytes[i + 2]
// Use bitmasks to extract 6-bit segments from the triplet
a = (chunk & 16515072) >> 18 // 16515072 = (2^6 - 1) << 18
b = (chunk & 258048) >> 12 // 258048 = (2^6 - 1) << 12
c = (chunk & 4032) >> 6 // 4032 = (2^6 - 1) << 6
d = chunk & 63 // 63 = 2^6 - 1
// Convert the raw binary segments to the appropriate ASCII encoding
base64 += encodings[a] + encodings[b] + encodings[c] + encodings[d]
}
// Deal with the remaining bytes and padding
if (byteRemainder == 1) {
chunk = bytes[mainLength]
a = (chunk & 252) >> 2 // 252 = (2^6 - 1) << 2
// Set the 4 least significant bits to zero
b = (chunk & 3) << 4 // 3 = 2^2 - 1
base64 += encodings[a] + encodings[b] + '=='
} else if (byteRemainder == 2) {
chunk = (bytes[mainLength] << 8) | bytes[mainLength + 1]
a = (chunk & 64512) >> 10 // 64512 = (2^6 - 1) << 10
b = (chunk & 1008) >> 4 // 1008 = (2^6 - 1) << 4
// Set the 2 least significant bits to zero
c = (chunk & 15) << 2 // 15 = 2^4 - 1
base64 += encodings[a] + encodings[b] + encodings[c] + '='
}
return base64
}
The first step takes each group of 3 bytes in the input and combines them into a 24-bit number. If we call them x = bytes[i], y = bytes[i+1], and z = bytes[i+2], it uses bit-shifting and bit-OR to create a 24-bit integer whose bits are:
xxxxxxxxyyyyyyyyzzzzzzzz
Then it extracts these bits in groups of 6 to get 4 numbers. The bits of a, b, c, and d correspond this way:
xxxxxxxxyyyyyyyyzzzzzzzz
aaaaaabbbbbbccccccdddddd
Then for each of these 6-bit numbers, it indexes the encodings string to get a corresponding character, and concatenates them into the base64 result string.
At the end there are some special cases to deal with the last 1 or 2 bytes in the input if it wasn't a multiple of 3 bytes long.
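A quick worked example: for the input bytes [77, 97, 110] ("Man" in ASCII), the 24-bit chunk is 010011010110000101101110, the four 6-bit groups are 19, 22, 5 and 46, and those index into the encodings string as 'T', 'W', 'F' and 'u':
console.log(base64ArrayBuffer(new Uint8Array([77, 97, 110]))); // "TWFu"
console.log(base64ArrayBuffer(new Uint8Array([77, 97])));      // "TWE=" (2 leftover bytes -> one '=')
console.log(base64ArrayBuffer(new Uint8Array([77])));          // "TQ==" (1 leftover byte -> two '=')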

Implementation of Luhn algorithm

I am trying to implement simple validation of credit card numbers. I read about the Luhn algorithm on Wikipedia:
Counting from the check digit, which is the rightmost, and moving
left, double the value of every second digit.
Sum the digits of the products (e.g., 10: 1 + 0 = 1, 14: 1 + 4 = 5)
together with the undoubled digits from the original number.
If the total modulo 10 is equal to 0 (if the total ends in zero)
then the number is valid according to the Luhn formula; else it is
not valid.
On Wikipedia, the description of the Luhn algorithm is very easily understood. However, I have also seen other implementations of the Luhn algorithm on Rosetta Code and elsewhere (archived).
Those implementations work very well, but I am confused about why they can use an array to do the work. The array they use seems to have no relation to the Luhn algorithm, and I can't see how they achieve the steps described on Wikipedia.
Why are they using arrays? What is the significance of them, and how are they used to implement the algorithm as described by Wikipedia?
Unfortunately, none of the code above worked for me, but I found a working solution on GitHub:
// takes the form field value and returns true on valid number
function valid_credit_card(value) {
// accept only digits, dashes or spaces
if (/[^0-9-\s]+/.test(value)) return false;
// The Luhn Algorithm. It's so pretty.
var nCheck = 0, nDigit = 0, bEven = false;
value = value.replace(/\D/g, "");
for (var n = value.length - 1; n >= 0; n--) {
var cDigit = value.charAt(n),
nDigit = parseInt(cDigit, 10);
if (bEven) {
if ((nDigit *= 2) > 9) nDigit -= 9;
}
nCheck += nDigit;
bEven = !bEven;
}
return (nCheck % 10) == 0;
}
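For example, with the same test number used in a later answer (a Luhn-valid test number, not a real card):
console.log(valid_credit_card("4532015112830366")); // true
console.log(valid_credit_card("4532015112830367")); // false (last digit changed)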
The array [0,1,2,3,4,-4,-3,-2,-1,0] is used as a lookup array for finding the difference between a number in 0-9 and the digit sum of 2 times its value. For example, for the number 8: 2*8 = 16 -> 1+6 = 7, and the difference is 7 - 8 = -1.
Here is a graphical presentation, where {n} stands for the digit sum of n:
[{0*2}-0, {1*2}-1, {2*2}-2, {3*2}-3, {4*2}-4, {5*2}-5, {6*2}-6, {7*2}-7, ...]
[   0   ,    1   ,    2   ,    3   ,    4   ,   -4   ,   -3   ,   -2   , ...]
The algorithm you listed just sums over all the digits, and for every second digit it looks up the difference using the array and applies it to the total sum.
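To make that concrete, here is a minimal sketch (mine, not the linked implementation) of how such a difference array can be applied on top of a plain digit sum:
var delta = [0, 1, 2, 3, 4, -4, -3, -2, -1, 0];
function luhnValidDelta(cardNo) { // cardNo: string of digits
  var sum = 0;
  for (var i = 0; i < cardNo.length; i++) {
    var d = +cardNo[cardNo.length - 1 - i]; // walk from the rightmost digit
    sum += d;
    if (i % 2 === 1) sum += delta[d]; // every second digit gets the doubled-digit correction
  }
  return sum % 10 === 0;
}
console.log(luhnValidDelta("79927398713")); // true (example number from the Wikipedia article)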
Compact Luhn validator:
var luhn_validate = function(imei){
return !/^\d+$/.test(imei) || (imei.split('').reduce(function(sum, d, n){
return sum + parseInt(((n + imei.length) %2)? d: [0,2,4,6,8,1,3,5,7,9][d]);
}, 0)) % 10 == 0;
};
Works fine for both CC and IMEI numbers. Fiddle: http://jsfiddle.net/8VqpN/
Lookup tables or arrays can simplify algorithm implementations - save many lines of code - and with that increase performance... if the calculation of the lookup index is simple - or simpler - and the array's memory footprint is affordable.
On the other hand, understanding how the particular lookup array or data structure came to be can at times be quite difficult, because the related algorithm implementation may look - at first sight - quite different from the original algorithm specification or description.
Indications for using lookup tables are number-oriented algorithms with simple arithmetic, simple comparisons, and equally structured repetition patterns - and, of course, quite finite value sets.
The many answers in this thread go for different lookup tables, and with that for different algorithms, to implement the very same Luhn algorithm. Most implementations use the lookup array to avoid the cumbersome figuring out of the value for doubled digits:
var luhnArr = [0, 2, 4, 6, 8, 1, 3, 5, 7, 9];
//
// - digit = index:        0    1    2    3    4    5    6    7    8    9
// - 1st calculation:     2*0  2*1  2*2  2*3  2*4  2*5  2*6  2*7  2*8  2*9
// - intermediate value:   =0   =2   =4   =6   =8  =10  =12  =14  =16  =18
// - 2nd calculation:                               1+0  1+2  1+4  1+6  1+8
// - final value:           0    2    4    6    8   =1   =3   =5   =7   =9
//
var luhnFinalValue = luhnArr[d]; // d is the numeric value of the digit to double
An equivalent implementation for getting the luhnFinalValue looks like this:
var luhnIntermediateValue = d * 2; // d is numeric value of digit to double
var luhnFinalValue = (luhnIntermediateValue < 10)
? luhnIntermediateValue // (d ) * 2;
: luhnIntermediateValue - 10 + 1; // (d - 5) * 2 + 1;
Which - with the comments on the true and false terms above - of course simplifies to:
var luhnFinalValue = (d < 5) ? d * 2 : (d - 5) * 2 + 1;
Now I'm not sure if I 'saved' anything at all... ;-) especially thanks to the value form, or short form, of if-then-else. Without it, the code may look like this - with 'orderly' blocks,
embedded in the next higher context layer of the algorithm, and therefore computing luhnValue:
var luhnValue; // card number is valid when luhn values for each digit modulo 10 is 0
if (even) { // even as n-th digit from the end of the string of digits
luhnValue = d;
} else { // doubled digits
if (d < 5) {
luhnValue = d * 2;
} else {
luhnValue = (d - 5) * 2 + 1;
}
}
Or:
var luhnValue = (even) ? d : (d < 5) ? d * 2 : (d - 5) * 2 + 1;
Btw, with modern, optimizing interpreters and (just in time) compilers, the difference is only in the source code and matters only for readability.
Having come that far with the explanation - and 'justification' - of the use of lookup tables and the comparison to straightforward coding, the lookup table now looks a bit like overkill to me. The algorithm without it is now quite easy to finish - and it looks pretty compact too:
function luhnValid(cardNo) { // cardNo as a string w/ digits only
var sum = 0, even = false;
cardNo.split("").reverse().forEach(function(dstr){ var d = parseInt(dstr);
sum += ((even = !even) ? d : (d < 5) ? d * 2 : (d - 5) * 2 + 1);
});
return (sum % 10 == 0);
}
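A quick check with the example number from the Wikipedia article:
console.log(luhnValid("79927398713")); // true
console.log(luhnValid("79927398710")); // false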
What strikes me after going through the explanation exercise is that the initially most enticing implementation - the one using reduce() from @kalypto - just totally lost its luster for me... not only because it is faulty on several levels, but more so because it shows that bells and whistles may not always 'ring the victory bell'. But thank you, @kalypto, it made me actually use - and understand - reduce():
function luhnValid2(cardNo) { // cardNo as a string w/ digits only
var d = 0, e = false; // e = even = n-th digit counted from the end
return ( cardNo.split("").reverse().reduce(
function(s,dstr){ d = parseInt(dstr); // reduce arg-0 - callback fnc
return (s + ((e = !e) ? d : [0,2,4,6,8,1,3,5,7,9][d]));
} // /end of callback fnc
,0 // reduce arg-1 - prev value for first iteration (sum)
) % 10 == 0
);
}
To be true to this thread, some more lookup table options have to be mentioned:
how about just adjusting values for doubled digits - as posted by @yngum
how about just doing everything with lookup tables - as posted by @Simon_Weaver - where the values for the non-doubled digits are also taken from a lookup table.
how about doing everything with just ONE lookup table - as inspired by the use of an offset as done in the extensively discussed luhnValid() function.
The code for the latter - using reduce - may look like this:
function luhnValid3(cardNo) { // cardNo as a string w/ digits only
var d = 0, e = false; // e = even = n-th digit counted from the end
return ( cardNo.split("").reverse().reduce(
function(s,dstr){ d = parseInt(dstr);
return (s + [0,1,2,3,4,5,6,7,8,9,0,2,4,6,8,1,3,5,7,9][d+((e=!e)?0:10)]);
}
,0
) % 10 == 0
);
}
And to close, luhnValid4() - very compact - using just 'old-fashioned' (compatible) JavaScript - with one single lookup table:
function luhnValid4(cardNo) { // cardNo as a string w/ digits only
var s = 0, e = false, p = cardNo.length; while (p > 0) { p--;
s += "01234567890246813579".charAt(cardNo.charAt(p)*1 + ((e=!e)?0:10)) * 1; }
return (s % 10 == 0);
}
Corollary: Strings can be looked at as lookup tables of characters... ;-)
A perfect example of a nice lookup table application is counting the set bits in bit lists - bits set in a (very) long 8-bit byte string in (an interpreted) high-level language (where any bit operations are quite expensive). The lookup table has 256 entries. Each entry contains the number of bits set in an unsigned 8-bit integer equal to the index of the entry. Iterate through the string and use each unsigned 8-bit byte's value to look up the number of bits for that byte from the table. Even for a low-level language - such as assembler / machine code - the lookup table is the way to go... especially in an environment where the microcode (instruction) can handle multiple bytes, up to 256 or more, in a (single CISC) instruction.
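As a minimal sketch of that idea (the names are mine):
// 256-entry lookup table: POPCOUNT[b] = number of bits set in the byte b.
const POPCOUNT = new Uint8Array(256);
for (let b = 1; b < 256; b++) {
  POPCOUNT[b] = POPCOUNT[b >> 1] + (b & 1); // reuse the already computed half
}
function countSetBits(bytes) { // bytes: Uint8Array
  let total = 0;
  for (let i = 0; i < bytes.length; i++) total += POPCOUNT[bytes[i]];
  return total;
}
console.log(countSetBits(new Uint8Array([0, 1, 255, 92]))); // 0 + 1 + 8 + 4 = 13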
Some notes:
numberString * 1 and parseInt(numberStr) do about the same.
there are some superfluous indentations, parentheses, etc... supporting my brain in getting the semantics quicker... but some that I wanted to leave out are actually required when it comes to arithmetic operations with short-form, value-if-then-else expressions as terms.
some formatting may look new to you; for example, I use the continuation comma with the continuation on the same line, and I 'close' things indented half a tab relative to the 'opening' item.
All formatting is done for the human, not the computer... 'it' couldn't care less.
A very fast and elegant implementation of the Luhn algorithm follows:
const isLuhnValid = function luhn(array) {
return function (number) {
let len = number ? number.length : 0,
bit = 1,
sum = 0;
while (len--) {
sum += !(bit ^= 1) ? parseInt(number[len], 10) : array[number[len]];
}
return sum % 10 === 0 && sum > 0;
};
}([0, 2, 4, 6, 8, 1, 3, 5, 7, 9]);
console.log(isLuhnValid("4112344112344113".split(""))); // true
console.log(isLuhnValid("4112344112344114".split(""))); // false
In my dedicated git repository you can grab it and find more info (like a benchmarks link and full unit tests for ~50 browsers and some Node.js versions).
Or you can simply install it via Bower or npm. It works in both browsers and Node.
bower install luhn-alg
npm install luhn-alg
If you want to calculate the checksum, this code from this page is very concise and in my random tests seems to work.
NOTE: the verification algorithms on this page do NOT all work.
// Javascript
String.prototype.luhnGet = function()
{
var luhnArr = [[0,1,2,3,4,5,6,7,8,9],[0,2,4,6,8,1,3,5,7,9]], sum = 0;
this.replace(/\D+/g,"").replace(/[\d]/g, function(c, p, o){
sum += luhnArr[ (o.length-p)&1 ][ parseInt(c,10) ]
});
return this + ((10 - sum%10)%10);
};
alert("54511187504546384725".luhnGet());​
Here are my findings:
function luhnCheck(value) {
return 0 === (value.replace(/\D/g, '').split('').reverse().map(function(d, i) {
return +['0123456789','0246813579'][i % 2][+d];
}).reduce(function(p, n) {
return p + n;
}) % 10);
}
Update: Here's a smaller version w/o string constants:
function luhnCheck(value) {
return !(value.replace(/\D/g, '').split('').reverse().reduce(function(a, d, i) {
return a + d * (i % 2 ? 2.2 : 1) | 0;
}, 0) % 10);
}
Note: the use of 2.2 here makes the doubled value pick up an extra 1 when doubling 5 to 9 (e.g. 6 * 2.2 | 0 gives 13 instead of 12), so each term is congruent to the Luhn digit sum modulo 10, which is all the final % 10 check needs.
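A quick sanity check of that trick: for every digit d, (d * 2.2 | 0) % 10 equals the Luhn digit sum of 2 * d:
for (let d = 0; d <= 9; d++) {
  const doubled = 2 * d;
  const digitSum = doubled > 9 ? doubled - 9 : doubled; // e.g. 12 -> 3
  console.log(d, (d * 2.2 | 0) % 10 === digitSum); // logs true for every digit
}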
Code is the following:
var LuhnCheck = (function()
{
var luhnArr = [0, 2, 4, 6, 8, 1, 3, 5, 7, 9];
return function(str)
{
var counter = 0;
var incNum;
var odd = false;
var temp = String(str).replace(/[^\d]/g, "");
if ( temp.length == 0)
return false;
for (var i = temp.length-1; i >= 0; --i)
{
incNum = parseInt(temp.charAt(i), 10);
counter += (odd = !odd)? incNum : luhnArr[incNum];
}
return (counter%10 == 0);
}
})();
The variable counter is the sum of all the digits in odd positions, plus the doubles of the digits in even positions; when the double is 10 or more, we add the two digits that make it up (e.g. 6 * 2 -> 12 -> 1 + 2 = 3).
The array you are asking about holds the results of all the possible doubles:
var luhnArr = [0, 2, 4, 6, 8, 1, 3, 5, 7, 9];
0 * 2 = 0 --> 0
1 * 2 = 2 --> 2
2 * 2 = 4 --> 4
3 * 2 = 6 --> 6
4 * 2 = 8 --> 8
5 * 2 = 10 --> 1+0 --> 1
6 * 2 = 12 --> 1+2 --> 3
7 * 2 = 14 --> 1+4 --> 5
8 * 2 = 16 --> 1+6 --> 7
9 * 2 = 18 --> 1+8 --> 9
So for example
luhnArr[3] --> 6 (6 is at index 3 of the array, and also 3 * 2 = 6)
luhnArr[7] --> 5 (5 is at index 7 of the array, and also 7 * 2 = 14 -> 1 + 4 = 5)
Another alternative:
function luhn(digits) {
return /^\d+$/.test(digits) && !(digits.split("").reverse().map(function(checkDigit, i) {
checkDigit = parseInt(checkDigit, 10);
return i % 2 == 0
? checkDigit
: (checkDigit *= 2) > 9 ? checkDigit - 9 : checkDigit;
}).reduce(function(previousValue, currentValue) {
return previousValue + currentValue;
}) % 10);
}
Alternative ;) Simple and Best
<script>
// takes the form field value and returns true on valid number
function valid_credit_card(value) {
// accept only digits, dashes or spaces
if (/[^0-9-\s]+/.test(value)) return false;
// The Luhn Algorithm. It's so pretty.
var nCheck = 0, nDigit = 0, bEven = false;
value = value.replace(/\D/g, "");
for (var n = value.length - 1; n >= 0; n--) {
var cDigit = value.charAt(n),
nDigit = parseInt(cDigit, 10);
if (bEven) {
if ((nDigit *= 2) > 9) nDigit -= 9;
}
nCheck += nDigit;
bEven = !bEven;
}
return (nCheck % 10) == 0;
}
console.log(valid_credit_card("5610591081018250"),"valid_credit_card Validation");
</script>
Best Solution here
http://plnkr.co/edit/34aR8NUpaKRCYpgnfUbK?p=preview
with all test cases passed according to
http://www.paypalobjects.com/en_US/vhelp/paypalmanager_help/credit_card_numbers.htm
and the credit goes to
https://gist.github.com/DiegoSalazar/4075533
const LuhnCheckCard = (number) => {
if (/[^0-9-\s]+/.test(number) || number.length === 0)
return false;
return ((number.split("").map(Number).reduce((prev, digit, i) => {
(!((i & 1) ^ (number.length & 1))) && (digit *= 2);
(digit > 9) && (digit -= 9);
return prev + digit;
}, 0) % 10) === 0);
}
console.log(LuhnCheckCard("4532015112830366")); // true
console.log(LuhnCheckCard("gdsgdsgdsg")); // false
I worked out the following solution after I submitted a much worse one for a test..
function valid(number){
var splitNumber = number.toString().split("").map(Number);
var totalEvenValue = 0;
var totalOddValue = 0;
for(var i = 0; i < splitNumber.length; i++){
if(i % 2 === 0){
if(splitNumber[i] * 2 >= 10){
totalEvenValue += splitNumber[i] * 2 - 9;
} else {
totalEvenValue += splitNumber[i] * 2;
}
}else {
totalOddValue += splitNumber[i];
}
}
return ((totalEvenValue + totalOddValue) %10 === 0)
}
console.log(valid(41111111111111111));
I recently wrote a solution in JavaScript; I'll leave the code here in case it helps anyone:
// checksum with Luhn Algorithm
const luhn_checksum = function(strIn) {
const len = strIn.length;
let sum = 0
for (let i = 0; i<10; i += 1) {
let factor = (i % 2 === 1) ? 2: 1
const v = parseInt(strIn.charAt(i), 10) * factor
sum += (v>9) ? (1 + v % 10) : v
}
return (sum * 9) % 10
}
// test example from Wikipedia:
// https://en.wikipedia.org/wiki/Luhn_algorithm
const strIn = "7992739871"
// The checksum of "7992739871" is 3
console.log(luhn_checksum(strIn))
If you understand the code above, you will have no problem converting it to any other language.
For example, in Python:
def nss_checksum(nss):
    suma = 0
    for i in range(10):
        factor = 2 if (i % 2 == 1) else 1
        v = int(nss[i]) * factor
        suma += (1 + v % 10) if (v > 9) else v
    return (suma * 9) % 10
For more info, check this:
https://en.wikipedia.org/wiki/Luhn_algorithm
My code (in Spanish):
https://gist.github.com/fitorec/82a3e27fae3bab709a07c19c71c3a8d4
def validate_credit_card_number(card_number):
    if(len(str(card_number))==16):
        group1 = []
        group1_double = []
        after_group_double = []
        group1_sum = 0
        group2_sum = 0
        group2 = []
        total_final_sum = 0
        s = str(card_number)
        list1 = [int(i) for i in list(s)]
        for i in range(14, -1, -2):
            group1.append(list1[i])
        for x in group1:
            b = 0
            b = x * 2
            group1_double.append(b)
        for j in group1_double:
            if(j > 9):
                sum_of_digits = 0
                alias = str(j)
                temp1 = alias[0]
                temp2 = alias[1]
                sum_of_digits = int(temp1) + int(temp2)
                after_group_double.append(sum_of_digits)
            else:
                after_group_double.append(j)
        for i in after_group_double:
            group1_sum += i
        for i in range(15, -1, -2):
            group2.append(list1[i])
        for i in group2:
            group2_sum += i
        total_final_sum = group1_sum + group2_sum
        if(total_final_sum%10==0):
            return True
        else:
            return False

card_number = 1456734512345698 #4539869650133101 #1456734512345698 # #5239512608615007
result = validate_credit_card_number(card_number)
if(result):
    print("credit card number is valid")
else:
    print("credit card number is invalid")
