Password = base64 encoded(sha1(nonce+created+secret))
where:
nonce = 186269,
created = 2015-07-08T11:31:53+01:00,
secret = Ok4IWYLBHbKn8juM1gFPvQxadieZmS2
should give ZDg3MTZiZTgwYTMwYWY4Nzc4OGFjMmZhYjA5YzM3MTdlYmQ1M2ZkMw== as the password. This is what I am trying to implement; I need the JavaScript for it.
As per security standards, it is still not good to do this kind of hashing client-side; one should always do it in a backend language and get the value from an API call.
But you can use the sha1 function below to do SHA-1 hashing in JavaScript:
function sha1 (str) {
// discuss at: https://locutus.io/php/sha1/
// original by: Webtoolkit.info (https://www.webtoolkit.info/)
// improved by: Michael White (https://getsprink.com)
// improved by: Kevin van Zonneveld (https://kvz.io)
// input by: Brett Zamir (https://brett-zamir.me)
// note 1: Keep in mind that in accordance with PHP, the whole string is buffered and then
// note 1: hashed. If available, we'd recommend using Node's native crypto modules directly
// note 1: in a streaming fashion for faster and more efficient hashing
// example 1: sha1('Kevin van Zonneveld')
// returns 1: '54916d2e62f65b3afa6e192e6a601cdbe5cb5897'
let hash
try {
const crypto = require('crypto')
const sha1sum = crypto.createHash('sha1')
sha1sum.update(str)
hash = sha1sum.digest('hex')
} catch (e) {
hash = undefined
}
if (hash !== undefined) {
return hash
}
const _rotLeft = function (n, s) {
const t4 = (n << s) | (n >>> (32 - s))
return t4
}
const _cvtHex = function (val) {
let str = ''
let i
let v
for (i = 7; i >= 0; i--) {
v = (val >>> (i * 4)) & 0x0f
str += v.toString(16)
}
return str
}
let blockstart
let i, j
const W = new Array(80)
let H0 = 0x67452301
let H1 = 0xEFCDAB89
let H2 = 0x98BADCFE
let H3 = 0x10325476
let H4 = 0xC3D2E1F0
let A, B, C, D, E
let temp
// utf8_encode
str = unescape(encodeURIComponent(str))
const strLen = str.length
const wordArray = []
for (i = 0; i < strLen - 3; i += 4) {
j = str.charCodeAt(i) << 24 |
str.charCodeAt(i + 1) << 16 |
str.charCodeAt(i + 2) << 8 |
str.charCodeAt(i + 3)
wordArray.push(j)
}
switch (strLen % 4) {
case 0:
i = 0x080000000
break
case 1:
i = str.charCodeAt(strLen - 1) << 24 | 0x0800000
break
case 2:
i = str.charCodeAt(strLen - 2) << 24 | str.charCodeAt(strLen - 1) << 16 | 0x08000
break
case 3:
i = str.charCodeAt(strLen - 3) << 24 |
str.charCodeAt(strLen - 2) << 16 |
str.charCodeAt(strLen - 1) <<
8 | 0x80
break
}
wordArray.push(i)
while ((wordArray.length % 16) !== 14) {
wordArray.push(0)
}
wordArray.push(strLen >>> 29)
wordArray.push((strLen << 3) & 0x0ffffffff)
for (blockstart = 0; blockstart < wordArray.length; blockstart += 16) {
for (i = 0; i < 16; i++) {
W[i] = wordArray[blockstart + i]
}
for (i = 16; i <= 79; i++) {
W[i] = _rotLeft(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16], 1)
}
A = H0
B = H1
C = H2
D = H3
E = H4
for (i = 0; i <= 19; i++) {
temp = (_rotLeft(A, 5) + ((B & C) | (~B & D)) + E + W[i] + 0x5A827999) & 0x0ffffffff
E = D
D = C
C = _rotLeft(B, 30)
B = A
A = temp
}
for (i = 20; i <= 39; i++) {
temp = (_rotLeft(A, 5) + (B ^ C ^ D) + E + W[i] + 0x6ED9EBA1) & 0x0ffffffff
E = D
D = C
C = _rotLeft(B, 30)
B = A
A = temp
}
for (i = 40; i <= 59; i++) {
temp = (_rotLeft(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[i] + 0x8F1BBCDC) & 0x0ffffffff
E = D
D = C
C = _rotLeft(B, 30)
B = A
A = temp
}
for (i = 60; i <= 79; i++) {
temp = (_rotLeft(A, 5) + (B ^ C ^ D) + E + W[i] + 0xCA62C1D6) & 0x0ffffffff
E = D
D = C
C = _rotLeft(B, 30)
B = A
A = temp
}
H0 = (H0 + A) & 0x0ffffffff
H1 = (H1 + B) & 0x0ffffffff
H2 = (H2 + C) & 0x0ffffffff
H3 = (H3 + D) & 0x0ffffffff
H4 = (H4 + E) & 0x0ffffffff
}
temp = _cvtHex(H0) + _cvtHex(H1) + _cvtHex(H2) + _cvtHex(H3) + _cvtHex(H4)
return temp.toLowerCase()
}
// create sha1
var e = sha1('186269'+'2015-07-08T11:31:53+01:00'+'Ok4IWYLBHbKn8juM1gFPvQxadieZmS2');
console.log(e)
// to base64 encoding
var encoded = btoa(e)
console.log(encoded)
Function link: https://locutus.io/php/sha1/
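For reference, in Node the whole digest can be computed without the polyfill. A minimal sketch using the built-in crypto module; note that, matching the expected value above, the base64 step encodes the hex digest string rather than the raw digest bytes:
const crypto = require('crypto')
const nonce = '186269'
const created = '2015-07-08T11:31:53+01:00'
const secret = 'Ok4IWYLBHbKn8juM1gFPvQxadieZmS2'
// SHA-1 as a lowercase hex string, then base64 of that hex string
const hex = crypto.createHash('sha1').update(nonce + created + secret).digest('hex')
const password = Buffer.from(hex).toString('base64')
console.log(password) // ZDg3MTZiZTgwYTMwYWY4Nzc4OGFjMmZhYjA5YzM3MTdlYmQ1M2ZkMw==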
I have some UTF-8 encoded data living in a range of Uint8Array elements in JavaScript. Is there an efficient way to decode these out to a regular JavaScript string (I believe JavaScript uses 16-bit Unicode)? I don't want to add one character at a time, as the string concatenation would become too CPU intensive.
TextEncoder and TextDecoder from the Encoding standard, which is polyfilled by the stringencoding library, convert between strings and ArrayBuffers:
var uint8array = new TextEncoder().encode("someString");
var string = new TextDecoder().decode(uint8array);
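For example, a quick round-trip with a multi-byte character shows the pair agrees on UTF-8:
var bytes = new TextEncoder().encode("héllo"); // Uint8Array [104, 195, 169, 108, 108, 111]
var text = new TextDecoder().decode(bytes);    // "héllo"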
This should work:
// http://www.onicos.com/staff/iz/amuse/javascript/expert/utf.txt
/* utf.js - UTF-8 <=> UTF-16 conversion
*
* Copyright (C) 1999 Masanao Izumo <iz@onicos.co.jp>
* Version: 1.0
* LastModified: Dec 25 1999
* This library is free. You can redistribute it and/or modify it.
*/
function Utf8ArrayToStr(array) {
var out, i, len, c;
var char2, char3;
out = "";
len = array.length;
i = 0;
while(i < len) {
c = array[i++];
switch(c >> 4)
{
case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
// 0xxxxxxx
out += String.fromCharCode(c);
break;
case 12: case 13:
// 110x xxxx 10xx xxxx
char2 = array[i++];
out += String.fromCharCode(((c & 0x1F) << 6) | (char2 & 0x3F));
break;
case 14:
// 1110 xxxx 10xx xxxx 10xx xxxx
char2 = array[i++];
char3 = array[i++];
out += String.fromCharCode(((c & 0x0F) << 12) |
((char2 & 0x3F) << 6) |
((char3 & 0x3F) << 0));
break;
}
}
return out;
}
It's somewhat cleaner than the other solutions because it doesn't use any hacks and doesn't depend on browser-only JS functions, i.e. it also works in other JS environments.
Here's what I use:
var str = String.fromCharCode.apply(null, uint8Arr);
In Node "Buffer instances are also Uint8Array instances", so buf.toString() works in this case.
In NodeJS, we have Buffers available, and string conversion with them is really easy. Better, it's easy to convert a Uint8Array to a Buffer. Try this code, it's worked for me in Node for basically any conversion involving Uint8Arrays:
let str = Buffer.from(uint8arr.buffer).toString();
We're just extracting the ArrayBuffer from the Uint8Array and then converting that to a proper NodeJS Buffer. Then we convert the Buffer to a string (you can throw in a hex or base64 encoding if you want).
If we want to convert back to a Uint8Array from a string, then we'd do this:
let uint8arr = new Uint8Array(Buffer.from(str));
Be aware that if you declared an encoding like base64 when converting to a string, then you'd have to use Buffer.from(str, "base64") if you used base64, or whatever other encoding you used.
This will not work in the browser without a module! NodeJS Buffers just don't exist in the browser, so this method won't work unless you add Buffer functionality to the browser. That's actually pretty easy to do though, just use a module like this, which is both small and fast!
Found in one of the Chrome sample applications, although this is meant for larger blocks of data where you're okay with an asynchronous conversion.
/**
* Converts an array buffer to a string
*
* @private
* @param {ArrayBuffer} buf The buffer to convert
* @param {Function} callback The function to call when conversion is complete
*/
function _arrayBufferToString(buf, callback) {
var bb = new Blob([new Uint8Array(buf)]);
var f = new FileReader();
f.onload = function(e) {
callback(e.target.result);
};
f.readAsText(bb);
}
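On modern browsers the same conversion can be written with the promise-based Blob.text() method; a sketch, assuming Blob.prototype.text is supported:
function arrayBufferToString(buf) {
  // text() reads the blob as UTF-8 and resolves with the resulting string
  return new Blob([new Uint8Array(buf)]).text();
}
// usage: arrayBufferToString(buf).then(function (str) { console.log(str); });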
The solution given by Albert works well as long as the provided function is invoked infrequently and is only used for arrays of modest size, otherwise it is egregiously inefficient. Here is an enhanced vanilla JavaScript solution that works for both Node and browsers and has the following advantages:
• Works efficiently for all octet array sizes
• Generates no intermediate throw-away strings
• Supports 4-byte characters on modern JS engines (otherwise "?" is substituted)
var utf8ArrayToStr = (function () {
var charCache = new Array(128); // Preallocate the cache for the common single byte chars
var charFromCodePt = String.fromCodePoint || String.fromCharCode;
var result = [];
return function (array) {
var codePt, byte1;
var buffLen = array.length;
result.length = 0;
for (var i = 0; i < buffLen;) {
byte1 = array[i++];
if (byte1 <= 0x7F) {
codePt = byte1;
} else if (byte1 <= 0xDF) {
codePt = ((byte1 & 0x1F) << 6) | (array[i++] & 0x3F);
} else if (byte1 <= 0xEF) {
codePt = ((byte1 & 0x0F) << 12) | ((array[i++] & 0x3F) << 6) | (array[i++] & 0x3F);
} else if (String.fromCodePoint) {
codePt = ((byte1 & 0x07) << 18) | ((array[i++] & 0x3F) << 12) | ((array[i++] & 0x3F) << 6) | (array[i++] & 0x3F);
} else {
codePt = 63; // Cannot convert four byte code points, so use "?" instead
i += 3;
}
result.push(charCache[codePt] || (charCache[codePt] = charFromCodePt(codePt)));
}
return result.join('');
};
})();
Uint8Array to String
let str = Buffer.from(key.secretKey).toString('base64');
String to Uint8Array
let uint8arr = new Uint8Array(Buffer.from(data,'base64'));
I was frustrated to see that people were not showing how to go both ways, or showing that things work on non-trivial UTF-8 strings. I found a post on codereview.stackexchange.com that has some code that works well. I used it to turn ancient runes into bytes, to test some crypto on the bytes, then convert things back into a string. The working code is on GitHub. I renamed the methods for clarity:
// https://codereview.stackexchange.com/a/3589/75693
function bytesToString(bytes) {
var chars = [];
for(var i = 0, n = bytes.length; i < n;) {
chars.push(((bytes[i++] & 0xff) << 8) | (bytes[i++] & 0xff));
}
return String.fromCharCode.apply(null, chars);
}
// https://codereview.stackexchange.com/a/3589/75693
function stringToBytes(str) {
var bytes = [];
for(var i = 0, n = str.length; i < n; i++) {
var char = str.charCodeAt(i);
bytes.push(char >>> 8, char & 0xFF);
}
return bytes;
}
The unit test uses this UTF-8 string:
// http://kermitproject.org/utf8.html
// From the Anglo-Saxon Rune Poem (Rune version)
const secretUtf8 = `ᚠᛇᚻ᛫ᛒᛦᚦ᛫ᚠᚱᚩᚠᚢᚱ᛫ᚠᛁᚱᚪ᛫ᚷᛖᚻᚹᛦᛚᚳᚢᛗ
ᛋᚳᛖᚪᛚ᛫ᚦᛖᚪᚻ᛫ᛗᚪᚾᚾᚪ᛫ᚷᛖᚻᚹᛦᛚᚳ᛫ᛗᛁᚳᛚᚢᚾ᛫ᚻᛦᛏ᛫ᛞᚫᛚᚪᚾ
ᚷᛁᚠ᛫ᚻᛖ᛫ᚹᛁᛚᛖ᛫ᚠᚩᚱ᛫ᛞᚱᛁᚻᛏᚾᛖ᛫ᛞᚩᛗᛖᛋ᛫ᚻᛚᛇᛏᚪᚾ᛬`;
Note that the string length is only 117 characters but the byte length, when encoded, is 234: two bytes per UTF-16 code unit, so this pair really round-trips UTF-16 code units rather than producing true UTF-8 bytes.
If I uncomment the console.log lines I can see that the string that is decoded is the same string that was encoded (with the bytes passed through Shamir's secret sharing algorithm!).
Do what @Sudhir said, and then to get a String out of the comma-separated list of numbers, use:
var myString = "";
for (var i = 0; i < unitArr.byteLength; i++) {
  myString += String.fromCharCode(unitArr[i]);
}
This will give you the string you want, if it's still relevant.
If you can't use the TextDecoder API because it is not supported on IE:
You can use the FastestSmallestTextEncoderDecoder polyfill recommended by the Mozilla Developer Network website;
You can use this function also provided at the MDN website:
function utf8ArrayToString(aBytes) {
var sView = "";
for (var nPart, nLen = aBytes.length, nIdx = 0; nIdx < nLen; nIdx++) {
nPart = aBytes[nIdx];
sView += String.fromCharCode(
nPart > 251 && nPart < 254 && nIdx + 5 < nLen ? /* six bytes */
/* (nPart - 252 << 30) may be not so safe in ECMAScript! So...: */
(nPart - 252) * 1073741824 + (aBytes[++nIdx] - 128 << 24) + (aBytes[++nIdx] - 128 << 18) + (aBytes[++nIdx] - 128 << 12) + (aBytes[++nIdx] - 128 << 6) + aBytes[++nIdx] - 128
: nPart > 247 && nPart < 252 && nIdx + 4 < nLen ? /* five bytes */
(nPart - 248 << 24) + (aBytes[++nIdx] - 128 << 18) + (aBytes[++nIdx] - 128 << 12) + (aBytes[++nIdx] - 128 << 6) + aBytes[++nIdx] - 128
: nPart > 239 && nPart < 248 && nIdx + 3 < nLen ? /* four bytes */
(nPart - 240 << 18) + (aBytes[++nIdx] - 128 << 12) + (aBytes[++nIdx] - 128 << 6) + aBytes[++nIdx] - 128
: nPart > 223 && nPart < 240 && nIdx + 2 < nLen ? /* three bytes */
(nPart - 224 << 12) + (aBytes[++nIdx] - 128 << 6) + aBytes[++nIdx] - 128
: nPart > 191 && nPart < 224 && nIdx + 1 < nLen ? /* two bytes */
(nPart - 192 << 6) + aBytes[++nIdx] - 128
: /* nPart < 127 ? */ /* one byte */
nPart
);
}
return sView;
}
let str = utf8ArrayToString([50,72,226,130,130,32,43,32,79,226,130,130,32,226,135,140,32,50,72,226,130,130,79]);
// Must show 2H₂ + O₂ ⇌ 2H₂O
console.log(str);
Try these functions:
var JsonToArray = function(json)
{
var str = JSON.stringify(json, null, 0);
var ret = new Uint8Array(str.length);
for (var i = 0; i < str.length; i++) {
ret[i] = str.charCodeAt(i);
}
return ret
};
var binArrayToJson = function(binArray)
{
var str = "";
for (var i = 0; i < binArray.length; i++) {
str += String.fromCharCode(parseInt(binArray[i]));
}
return JSON.parse(str)
}
source: https://gist.github.com/tomfa/706d10fed78c497731ac, kudos to Tomfa
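Note that these helpers only round-trip JSON whose characters all have code points at or below 255, because assigning a larger charCodeAt value into a Uint8Array truncates it. For arbitrary strings, a TextEncoder/TextDecoder based pair is safer; a sketch, assuming the Encoding API is available (jsonToArrayUtf8 and arrayToJsonUtf8 are hypothetical names):
var jsonToArrayUtf8 = function (json) {
  // encode the JSON text as real UTF-8 bytes
  return new TextEncoder().encode(JSON.stringify(json));
};
var arrayToJsonUtf8 = function (binArray) {
  // decode the UTF-8 bytes back into text before parsing
  return JSON.parse(new TextDecoder().decode(binArray));
};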
I'm using this function, which works for me:
function uint8ArrayToBase64(data) {
return btoa(Array.from(data).map((c) => String.fromCharCode(c)).join(''));
}
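The reverse direction can be sketched with atob (base64ToUint8Array is a hypothetical name; browser-only, like the function above):
function base64ToUint8Array(data) {
  return Uint8Array.from(atob(data), (c) => c.charCodeAt(0));
}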
For ES6 and a UTF-8 string:
decodeURIComponent(escape(String.fromCharCode(...uint8arrData)))
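For example, the three UTF-8 bytes of the euro sign decode correctly; note that escape/unescape are deprecated, so treat this as a legacy trick:
decodeURIComponent(escape(String.fromCharCode(...[0xE2, 0x82, 0xAC]))) // "€"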
By far the easiest way that has worked for me is:
//1. Create or fetch the Uint8Array to use in the example
const bufferArray = new Uint8Array([10, 10, 10])
//2. Turn the Uint8Array into a regular array
const array = Array.from(bufferArray);
//3. Stringify it (option A)
JSON.stringify(array);
//3. Stringify it (option B: uses @serdarsenay's code snippet to decode each item in array)
let binArrayToString = function(binArray) {
let str = "";
for (let i = 0; i < binArray.length; i++) {
str += String.fromCharCode(parseInt(binArray[i]));
}
return str;
}
binArrayToString(array);
class UTF8{
static encode(str:string){return new UTF8().encode(str)}
static decode(data:Uint8Array){return new UTF8().decode(data)}
private EOF_byte:number = -1;
private EOF_code_point:number = -1;
private encoderError(code_point) {
console.error("UTF8 encoderError",code_point)
}
private decoderError(fatal, opt_code_point?):number {
if (fatal) console.error("UTF8 decoderError",opt_code_point)
return opt_code_point || 0xFFFD;
}
private inRange(a:number, min:number, max:number) {
return min <= a && a <= max;
}
private div(n:number, d:number) {
return Math.floor(n / d);
}
private stringToCodePoints(string:string) {
/** @type {Array.<number>} */
let cps = [];
// Based on http://www.w3.org/TR/WebIDL/#idl-DOMString
let i = 0, n = string.length;
while (i < string.length) {
let c = string.charCodeAt(i);
if (!this.inRange(c, 0xD800, 0xDFFF)) {
cps.push(c);
} else if (this.inRange(c, 0xDC00, 0xDFFF)) {
cps.push(0xFFFD);
} else { // (inRange(c, 0xD800, 0xDBFF))
if (i == n - 1) {
cps.push(0xFFFD);
} else {
let d = string.charCodeAt(i + 1);
if (this.inRange(d, 0xDC00, 0xDFFF)) {
let a = c & 0x3FF;
let b = d & 0x3FF;
i += 1;
cps.push(0x10000 + (a << 10) + b);
} else {
cps.push(0xFFFD);
}
}
}
i += 1;
}
return cps;
}
private encode(str:string):Uint8Array {
let pos:number = 0;
let codePoints = this.stringToCodePoints(str);
let outputBytes = [];
while (codePoints.length > pos) {
let code_point:number = codePoints[pos++];
if (this.inRange(code_point, 0xD800, 0xDFFF)) {
this.encoderError(code_point);
}
else if (this.inRange(code_point, 0x0000, 0x007f)) {
outputBytes.push(code_point);
} else {
let count = 0, offset = 0;
if (this.inRange(code_point, 0x0080, 0x07FF)) {
count = 1;
offset = 0xC0;
} else if (this.inRange(code_point, 0x0800, 0xFFFF)) {
count = 2;
offset = 0xE0;
} else if (this.inRange(code_point, 0x10000, 0x10FFFF)) {
count = 3;
offset = 0xF0;
}
outputBytes.push(this.div(code_point, Math.pow(64, count)) + offset);
while (count > 0) {
let temp = this.div(code_point, Math.pow(64, count - 1));
outputBytes.push(0x80 + (temp % 64));
count -= 1;
}
}
}
return new Uint8Array(outputBytes);
}
private decode(data:Uint8Array):string {
let fatal:boolean = false;
let pos:number = 0;
let result:string = "";
let code_point:number;
let utf8_code_point = 0;
let utf8_bytes_needed = 0;
let utf8_bytes_seen = 0;
let utf8_lower_boundary = 0;
while (data.length > pos) {
let _byte = data[pos++];
if (_byte == this.EOF_byte) {
if (utf8_bytes_needed != 0) {
code_point = this.decoderError(fatal);
} else {
code_point = this.EOF_code_point;
}
} else {
if (utf8_bytes_needed == 0) {
if (this.inRange(_byte, 0x00, 0x7F)) {
code_point = _byte;
} else {
if (this.inRange(_byte, 0xC2, 0xDF)) {
utf8_bytes_needed = 1;
utf8_lower_boundary = 0x80;
utf8_code_point = _byte - 0xC0;
} else if (this.inRange(_byte, 0xE0, 0xEF)) {
utf8_bytes_needed = 2;
utf8_lower_boundary = 0x800;
utf8_code_point = _byte - 0xE0;
} else if (this.inRange(_byte, 0xF0, 0xF4)) {
utf8_bytes_needed = 3;
utf8_lower_boundary = 0x10000;
utf8_code_point = _byte - 0xF0;
} else {
this.decoderError(fatal);
}
utf8_code_point = utf8_code_point * Math.pow(64, utf8_bytes_needed);
code_point = null;
}
} else if (!this.inRange(_byte, 0x80, 0xBF)) {
utf8_code_point = 0;
utf8_bytes_needed = 0;
utf8_bytes_seen = 0;
utf8_lower_boundary = 0;
pos--;
code_point = this.decoderError(fatal, _byte);
} else {
utf8_bytes_seen += 1;
utf8_code_point = utf8_code_point + (_byte - 0x80) * Math.pow(64, utf8_bytes_needed - utf8_bytes_seen);
if (utf8_bytes_seen !== utf8_bytes_needed) {
code_point = null;
} else {
let cp = utf8_code_point;
let lower_boundary = utf8_lower_boundary;
utf8_code_point = 0;
utf8_bytes_needed = 0;
utf8_bytes_seen = 0;
utf8_lower_boundary = 0;
if (this.inRange(cp, lower_boundary, 0x10FFFF) && !this.inRange(cp, 0xD800, 0xDFFF)) {
code_point = cp;
} else {
code_point = this.decoderError(fatal, _byte);
}
}
}
}
//Decode string
if (code_point !== null && code_point !== this.EOF_code_point) {
if (code_point <= 0xFFFF) {
if (code_point > 0)result += String.fromCharCode(code_point);
} else {
code_point -= 0x10000;
result += String.fromCharCode(0xD800 + ((code_point >> 10) & 0x3ff));
result += String.fromCharCode(0xDC00 + (code_point & 0x3ff));
}
}
}
return result;
}
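A usage sketch for the class above, reusing the sample string from the MDN answer:
const bytes = UTF8.encode("2H₂ + O₂ ⇌ 2H₂O"); // Uint8Array of UTF-8 bytes
const text = UTF8.decode(bytes);              // back to the original string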
Using base64 as the encoding format works quite well. This is how it was implemented for passing secrets via URLs in Firefox Send. You will need the base64-js package. These are the functions from the Send source code:
const b64 = require("base64-js")
function arrayToB64(array) {
return b64.fromByteArray(array).replace(/\+/g, "-").replace(/\//g, "_").replace(/=/g, "")
}
function b64ToArray(str) {
return b64.toByteArray(str + "===".slice((str.length + 3) % 4))
}
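A round-trip sketch with these helpers; the padding expression in b64ToArray restores the '=' characters that arrayToB64 strips:
const bytes = new Uint8Array([255, 0, 127, 128]);
const token = arrayToB64(bytes); // URL-safe, unpadded base64
const back = b64ToArray(token);  // Uint8Array [255, 0, 127, 128]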
With vanilla browser-side JavaScript, recording from the microphone, the base64 functions worked for me (I had to implement an audio-sending function for a chat). Note that btoa here encodes the typed array's comma-separated string representation; it round-trips via split(','), but the payload is much larger than encoding the raw bytes would be.
const ui8a = new Uint8Array(e.target.result);
const string = btoa(ui8a);
const ui8a_2 = atob(string).split(',');
Full code now. Thanks to Bryan Jennings & breakspirit@py4u.net for the code.
https://medium.com/@bryanjenningz/how-to-record-and-play-audio-in-javascript-faa1b2b3e49b
https://www.py4u.net/discuss/282499
index.html
<html>
<head>
<title>Record Audio Test</title>
<meta charset="utf-8" />
</head>
<body>
<h1>Audio Recording Test</h1>
<script src="index.js"></script>
<button id="action" onclick="start()">Start</button>
<button id="stop" onclick="stop()">Stop</button>
<button id="play" onclick="play()">Listen</button>
</body>
</html>
index.js:
const recordAudio = () =>
new Promise(async resolve => {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const mediaRecorder = new MediaRecorder(stream);
const audioChunks = [];
mediaRecorder.addEventListener("dataavailable", event => {
audioChunks.push(event.data);
});
const start = () => mediaRecorder.start();
const stop = () =>
new Promise(resolve => {
mediaRecorder.addEventListener("stop", () => {
const audioBlob = new Blob(audioChunks);
const audioUrl = URL.createObjectURL(audioBlob);
const audio = new Audio(audioUrl);
const play = () => audio.play();
resolve({ audioBlob, audioUrl, play });
});
mediaRecorder.stop();
});
resolve({ start, stop });
});
let recorder = null;
let audio = null;
const sleep = time => new Promise(resolve => setTimeout(resolve, time));
const start = async () => {
recorder = await recordAudio();
recorder.start();
}
const stop = async () => {
audio = await recorder.stop();
read(audio.audioUrl);
}
const play = ()=> {
audio.play();
}
const read = (blobUrl)=> {
var xhr = new XMLHttpRequest;
xhr.responseType = 'blob';
xhr.onload = function() {
var recoveredBlob = xhr.response;
const reader = new FileReader();
// This fires after the blob has been read/loaded.
reader.addEventListener('loadend', (e) => {
const ui8a = new Uint8Array(e.target.result);
const string = btoa(ui8a);
const ui8a_2 = atob(string).split(',');
playByteArray(ui8a_2);
});
// Start reading the blob as an ArrayBuffer.
reader.readAsArrayBuffer(recoveredBlob);
};
// get the blob through blob url
xhr.open('GET', blobUrl);
xhr.send();
}
window.onload = init;
var context; // Audio context
var buf; // Audio buffer
function init() {
if (!window.AudioContext) {
if (!window.webkitAudioContext) {
alert("Your browser does not support any AudioContext and cannot play back this audio.");
return;
}
window.AudioContext = window.webkitAudioContext;
}
context = new AudioContext();
}
function playByteArray(byteArray) {
var arrayBuffer = new ArrayBuffer(byteArray.length);
var bufferView = new Uint8Array(arrayBuffer);
for (var i = 0; i < byteArray.length; i++) {
bufferView[i] = byteArray[i];
}
context.decodeAudioData(arrayBuffer, function(buffer) {
buf = buffer;
play2();
});
}
// Play the loaded file
function play2() {
// Create a source node from the buffer
var source = context.createBufferSource();
source.buffer = buf;
// Connect to the final output node (the speakers)
source.connect(context.destination);
// Play immediately
source.start(0);
}
var decodedString = decodeURIComponent(escape(String.fromCharCode(...new Uint8Array(err))));
var obj = JSON.parse(decodedString);
I am using this Typescript snippet:
function UInt8ArrayToString(uInt8Array: Uint8Array): string
{
var s: string = "[";
for(var i: number = 0; i < uInt8Array.byteLength; i++)
{
if( i > 0 )
s += ", ";
s += uInt8Array[i];
}
s += "]";
return s;
}
Remove the type annotations if you need the JavaScript version.
Hope this helps!
I'm working with data that comes over a WebSocket connection from a StarCraft 2 client, to obtain image data from a game that is in progress. In some cases, the image data may be sent with a format of 1 bit per pixel. When this happens I need to "unpack" the bits from each byte in the response (1 byte => 8 bits). This is done in the code below:
function unpackbits(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte
let offset
for (let i = 0; i < uint8data.length; i++) {
byte = uint8data[i]
offset = (8 * i)
results[offset + 7] = ((byte & (1 << 0)) >> 0)
results[offset + 6] = ((byte & (1 << 1)) >> 1)
results[offset + 5] = ((byte & (1 << 2)) >> 2)
results[offset + 4] = ((byte & (1 << 3)) >> 3)
results[offset + 3] = ((byte & (1 << 4)) >> 4)
results[offset + 2] = ((byte & (1 << 5)) >> 5)
results[offset + 1] = ((byte & (1 << 6)) >> 6)
results[offset + 0] = ((byte & (1 << 7)) >> 7)
}
return results
}
This gets fed into a tensor like so:
static unpack_layer(plane) {
//Return a correctly shaped tensor given the feature layer bytes.//
const size = point.Point.build(plane.getSize()) // { x, y }
if (plane.getBitsPerPixel() === 1) {
data = unpackbits(data)
if (data.length !== (size.x * size.y)) {
// This could happen if the correct length isn't a multiple of 8, leading
// to some padding bits at the end of the string which are incorrectly
// interpreted as data.
data = data.slice(0, size.x * size.y)
}
}
data = tf.tensor(data, [size.y, size.x], 'int32')
return data
}
In one of my tests, this code gets run 1900 times and takes 0.0737s to execute.
This is very slow.
For comparison, the equivalent functionality in python takes 0.0209s to run 1900 times. The python code looks like this:
def unpack_layer(plane):
"""Return a correctly shaped numpy array given the feature layer bytes."""
size = point.Point.build(plane.size) # {x, y }
data = np.frombuffer(plane.data, dtype=Feature.dtypes[plane.bits_per_pixel])
if plane.bits_per_pixel == 1:
data = np.unpackbits(data)
if data.shape[0] != size.x * size.y:
# This could happen if the correct length isn't a multiple of 8, leading
# to some padding bits at the end of the string which are incorrectly
# interpreted as data.
data = data[:size.x * size.y]
return data.reshape(size.y, size.x)
In short, the JavaScript version takes roughly 4x as long as the Python version.
I'll be looking at the numpy unpackbits documentation, as that seems to be doing something much more efficient than my own approach.
However, I was wondering if anyone had any thoughts as to how I could better optimize my own unpackbits function, or better yet, a way to have TensorFlow do that for me?
Not sure if this helps, but I am kicking myself as I got hung up on the need for bitwise operators in TensorFlow in order to convert a byte stream into a bit stream, per the original question. Simple use of integer division and modulus can do the trick too!
In short, the algorithm by example is thus. Given a byte stream of [ 92 ]...
Divide and mod by 16, resulting in 2 bytes, namely [ 5 ] and [ 12 ] respectively.
Interleave these results into a tensor [ 5, 12 ].
Take each of those values, and divide and mod by 4, resulting in [ 1, 3 ] and [ 1, 0 ].
Interleave these results into a tensor [ 1, 1, 3, 0 ].
Divide and mod by 2, resulting in [ 0, 0, 1, 0 ] and [ 1, 1, 1, 0 ].
Interleave into [ 0, 1, 0, 1, 1, 1, 0, 0 ] which is binary for 92.
Below are two versions of the same algorithm. One in tensorflow and one in pure javascript.
function tfDaC( stream ) {
const stream8bit = tf.tensor( stream, undefined, 'int32' );
console.time('in-tf');
const stream4bitHi = tf.div(stream8bit, tf.scalar(16, 'int32' ));
const stream4bitLo = tf.mod(stream8bit, tf.scalar(16, 'int32' ));
const stream4bit = tf.stack([stream4bitHi, stream4bitLo],1).flatten();
const stream2bitHi = tf.div( stream4bit, tf.scalar(4, 'int32' ));
const stream2bitLo = tf.mod(stream4bit, tf.scalar(4, 'int32' ));
const stream2bit = tf.stack([stream2bitHi, stream2bitLo],1).flatten();
const stream1bitHi = tf.div(stream2bit, tf.scalar(2, 'int32' ));
const stream1bitLo = tf.mod(stream2bit, tf.scalar(2, 'int32' ));
const stream1bit = tf.stack([stream1bitHi, stream1bitLo],1).flatten().toBool();
console.timeEnd('in-tf');
return stream1bit.dataSync().buffer;
}
function jsDaC( stream ) {
let result = new ArrayBuffer( stream.byteLength * 8 );
let buffer32 = new Uint32Array( result ); // Pointer to every 4 bytes!
for ( let i = 0; i < stream.byteLength; i++ ) {
let byte = stream[ i ];
buffer32[ (i * 2) |0 ] = ( byte / 16) |0;
buffer32[ (i * 2 + 1) |0 ] = ( byte % 16 ) |0;
}
let buffer16 = new Uint16Array( result ); // Pointer to every 2 bytes!
for ( let i = 0; i < buffer32.length; i++ ) {
let byte = buffer32[ i ];
buffer16[ (i * 2) |0 ] = ( byte / 4) |0;
buffer16[ (i * 2 + 1) |0 ] = ( byte % 4 ) |0;
}
let buffer8 = new Uint8Array( result ); // Pointer to every byte!
for ( let i = 0; i < buffer16.length; i++ ) {
let byte = buffer16[ i ];
buffer8[ (i * 2) |0 ] = ( byte / 2 ) |0;
buffer8[ (i * 2 + 1) |0 ] = ( byte % 2 ) |0;
}
return result;
}
console.log( 'Generating array of 1M bytes' );
let buffer = new ArrayBuffer( 1000000 );
let testArray = new Uint8Array( buffer );
for ( let i = 0; i < testArray.length; i++ ) {
testArray[ i ] = Math.floor( 256 * Math.random() );
}
let result;
console.log( 'Begin tensorflow divide & conquer test with 1M bytes.' );
console.time( 'tf' );
result = tfDaC( testArray );
console.timeEnd( 'tf' );
console.log( `End tensorflow test with 1M bytes resulting in array of ${result.byteLength} bytes` );
console.log( 'Begin javascript divide & conquer test with 1M bytes.' );
console.time( 'js' );
result = jsDaC( testArray );
console.timeEnd( 'js' );
console.log( `End javascript test with 1M bytes resulting in array of ${result.byteLength} bytes` );
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@2.0.1/dist/tf.min.js"></script>
The TensorFlow performance was terrible on my workstation. I had to reduce the byte stream down to 1M bytes, as my GPU was throwing memory errors at my previous test levels of a 10M byte stream. And even then, at only 1M bytes, a handful of tests ranged from 1236ms to 1414ms. Not sure why it was so slow. Could possibly be the coercion of the numbers to int32, which might be adding a lot of overhead, as my understanding is that GPUs are generally built primarily for floating point operations. And marshalling the data onto and off of the GPU consumes some time too. Maybe it's worthwhile to try to convert this function to a floating-point-only function rather than int32...?! Maybe I grabbed a poor version of tensorflow.js...?! I'd be interested to hear how it runs in your NodeJS configuration...
On the other hand, the javascript version for 1M bytes ranged from 30ms to 42ms, almost 2 orders of magnitude(!) faster than the GPU. But still, when extrapolating these results to 10M bytes, this algorithm is still slower than all the other previous algorithms...
So not sure if this helps. It might simply help eliminate TensorFlow as an option, although it might still be worthwhile trying floats rather than int32, but I am not very hopeful...
It looks like tensorflow.js does not have a bitwise AND function, so I suspect doing the work within tensorflow.js will require some coding gymnastics...
One suggestion, though, is to create an array of 256 Uint8Arrays of size 8, and pre-populate it with the complete list of 8-byte translations. This greatly reduces the repeated calculations for a byte stream that will likely have repeated values in the range 0 - 255. Eg, the first entry in the precomputed array represents the unpacking of byte 0, and therefore is a Uint8Array of size 8 populated with 0's; the next entry is another Uint8Array of size 8 populated with 00000001; etc, all the way to the entry representing byte 255, which is a Uint8Array of size 8 populated with all 1's.
Then, when unpacking, simply make use of the typed array .set method to copy the precomputed unpacked representation into the results Uint8Array...
Hope this helps.
EDIT: Created a number of variants of the unpacking algorithm to test the performance of inline calculations vs memory lookup, and was surprised at the results using Chrome. Some of the optimizations of the V8 compiler are non-intuitive...
The differences in the versions...
unpackbits [FAST]: From the original question; this is the bar against which the other variations are compared.
unpackbits1 [FAST]: Modified by...
Specifying "|0" after every integer.
Using the increment unary op ( "++" ) rather than adding increments to the offset index of the result array.
Replacing the calculation of bit masks with the actual value. (Ie, rather than 1 << 5, the function used 32.)
unpackbits1a [FAST]: The same as unpackbits1, except...
Kept the calculation of bit masks rather than integer values. (Ie, using 1 << 5 rather than 32, as in the original question.) Counterintuitively, this produces a slightly faster result!
unpackbits1b [SLOWER]: The same as unpackbits1a, except...
The offset is not recomputed every time inside the loop. Ie, offset = 0|0 is set initially, and thereafter offset is only incremented within the loop. So, offset = ( (8|0) * i ) is no longer calculated for every byte. Counterintuitively, this produces a slower result!
unpackbits2 [SLOWEST]: This is the memory lookup option that I recommended above. Counterintuitively, this implies that typed array memory operations are much slower than calculating the results as in unpackbits!
unpackbits3 [SLOWER]: This is the memory lookup option that I recommended above, with the following change.
Rather than using the typed array .set method, this version sets the eight bytes one by one. Counterintuitively, this implies that the typed array .set method is slower (at least for eight bytes) than individually setting the values!
unpackbits4 [SLOWER]: This variation of the algorithm was on par with the original, and was a variation of the memory lookup option. But rather than 256 individual Uint8Arrays, this combined all the pre-calculated results into a single Uint8Array of length 256 * 8. And it did not make use of the typed array .set method.
unpackbits5 [SLOWER]: Same as unpackbits4, except...
Rather than using the unary "++" on the index into the lookup table, it calculated the index for each of the 8 bytes being copied. As expected, calculating the index every time was slower than using the unary "++" operator.
Here are the tests. BEWARE that this builds an initial array of 10M random bytes, and then runs each unpack algorithm on this same data. On my workstation, the test runs in less than 5 seconds.
var lookupTable = initializeLookupTable();
function initializeLookupTable() {
let lookup = new Array( 256 );
let v = new Uint8Array( 1 );
for ( let i = 0; i < 256; i++ ) {
v[ 0 ] = i;
lookup[ i ] = unpackbits( v );
}
return lookup;
}
var lookupTable4 = initializeLookupTable4();
function initializeLookupTable4() {
let lookup = new Uint8Array( 256 * 8 );
let v = new Uint8Array( 1 );
for ( let i = 0; i < 256; i++ ) {
v[ 0 ] = i;
let temp = unpackbits( v );
lookup.set( temp, i * 8 );
}
return lookup;
}
function unpackbits(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte
let offset
for (let i = 0; i < uint8data.length; i++) {
byte = uint8data[i]
offset = (8 * i);
results[offset + 7] = ((byte & (1 << 0)) >> 0)
results[offset + 6] = ((byte & (1 << 1)) >> 1)
results[offset + 5] = ((byte & (1 << 2)) >> 2)
results[offset + 4] = ((byte & (1 << 3)) >> 3)
results[offset + 3] = ((byte & (1 << 4)) >> 4)
results[offset + 2] = ((byte & (1 << 5)) >> 5)
results[offset + 1] = ((byte & (1 << 6)) >> 6)
results[offset + 0] = ((byte & (1 << 7)) >> 7)
}
return results
}
function unpackbits1(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte;
let offset;
for (let i = 0|0, n = uint8data.length; i < n; i++) {
byte = uint8data[i]|0
offset = (8 * i)|0;
results[offset++] = ((byte & 128)>>7)|0;
results[offset++] = ((byte & 64)>>6)|0;
results[offset++] = ((byte & 32)>>5)|0;
results[offset++] = ((byte & 16)>>4)|0;
results[offset++] = ((byte & 8)>>3)|0;
results[offset++] = ((byte & 4)>>2)|0;
results[offset++] = ((byte & 2)>>1)|0;
results[offset++] = ((byte & 1)>>0)|0;
}
return results
}
function unpackbits1a(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte;
let offset;
for (let i = 0|0, n = uint8data.length; i < n; i++) {
byte = uint8data[i]|0;
offset = (8 * i)|0;
results[offset++] = ((byte & (1 << 7))>>7)|0;
results[offset++] = ((byte & (1 << 6))>>6)|0;
results[offset++] = ((byte & (1 << 5))>>5)|0;
results[offset++] = ((byte & (1 << 4))>>4)|0;
results[offset++] = ((byte & (1 << 3))>>3)|0;
results[offset++] = ((byte & (1 << 2))>>2)|0;
results[offset++] = ((byte & (1 << 1))>>1)|0;
results[offset++] = (byte & 1)|0;
}
return results
}
function unpackbits1b(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte;
let offset = 0|0;
for (let i = 0|0, n = uint8data.length; i < n; i++) {
byte = uint8data[i]|0;
results[offset++] = ((byte & (1 << 7))>>7)|0;
results[offset++] = ((byte & (1 << 6))>>6)|0;
results[offset++] = ((byte & (1 << 5))>>5)|0;
results[offset++] = ((byte & (1 << 4))>>4)|0;
results[offset++] = ((byte & (1 << 3))>>3)|0;
results[offset++] = ((byte & (1 << 2))>>2)|0;
results[offset++] = ((byte & (1 << 1))>>1)|0;
results[offset++] = (byte & 1)|0;
}
return results
}
function unpackbits2( uint8data ) {
const result = new Uint8Array( 8 * uint8data.length );
for ( let i = 0|0, ri = 0|0, n = uint8data.length; i < n; i++, ri += 8 ) {
result.set( lookupTable[ uint8data[ i ] ], ri );
}
return result;
}
function unpackbits3( uint8data ) {
const result = new Uint8Array( 8 * uint8data.length );
let ri = 0|0;
for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
//result.set( lookupTable[ uint8data[ i ] ], ri );
let lv = lookupTable[ uint8data[ i ] ];
result[ ri++ ] = lv [ 0|0 ];
result[ ri++ ] = lv [ 1|0 ];
result[ ri++ ] = lv [ 2|0 ];
result[ ri++ ] = lv [ 3|0 ];
result[ ri++ ] = lv [ 4|0 ];
result[ ri++ ] = lv [ 5|0 ];
result[ ri++ ] = lv [ 6|0 ];
result[ ri++ ] = lv [ 7|0 ];
}
return result;
}
function unpackbits4( uint8data ) {
const result = new Uint8Array( 8 * uint8data.length );
let ri = 0|0;
for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
let li = (uint8data[ i ] * 8)|0;
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
result[ ri++ ] = lookupTable4[ li++ ];
}
return result;
}
function unpackbits5( uint8data ) {
const result = new Uint8Array( 8 * uint8data.length );
let ri = 0|0;
for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
let li = (uint8data[ i ] * 8)|0;
result[ ri++ ] = lookupTable4[ li ];
result[ ri++ ] = lookupTable4[ li+1 ];
result[ ri++ ] = lookupTable4[ li+2 ];
result[ ri++ ] = lookupTable4[ li+3 ];
result[ ri++ ] = lookupTable4[ li+4 ];
result[ ri++ ] = lookupTable4[ li+5 ];
result[ ri++ ] = lookupTable4[ li+6 ];
result[ ri++ ] = lookupTable4[ li+7 ];
}
return result;
}
// Test
console.log( 'Building array of 10,000,000 test values.' );
let buffer = new ArrayBuffer( 10000000 );
let testArray = new Uint8Array( buffer );
for ( let i = 0; i < testArray.length; i++ ) {
testArray[ i ] = Math.floor( 256 * Math.random() );
}
console.log( 'Finished building test values.' );
console.log( 'Starting unpackbits.' );
console.time('u');
let u = unpackbits( testArray );
console.timeEnd('u');
console.log( 'Finished unpackbits.' );
console.log( 'Starting unpackbits1.' );
console.time('u1');
u = unpackbits1( testArray );
console.timeEnd('u1');
console.log( 'Finished unpackbits1.' );
console.log( 'Starting unpackbits1a.' );
console.time('u1a');
u = unpackbits1a( testArray );
console.timeEnd('u1a');
console.log( 'Finished unpackbits1a.' );
console.log( 'Starting unpackbits1b.' );
console.time('u1b');
u = unpackbits1b(testArray );
console.timeEnd('u1b');
console.log( 'Finished unpackbits1b.' );
console.log( 'Starting unpackbits2.' );
console.time('u2');
u = unpackbits2( testArray );
console.timeEnd('u2');
console.log( 'Finished unpackbits2.' );
console.log( 'Starting unpackbits3.' );
console.time('u3');
u = unpackbits3( testArray );
console.timeEnd('u3');
console.log( 'Finished unpackbits3.' );
console.log( 'Starting unpackbits4.' );
console.time('u4');
u = unpackbits4( testArray );
console.timeEnd('u4');
console.log( 'Finished unpackbits4.' );
console.log( 'Starting unpackbits5.' );
console.time('u5');
u = unpackbits5( testArray );
console.timeEnd('u5');
console.log( 'Finished unpackbits5.' );
This response is a continuation of the comment chain under @Jon Trent's answer.
EDIT: Include TensorFlow comparison for the reshaping portion.
I'm profiling the performance of two bit-unpacking methods: unpackbits1a and unpackbits (original). I am also profiling the different methods for reshaping the data to an NxM grid, where N is probably the same as M. Here's what I got:
function unpackbits1a(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte;
let offset;
for (let i = 0|0, n = uint8data.length; i < n; i++) {
byte = uint8data[i]
offset = ((8|0) * i); // The "|0" on this line cuts the time almost in half!
results[offset++] = (byte & ((1|0) << (7|0)))>>7|0;
results[offset++] = (byte & ((1|0) << (6|0)))>>6|0;
results[offset++] = (byte & ((1|0) << (5|0)))>>5|0;
results[offset++] = (byte & ((1|0) << (4|0)))>>4|0;
results[offset++] = (byte & ((1|0) << (3|0)))>>3|0;
results[offset++] = (byte & ((1|0) << (2|0)))>>2|0;
results[offset++] = (byte & ((1|0) << (1|0)))>>1|0;
results[offset++] = (byte & (1|0));
}
return results
}
function unpackbits(uint8data) {
const results = new Uint8Array(8 * uint8data.length)
let byte
let offset
for (let i = 0; i < uint8data.length; i++) {
byte = uint8data[i]
offset = 8 * i
results[offset + 7] = ((byte & (1 << 0)) >> 0)
results[offset + 6] = ((byte & (1 << 1)) >> 1)
results[offset + 5] = ((byte & (1 << 2)) >> 2)
results[offset + 4] = ((byte & (1 << 3)) >> 3)
results[offset + 3] = ((byte & (1 << 4)) >> 4)
results[offset + 2] = ((byte & (1 << 5)) >> 5)
results[offset + 1] = ((byte & (1 << 6)) >> 6)
results[offset + 0] = ((byte & (1 << 7)) >> 7)
}
return results
}
function unpackbitsToShape1(uint8data, shape = [1, 1]) {
var data = unpackbits(uint8data)
const dims = [shape[0] | 0, shape[1] | 0]
const result = new Array(dims[0])
let temp
const width = 0 | dims[1]
for (let i = 0 | 0; i < dims[0]; i++) {
temp = new Array(dims[1])
for (let j = 0| 0; j < dims[1]; j++) {
temp[j] = data[i * width + j]
}
result[i] = temp
}
return result
}
function unpackbitsToShape2(uint8data, shape = [1, 1]) {
var data = unpackbits(uint8data)
const dims = [shape[0] | 0, shape[1] | 0]
const result = new Array(dims[0])
const width = dims[1]
let offset
for (let i = 0 | 0; i < dims[0]; i++) {
offset = (width * i)
result[i] = data.slice(offset, offset + width)
}
return result
}
function unpackbitsToShape3(uint8data, shape = [1, 1]) {
const dims = [0 | shape[0], 0 | shape[1]]
const result = new Array(dims[0])
let position = 0 | 0
const smallCount = 0 | (uint8data.length % dims[0])
const bigCount = 0 | (uint8data.length - smallCount)
const bigByteChunk = 0 | (bigCount / dims[0])
const bigBitWidth = 0 | 8 * bigByteChunk
const smallByteChunk = 0 | (smallCount / dims[0])
const smallBitWidth = 0 | 8 * smallByteChunk
if (smallCount) {
let big
let small
let odd
let temp
for (let i = 0 | 0; i < dims[0]; i++) {
temp = new Uint8Array(dims[1])
odd = i % 2
big = unpackbits(uint8data.subarray(position, position + bigByteChunk))
position += bigByteChunk
if (odd) {
temp.set(small.subarray(smallBitWidth, 8), 0)
temp.set(big, smallBitWidth)
result[i] = temp
} else {
temp.set(big, 0)
small = unpackbits(uint8data.subarray(position, position + 1))
position++
temp.set(small.subarray(0, smallBitWidth), bigBitWidth)
result[i] = temp
}
}
return result
}
for (let i = 0 | 0; i < dims[0]; i++) {
// console.log('unpacking: ', uint8data.subarray(position, position + bigByteChunk))
result[i] = unpackbits(uint8data.subarray(position, position + bigByteChunk))
position += bigByteChunk
}
return result
}
var tf = require('@tensorflow/tfjs')
tf = require('@tensorflow/tfjs-node')
function unpackBitsToShapeTensorflow(uint8data, shape) {
return tf.tensor(unpackbits(uint8data), shape, 'int32')
}
var test64by64 = new Uint8Array(512)
for (let i = 0; i < test64by64.length; i++) {
test64by64[ i ] = Math.floor(256 * Math.random());
}
var test84by84 = new Uint8Array(882)
for (let i = 0; i < test84by84.length; i++) {
test84by84[ i ] = Math.floor(256 * Math.random());
}
var test100by100 = new Uint8Array(1250)
for (let i = 0; i < test100by100.length; i++) {
test100by100[ i ] = Math.floor(256 * Math.random());
}
function assert(condition, errMsg) {
if (!condition) {
console.error(errMsg)
}
}
console.log('********* 64 x 64 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
var foo = unpackbits1a(test64by64);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test64by64);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');
console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test64by64, [64, 64])
console.timeEnd('u1');
assert(
foo.length === 64 && foo[0].length === 64,
'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape1.');
console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test64by64, [64, 64])
console.timeEnd('u2');
assert(
foo.length === 64 && foo[0].length === 64,
'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape2.');
console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test64by64, [64, 64])
console.timeEnd('u3');
assert(
foo.length === 64 && foo[0].length === 64,
'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape3.');
console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test64by64, [64, 64])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');
console.log('\n\n********* 84 x 84 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
foo = unpackbits1a(test84by84);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test84by84);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');
console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test84by84, [84, 84])
console.timeEnd('u1');
assert(
foo.length === 84 && foo[0].length === 84,
'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape1.');
console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test84by84, [84, 84])
console.timeEnd('u2');
assert(
foo.length === 84 && foo[0].length === 84,
'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape2.');
console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test84by84, [84, 84])
console.timeEnd('u3');
assert(
foo.length === 84 && foo[0].length === 84,
'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape3.');
console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test84by84, [84, 84])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');
console.log('\n\n********* 100 x 100 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
foo = unpackbits1a(test100by100);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test100by100);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');
console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test100by100, [100, 100])
console.timeEnd('u1');
assert(
foo.length === 100 && foo[0].length === 100,
'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape1.');
console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test100by100, [100, 100])
console.timeEnd('u2');
assert(
foo.length === 100 && foo[0].length === 100,
'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape2.');
console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test100by100, [100, 100])
console.timeEnd('u3');
assert(
foo.length === 100 && foo[0].length === 100,
'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape3.');
console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test100by100, [100, 100])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');
I don't know how different the browser's execution environment is from Node's, but results seem more stable in Node. Here's what I get:
********* 64 x 64 *********
Starting unpackbits1a.
u1a: 0.513ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.189ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 0.434ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.365ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.590ms
Finished unpackbitsToShape3.
Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.508ms
Finished unpackBitsToShapeTensorflow.
********* 84 x 84 *********
Starting unpackbits1a.
u1a: 0.222ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.425ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 0.622ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.303ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.388ms
Finished unpackbitsToShape3.
Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.175ms
Finished unpackBitsToShapeTensorflow.
********* 100 x 100 *********
Starting unpackbits1a.
u1a: 1.502ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.018ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 1.631ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.072ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.159ms
Finished unpackbitsToShape3.
Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.052ms
Finished unpackBitsToShapeTensorflow.
Anyone know of a good snippet of JavaScript code to convert HEX encoded strings to base64 encoded strings?
If you're working in Node or using Browserify, you can use
var base64String = Buffer.from(hexString, 'hex').toString('base64')
or
var hexString = Buffer.from(base64String, 'base64').toString('hex')
The excellent comment by @dandavis was modified by StackOverflow and has some weird hidden characters.
Here it is as a one-liner:
btoa("a6b580481008e60df9350de170b7e728".match(/\w{2}/g).map(function(a){return String.fromCharCode(parseInt(a, 16));} ).join(""))
or:
function hexToBase64(hexstring) {
return btoa(hexstring.match(/\w{2}/g).map(function(a) {
return String.fromCharCode(parseInt(a, 16));
}).join(""));
}
hexToBase64("a6b580481008e60df9350de170b7e728");
Both return:
"prWASBAI5g35NQ3hcLfnKA=="
Note that the hex string should have an even length:
hexToBase64("00");
// => "AA=="
hexToBase64("000");
// => "AA=="
if (!window.atob) {
var tableStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var table = tableStr.split("");
window.atob = function (base64) {
if (/(=[^=]+|={3,})$/.test(base64)) throw new Error("String contains an invalid character");
base64 = base64.replace(/=/g, "");
var n = base64.length & 3;
if (n === 1) throw new Error("String contains an invalid character");
for (var i = 0, j = 0, len = base64.length / 4, bin = []; i < len; ++i) {
var a = tableStr.indexOf(base64[j++] || "A"), b = tableStr.indexOf(base64[j++] || "A");
var c = tableStr.indexOf(base64[j++] || "A"), d = tableStr.indexOf(base64[j++] || "A");
if ((a | b | c | d) < 0) throw new Error("String contains an invalid character");
bin[bin.length] = ((a << 2) | (b >> 4)) & 255;
bin[bin.length] = ((b << 4) | (c >> 2)) & 255;
bin[bin.length] = ((c << 6) | d) & 255;
};
return String.fromCharCode.apply(null, bin).substr(0, bin.length + n - 4);
};
window.btoa = function (bin) {
for (var i = 0, j = 0, len = bin.length / 3, base64 = []; i < len; ++i) {
var a = bin.charCodeAt(j++), b = bin.charCodeAt(j++), c = bin.charCodeAt(j++);
if ((a | b | c) > 255) throw new Error("String contains an invalid character");
base64[base64.length] = table[a >> 2] + table[((a << 4) & 63) | (b >> 4)] +
(isNaN(b) ? "=" : table[((b << 2) & 63) | (c >> 6)]) +
(isNaN(b + c) ? "=" : table[c & 63]);
}
return base64.join("");
};
}
function hexToBase64(str) {
return btoa(String.fromCharCode.apply(null,
str.replace(/\r|\n/g, "").replace(/([\da-fA-F]{2}) ?/g, "0x$1 ").replace(/ +$/, "").split(" "))
);
}
function base64ToHex(str) {
for (var i = 0, bin = atob(str.replace(/[ \r\n]+$/, "")), hex = []; i < bin.length; ++i) {
var tmp = bin.charCodeAt(i).toString(16);
if (tmp.length === 1) tmp = "0" + tmp;
hex[hex.length] = tmp;
}
return hex.join(" ");
}
I liked the approach from @eric-duminil; nevertheless the solution below, avoiding regex, is ~2x faster.
Browser:
function hexToBase64(hexStr) {
return btoa([...hexStr].reduce((acc, _, i) =>
acc += !(i - 1 & 1) ? String.fromCharCode(parseInt(hexStr.substring(i - 1, i + 1), 16)) : ""
,""));
}
OR
// even a bit faster
function hexToBase64(hexStr) {
let base64 = "";
for(let i = 0; i < hexStr.length; i++) {
base64 += !(i - 1 & 1) ? String.fromCharCode(parseInt(hexStr.substring(i - 1, i + 1), 16)) : ""
}
return btoa(base64);
}
Node:
const base64 = Buffer.from(hexStr, 'hex').toString('base64');
Large strings, no btoa
The solution below is good for large strings; if you want to get bytes from base64 then look HERE.
function bytesArrToBase64(arr) {
const abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; // base64 alphabet
const bin = n => n.toString(2).padStart(8,0); // convert num to 8-bit binary string
const l = arr.length
let result = '';
for(let i=0; i<=(l-1)/3; i++) {
let c1 = i*3+1>=l; // case when "=" is on end
let c2 = i*3+2>=l; // case when "=" is on end
let chunk = bin(arr[3*i]) + bin(c1? 0:arr[3*i+1]) + bin(c2? 0:arr[3*i+2]);
let r = chunk.match(/.{1,6}/g).map((x,j)=> j==3&&c2 ? '=' :(j==2&&c1 ? '=':abc[+('0b'+x)]));
result += r.join('');
}
return result;
}
function hexToBytes(hexString) {
return hexString.match(/.{1,2}/g).map(x=> +('0x'+x));
}
// ---------
// TEST
// ---------
let hexString = "a6b580481008e60df9350de170b7e728";
let bytes = hexToBytes(hexString);
let base64 = bytesArrToBase64(bytes);
console.log(base64);
I have a signed value given as a hex number, for example 0xffeb, and want to convert it into -21 as a "normal" JavaScript integer.
I have written some code so far:
function toBinary(a) { //: String
var r = '';
var binCounter = 0;
while (a > 0) {
r = a%2 + r;
a = Math.floor(a/2);
}
return r;
}
function twoscompl(a) { //: int
var l = toBinaryFill(a).length;
var msb = a >>> (l-1);
if (msb == 0) {
return a;
}
a = a-1;
var str = toBinary(a);
var nstr = '';
for (var i = 0; i < str.length; i++) {
nstr += str.charAt(i) == '1' ? '0' : '1';
}
return (-1)*parseInt(nstr);
}
The problem is that my function returns 1 as the MSB for both numbers, because it only looks at the MSB of the binary representation string, and in both cases that bit is 1:
-21 => 0xffeb => 1111 1111 1110 1011
21 => 0x15 => 1 0101
Do you have any idea how to implement this more efficiently and more cleanly?
Greetings,
mythbu
Use parseInt() to convert (which just accepts your hex string):
parseInt(a);
Then use a mask to figure out if the MSB is set:
a & 0x8000
If that returns a nonzero value, you know it is negative.
To wrap it all up:
a = "0xffeb";
a = parseInt(a, 16);
if ((a & 0x8000) > 0) {
a = a - 0x10000;
}
Note that this only works for 16-bit integers (short in C). If you have a 32-bit integer, you'll need a different mask and subtraction.
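A minimal sketch of the 32-bit case (the later answers below generalize this further); a plain comparison is used because JavaScript bitwise operations are signed 32-bit, so a & 0x80000000 would itself come out negative:
a = parseInt("0xffffffeb", 16);
if (a >= 0x80000000) {
  a = a - 0x100000000;
}
// a is now -21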
I came up with this:
function hexToInt(hex) {
if (hex.length % 2 != 0) {
hex = "0" + hex;
}
var num = parseInt(hex, 16);
var maxVal = Math.pow(2, hex.length / 2 * 8);
if (num > maxVal / 2 - 1) {
num = num - maxVal
}
return num;
}
And usage:
var res = hexToInt("FF"); // -1
res = hexToInt("A"); // same as "0A", 10
res = hexToInt("FFF"); // same as "0FFF", 4095
res = hexToInt("FFFF"); // -1
So basically the hex conversion range depends on the hex string's length, and this is what I was looking for. Hope it helps.
Based on @Bart Friederichs' answer I've come up with:
function HexToSignedInt(num, numSize) {
var val = {
mask: 0x8 * Math.pow(16, numSize-1), // 0x8000 if numSize = 4
sub: -0x1 * Math.pow(16, numSize) //-0x10000 if numSize = 4
}
if((parseInt(num, 16) & val.mask) > 0) { //negative
return (val.sub + parseInt(num, 16))
}else { //positive
return (parseInt(num,16))
}
}
so now you can specify the exact length (in nibbles).
var numberToConvert = "CB8";
HexToSignedInt(numberToConvert, 3);
//expected output: -840
function hexToSignedInt(hex) {
if (hex.length % 2 != 0) {
hex = "0" + hex;
}
var num = parseInt(hex, 16);
var maxVal = Math.pow(2, hex.length / 2 * 8);
if (num > maxVal / 2 - 1) {
num = num - maxVal
}
return num;
}
function hexToUnsignedInt(hex){
return parseInt(hex,16);
}
The first is for signed integers and the second for unsigned integers.
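Usage, with the value from the question:
hexToSignedInt("ffeb");   // -21
hexToUnsignedInt("ffeb"); // 65515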
As I had to turn absolute numeric values into signed values ranging from -2^24 to 2^24-1, I came up with this solution: you just have to turn your input into a number through parseInt(hex, 16); in your case, nBytes is 2.
function toSignedInt(value, nBytes) { // 0 <= value < 2^(nBytes*8), nBytes >= 1
var hexMask = '0x80' + '00'.repeat(nBytes - 1);
var intMask = parseInt(hexMask, 16);
if (value >= intMask) {
value = value - intMask * 2;
}
return value;
}
var vals = [ // expected output
'0x00', // 0
'0xFF', // 255
'0xFFFFFF', // 2^24 - 1 = 16777215
'0x7FFFFFFF', // 2^31 -1 = 2147483647
'0x80000000', // -2^31 = -2147483648
'0x80000001', // -2^31 + 1 = -2147483647
'0xFFFFFFFF', // -1
];
for (var hex of vals) {
var num = parseInt(hex, 16);
var result = toSignedInt(num, 4);
console.log(hex, num, result);
}
var sampleInput = '0xffeb';
var sampleResult = toSignedInt(parseInt(sampleInput, 16), 2);
console.log(sampleInput, sampleResult); // "0xffeb", -21
Based on the accepted answer, expanded to longer number types:
function parseSignedShort(str) {
const i = parseInt(str, 16);
return i >= 0x8000 ? i - 0x10000 : i;
}
parseSignedShort("0xffeb"); // -21
function parseSignedInt(str) {
const i = parseInt(str, 16);
return i >= 0x80000000 ? i - 0x100000000 : i;
}
parseSignedInt("0xffffffeb"); // -21
// Depends on new JS feature. Only supported after ES2020
function parseSignedLong(str) {
if (!str.toLowerCase().startsWith("0x"))
str = "0x" + str;
const i = BigInt(str);
return Number(i >= 0x8000000000000000n ? i - 0x10000000000000000n : i);
}
parseSignedLong("0xffffffffffffffeb"); // -21