Join up PNG images to an APNG animated image - javascript

Is it possible somehow to join up PNG images to an APNG animated image using nodejs?
I've found PHP library only: link

UPNG.js can parse and build APNG files - https://github.com/photopea/UPNG.js
From the readme -
UPNG.js supports APNG and the interface expects "frames".
UPNG.encode(imgs, w, h, cnum, [dels])
imgs: array of frames. A frame is an ArrayBuffer containing the pixel
data (RGBA, 8 bits per channel)
w, h : width and height of the image
cnum: number of colors in the result; 0: all colors (lossless PNG)
dels: array of delays for each frame (only when 2 or more frames)
returns an ArrayBuffer with binary data of a PNG file
UPNG.js can do a lossy minification of PNG files, similar to TinyPNG
and other tools. It performs color quantization using the k-means
algorithm.
Lossy compression is allowed by the last parameter cnum. Set it to
zero for a lossless compression, or write the number of allowed colors
in the image. Smaller values produce smaller files. Or just use 0 for
lossless / 256 for lossy.

There is no library for that, but it is quite simple to implement. Algorithm for merging multiple PNG files into single APNG is described in Wikipedia:
Take all chunks of the first PNG file as a building basis.
Insert an animation control chunk (acTL) after the image header chunk (IHDR).
If the first PNG is to be part of the animation, insert a frame control chunk (fcTL) before the image data chunk (IDAT).
For each of the remaining frames, add a frame control chunk (fcTL) and a frame data chunk (fdAT). Then add the image end chunk (IEND). The content for the frame data chunks (fdAT) is taken from the image data chunks (IDAT) of their respective source images.
Here is an example implementation:
const fs = require('fs')
const crc32 = require('crc').crc32
// Walks a PNG buffer chunk-by-chunk and returns the first chunk whose
// 4-byte ASCII type matches `type`. The returned slice is the complete
// chunk: 4-byte length + 4-byte type + data + 4-byte CRC.
// Throws if no chunk of that type exists.
function findChunk(buffer, type) {
  // Chunks begin immediately after the 8-byte PNG signature.
  let pos = 8
  while (pos < buffer.length) {
    const dataLength = buffer.readUInt32BE(pos)
    const name = buffer.slice(pos + 4, pos + 8).toString('ascii')
    if (name === type) {
      // 12 = length field (4) + type field (4) + CRC (4).
      return buffer.slice(pos, pos + dataLength + 12)
    }
    // Advance past length + type + data + CRC to the next chunk.
    pos += 12 + dataLength
  }
  throw new Error(`Chunk "${type}" not found`)
}
const images = process.argv.slice(2).map(path => fs.readFileSync(path))
const actl = Buffer.alloc(20)
actl.writeUInt32BE(8, 0) // length of chunk
actl.write('acTL', 4) // type of chunk
actl.writeUInt32BE(images.length, 8) // number of frames
actl.writeUInt32BE(0, 12) // number of times to loop (0 - infinite)
actl.writeUInt32BE(crc32(actl.slice(4, 16)), 16) // crc
const frames = images.map((data, idx) => {
const ihdr = findChunk(data, 'IHDR')
const fctl = Buffer.alloc(38)
fctl.writeUInt32BE(26, 0) // length of chunk
fctl.write('fcTL', 4) // type of chunk
fctl.writeUInt32BE(idx ? idx * 2 - 1 : 0, 8) // sequence number
fctl.writeUInt32BE(ihdr.readUInt32BE(8), 12) // width
fctl.writeUInt32BE(ihdr.readUInt32BE(12), 16) // height
fctl.writeUInt32BE(0, 20) // x offset
fctl.writeUInt32BE(0, 24) // y offset
fctl.writeUInt16BE(1, 28) // frame delay - fraction numerator
fctl.writeUInt16BE(1, 30) // frame delay - fraction denominator
fctl.writeUInt8(0, 32) // dispose mode
fctl.writeUInt8(0, 33) // blend mode
fctl.writeUInt32BE(crc32(fctl.slice(4, 34)), 34) // crc
const idat = findChunk(data, 'IDAT')
// All IDAT chunks except first one are converted to fdAT chunks
let fdat;
if (idx === 0) {
fdat = idat
} else {
const length = idat.length + 4
fdat = Buffer.alloc(length)
fdat.writeUInt32BE(length - 12, 0) // length of chunk
fdat.write('fdAT', 4) // type of chunk
fdat.writeUInt32BE(idx * 2, 8) // sequence number
idat.copy(fdat, 12, 8) // image data
fdat.writeUInt32BE(crc32(4, length - 4), length - 4) // crc
}
return Buffer.concat([ fctl, fdat ])
})
const signature = Buffer.from('\211PNG\r\n\032\n', 'ascii')
const ihdr = findChunk(images[0], 'IHDR')
const iend = Buffer.from('0000000049454e44ae426082', 'hex')
const output = Buffer.concat([ signature, ihdr, actl, ...frames, iend ])
fs.writeFileSync('output.png', output)

Currently, no it doesn't look like it. Wikipedia lists the available software, and as you can see there's no support for ImageMagick which has a Node wrapper. However, you may find you can download the command line tool apngasm and shell out to it, if you find it's worth your while there are Node command line wrappers to hook this into an existing application using child_process (http://nodejs.org/api/child_process.html).

I'm not sure about nodejs, but you could try APNG-canvas. APNG uses HTML5 (-webkit-canvas), JavaScript (jQuery).
"APNG-canvas is a library for displaying Animated PNG files in the browsers with canvas support (Google Chrome, Internet Explorer 9, Apple Safari)."
Working demo is here.

Related

Adding fast random access in reading and parsing file using csv-parse and node

I was using the csv-parser library to handle csv parsing in node. The file can be huge ranging from 50,000 to 500,000 lines, maybe even larger. I had to perform some computations on the csv, after this is submitted to the server for that I was thinking of dividing the csv into chunks which I could then provide to worker threads for performing the computation. The worker thread would get the number of lines to skip and then start reading the lines after that up to a particular limit. I create a read stream and pass the csv-parser in with the option of number of lines to skip. I tried to perform some benchmarks on it but could find no visible benefits between skipping lines and not skipping lines. Even if I read the whole file it was sometimes faster than reading the ending 30,000 lines.
My guess is that this problem is because of read stream which reads data one by one and hence is not perfect for quick random access of the file.
Maybe my benchmarking is wrong?
Here is the piece of code
// Benchmark csv-parser with varying skipLines values using nanobench.
// NOTE(review): parse() only *starts* an asynchronous stream and returns
// immediately, so b.end() fires before any parsing has actually finished.
// These timings therefore measure stream setup, not parsing work — await
// the 'end' events (e.g. wrap each parse in a Promise) to get meaningful
// numbers.
const csv = require('csv-parser');
const bench = require('nanobench');
const fs = require('fs');
// Kick off a streaming parse of test.csv, skipping `number` leading lines.
// The 'data' handler is a no-op; results are discarded.
const parse = (number) => {
// const results = [];
fs.createReadStream('test.csv').pipe(csv({
skipLines: number
})).on('data', (data) => {}).on('end', () => {});
}
// skipLines values to benchmark.
const arr = [0, 30000, 15000, 15000/2, 15000/4, 15000/8];
arr.forEach(item => {
bench(`CSV skip ${item} lines 40 times`, function(b) {
b.start();
// Fires 40 parses; none of them are awaited before b.end().
for(let i = 0; i < 40; i++) parse(item);
b.end();
})
})
and here's the output
# CSV skip 0 lines 40 times
ok ~4.14 ms (0 s + 4139981 ns)
# CSV skip 30000 lines 40 times
ok ~2.05 ms (0 s + 2054537 ns)
# CSV skip 15000 lines 40 times
ok ~2.7 ms (0 s + 2702328 ns)
# CSV skip 7500 lines 40 times
ok ~2.43 ms (0 s + 2434555 ns)
# CSV skip 3750 lines 40 times
ok ~1.97 ms (0 s + 1966652 ns)
# CSV skip 1875 lines 40 times
ok ~2.17 ms (0 s + 2172144 ns)
Is there any better way to achieve what I aim to do?
The problem is, even if you want to skip N lines, the parser still has to read and analyze all bytes from the top down to the N-th line. The further from the beginning the first line is, the more useless work is to be performed (Schlemiel the Painter's algorithm).
You can consider the following logic instead:
for each file, start with currentPosition = 0
seek to the offset currentPosition + chunkSize
read i bytes until you encounter a newline or EOF
allocate a new thread with parameters position=currentPosition and size = chunkSize + i
continue with currentPosition = currentPosition + size + 1
This way, each chunk will contain a whole number of lines.
In a thread, use parameters position and size to read the whole chunk and parse it in-memory.
In pseudocode:
size = fs.statSync("filename").size
chunkSize = 99999
currentPos = 0
fd = fs.open("filename")
while (currentPos < size) {
endPos = currentPos + chunkSize
fs.readSync(fd, buf, 0, 1000, endPos)
i = 0
while (buf[i] != '\n') i++
endPos += i
threads.add(filename: "filename", position: currentPos, size: endPos - currentPos)
currentPos = endPos + 1
}

I am stuck in determining whether BLE device is connectable or not, by analyzing advertisement data. Here is my code sample and study guide

I am using Ionic React. And Advertising data received from device was in ArrayBuffer format, from ArrayBuffer I took UInt8Array and then parsed it using following function:
// Formats a byte value as a "0x"-prefixed, two-digit lowercase hex string,
// e.g. 5 -> "0x05", 255 -> "0xff".
function asHexString(i: any) {
  var hex = i.toString(16);
  // Left-pad single-digit values with a zero.
  return "0x" + (hex.length === 1 ? "0" + hex : hex);
}
// Splits a raw BLE advertisement payload (ArrayBuffer) into its AD
// structures and returns a map keyed by the AD type as a hex string
// (e.g. "0x19" -> appearance, "0xff" -> manufacturer specific data).
// Each AD structure is [length (1 byte)][type (1 byte)][data (length-1 bytes)].
// AD type constants: https://www.bluetooth.org/en-us/specification/assigned-numbers/generic-access-profile
// Fixes over the original:
//  - a zero length byte (trailing padding) now ends parsing cleanly; the
//    original still read a garbage type byte, stored a bogus empty entry,
//    and stepped the index backwards before exiting
//  - explicit bounds check on the index
//  - the map is typed, so the ineffective "// #ts-ignore" (typo for
//    @ts-ignore) is no longer needed
export const parseAdvertisingData = (buffer: any) => {
  var advertisementData: { [type: string]: ArrayBuffer } = {};
  var bytes = new Uint8Array(buffer);
  var i = 0;
  while (i < bytes.length) {
    var length = bytes[i] & 0xFF;
    i++;
    if (length === 0) {
      break; // zero-length structure marks end of significant data
    }
    var type = bytes[i] & 0xFF;
    i++;
    // length includes the type byte, but not the length byte itself.
    var data = bytes.slice(i, i + length - 1).buffer;
    i += length - 1; // move past the data to the next AD structure
    advertisementData[asHexString(type)] = data;
  }
  return advertisementData;
}
This returned Object of ArrayBuffer, one with key 0x19(APPEARANCE DATA) and other 0xff(MANUFACTURER DATA)
Then I converted both ArrayBuffer->UInt8Array to hex string and got following results:
Advertisement Raw Data Received: 0x031919001AFF580015E8FF000000000C0C0011D40000000000000001010000
Len
Type
Value
3
0x19
0x1900
26
0xFF
0x580015E8FF000000000C0C0011D40000000000000001010000
Now, I wanted to determine whether this device is connectable or not from above results??
Following are study material:
Legacy Advertising PDUs
These are available for all Bluetooth versions – also enables backward compatibility with older versions and are used on the Primary advertising channels.
ADV_IND: Connectable Scannable Undirected advertising.
ADV_DIRECT_IND: Connectable Directed advertising
ADV_NONCONN_IND: Non-Connectable Non-Scannable Undirected advertising
ADV_SCAN_IND: Scannable Undirected advertising
Reference:
BLE GAP CHART: https://www.bluetooth.com/specifications/assigned-numbers/generic-access-profile/
BLE ADVERTISEMENT: https://www.novelbits.io/bluetooth-low-energy-advertisements-part-1/
The advertisement data in the buffer you are examining is part of the payload and does not contain information from the PDU header. The PDU type indicates if it's connectable or not. I don't know what BLE features are exposed in your javascript BLE lib, but at least on Android you can use https://developer.android.com/reference/android/bluetooth/le/ScanResult#isConnectable() to determine if it's connectable or not.

shift ArrayBuffer so I can access Float32 values

I am trying to read a .stl file
the stl file format has the first 80 bytes are a string
then a Uint32 that is the number of triangles stored in the file.
then for each triangle 12 float32 values,
then a Uint16
then the 12 Float32 and 1 Uint16 pattern repeats.
I have been able to get the first triangle values, but I can't read the next array of float32 values out because the offset is not divisible by 4 anymore after getting the Uint16 value.
Is there any way to do a Left Shift operation on the arraybuffer or some way to continue to read out the values?
You could pull them out by making a copy
// Float32Array views must start on a 4-byte boundary. Uint8Array#slice
// copies the bytes into a fresh, properly aligned buffer, so the floats
// can be read from an arbitrary offset of the source data.
const src = new Uint8Array([1,2,3,4,5,0,0,246,66,0,0,144,64,14,15,16,17,18,19]);
const offset = 5;
const numFloats = 2;
const byteCopy = src.slice(offset, offset + numFloats * 4);
const floats = new Float32Array(byteCopy.buffer);
console.log(floats);
You could also use a DataView
// DataView reads work at any byte offset — no copying or alignment
// needed; each getFloat32 call takes the offset and explicit endianness.
const src = new Uint8Array([1,2,3,4,5,0,0,246,66,0,0,144,64,14,15,16,17,18,19]);
const offset = 5;
const littleEndian = true;
const dataview = new DataView(src.buffer);
console.log(dataview.getFloat32(offset, littleEndian));
console.log(dataview.getFloat32(offset + 4, littleEndian));

Minimizing canvas "bitmap" data size

Context: multi-user app (node.js) - 1 painter, n clients
Canvas size: 650x400 px (= 260,000 px)
For the canvas to be updated frequently (I'm thinking about 10 times a second), I need to keep the data size as small as possible, especially when thinking about upload rates.
The toDataURL() method returning a base64 string is fine but it contains masses of data I don't even need (32 bits per pixel). Its length is 8,088 (without the preceding MIME information), and assuming the JavaScript strings have 8-bit encoding that would be 8.1 kilobytes of data, 10 times per second.
My next try was using JS objects for the different context actions like moveTo(x, y) or lineTo(x, y), sending them to the server and have the clients receive the data in delta updates (via timestamps). However, this turned out to be even less efficient than the base64 string.
{
"timestamp": 0,
"what": {
"name": LINE_TO,
"args": {"x": x, "y": y}
}
}
It doesn't work fluently nor precisely because there are nearly 300 lineTo commands already when you swipe your brush shortly. Sometimes there's a part of the movement missing (making a line straight instead of rounded), sometimes the events aren't even recognized by the script client-side because it seems to be "overwhelmed" by the mass of events triggered already.
So I have to end up using the base64 string with its 8.1 KB. I don't want to worry about this much - but even if done asynchronously with delta updates, there will be major lags on a real server, let alone the occasional bandwidth overrun.
The only colors I am using are #000 and #FFF, so I was thinking about a 1-bit data structure with delta updates only. This would basically suffice and I wouldn't mind any "color" precision losses (it is black after all).
With most of the canvas being white, you could think of additional Huffman run-length encoding to reduce size even further, too. Like a canvas with a size of 50x2 px and a single black pixel at (26, 2) would return the following string: 75W1B74W (50 + 25 white pixels, then 1 black pixel, then 24 more white pixels)
It would even help if the canvas consisted of a 1-bit string like this:
00000000000000000000000000000000000000000000000000
00000000000000000000000001000000000000000000000000
That would help a lot already.
My first question is: How to write an algorithm to get this data efficiently?
The second is: How could I pass the pure binary canvas data to the clients (via node server)? How do I even send a 1-bit data structure to the server? Would I have to convert my bits to a hexadecimal (or more) number and re-parse?
Would it be possible to use this as a data structure?
Thanks in advance,
Harti
I need to keep the data size as small as possible
Then don't send the entire data. Send only the changes, close to what you propose yourself.
Make the framework such that every user can only do "actions" such as "draw black strokeWidth 2 from X1,Y1 to X2,Y2".
I wouldn't bother with some pure binary thing. If there's only two colors then that's easy to send as the string "1,2,x,y,x2,y2", which the other people will parse precisely the same way the local client will, and it will get drawn the same way.
I wouldn't overthink this. Get it working with simple strings before you worry about any clever encoding. It's worth trying the simple thing first. Maybe the performance will be quite good without going through a lot of trouble!
I sorted it out, finally. I used an algorithm to get the image data within a specified area (i.e. the area currently drawn on), and then paste the image data to the same coordinates.
While drawing, I keep my application informed about how big the modified area is and where it starts (stored in currentDrawingCoords).
pixels is an ImageData Array obtained by calling context.getImageData(left, top, width, height) with the stored drawing coordinates.
getDeltaUpdate is called upon onmouseup (yeah, that's the drawback of the area idea):
// Run-length-encodes the black/white pixels of a canvas region into a
// compact string: "x,y,width,height|w,b,w,b,..." where the body holds
// alternating white/black run lengths (a leading 0 means the area starts
// with black). `pixels` is RGBA data (4 bytes per pixel); any non-zero
// channel counts as black.
// Fixes over the original:
//  - declared with const instead of leaking an implicit global
//  - a trailing run of BLACK pixels is now flushed after the loop; the
//    original silently dropped it, losing drawn pixels at the end of the
//    area (a trailing white run can legitimately be omitted because the
//    decoder leaves unwritten pixels transparent/white)
const getDeltaUpdate = function(pixels, currentDrawingCoords) {
  // Header: x,y,width,height of the modified area.
  var image = "" +
    currentDrawingCoords.left + "," + // x
    currentDrawingCoords.top + "," + // y
    (currentDrawingCoords.right - currentDrawingCoords.left) + "," + // width
    (currentDrawingCoords.bottom - currentDrawingCoords.top) + ""; // height
  var blk = 0, wht = 0, d = "|";
  for (var i = 0, n = pixels.length; i < n; i += 4) {
    if (
      pixels[i] > 0 ||
      pixels[i+1] > 0 ||
      pixels[i+2] > 0 ||
      pixels[i+3] > 0
    ) {
      // Pixel is black: flush the preceding white run first. The i == 0
      // case emits a leading "0" so the decoder knows we start black.
      if (wht > 0 || (i == 0 && wht == 0)) {
        image = image + d + wht;
        wht = 0;
        d = ",";
      }
      blk++;
    } else {
      // Pixel is white: flush the preceding black run.
      if (blk > 0) {
        image = image + d + blk;
        blk = 0;
        d = ",";
      }
      wht++;
    }
  }
  // BUG FIX: flush a trailing black run so final black pixels survive.
  if (blk > 0) {
    image = image + d + blk;
  }
  return image;
}
image is a string with a header part (x,y,width,height|...) and a data body part (...|w,b,w,b,w,[...])
The result is a string with less characters than the base64 string has (as opposed to the 8k characters string, the delta updates have 1k-6k characters, depending on how many things have been drawn into the modification area)
That string is sent to the server, pushed to all the other clients and reverted to ImageData by using getImageData:
// Decodes a delta-update string produced by getDeltaUpdate back into an
// ImageData object plus the canvas position it belongs at.
// Format: "x,y,width,height|run,run,..." with alternating white/black run
// lengths; a zero first run flips the sequence to start with black.
// Relies on the canvas `context` being in scope (context.createImageData).
getImageData = function(imagestring) {
  var parts = imagestring.split("|");
  var header = parts[0].split(",");
  var runs = parts[1].split(",");
  var where = {"x": header[0], "y": header[1]};
  // Fresh (fully transparent) ImageData of the area's width/height.
  var image = context.createImageData(header[2], header[3]);
  var currentpixel = 0;
  var white = true;
  for (var r = 0, n = runs.length; r < n; r++) {
    var runLength = parseInt(runs[r]); // pixels sharing one color
    if (runLength > 0) {
      // Black pixels get alpha 255; "white" pixels stay transparent.
      var alpha = white ? 0 : 255;
      var pos = currentpixel * 4;
      var until = pos + (runLength * 4); // exclusive end of the run
      for (; pos < until; pos += 4) {
        image.data[pos] = 0;
        image.data[pos + 1] = 0;
        image.data[pos + 2] = 0;
        image.data[pos + 3] = alpha;
      }
      currentpixel += runLength;
      white = !white;
    } else {
      // A zero run (only ever first) means the image starts with black.
      white = false;
    }
  }
  return {"image": image, "where": where};
}
Call context.putImageData(data.image, data.where.x, data.where.y); to put the area on top of everything there is!
As previously mentioned, this may not be the perfect fit for every kind of monochrome canvas drawing application since the modified area is only submitted onmouseup. However, I can live with this trade-off because it's far less stressful for the server than all the other methods presented in the question.
I hope I was able to help the people to follow this question.

Decrypting images using JavaScript within browser

I have a web based application that requires images to be encrypted before they are sent to server, and decrypted after loaded into the browser from the server, when the correct key was given by a user.
[Edit: The goal is that the original image and the key never leaves the user's computer so that he/she is not required to trust the server.]
My first approach was to encrypt the image pixels using AES and leave the image headers untouched. I had to save the encrypted image in lossless format such as png. Lossy format such as jpg would alter the AES encrypted bits and make them impossible to be decrypted.
Now the encrypted images can be loaded into the browser, with an expected completely scrambled look. Here I have JavaScript code to read in the image data as RGB pixels using Image.canvas.getContext("2d").getImageData(), get the key from the user, decrypt the pixels using AES, redraw the canvas and show the decrypted image to the user.
This approach works but suffers two major problems.
The first problem is that saving the completely scrambled image in lossless format takes a lot of bytes, close to 3 bytes per pixel.
The second problem is that decrypting large images in the browser takes a long time.
This invokes the second approach, which is to encrypt the image headers instead of the actual pixels. But I haven't found any way to read in the image headers in JavaScript in order to decrypt them. The Canvas gives only the already decompressed pixel data. In fact, the browser shows the image with altered header as invalid.
Any suggestions for improving the first approach or making the second approach possible, or providing other approaches are much appreciated.
Sorry for the long post.
You inspired me to give this a try. I blogged about it and you can find a demo here.
I used Crypto-JS to encrypt and decrypt with AES and Rabbit.
First I get the CanvasPixelArray from the ImageData object.
// Grab the 2D context of the canvas with id "leif" and read its pixels.
var ctx = document.getElementById('leif')
.getContext('2d');
// width/height are assumed to be defined elsewhere as the canvas size — TODO confirm.
var imgd = ctx.getImageData(0,0,width,height);
// RGBA byte array (CanvasPixelArray / Uint8ClampedArray), 4 bytes per pixel.
var pixelArray = imgd.data;
The pixel array has four bytes for each pixel as RGBA but Crypto-JS encrypts a string, not an array. At first I used .join() and .split(",") to get from array to string and back. It was slow and the string got much longer than it had to be. Actually four times longer. To save even more space I decided to discard the alpha channel.
// Serializes a canvas RGBA pixel array into a string of raw char codes,
// dropping the alpha channel (3 characters per pixel) to save space.
// BUG FIX: the original body iterated over an undefined global `pix`
// instead of the parameter `a`, throwing a ReferenceError when called.
function canvasArrToString(a) {
  var s = "";
  // Removes alpha to save space.
  for (var i = 0; i < a.length; i += 4) {
    s += (String.fromCharCode(a[i])
      + String.fromCharCode(a[i + 1])
      + String.fromCharCode(a[i + 2]));
  }
  return s;
}
That string is what I then encrypt. I stuck with += after reading String Performance: an Analysis.
var encrypted = Crypto.Rabbit.encrypt(imageString, password);
I used a small 160x120 pixels image. With four bytes for each pixels that gives 76800 bytes. Even though I stripped the alpha channel the encrypted image still takes up 124680 bytes, 1.62 times bigger. Using .join() it was 384736 bytes, 5 times bigger. One cause for it still being larger than the original image is that Crypto-JS returns a Base64 encoded string and that adds something like 37%.
Before I could write it back to the canvas I had to convert it to an array again.
// Inverse of canvasArrToString: expands a 3-characters-per-pixel string
// back into an RGBA array, restoring a fully opaque alpha per pixel.
function canvasStringToArr(s) {
  var arr = [];
  for (var i = 0; i < s.length; i += 3) {
    arr.push(s.charCodeAt(i), s.charCodeAt(i + 1), s.charCodeAt(i + 2));
    arr.push(255); // Hardcodes alpha to 255.
  }
  return arr;
}
Decryption is simple.
// Decrypt the Rabbit ciphertext, rebuild the RGBA array, and paint it
// back onto the canvas (imgd/ctx come from the earlier getImageData call).
var arr=canvasStringToArr(
Crypto.Rabbit.decrypt(encryptedString, password));
imgd.data=arr;
ctx.putImageData(imgd,0,0);
Tested in Firefox, Google Chrome, WebKit3.1 (Android 2.2), iOS 4.1, and a very recent release of Opera.
Encrypt and Base64 encode the image's raw data when it is saved. (You can only do that on a web browser that supports the HTML5 File API unless you use a Java applet). When the image is downloaded, unencode it, decrypt it, and create a data URI for the browser to use (or again, use a Java applet to display the image).
You cannot, however, remove the need for the user to trust the server because the server can send whatever JavaScript code it wants to to the client, which can send a copy of the image to anyone when it is decrypted. This is a concern some have with encrypted e-mail service Hushmail – that the government could force the company to deliver a malicious Java applet. This isn't an impossible scenario; telecommunications company Etisalat attempted to intercept BlackBerry communications by installing spyware onto the device remotely (http://news.bbc.co.uk/2/hi/technology/8161190.stm).
If your web site is one used by the public, you have no control over your users' software configurations, so their computers could even already be infected with spyware.
I wanted to do something similar: On the server is an encrypted gif and I want to download, decrypt, and display it in javascript. I was able to get it working and the file stored on the server is the same size as the original plus a few bytes (maybe up to 32 bytes). This is the code that performs AES encryption of the file calendar.gif and makes calendar.gif.enc, written in VB.Net.
' Encrypts calendar.gif with AES-256 in CBC mode and writes calendar.gif.enc.
' NOTE(review): the key and IV are hard-coded for the demo; production code
' should generate a random IV and prepend it to the output file.
Private Sub Button1_Click(sender As Object, e As EventArgs) Handles Button1.Click
Dim AES As New System.Security.Cryptography.RijndaelManaged
' 256-bit key supplied as a hex string (decoded by HexStringToBytes).
Dim encryption_key As String = "603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"
AES.Key = HexStringToBytes(encryption_key)
' 128-bit IV (the AES block size), also hex-encoded.
Dim iv_string As String = "000102030405060708090A0B0C0D0E0F"
'System.IO.File.ReadAllBytes("calendar.gif")
'Dim test_string As String = "6bc1bee22e409f96e93d7e117393172a"
AES.Mode = Security.Cryptography.CipherMode.CBC
AES.IV = HexStringToBytes(iv_string)
Dim Encrypter As System.Security.Cryptography.ICryptoTransform = AES.CreateEncryptor
' NOTE(review): the file is read twice (here and again inside WriteAllBytes
' below); the byte array b could simply be reused.
Dim b() As Byte = System.IO.File.ReadAllBytes("calendar.gif")
System.IO.File.WriteAllBytes("calendar.gif.enc", (Encrypter.TransformFinalBlock(System.IO.File.ReadAllBytes("calendar.gif"), 0, b.Length)))
End Sub
This is the javascript code that downloads calendar.gif.enc as binary, decrypts, and makes an image:
// Converts a CryptoJS WordArray into a Base64 string using the alphabet
// in Base64._keyStr (whose index 64 is assumed to be the '=' padding
// character — TODO confirm against the Base64 helper in use).
function wordArrayToBase64(wordArray) {
  // Unpack the big-endian 32-bit words into individual bytes,
  // honouring sigBytes (the last word may be only partially filled).
  var bytes = [];
  for (var i = 0; i < wordArray.sigBytes; i++) {
    bytes.push((wordArray.words[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff);
  }
  var output = "";
  // Encode each group of 3 bytes as 4 alphabet indices; the final
  // partial group is zero-padded and its unused slots marked with 64.
  for (var p = 0; p < bytes.length; p += 3) {
    var remaining = bytes.length - p;
    var b0 = bytes[p];
    var b1 = remaining > 1 ? bytes[p + 1] : 0;
    var b2 = remaining > 2 ? bytes[p + 2] : 0;
    var enc = [
      (b0 & 0xff) >> 2,
      ((b0 & 3) << 4) | ((b1 & 0xff) >> 4),
      ((b1 & 15) << 2) | ((b2 & 0xff) >> 6),
      b2 & 63
    ];
    if (remaining === 1) {
      enc[2] = enc[3] = 64; // two '=' pads
    } else if (remaining === 2) {
      enc[3] = 64; // one '=' pad
    }
    for (var j = 0; j < 4; j++) {
      output += Base64._keyStr.charAt(enc[j]);
    }
  }
  return(output);
}
// Download the encrypted GIF as raw binary, decrypt it with CryptoJS AES
// (CBC, fixed key/IV), and append the result to the page as a data: URI.
// NOTE(review): this snippet never calls xhr.open()/xhr.send(); the request
// must be opened and sent elsewhere for onreadystatechange to ever fire.
var xhr = new XMLHttpRequest();
// x-user-defined charset keeps the binary response bytes intact.
xhr.overrideMimeType('image/gif; charset=x-user-defined');
xhr.onreadystatechange = function() {
if(xhr.readyState == 4) {
// Hard-coded key/IV — must match the values used during encryption.
var key = CryptoJS.enc.Hex.parse('603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4');
var iv = CryptoJS.enc.Hex.parse('000102030405060708090A0B0C0D0E0F');
var aesEncryptor = CryptoJS.algo.AES.createDecryptor(key, { iv: iv });
// Repack the response string into big-endian 32-bit words for CryptoJS.
var words = [];
for(var i=0; i < (xhr.response.length+3)/4; i++) {
var newWord = (xhr.response.charCodeAt(i*4+0)&0xff) << 24;
newWord += (xhr.response.charCodeAt(i*4+1)&0xff) << 16;
newWord += (xhr.response.charCodeAt(i*4+2)&0xff) << 8;
newWord += (xhr.response.charCodeAt(i*4+3)&0xff) << 0;
words.push(newWord);
}
// The second argument records the true byte length (last word may be partial).
var inputWordArray = CryptoJS.lib.WordArray.create(words, xhr.response.length);
var ciphertext0 = aesEncryptor.process(inputWordArray);
var ciphertext1 = aesEncryptor.finalize();
$('body').append('<img src="data:image/gif;base64,' + wordArrayToBase64(ciphertext0.concat(ciphertext1)) + '">');
$('body').append('<p>' + wordArrayToBase64(ciphertext0.concat(ciphertext1)) + '</p>');
}
};
Caveats:
I used a fixed IV and fixed password. You should modify the code to generate a random IV during encryption and prepend them as the first bytes of the output file. The javascript needs to be modified, too, to extract these bytes.
The key length must match the cipher: 256 bits for AES-256. If the password isn't already 256 bits, one possibility is to use a cryptographic hash function (such as SHA-256) to derive a 256-bit key from it, in both encryption and decryption.
You'll need crypto-js.
overrideMimeType might not work on older browsers. You need this so that the binary data will get downloaded properly.

Categories

Resources