How to calculate MD5 checksum of a blob using jQuery - javascript

My requirement is to upload large files (up to 50GB) through the browser, so I used the jQuery File Upload plugin to upload those large files in smaller chunks.
Now I have to calculate an MD5 checksum for each chunk to detect data corruption between the browser and the server.
Is there any jQuery plugin, other than the Google CryptoJS utility, to calculate checksums for the blob chunks?

You can try js-md5.
There is an online demo for getting a file's checksum.
You can feed it ArrayBuffers incrementally, like this:
var total;               // total file size in bytes
var batch = 1024 * 1024; // batch size: hash 1MB per tick
var start = 0;
var current = md5;       // md5.update() returns an incremental hasher,
                         // so this variable is replaced on the first call
var asyncUpdate = function () {
  if (start < total) {
    var end = Math.min(start + batch, total);
    // arrayBuffer holds the file content (e.g. from FileReader.readAsArrayBuffer)
    current = current.update(arrayBuffer.slice(start, end));
    start = end;
    setTimeout(asyncUpdate); // yield to the event loop to keep the UI responsive
  } else {
    var hash = current.hex(); // the final checksum
  }
};
asyncUpdate();
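Since you already slice the file for the chunked upload, you can also hash each chunk independently. A minimal sketch, assuming js-md5 is loaded and file is the user-selected File (the chunk boundaries here are illustrative):
function hashChunk(file, start, end, done) {
  var reader = new FileReader();
  reader.onload = function () {
    // md5() accepts an ArrayBuffer directly and returns a hex string
    done(md5(reader.result));
  };
  reader.readAsArrayBuffer(file.slice(start, end));
}
// Example: checksum of the first 1MB chunk
hashChunk(file, 0, 1024 * 1024, function (hex) {
  console.log('chunk checksum:', hex);
});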

Related

Change segment text before processing using hls.js

Due to some security reasons, I add some extra text to the beginning of each .ts file, which causes buffering issues when the segment is parsed.
To fix this I decided to remove that 'extra' text before the segment is processed. The issue is that I don't know how to manipulate an ArrayBuffer so I can remove that text, since I am not that knowledgeable in JS.
I tried many things, including downloading the hls.js file directly and editing its readystatechange handler:
// >= HEADERS_RECEIVED
if (readyState >= 2) {
  // ...
  if (isArrayBuffer) {
    console.log(xhr.response);
    var ress = xhr.response;
    //console.log(ress.replace('FFmpeg',''));
    var decoder = new TextDecoder('ascii');
    var seg = decoder.decode(ress);
    //var binaryArray = new Uint8Array(this.response.slice(0)); // use Uint8Array for binary
    //var blob = new Blob([seg], { type: "video/MP2T" });
    var encoder = new TextEncoder(); // always utf-8
    var newww = encoder.encode(seg);
    var ddd = newww.buffer;
    console.debug(newww);
    console.debug(newww.buffer);
    //seg = seg.replace('ÿØÿà �JFIF','');
    //xhr.response = Array.from(newww);
    data = ddd;
    len = data.byteLength;
The idea was to convert the ArrayBuffer to a string, remove that text, then convert it back to an ArrayBuffer.
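Note that a string round-trip corrupts binary segment data: decoding arbitrary bytes as text and re-encoding them as UTF-8 is lossy. A safer sketch that strips a known prefix directly from the bytes (assuming the marker you prepended is the ASCII string 'FFmpeg', as in the commented-out line above):
function stripPrefix(buffer) {
  var PREFIX = 'FFmpeg'; // assumption: the extra text added server-side
  var bytes = new Uint8Array(buffer);
  var head = new TextDecoder('ascii').decode(bytes.subarray(0, PREFIX.length));
  if (head === PREFIX) {
    // slice() copies the remaining bytes into a fresh ArrayBuffer
    return bytes.slice(PREFIX.length).buffer;
  }
  return buffer; // prefix absent, pass through unchanged
}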

Am I missing something when sending long audio recordings from the web browser to Oracle Apex?

I am trying to send recorded audio from the web browser to Oracle Apex, but a problem occurs when the audio is long. The code works very well when the audio is under two minutes.
As far as I know, the data is sent URL-encoded, i.e. in text format. Oracle has a 32k limit per string, so if the blob exceeds that limit it must be sent as an array split into parts of 30k each. So I suspect the array is not being sent in the correct format, but I don't know how to confirm that.
The code I am using is as follows (I built an Apex plugin to send the audio).
The JavaScript fragment that sends the audio:
// builds a js array from a long string
clob2Array: function(clob, size, array) {
  var loopCount = Math.floor(clob.length / size) + 1;
  for (var i = 0; i < loopCount; i++) {
    array.push(clob.slice(size * i, size * (i + 1)));
  }
  return array;
},
// converts a DataURI to a base64 string
dataURI2base64: function(dataURI) {
  var base64 = dataURI.substr(dataURI.indexOf(',') + 1);
  return base64;
},
blobToDataURL: function(blob, callback) {
  var a = new FileReader();
  a.onload = function(e) { callback(e.target.result); };
  a.readAsDataURL(blob);
},
// save to DB function
save2Db: function(pAjaxIdentifier, pRegionId, pAudio, callback) {
  apexAudio.blobToDataURL(pAudio, function(data) {
    // audio DataURI to base64
    var base64 = apexAudio.dataURI2base64(data);
    // split base64 clob string into an f01 array of 30k pieces
    var f01Array = apexAudio.clob2Array(base64, 30000, []);
    // Apex Ajax call
    apex.server.plugin(pAjaxIdentifier, {
      f01: f01Array,
    }, {
      dataType: 'html',
      // SUCCESS function
      success: function() {
        // trigger apex event
        $('#' + pRegionId).trigger('apexaudio-saved-db');
        // callback
        callback();
      },
      // ERROR function
      error: function(xhr, pMessage) {
        // trigger apex event
        $('#' + pRegionId).trigger('apexaudio-error-db');
        console.log('save2Db: apex.server.plugin ERROR:', pMessage);
        // callback
        callback();
      }
    });
  });
}
The PL/SQL code that receives the string array and transforms it into a BLOB:
DECLARE
  --
  l_collection_name VARCHAR2(100);
  l_blob            BLOB;
  l_filename        VARCHAR2(100);
  l_mime_type       VARCHAR2(100);
  l_token           VARCHAR2(32000);
  --
BEGIN
  -- get defaults
  l_filename  := 'audio_' || to_char(SYSDATE, 'YYYYMMDDHH24MISS') || '.webm';
  l_mime_type := 'audio/webm';
  -- build BLOB from f01 30k array
  dbms_lob.createtemporary(l_blob, TRUE, dbms_lob.session);
  FOR i IN 1 .. apex_application.g_f01.count LOOP
    l_token := apex_application.g_f01(i);
    IF length(l_token) > 0 THEN
      dbms_lob.append(l_blob,
                      to_blob(utl_encode.base64_decode(utl_raw.cast_to_raw(l_token))));
    END IF;
  END LOOP;
  l_collection_name := 'APEX_AUDIO';
  APEX_COLLECTION.CREATE_OR_TRUNCATE_COLLECTION(
    p_collection_name => l_collection_name);
  -- add collection member (only if BLOB not null)
  IF dbms_lob.getlength(l_blob) IS NOT NULL THEN
    apex_collection.add_member(p_collection_name => l_collection_name,
                               p_c001    => l_filename,  -- filename
                               p_c002    => l_mime_type, -- mime type
                               p_d001    => SYSDATE,     -- date created
                               p_blob001 => l_blob);     -- BLOB audio content
  END IF;
END;
I repeat, the code works perfectly if the audio is short, but if it is long, the following error arises:
2020-02-20T20:09:27.169Z SEVERE <P-fvMwI2WpKybDySZRumRQ> java.sql.SQLException: ORA-06550: line 2, column 2:
PLS-00306: wrong number or types of arguments in call to 'AJAX'
ORA-06550: line 2, column 2:
PL/SQL: Statement ignored
InternalServerException [statusCode=500, reasons=[]]
at oracle.dbtools.apex.ModApexContext.handleError(ModApexContext.java:288)
at oracle.dbtools.apex.OWA.execute(OWA.java:206)
at oracle.dbtools.apex.ModApex.handleRequest(ModApex.java:310)
at oracle.dbtools.apex.ModApex.doPost(ModApex.java:188)
at oracle.dbtools.apex.ModApex.service(ModApex.java:112)
at oracle.dbtools.http.entrypoint.Dispatcher.dispatch(Dispatcher.java:126)
[...]
Technology:
Oracle 12c
Oracle Apex 19.2
Ords 19.4
Tomcat 8
Since you are sending your request via AJAX with content type "application/x-www-form-urlencoded", Tomcat limits the maximum allowed POST size (it defaults to 2MB).
To make it work in APEX, you essentially have 2 options:
1) Do a kind of double-chunked upload: first split the file itself with e.g. file.slice(), then build the 30k base64 array for each file chunk and upload them chunk by chunk (see the sketch after this list).
2) Use a "multipart/form-data" content type with a form submit; that way you do not run into Tomcat's 2MB limitation.
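A minimal sketch of option 1, reusing the plugin's helper functions from the question. uploadChunk and onDone are hypothetical callbacks standing in for an apex.server.plugin call that appends each chunk server-side:
function uploadInChunks(blob, chunkSize, uploadChunk, onDone) {
  var offset = 0;
  function next() {
    if (offset >= blob.size) { onDone(); return; }
    var piece = blob.slice(offset, offset + chunkSize);
    apexAudio.blobToDataURL(piece, function(dataURI) {
      var base64 = apexAudio.dataURI2base64(dataURI);
      // each piece gets its own 30k f01 array, staying well below the 2MB POST limit
      uploadChunk(apexAudio.clob2Array(base64, 30000, []), function() {
        offset += chunkSize;
        next();
      });
    });
  }
  next();
}
// e.g. upload the recording in 1MB file chunks
uploadInChunks(pAudio, 1024 * 1024, myUploadChunk, myOnDone);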
I built a file uploader plugin some time ago; just have a look at this function:
https://github.com/Dani3lSun/apex-plugin-dropzone/blob/90a82f4bb83fee9d78458af790560fb6c5b77978/server/js/apexdropzone.js#L378
The uploaded file will then be inserted into apex_application_files automatically, and from there you can grab it:
https://github.com/Dani3lSun/apex-plugin-dropzone/blob/90a82f4bb83fee9d78458af790560fb6c5b77978/source/render_region.sql#L332
I would not recommend doing it with ORDS when you can do it inside your APEX app, because otherwise you have to deal with security, additional authentication, etc.

HTML Table to Excel (xls) using javascript/jQuery

I am trying to export an HTML table to an Excel document via a javascript/jQuery export. So far I have found 2 solutions, but neither meets my requirements:
HTML Table Export
Does not export inline styling (styling is a must-have requirement)
Table2Excel
Does not work in all IE versions (I need all-browser compatibility)
I am looking for a solution that is as minimal as possible. Both of these solutions were very "tidy", but as per my comments above each had a downfall that fell short of my requirements.
Does anyone know a better solution? My requirements for this export are:
Needs to work in all browsers
Needs to export the inline styling
IF POSSIBLE it would be nice to be able to name the file
I would appreciate any assistance here; I'm pulling my hair out, as I can't be the first person that has required this feature...
Thanks!
I'm assuming that PHP / Flash is a no-go. (If not, check out PHPExcel and DataTables' export feature.)
Also, naming the file is nigh impossible without server-side programming of some kind. I'm pretty sure this is a security restriction in most browsers.
Before we get to the code, some limitations:
You'll probably need to look up the MS Office XML format for XLS and customize the template
Once the file downloads (it will be .xls) you will likely get a warning like "This file says it's in XLS format but might not be, do you want to open it anyway?"
You will likely need to "Save As" a valid Excel format to force it to convert from the HTML in the Excel document body
I've used this fairly extensively in Chrome / Firefox / Internet Explorer, but you should test it yourself
It is currently set to freeze rows / columns; change the XML and/or the freeze vars at the top of the tableToExcel() function
It is resource-expensive on the client side, so a server-side script of some kind is probably best for large exports
There is a limit to the size of the table, which is (I'm guessing) based on the size of the javascript variable. I used it with tables around 50x300, but I wouldn't recommend it for much larger tables
It accepts a string of HTML table text (including the <table> tag). You can format that HTML with inline styles (it won't work with a CSS stylesheet, unfortunately).
tableToExcel($('table').html(),'Worksheet Name');
Here are the functions. Enjoy!
function b64toBlob(b64Data, contentType, sliceSize) {
  contentType = contentType || '';
  sliceSize = sliceSize || 512;
  var byteCharacters = atob(b64Data);
  var byteArrays = [];
  for (var offset = 0; offset < byteCharacters.length; offset += sliceSize) {
    var slice = byteCharacters.slice(offset, offset + sliceSize);
    var byteNumbers = new Array(slice.length);
    for (var i = 0; i < slice.length; i++) {
      byteNumbers[i] = slice.charCodeAt(i);
    }
    var byteArray = new Uint8Array(byteNumbers);
    byteArrays.push(byteArray);
  }
  var blob = new Blob(byteArrays, {type: contentType});
  return blob;
}

function tableToExcel(table, name) {
  var freezeTopRowNumber = '4';
  var freezeColNumber = '6';
  var template = '<html xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:x="urn:schemas-microsoft-com:office:excel" xmlns="http://www.w3.org/TR/REC-html40">';
  template += '<head><!--[if gte mso 9]>';
  template += '<xml><x:ExcelWorkbook><x:ExcelWorksheets><x:ExcelWorksheet><x:Name>{worksheet}</x:Name>';
  template += '<x:WorksheetOptions><x:Selected/><x:FreezePanes/><x:FrozenNoSplit/><x:SplitHorizontal>'+freezeTopRowNumber+'</x:SplitHorizontal><x:TopRowBottomPane>'+freezeTopRowNumber+'</x:TopRowBottomPane>';
  template += '<x:SplitVertical>'+freezeColNumber+'</x:SplitVertical><x:LeftColumnRightPane>'+freezeColNumber+'</x:LeftColumnRightPane>';
  template += '<x:ActivePane>2</x:ActivePane><x:Panes><x:Pane><x:Number>3</x:Number></x:Pane><x:Pane><x:Number>2</x:Number></x:Pane></x:Panes>';
  template += '<x:ProtectContents>False</x:ProtectContents><x:ProtectObjects>False</x:ProtectObjects><x:ProtectScenarios>False</x:ProtectScenarios>';
  template += '<x:DisplayGridlines/></x:WorksheetOptions></x:ExcelWorksheet></x:ExcelWorksheets></x:ExcelWorkbook></xml><![endif]--></head>';
  template += '<body>{table}</body></html>';
  var base64 = function(s) { return window.btoa(unescape(encodeURIComponent(s))); };
  var format = function(s, c) { return s.replace(/{(\w+)}/g, function(m, p) { return c[p]; }); };
  var ctx = {worksheet: name || 'Worksheet', table: table};
  var b = base64(format(template, ctx));
  var blob = b64toBlob(b, 'application/vnd.ms-excel');
  var blobURL = URL.createObjectURL(blob);
  window.location.href = blobURL;
}
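On naming: in browsers that support the HTML5 download attribute (not older IE), you can suggest a filename client-side by clicking a temporary anchor instead of navigating to the blob URL. A hedged sketch replacing the final window.location.href line of tableToExcel():
var blobURL = URL.createObjectURL(blob);
var a = document.createElement('a');
a.href = blobURL;
a.download = (name || 'Worksheet') + '.xls'; // suggested filename
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
URL.revokeObjectURL(blobURL); // release the blob reference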

filereader api on big files

My FileReader API code had been working well until one day I got a 280MB txt file from one of my clients. The page just crashes outright in Chrome, and in Firefox nothing happens.
// create new reader object
var fileReader = new FileReader();
// read the file as text ($files[i] is the selected file)
fileReader.readAsText($files[i]);
fileReader.onload = function(e) {
  // read all the information about the file
  // do sanity checks here etc...
  $timeout(function() {
    // var fileContent = e.target.result;
    // get the first line
    var firstLine = e.target.result.slice(0, e.target.result.indexOf("\n"));
  });
};
What I am trying to do above is find the first line break so that I can get the column length of the file. Should I not read it as text? How can I get the column length of the file without crashing the page on big files?
Your application is failing for big files because you read the full file into memory before processing it. This inefficiency can be solved by streaming the file (reading small chunks), so you only need to hold a part of the file in memory.
A File object is also an instance of a Blob, which offers the .slice method to create a smaller view of the file.
Here is an example that assumes that the input is ASCII (demo: http://jsfiddle.net/mw99v8d4/).
function findColumnLength(file, callback) {
  // 1KB at a time, because we expect the first line to be small.
  var CHUNK_SIZE = 1024;
  var offset = 0;
  var fr = new FileReader();
  fr.onload = function() {
    var view = new Uint8Array(fr.result);
    for (var i = 0; i < view.length; ++i) {
      if (view[i] === 10 || view[i] === 13) {
        // \n = 10 and \r = 13
        // column length = offset + position of \r or \n
        callback(offset + i);
        return;
      }
    }
    // \r or \n not found, continue seeking.
    offset += CHUNK_SIZE;
    seek();
  };
  fr.onerror = function() {
    // Cannot read the file... Do something, e.g. assume column size = 0.
    callback(0);
  };
  seek();

  function seek() {
    if (offset >= file.size) {
      // No \r or \n found. The column size is equal to the full file size.
      callback(file.size);
      return;
    }
    var slice = file.slice(offset, offset + CHUNK_SIZE);
    fr.readAsArrayBuffer(slice);
  }
}
The previous snippet counts the number of bytes before a line break. Counting the number of characters in text consisting of multibyte characters is slightly more difficult, because you have to account for the possibility that the last byte in a chunk is only part of a multibyte character.
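For example, wired to a (hypothetical) file input:
document.querySelector('input[type=file]').onchange = function() {
  findColumnLength(this.files[0], function(length) {
    console.log('First line ends after ' + length + ' bytes');
  });
};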
There is an awesome library called Papa Parse that does this gracefully! It can really handle big files, and you can also use a web worker.
Just try out the demos that they provide: https://www.papaparse.com/demo
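A minimal sketch of streaming a large delimited file with Papa Parse (the step callback receives one parsed row at a time, so the whole file never has to sit in memory; worker: true moves parsing off the UI thread):
Papa.parse(file, {
  worker: true,
  step: function(results) {
    // results.data is one parsed row; e.g. log its column count
    console.log('columns:', results.data.length);
  },
  complete: function() {
    console.log('done parsing');
  }
});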

Join up PNG images to an APNG animated image

Is it possible to join up PNG images into an APNG animated image using nodejs?
I've only found a PHP library: link
UPNG.js can parse and build APNG files - https://github.com/photopea/UPNG.js
From the readme:
UPNG.js supports APNG, and the interface expects "frames".
UPNG.encode(imgs, w, h, cnum, [dels])
imgs: array of frames. A frame is an ArrayBuffer containing the pixel data (RGBA, 8 bits per channel)
w, h: width and height of the image
cnum: number of colors in the result; 0: all colors (lossless PNG)
dels: array of delays for each frame (only when there are 2 or more frames)
returns an ArrayBuffer with the binary data of a PNG file
UPNG.js can do a lossy minification of PNG files, similar to TinyPNG and other tools. It performs color quantization using the k-means algorithm.
Lossy compression is enabled by the last parameter, cnum. Set it to zero for lossless compression, or to the number of allowed colors in the image. Smaller values produce smaller files. Or just use 0 for lossless / 256 for lossy.
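A hedged sketch of joining separate PNG files with it under Node (assuming the npm package name upng-js; UPNG.decode and UPNG.toRGBA8 come from the same readme, and the 500ms frame delay is an arbitrary choice):
const fs = require('fs');
const UPNG = require('upng-js');

// decode each input PNG into raw RGBA pixel data
const frames = process.argv.slice(2).map(path => {
  const buf = fs.readFileSync(path);
  // copy into a standalone ArrayBuffer (Node may pool small Buffers)
  const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);
  const img = UPNG.decode(ab);
  return { rgba: UPNG.toRGBA8(img)[0], w: img.width, h: img.height };
});

// encode all frames into one APNG: cnum 0 = lossless, 500ms per frame
const apng = UPNG.encode(
  frames.map(f => f.rgba),
  frames[0].w,
  frames[0].h,
  0,
  frames.map(() => 500)
);
fs.writeFileSync('animated.png', Buffer.from(apng));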
There is no library for that, but it is quite simple to implement yourself. The algorithm for merging multiple PNG files into a single APNG is described on Wikipedia:
Take all chunks of the first PNG file as a building basis.
Insert an animation control chunk (acTL) after the image header chunk (IHDR).
If the first PNG is to be part of the animation, insert a frame control chunk (fcTL) before the image data chunk (IDAT).
For each of the remaining frames, add a frame control chunk (fcTL) and a frame data chunk (fdAT). Then add the image end chunk (IEND). The content for the frame data chunks (fdAT) is taken from the image data chunks (IDAT) of their respective source images.
Here is an example implementation:
const fs = require('fs')
const crc32 = require('crc').crc32

function findChunk(buffer, type) {
  let offset = 8
  while (offset < buffer.length) {
    let chunkLength = buffer.readUInt32BE(offset)
    let chunkType = buffer.slice(offset + 4, offset + 8).toString('ascii')
    if (chunkType === type) {
      return buffer.slice(offset, offset + chunkLength + 12)
    }
    offset += 4 + 4 + chunkLength + 4
  }
  throw new Error(`Chunk "${type}" not found`)
}

const images = process.argv.slice(2).map(path => fs.readFileSync(path))

const actl = Buffer.alloc(20)
actl.writeUInt32BE(8, 0)                           // length of chunk
actl.write('acTL', 4)                              // type of chunk
actl.writeUInt32BE(images.length, 8)               // number of frames
actl.writeUInt32BE(0, 12)                          // number of times to loop (0 - infinite)
actl.writeUInt32BE(crc32(actl.slice(4, 16)), 16)   // crc

const frames = images.map((data, idx) => {
  const ihdr = findChunk(data, 'IHDR')
  const fctl = Buffer.alloc(38)
  fctl.writeUInt32BE(26, 0)                        // length of chunk
  fctl.write('fcTL', 4)                            // type of chunk
  fctl.writeUInt32BE(idx ? idx * 2 - 1 : 0, 8)     // sequence number
  fctl.writeUInt32BE(ihdr.readUInt32BE(8), 12)     // width
  fctl.writeUInt32BE(ihdr.readUInt32BE(12), 16)    // height
  fctl.writeUInt32BE(0, 20)                        // x offset
  fctl.writeUInt32BE(0, 24)                        // y offset
  fctl.writeUInt16BE(1, 28)                        // frame delay - fraction numerator
  fctl.writeUInt16BE(1, 30)                        // frame delay - fraction denominator
  fctl.writeUInt8(0, 32)                           // dispose mode
  fctl.writeUInt8(0, 33)                           // blend mode
  fctl.writeUInt32BE(crc32(fctl.slice(4, 34)), 34) // crc
  const idat = findChunk(data, 'IDAT')
  // All IDAT chunks except the first one are converted to fdAT chunks
  let fdat
  if (idx === 0) {
    fdat = idat
  } else {
    const length = idat.length + 4
    fdat = Buffer.alloc(length)
    fdat.writeUInt32BE(length - 12, 0)             // length of chunk
    fdat.write('fdAT', 4)                          // type of chunk
    fdat.writeUInt32BE(idx * 2, 8)                 // sequence number
    idat.copy(fdat, 12, 8)                         // image data
    fdat.writeUInt32BE(crc32(fdat.slice(4, length - 4)), length - 4) // crc
  }
  return Buffer.concat([ fctl, fdat ])
})

const signature = Buffer.from('\211PNG\r\n\032\n', 'ascii')
const ihdr = findChunk(images[0], 'IHDR')
const iend = Buffer.from('0000000049454e44ae426082', 'hex')
const output = Buffer.concat([ signature, ihdr, actl, ...frames, iend ])
fs.writeFileSync('output.png', output)
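Run it with the frames in order, e.g. node apng.js frame0.png frame1.png frame2.png; the merged animation is written to output.png. Note that this minimal version takes the IHDR from the first image (so all frames must share its dimensions and color type) and only picks up the first IDAT chunk of each source PNG.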
Currently, no, it doesn't look like it. Wikipedia lists the available software, and as you can see there's no support in ImageMagick, which has a Node wrapper. However, you can download the command-line tool apngasm and shell out to it; if you find it's worth your while, you can hook it into an existing application with child_process (http://nodejs.org/api/child_process.html).
I'm not sure about nodejs, but you could try APNG-canvas. It uses the HTML5 canvas and JavaScript (jQuery).
"APNG-canvas is a library for displaying Animated PNG files in browsers with canvas support (Google Chrome, Internet Explorer 9, Apple Safari)."
A working demo is here.
