How can I read chunks from a stream in JavaScript? - javascript

I have a file open in the browser that I want to create a stream from and read with JS. I want to read the file in chunks of 1kb, but it always reads the whole file.
import { ReadableStream as PolyfillReadableStream } from 'web-streams-polyfill';
import { createReadableStreamWrapper } from '@mattiasbuelens/web-streams-adapter';

const toPolyfillReadable = createReadableStreamWrapper(PolyfillReadableStream);

const file = myFile;
const fStreamReader = toPolyfillReadable(file.stream(), new ByteLengthQueuingStrategy({
    highWaterMark: 1024,
}));

const stream = [];
for await (const value of fStreamReader) { // This works because I'm using web-streams-polyfill
    console.log(value); // This only runs once and prints the whole file.
}

The following is a simplified version of https://stackoverflow.com/a/28318964/16462950. When I used it to read a 28MB file, the allocation timeline always stayed at around 1KB:
async function read(file) {
    for (var offset = 0; offset < file.size; offset += 1024) {
        var oneKB = await file.slice(offset, offset + 1024).text();
        console.log(oneKB);
    }
}
<input name="file" type="file" onchange="read(this.files[0])" />
A file.stream() is faster, but consumes 64KB chunks and therefore allocates more memory.
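If file.stream()'s default chunking is acceptable memory-wise but you still want to hand 1KB pieces to your processing code, one option (a sketch I'm adding here, not part of the answer; readInKB is a hypothetical name) is to re-slice each chunk as it arrives:

async function readInKB(file) {
    // The default reader yields browser-sized chunks (typically ~64KB).
    const reader = file.stream().getReader();
    while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        // Re-slice each chunk into 1KB views without copying.
        for (let offset = 0; offset < value.length; offset += 1024) {
            const oneKB = value.subarray(offset, offset + 1024); // Uint8Array of at most 1024 bytes
            console.log(oneKB);
        }
    }
}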

Related

How can I optimize my JavaScript code to handle large log files (over 1 GB)? [duplicate]

I need to do some parsing of large (5-10 GB) log files in JavaScript/Node.js (I'm using Cube).
The logline looks something like:
10:00:43.343423 I'm a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".
We need to read each line, do some parsing (e.g. strip out 5, 7 and SUCCESS), then pump this data into Cube (https://github.com/square/cube) using their JS client.
Firstly, what is the canonical way in Node to read in a file, line by line?
It seems to be fairly common question online:
http://www.quora.com/What-is-the-best-way-to-read-a-file-line-by-line-in-node-js
Read a file one line at a time in node.js?
A lot of the answers seem to point to a bunch of third-party modules:
https://github.com/nickewing/line-reader
https://github.com/jahewson/node-byline
https://github.com/pkrumins/node-lazy
https://github.com/Gagle/Node-BufferedReader
However, this seems like a fairly basic task - surely there's a simple way within the stdlib to read in a text file, line by line?
Secondly, I then need to process each line (e.g. convert the timestamp into a Date object, and extract useful fields).
What's the best way to do this, maximising throughput? Is there some way that won't block on either reading in each line, or on sending it to Cube?
Thirdly - I'm guessing using string splits and the JS equivalent of contains (indexOf != -1?) will be a lot faster than regexes? Has anybody had much experience parsing massive amounts of text data in Node.js?
I searched for a solution to parse very large files (GBs) line by line using a stream. All the third-party libraries and examples did not suit my needs, since they either did not process the file line by line or read the entire file into memory.
The following solution can parse very large files, line by line, using stream & pipe. For testing I used a 2.1 GB file with 17,000,000 records. RAM usage did not exceed 60 MB.
First, install the event-stream package:
npm install event-stream
Then:
var fs = require('fs')
    , es = require('event-stream');

var lineNr = 0;

var s = fs.createReadStream('very-large-file.csv')
    .pipe(es.split())
    .pipe(es.mapSync(function(line){
        // pause the readstream
        s.pause();

        lineNr += 1;

        // process line here and call s.resume() when rdy
        // function below was for logging memory usage
        logMemoryUsage(lineNr);

        // resume the readstream, possibly from a callback
        s.resume();
    })
    .on('error', function(err){
        console.log('Error while reading file.', err);
    })
    .on('end', function(){
        console.log('Read entire file.')
    })
);
Please let me know how it goes!
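The logMemoryUsage helper referenced above is not shown in the answer; a minimal sketch of what it might look like (my assumption, using process.memoryUsage()):

// Hypothetical helper: log the resident set size every 100,000 lines.
function logMemoryUsage(lineNr) {
    if (lineNr % 100000 === 0) {
        var rssMb = (process.memoryUsage().rss / 1024 / 1024).toFixed(1);
        console.log('Line ' + lineNr + ': RSS ' + rssMb + ' MB');
    }
}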
You can use the built-in readline module, see docs here. I use stream to create a new output stream.
var fs = require('fs'),
    readline = require('readline'),
    stream = require('stream');

var instream = fs.createReadStream('/path/to/file');
var outstream = new stream;
outstream.readable = true;
outstream.writable = true;

var rl = readline.createInterface({
    input: instream,
    output: outstream,
    terminal: false
});

rl.on('line', function(line) {
    console.log(line);
    //Do your stuff ...
    //Then write to output stream
    rl.write(line);
});
Large files will take some time to process. Do tell if it works.
I really liked @gerard's answer, which actually deserves to be the accepted answer here. I made some improvements:
Code is in a class (modular)
Parsing is included
Ability to resume is given to the outside, in case an asynchronous job is chained to reading the CSV, like inserting into a DB or an HTTP request
Reading in chunk/batch sizes that the user can declare. I took care of encoding in the stream too, in case you have files in a different encoding.
Here's the code:
'use strict'

const fs = require('fs'),
    util = require('util'),
    stream = require('stream'),
    es = require('event-stream'),
    parse = require("csv-parse"),
    iconv = require('iconv-lite');

class CSVReader {
    constructor(filename, batchSize, columns) {
        this.reader = fs.createReadStream(filename).pipe(iconv.decodeStream('utf8'))
        this.batchSize = batchSize || 1000
        this.lineNumber = 0
        this.data = []
        this.parseOptions = {delimiter: '\t', columns: true, escape: '/', relax: true}
    }

    read(callback) {
        this.reader
            .pipe(es.split())
            .pipe(es.mapSync(line => {
                ++this.lineNumber
                parse(line, this.parseOptions, (err, d) => {
                    this.data.push(d[0])
                })
                if (this.lineNumber % this.batchSize === 0) {
                    callback(this.data)
                }
            })
            .on('error', function(){
                console.log('Error while reading file.')
            })
            .on('end', function(){
                console.log('Read entire file.')
            }))
    }

    continue () {
        this.data = []
        this.reader.resume()
    }
}

module.exports = CSVReader
So basically, here is how you will use it:
let reader = new CSVReader('path_to_file.csv')
reader.read(() => reader.continue())
I tested this with a 35GB CSV file and it worked for me, which is why I chose to build on @gerard's answer. Feedback is welcome.
I used https://www.npmjs.com/package/line-by-line for reading more than 1,000,000 lines from a text file. In this case, RAM usage was about 50-60 megabytes.
const LineByLineReader = require('line-by-line'),
    lr = new LineByLineReader('big_file.txt');

lr.on('error', function (err) {
    // 'err' contains error object
});

lr.on('line', function (line) {
    // pause emitting of lines...
    lr.pause();

    // ...do your asynchronous line processing..
    setTimeout(function () {
        // ...and continue emitting lines.
        lr.resume();
    }, 100);
});

lr.on('end', function () {
    // All lines are read, file is closed now.
});
The Node.js Documentation offers a very elegant example using the Readline module.
Example: Read File Stream Line-by-Line
const { once } = require('node:events');
const fs = require('fs');
const readline = require('readline');

const rl = readline.createInterface({
    input: fs.createReadStream('sample.txt'),
    crlfDelay: Infinity
});

rl.on('line', (line) => {
    console.log(`Line from file: ${line}`);
});

// Top-level await: run this as an ES module or wrap it in an async function.
await once(rl, 'close');
Note: we use the crlfDelay option to recognize all instances of CR LF ('\r\n') as a single line break.
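The same Interface is also async-iterable (Node 11.4+), so the event listener can be replaced with a for await...of loop. A minimal sketch of that variant:

const fs = require('fs');
const readline = require('readline');

async function processLineByLine() {
    const rl = readline.createInterface({
        input: fs.createReadStream('sample.txt'),
        crlfDelay: Infinity
    });

    // Each iteration yields one line; the file is never loaded as a whole.
    for await (const line of rl) {
        console.log(`Line from file: ${line}`);
    }
}

processLineByLine();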
Apart from reading the big file line by line, you can also read it chunk by chunk. For more, refer to this article.
var fs = require('fs');

var offset = 0;
var chunkSize = 2048;
var chunkBuffer = Buffer.alloc(chunkSize); // Buffer.alloc() replaces the deprecated new Buffer()
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;
var lines = [];

while (bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) {
    offset += bytesRead;
    var str = chunkBuffer.slice(0, bytesRead).toString();
    var arr = str.split('\n');

    if (bytesRead === chunkSize) { // === comparison; the original = would always assign
        // the last item of the arr may not be a full line, leave it to the next chunk
        offset -= arr.pop().length;
    }
    lines.push(arr);
}
console.log(lines);
I had the same problem. After comparing several modules that seemed to have this feature, I decided to do it myself; it's simpler than I thought.
gist: https://gist.github.com/deemstone/8279565
var fetchBlock = lineByline(filepath, onEnd);
fetchBlock(function(lines, start){ ... }); //lines{array} start{int} lines[0] No.
It keeps the opened file in a closure; the returned fetchBlock() fetches a block from the file and splits it into an array of lines (it also deals with the leftover segment from the last fetch).
I've set the block size to 1024 for each read operation. This may have bugs, but the code logic is obvious; try it yourself.
Reading/writing files using streams with the native Node.js modules (fs, readline):
const fs = require('fs');
const readline = require('readline');

const rl = readline.createInterface({
    input: fs.createReadStream('input.json'),
    output: fs.createWriteStream('output.json')
});

rl.on('line', function(line) {
    console.log(line);
    // Do any 'line' processing if you want and then write to the output file
    this.output.write(`${line}\n`);
});

rl.on('close', function() {
    console.log(`Created "${this.output.path}"`);
});
Based on this question's answers, I implemented a class you can use to read a file synchronously line by line with fs.readSync(). You can make it "pause" and "resume" by using a Q promise (jQuery seems to require a DOM, so it can't run with Node.js):
var fs = require('fs');
var Q = require('q');

var lr = new LineReader(filenameToLoad);
lr.open();

var promise;
function workOnLine() {
    var line = lr.readNextLine();
    promise = complexLineTransformation(line).then(
        function() {console.log('ok');workOnLine();},
        function() {console.log('error');}
    );
}
workOnLine();

function complexLineTransformation(line) {
    var deferred = Q.defer();
    // ... async call goes here, in callback: deferred.resolve('done ok'); or deferred.reject(new Error(error));
    return deferred.promise;
}

function LineReader(filename) {
    this.moreLinesAvailable = true;
    this.fd = undefined;
    this.bufferSize = 1024*1024;
    this.buffer = Buffer.alloc(this.bufferSize); // Buffer.alloc() replaces the deprecated new Buffer()
    this.leftOver = '';
    this.read = undefined;
    this.idxStart = undefined;
    this.idx = undefined;
    this.lineNumber = 0;
    this._bundleOfLines = [];

    this.open = function() {
        this.fd = fs.openSync(filename, 'r');
    };

    this.readNextLine = function () {
        if (this._bundleOfLines.length === 0) {
            this._readNextBundleOfLines();
        }
        this.lineNumber++;
        var lineToReturn = this._bundleOfLines[0];
        this._bundleOfLines.splice(0, 1); // remove first element (pos, howmany)
        return lineToReturn;
    };

    this.getLineNumber = function() {
        return this.lineNumber;
    };

    this._readNextBundleOfLines = function() {
        var line = "";
        while ((this.read = fs.readSync(this.fd, this.buffer, 0, this.bufferSize, null)) !== 0) { // read next bytes until end of file
            this.leftOver += this.buffer.toString('utf8', 0, this.read); // append to leftOver
            this.idxStart = 0;
            while ((this.idx = this.leftOver.indexOf("\n", this.idxStart)) !== -1) { // as long as there is a newline-char in leftOver
                line = this.leftOver.substring(this.idxStart, this.idx);
                this._bundleOfLines.push(line);
                this.idxStart = this.idx + 1;
            }
            this.leftOver = this.leftOver.substring(this.idxStart);
            if (line !== "") {
                break;
            }
        }
    };
}
node-byline uses streams, so I would prefer that one for your huge files.
For your date conversions I would use moment.js.
For maximising your throughput you could think about using a software cluster. There are some nice modules which wrap the node-native cluster module quite well. I like cluster-master from isaacs. E.g. you could create a cluster of x workers which all process a file.
For benchmarking splits vs. regexes use benchmark.js. I haven't tested it until now. benchmark.js is available as a node module; a small sketch follows.
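For example, a benchmark.js suite comparing indexOf against a regex might look like this (the sample line and search term are assumptions based on the question):

const Benchmark = require('benchmark');

const line = '10:00:43.343423 I\'m a friendly log message. We are in state "SUCCESS".';
const suite = new Benchmark.Suite();

suite
    .add('indexOf', function () {
        return line.indexOf('SUCCESS') !== -1;
    })
    .add('regex', function () {
        return /SUCCESS/.test(line);
    })
    .on('cycle', function (event) {
        console.log(String(event.target)); // prints ops/sec for each candidate
    })
    .on('complete', function () {
        console.log('Fastest is ' + this.filter('fastest').map('name'));
    })
    .run();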
import * as csv from 'fast-csv';
import * as fs from 'fs';

interface Row {
    [s: string]: string;
}

type RowCallBack = (data: Row, index: number) => object;

export class CSVReader {
    protected file: string;
    protected csvOptions = {
        delimiter: ',',
        headers: true,
        ignoreEmpty: true,
        trim: true
    };

    constructor(file: string, csvOptions = {}) {
        if (!fs.existsSync(file)) {
            throw new Error(`File ${file} not found.`);
        }
        this.file = file;
        this.csvOptions = Object.assign({}, this.csvOptions, csvOptions);
    }

    public read(callback: RowCallBack): Promise<Array<object>> {
        return new Promise<Array<object>>(resolve => {
            const readStream = fs.createReadStream(this.file);
            const results: Array<any> = [];
            let index = 0;
            const csvStream = csv.parse(this.csvOptions).on('data', async (data: Row) => {
                index++;
                results.push(await callback(data, index));
            }).on('error', (err: Error) => {
                console.error(err.message);
                throw err;
            }).on('end', () => {
                resolve(results);
            });
            readStream.pipe(csvStream);
        });
    }
}
import { CSVReader } from '../src/helpers/CSVReader';

(async () => {
    const reader = new CSVReader('./database/migrations/csv/users.csv');
    const users = await reader.read(async data => {
        return {
            username: data.username,
            name: data.name,
            email: data.email,
            cellPhone: data.cell_phone,
            homePhone: data.home_phone,
            roleId: data.role_id,
            description: data.description,
            state: data.state,
        };
    });
    console.log(users);
})();
I have made a node module to read large files asynchronously, as text or JSON. Tested on large files.
Tested on large files.
var fs = require('fs')
    , util = require('util')
    , stream = require('stream')
    , es = require('event-stream');

module.exports = FileReader;

function FileReader() {
}

FileReader.prototype.read = function(pathToFile, callback) {
    var returnTxt = '';
    var s = fs.createReadStream(pathToFile)
        .pipe(es.split())
        .pipe(es.mapSync(function(line) {
            // pause the readstream
            s.pause();

            //console.log('reading line: '+line);
            returnTxt += line;

            // resume the readstream, possibly from a callback
            s.resume();
        })
        .on('error', function() {
            console.log('Error while reading file.');
        })
        .on('end', function() {
            console.log('Read entire file.');
            callback(returnTxt);
        })
    );
};

FileReader.prototype.readJSON = function(pathToFile, callback) {
    try {
        this.read(pathToFile, function(txt) {callback(JSON.parse(txt));});
    }
    catch(err) {
        throw new Error('json file is not valid! '+err.stack);
    }
};
Just save the file as file-reader.js, and use it like this:
var FileReader = require('./file-reader');
var fileReader = new FileReader();
fileReader.readJSON(__dirname + '/largeFile.json', function(jsonObj){/*callback logic here*/});

NodeJS: Using Pipe To Write A File From A Readable Stream Gives Heap Memory Error

I am trying to create 150 million lines of data and write the data into a csv file so that I can insert the data into different databases with little modification.
I am using a few functions to generate seemingly random data and pushing the data into the writable stream.
The code that I have right now is unsuccessful at handling the memory issue.
After a few hours of research, I am starting to think that I should not be pushing each piece of data at the end of the for loop, because it seems that the pipe method simply cannot handle garbage collection this way.
Also, I found a few StackOverFlow answers and NodeJS docs that recommend against using push at all.
However, I am very new to NodeJS and I feel like I am blocked and do not know how to proceed from here.
If someone can provide me any guidance on how to proceed and give me an example, I would really appreciate it.
Below is a part of my code to give you a better understanding of what I am trying to achieve.
P.S. -
I have found a way to successfully handle the memory issue without using the pipe method at all --I used the drain event-- but I had to start from scratch, and now I am curious to know if there is a simple way to handle this memory issue without completely changing this bit of code.
Also, I have been trying to avoid using any library because I feel like there should be a relatively easy tweak to make this work without using a library but please tell me if I am wrong. Thank you in advance.
// This is my target number of data
const targetDataNum = 150000000;

// Create readable stream
const readableStream = new Stream.Readable({
    read() {}
});

// Create writable stream
const writableStream = fs.createWriteStream('./database/RDBMS/test.csv');

// Write columns first
writableStream.write('id, body, date, dp\n', 'utf8');

// Then, push a number of data to the readable stream (150M in this case)
for (var i = 1; i <= targetDataNum; i += 1) {
    const id = i;
    const body = lorem.paragraph(1);
    const date = randomDate(new Date(2014, 0, 1), new Date());
    const dp = randomNumber(1, 1000);
    const data = `${id},${body},${date},${dp}\n`;
    readableStream.push(data, 'utf8');
};

// Pipe readable stream to writeable stream
readableStream.pipe(writableStream);

// End the stream
readableStream.push(null);
Since you're new to streams, maybe start with an easier abstraction: generators. Generators generate data only when it is consumed (just like Streams should), but they don't have buffering and complicated constructors and methods.
This is just your for loop, moved into a generator function:
function* generateData(targetDataNum) {
    for (var i = 1; i <= targetDataNum; i += 1) {
        const id = i;
        const body = lorem.paragraph(1);
        const date = randomDate(new Date(2014, 0, 1), new Date());
        const dp = randomNumber(1, 1000);
        yield `${id},${body},${date},${dp}\n`;
    }
}
In Node 12, you can create a Readable stream directly from any iterable, including generators and async generators:
const { Readable } = require('stream');

const stream = Readable.from(generateData(targetDataNum), { encoding: 'utf8' });
stream.pipe(writableStream);
I suggest trying a solution like the following:
const { Readable } = require('readable-stream');

class CustomReadable extends Readable {
    constructor(max, options = {}) {
        super(options);
        this.targetDataNum = max;
        this.i = 1;
    }

    _read(size) {
        if (this.i <= this.targetDataNum) {
            // your code to build the csv content (remember to increment this.i)
            this.push(data, 'utf8');
            return;
        }
        this.push(null);
    }
}

const rs = new CustomReadable(150000000);

rs.pipe(ws);
Just complete it with your portion of code to fill the csv and create the writable stream.
With this solution you leave calling rs.push to the stream's internal _read method, which is invoked repeatedly until this.push(null) is called. Before, you were probably filling the internal stream buffer too fast by calling push manually in a loop, and getting the out-of-memory error.
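For example, a possible completion (a sketch only; the row-building here just stands in for the question's lorem/randomDate/randomNumber helpers):

const fs = require('fs');
const { Readable } = require('readable-stream');

class CustomReadable extends Readable {
    constructor(max, options = {}) {
        super(options);
        this.targetDataNum = max;
        this.i = 1;
    }

    _read(size) {
        if (this.i <= this.targetDataNum) {
            // Build one CSV row per _read call; the stream calls _read again
            // only when its internal buffer has room, so backpressure is respected.
            const id = this.i;
            const row = `${id},body${id},${new Date().toISOString()},${id % 1000}\n`;
            this.i += 1;
            this.push(row, 'utf8');
            return;
        }
        this.push(null); // no more data
    }
}

const ws = fs.createWriteStream('./test.csv');
ws.write('id, body, date, dp\n', 'utf8');
new CustomReadable(150000000).pipe(ws);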
Try piping to the WritableStream before you start pumping data into the ReadableStream, and yield before you write the next chunk.
...

// Write columns first
writableStream.write('id, body, date, dp\n', 'utf8');

// Pipe readable stream to writeable stream
readableStream.pipe(writableStream);

// Then, push a number of data to the readable stream (150M in this case)
for (var i = 1; i <= targetDataNum; i += 1) {
    const id = i;
    const body = lorem.paragraph(1);
    const date = randomDate(new Date(2014, 0, 1), new Date());
    const dp = randomNumber(1, 1000);
    const data = `${id},${body},${date},${dp}\n`;
    readableStream.push(data, 'utf8');
    // somehow YIELD for the STREAM to drain out.
};

...
The entire Stream implementation of Node.js relies on the fact that the wire is slow and that the CPU can actually have a downtime before the next chunk of data comes in from the stream source or till the next chunk of data has been written to the stream destination.
In the current implementation, since the for-loop has booked up the CPU, there is no downtime for the actual piping of the data to the write stream. You will be able to catch this if you watch cat test.csv, which will not change while the loop is running.
As (I am sure) you know, pipe helps guarantee that the data you are working with is buffered in memory only in chunks and not as a whole. But that guarantee only holds true if the CPU gets enough downtime to actually drain the data.
Having said all that, I wrapped your entire code into an async IIFE and ran it with an await on a setImmediate, which ensures that I yield for the stream to drain the data.
let fs = require('fs');
let Stream = require('stream');

(async function () {
    // This is my target number of data
    const targetDataNum = 150000000;

    // Create readable stream
    const readableStream = new Stream.Readable({
        read() { }
    });

    // Create writable stream
    const writableStream = fs.createWriteStream('./test.csv');

    // Write columns first
    writableStream.write('id, body, date, dp\n', 'utf8');

    // Pipe readable stream to writeable stream
    readableStream.pipe(writableStream);

    // Then, push a number of data to the readable stream (150M in this case)
    for (var i = 1; i <= targetDataNum; i += 1) {
        console.log(`Pushing ${i}`);
        const id = i;
        const body = `body${i}`;
        const date = `date${i}`;
        const dp = `dp${i}`;
        const data = `${id},${body},${date},${dp}\n`;
        readableStream.push(data, 'utf8');
        await new Promise(resolve => setImmediate(resolve));
    };

    // End the stream
    readableStream.push(null);
})();
This is what top looks like pretty much the whole time I am running this.
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
15213 binaek ** ** ****** ***** ***** * ***.* 0.5 *:**.** node
Notice the %MEM which stays more-or-less static.
You were running out of memory because you were pre-generating all the data in memory before you wrote any of it to disk. Instead, you need a strategy to write it as you generate, so you don't have to hold large amounts of data in memory.
It does not seem like you need .pipe() here because you control the generation of the data (it's not coming from some random readStream).
So, you can just generate the data and immediately write it and handle the drain event when needed. Here's a runnable example (this creates a very large file):
const { once } = require('events');
const fs = require('fs');

// This is my target number of data
const targetDataNum = 150000000;

async function run() {
    // Create writable stream
    const writableStream = fs.createWriteStream('./test.csv');

    // Write columns first
    writableStream.write('id, body, date, dp\n', 'utf8');

    // Then, push a number of data to the readable stream (150M in this case)
    for (let i = 1; i <= targetDataNum; i += 1) {
        const id = i;
        const body = lorem.paragraph(1);
        const date = randomDate(new Date(2014, 0, 1), new Date());
        const dp = randomNumber(1, 1000);
        const data = `${id},${body},${date},${dp}\n`;
        const canWriteMore = writableStream.write(data);
        if (!canWriteMore) {
            // wait for stream to be ready for more writing
            await once(writableStream, "drain");
        }
    }
    writableStream.end();
}

run().then(() => {
    console.log("done");
}).catch(err => {
    console.log("got rejection: ", err);
});

// placeholders for the functions that were being used
function randomDate(low, high) {
    let rand = randomNumber(low.getTime(), high.getTime());
    return new Date(rand);
}

function randomNumber(low, high) {
    return Math.floor(Math.random() * (high - low)) + low;
}

const lorem = {
    paragraph: function() {
        return "random paragraph";
    }
}

Nodejs - removing substring from a huge file

I need to remove a substring (that appears only in specific known lines of the file) from a file.
There are simple solutions of reading all the file data into a string, removing the substring, and then writing the fixed data back to the file.
Here is code I found here:
Node js - Remove string from text file
var data = fs.readFileSync('banlist.txt', 'utf-8');
var newValue = data.replace(new RegExp("STRING_TO_REMOVE"), '');
fs.writeFileSync('banlist.txt', newValue, 'utf-8');
My problem is that the file is huge - up to a billion lines of logs - so I can't read all the content into memory.
Why not a simple transform stream and replace()? replace() can take a callback as its second parameter, i.e. .replace(/bad1|bad2|bad3/g, filterWords), in case you need to replace words rather than remove them completely (see the sketch after the code).
const fs = require("fs")
const { pipeline, Transform } = require("stream")
const { join } = require("path")

const readFile = fs.createReadStream("./words.txt")
const writeFile = fs.createWriteStream(
    join(__dirname, "words-filtered.txt"),
    "utf8"
)

const transformFile = new Transform({
    transform(chunk, enc, next) {
        let c = chunk.toString().replace(/bad/g, "replaced")
        this.push(c)
        next()
    },
})

pipeline(readFile, transformFile, writeFile, (err) => {
    if (err) {
        console.log(err.message)
    }
})
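A sketch of the callback form mentioned above (filterWords and the replacement map are hypothetical):

// Map each matched word to its replacement; unmapped matches are dropped.
const replacements = { bad1: 'good1', bad2: 'good2', bad3: '' };

function filterWords(match) {
    return replacements[match] || '';
}

// inside the transform: chunk.toString().replace(/bad1|bad2|bad3/g, filterWords)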
https://nodejs.org/api/fs.html#fs_fs_read_fd_buffer_offset_length_position_callback
Don't read the whole file at once; read a small buffered piece of it and look for your input within that buffered piece, then increment your buffer's starting position and do it again. I would recommend having each buffer start not at the end of the previous buffer, but overlapping it by at least the expected size of the data being sought, so that you don't run into half of your data being at the end of one buffer and the other half at the beginning of the next.
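A minimal sketch of that idea, assuming a plain-ASCII search string (the needle and chunk size below are placeholders):

const fs = require('fs');

const NEEDLE = 'STRING_TO_REMOVE';
const CHUNK_SIZE = 64 * 1024;

function findInFile(path) {
    const fd = fs.openSync(path, 'r');
    const buffer = Buffer.alloc(CHUNK_SIZE);
    const overlap = NEEDLE.length - 1; // overlap reads so a match split across chunks is still seen
    let position = 0;

    while (true) {
        const bytesRead = fs.readSync(fd, buffer, 0, CHUNK_SIZE, position);
        if (bytesRead === 0) break;

        const text = buffer.toString('utf8', 0, bytesRead);
        if (text.includes(NEEDLE)) {
            console.log('Found "' + NEEDLE + '" near byte offset ' + position);
        }

        if (bytesRead < CHUNK_SIZE) break; // reached end of file
        position += bytesRead - overlap;   // step forward, keeping the overlap
    }
    fs.closeSync(fd);
}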
You could use a file read stream. However, you would have to find a way to detect whether the read data contains only part of the result; one way is sketched below.
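One way to handle that (a sketch I'm adding; the needle and file names are placeholders) is to carry the last needle.length - 1 characters of each chunk over to the next one, so a match split across chunks is still removed:

const fs = require('fs');

const needle = 'STRING_TO_REMOVE';
const input = fs.createReadStream('banlist.txt', 'utf8');
const output = fs.createWriteStream('banlist.fixed.txt');

const keep = Math.max(needle.length - 1, 0); // characters that could begin a split match
let carry = '';

input.on('data', (chunk) => {
    // Remove complete matches first, then hold back a tail that could start a split match.
    const text = (carry + chunk).split(needle).join('');
    carry = keep > 0 ? text.slice(-keep) : '';
    output.write(text.slice(0, text.length - carry.length));
});

input.on('end', () => {
    output.end(carry.split(needle).join(''));
});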
What you probably want to do is use streams so that you write after partial reads. This example could probably work for you; you need to copy the output ".tmp" file over the original to get the same behavior as in your question. It works by reading a chunk and then looking to see if you've come across a new line; it then processes that line, writes it, and removes it from the buffer. This should help with your memory problem.
var fs = require("fs");

var readStream = fs.createReadStream("./BFFile.txt", { encoding: "utf-8" });
var writeStream = fs.createWriteStream("./BFFile.txt.tmp");
const STRING_TO_REMOVE = "badword";
var buffer = "";

readStream.on("data", (chunk) => {
    buffer += chunk;
    var indexOfNewLine = buffer.search("\n");
    while (indexOfNewLine !== -1) {
        var line = buffer.substring(0, indexOfNewLine + 1);
        buffer = buffer.substring(indexOfNewLine + 1, buffer.length);
        line = line.replace(new RegExp(STRING_TO_REMOVE), "");
        writeStream.write(line);
        indexOfNewLine = buffer.search("\n");
    }
})

readStream.on("end", () => {
    buffer = buffer.replace(new RegExp(STRING_TO_REMOVE), "");
    writeStream.write(buffer);
    writeStream.close();
})
There are a few assumptions with this solution, such as the data being UTF-8, there being at most one bad word per line, every line having some text (I didn't test for that), and every line ending with a newline and not some other line ending.
Here are the docs for streams in Node.
Another thought I had was to use pipe and a transform stream, but that seems like overkill.
You can use this code to do it. I'm using an fs stream; it's made for reading huge files chunk by chunk with little memory. docs
const fs = require('fs');

const readStream = fs.createReadStream('./XXXXX');
const writeStream = fs.createWriteStream('./XXXXXXX');

readStream.on('data', (chunk) => {
    const data = chunk.toString().replace('STRING_TO_REMOVE', 'XXXXXX');
    writeStream.write(data);
});

readStream.on('end', () => {
    writeStream.close();
});

Transfer Learning Tensorflow.js size/shape error

I am trying to apply transfer learning by using a knnClassifier and the MobileNet image recognition model in TensorFlow.js. I am, however, receiving the following error:
Size(28672) must match the product of shape 28,3072
I don't know how to tackle this issue; I've tried creating a tensor3d and resizing using bilinear and nearest neighbor, but to no avail. I was wondering if someone here could check this out.
Note that my idea here is to train images from certain folders and assign them to their class using the add example of the knnClassifier. I have a function that reads the image from a path, and an async function that trains the model and makes a prediction from an image.
const tf = require('@tensorflow/tfjs');
//MobileNet : pre-trained model for TensorFlow.js
const mobilenet = require('@tensorflow-models/mobilenet');
//The module provides native TensorFlow execution
//in backend JavaScript applications under the Node.js runtime.
const tfnode = require('@tensorflow/tfjs-node');
const knnClassifier = require('./node_modules/@tensorflow-models/knn-classifier/dist/knn-classifier');
var glob = require('glob')
//The fs module provides an API for interacting with the file system.
const fs = require('fs');

const readImage = path => {
    //reads the entire contents of a file.
    //readFileSync() is synchronous and blocks execution until finished.
    const imageBuffer = fs.readFileSync(path);
    //Given the encoded bytes of an image,
    //it returns a 3D or 4D tensor of the decoded image. Supports BMP, GIF, JPEG and PNG formats.
    var tfimage = tfnode.node.decodeImage(imageBuffer);
    // const t3d = tf.tensor3d(Array.from(tfimage.dataSync()),[tfimage.shape[0], tfimage.shape[1], 1])
    const smalImg = tf.image.resizeNearestNeighbor(tfimage, [32, 32]);
    const resized = tf.cast(smalImg, 'float32');
    // t3d.reshape([32,32,3])
    // var smalImg = tf.image.resizeBilinear(tfimage, [368, 432]);
    // const resized = tf.cast(smalImg, 'float32');
    return resized;
}

var mainDirectory = "./img_samples/";

const imageClassification = async path => {
    const classifier = await knnClassifier.create();
    const image = await readImage(path);
    // Load the model.
    const model = await mobilenet.load();
    // Classify the image.
    const predictions = await model.classify(image);
    // print results on terminal
    console.log('Classification Results:', predictions);

    var folders = fs.readdirSync(mainDirectory);
    var filesPerClass = [];
    for (var i = 0; i < folders.length; i++) {
        files = fs.readdirSync(mainDirectory + folders[i]);
        var files_complete = [];
        for (var j = 0; j < files.length; j++) {
            files_complete.push(mainDirectory + folders[i] + "/" + files[j]);
        }
        filesPerClass.push(files_complete);
    }

    for (var i = 0; i < filesPerClass.length; i++) {
        for (var j = 0; j < filesPerClass[i].length; j++) {
            imageSample = readImage(filesPerClass[i][j]);
            console.log(imageSample);
            activation = await model.infer(imageSample, 'conv_preds'); //main directory
            classifier.addExample(activation, i);
        }
    }

    console.log(readImage('./hospitalTest.jpg'))
    const predictionsTest = await classifier.predictClass(readImage('./hospitalTest.jpg'));
    console.log('classficationTest:', predictionsTest);
}

if (process.argv.length !== 3) throw new Error('Incorrect arguments: node classify.js <IMAGE_FILE>');

imageClassification(process.argv[2]);
Since the KNN classifier is trained using an output from a node of MobileNet, the prediction needs to be done likewise:
outputMobilenet = await model.infer(readImage('./hospitalTest.jpg'), 'conv_preds')
predicted = await classifier.predictClass(outputMobilenet)

Firefox UI becomes unresponsive while downloading many files with Addon SDK API

I have a problem that is rather hard to debug: I need to download a lot (~400) of rather small (~3-4 MB) files in the background using the Firefox Addon SDK API.
I tried using the old API (nsIWebBrowserPersist) as well as the new API (Downloads.jsm) (shortened code):
Task.spawn(function () {
    for (var i = 0; i < documents.length; i++) {
        var url = ...;
        var file = ...;
        let download = yield Downloads.createDownload({
            source: url,
            target: file,
        });
        yield download.start();
        yield download.finalize();
    }
});
But the UI gets extremely unresponsive after some time. I tried using the same file and overwriting it, because my first guess was Windows file handles accumulating over time, but it didn't help. It does not seem to be related to system performance; it sometimes works, and on the same machine five minutes later it fails.
Is there a known issue with downloading a lot of files using the Firefox SDK API, or am I doing something wrong?
I found that by using an alternative API the download became faster and the UI more responsive:
function downloadFromUrl(url, file, callback) {
    var channel = chrome.Cc["@mozilla.org/network/io-service;1"]
        .getService(chrome.Ci.nsIIOService)
        .newChannel(url, 0, null);
    var bstream = chrome.Cc["@mozilla.org/binaryinputstream;1"]
        .createInstance(chrome.Ci.nsIBinaryInputStream);
    bstream.setInputStream(channel.open());

    var fos = chrome.Cc["@mozilla.org/network/safe-file-output-stream;1"]
        .createInstance(chrome.Ci.nsIFileOutputStream);
    try {
        fos.init(file, 0x04 | 0x08 | 0x10 | 0x20 | 0x40, 0600, 0); // see: https://developer.mozilla.org/en-US/docs/PR_Open#Parameters
        var length = 0;
        var size = 0;
        while(size = bstream.available()) {
            fos.write(bstream.readBytes(size), size);
            length += size;
            callback(length);
        }
    } finally {
        if (fos instanceof chrome.Ci.nsISafeOutputStream) {
            fos.finish();
        } else {
            fos.close();
        }
    }
}
I know that this is a rather primitive API, but it works way better than the alternatives.
Edit:
I improved the above function; it may be too bloated, but here it is anyway:
/**
 * Downloads from a given url to a local file
 * @param url url to download
 * @param file local file
 * @param callback called during the download, signature: callback(currentBytes)
 * @returns downloadResult {contentType, error: false | ExceptionObject}
 */
function downloadFromUrl(url, file, callback) {
    let result = {
        contentType: null,
        error: false
    };

    try {
        let channel = chrome.Cc["@mozilla.org/network/io-service;1"]
            .getService(chrome.Ci.nsIIOService)
            .newChannel(url, 0, null);
        let bstream = chrome.Cc["@mozilla.org/binaryinputstream;1"]
            .createInstance(chrome.Ci.nsIBinaryInputStream);
        bstream.setInputStream(channel.open());

        let fos = chrome.Cc["@mozilla.org/network/safe-file-output-stream;1"]
            .createInstance(chrome.Ci.nsIFileOutputStream);
        try {
            // const values from https://developer.mozilla.org/en-US/docs/PR_Open#Parameters
            const PR_RDWR = 0x04;        // Open for reading and writing.
            const PR_CREATE_FILE = 0x08; // If the file does not exist, the file is created. If the file exists, this flag has no effect.
            const PR_APPEND = 0x10;      // The file pointer is set to the end of the file prior to each write.
            const PR_TRUNCATE = 0x20;    // If the file exists, its length is truncated to 0.
            const PR_SYNC = 0x40;        // If set, each write will wait for both the file data and file status to be physically updated.

            fos.init(file, PR_RDWR | PR_CREATE_FILE | PR_APPEND | PR_TRUNCATE | PR_SYNC, 0600, 0);

            let length = 0;
            let size = bstream.available();
            while(size) {
                fos.write(bstream.readBytes(size), size);
                length += size;
                callback(length);
                size = bstream.available();
            }
            fos.flush();
            result.contentType = channel.contentType;
        } finally {
            if (fos instanceof chrome.Ci.nsISafeOutputStream) {
                fos.finish();
            } else {
                fos.close();
            }
        }
    } catch (e) {
        result.error = e;
    }

    return result;
}
