I am trying to read a large file one line at a time. I found a question on Quora that dealt with the subject but I'm missing some connections to make the whole thing fit together.
var Lazy=require("lazy");
new Lazy(process.stdin)
.lines
.forEach(
function(line) {
console.log(line.toString());
}
);
process.stdin.resume();
The bit that I'd like to figure out is how I might read one line at a time from a file instead of STDIN as in this sample.
I tried:
fs.open('./VeryBigFile.csv', 'r', '0666', Process);
function Process(err, fd) {
if (err) throw err;
// DO lazy read
}
but it's not working. I know that in a pinch I could fall back to using something like PHP, but I would like to figure this out.
I don't think the other answer would work as the file is much larger than the server I'm running it on has memory for.
Since Node.js v0.12 (and stable as of Node.js v4.0.0), there is a readline core module. Here's the easiest way to read lines from a file, without any external modules:
const fs = require('fs');
const readline = require('readline');
async function processLineByLine() {
const fileStream = fs.createReadStream('input.txt');
const rl = readline.createInterface({
input: fileStream,
crlfDelay: Infinity
});
// Note: we use the crlfDelay option to recognize all instances of CR LF
// ('\r\n') in input.txt as a single line break.
for await (const line of rl) {
// Each line in input.txt will be successively available here as `line`.
console.log(`Line from file: ${line}`);
}
}
processLineByLine();
Or alternatively:
var lineReader = require('readline').createInterface({
input: require('fs').createReadStream('file.in')
});
lineReader.on('line', function (line) {
console.log('Line from file:', line);
});
The last line is read correctly (as of Node v0.12 or later), even if there is no final \n.
UPDATE: this example has been added to Node's API official documentation.
For such a simple operation there shouldn't be any dependency on third-party modules. Go easy.
var fs = require('fs'),
readline = require('readline');
var rd = readline.createInterface({
input: fs.createReadStream('/path/to/file'),
output: process.stdout,
terminal: false
});
rd.on('line', function(line) {
console.log(line);
});
Update in 2019
An excellent example is already posted in the official Node.js documentation here.
This requires Node.js >= 11.4 to be installed on your machine.
const fs = require('fs');
const readline = require('readline');
async function processLineByLine() {
const fileStream = fs.createReadStream('input.txt');
const rl = readline.createInterface({
input: fileStream,
crlfDelay: Infinity
});
// Note: we use the crlfDelay option to recognize all instances of CR LF
// ('\r\n') in input.txt as a single line break.
for await (const line of rl) {
// Each line in input.txt will be successively available here as `line`.
console.log(`Line from file: ${line}`);
}
}
processLineByLine();
You don't have to open the file, but instead, you have to create a ReadStream.
fs.createReadStream
Then pass that stream to Lazy
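Putting that together with the snippet from the question, a minimal sketch might look like this (assuming the lazy module's .lines API behaves the same on a file stream as it does on stdin):
var Lazy = require('lazy');
var fs = require('fs');

new Lazy(fs.createReadStream('./VeryBigFile.csv'))
  .lines
  .forEach(function (line) {
    console.log(line.toString());
  });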
require('fs').readFileSync('file.txt', 'utf-8').split(/\r?\n/).forEach(function(line){
console.log(line);
})
There is a very nice module for reading a file line by line; it's called line-reader.
With it you simply write:
var lineReader = require('line-reader');
lineReader.eachLine('file.txt', function(line, last) {
console.log(line);
// do whatever you want with line...
if(last){
// or check if it's the last one
}
});
You can even iterate over the file with a "Java-style" interface, if you need more control:
lineReader.open('file.txt', function(reader) {
if (reader.hasNextLine()) {
reader.nextLine(function(line) {
console.log(line);
});
}
});
Old topic, but this works:
var rl = readline.createInterface({
input : fs.createReadStream('/path/file.txt'),
output: process.stdout,
terminal: false
})
rl.on('line',function(line){
console.log(line) //or parse line
})
Simple. No need for an external module.
You can always roll your own line reader. I haven't benchmarked this snippet yet, but it correctly splits the incoming stream of chunks into lines without the trailing '\n'.
var last = "";
process.stdin.on('data', function(chunk) {
var lines, i;
lines = (last+chunk).split("\n");
for(i = 0; i < lines.length - 1; i++) {
console.log("line: " + lines[i]);
}
last = lines[i];
});
process.stdin.on('end', function() {
console.log("line: " + last);
});
process.stdin.resume();
I did come up with this when working on a quick log parsing script that needed to accumulate data during the log parsing, and I felt that it would be nice to try doing this using js and node instead of using perl or bash.
Anyway, I do feel that small nodejs scripts should be self-contained and not rely on third-party modules, so after reading all the answers to this question, each using various modules to handle line parsing, a 13 SLOC native nodejs solution might be of interest.
With the carrier module:
var carrier = require('carrier');
process.stdin.resume();
carrier.carry(process.stdin, function(line) {
console.log('got one line: ' + line);
});
I ended up with a massive, massive memory leak using Lazy to read line by line when trying to then process those lines and write them to another stream due to the way drain/pause/resume in node works (see: http://elegantcode.com/2011/04/06/taking-baby-steps-with-node-js-pumping-data-between-streams/ (i love this guy btw)). I haven't looked closely enough at Lazy to understand exactly why, but I couldn't pause my read stream to allow for a drain without Lazy exiting.
I wrote the code to process massive csv files into xml docs, you can see the code here: https://github.com/j03m/node-csv2xml
If you run the previous revisions with Lazy line it leaks. The latest revision doesn't leak at all and you can probably use it as the basis for a reader/processor. Though I have some custom stuff in there.
Edit: I guess I should also note that my code with Lazy worked fine until I found myself writing large enough xml fragments that drain/pause/resume became a necessity. For smaller chunks it was fine.
In most cases this should be enough:
const fs = require("fs")
fs.readFile('./file', 'utf-8', (err, file) => {
const lines = file.split('\n')
for (let line of lines)
console.log(line)
});
Edit:
Use a transform stream.
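For instance, a minimal sketch of a line-splitting transform stream (the file name is a placeholder; each 'data' event downstream then receives one complete line):
const fs = require('fs');
const { Transform } = require('stream');

// Buffers incoming chunks and pushes complete lines downstream.
const lineSplitter = new Transform({
  readableObjectMode: true,
  transform(chunk, encoding, callback) {
    this.leftover = (this.leftover || '') + chunk.toString('utf8');
    const lines = this.leftover.split('\n');
    this.leftover = lines.pop(); // keep the partial last line for the next chunk
    for (const line of lines) this.push(line);
    callback();
  },
  flush(callback) {
    if (this.leftover) this.push(this.leftover); // emit the final line, if any
    callback();
  }
});

fs.createReadStream('./file.txt')
  .pipe(lineSplitter)
  .on('data', line => console.log(line));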
With a BufferedReader you can read lines.
// BufferedReader comes from the third-party buffered-reader module (Gagle/Node-BufferedReader)
new BufferedReader ("lorem ipsum", { encoding: "utf8" })
.on ("error", function (error){
console.log ("error: " + error);
})
.on ("line", function (line){
console.log ("line: " + line);
})
.on ("end", function (){
console.log ("EOF");
})
.read ();
I was frustrated by the lack of a comprehensive solution for this, so I put together my own attempt (git / npm). Copy-pasted list of features:
Interactive line processing (callback-based, no loading the entire file into RAM)
Optionally, return all lines in an array (detailed or raw mode)
Interactively interrupt streaming, or perform map/filter like processing
Detect any newline convention (PC/Mac/Linux)
Correct eof / last line treatment
Correct handling of multi-byte UTF-8 characters
Retrieve byte offset and byte length information on per-line basis
Random access, using line-based or byte-based offsets
Automatically map line-offset information, to speed up random access
Zero dependencies
Tests
NIH? You decide :-)
Since posting my original answer, I found that split is a very easy-to-use node module for reading a file line by line; it also accepts optional parameters.
var split = require('split');
fs.createReadStream(file)
.pipe(split())
.on('data', function (line) {
//each chunk now is a separate line!
});
Haven't tested on very large files. Let us know if you do.
function createLineReader(fileName){
var EM = require("events").EventEmitter
var ev = new EM()
var stream = require("fs").createReadStream(fileName)
var remainder = null;
stream.on("data",function(data){
if(remainder != null){//append newly received data chunk
var tmp = new Buffer(remainder.length+data.length)
remainder.copy(tmp)
data.copy(tmp,remainder.length)
data = tmp;
}
var start = 0;
for(var i=0; i<data.length; i++){
if(data[i] == 10){ //\n new line
var line = data.slice(start,i)
ev.emit("line", line)
start = i+1;
}
}
if(start<data.length){
remainder = data.slice(start);
}else{
remainder = null;
}
})
stream.on("end",function(){
if(null!=remainder) ev.emit("line",remainder)
})
return ev
}
//---------main---------------
fileName = process.argv[2]
lineReader = createLineReader(fileName)
lineReader.on("line",function(line){
console.log(line.toString())
//console.log("++++++++++++++++++++")
})
I wanted to tackle this same problem, basically what in Perl would be:
while (<>) {
process_line($_);
}
My use case was just a standalone script, not a server, so synchronous was fine. These were my criteria:
The minimal synchronous code that I could reuse in many projects.
No limits on file size or number of lines.
No limits on length of lines.
Able to handle full Unicode in UTF-8, including characters beyond the BMP.
Able to handle *nix and Windows line endings (old-style Mac not needed for me).
Line endings character(s) to be included in lines.
Able to handle last line with or without end-of-line characters.
Not use any external libraries not included in the node.js distribution.
This is a project for me to get a feel for low-level scripting type code in node.js and decide how viable it is as a replacement for other scripting languages like Perl.
After a surprising amount of effort and a couple of false starts this is the code I came up with. It's pretty fast but less trivial than I would've expected: (fork it on GitHub)
var fs = require('fs'),
StringDecoder = require('string_decoder').StringDecoder,
util = require('util');
function lineByLine(fd) {
var blob = '';
var blobStart = 0;
var blobEnd = 0;
var decoder = new StringDecoder('utf8');
var CHUNK_SIZE = 16384;
var chunk = new Buffer(CHUNK_SIZE);
var eolPos = -1;
var lastChunk = false;
var moreLines = true;
var readMore = true;
// each line
while (moreLines) {
readMore = true;
// append more chunks from the file onto the end of our blob of text until we have an EOL or EOF
while (readMore) {
// do we have a whole line? (with LF)
eolPos = blob.indexOf('\n', blobStart);
if (eolPos !== -1) {
blobEnd = eolPos;
readMore = false;
// do we have the last line? (no LF)
} else if (lastChunk) {
blobEnd = blob.length;
readMore = false;
// otherwise read more
} else {
var bytesRead = fs.readSync(fd, chunk, 0, CHUNK_SIZE, null);
lastChunk = bytesRead !== CHUNK_SIZE;
blob += decoder.write(chunk.slice(0, bytesRead));
}
}
if (blobStart < blob.length) {
processLine(blob.substring(blobStart, blobEnd + 1));
blobStart = blobEnd + 1;
if (blobStart >= CHUNK_SIZE) {
// blobStart is in characters, CHUNK_SIZE is in octets
var freeable = blobStart / CHUNK_SIZE;
// keep blob from growing indefinitely, not as deterministic as I'd like
blob = blob.substring(CHUNK_SIZE);
blobStart -= CHUNK_SIZE;
blobEnd -= CHUNK_SIZE;
}
} else {
moreLines = false;
}
}
}
It could probably be cleaned up further, it was the result of trial and error.
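A minimal way to drive it might look like this (processLine is whatever per-line handler you define; the file name is just an example, and fs is already required above):
var processLine = function (line) {
    console.log(line);
};

var fd = fs.openSync('./VeryBigFile.csv', 'r');
lineByLine(fd);
fs.closeSync(fd);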
Generator based line reader: https://github.com/neurosnap/gen-readlines
var fs = require('fs');
var readlines = require('gen-readlines');
fs.open('./file.txt', 'r', function(err, fd) {
if (err) throw err;
fs.fstat(fd, function(err, stats) {
if (err) throw err;
for (var line of readlines(fd, stats.size)) {
console.log(line.toString());
}
});
});
A new function was added in Node.js v18.11.0 to read files line by line
filehandle.readLines([options])
This is how you use it with a text file you want to read:
import { open } from 'node:fs/promises';
myFileReader();
async function myFileReader() {
const file = await open('./TextFileName.txt');
for await (const line of file.readLines()) {
console.log(line)
}
}
To learn more, read the Node.js documentation; here is the link for the file system readLines() method:
https://nodejs.org/api/fs.html#filehandlereadlinesoptions
If you want to read a file line by line and write it to another file:
var fs = require('fs');
var readline = require('readline');
var Stream = require('stream');
function readFileLineByLine(inputFile, outputFile) {
var instream = fs.createReadStream(inputFile);
var outstream = new Stream();
outstream.readable = true;
outstream.writable = true;
var rl = readline.createInterface({
input: instream,
output: outstream,
terminal: false
});
rl.on('line', function (line) {
fs.appendFileSync(outputFile, line + '\n');
});
};
var fs = require('fs');
function readfile(name,online,onend,encoding) {
var bufsize = 1024;
var buffer = new Buffer(bufsize);
var bufread = 0;
var fd = fs.openSync(name,'r');
var position = 0;
var eof = false;
var data = "";
var lines = 0;
encoding = encoding || "utf8";
function readbuf() {
bufread = fs.readSync(fd,buffer,0,bufsize,position);
position += bufread;
eof = bufread ? false : true;
data += buffer.toString(encoding,0,bufread);
}
function getLine() {
var nl = data.indexOf("\r"), hasnl = nl !== -1;
if (!hasnl && eof) return fs.closeSync(fd), online(data,++lines), onend(lines);
if (!hasnl && !eof) readbuf(), nl = data.indexOf("\r"), hasnl = nl !== -1;
if (!hasnl) return process.nextTick(getLine);
var line = data.substr(0,nl);
data = data.substr(nl+1);
if (data[0] === "\n") data = data.substr(1);
online(line,++lines);
process.nextTick(getLine);
}
getLine();
}
I had the same problem and came up with the above solution.
It looks similar to the others but is async and can read large files very quickly.
Hope this helps.
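A usage sketch based on the readfile signature above (the file name is a placeholder):
readfile('./VeryBigFile.csv', function (line, lineNumber) {
    console.log(lineNumber + ': ' + line);
}, function (totalLines) {
    console.log('done, read ' + totalLines + ' lines');
});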
Two questions we must ask ourselves while doing such operations are:
What's the amount of memory used to perform it?
Is the memory consumption increasing drastically with the file size?
Solutions like require('fs').readFileSync() load the whole file into memory. That means the amount of memory required will be almost equivalent to the file size. We should avoid these for anything larger than 50 MB.
We can easily track the amount of memory used by a function by placing these lines of code after the function invocation:
const used = process.memoryUsage().heapUsed / 1024 / 1024;
console.log(
`The script uses approximately ${Math.round(used * 100) / 100} MB`
);
Right now the best way to read particular lines from a large file is using node's readline. The documentation has amazing examples.
This is my favorite way of going through a file, a simple native solution for a progressive (as in not a "slurp" or all-in-memory way) file read with modern async/await. It's a solution that I find "natural" when processing large text files without having to resort to the readline package or any non-core dependency.
let buf = '';
for await ( const chunk of fs.createReadStream('myfile') ) {
const lines = buf.concat(chunk).split(/\r?\n/);
buf = lines.pop() ?? '';
for( const line of lines ) {
console.log(line);
}
}
if(buf.length) console.log(buf); // last line, if file does not end with newline
You can adjust the encoding in fs.createReadStream or use chunk.toString(<arg>). Also, this lets you fine-tune the line splitting to your taste, i.e. use .split(/\n+/) to skip empty lines, and control the chunk size with fs.createReadStream('myfile', { highWaterMark: <chunkSize> }).
Don't forget to create a function like processLine(line) to avoid repeating the line-processing code twice due to the ending buf leftover. Unfortunately, the ReadStream instance does not update its end-of-file flags in this setup, so there's no way, afaik, to detect within the loop that we're in the last iteration without some more verbose tricks like comparing the file size from fs.Stats() with .bytesRead. Hence the final buf processing, unless you're absolutely sure your file ends with a newline \n, in which case the for await loop should suffice.
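For example, a minimal refactor of the snippet above along those lines (processLine stands in for whatever you do per line; as above, this assumes fs is already imported and top-level await is available):
function processLine(line) {
    console.log(line); // replace with your own per-line logic
}

let buf = '';
for await (const chunk of fs.createReadStream('myfile')) {
    const lines = buf.concat(chunk).split(/\r?\n/);
    buf = lines.pop() ?? '';
    lines.forEach(processLine);
}
if (buf.length) processLine(buf); // leftover last line, if the file doesn't end with a newline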
Performance Considerations
Chunk sizes are important for performance, the default is 64k for text files and, for multi MB files, larger chunks can improve speed by an order of magnitude.
The above snippet runs at least at the same speed (or sometimes even 5% faster) as code based on Node.js v18's filehandle.readLines() or on the readline module (the accepted answer), once you tune highWaterMark to something your machine can handle; i.e. setting it to the same size as the file, if your available memory allows it, is the fastest.
In any case, all of the Node.js line-reading answers here are an order of magnitude slower than the Perl or native *nix solutions.
Similar alternatives
★ If you prefer the evented asynchronous version, this would be it:
let buf = '';
fs.createReadStream('myfile')
.on('data', chunk => {
const lines = buf.concat(chunk).split(/\r?\n/);
buf = lines.pop();
for( const line of lines ) {
console.log(line);
}
})
.on('end', () => buf.length && console.log(buf) );
★ Now if you don't mind importing the stream core package, then this is the equivalent piped stream version, which allows for chaining transforms like gzip decompression:
const { Writable } = require('stream');
let buf = '';
fs.createReadStream('myfile').pipe(
new Writable({
write: (chunk, enc, next) => {
const lines = buf.concat(chunk).split(/\r?\n/);
buf = lines.pop();
for (const line of lines) {
console.log(line);
}
next();
}
})
).on('finish', () => buf.length && console.log(buf) );
I have a little module which does this well and is used by quite a few other projects: npm readline. Note that in Node v10 there is a native readline module, so I republished my module as linebyline: https://www.npmjs.com/package/linebyline
If you don't want to use the module, the function is very simple:
var fs = require('fs'),
EventEmitter = require('events').EventEmitter,
util = require('util'),
newlines = [
13, // \r
10 // \n
];
var readLine = module.exports = function(file, opts) {
if (!(this instanceof readLine)) return new readLine(file);
EventEmitter.call(this);
opts = opts || {};
var self = this,
line = [],
lineCount = 0,
emit = function(line, count) {
self.emit('line', new Buffer(line).toString(), count);
};
this.input = fs.createReadStream(file);
this.input.on('open', function(fd) {
self.emit('open', fd);
})
.on('data', function(data) {
for (var i = 0; i < data.length; i++) {
if (0 <= newlines.indexOf(data[i])) { // Newline char was found.
lineCount++;
if (line.length) emit(line, lineCount);
line = []; // Empty buffer.
} else {
line.push(data[i]); // Buffer new line data.
}
}
}).on('error', function(err) {
self.emit('error', err);
}).on('end', function() {
// Emit last line if anything left over since EOF won't trigger it.
if (line.length){
lineCount++;
emit(line, lineCount);
}
self.emit('end');
}).on('close', function() {
self.emit('close');
});
};
util.inherits(readLine, EventEmitter);
Another solution is to run the logic via the sequential executor nsynjs. It reads the file line by line using the node readline module, and it doesn't use promises or recursion, so it won't fail on large files. Here is what the code will look like:
var nsynjs = require('nsynjs');
var textFile = require('./wrappers/nodeReadline').textFile; // this file is part of nsynjs
function process(textFile) {
var fh = new textFile();
fh.open('path/to/file');
var s;
while (typeof(s = fh.readLine(nsynjsCtx).data) != 'undefined')
console.log(s);
fh.close();
}
var ctx = nsynjs.run(process,{},textFile,function () {
console.log('done');
});
The code above is based on this example: https://github.com/amaksr/nsynjs/blob/master/examples/node-readline/index.js
I use this:
function emitLines(stream, re){
re = re || /\n/;
var buffer = '';
stream.on('data', stream_data);
stream.on('end', stream_end);
function stream_data(data){
buffer += data;
flush();
}//stream_data
function stream_end(){
if(buffer) stream.emit('line', buffer);
}//stream_end
function flush(){
var match;
while(match = re.exec(buffer)){
var index = match.index + match[0].length;
stream.emit('line', buffer.substring(0, index));
buffer = buffer.substring(index);
re.lastIndex = 0;
}
}//flush
}//emitLines
Use this function on a stream and listen to the line events that it will emit.
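A minimal usage sketch (hypothetical file name; the stream should have an encoding set so chunks arrive as strings):
var fs = require('fs');

var stream = fs.createReadStream('./some-file.txt', { encoding: 'utf8' });
emitLines(stream);

stream.on('line', function (line) {
    console.log('line: ' + line);
});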
While you should probably use the readline module as the top answer suggests, readline appears to be oriented toward command line interfaces rather than line reading. It's also a little bit more opaque regarding buffering. (Anyone who needs a streaming line oriented reader probably will want to tweak buffer sizes). The readline module is ~1000 lines while this, with stats and tests, is 34.
const EventEmitter = require('events').EventEmitter;
class LineReader extends EventEmitter{
constructor(f, delim='\n'){
super();
this.totalChars = 0;
this.totalLines = 0;
this.leftover = '';
f.on('data', (chunk)=>{
this.totalChars += chunk.length;
let lines = chunk.split(delim);
if (lines.length === 1){
this.leftover += chunk;
return;
}
lines[0] = this.leftover + lines[0];
this.leftover = lines[lines.length-1];
if (this.leftover) lines.pop();
this.totalLines += lines.length;
for (let l of lines) this.onLine(l);
});
// f.on('error', ()=>{});
f.on('end', ()=>{console.log('chars', this.totalChars, 'lines', this.totalLines)});
}
onLine(l){
this.emit('line', l);
}
}
//Command line test
const f = require('fs').createReadStream(process.argv[2], 'utf8');
const delim = process.argv[3];
const lineReader = new LineReader(f, delim);
lineReader.on('line', (line)=> console.log(line));
Here's an even shorter version, without the stats, at 19 lines:
class LineReader extends require('events').EventEmitter{
constructor(f, delim='\n'){
super();
this.leftover = '';
f.on('data', (chunk)=>{
let lines = chunk.split(delim);
if (lines.length === 1){
this.leftover += chunk;
return;
}
lines[0] = this.leftover + lines[0];
this.leftover = lines[lines.length-1];
if (this.leftover)
lines.pop();
for (let l of lines)
this.emit('line', l);
});
}
}
const fs = require("fs")
fs.readFile('./file', 'utf-8', (err, data) => {
var innerContent = '';
console.log("Asynchronous read: " + data.toString());
const lines = data.toString().split('\n')
for (let line of lines)
innerContent += line + '<br>';
});
I wrapped the whole logic of daily line processing as an npm module: line-kit
https://www.npmjs.com/package/line-kit
// example
var count = 0
require('line-kit')(require('fs').createReadStream('/etc/issue'),
(line) => { count++; },
() => {console.log(`seen ${count} lines`)})
I use the code below to read lines, after verifying that the path is not a directory and is not included in the list of files that do not need to be checked.
(function () {
var fs = require('fs');
var glob = require('glob-fs')();
var path = require('path');
var result = 0;
var exclude = ['LICENSE',
path.join('e2e', 'util', 'db-ca', 'someother-file'),
path.join('src', 'favicon.ico')];
var files = [];
files = glob.readdirSync('**');
var allFiles = [];
var patternString = [
'trade',
'order',
'market',
'securities'
];
files.map((file) => {
try {
if (!fs.lstatSync(file).isDirectory() && exclude.indexOf(file) === -1) {
fs.readFileSync(file).toString().split(/\r?\n/).forEach(function(line){
patternString.map((pattern) => {
if (line.indexOf(pattern) !== -1) {
console.log(file + ' contains `' + pattern + '` in line "' + line + '";');
result = 1;
}
});
});
}
} catch (e) {
console.log('Error:', e.stack);
}
});
process.exit(result);
})();
I have looked through all the answers above; all of them use a third-party library to solve it. There is a simple solution in Node's API, e.g.:
const fs= require('fs')
let stream = fs.createReadStream('<filename>', { autoClose: true })
stream.on('data', chunk => {
let row = chunk.toString('ascii')
})
Related
I need to do some parsing of large (5-10 GB) logfiles in Javascript/Node.js (I'm using Cube).
The logline looks something like:
10:00:43.343423 I'm a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".
We need to read each line, do some parsing (e.g. strip out 5, 7 and SUCCESS), then pump this data into Cube (https://github.com/square/cube) using their JS client.
Firstly, what is the canonical way in Node to read in a file, line by line?
It seems to be fairly common question online:
http://www.quora.com/What-is-the-best-way-to-read-a-file-line-by-line-in-node-js
Read a file one line at a time in node.js?
A lot of the answers seem to point to a bunch of third-party modules:
https://github.com/nickewing/line-reader
https://github.com/jahewson/node-byline
https://github.com/pkrumins/node-lazy
https://github.com/Gagle/Node-BufferedReader
However, this seems like a fairly basic task - surely, there's a simple way within the stdlib to read in a textfile, line-by-line?
Secondly, I then need to process each line (e.g. convert the timestamp into a Date object, and extract useful fields).
What's the best way to do this, maximising throughput? Is there some way that won't block on either reading in each line, or on sending it to Cube?
Thirdly - I'm guessing using string splits, and the JS equivalent of contains (IndexOf != -1?) will be a lot faster than regexes? Has anybody had much experience in parsing massive amounts of text data in Node.js?
I searched for a solution to parse very large files (GBs) line by line using a stream. None of the third-party libraries and examples suited my needs, since they either processed the files not line by line (like 1, 2, 3, 4 ...) or read the entire file into memory.
The following solution can parse very large files, line by line, using stream & pipe. For testing I used a 2.1 GB file with 17,000,000 records. RAM usage did not exceed 60 MB.
First, install the event-stream package:
npm install event-stream
Then:
var fs = require('fs')
, es = require('event-stream');
var lineNr = 0;
var s = fs.createReadStream('very-large-file.csv')
.pipe(es.split())
.pipe(es.mapSync(function(line){
// pause the readstream
s.pause();
lineNr += 1;
// process line here and call s.resume() when rdy
// function below was for logging memory usage
logMemoryUsage(lineNr);
// resume the readstream, possibly from a callback
s.resume();
})
.on('error', function(err){
console.log('Error while reading file.', err);
})
.on('end', function(){
console.log('Read entire file.')
})
);
Please let me know how it goes!
You can use the built-in readline package; see the docs here. I use stream to create a new output stream.
var fs = require('fs'),
readline = require('readline'),
stream = require('stream');
var instream = fs.createReadStream('/path/to/file');
var outstream = new stream;
outstream.readable = true;
outstream.writable = true;
var rl = readline.createInterface({
input: instream,
output: outstream,
terminal: false
});
rl.on('line', function(line) {
console.log(line);
//Do your stuff ...
//Then write to output stream
rl.write(line);
});
Large files will take some time to process. Do tell if it works.
I really liked @gerard's answer, which actually deserves to be the accepted answer here. I made some improvements:
Code is in a class (modular)
Parsing is included
Ability to resume is given to the outside, in case an asynchronous job is chained to reading the CSV, like inserting into a DB or making an HTTP request
Reading in chunk/batch sizes that the user can declare. I took care of encoding in the stream too, in case you have files in a different encoding.
Here's the code:
'use strict'
const fs = require('fs'),
util = require('util'),
stream = require('stream'),
es = require('event-stream'),
parse = require("csv-parse"),
iconv = require('iconv-lite');
class CSVReader {
constructor(filename, batchSize, columns) {
this.reader = fs.createReadStream(filename).pipe(iconv.decodeStream('utf8'))
this.batchSize = batchSize || 1000
this.lineNumber = 0
this.data = []
this.parseOptions = {delimiter: '\t', columns: true, escape: '/', relax: true}
}
read(callback) {
this.reader
.pipe(es.split())
.pipe(es.mapSync(line => {
++this.lineNumber
parse(line, this.parseOptions, (err, d) => {
this.data.push(d[0])
})
if (this.lineNumber % this.batchSize === 0) {
callback(this.data)
}
})
.on('error', function(){
console.log('Error while reading file.')
})
.on('end', function(){
console.log('Read entire file.')
}))
}
continue () {
this.data = []
this.reader.resume()
}
}
module.exports = CSVReader
So basically, here is how you will use it:
let reader = new CSVReader('path_to_file.csv')
reader.read(() => reader.continue())
I tested this with a 35 GB CSV file and it worked for me, which is why I chose to build on @gerard's answer. Feedback is welcome.
I used https://www.npmjs.com/package/line-by-line for reading more than 1,000,000 lines from a text file. In this case, RAM usage was about 50-60 MB.
const LineByLineReader = require('line-by-line'),
lr = new LineByLineReader('big_file.txt');
lr.on('error', function (err) {
// 'err' contains error object
});
lr.on('line', function (line) {
// pause emitting of lines...
lr.pause();
// ...do your asynchronous line processing..
setTimeout(function () {
// ...and continue emitting lines.
lr.resume();
}, 100);
});
lr.on('end', function () {
// All lines are read, file is closed now.
});
The Node.js Documentation offers a very elegant example using the Readline module.
Example: Read File Stream Line-by-Line
const { once } = require('node:events');
const fs = require('fs');
const readline = require('readline');
const rl = readline.createInterface({
input: fs.createReadStream('sample.txt'),
crlfDelay: Infinity
});
rl.on('line', (line) => {
console.log(`Line from file: ${line}`);
});
await once(rl, 'close'); // note: top-level await requires an ES module; otherwise wrap this in an async function
Note: we use the crlfDelay option to recognize all instances of CR LF ('\r\n') as a single line break.
Apart from reading the big file line by line, you can also read it chunk by chunk. For more, refer to this article.
var offset = 0;
var chunkSize = 2048;
var chunkBuffer = new Buffer(chunkSize);
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;
var lines = [];
while(bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) {
offset += bytesRead;
var str = chunkBuffer.slice(0, bytesRead).toString();
var arr = str.split('\n');
if(bytesRead === chunkSize) {
// the last item of the arr may be not a full line, leave it to the next chunk
offset -= arr.pop().length;
}
lines.push(arr);
}
console.log(lines);
I had the same problem. After comparing several modules that seem to have this feature, I decided to do it myself; it's simpler than I thought.
gist: https://gist.github.com/deemstone/8279565
var fetchBlock = lineByline(filepath, onEnd);
fetchBlock(function(lines, start){ ... }); //lines{array} start{int} lines[0] No.
It keeps the opened file in a closure; the returned fetchBlock() will fetch a block from the file and split it into an array of lines (it deals with the segment left over from the last fetch).
I've set the block size to 1024 for each read operation. This may have bugs, but the code logic is obvious; try it yourself.
Reading / Writing files using stream with the native nodejs modules (fs, readline):
const fs = require('fs');
const readline = require('readline');
const rl = readline.createInterface({
input: fs.createReadStream('input.json'),
output: fs.createWriteStream('output.json')
});
rl.on('line', function(line) {
console.log(line);
// Do any 'line' processing if you want and then write to the output file
this.output.write(`${line}\n`);
});
rl.on('close', function() {
console.log(`Created "${this.output.path}"`);
});
Based on this question's answer, I implemented a class you can use to read a file synchronously line by line with fs.readSync(). You can make this "pause" and "resume" by using a Q promise (jQuery seems to require a DOM, so you can't run it with nodejs):
var fs = require('fs');
var Q = require('q');
var lr = new LineReader(filenameToLoad);
lr.open();
var promise;
workOnLine = function () {
var line = lr.readNextLine();
promise = complexLineTransformation(line).then(
function() {console.log('ok');workOnLine();},
function() {console.log('error');}
);
}
workOnLine();
complexLineTransformation = function (line) {
var deferred = Q.defer();
// ... async call goes here, in callback: deferred.resolve('done ok'); or deferred.reject(new Error(error));
return deferred.promise;
}
function LineReader (filename) {
this.moreLinesAvailable = true;
this.fd = undefined;
this.bufferSize = 1024*1024;
this.buffer = new Buffer(this.bufferSize);
this.leftOver = '';
this.read = undefined;
this.idxStart = undefined;
this.idx = undefined;
this.lineNumber = 0;
this._bundleOfLines = [];
this.open = function() {
this.fd = fs.openSync(filename, 'r');
};
this.readNextLine = function () {
if (this._bundleOfLines.length === 0) {
this._readNextBundleOfLines();
}
this.lineNumber++;
var lineToReturn = this._bundleOfLines[0];
this._bundleOfLines.splice(0, 1); // remove first element (pos, howmany)
return lineToReturn;
};
this.getLineNumber = function() {
return this.lineNumber;
};
this._readNextBundleOfLines = function() {
var line = "";
while ((this.read = fs.readSync(this.fd, this.buffer, 0, this.bufferSize, null)) !== 0) { // read next bytes until end of file
this.leftOver += this.buffer.toString('utf8', 0, this.read); // append to leftOver
this.idxStart = 0
while ((this.idx = this.leftOver.indexOf("\n", this.idxStart)) !== -1) { // as long as there is a newline-char in leftOver
line = this.leftOver.substring(this.idxStart, this.idx);
this._bundleOfLines.push(line);
this.idxStart = this.idx + 1;
}
this.leftOver = this.leftOver.substring(this.idxStart);
if (line !== "") {
break;
}
}
};
}
node-byline uses streams, so I would prefer that one for your huge files.
For your date conversions I would use moment.js.
For maximising your throughput you could think about using a software cluster. There are some nice modules which wrap the node-native cluster module quite well. I like cluster-master from isaacs. E.g. you could create a cluster of x workers which all compute a file.
For benchmarking splits vs regexes use benchmark.js. I haven't tested it so far. benchmark.js is available as a node module.
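For instance, a rough benchmark.js sketch comparing indexOf/split against a regex on the sample log line (the parsing snippets are placeholders, not measured results):
var Benchmark = require('benchmark');
var suite = new Benchmark.Suite();

var line = '10:00:43.343423 I\'m a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".';

suite
  .add('indexOf + split', function () {
    var ok = line.indexOf('SUCCESS') !== -1;     // "contains" check
    var timestamp = line.split(' ')[0];          // first field
  })
  .add('regex', function () {
    var m = /^(\S+).*state "(\w+)"/.exec(line);  // timestamp + state in one pass
  })
  .on('cycle', function (event) {
    console.log(String(event.target));
  })
  .on('complete', function () {
    console.log('Fastest is ' + this.filter('fastest').map('name'));
  })
  .run();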
import * as csv from 'fast-csv';
import * as fs from 'fs';
interface Row {
[s: string]: string;
}
type RowCallBack = (data: Row, index: number) => object;
export class CSVReader {
protected file: string;
protected csvOptions = {
delimiter: ',',
headers: true,
ignoreEmpty: true,
trim: true
};
constructor(file: string, csvOptions = {}) {
if (!fs.existsSync(file)) {
throw new Error(`File ${file} not found.`);
}
this.file = file;
this.csvOptions = Object.assign({}, this.csvOptions, csvOptions);
}
public read(callback: RowCallBack): Promise < Array < object >> {
return new Promise < Array < object >> (resolve => {
const readStream = fs.createReadStream(this.file);
const results: Array < any > = [];
let index = 0;
const csvStream = csv.parse(this.csvOptions).on('data', async (data: Row) => {
index++;
results.push(await callback(data, index));
}).on('error', (err: Error) => {
console.error(err.message);
throw err;
}).on('end', () => {
resolve(results);
});
readStream.pipe(csvStream);
});
}
}
import { CSVReader } from '../src/helpers/CSVReader';
(async () => {
const reader = new CSVReader('./database/migrations/csv/users.csv');
const users = await reader.read(async data => {
return {
username: data.username,
name: data.name,
email: data.email,
cellPhone: data.cell_phone,
homePhone: data.home_phone,
roleId: data.role_id,
description: data.description,
state: data.state,
};
});
console.log(users);
})();
I have made a node module to read large files asynchronously, as text or JSON.
Tested on large files.
var fs = require('fs')
, util = require('util')
, stream = require('stream')
, es = require('event-stream');
module.exports = FileReader;
function FileReader(){
}
FileReader.prototype.read = function(pathToFile, callback){
var returnTxt = '';
var s = fs.createReadStream(pathToFile)
.pipe(es.split())
.pipe(es.mapSync(function(line){
// pause the readstream
s.pause();
//console.log('reading line: '+line);
returnTxt += line;
// resume the readstream, possibly from a callback
s.resume();
})
.on('error', function(){
console.log('Error while reading file.');
})
.on('end', function(){
console.log('Read entire file.');
callback(returnTxt);
})
);
};
FileReader.prototype.readJSON = function(pathToFile, callback){
try{
this.read(pathToFile, function(txt){callback(JSON.parse(txt));});
}
catch(err){
throw new Error('json file is not valid! '+err.stack);
}
};
Just save the file as file-reader.js, and use it like this:
var FileReader = require('./file-reader');
var fileReader = new FileReader();
fileReader.readJSON(__dirname + '/largeFile.json', function(jsonObj){/*callback logic here*/});
I want to read a file with many rows, then write the results.
It's fine with small files (<50 KB).
But I've got a 15 MB file for a programming competition, as a hard input.
Node.js becomes slow and I can't get the output in time, because I have to send them the output within a few minutes.
And it's not even using the full CPU/RAM.
Is the problem in my code, or can I do something about it? Thanks!
const fs = require("fs");
const input = "D:\\Downloads\\example.txt";
const output = input + ".final.txt";
var lineReader = require("readline").createInterface({
input: fs.createReadStream(input),
});
let out = "";
let all = [];
const line_counter = (
(i = 0) =>
() =>
++i
)();
lineReader.on("line", function (radek, index = line_counter()) {
all.push(radek);
});
all.forEach((v) => {
out += `${v}\n`;
});
fs.writeFile(output, out, (err) => {
if (err) {
console.error(err);
}
});
It seems like you want a better understanding of how to do the following using a streaming technique:
read an input text file stream line-by-line
perform a transform operation on each line of text
write the result of each transform operation to an output file stream
Node supports the web standard streams API — see the list of global objects in the current LTS version of Node (18): https://nodejs.org/docs/latest-v18.x/api/globals.html.
Below, I'll include a complete, minimal example which demonstrates the criteria above — you can use it as a model for learning and adapt it to meet the needs of your program. Because your goal is learning, I've included verbose comments at every step of the program, including links to documentation.
You'll also probably find it helpful to read about the Streams API on MDN and web.dev.
module.mjs:
import {open} from 'node:fs/promises';
import {Writable} from 'node:stream';
// Break string chunks from a ReadableStream into lines. Adapted from Deno's std library:
// See: https://github.com/denoland/deno_std/blob/0.166.0/streams/delimiter.ts#L11-L68
class TextLineStream extends TransformStream {
#buf = "";
constructor() {
super({
transform: (chunk, controller) => this.#handle(chunk, controller),
flush: (controller) => this.#handle("\r\n", controller),
});
}
#handle(chunk, controller) {
chunk = this.#buf + chunk;
while (true) {
const lfIndex = chunk.indexOf("\n");
if (lfIndex !== -1) {
let crOrLfIndex = lfIndex;
if (chunk[lfIndex - 1] === "\r") {
crOrLfIndex--;
}
controller.enqueue(chunk.slice(0, crOrLfIndex));
chunk = chunk.slice(lfIndex + 1);
continue;
}
break;
}
this.#buf = chunk;
}
}
async function main () {
// Paths based on your examples:
const pathIn = 'example.txt';
const pathOut = `${pathIn}.final.txt`;
// Create file handles to the target file paths:
// See: https://nodejs.org/docs/latest-v18.x/api/fs.html#fspromisesopenpath-flags-mode
const fhIn = await open(pathIn);
// The "w" flag means: Open file for writing. The file is created (if it does not exist) or truncated (if it exists).
// See: https://nodejs.org/docs/latest-v18.x/api/fs.html#file-system-flags
const fhOut = await open(pathOut, 'w');
// Create a web-standard WritableStream from the output file handle:
// See: https://nodejs.org/docs/latest-v18.x/api/stream.html#streamwritabletowebstreamwritable
const writable = Writable.toWeb(fhOut.createWriteStream({encoding: 'utf8'}));
const writer = writable.getWriter();
// A function abstraction for writing a text chunk to the output file stream:
const write = (text) => writer.ready.then(() => writer.write(text));
// Crate a web-standard ReadableStream from the input file handle,
// then pipe through a text decoder and break/collect the emitted chunks into lines:
// See: https://nodejs.org/docs/latest-v18.x/api/fs.html#filehandlereadablewebstream
const readable = fhIn.readableWebStream()
.pipeThrough(new TextDecoderStream())
.pipeThrough(new TextLineStream());
for await (const line of readable) {
// Handle each text line in here:
// For example: get the length of each line,
// and if it's greater than 0, write it as a line to the output stream:
const {length} = line;
if (length > 0) await write(`${length}\n`);
}
}
main();
Here's the CLI output of using the program on an example text file with some Lorem ipsum lines:
% node --version
v18.12.1
% ls
example.txt module.mjs
% cat example.txt
lorem
ipsum
dolor
sit
amet
% node module.mjs
% cat example.txt.final.txt
5
5
5
3
4
I need to remove a substring (that appears only in specific known lines of the file) from a file.
There are simple solutions that read all the file data into a string, remove the substring, and then write the fixed data back to the file.
Here is code I found here:
Node js - Remove string from text file
var data = fs.readFileSync('banlist.txt', 'utf-8');
var newValue = data.replace(new RegExp("STRING_TO_REMOVE"), '');
fs.writeFileSync('banlist.txt', newValue, 'utf-8');
My problem is, that the file is huge - up to billion lines of logs, so I can't read all content to the memory.
Why not a simple transform stream and replace()? replace() can take a callback as its second parameter, e.g. .replace(/bad1|bad2|bad3/g, filterWords), in case you need to replace words rather than remove them completely.
const fs = require("fs")
const { pipeline, Transform } = require("stream")
const { join } = require("path")
const readFile = fs.createReadStream("./words.txt")
const writeFile = fs.createWriteStream(
join(__dirname, "words-filtered.txt"),
"utf8"
)
const transformFile = new Transform({
transform(chunk, enc, next) {
let c = chunk.toString().replace(/bad/g, "replaced")
this.push(c)
next()
},
})
pipeline(readFile, transformFile, writeFile, (err) => {
if (err) {
console.log(err.message)
}
})
https://nodejs.org/api/fs.html#fs_fs_read_fd_buffer_offset_length_position_callback
Don't read the whole file at once: read a small buffered piece of it, look for your input within that buffered piece, then increment your buffer starting position and do it again. I would recommend having each buffer start not at the end of the previous buffer, but overlapping it by at least the expected size of the data being sought, so that you don't run into half of your data being at the end of one buffer and the other half at the beginning of the next.
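A rough sketch of that overlapping scan, using fs.readSync (the pattern, file name, and chunk size are placeholders; this only reports matches, it doesn't rewrite the file):
const fs = require('fs');

const PATTERN = 'STRING_TO_REMOVE';
const CHUNK_SIZE = 64 * 1024;                // bytes per read
const OVERLAP = Buffer.byteLength(PATTERN);  // carry this much between reads

const fd = fs.openSync('banlist.txt', 'r');
const buf = Buffer.alloc(CHUNK_SIZE);
let position = 0;
let bytesRead;

while ((bytesRead = fs.readSync(fd, buf, 0, CHUNK_SIZE, position)) > 0) {
  const text = buf.toString('utf8', 0, bytesRead);
  const idx = text.indexOf(PATTERN);
  if (idx !== -1) {
    console.log('match found near byte offset ' + (position + idx));
  }
  if (bytesRead < CHUNK_SIZE) break;         // reached end of file
  // overlap the next read by the pattern length so a match straddling
  // two buffers is still caught
  position += bytesRead - OVERLAP;
}
fs.closeSync(fd);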
You could use a file read stream. However, you would have to find a way to detect if the read data only contains part of the result.
What you probably want to do is use streams, so that you are writing after partial reads. This example could probably work for you; you need to copy the output ".tmp" text file over the original to get the same behavior as in your question. It works by reading a chunk and then looking to see if you've come across a newline; then it processes that line, writes it, and removes it from the buffer. This should help with your memory problem.
var fs = require("fs");
var readStream = fs.createReadStream("./BFFile.txt", { encoding: "utf-8" });
var writeStream = fs.createWriteStream("./BFFile.txt.tmp");
const STRING_TO_REMOVE = "badword";
var buffer = ""
readStream.on("data", (chunk) => {
buffer += chunk;
var indexOfNewLine = buffer.search("\n");
while (indexOfNewLine !== -1) {
var line = buffer.substring(0, indexOfNewLine + 1);
buffer = buffer.substring(indexOfNewLine + 1, buffer.length);
line = line.replace(new RegExp(STRING_TO_REMOVE), "");
writeStream.write(line);
indexOfNewLine = buffer.search("\n");
}
})
readStream.on("end", () => {
buffer = buffer.replace(new RegExp(STRING_TO_REMOVE), "");
writeStream.write(buffer);
writeStream.close();
})
There are a few assumptions with this solution, such as the data being UTF-8, there being at most 1 bad word per line, every line having some text (I didn't test for that), and every line ending with a newline and not some other line ending.
Here are the docs for streams in Node.
Another thought I had was to use pipe and a transform stream, but that seems like overkill.
You can use this code to do it. I'm using an fs stream; it's made for reading huge files in small chunks so memory stays low. docs
const fs = require('fs');
const readStream = fs.createReadStream('./XXXXX');
const writeStream = fs.createWriteStream('./XXXXXXX');
readStream.on('data', (chunk) => {
const data = chunk.toString().replace('STRING_TO_REMOVE', 'XXXXXX');
writeStream.write(data);
});
readStream.on('end', () => {
writeStream.close();
});
If multiple, independent node processes use fs.appendFile() to append text chunks (each of which is > 4KB) to a single file, can I lose/corrupt data w/o a file locking scheme or is fs.appendFile an atomic op?
I'm mostly concerned about Linux & a local fs.
The answer might be OS and/or filesystem dependent, but you can use this to test:
// This script aims to test/prove that you can append to a single file from
// multiple processes with buffers up to a certain size, without causing one
// process' output to corrupt the other's.
//
// The script takes one parameter, the length of the buffer. It then creates
// 20 worker processes which each write 50 lines of the specified buffer
// size to the same file. When all processes are done outputting, it tests
// the output file to ensure it is in the correct format.
const child_process = require('child_process');
const fs = require('fs');
const NUM_WORKERS=20;
const LINES_PER_WORKER=50;
const OUTPUT_FILE='./foo';
// each worker will output $LINES_PER_WORKER lines to the output file
function run_worker(worker_num, buf_len) {
// Each line will be a specific character, multiplied by the line length.
// The character changes based on the worker number.
const filler_len=buf_len-1; // -1 -> leave room for \n
const filler_char=String.fromCharCode(+worker_num+64);
line=filler_char.repeat(filler_len) + '\n';
for (let i=0; i<LINES_PER_WORKER; i++) {
fs.appendFile(OUTPUT_FILE, line, (e) => {
if (e) {
console.log('Oh noes!', e);
throw e;
}
});
}
}
if (process.argv[2] === "worker") {
run_worker(process.argv[3], process.argv[4]);
} else {
const buf_len=+process.argv[2] || 4096;
try {
fs.unlinkSync(OUTPUT_FILE);
} catch (e) {
// swallow the error if the file doesn't exist (the script was never run before)
if (e.code !== 'ENOENT') {
throw e;
}
}
console.log(`Launching ${NUM_WORKERS} worker processes`);
let finished = 0;
for (let i=1; i <= NUM_WORKERS; i++) {
let proc = child_process.spawn(process.execPath, [process.argv[1], 'worker', i, buf_len], {stdio: 'inherit'});
proc.on('close', (code) => {
if (code) {
console.log(`Worker exited with code ${code}`);
process.exit(code);
}
finished++;
if (finished === NUM_WORKERS) {
allFinished();
}
});
proc.on('error', (e) => {
console.log('Worker errored!');
throw e;
});
}
console.log(`Each line will be ${buf_len} characters long`);
console.log(`Waiting for processes to exit`);
// Now we want to test the output file. Each line should be the same letter
// repeated buf_len-1 times (remember the \n takes up one byte). If we had
// workers writing over eachother's lines, then there will be mixed characters
// and/or longer/shorter lines.
function allFinished() {
console.log(`Testing output file`);
// Make sure the file is the right size (ensures processes didn't write over
// each other's lines)
const expected_file_size=NUM_WORKERS * LINES_PER_WORKER * buf_len;
const actual_file_size=fs.statSync(OUTPUT_FILE).size;
if ( expected_file_size !== actual_file_size) {
console.log(`Expected file size of ${expected_file_size}, but got ${actual_file_size}`);
process.exit(1)
}
// File size is OK, test the actual content
// Scan line by line
// Note: Doesn't work on cygwin for lines < 255
const line_length=buf_len-1;
const lineReader = require('readline').createInterface({
input: require('fs').createReadStream(OUTPUT_FILE)
});
let num_lines = 0;
lineReader.on('line', function (line) {
const first_char = line[0];
if (line !== first_char.repeat(line_length)) {
num_lines++
}
});
if (num_lines > 0) {
console.log("Found $num_lines instances of corrupted lines");
} else {
console.log(`All's good! The output file had no corrupted lines.`);
}
fs.unlinkSync(OUTPUT_FILE);
}
}
Two things:
I ported this script to Node.js from the bash script at https://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/.
On macOS Mojave, I am unable to cause interleaved output.
I got a very nice answer on here about how to clear a line / delete a line in a file without having to truncate the file or replace the file with a new version of the file, here's the Python code:
#!/usr/bin/env python
import re,os,sys
logfile = sys.argv[1]
regex = sys.argv[2]
pattern = re.compile(regex)
with open(logfile,"r+") as f:
while True:
old_offset = f.tell()
l = f.readline()
if not l:
break
if pattern.search(l):
# match: blank the line
new_offset = f.tell()
if old_offset > len(os.linesep):
old_offset-=len(os.linesep)
f.seek(old_offset)
f.write(" "*(new_offset-old_offset-len(os.linesep)))
this script can be called like:
./clear-line.py <file> <pattern>
For educational purposes, I am trying to figure out if I can write this in Node.js. I can certainly read a file with Node.js line-by-line. But I am not sure if Node.js has the equivalent calls for tell/seek in this case.
The equivalent for write is surely:
https://nodejs.org/api/fs.html#fs_fs_write_fd_buffer_offset_length_position_callback
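As for tell/seek: Node's fs module doesn't expose them as separate calls; instead you pass an explicit position to fs.read()/fs.write() and keep track of the byte offset yourself. A rough sketch of that idea (hypothetical file and offsets):
const fs = require('fs');

const fd = fs.openSync('file.txt', 'r+');

// "tell": you maintain the byte offset yourself
let position = 0;

// "seek" + read: pass the position explicitly instead of relying on an internal file pointer
const buf = Buffer.alloc(16);
const bytesRead = fs.readSync(fd, buf, 0, buf.length, position);
position += bytesRead;

// "seek" + write: overwrite what was just read with spaces, starting at its offset
fs.writeSync(fd, ' '.repeat(bytesRead), position - bytesRead, 'utf8');

fs.closeSync(fd);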
Here is my attempt
#!/usr/bin/env node
const readline = require('readline');
const fs = require('fs');
const file = process.argv[2];
const rgx = process.argv[3];
const fd = fs.openSync(file, 'r+');
const rl = readline.createInterface({
input: fs.createReadStream(null, {fd: fd})
});
let position = 0;
const onLine = line => {
position += line.length;
if (String(line).match(rgx)) {
let len = line.length;
rl.close();
rl.removeListener('line', onLine);
// output the line that will be replaced/removed
process.stdout.write(line);
fs.write(fd, new Array(len + 1).join(' '), position, 'utf8', err => {
if (err) {
process.stderr.write(err.stack || err);
process.exit(1);
}
else {
process.exit(0);
}
});
}
};
rl.on('line', onLine);
It's not quite right - I don't think I am calculating the offset/position correctly. Perhaps someone who knows both Python and Node can help me out. I am not very familiar with calculating position/offset in files, especially in terms of buffers.
Here is the data in a text file that I am working with. All I want to do is read the first line that is not empty, and then remove that line from the file and write that line to stdout.
This could really be any non-whitespace data, but here is the JSON that I am working with:
{"dateCreated":"2016-12-26T09:52:03.250Z","pid":5371,"count":0,"uid":"7133d123-e6b8-4109-902b-7a90ade7c655","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:03.290Z","pid":5371,"count":1,"uid":"e881b0a9-8c28-42bb-8a9d-8109587777d0","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:03.390Z","pid":5371,"count":2,"uid":"065e51ff-14b8-4454-9ae5-b85152cfcb64","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:03.491Z","pid":5371,"count":3,"uid":"5af80a95-ff9d-4252-9c4e-0e421fd9320f","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:03.595Z","pid":5371,"count":4,"uid":"961e578f-288b-413c-b933-b791f833c037","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:03.696Z","pid":5371,"count":5,"uid":"a65cbf78-2ea1-4c3a-9beb-b4bf56e83a6b","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:03.799Z","pid":5371,"count":6,"uid":"d411e917-ad25-455f-9449-ae4d31c7b1ad","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:03.898Z","pid":5371,"count":7,"uid":"46f8841d-c86c-43f2-b440-8ab7feea7527","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:04.002Z","pid":5371,"count":8,"uid":"81b5ce7e-2f4d-4acb-884c-442c5ac4490f","isRead":false,"line":"foo bar baz"}
{"dateCreated":"2016-12-26T09:52:04.101Z","pid":5371,"count":9,"uid":"120ff45d-74e7-464e-abd5-94c41e3cd089","isRead":false,"line":"foo bar baz"}
You should take into consideration the newline character at the end of each line, which is not included in the 'line' you're getting via the readline module. That is, you should update position to position += (line.length + 1), and then when writing, just use position (without the -1).
OK, I think I got it, but if someone has any beef with this, please feel free to critique. It's close, but it needs some fine-tuning I think; there seems to be an off-by-one error or something like that.
#!/usr/bin/env node
const readline = require('readline');
const fs = require('fs');
const file = process.argv[2];
const rgx = new RegExp(process.argv[3]);
const fd = fs.openSync(file, 'r+');
const rl = readline.createInterface({
input: fs.createReadStream(null, {fd: fd})
});
let position = 0;
const onLine = line => {
if (String(line).match(rgx)) {
let len = line.length;
rl.close();
rl.removeListener('line', onLine);
// output the line that will be replaced/removed
process.stdout.write(line + '\n');
fs.write(fd, new Array(len + 1).join(' '), position, 'utf8',
(err, written, string) => {
if (err) {
process.stderr.write(err.stack || err);
return process.exit(1);
}
else {
process.exit(0);
}
});
}
position += (line.length + 1); // 1 is length of \n character
};
rl.on('line', onLine);