How to convert an audio file into a byte array with JavaScript - javascript

I'm trying to convert a .wav file to a byte array string. I need to do this on the back end, and targeting the file is becoming an issue.
files.forEach(file => {
  let index = files.indexOf(file);
  let reader = new FileReader();
  reader.readAsArrayBuffer(file);
  console.log(reader.result);
  reader.onload = function (event) {
    let byteArray = new Uint8Array(reader.result);
    let FileName = file.name;
    let dataAsByteArrayString = byteArray.toString();
    var listHtml = $list.html();
    // ...
  };
});
The above code uses npm's filereader package, which expects to be pointed at a file. I'm having difficulty doing this since this is not a front-end drag and drop of the file.
My generated file is called "response.wav". How would I convert this file using JavaScript and Node extensions? Thanks!

I don't know if this will help, but on the last project I worked on we parsed a .wav file using the Node buffer API and wrote it using the Node file API.
If you have more questions about the code I can direct you to the person who worked on this file the most. I hope it helps somewhat.
https://github.com/IntelliSound/intelliSound-Server/blob/development/lib/sound-data-parser.js
'use strict';

function ParsedWave(buffer) {
  const RIFF_HEADER_OFFSET = 0;
  const FILE_SIZE_OFFSET = 4;
  const RIFF_FORMAT_OFFSET = 8;
  const SUBCHUNK1_ID_OFFSET = 12;
  const AUDIO_FORMAT_OFFSET = 20;
  const NUMBER_OF_CHANNELS_OFFSET = 22;
  const SAMPLE_RATE_OFFSET = 24;
  const BITS_PER_SAMPLE_OFFSET = 34;
  const SUBCHUNK2_ID_OFFSET = 36;
  const SUBCHUNK2_SIZE_OFFSET = 40;
  const DATA_OFFSET = 44;

  this.buffer = buffer;
  this.riff = buffer.slice(RIFF_HEADER_OFFSET, RIFF_HEADER_OFFSET + 4).toString('utf8');
  this.fileSize = buffer.readUInt32LE(FILE_SIZE_OFFSET);
  this.riffType = buffer.slice(RIFF_FORMAT_OFFSET, RIFF_FORMAT_OFFSET + 4).toString('utf8');
  this.subChunk1Id = buffer.slice(SUBCHUNK1_ID_OFFSET, SUBCHUNK1_ID_OFFSET + 4).toString('utf8');
  this.audioFormat = buffer.readUInt16LE(AUDIO_FORMAT_OFFSET);
  this.numberOfChannels = buffer.readUInt16LE(NUMBER_OF_CHANNELS_OFFSET);
  this.sampleRate = buffer.readUInt32LE(SAMPLE_RATE_OFFSET);
  this.bitsPerSample = buffer.readUInt16LE(BITS_PER_SAMPLE_OFFSET);
  this.subChunk2Id = buffer.slice(SUBCHUNK2_ID_OFFSET, SUBCHUNK2_ID_OFFSET + 4).toString('utf8');
  this.subChunk2Size = buffer.readUInt32LE(SUBCHUNK2_SIZE_OFFSET);
  this.data = buffer.slice(DATA_OFFSET, this.subChunk2Size + DATA_OFFSET);
}

// Andrew - The bufferMapper function is going to accept a parsed wave-file and output
// an array of values corresponding to the data subchunk in a format which can
// be accepted as input to the neural network.
const bufferMapper = parsedWave => {
  const SIXTEEN_BIT_ZERO = 32768;
  const SIXTEEN_BIT_MAX = 65535;

  parsedWave.neuralArray = [];
  for (let i = 0; i < parsedWave.data.length; i += 2) {
    const sample = parsedWave.data.readInt16LE(i);
    const unsignedSample = sample + SIXTEEN_BIT_ZERO;
    const sigmoidSample = unsignedSample / SIXTEEN_BIT_MAX;
    parsedWave.neuralArray.push(sigmoidSample);
  }

  return parsedWave;
};

module.exports = data => {
  const parsedWaveFile = new ParsedWave(data);

  if (parsedWaveFile.riff !== 'RIFF') {
    throw new TypeError('incorrect file type, must be RIFF format');
  }
  if (parsedWaveFile.fileSize > 10000000) {
    throw new TypeError('file too large, please limit file size to less than 10MB');
  }
  if (parsedWaveFile.riffType !== 'WAVE') {
    throw new TypeError('file must be a WAVE');
  }
  if (parsedWaveFile.subChunk1Id !== 'fmt ') {
    throw new TypeError('the first subchunk must be fmt');
  }
  if (parsedWaveFile.audioFormat !== 1) {
    throw new TypeError('wave file must be uncompressed linear PCM');
  }
  if (parsedWaveFile.numberOfChannels > 2) {
    throw new TypeError('wave file must have 2 or less channels');
  }
  if (parsedWaveFile.sampleRate > 48000) {
    throw new TypeError('wave file must have sample rate of less than 48k');
  }
  if (parsedWaveFile.bitsPerSample !== 16) {
    throw new TypeError(`file's bit depth must be 16`);
  }
  if (parsedWaveFile.subChunk2Id !== 'data') {
    throw new TypeError('subchunk 2 must be data');
  }

  const neuralMappedWaveFile = bufferMapper(parsedWaveFile);
  return neuralMappedWaveFile;
};
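As a rough illustration of how that module would be used from Node, here is a small sketch. It assumes the file above is saved locally as sound-data-parser.js and that the wav from the question is named response.wav; both paths are assumptions, not part of the original answer.
const fs = require('fs');
const parseWave = require('./sound-data-parser'); // assumed local path to the module above

fs.readFile('./response.wav', (err, data) => {
  if (err) throw err;
  const parsed = parseWave(data);              // throws a TypeError if the WAVE header checks fail
  console.log(parsed.sampleRate, parsed.numberOfChannels, parsed.bitsPerSample);
  console.log(parsed.neuralArray.length);      // one normalized value per 16-bit sample
});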

Using the included fs module, you can read in your wav file like so:
const fs = require('fs');
const path = './path/to/my.wav';

fs.readFile(path, (err, data) => {
  // Data is a Buffer object
});
For documentation on working with a Node.js Buffer, see here. Now, if you were more interested in the file-conversion portion, there are a couple of libraries out there. If you just need the conversion functionality and don't want to implement it yourself, node-fluent-ffmpeg may work for you. If you want to implement it yourself, this node-wav file may be a good reference (too much to paste here).
If you need to go from a Buffer to an ArrayBuffer, this SO answer shows some options.
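Tying that back to the original question, here is a minimal Node.js sketch that produces the same kind of byte-array string the FileReader code was after; it assumes response.wav sits next to the script.
const fs = require('fs');

fs.readFile('./response.wav', (err, data) => {
  if (err) throw err;
  // data is a Buffer; a Uint8Array view over the same memory gives the raw bytes
  const byteArray = new Uint8Array(data.buffer, data.byteOffset, data.byteLength);
  const dataAsByteArrayString = byteArray.toString(); // comma-separated byte values
  console.log(dataAsByteArrayString.slice(0, 80));
});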

Related

How to cut an audio file from an input in the browser with JS and lamejs?

I have an input in my Vue component where I upload an audio file via @change. Next I need to cut it; for example, from 30 seconds of audio I need to get the part from second 5 to second 15.
I have installed the lamejs package to do that.
But after all my operations I receive the cut audio, but without any sound, and I don't know what the reason is. Need help!
The method which uploads the file:
async onUploadFile(event) {
  const fileData = event.target.files[0];
  this.file = fileData;
  await this.decodeFile(fileData);
},
The method which decodes the file to an AudioBuffer:
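The snippet posted for this step duplicated the upload handler above, so here is a minimal, hypothetical sketch of what such a decode method usually looks like, assuming the Web Audio API's decodeAudioData; the method body and the this.audioBuffer property are illustrative, not from the original post.
async decodeFile(fileData) {
  // hypothetical reconstruction - read the File into an ArrayBuffer, then decode it
  const arrayBuffer = await fileData.arrayBuffer();
  const audioContext = new AudioContext();
  this.audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
},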
The method which cuts the audio, encodes it to MP3, and then produces a blob URL:
async audioBufferSlice(buffer, begin, end) {
  const audioContext = new AudioContext();
  const channels = buffer.numberOfChannels;
  const rate = buffer.sampleRate;
  const duration = buffer.duration;
  const startOffset = rate * begin;
  const endOffset = rate * end;
  const frameCount = endOffset - startOffset;
  const audioLength = endOffset - startOffset;

  let trimmedAudio = audioContext.createBuffer(
    buffer.numberOfChannels,
    audioLength,
    rate
  );

  for (var i = 0; i < buffer.numberOfChannels; i++) {
    trimmedAudio.copyToChannel(buffer.getChannelData(i).slice(begin, end), i);
  }

  var audioData = this.serializeAudioBuffer(trimmedAudio);

  let mp3Data = [];
  const sampleBlockSize = 1152;

  let mp3encoder = new lamejs.Mp3Encoder(2, audioData.sampleRate, 128);
  var left = new Int8Array(audioData.channels[0].length);
  var right = new Int8Array(audioData.channels[1].length);

  for (var i = 0; i < audioData.channels[0].length; i += sampleBlockSize) {
    var leftChunk = left.subarray(i, i + sampleBlockSize);
    var rightChunk = right.subarray(i, i + sampleBlockSize);
    var mp3buf = await mp3encoder.encodeBuffer(leftChunk, rightChunk);
    if (mp3buf.length > 0) {
      mp3Data.push(mp3buf);
    }
  }

  let buf = await mp3encoder.flush();
  if (buf.length > 0) {
    mp3Data.push(buf);
  }

  var blob = new Blob(mp3Data, {type: 'audio/mp3'});
  var url = window.URL.createObjectURL(blob);
  console.log('MP3 URl: ', url);
},
What did I do wrong, so that I receive the cut audio but without any sound?
I looked at this repository as an example: https://github.com/Vinit-Dantkale/AudioFy
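One likely cause of the silence (a hedged aside, not an answer from the thread): left and right are created as empty Int8Array buffers and never filled with the channel data, so only zeros get encoded, and lamejs expects 16-bit integer samples (Int16Array). The channel data also has to be sliced in sample frames, i.e. .slice(startOffset, endOffset), not .slice(begin, end). A sketch of the sample-conversion part, reusing the variable names from the method above:
// Sketch: convert Float32 Web Audio samples to Int16 before handing them to lamejs
const floatTo16 = (float32) => {
  const int16 = new Int16Array(float32.length);
  for (let i = 0; i < float32.length; i++) {
    const s = Math.max(-1, Math.min(1, float32[i]));
    int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
  }
  return int16;
};

const left = floatTo16(trimmedAudio.getChannelData(0));
const right = floatTo16(trimmedAudio.getChannelData(channels > 1 ? 1 : 0));
// ...then feed left.subarray(i, i + sampleBlockSize) and right.subarray(...)
// to mp3encoder.encodeBuffer() exactly as in the loop above.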

SHA256 hashing large files in Angular 6 using FileReader issue

I have a problem with SHA256 hashing. If the file size is more than 250 MB, the browser hangs and crashes.
Below is the hashing code; please help us.
let fileReader = new FileReader();
fileReader.readAsArrayBuffer(fileToSend);
fileReader.onload = (e) => {
  const hash = CrypTo.SHA256(this.arrayBufferToWordArray(fileReader.result)).toString();
  this.hashCode = hash;
  this.fileHistory.MediaHash = hash;
  this.fileHistory.FileName = fileToSend.name;
  //Insert to file history
  this.fileHistoryService.postFiles(this.fileHistory).subscribe(
    data => {
      this.hashCode = data["MediaHash"];
      this.alertService.success('HASHFILE.FileUploadSuccessMessage', true);
      this.hideGenerateHashCodeButton = true;
    },
    error => {
      this.alertService.error('COMMONERRORMESSAGE.SomethingWentWrongErrorMessage');
    });
}

arrayBufferToWordArray(fileResult) {
  var i8a = new Uint8Array(fileResult);
  var byteArray = [];
  for (var i = 0; i < i8a.length; i += 4) {
    byteArray.push(i8a[i] << 24 | i8a[i + 1] << 16 | i8a[i + 2] << 8 | i8a[i + 3]);
  }
  return CrypTo.lib.WordArray.create(byteArray, i8a.length);
}
The code below is what I tested with all the big files; it fixed the problem for me.
var hashdata = CrypTo.algo.SHA256.create();
var file = <FiletoHash>; // placeholder for the File to hash
if (file) {
  var reader = new FileReader();
  var size = file.size;
  var chunk_size = Math.pow(2, 22);
  var chunks = [];
  var offset = 0;
  var bytes = 0;

  reader.onloadend = (e) => {
    if (reader.readyState == FileReader.DONE) {
      // every chunk that is read updates the hash
      hashdata.update(this.arrayBufferToWordArray(reader.result));
      let chunk: any = reader.result;
      bytes += chunk.length;
      chunks.push(chunk);
      if (offset < size) {
        offset += chunk_size;
        var blob = file.slice(offset, offset + chunk_size);
        reader.readAsArrayBuffer(blob);
      } else {
        // use the hash below for the result
        // finally generate the hash
        var hash = hashdata.finalize().toString();
        //debugger;
      };
    }
  };

  var blob = file.slice(offset, offset + chunk_size);
  reader.readAsArrayBuffer(blob);
}

arrayBufferToWordArray(fileResult) {
  var i8a = new Uint8Array(fileResult);
  return CrypTo.lib.WordArray.create(i8a, i8a.length);
}
You should definitely use streams or something like them to avoid loading the whole file into memory.
Specifically, using CryptoJS, I have seen that it's possible to perform progressive hashing:
var sha256 = CryptoJS.algo.SHA256.create();
sha256.update("Message Part 1");
sha256.update("Message Part 2");
sha256.update("Message Part 3");

var hash = sha256.finalize();
So, use FileReader to read the file in parts, and every time you read a part, update the SHA256 until there is nothing more to read.
See :
filereader api on big files
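A compact sketch of that pattern, assuming the crypto-js package (imported here as CryptoJS; adjust the import to your setup) and a File object named file. It slices the file into 4 MB chunks, feeds each chunk into the running hash, and finalizes at the end, mirroring the chunked FileReader approach shown above.
const CryptoJS = require('crypto-js'); // assumed import; in Angular you might use: import * as CryptoJS from 'crypto-js';

function hashFileInChunks(file, chunkSize = 4 * 1024 * 1024) {
  return new Promise((resolve, reject) => {
    const sha256 = CryptoJS.algo.SHA256.create();
    const reader = new FileReader();
    let offset = 0;

    const readNext = () => reader.readAsArrayBuffer(file.slice(offset, offset + chunkSize));

    reader.onerror = () => reject(reader.error);
    reader.onload = () => {
      // feed the current chunk into the running hash
      sha256.update(CryptoJS.lib.WordArray.create(new Uint8Array(reader.result)));
      offset += chunkSize;
      if (offset < file.size) {
        readNext();                            // keep reading the next chunk
      } else {
        resolve(sha256.finalize().toString()); // hex digest of the whole file
      }
    };

    readNext();
  });
}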

Node.js: parse a JSON file, transform the input, and write to a file as a JSON array

I need to read a very big location history file, extract some data, and write it to a file as JSON data. How can I do that?
The following code doesn't generate any output.
Edit:
I expect string output in the file, because it's piped into fileOutputStream.
const fs = require('fs')
var JSONStream = require('JSONStream');
var es = require('event-stream');

const filePath = './location-history.json'
const fileOutputPath = './transform-location-history.json'

fileStream = fs.createReadStream(filePath);
fileOutputStream = fs.createWriteStream(fileOutputPath)

const transformer = (data) => {
  const location = {
    latitude: data.latitudeE7 / 10000000,
    longitude: data.longitudeE7 / 10000000
  }
  return JSON.stringify(location);
}

fileStream
  .pipe(JSONStream.parse('locations.*'))
  .pipe(es.through(transformer))
  .pipe(fileOutputStream)
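A likely reason the pipeline above writes nothing (a hedged aside, not from the thread): es.through() passes data downstream via this.emit('data', ...), not via the return value, so the transformer's returned string is discarded before it reaches fileOutputStream. A minimal adjustment would look roughly like this:
// Sketch: emit the transformed string so it flows on to fileOutputStream
const transformer = es.through(function (data) {
  const location = {
    latitude: data.latitudeE7 / 10000000,
    longitude: data.longitudeE7 / 10000000
  };
  this.emit('data', JSON.stringify(location) + '\n');
});

fileStream
  .pipe(JSONStream.parse('locations.*'))
  .pipe(transformer)
  .pipe(fileOutputStream);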
This is my solution to the problem. JSONStream parses the input file and emits JSON objects. The es.through(transformer) takes each JSON object and writes it to the file as a string. To make the output file importable in ES6, 'export default locationHistory' is added.
https://gist.github.com/tuncatunc/35e5449905159928e718d82c06bc66da
const fs = require('fs')
const JSONStream = require('JSONStream');
var es = require('event-stream');

const filePath = './location-history.json'
const fileOutputPath = './transform-location-history.js'

const fileStream = fs.createReadStream(filePath);
const fileOutputStream = fs.createWriteStream(fileOutputPath)

let index = 0;

const transformer = (data) => {
  const location = {
    latitude: data.latitudeE7 / 10000000,
    longitude: data.longitudeE7 / 10000000
  };
  let result = JSON.stringify(location) + ',';
  if (index === 0) {
    result = 'const locationHistory = [' + result
  }
  index++;
  if (index < 100)
    fileOutputStream.write(result);
}

const end = () => {
  const finish = ']; export default locationHistory\n'
  fileOutputStream.write(finish, () => {
    fileOutputStream.close()
  })
  console.log(`${index} objects are written to file`)
}

fileStream
  .pipe(JSONStream.parse('locations.*'))
  .pipe(es.through(transformer, end))
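The generated transform-location-history.js can then be consumed from ES6 code; a small usage sketch, where the import path simply assumes the output file produced by the snippet above:
import locationHistory from './transform-location-history';

console.log(locationHistory.length);
console.log(locationHistory[0]); // { latitude: ..., longitude: ... }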

How to log contents of HTML5 drag and drop file that is 60MB+ without hanging for minutes?

I have a file that I want to drop on a page and read the file contents. It's a CSV with 9 columns. My drop handler outputs the file contents like this:
function drop(ev) {
  ev.preventDefault();
  var data = ev.dataTransfer.files[0];
  var fileReader = new FileReader();
  fileReader.onload = function (e) {
    console.log(fileReader.result)
  };
  fileReader.onerror = function (e) {
    throw 'Error reading CSV file';
  };
  // Start reading file
  fileReader.readAsText(data);
  return false;
}
When I drag and drop a simple file that is a couple of kilobytes or 1 MB, I can see the contents of the file in the output. However, given a large CSV file, it takes many minutes before anything shows up. Is there a way to add some streaming so that it does not look like it's hanging?
With Screw-FileReader you can get a ReadableStream and do it in a streaming fashion:
'use strict'

var blob = new Blob(['111,222,333\naaa,bbb,ccc']) // simulate a file
var stream = blob.stream()
var reader = stream.getReader()
var headerString = ''

var forEachLine = function(row) {
  var colums = row.split(',')
  // append to DOM
  console.log(colums)
}

var pump = function() {
  return reader.read().then(function(result) {
    var value = result.value
    var done = result.done
    if (done) {
      // Do the last line
      headerString && forEachLine(headerString)
      return
    }
    for (var i = 0; i < value.length; i++) {
      // Get the character for the current iteration
      var char = String.fromCharCode(value[i])
      // Check if the char is a new line
      if (char.match(/[^\r\n]+/g) !== null) {
        // Not a new line so lets append it to
        // our header string and keep processing
        headerString += char
      } else {
        // We found a new line character
        forEachLine(headerString)
        headerString = ''
      }
    }
    return pump()
  })
}

pump().then(function() {
  console.log('done reading the csv')
})

<script src="https://cdn.rawgit.com/jimmywarting/Screw-FileReader/master/index.js"></script>
If you prefer using the old FileReader, without dependencies, pipes, or transforms:
'use strict'

var blob = new Blob(['111,222,333\naaa,bbb,ccc']) // simulate a file
var fr = new FileReader()
var headerString = ''
var position = 0

var forEachLine = function forEachLine(row) {
  var colums = row.split(',')
  // append to DOM
  console.log(colums)
}

var pump = function pump() {
  return new Promise(function(resolve) {
    var chunk = blob.slice(position, position + 524288)
    position += 524288

    fr.onload = function() {
      var value = fr.result
      var done = position >= blob.size

      for (var i = 0; i < value.length; i++) {
        var char = value[i]
        // Check if the char is a new line
        if (char.match(/[^\r\n]+/g) !== null) {
          // Not a new line so lets append it to
          // our header string and keep processing
          headerString += char
        } else {
          // We found a new line character
          forEachLine(headerString)
          headerString = ''
        }
      }

      if (done) {
        // Send the last line
        forEachLine(headerString)
        return resolve() // done
      }

      return resolve(pump())
    }

    // Read the next chunk
    fr.readAsText(chunk)
  })
}

pump().then(function() {
  console.log('done reading the csv')
})
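For reference, a similar streaming read can now be done without any library, since Blob.prototype.stream() and TextDecoderStream are built into modern browsers; a minimal sketch, not part of the original answer:
// Stream a dropped File/Blob and log each CSV row as it arrives
async function logCsvLines(file) {
  const reader = file.stream().pipeThrough(new TextDecoderStream()).getReader();
  let leftover = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    const lines = (leftover + value).split(/\r?\n/);
    leftover = lines.pop(); // the last piece may be an incomplete line
    lines.forEach(line => console.log(line.split(',')));
  }
  if (leftover) console.log(leftover.split(','));
}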

node.js: read a text file into an array. (Each line an item in the array.)

I would like to read a very, very large file into a JavaScript array in node.js.
So, if the file is like this:
first line
two
three
...
...
I would have the array:
['first line','two','three', ... , ... ]
The function would look like this:
var array = load(filename);
Therefore the idea of loading it all as a string and then splitting it is not acceptable.
Synchronous:
var fs = require('fs');
var array = fs.readFileSync('file.txt').toString().split("\n");
for(i in array) {
  console.log(array[i]);
}
Asynchronous:
var fs = require('fs');
fs.readFile('file.txt', function(err, data) {
  if(err) throw err;
  var array = data.toString().split("\n");
  for(i in array) {
    console.log(array[i]);
  }
});
If you can fit the final data into an array then wouldn't you also be able to fit it in a string and split it, as has been suggested?
In any case if you would like to process the file one line at a time you can also try something like this:
var fs = require('fs');

function readLines(input, func) {
  var remaining = '';

  input.on('data', function(data) {
    remaining += data;
    var index = remaining.indexOf('\n');
    while (index > -1) {
      var line = remaining.substring(0, index);
      remaining = remaining.substring(index + 1);
      func(line);
      index = remaining.indexOf('\n');
    }
  });

  input.on('end', function() {
    if (remaining.length > 0) {
      func(remaining);
    }
  });
}

function func(data) {
  console.log('Line: ' + data);
}

var input = fs.createReadStream('lines.txt');
readLines(input, func);
EDIT: (in response to comment by phopkins) I think (at least in newer versions) substring does not copy data but creates a special SlicedString object (from a quick glance at the v8 source code). In any case here is a modification that avoids the mentioned substring (tested on a file several megabytes worth of "All work and no play makes Jack a dull boy"):
function readLines(input, func) {
  var remaining = '';

  input.on('data', function(data) {
    remaining += data;
    var index = remaining.indexOf('\n');
    var last = 0;
    while (index > -1) {
      var line = remaining.substring(last, index);
      last = index + 1;
      func(line);
      index = remaining.indexOf('\n', last);
    }
    remaining = remaining.substring(last);
  });

  input.on('end', function() {
    if (remaining.length > 0) {
      func(remaining);
    }
  });
}
Using the Node.js readline module.
var fs = require('fs');
var readline = require('readline');

var filename = process.argv[2];

readline.createInterface({
  input: fs.createReadStream(filename),
  terminal: false
}).on('line', function(line) {
  console.log('Line: ' + line);
});
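To get the load(filename) shape from the question with this approach, you can collect the lines into an array and resolve when the interface closes; a small sketch, not part of the original answer:
const fs = require('fs');
const readline = require('readline');

function load(filename) {
  return new Promise((resolve, reject) => {
    const lines = [];
    const input = fs.createReadStream(filename);
    input.on('error', reject);
    readline.createInterface({ input, terminal: false })
      .on('line', line => lines.push(line))   // collect each line as it is emitted
      .on('close', () => resolve(lines));     // whole file read, hand back the array
  });
}

load('lines.txt').then(array => console.log(array.length));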
js:
var array = fs.readFileSync('file.txt', 'utf8').split('\n');
ts:
var array = fs.readFileSync('file.txt', 'utf8').toString().split('\n');
Essentially this will do the job: .replace(/\r\n/g,'\n').split('\n').
This works on Mac, Linux & Windows.
Code Snippets
Synchronous:
const { readFileSync } = require('fs');
const array = readFileSync('file.txt').toString().replace(/\r\n/g, '\n').split('\n');
for (let i of array) {
  console.log(i);
}
Asynchronous:
Use the fs.promises API, which provides an alternative set of asynchronous file-system methods that return Promise objects rather than using callbacks. (No need to promisify; you can use async/await with this too, available from Node.js version 10.0.0 onward.)
const { readFile } = require('fs').promises;

readFile('file.txt')
  .then(data => {
    const arr = data.toString().replace(/\r\n/g, '\n').split('\n');
    for (let i of arr) {
      console.log(i);
    }
  })
  .catch(err => { throw err; });
More about \r & \n here: \r\n, \r and \n what is the difference between them?
Use readline (documentation). Here's an example that reads a CSS file, parses it for icons and writes them to JSON:
var fs = require('fs');
var results = [];
var rl = require('readline').createInterface({
  input: fs.createReadStream('./assets/stylesheets/_icons.scss')
});

// for every new line, if it matches the regex, add it to an array
// this is ugly regex :)
rl.on('line', function (line) {
  var re = /\.icon-icon.*:/;
  var match;
  if ((match = re.exec(line)) !== null) {
    results.push(match[0].replace(".", '').replace(":", ''));
  }
});

// readline emits a close event when the file is read.
rl.on('close', function() {
  var outputFilename = './icons.json';
  fs.writeFile(outputFilename, JSON.stringify(results, null, 2), function(err) {
    if (err) {
      console.log(err);
    } else {
      console.log("JSON saved to " + outputFilename);
    }
  });
});
file.lines with my JFile package
Pseudo
var JFile=require('jfile');
var myF=new JFile("./data.txt");
myF.lines // ["first line","second line"] ....
Don't forget to install it first:
npm install jfile --save
With a BufferedReader, but the function should be asynchronous:
var load = function (file, cb) {
  var lines = [];
  new BufferedReader(file, { encoding: "utf8" })
    .on("error", function (error) {
      cb(error, null);
    })
    .on("line", function (line) {
      lines.push(line);
    })
    .on("end", function () {
      cb(null, lines);
    })
    .read();
};

load("file", function (error, lines) {
  if (error) return console.log(error);
  console.log(lines);
});
To read a big file into an array you can read it line by line or chunk by chunk.
Line by line - refer to my answer here:
var fs = require('fs'),
    es = require('event-stream');

var lines = [];

var s = fs.createReadStream('filepath')
  .pipe(es.split())
  .pipe(es.mapSync(function(line) {
    // pause the readstream
    s.pause();
    lines.push(line);
    s.resume();
  })
  .on('error', function(err) {
    console.log('Error:', err);
  })
  .on('end', function() {
    console.log('Finish reading.');
    console.log(lines);
  })
);
Chunk by chunk - refer to this article:
var lines = [];
var offset = 0;
var chunkSize = 2048;
var chunkBuffer = Buffer.alloc(chunkSize);
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;

while (bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) {
  offset += bytesRead;
  var str = chunkBuffer.slice(0, bytesRead).toString();
  var arr = str.split('\n');

  if (bytesRead == chunkSize) {
    // the last item of arr may not be a full line, leave it to the next chunk
    offset -= arr.pop().length;
  }
  lines.push(arr);
}
console.log(lines);
This is a variation on the answer above by @mtomis.
It creates a stream of lines. It emits 'data' and 'end' events, allowing you to handle the end of the stream.
var events = require('events');

var LineStream = function (input) {
  var remaining = '';

  input.on('data', function (data) {
    remaining += data;
    var index = remaining.indexOf('\n');
    var last = 0;
    while (index > -1) {
      var line = remaining.substring(last, index);
      last = index + 1;
      this.emit('data', line);
      index = remaining.indexOf('\n', last);
    }
    remaining = remaining.substring(last);
  }.bind(this));

  input.on('end', function() {
    if (remaining.length > 0) {
      this.emit('data', remaining);
    }
    this.emit('end');
  }.bind(this));
}

LineStream.prototype = new events.EventEmitter;
Use it as a wrapper:
var lineInput = new LineStream(input);

lineInput.on('data', function (line) {
  // handle line
});

lineInput.on('end', function() {
  // wrap it up
});
I just want to add to @finbarr's great answer, a little fix in the asynchronous example:
Asynchronous:
var fs = require('fs');
fs.readFile('file.txt', function(err, data) {
  if(err) throw err;
  var array = data.toString().split("\n");
  for(i in array) {
    console.log(array[i]);
  }
  done();
});
@MadPhysicist, done() is what releases the async call.
Node.js v8 or later has a new feature that converts a normal callback-style function into an async (promise-returning) function:
util.promisify
It's an awesome feature. Here's an example of parsing 10000 numbers from a txt file into an array, counting inversions using merge sort on the numbers.
// read from txt file
const util = require('util');
const fs = require('fs');
fs.readFileAsync = util.promisify(fs.readFile);

let result = [];

const parseTxt = async (csvFile) => {
  let fields, obj;
  const data = await fs.readFileAsync(csvFile);
  const str = data.toString();
  const lines = str.split('\r\n');
  // const lines = str
  console.log("lines", lines);
  // console.log("str", str)
  lines.map(line => {
    if (!line) { return null; }
    result.push(Number(line));
  });
  console.log("result", result);
  return result;
};

parseTxt('./count-inversion.txt').then(() => {
  console.log(mergeSort({ arr: result, count: 0 }));
});
I had the same problem, and I have solved it with the module line-by-line
https://www.npmjs.com/package/line-by-line
At least for me it works like a charm, both in synchronous and asynchronous mode.
Also, the problem with lines not terminating with \n can be solved with the option:
{ encoding: 'utf8', skipEmptyLines: false }
Synchronous processing of lines:
var LineByLineReader = require('line-by-line'),
    lr = new LineByLineReader('big_file.txt');

lr.on('error', function (err) {
  // 'err' contains error object
});

lr.on('line', function (line) {
  // 'line' contains the current line without the trailing newline character.
});

lr.on('end', function () {
  // All lines are read, file is closed now.
});
Another answer using an npm package. The nexline package allows one to asynchronously read a file line-by-line:
"use strict";
import fs from 'fs';
import nexline from 'nexline';
const lines = [];
const reader = nexline({
input: fs.createReadStream(`path/to/file.ext`)
});
while(true) {
const line = await reader.next();
if(line === null) break; // line is null if we reach the end
if(line.length === 0) continue; // Ignore empty lines
// Process the line here - below is just an example
lines.push(line);
}
This approach will work even if your text file is larger than the maximum allowed string length, thereby avoiding the Error: Cannot create a string longer than 0x1fffffe8 characters error.
To put each line as an item inside an array, a new function was added in Node.js v18.11.0 to read files line by line:
filehandle.readLines([options])
This is how you use it with a text file you want to read, putting each line into an array:
import { open } from 'node:fs/promises';

const arr = [];
myFileReader();

async function myFileReader() {
  const file = await open('./TextFileName.txt');
  for await (const line of file.readLines()) {
    arr.push(line);
  }
  console.log(arr);
}
To understand more, read the Node.js documentation; here is the link for the file system readLines():
https://nodejs.org/api/fs.html#filehandlereadlinesoptions
