Reading from an input and piping its content as output - javascript

How should I proceed to create output like this in bash with Node.js?
$ echo "Hello World" > foo.txt   # create the text file
$ ./test < foo.txt               # launch the test.js script with the text file as input
Hello World                      # result
I've tried
test.js
#!/usr/bin/env node
var fs = require('fs');
fs.readFile('how to get the foo.txt path entered as input?', 'utf8', function (err, data) {
    if (err) throw err;
    var array = data.toString().split("\n");
    for (var i in array) {
        console.log(array[i]);
    }
});

The script below reads the contents of the file named on the command line (e.g. foo.txt) into the chunks variable; when the stream finishes, the contents are printed to the console.
// node thisFile.js foo.txt
// console.log(process.argv) will tell you what the CLI inputs are
var fs = require('fs');

var fileName = process.argv[2];
var input = fs.createReadStream(fileName);

var chunks = '';
input.on('data', function (data) {
    chunks += data;
});
input.on('end', function () {
    console.log(chunks);
});
Is this what you're asking?
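If you specifically want the shell redirection from the question (./test < foo.txt), a minimal sketch that reads standard input instead of a command-line argument could look like this (process.stdin is an ordinary readable stream):
#!/usr/bin/env node
// Collect everything piped in on stdin, then print it line by line
var chunks = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', function (data) {
    chunks += data;
});
process.stdin.on('end', function () {
    chunks.split("\n").forEach(function (line) {
        console.log(line);
    });
});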

Related

I want to rename a particular string in multiple filenames in nodejs

I want to rename a particular string in filenames. I am using glob and path to extract multiple file names from multiple locations. Now I just want to rename those files, e.g. abcd-change-name.js to abcd-name-changed.js.
Here's what I have done so far:
var glob = require("glob")
var path = require('path')
const fs = require('fs')
glob(process.cwd() + "/directory/**/*-change-name*.js", {}, function (er,
files) {
for(i=0; i<files.length; i++){
var f = path.basename(files[i])
var d = path.dirname(files[i])
fs.renameSync(files[i] , d + '/name-changed.js', function (err) {
if (err) throw err;
console.log('renamed complete');
});
}
})
The code is changing all the files with the js extension to name-changed.js in their respective folders.
Your code has the line fs.renameSync(files[i], d + '/name-changed.js', ...), but this line renames files[i] to '[foldername]/name-changed.js'.
I would suggest having something like fs.renameSync(files[i], files[i].replace('change-name', 'name-changed'), ...
In other words, you have told fs to rename the file to 'name-changed.js', whereas you want it to keep the original filename with 'change-name' replaced by 'name-changed'.
Here is a full code example based on your code.
var glob = require("glob")
var path = require('path')
const fs = require('fs')

glob(process.cwd() + "/directory/**/*-change-name*.js", {}, function (er, files) {
    for (var i = 0; i < files.length; i++) {
        var f = path.basename(files[i])
        var d = path.dirname(files[i])
        // fs.renameSync is synchronous: it throws on error and takes no callback
        fs.renameSync(files[i], files[i].replace('change-name', 'name-changed'))
        console.log('renamed: ' + files[i])
    }
})
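One caveat: calling replace on the full path also rewrites any directory segment that happens to contain 'change-name'. A safer variant (a sketch reusing the f and d variables the loop already computes) applies the replacement to the basename only:
var newPath = path.join(d, f.replace('change-name', 'name-changed'))
fs.renameSync(files[i], newPath)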

How can I render a static HTML file with Handlebars on a Nodejs server?

I have come across plenty of resources online for this, but haven't been able to find one that is straightforward enough for me to understand.
At the moment, I have multiple massive <script> tags in an HTML document that contain Handlebars content. The server sends this HTML document to the client, where the client then renders the page with data from an AJAX call. I'd like to move this entire process server-side, so that all the server has to do is send a static file and re-render the page when data is updated. Data changes a few times per day, which is why it isn't hard-coded; I would like to run the Handlebars compiler on the HTML document whenever the data is updated.
Is it possible to simply put the HTML document with handlebars templating in <script> tags through a function to generate a new HTML file with data filled in?
Here is the code I have within my app.js file, which is run on the Node server; it does not do what I want it to:
function registerHelpers(callback) {
    Handlebars.registerHelper('equal', function (lvalue, rvalue, options) {
        if (arguments.length < 3)
            throw new Error("Handlebars Helper equal needs 2 parameters");
        if (lvalue != rvalue) {
            return options.inverse(this);
        } else {
            return options.fn(this);
        }
    });
    Handlebars.registerHelper('trim', function (text) {
        text = text.replace(/ /g, '');
        return new Handlebars.SafeString(text);
    });
    callback();
}

function buildHomePage() {
    var source = require(__dirname + '/public/home.handlebars');
    var template = Handlebars.precompile(source);
    var collection = db.get('datalist'); //Monk call to MongoDB
    collection.find({}, function (err, docs) {
        var result = template(docs);
        console.log(result)
        var fs = require('fs');
        fs.writeFile("test.html", result, function (err) {
            if (err) {
                console.log(err);
            }
        });
    });
};

registerHelpers(buildHomePage);
The following can render Handlebars to static HTML. Run node example.js. You may need to run npm install --save handlebars first.
var fs = require('fs');
var Handlebars = require('handlebars');

function render(filename, data) {
    var source = fs.readFileSync(filename, 'utf8').toString();
    var template = Handlebars.compile(source);
    var output = template(data);
    return output;
}

var data = JSON.parse(fs.readFileSync("./data/strings.json", 'utf8'));
var result = render('./templates/somefile.html', data);
console.log(result);
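To get the static file the question asks for, you could write the rendered string to disk instead of just logging it; a minimal sketch, with home.html as a hypothetical output name:
fs.writeFileSync('home.html', result); // re-run whenever the underlying data changes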
If your handlebars templates are simple, with only string replacement, you can do this with underscore.js. Assume this example is named 'generate.js'
var fs = require('fs');
var _ = require('underscore');

_.templateSettings.interpolate = /\{\{(.+?)\}\}/g;

function render(filename, data) {
    var source = fs.readFileSync(filename, 'utf8').toString();
    var compiled = _.template(source);
    return compiled(data);
}

var data = JSON.parse(fs.readFileSync("./data/strings.json", 'utf8'));
var result = render('./templates/somefile.html', data);
console.log(result);
Then run node generate.js to output the rendered template to the console. You may need to do npm install --save underscore first.

nodejs: compare function generated JSON data to JSON file

I have a function that scans a directory and creates a JSON file with the audio files' metadata. I want it to check whether the file already exists, and only overwrite it if the data generated on the current run differs from the data written on the previous run.
This is my code:
var fs = require('fs');
var nodeID3 = require('node-id3');
var path = require('path');

var tracksPath = './public/tracks/';
var dataPath = './public/data/';

fs.readdir(tracksPath, function (err, files) {
    if (err) {
        throw err;
    }
    //Read the tracks metadata
    var tracksMetadata = [];
    files.forEach(function (trackName) {
        var trackFile = nodeID3.read(tracksPath + trackName);
        //If the track returns metadata push it to the array
        if (trackFile.title && trackFile.artist) {
            var metadata = {
                "filename": trackName,
                "title": trackFile.title,
                "artist": trackFile.artist
            };
            tracksMetadata.push(metadata);
        }
        //If no metadata is found ignore and log it to the console
        else if (trackName.charAt(0) != ".") {
            var filename = {
                "filename": trackName
            };
            tracksMetadata.push(filename);
            console.log(trackName + " doesn't have metadata. Ignoring.");
        }
        if (fs.existsSync(dataPath + "metadata.json")) {
            fs.readFile(dataPath + "metadata.json", 'utf8', function (err, data) {
                if (err) throw err;
                console.log(JSON.parse(JSON.stringify(data)));
                console.log(JSON.parse(JSON.stringify(tracksMetadata)));
                console.log(Boolean(JSON.parse(JSON.stringify(data)) == JSON.parse(JSON.stringify(tracksMetadata))));
            });
        }
    });
    fs.writeFile(path.join(dataPath, 'metadata.json'),
        JSON.stringify(tracksMetadata), 'utf8', function (err) {
            if (err) {
                throw err;
            }
            console.log("Tracks Metadata JSON created successfully");
        });
});
Right now I'm only writing to the console a Boolean that checks whether the data from the file and the data generated by the function are equal, and so far I get false.
What should I do?
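The comparison prints false because data is a string (the raw file contents) while tracksMetadata is an array: JSON.parse(JSON.stringify(x)) just round-trips each value unchanged, so == ends up comparing a string against an array. One way to compare them, sketched here with the synchronous APIs for brevity and assuming the file is always written with plain JSON.stringify as above, is to serialize the fresh data and compare the two strings before deciding whether to write:
var jsonPath = path.join(dataPath, 'metadata.json');
var fresh = JSON.stringify(tracksMetadata);
var onDisk = fs.existsSync(jsonPath) ? fs.readFileSync(jsonPath, 'utf8') : null;

if (onDisk !== fresh) {
    fs.writeFileSync(jsonPath, fresh, 'utf8'); // overwrite only when contents differ
    console.log("Tracks Metadata JSON updated");
} else {
    console.log("Tracks Metadata JSON unchanged; skipping write");
}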

Node.js convert binary file to utf8

I have a jrxml (JasperReports) file stored in a PostgreSQL database in binary format (bytea). I'm trying to read that file, convert it into a plain jrxml (XML) file, and save it to disk.
Here is what I've tried so far:
var fs = require('fs');

exports.saveFile = function (pg) {
    //pg is the postgres connection to query the db
    pg.query('Select data from data_file where id = 123', function (err, result) {
        if (err) {
            console.log(err);
            return;
        }
        var data = result.rows[0].data;
        //Buffer.isBuffer(data) === true
        // I can get the data here. Now I try to convert it into text
        var file = data.toString('utf8');
        fs.writeFile('report.jrxml', file, function (er) {
            if (er) {
                console.log('an error occurred while saving the file');
                return;
            }
            console.log('file saved');
        });
    });
}
If I run the code above, the file is saved, but its contents are still somehow binary.
How can I convert this to a plain XML file in text format that I can import into iReport, for example?
You might try going through a buffer first. I have used this technique to transform DB BLOBs into base64 strings.
var fileBuffer = new Buffer( result.rows[0].data, 'binary' );
var file = fileBuffer.toString('utf8');
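As an aside, the new Buffer(...) constructor has since been deprecated; on current Node the equivalent copy (a sketch using the same variables as above) would be:
var fileBuffer = Buffer.from(result.rows[0].data); // copies the bytea Buffer returned by pg
var file = fileBuffer.toString('utf8');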
I use the 'pako' npm package to resolve that issue:
import { connection, Message } from 'websocket';
import * as pako from 'pako';

protected async onCustomMessage(message: Message, con): Promise<void> {
    let data;
    let text;
    if (message.type === 'utf8') {
        // console.log("Received UTF8: '" + message.utf8Data + "'");
        text = message.utf8Data;
        data = JSON.parse(text);
    } else {
        const binary = message.binaryData;
        text = pako.inflate(binary, {
            to: 'string',
        });
        data = JSON.parse(text);
    }
}
npm i pako && npm i -D @types/pako

How to download and unzip a zip file in memory in NodeJs?

I want to download a zip file from the internet and unzip it in memory without saving to a temporary file. How can I do this?
Here is what I tried:
var url = 'http://bdn-ak.bloomberg.com/precanned/Comdty_Calendar_Spread_Option_20120428.txt.zip';
var request = require('request'), fs = require('fs'), zlib = require('zlib');

request.get(url, function (err, res, file) {
    if (err) throw err;
    zlib.unzip(file, function (err, txt) {
        if (err) throw err;
        console.log(txt.toString()); //outputs nothing
    });
});
[EDIT]
As suggested, I tried using the adm-zip library, and I still cannot make this work:
var ZipEntry = require('adm-zip/zipEntry');

request.get(url, function (err, res, zipFile) {
    if (err) throw err;
    var zip = new ZipEntry();
    zip.setCompressedData(new Buffer(zipFile.toString('utf-8')));
    var text = zip.getData();
    console.log(text.toString()); // fails
});
You need a library that can handle buffers. The latest version of adm-zip will do:
npm install adm-zip
My solution uses the http.get method, since it returns Buffer chunks.
Code:
var file_url = 'http://notepad-plus-plus.org/repository/7.x/7.6/npp.7.6.bin.x64.zip';
var AdmZip = require('adm-zip');
var http = require('http');

http.get(file_url, function (res) {
    var data = [], dataLen = 0;

    res.on('data', function (chunk) {
        data.push(chunk);
        dataLen += chunk.length;
    }).on('end', function () {
        var buf = Buffer.alloc(dataLen);
        for (var i = 0, len = data.length, pos = 0; i < len; i++) {
            data[i].copy(buf, pos);
            pos += data[i].length;
        }

        var zip = new AdmZip(buf);
        var zipEntries = zip.getEntries();
        console.log(zipEntries.length);

        for (var i = 0; i < zipEntries.length; i++) {
            if (zipEntries[i].entryName.match(/readme/))
                console.log(zip.readAsText(zipEntries[i]));
        }
    });
});
The idea is to create an array of buffers and concatenate them into a new one at the end. This is due to the fact that buffers cannot be resized.
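For what it's worth, Node's built-in Buffer.concat performs the same chunk concatenation in one call, so the copy loop above could be shortened to:
var buf = Buffer.concat(data, dataLen); // data is the array of chunks collected above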
Update
This is a simpler solution that uses the request module to obtain the response in a buffer, by setting encoding: null in the options. It also follows redirects and resolves http/https automatically.
var file_url = 'https://github.com/mihaifm/linq/releases/download/3.1.1/linq.js-3.1.1.zip';
var AdmZip = require('adm-zip');
var request = require('request');

request.get({url: file_url, encoding: null}, (err, res, body) => {
    var zip = new AdmZip(body);
    var zipEntries = zip.getEntries();
    console.log(zipEntries.length);

    zipEntries.forEach((entry) => {
        if (entry.entryName.match(/readme/i))
            console.log(zip.readAsText(entry));
    });
});
The body of the response is a buffer that can be passed directly to AdmZip, simplifying the whole process.
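Note that the request module has since been deprecated; on Node 18 or later (an assumption about your runtime), the built-in fetch can collect the body into a Buffer the same way:
const AdmZip = require('adm-zip');

async function downloadZip(file_url) {
    const res = await fetch(file_url);                // global fetch, Node >= 18
    const buf = Buffer.from(await res.arrayBuffer()); // whole response body as one Buffer
    return new AdmZip(buf);
}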
Sadly you can't pipe the response stream into the unzip job the way the node zlib library lets you; you have to cache it and wait for the end of the response. For big files I suggest piping the response to an fs stream, otherwise you will fill up your memory in a blink!
I don't completely understand what you are trying to do, but IMHO this is the best approach. You should keep your data in memory only for the time you really need it, and then stream it to the csv parser.
If you want to keep all your data in memory, you can replace the csv parser method fromPath with from, which takes a buffer instead, and in getData return the unzipped content directly.
You can use adm-zip (as @mihai said) instead of node-zip; just pay attention, because adm-zip is not yet published on npm, so you need:
$ npm install git://github.com/cthackers/adm-zip.git
N.B. Assumption: the zip file contains only one file
var request = require('request'),
    fs = require('fs'),
    csv = require('csv'),
    NodeZip = require('node-zip')

function getData(tmpFolder, url, callback) {
    var tempZipFilePath = tmpFolder + new Date().getTime() + Math.random()
    var tempZipFileStream = fs.createWriteStream(tempZipFilePath)

    request.get({
        url: url,
        encoding: null
    }).on('end', function () {
        fs.readFile(tempZipFilePath, 'base64', function (err, zipContent) {
            var zip = new NodeZip(zipContent, { base64: true })
            Object.keys(zip.files).forEach(function (filename) {
                var tempFilePath = tmpFolder + new Date().getTime() + Math.random()
                var unzipped = zip.files[filename].data
                fs.writeFile(tempFilePath, unzipped, function (err) {
                    callback(err, tempFilePath)
                })
            })
        })
    }).pipe(tempZipFileStream)
}

getData('/tmp/', 'http://bdn-ak.bloomberg.com/precanned/Comdty_Calendar_Spread_Option_20120428.txt.zip', function (err, path) {
    if (err) {
        return console.error('error: %s', err.message)
    }
    var metadata = []

    csv().fromPath(path, {
        delimiter: '|',
        columns: true
    }).transform(function (data) {
        // do things with your data
        if (data.NAME[0] === '#') {
            metadata.push(data.NAME)
        } else {
            return data
        }
    }).on('data', function (data, index) {
        console.log('#%d %s', index, JSON.stringify(data, null, ' '))
    }).on('end', function (count) {
        console.log('Metadata: %s', JSON.stringify(metadata, null, ' '))
        console.log('Number of lines: %d', count)
    }).on('error', function (error) {
        console.error('csv parsing error: %s', error.message)
    })
})
If you're under macOS or Linux, you can use the unzip command to unzip from stdin. In this example I'm reading the zip file from the filesystem into a Buffer object, but it works with a downloaded file as well:
// Get a Buffer with the zip content
var fs = require("fs"),
    zip = fs.readFileSync(__dirname + "/test.zip");

// Now the actual unzipping:
var spawn = require('child_process').spawn,
    fileToExtract = "test.js",
    // -p tells unzip to extract to stdout
    unzip = spawn("unzip", ["-p", "/dev/stdin", fileToExtract]);

// Write the Buffer to stdin
unzip.stdin.write(zip);

// Handle errors
unzip.stderr.on('data', function (data) {
    console.log("There has been an error: ", data.toString("utf-8"));
});

// Handle the unzipped stdout
unzip.stdout.on('data', function (data) {
    console.log("Unzipped file: ", data.toString("utf-8"));
});

unzip.stdin.end();
Which is actually just the node version of:
cat test.zip | unzip -p /dev/stdin test.js
EDIT: It's worth noting that this will not work if the input zip is too big to be read in one chunk from stdin. If you need to read bigger files, and your zip file contains only one file, you can use funzip instead of unzip:
var unzip = spawn("funzip");
If your zip file contains multiple files (and the file you want isn't the first one) I'm afraid to say you're out of luck. Unzip needs to seek in the .zip file since zip files are just a container, and unzip may just unzip the last file in it. In that case you have to save the file temporarily (node-temp comes in handy).
Two days ago the module node-zip was released; it is a wrapper for the JavaScript-only version of Zip: JSZip.
var NodeZip = require('node-zip')
, zip = new NodeZip(zipBuffer.toString("base64"), { base64: true })
, unzipped = zip.files["your-text-file.txt"].data;
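node-zip wraps an old JSZip release; with current JSZip (version 3, an assumption about your setup), the same read is promise-based and accepts the Buffer directly:
const JSZip = require('jszip');

JSZip.loadAsync(zipBuffer)                                                    // accepts a Buffer directly
    .then(function (zip) { return zip.file('your-text-file.txt').async('string'); })
    .then(function (unzipped) { console.log(unzipped); });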
