How do I make the newman library take the CSV files sequentially? - javascript

This is my code
var fs = require('fs'),
    path = require('path'),
    cheerio = require('cheerio'),
    newman = require('newman'),
    os = require("os");

const directoryPath = path.join(__dirname, './payload');

fs.readdir(directoryPath, function (err, files) {
    if (err) {
        return console.log('Unable to scan directory: ' + err);
    }
    files.forEach(function (file) {
        console.log("Files Read");
        runNewman(file);
    });
});

function runNewman(data) {
    let logs = [];
    var csvFile = "payload/" + data;
    //var logFile = "payload/" + data + ".txt";
    newman.run({
        collection: require('./kmap_testing.postman_collection.json'),
        environment: require('./kmap.postman_environment.json'),
        globals: require('./My Workspace.postman_globals.json'),
        reporters: 'cli',
        iterationData: csvFile
    }).on('start', function (err, args) {
        console.log('start');
    }).on('console', function (err, args) {
        if (err) { return; }
        logs.push(args.messages);
    }).on('done', function (err, summary) {
        if (err || summary.error) {
            console.error('collection run encountered an error');
        } else {
            console.log('collection run completed');
        }
        fs.appendFileSync("results.csv", logs.join("\r\n"));
    });
}
I have a huge CSV file with more than 100K rows, which I have split into files of 5K rows each and saved under the payload folder. However, newman.run picks the files up in arbitrary order and runs them in parallel. The results.csv file ends up with duplicated runs and more than 200K results. Can someone please help me with this? I am a beginner with the newman library.
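One way to force sequential execution (a minimal sketch, not a tested fix) is to start each run only from the previous run's completion callback instead of kicking them all off with forEach. The collection, environment, and globals paths below are the same ones used in the question:
// Minimal sketch: chain the runs so the next CSV file starts only after
// the previous newman run has finished.
fs.readdir(directoryPath, function (err, files) {
    if (err) {
        return console.log('Unable to scan directory: ' + err);
    }
    (function runNext(i) {
        if (i >= files.length) {
            return console.log('All files processed');
        }
        newman.run({
            collection: require('./kmap_testing.postman_collection.json'),
            environment: require('./kmap.postman_environment.json'),
            globals: require('./My Workspace.postman_globals.json'),
            reporters: 'cli',
            iterationData: 'payload/' + files[i]
        }, function (err, summary) {
            if (err || summary.error) {
                console.error('collection run encountered an error');
            }
            runNext(i + 1); // only now move on to the next file
        });
    })(0);
});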

Related

Error while downloading multiple files from AWS S3 using Nodejs

I am getting the error "File download failed with error write after end" while trying to download multiple files from AWS S3 using the code snippet below.
Can someone help me figure out the root cause of the error? It would be great if anyone could suggest a solution as well.
The code below works without any issues if only a single file needs to be downloaded.
var AWS = require('aws-sdk');
var fs = require('fs');

module.exports.download = function (req, res) {
    var S3_BUCKET = 'mybucketname';
    var s3 = new AWS.S3({
        accessKeyId: process.env.ACCESSKEY,
        secretAccessKey: process.env.SECRETKEY,
        region: process.env.REGION
    });
    var os = require('os');
    var filenames = "file1.jpg,file2.jpg";
    var str_array = filenames.split(',');
    for (var i = 0; i < str_array.length; i++) {
        var filename = str_array[i].trim();
        localFileName = os.homedir() + "\\" + "Downloads" + "\\" + filename,
        file = fs.createWriteStream(localFileName);
        s3.getObject({
            Bucket: S3_BUCKET,
            Key: filename
        })
        .on('error', function (err) {
            res.end("File download failed with error " + err.message);
        })
        .on('httpData', function (chunk) {
            file.write(chunk);
        })
        .on('httpDone', function () {
            file.end();
        })
        .send();
    }
    res.end("Files have been downloaded successfully");
};
It is better to use the pipe function rather than handling the stream events manually.
s3.getObject(options)
.createReadStream()
.on('error', e => ...)
.pipe(file)
.on('error', e => ...);
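Applied to the loop above, a minimal sketch might look like this (it assumes the same S3_BUCKET, str_array, s3, and res as in the question, and responds only once, after every stream has closed, which avoids the "write after end" error):
var path = require('path');
var os = require('os');

var pending = str_array.length;
str_array.forEach(function (name) {
    var filename = name.trim();
    var file = fs.createWriteStream(path.join(os.homedir(), 'Downloads', filename));
    s3.getObject({ Bucket: S3_BUCKET, Key: filename })
        .createReadStream()
        .on('error', function (err) {
            console.error('File download failed with error ' + err.message);
        })
        .pipe(file)
        .on('close', function () {
            // respond once, after the last file has been written
            if (--pending === 0) res.end('Files have been downloaded successfully');
        });
});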

Trying to create a new file by copying data of a variable

I am trying to create a new CSV file in a directory and store the data of a variable inside that CSV file:
handleRequest(req, res) {
    var svcReq = req.body.svcReq;
    var csvRecData = JSON.stringify(req.body);
    console.log("DATA WE ARE GETTING IS: " + csvRecData);
    if (svcReq == 'invDetails') {
        var checking = fs.writeFile('../i1/csvData/myCsvFile.csv', csvRecData, function (err) {
            if (err) throw err;
            console.log("Saved! got the file");
            console.log("Checking csvData: " + checking);
        });
    }
}
I don't see any errors in the console or terminal, but the file is not generated. What is my issue?
The path in writeFile must point to the right place; you cannot simply use "../i1/csvData" relative to your current file. First check your current directory using the path module (a core Node module, so nothing needs to be installed):
var path = require('path');
var fs = require('fs');

console.log(path.join(__dirname));
fs.writeFile(path.join(__dirname, "test123.csv"), "Sally Whittaker,2018,McCarren House,312,3.75!", function (err) {
    if (err) {
        return console.log(err);
    }
    console.log("The file was saved!");
});
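If the target really is a sibling directory such as ../i1/csvData, a minimal sketch (the directory and variable names are taken from the question; adjust to your layout) is to resolve it from __dirname and create the directory before writing:
var path = require('path');
var fs = require('fs');

// Resolve the directory relative to this file, not the process working directory
var targetDir = path.resolve(__dirname, '../i1/csvData');

// Create the directory tree if it does not exist yet (Node 10+)
fs.mkdir(targetDir, { recursive: true }, function (err) {
    if (err) return console.log(err);
    // csvRecData is the variable from the question's handleRequest
    fs.writeFile(path.join(targetDir, 'myCsvFile.csv'), csvRecData, function (err) {
        if (err) return console.log(err);
        console.log('Saved! got the file');
    });
});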

Zip application from the current folder

I have a Node.js app and I need to zip the whole current folder from the command line
and get the zip in the root.
For that I want to use the archiver npm package, but I don't understand the following:
where I put the current folder (since I want to zip the whole application)
where I should put the name of the zip (the zip that should be created when the command is executed)
My app has the following structure:
MyApp
node_modules
server.js
app.js
package.json
arc.js
In arc.js I've put all the zip logic, so I guess I need to provide the zipPath (which in my case is './')
and a zip name like myZip…
I tried the following without success, any ideas?
var fs = require('fs');
var archiver = require('archiver');

// create a file to stream archive data to.
var output = fs.createWriteStream(__dirname + '/.');
var archive = archiver('zip', {
    zlib: { level: 9 } // Sets the compression level.
});

// listen for all archive data to be written
output.on('close', function () {
    console.log(archive.pointer() + ' total bytes');
    console.log('archiver has been finalized and the output file descriptor has closed.');
});

archive.on('warning', function (err) {
    if (err.code === 'ENOENT') {
        // log warning
    } else {
        // throw error
        throw err;
    }
});

// good practice to catch this error explicitly
archive.on('error', function (err) {
    throw err;
});

// pipe archive data to the file
archive.pipe(output);
I need to be able to open a command line in the app folder (folder -> myApp), run the arc script, and have the zipped file created under the current path (which is the root in this case).
You can use the glob method, but make sure to exclude *.zip files. Otherwise the zip file itself will be part of the archive.
Here is an example:
// require modules
var fs = require('fs');
var archiver = require('archiver');

// create a file to stream archive data to.
var output = fs.createWriteStream(__dirname + '/example.zip');
var archive = archiver('zip', {
    zlib: { level: 9 } // Sets the compression level.
});

// listen for all archive data to be written
output.on('close', function () {
    console.log(archive.pointer() + ' total bytes');
    console.log('archiver has been finalized and the output file descriptor has closed.');
});

// good practice to catch warnings (ie stat failures and other non-blocking errors)
archive.on('warning', function (err) {
    if (err.code === 'ENOENT') {
        // log warning
    } else {
        // throw error
        throw err;
    }
});

// good practice to catch this error explicitly
archive.on('error', function (err) {
    throw err;
});

// pipe archive data to the file
archive.pipe(output);

archive.glob('**/*', { ignore: ['*.zip'] });
archive.finalize();
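Run this from the app folder with node arc.js and example.zip will be created next to the script, since the output path is built from __dirname.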
In the GitHub repo of node-archiver there is an examples folder.
where I put the current folder (since I want to zip all the application)
Example:
var file1 = __dirname + '/fixtures/file1.txt';
var file2 = __dirname + '/fixtures/file2.txt';
archive
    .append(fs.createReadStream(file1), { name: 'file1.txt' })
    .append(fs.createReadStream(file2), { name: 'file2.txt' })
    .finalize();
For your specific case of zipping a directory, you can use archiver's .directory() method.
where should I put the name of the zip (the zip that should be created when executing the command)
Example:
var output = fs.createWriteStream(__dirname + '/example-output.zip');
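A minimal sketch of the .directory() call mentioned above (the paths are illustrative; the false argument puts the folder's contents at the root of the archive):
// Add the whole application folder to the archive, then finalize it
archive.directory(__dirname + '/', false);
archive.finalize();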

How to recursively read file in a tree like directory structure in node js

I have a root directory, say "A". Inside this directory I have lots of directories, say "1", "2", "3", "4", "5", ... and in all these subdirectories I have a single file called cucumber.json. All I want to do is read each cucumber.json file and get the accumulated result. How can I achieve this using Node.js?
For example, my root directory is "cucumber" and inside it I have lots of subdirectories, each containing a single file named cucumber.json.
Are there any dedicated node packages which can make my work easy?
Let me know if any further info is required.
Hi there, please try the following (javascript):
// Require filesystem package for IO operations
var fs = require('fs');

// Put the path you are looking for here
var path = "d:\\nodef";

// Call the function defined below
recursiveloop(path, function (err, result) {
    /* begin processing of each result */
    // For each file in the array
    for (var i = 0; i < result.length; i++) {
        // Write the name of the file
        console.log('Processing: ' + result[i]);
        // Read the file
        fs.readFile(result[i], 'utf8', function (err, data) {
            // If there is an error, notify the console
            if (err) console.log('Error: ' + err);
            // Parse the json object
            var obj = JSON.parse(data);
            // Print out contents
            console.log('Name: ' + obj.name);
            console.log('Position: ' + obj.position);
        });
    }
});
// Asynchronous function to read folders and files recursively
function recursiveloop(dir, done) {
    var results = [];
    fs.readdir(dir, function (err, list) {
        if (err) return done(err);
        var i = 0;
        (function next() {
            var file = list[i++];
            if (!file) return done(null, results);
            file = dir + '/' + file;
            fs.stat(file, function (err, stat) {
                if (stat && stat.isDirectory()) {
                    recursiveloop(file, function (err, res) {
                        results = results.concat(res);
                        next();
                    });
                } else {
                    results.push(file);
                    next();
                }
            });
        })();
    });
}
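As for a dedicated package, the glob module can shortcut the manual recursion. A minimal sketch using glob's classic callback API (it assumes the root folder is named cucumber as in the question, and that each cucumber.json parses to an array, which is what Cucumber's JSON reporter produces):
// npm install glob
var glob = require('glob');
var fs = require('fs');

glob('cucumber/**/cucumber.json', function (err, files) {
    if (err) return console.error(err);
    var accumulated = [];
    files.forEach(function (file) {
        // Each report is an array of feature results; merge them all
        accumulated = accumulated.concat(JSON.parse(fs.readFileSync(file, 'utf8')));
    });
    console.log('Accumulated ' + accumulated.length + ' feature results');
});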

How to download and unzip a zip file in memory in NodeJs?

I want to download a zip file from the internet and unzip it in memory without saving to a temporary file. How can I do this?
Here is what I tried:
var url = 'http://bdn-ak.bloomberg.com/precanned/Comdty_Calendar_Spread_Option_20120428.txt.zip';
var request = require('request'), fs = require('fs'), zlib = require('zlib');

request.get(url, function (err, res, file) {
    if (err) throw err;
    zlib.unzip(file, function (err, txt) {
        if (err) throw err;
        console.log(txt.toString()); // outputs nothing
    });
});
[EDIT]
As suggested, I tried using the adm-zip library and I still cannot make this work:
var ZipEntry = require('adm-zip/zipEntry');
request.get(url, function (err, res, zipFile) {
    if (err) throw err;
    var zip = new ZipEntry();
    zip.setCompressedData(new Buffer(zipFile.toString('utf-8')));
    var text = zip.getData();
    console.log(text.toString()); // fails
});
You need a library that can handle buffers. The latest version of adm-zip will do:
npm install adm-zip
My solution uses the http.get method, since it returns Buffer chunks.
Code:
var file_url = 'http://notepad-plus-plus.org/repository/7.x/7.6/npp.7.6.bin.x64.zip';
var AdmZip = require('adm-zip');
var http = require('http');

http.get(file_url, function (res) {
    var data = [], dataLen = 0;
    res.on('data', function (chunk) {
        data.push(chunk);
        dataLen += chunk.length;
    }).on('end', function () {
        var buf = Buffer.alloc(dataLen);
        for (var i = 0, len = data.length, pos = 0; i < len; i++) {
            data[i].copy(buf, pos);
            pos += data[i].length;
        }
        var zip = new AdmZip(buf);
        var zipEntries = zip.getEntries();
        console.log(zipEntries.length);
        for (var i = 0; i < zipEntries.length; i++) {
            if (zipEntries[i].entryName.match(/readme/))
                console.log(zip.readAsText(zipEntries[i]));
        }
    });
});
The idea is to create an array of buffers and concatenate them into a new one at the end. This is due to the fact that buffers cannot be resized.
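For what it's worth, Buffer.concat performs the same accumulation in a single call, so the manual copy loop could be replaced with:
// Concatenate all collected chunks into one buffer of the known total length
var buf = Buffer.concat(data, dataLen);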
Update
This is a simpler solution that uses the request module to obtain the response in a buffer, by setting encoding: null in the options. It also follows redirects and resolves http/https automatically.
var file_url = 'https://github.com/mihaifm/linq/releases/download/3.1.1/linq.js-3.1.1.zip';
var AdmZip = require('adm-zip');
var request = require('request');

request.get({ url: file_url, encoding: null }, (err, res, body) => {
    var zip = new AdmZip(body);
    var zipEntries = zip.getEntries();
    console.log(zipEntries.length);
    zipEntries.forEach((entry) => {
        if (entry.entryName.match(/readme/i))
            console.log(zip.readAsText(entry));
    });
});
The body of the response is a buffer that can be passed directly to AdmZip, simplifying the whole process.
Sadly you can't pipe the response stream straight into the unzip job the way the Node zlib library allows; you have to cache it and wait for the end of the response. For big files I suggest piping the response to an fs stream instead, otherwise you will fill up your memory in a blink!
I don't completely understand what you are trying to do, but IMHO this is the best approach. You should keep your data in memory only for the time you really need it, and then stream it to the CSV parser.
If you want to keep all your data in memory, you can replace the CSV parser method fromPath with from, which takes a buffer instead, and in getData return unzipped directly.
You can use adm-zip (as @mihai said) instead of node-zip; just pay attention, because adm-zip is not yet published on npm, so you need:
$ npm install git://github.com/cthackers/adm-zip.git
N.B. Assumption: the zip file contains only one file
var request = require('request'),
    fs = require('fs'),
    csv = require('csv'),
    NodeZip = require('node-zip')

function getData(tmpFolder, url, callback) {
    var tempZipFilePath = tmpFolder + new Date().getTime() + Math.random()
    var tempZipFileStream = fs.createWriteStream(tempZipFilePath)
    request.get({
        url: url,
        encoding: null
    }).on('end', function () {
        fs.readFile(tempZipFilePath, 'base64', function (err, zipContent) {
            var zip = new NodeZip(zipContent, { base64: true })
            Object.keys(zip.files).forEach(function (filename) {
                var tempFilePath = tmpFolder + new Date().getTime() + Math.random()
                var unzipped = zip.files[filename].data
                fs.writeFile(tempFilePath, unzipped, function (err) {
                    callback(err, tempFilePath)
                })
            })
        })
    }).pipe(tempZipFileStream)
}
getData('/tmp/', 'http://bdn-ak.bloomberg.com/precanned/Comdty_Calendar_Spread_Option_20120428.txt.zip', function (err, path) {
    if (err) {
        return console.error('error: %s', err.message)
    }
    var metadata = []
    csv().fromPath(path, {
        delimiter: '|',
        columns: true
    }).transform(function (data) {
        // do things with your data
        if (data.NAME[0] === '#') {
            metadata.push(data.NAME)
        } else {
            return data
        }
    }).on('data', function (data, index) {
        console.log('#%d %s', index, JSON.stringify(data, null, ' '))
    }).on('end', function (count) {
        console.log('Metadata: %s', JSON.stringify(metadata, null, ' '))
        console.log('Number of lines: %d', count)
    }).on('error', function (error) {
        console.error('csv parsing error: %s', error.message)
    })
})
If you're on macOS or Linux, you can use the unzip command to unzip from stdin.
In this example I'm reading the zip file from the filesystem into a Buffer object, but it works with a downloaded file as well:
// Get a Buffer with the zip content
var fs = require("fs")
  , zip = fs.readFileSync(__dirname + "/test.zip");

// Now the actual unzipping:
var spawn = require('child_process').spawn
  , fileToExtract = "test.js"
  // -p tells unzip to extract to stdout
  , unzip = spawn("unzip", ["-p", "/dev/stdin", fileToExtract]);

// Write the Buffer to stdin
unzip.stdin.write(zip);

// Handle errors
unzip.stderr.on('data', function (data) {
    console.log("There has been an error: ", data.toString("utf-8"));
});

// Handle the unzipped stdout
unzip.stdout.on('data', function (data) {
    console.log("Unzipped file: ", data.toString("utf-8"));
});

unzip.stdin.end();
Which is actually just the node version of:
cat test.zip | unzip -p /dev/stdin test.js
EDIT: It's worth noting that this will not work if the input zip is too big to be read in one chunk from stdin. If you need to read bigger files, and your zip file contains only one file, you can use funzip instead of unzip:
var unzip = spawn("funzip");
If your zip file contains multiple files (and the file you want isn't the first one) I'm afraid to say you're out of luck. Unzip needs to seek in the .zip file since zip files are just a container, and unzip may just unzip the last file in it. In that case you have to save the file temporarily (node-temp comes in handy).
Two days ago the module node-zip was released, which is a wrapper for the JavaScript-only version of Zip: JSZip.
var NodeZip = require('node-zip')
  , zip = new NodeZip(zipBuffer.toString("base64"), { base64: true })
  , unzipped = zip.files["your-text-file.txt"].data;
