I have some markdown files inside a /markdown folder and I am trying to read their content. I can see the file names inside the array, but when I try to read them, I get back no data and no error. What needs to be done here?
app.get("/", async(req, res) => {
const mdPath = "...path"
const data = await fs.readdirSync(mdPath);
console.log(data) // Return Array of files
for (let i = 0; i <= data.length; i++) {
const fileContent = fs.readFileSync(i, "utf-8");
return fileContent;
}
})
You should use the path module to handle the filesystem side properly. Also note two bugs in your loop: `i <= data.length` runs one step past the end of the array, and `fs.readFileSync(i, "utf-8")` is passed the loop index instead of a file path, while the `return` exits after the first iteration.
This could work your way:
const fs = require('fs') // load the Node.js fs lib
const path = require('path') // load the Node.js path lib

const mdPath = 'md' // name of the local dir

const data = fs.readdirSync(path.join(__dirname, mdPath)) // join the paths and let fs read the dir
console.log('file names', data) // returns the array of file names

for (let i = 0; i < data.length; i++) {
  console.log('file name:', data[i]) // we get each file name
  const fileContent = fs.readFileSync(path.join(__dirname, mdPath, data[i]), 'utf-8') // join dir name, md folder path and file name, and read the content
  console.log('content:\n' + fileContent) // log its content
}
I created a folder ./md containing the files one.md, two.md, and three.md. The code above logs their content just fine:
>> node .\foo.js
file names [ 'one.md', 'three.md', 'two.md' ]
file name: one.md
content:
# one
file name: three.md
content:
# three
file name: two.md
content:
# two
Note that there is no error handling here for anything that could go wrong while reading the files.
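A minimal way to add some, assuming the same mdPath setup as above, is to wrap each read in a try/catch:

for (let i = 0; i < data.length; i++) {
  try {
    const fileContent = fs.readFileSync(path.join(__dirname, mdPath, data[i]), 'utf-8')
    console.log('content:\n' + fileContent)
  } catch (err) {
    // e.g. the entry is a subdirectory, or the file vanished after readdirSync
    console.error('could not read ' + data[i] + ': ' + err.message)
  }
}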
Let's assume I have this snippet:
var filesarray = [];
if (req.files) {
  req.files.forEach(function (file) {
    var fileName = file.filename;
    var filePath = file.path;
    filesarray.push(filePath);
  });
}
And later I push it with mongoose:
DB.create({
  filepaths: filesarray,
}, function (err, res) {
  if (err) {
    throw err;
  } else {
    console.log("1 document inserted");
    DB.close();
  }
});
The result I receive is not really what I want, because in MongoDB I get a comma-separated list, like:
filepaths
/files/1540474914824.png,/files/1540474914828.png,files/1540474914831.png
I would like to have something like:
filepaths
filename -> filepath
filename -> filepath
filename -> filepath
I hope I could make clear what the goal is. I am sure there is an elegant way to reach it, so could someone please give me a direction.
Thanks,
Regards
First off, you can't have the same key more than once in an object, so {filepaths: {file: '1', file: '2'}} will not work. You will need a unique name per file/path:
var files = {};
if (req.files) {
  req.files.forEach(function (file) {
    var fileName = file.filename;
    var filePath = file.path;
    files[fileName] = filePath;
  });
}
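You can then store that object directly; a sketch, assuming DB is the model from your question and the filepaths field in your schema accepts an object (e.g. Schema.Types.Mixed):

DB.create({
  filepaths: files, // e.g. { '1540474914824.png': '/files/1540474914824.png', ... }
}, function (err, res) {
  if (err) throw err;
  console.log("1 document inserted");
});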
You could use map to get an array of objects instead, but this seems more cumbersome to me:
var files;
if (req.files) {
  files = req.files.map(function (file) {
    var fileName = file.filename;
    var filePath = file.path;
    return { [fileName]: filePath };
  });
}
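For reference, the map version produces one small object per file, e.g. (paths taken from your example above):

// files:
// [
//   { '1540474914824.png': '/files/1540474914824.png' },
//   { '1540474914828.png': '/files/1540474914828.png' }
// ]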
I am trying to create a new CSV file in a directory, and I want to store the data of a variable inside that CSV file:
handleRequest(req, res) {
  var svcReq = req.body.svcReq;
  var csvRecData = JSON.stringify(req.body);
  console.log("DATA WE ARE GETTING IS: " + csvRecData);
  if (svcReq == 'invDetails') {
    var checking = fs.writeFile('../i1/csvData/myCsvFile.csv', csvRecData, function (err) {
      if (err) throw err;
      console.log("Saved! got the file");
      console.log("Checking csvData: " + checking);
    });
  }
}
I don't see any errors in the console or terminal but the file is not generated. What is my issue?
The path in writeFile must point to the right place: a relative path like "../i1/csvData" is resolved against the directory the Node process was started from, not against your current file. First check where you actually are, using the built-in path module (it ships with Node, no install needed):
var path = require('path');
var fs = require('fs');

console.log(path.join(__dirname));

fs.writeFile(path.join(__dirname, "test123.csv"), "Sally Whittaker,2018,McCarren House,312,3.75!", function (err) {
  if (err) {
    return console.log(err);
  }
  console.log("The file was saved!");
});
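To see why the relative path misses, you can compare the directory Node was launched from with the directory of the current file (a minimal check, nothing specific to this app):

// cwd is where the node process was started, not where this file lives
console.log('cwd:       ' + process.cwd());
console.log('__dirname: ' + __dirname);

// so build absolute paths from __dirname instead of relying on "../...":
var path = require('path');
console.log(path.join(__dirname, 'csvData', 'myCsvFile.csv'));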
I am having a problem with asynchronous file read and write operations: only the last file is written to the server.
js:
function uploadassignment(req, res) {
  var path;
  var multiparty = require("multiparty");
  var form = new multiparty.Form();
  console.log(req.query);
  var filelength = req.query.filecount;
  console.log(filelength);
  form.parse(req, function (err, fields, files) {
    console.log(req.body);
    for (i = 0; i < filelength; i++) {
      var img = files.file[i];
      console.log(img);
      console.log('divide');
      var fs = require('fs');
      fs.readFile(img.path, function (err, data) {
        var originalfile = img.originalFilename.split('.');
        console.log(originalfile);
        var file_ext = originalfile[1];
        path = "public/assignments/" + img.originalFilename;
        console.log(path);
        fs.writeFile(path, data, function (error) {
          if (error) console.log(error);
        });
      });
    }
  });
};
This is a common bug caused by using a loop variable without a closure. By the time the callback for the read operation is invoked, the loop has terminated and the index i points to the last element (and hence your img contains the last file). Create a function (a closure) that accepts the index as the parameter and call this function from the loop:
function blah(i) {
  var img = files.file[i];
  console.log(img);
  console.log('divide');
  var fs = require('fs');
  fs.readFile(img.path, function (err, data) {
    var originalfile = img.originalFilename.split('.');
    console.log(originalfile);
    var file_ext = originalfile[1];
    var path = "public/assignments/" + img.originalFilename;
    console.log(path);
    fs.writeFile(path, data, function (error) {
      if (error) console.log(error);
    });
  });
}

for (i = 0; i < filelength; i++) blah(i);
This isn't quite an answer, but it is too long for a comment.
What is not working? The file reading/writing bit of your code works fine:
var fs = require("fs")
img = {
path: "./test.txt",
originalFilename: "test.txt"
}
fs.readFile(img.path, function(err, data){
if(err)console.log(err);
var originalfile = img.originalFilename.split('.');
console.log(originalfile);
var file_ext = originalfile[1];
path = "public/assignments/"+img.originalFilename;
console.log(path);
fs.writeFile(path, data, function(error){
if(error)console.log(error);
});
})
With a directory structure like:

script.js
test.txt
public/
  assignments/
I think your problem might be that you are assigning fs locally, then trying to call it from an async function. That might be why only the last one works (maybe).
Try moving var fs = require('fs'); to the top of your code.
I want to download a zip file from the internet and unzip it in memory without saving to a temporary file. How can I do this?
Here is what I tried:
var url = 'http://bdn-ak.bloomberg.com/precanned/Comdty_Calendar_Spread_Option_20120428.txt.zip';
var request = require('request'), fs = require('fs'), zlib = require('zlib');

request.get(url, function (err, res, file) {
  if (err) throw err;
  zlib.unzip(file, function (err, txt) {
    if (err) throw err;
    console.log(txt.toString()); // outputs nothing
  });
});
[EDIT]
As suggested, I tried using the adm-zip library and I still cannot make this work:
var ZipEntry = require('adm-zip/zipEntry');

request.get(url, function (err, res, zipFile) {
  if (err) throw err;
  var zip = new ZipEntry();
  zip.setCompressedData(new Buffer(zipFile.toString('utf-8')));
  var text = zip.getData();
  console.log(text.toString()); // fails
});
You need a library that can handle buffers. The latest version of adm-zip will do:
npm install adm-zip
My solution uses the http.get method, since it returns Buffer chunks.
Code:
var file_url = 'http://notepad-plus-plus.org/repository/7.x/7.6/npp.7.6.bin.x64.zip';
var AdmZip = require('adm-zip');
var http = require('http');

http.get(file_url, function (res) {
  var data = [], dataLen = 0;

  res.on('data', function (chunk) {
    data.push(chunk);
    dataLen += chunk.length;
  }).on('end', function () {
    var buf = Buffer.alloc(dataLen);

    for (var i = 0, len = data.length, pos = 0; i < len; i++) {
      data[i].copy(buf, pos);
      pos += data[i].length;
    }

    var zip = new AdmZip(buf);
    var zipEntries = zip.getEntries();
    console.log(zipEntries.length);

    for (var i = 0; i < zipEntries.length; i++) {
      if (zipEntries[i].entryName.match(/readme/))
        console.log(zip.readAsText(zipEntries[i]));
    }
  });
});
The idea is to collect an array of buffer chunks and concatenate them into a new buffer at the end. This is because buffers cannot be resized.
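As an aside, Node's Buffer.concat does the same join in one call, so the manual copy loop above could be replaced with (same data and dataLen variables):

var buf = Buffer.concat(data, dataLen); // concatenates all chunks into one new Buffer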
Update
This is a simpler solution that uses the request module to obtain the response as a buffer, by setting encoding: null in the options. It also follows redirects and resolves http/https automatically.
var file_url = 'https://github.com/mihaifm/linq/releases/download/3.1.1/linq.js-3.1.1.zip';
var AdmZip = require('adm-zip');
var request = require('request');

request.get({ url: file_url, encoding: null }, (err, res, body) => {
  var zip = new AdmZip(body);
  var zipEntries = zip.getEntries();
  console.log(zipEntries.length);

  zipEntries.forEach((entry) => {
    if (entry.entryName.match(/readme/i))
      console.log(zip.readAsText(entry));
  });
});
The body of the response is a buffer that can be passed directly to AdmZip, simplifying the whole process.
Sadly you can't pipe the response stream straight into the unzip job the way the Node zlib library lets you; you have to cache the data and wait for the end of the response. For big files, I suggest piping the response to a filesystem stream instead, otherwise you will fill up your memory in a blink!
I don't completely understand what you are trying to do, but IMHO this is the best approach: you should keep your data in memory only for the time you really need it, and then stream it to the CSV parser.
If you want to keep all your data in memory, you can replace the CSV parser method fromPath with from, which takes a buffer instead, and in getData return the unzipped content directly.
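A sketch of that in-memory variant, relying on the answer's own claim that from accepts a buffer (the names reuse the code below):

// inside getData, instead of writing a temp file, hand back the unzipped content:
Object.keys(zip.files).forEach(function (filename) {
  callback(null, zip.files[filename].data)
})

// and at the call site, feed it straight to the parser:
csv().from(unzippedContent, { delimiter: '|', columns: true })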
You can use adm-zip (as @mihai said) instead of node-zip; just pay attention, because adm-zip is not yet published on npm, so you need:
$ npm install git://github.com/cthackers/adm-zip.git
N.B. Assumption: the zip file contains only one file
var request = require('request'),
    fs = require('fs'),
    csv = require('csv'),
    NodeZip = require('node-zip');

function getData(tmpFolder, url, callback) {
  var tempZipFilePath = tmpFolder + new Date().getTime() + Math.random();
  var tempZipFileStream = fs.createWriteStream(tempZipFilePath);

  request.get({
    url: url,
    encoding: null
  }).on('end', function () {
    fs.readFile(tempZipFilePath, 'base64', function (err, zipContent) {
      var zip = new NodeZip(zipContent, { base64: true });
      Object.keys(zip.files).forEach(function (filename) {
        var tempFilePath = tmpFolder + new Date().getTime() + Math.random();
        var unzipped = zip.files[filename].data;
        fs.writeFile(tempFilePath, unzipped, function (err) {
          callback(err, tempFilePath);
        });
      });
    });
  }).pipe(tempZipFileStream);
}
getData('/tmp/', 'http://bdn-ak.bloomberg.com/precanned/Comdty_Calendar_Spread_Option_20120428.txt.zip', function (err, path) {
  if (err) {
    return console.error('error: %s', err.message);
  }
  var metadata = [];

  csv().fromPath(path, {
    delimiter: '|',
    columns: true
  }).transform(function (data) {
    // do things with your data
    if (data.NAME[0] === '#') {
      metadata.push(data.NAME);
    } else {
      return data;
    }
  }).on('data', function (data, index) {
    console.log('#%d %s', index, JSON.stringify(data, null, ' '));
  }).on('end', function (count) {
    console.log('Metadata: %s', JSON.stringify(metadata, null, ' '));
    console.log('Number of lines: %d', count);
  }).on('error', function (error) {
    console.error('csv parsing error: %s', error.message);
  });
});
If you're under macOS or Linux, you can use the unzip command to unzip from stdin.
In this example I'm reading the zip file from the filesystem into a Buffer object, but it works with a downloaded file as well:
// Get a Buffer with the zip content
var fs = require("fs")
, zip = fs.readFileSync(__dirname + "/test.zip");
// Now the actual unzipping:
var spawn = require('child_process').spawn
, fileToExtract = "test.js"
// -p tells unzip to extract to stdout
, unzip = spawn("unzip", ["-p", "/dev/stdin", fileToExtract ])
;
// Write the Buffer to stdin
unzip.stdin.write(zip);
// Handle errors
unzip.stderr.on('data', function (data) {
console.log("There has been an error: ", data.toString("utf-8"));
});
// Handle the unzipped stdout
unzip.stdout.on('data', function (data) {
console.log("Unzipped file: ", data.toString("utf-8"));
});
unzip.stdin.end();
Which is actually just the node version of:
cat test.zip | unzip -p /dev/stdin test.js
EDIT: It's worth noting that this will not work if the input zip is too big to be read in one chunk from stdin. If you need to read bigger files and your zip file contains only one file, you can use funzip instead of unzip:
var unzip = spawn("funzip");
If your zip file contains multiple files (and the file you want isn't the first one), I'm afraid you're out of luck: unzip needs to seek inside the .zip file, since zip files are just a container, and when fed a stream it may just unzip the last file in it. In that case you have to save the file temporarily (node-temp comes in handy).
Two days ago the module node-zip was released, which is a wrapper for the JavaScript-only version of Zip: JSZip.
var NodeZip = require('node-zip')
, zip = new NodeZip(zipBuffer.toString("base64"), { base64: true })
, unzipped = zip.files["your-text-file.txt"].data;
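Note that zipBuffer is assumed to already hold the raw zip bytes (and "your-text-file.txt" is a placeholder entry name); for a quick test you can obtain it the same way as in the spawn example above:

// read a local zip into a Buffer for testing
var fs = require('fs')
  , zipBuffer = fs.readFileSync(__dirname + "/test.zip");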