Passing argument array to CasperJS from node.js - javascript

I am using CasperJS to test a website. Part of the test is a resource check.
What I want to do:
Pass an array or an array of objects to CasperJS and iterate through them.
The first step is a single array, then an array of objects; both have the same issue.
Node.js code:
require('child_process').exec('/usr/local/bin/casperjs script.js [url,regex]', function (err, stdout, stderr) {
    err && console.log(err);
    stderr && console.log(stderr.toString());
    stdout && console.log(stdout.toString());
});
CasperJS script:
var casper = require('casper').create(),
    a = casper.cli.args[0],
    // we need something here to turn the string into a JS array
    w = a[0],
    r = a[1];
casper.start(w, function () {
    if (this.resourceExists(r)) {
        console.log("PASS\t" + r + "\t" + w);
    } else {
        console.log("FAIL\t" + r + "\t" + w);
    }
});
casper.run();
The problem is that CasperJS receives the arguments as a single string.

When you call it like this:
'/usr/local/bin/casperjs script.js "[\''+yourURL+'\',\''+yourRegex+'\']"'
you can simply use
a = JSON.parse(casper.cli.args[0]),
w = a[0],
r = new RegExp(a[1]);
If casper.cli.args[0] is actually JSON, then it can be parsed as such. resourceExists() takes regular expressions only as RegExp objects.
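To sidestep the shell-quoting dance entirely, here is a sketch (not from the original answer) using child_process.execFile, which passes arguments without a shell, assuming the casperjs launcher is directly executable:
require('child_process').execFile(
    '/usr/local/bin/casperjs',
    // arguments go in an array, so no shell escaping is needed
    ['script.js', JSON.stringify([yourURL, yourRegex])],
    function (err, stdout, stderr) {
        err && console.log(err);
        stderr && console.log(stderr.toString());
        stdout && console.log(stdout.toString());
    }
);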
If the data that you pass gets too long, a better way is to write it to a temporary file with node.js' fs module and read it with PhantomJS' fs module, parsing it along the way.
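A minimal sketch of that temp-file round trip (the file path is illustrative). On the node.js side:
var fs = require('fs');
// write the arguments as JSON to a file that both processes can see
fs.writeFileSync('/tmp/casper-args.json', JSON.stringify([yourURL, yourRegex]));
// then run: casperjs script.js /tmp/casper-args.json
And on the CasperJS side:
var fs = require('fs'); // inside CasperJS this resolves to PhantomJS' fs module
var a = JSON.parse(fs.read(casper.cli.args[0])), // the temp file path is passed as the argument
    w = a[0],
    r = new RegExp(a[1]);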

A better approach to the problem:
https://groups.google.com/forum/#!topic/casperjs/bhA81OyHA7s
Passing variables as options works like a charm.
My final node.js code:
var exec = require('child_process'),
    array = [
        {url: "", regex: ""}
    ];
for (var i = 0; i < array.length; i++) {
    var url = array[i]["url"];
    var regex = array[i]["regex"];
    exec.exec('/usr/local/bin/casperjs casper2.js --url="' + url + '" --regex="' + regex + '"', function (err, stdout, stderr) {
        err && console.log(err);
        stderr && console.log(stderr.toString());
        stdout && console.log(stdout.toString());
    });
}
In CasperJS:
var w = casper.cli.get("url"),
    reg = casper.cli.get("regex"),
    rpart = reg.split("/"), // "/pattern/flags" splits into ["", "pattern", "flags"]
    r = new RegExp(rpart[1], rpart[2]);
casper.start(w, function () {
    if (this.resourceExists(r)) {
        console.log("PASS\t" + r + "\t" + w);
    } else {
        console.log("FAIL\t" + r + "\t" + w);
    }
});
casper.run();
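For example, a hypothetical entry like {url: "http://example.com", regex: "/jquery/i"} in the node.js array produces this command, and the split("/") above turns the option string back into a RegExp with its flags:
/usr/local/bin/casperjs casper2.js --url="http://example.com" --regex="/jquery/i"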

Related

events.js:142 error in node.js

I have a JavaScript file that finds all the directories matching a parameter.
And I got this error:
My code:
function getUserHome() {
    return process.env[(process.platform == 'win32') ? 'USERPROFILE' : 'HOME'];
}
var home_path = getUserHome();
var findit = require('findit'),
    path = require('path'),
    finder = findit(path.resolve(home_path));
var myArgs = process.argv.slice(2)[0];
var filter1 = new RegExp(myArgs);
// This listens for directories found
finder.on('directory', function (dir) {
    var directories = dir.split('\\');
    var last = directories[directories.length - 1].toLowerCase();
    if (filter1.test(last)) {
        console.log('Directory: ' + dir);
    }
});
(My code is a mess, I will clean it up later.)
How do I fix this?
Why didn't you use the fs module from Node and look for directories recursively? I think the error is probably in the findit module...
Take a look at https://nodejs.org/api/fs.html#fs_fs_readdir_path_callback or try https://www.npmjs.com/package/recursive-readdir instead, which also does this. I think the things you use from the findit module will be available there (like ways to ignore files)...
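For reference, a minimal sketch of the plain-fs recursive approach (the walkDirs helper and its dirFilter argument are made up for illustration; error handling is minimal):
var fs = require('fs'),
    path = require('path');

function walkDirs(dir, dirFilter) {
    fs.readdir(dir, function (err, entries) {
        if (err) return; // e.g. permission denied, skip this branch
        entries.forEach(function (entry) {
            var full = path.join(dir, entry);
            fs.stat(full, function (err, stats) {
                if (err) return;
                if (stats.isDirectory()) {
                    if (dirFilter.test(entry.toLowerCase())) {
                        console.log('Directory: ' + full);
                    }
                    walkDirs(full, dirFilter); // recurse into subdirectories
                }
            });
        });
    });
}

walkDirs(home_path, filter1); // same variables as in the question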
EDIT1: Example using recursive-readdir:
var path = require('path');
var recursive = require('recursive-readdir');
var filter1 = new RegExp(myArgs); // myArgs as defined in the question
function ignoreFunc(file, stats) {
    return !(stats.isDirectory() && filter1.test(path.basename(file)));
}
recursive('directory', [ignoreFunc], function (err, files) {
    // files is an array of filenames (only the ones that matched the condition)
    console.log(files);
});
#Moran, can you add a console.log directly in the callback of your "directory" event?
finder.on('directory', function (dir) {
    // Here
    console.log(dir);
    var directories = dir.split('\\');
    var last = directories[directories.length - 1].toLowerCase();
    if (filter1.test(last)) {
        console.log('Directory: ' + dir);
    }
});
This will show which directory is problematic. Then compare the permissions applied to that folder with a directory that works ("comverse", for example). It would help to find your error.

how to add key and value to array that is mongodb find result (added key/value gets lost)

This must be a standard JavaScript problem, but my searches here did not turn up any results that were applicable to me.
I am querying a mongodb database and want to add key/values to the objects in the result array through the enrichJSON function.
MongoBooks.find returns some documents in an array docs. enrichJSON is probably a bad name; it should be something like enrichArray, but it is arbitrary anyway.
function enrichJSON(jsonobj) {
    for (var i = 0; i < jsonobj.length; i++) {
        jsonobj[i].myparam = "something meaningful";
    }
    return jsonobj;
}
MongoBooks.find(mongoquery, function (err, docs) {
    if (err) throw err;
    var step1 = docs;
    var step2 = enrichJSON(step1);
    console.log("step1:" + step1);
    console.log("step2:" + step2);
});
The step2 console output is missing the myparam output. Same goes if I try
jsonobj[i]["myparam"] = "abctest";
in the enrichJSON.
I am getting
step1: {[{"title":"good book"}]}
step2: {[{"title":"good book"}]}
but would like to get
step2: {[{"title":"good book", "myparam":"something meaningful"}]}
Edit to give an actual (stripped down) example of my result (I edited to make it simpler but probably mixed up the brackets):
step2: { _id: 5474e8a35e79556ced436700,
    isbn10: '0370303466',
    author: 'Graham Greene',
    lang: 'eng',
    title: 'Travels with my aunt'
}
I am still missing what I added (myparam).
You have a broken response.
{[{"title":"good book"}]}
should be something like this:
{'data' : [{"title":"good book"}]}
Then your enrich function would look like this:
function enrichJSON(jsonobj) {
    for (var i = 0; i < jsonobj.data.length; i++) {
        jsonobj.data[i].myparam = "something meaningful";
    }
    return jsonobj;
}
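A quick check with plain objects shows the function itself works once the shape has a data array:
var result = {data: [{"title": "good book"}]};
console.log(JSON.stringify(enrichJSON(result)));
// {"data":[{"title":"good book","myparam":"something meaningful"}]}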

Convert a text from text file to array with fs [node js]

I have a txt file contains:
{"date":"2013/06/26","statement":"insert","nombre":1}
{"date":"2013/06/26","statement":"insert","nombre":1}
{"date":"2013/06/26","statement":"select","nombre":4}
How can I convert the contents of the text file into an array such as:
statement = [
    {"date":"2013/06/26","statement":"insert","nombre":1},
    {"date":"2013/06/26","statement":"insert","nombre":1},
    {"date":"2013/06/26","statement":"select","nombre":4}
];
I am using node js' fs module. Thanks.
Sorry, I will explain in more detail:
I have an array:
st = [
    {"date":"2013/06/26","statement":"insert","nombre":1},
    {"date":"2013/06/26","statement":"insert","nombre":5},
    {"date":"2013/06/26","statement":"select","nombre":4}
];
If I use this code:
var arr = new LINQ(st)
    .OrderBy(function(x) { return x.nombre; })
    .Select(function(x) { return x.statement; })
    .ToArray();
I get the result I want:
insert select insert
but the problem is that my data is in a text file.
Any suggestions? Thanks again.
There is no reason not to write the file parser yourself. This will work on a file of any size:
var fs = require('fs');
var fileStream = fs.createReadStream('file.txt');
var data = "";
fileStream.on('readable', function() {
    // this function reads chunks of data and emits a newLine event when \n is found
    var chunk;
    while ((chunk = fileStream.read()) !== null) { // read() returns null when drained
        data += chunk;
    }
    while (data.indexOf('\n') >= 0) {
        fileStream.emit('newLine', data.substring(0, data.indexOf('\n')));
        data = data.substring(data.indexOf('\n') + 1);
    }
});
fileStream.on('end', function() {
    // this function sends the last chunk of data to the newLine event and tells it
    // that the file has ended
    fileStream.emit('newLine', data, true);
});
var statement = [];
fileStream.on('newLine', function(line_of_text, end_of_file) {
    // this is the code where you handle each line
    // line_of_text = string which contains one line
    // end_of_file = true if the end of file has been reached
    if (line_of_text) { // the last emit can carry an empty string if the file ends with \n
        statement.push(JSON.parse(line_of_text));
    }
    if (end_of_file) {
        console.dir(statement);
        // here you have your statement object ready
    }
});
If it's a small file, you might get away with something like this:
var fs = require("fs");
// specifying the encoding means you don't have to do `.toString()`
var arrayOfThings = fs.readFileSync("./file", "utf8").trim().split(/[\r\n]+/g).map(function(line) {
    // this try/catch will make it so we just return null
    // for any lines that don't parse successfully, instead
    // of throwing an error.
    try {
        return JSON.parse(line);
    } catch (e) {
        return null;
    }
// this .filter() removes anything that didn't parse correctly
}).filter(function(object) {
    return !!object;
});
If it's larger, you might want to consider reading it in line-by-line using any one of the many modules on npm for consuming lines from a stream.
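For example, a minimal sketch using Node's core readline module (file name assumed; empty lines are skipped before parsing):
var fs = require('fs'),
    readline = require('readline');

var statement = [];
var rl = readline.createInterface({input: fs.createReadStream('file.txt')});
rl.on('line', function (line) {
    if (line) statement.push(JSON.parse(line)); // one JSON object per line
});
rl.on('close', function () {
    console.dir(statement); // all lines parsed
});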
Wanna see how to do it with streams? Let's see how we do it with streams. This isn't a practical example, but it's fun anyway!
var stream = require("stream"),
    fs = require("fs");

var LineReader = function LineReader(options) {
    options = options || {};
    options.objectMode = true;
    stream.Transform.call(this, options);
    this._buffer = "";
};
LineReader.prototype = Object.create(stream.Transform.prototype, {constructor: {value: LineReader}});

LineReader.prototype._transform = function _transform(input, encoding, done) {
    if (Buffer.isBuffer(input)) {
        input = input.toString("utf8");
    }
    this._buffer += input;
    var lines = this._buffer.split(/[\r\n]+/);
    this._buffer = lines.pop();
    for (var i = 0; i < lines.length; ++i) {
        this.push(lines[i]);
    }
    return done();
};
LineReader.prototype._flush = function _flush(done) {
    if (this._buffer.length) {
        this.push(this._buffer);
    }
    return done();
};

var JSONParser = function JSONParser(options) {
    options = options || {};
    options.objectMode = true;
    stream.Transform.call(this, options);
};
JSONParser.prototype = Object.create(stream.Transform.prototype, {constructor: {value: JSONParser}});

JSONParser.prototype._transform = function _transform(input, encoding, done) {
    try {
        input = JSON.parse(input);
    } catch (e) {
        return done(e);
    }
    this.push(input);
    return done();
};

var Collector = function Collector(options) {
    options = options || {};
    options.objectMode = true;
    stream.Transform.call(this, options);
    this._entries = [];
};
Collector.prototype = Object.create(stream.Transform.prototype, {constructor: {value: Collector}});

Collector.prototype._transform = function _transform(input, encoding, done) {
    this._entries.push(input);
    return done();
};
Collector.prototype._flush = function _flush(done) {
    this.push(this._entries);
    return done();
};

fs.createReadStream("./file").pipe(new LineReader()).pipe(new JSONParser()).pipe(new Collector()).on("readable", function() {
    var results = this.read();
    console.log(results);
});
fs.readFileSync("myfile.txt").toString().split(/[\r\n]/)
This gets you each line as a string.
You can then use UnderscoreJS or your own for loop to apply the JSON.parse("your json string") method to each element of the array.
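For instance, a plain for loop version (skipping the empty strings the split can produce):
var fs = require('fs');
var lines = fs.readFileSync("myfile.txt").toString().split(/[\r\n]/);
var statement = [];
for (var i = 0; i < lines.length; i++) {
    if (lines[i]) statement.push(JSON.parse(lines[i])); // parse each non-empty line
}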
var arr = fs.readFileSync('mytxtfile', 'utf-8').split('\n')
I think this is the simplest way of creating an array from your text file

Uglify JS - compressing unused variables

Uglify has a "compression" option that can remove unused variables...
However, if I stored some functions in an object like this....
helpers = {
    doSomething: function () { ... },
    doSomethingElse: function () { ... }
}
... is there a way to remove helpers.doSomething() if it's never accessed?
Guess I want to give the compressor permission to change my object.
Any ideas if it's possible? Or any other tools that can help?
Using a static analyzer like Uglify2 or Esprima to accomplish this task is somewhat nontrivial, because there are lots of ways a function can end up being called that are difficult to determine statically. To show the complexity, there's this website:
http://sevinf.github.io/blog/2012/09/29/esprima-tutorial/
which attempts to at least identify unused functions. However, the code as provided on that website will not work against your example, because it is looking for FunctionDeclarations and not FunctionExpressions. It is also looking for CallExpressions that are Identifiers while ignoring CallExpressions that are MemberExpressions, which your example uses. There is also a problem of scope: it doesn't take into account functions in different scopes with the same name - perfectly legal JavaScript, but you lose fidelity using that code, as it will miss some unused functions, thinking they were called when they were not.
To handle the scope problem, you might be able to employ ESTR (https://github.com/clausreinke/estr) to help figure out the scope of the variables and, from there, the unused functions. Then you'll need to use something like escodegen to remove the unused functions.
As a starting point I've adapted the code from that website to work for your very specific situation, but be forewarned, it will have scope issues.
This is written for Node.js, so you'll need to get esprima with npm to use the example as provided, and of course execute it with node.
var fs = require('fs');
var esprima = require('esprima');

if (process.argv.length < 3) {
    console.log('Usage: node ' + process.argv[1] + ' <filename>');
    process.exit(1);
}

var notifydeadcode = function(data) {
    function traverse(node, func) {
        func(node);
        for (var key in node) {
            if (node.hasOwnProperty(key)) {
                var child = node[key];
                if (typeof child === 'object' && child !== null) {
                    if (Array.isArray(child)) {
                        child.forEach(function(node) {
                            traverse(node, func);
                        });
                    } else {
                        traverse(child, func);
                    }
                }
            }
        }
    }
    function analyzeCode(code) {
        var ast = esprima.parse(code);
        var functionsStats = {};
        var addStatsEntry = function(funcName) {
            if (!functionsStats[funcName]) {
                functionsStats[funcName] = {calls: 0, declarations: 0};
            }
        };
        var pnode = null;
        traverse(ast, function(node) {
            if (node.type === 'FunctionExpression') {
                if (pnode.type == 'Identifier') {
                    var expr = pnode.name;
                    addStatsEntry(expr);
                    functionsStats[expr].declarations++;
                }
            } else if (node.type === 'FunctionDeclaration') {
                addStatsEntry(node.id.name);
                functionsStats[node.id.name].declarations++;
            } else if (node.type === 'CallExpression' && node.callee.type === 'Identifier') {
                addStatsEntry(node.callee.name);
                functionsStats[node.callee.name].calls++;
            } else if (node.type === 'CallExpression' && node.callee.type === 'MemberExpression') {
                var lexpr = node.callee.property.name;
                addStatsEntry(lexpr);
                functionsStats[lexpr].calls++;
            }
            pnode = node;
        });
        processResults(functionsStats);
    }
    function processResults(results) {
        //console.log(JSON.stringify(results));
        for (var name in results) {
            if (results.hasOwnProperty(name)) {
                var stats = results[name];
                if (stats.declarations === 0) {
                    console.log('Function', name, 'undeclared');
                } else if (stats.declarations > 1) {
                    console.log('Function', name, 'declared multiple times');
                } else if (stats.calls === 0) {
                    console.log('Function', name, 'declared but not called');
                }
            }
        }
    }
    analyzeCode(data);
};

// Read the file and print its contents.
var filename = process.argv[2];
fs.readFile(filename, 'utf8', function(err, data) {
    if (err) throw err;
    console.log('OK: ' + filename);
    notifydeadcode(data);
});
So if you plop that in a file like deadfunc.js and then call it like so:
node deadfunc.js test.js
where test.js contains:
helpers = {
    doSomething: function(){ },
    doSomethingElse: function(){ }
};
helpers.doSomethingElse();
You will get the output:
OK: test.js
Function doSomething declared but not called
One last thing to note: attempting to find unused variables and functions can be a rabbit hole, because you have situations like eval and functions created from strings. You also have to think about apply and call, etc. Which is why, I assume, we don't have this capability in the static analyzers today.
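For instance, hypothetical snippets like these are invisible to the AST walk above, because the callee name only exists at runtime:
var name = 'doSome' + 'thing';
helpers[name](); // computed member access - the property name is not in the AST
eval('helpers.doSomething()'); // call hidden inside a string
var f = helpers.doSomething;
f.call(helpers); // the call site never mentions doSomething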

Reading a file in real-time using Node.js

I need to work out the best way to read data that is being written to a file, using node.js, in real time. Trouble is, Node is a fast moving ship which makes finding the best method for addressing a problem difficult.
What I Want To Do
I have a java process that is doing something and then writing the results of this thing it does to a text file. It typically takes anything from 5 minutes to 5 hours to run, with data being written the whole time, and can get up to some fairly hefty throughput rates (circa 1,000 lines/sec).
I would like to read this file in real time and then, using node, aggregate the data and write it to a socket where it can be graphed on the client.
The client, graphs, sockets and aggregation logic are all done but I am confused about the best approach for reading the file.
What I Have Tried (or at least played with)
FIFO - I can tell my Java process to write to a fifo and read this using node. This is in fact how we currently have this implemented using Perl, but because everything else is running in node it makes sense to port the code over.
Unix Sockets - As above.
fs.watchFile - will this work for what we need?
fs.createReadStream - is this better than watchFile?
fs & tail -f - seems like a hack.
What, actually, is my Question
I am tending towards using Unix Sockets, this seems the fastest option. But does node have better built-in features for reading files from the fs in real time?
If you want to keep the file as a persistent store of your data, to prevent a loss of stream in case of a system crash or one of the members in your network of running processes dying, you can still continue writing to a file and reading from it.
If you do not need this file as a persistent storage of produced results from your Java process, then going with a Unix socket is much better for both the ease and also the performance.
fs.watchFile() is not what you need because it works on file stats as filesystem reports it and since you want to read the file as it is already being written, this is not what you want.
SHORT UPDATE: I am sorry to realize that although I accused fs.watchFile() of using file stats in the previous paragraph, I had done the very same thing myself in my example code below! Although I had already warned readers to "take care!" because I wrote it in just a few minutes without even testing it well, it can still be done better by using fs.watch() instead of watchFile or fstatSync, if the underlying system supports it.
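A minimal sketch of that fs.watch() variant (event granularity differs across platforms, so treat it as a wake-up signal rather than a precise notification):
var fs = require('fs');
// instead of polling with fstatSync + setTimeout, wake the reader on change events
fs.watch('test-read-write.txt', function (eventType) {
    if (eventType === 'change') {
        readsome(); // the reader function from the example below
    }
});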
For reading/writing a file, I have just written the code below for fun during my break:
test-fs-writer.js: [You will not need this since you write the file in your Java process]
var fs = require('fs'),
    lineno = 0;
var stream = fs.createWriteStream('test-read-write.txt', {flags: 'a'});
stream.on('open', function() {
    console.log('Stream opened, will start writing in 2 secs');
    setInterval(function() { stream.write((++lineno) + ' oi!\n'); }, 2000);
});
test-fs-reader.js: [Take care, this is just a demonstration; check the err objects!]
var fs = require('fs'),
    bite_size = 256,
    readbytes = 0,
    file;
fs.open('test-read-write.txt', 'r', function(err, fd) { file = fd; readsome(); });
function readsome() {
    var stats = fs.fstatSync(file); // yes, sometimes async does not make sense!
    if (stats.size < readbytes + 1) {
        console.log('Hehe I am much faster than your writer..! I will sleep for a while, I deserve it!');
        setTimeout(readsome, 3000);
    } else {
        fs.read(file, new Buffer(bite_size), 0, bite_size, readbytes, processsome);
    }
}
function processsome(err, bytecount, buff) {
    console.log('Read', bytecount, 'and will process it now.');
    // Here we will process our incoming data:
    // Do whatever you need. Just be careful about not using beyond the bytecount in buff.
    console.log(buff.toString('utf-8', 0, bytecount));
    // So we continue reading from where we left off:
    readbytes += bytecount;
    process.nextTick(readsome);
}
You can safely avoid using nextTick and call readsome() directly instead. Since we are still working sync here, it is not necessary in any sense. I just like it. :p
EDIT by Oliver Lloyd
Taking the example above but extending it to read CSV data gives:
var lastLineFeed,
    lineArray,
    valueArray = []; // collects the parsed rows (was undeclared in the original edit)
function processsome(err, bytecount, buff) {
    lastLineFeed = buff.toString('utf-8', 0, bytecount).lastIndexOf('\n');
    if (lastLineFeed > -1) {
        // Split the buffer by line
        lineArray = buff.toString('utf-8', 0, bytecount).slice(0, lastLineFeed).split('\n');
        // Then split each line by comma
        for (var i = 0; i < lineArray.length; i++) {
            // Add read rows to an array for use elsewhere
            valueArray.push(lineArray[i].split(','));
        }
        // Set a new position to read from
        readbytes += lastLineFeed + 1;
    } else {
        // No complete lines were read
        readbytes += bytecount;
    }
    process.nextTick(readsome); // was readFile; readsome is the reader function above
}
Why do you think tail -f is a hack?
While figuring this out I found a good example; I would do something similar.
Real time online activity monitor example with node.js and WebSocket:
http://blog.new-bamboo.co.uk/2009/12/7/real-time-online-activity-monitor-example-with-node-js-and-websocket
Just to make this answer complete, I wrote you some example code which runs under 0.8.0 (the http server is maybe a hack).
A child process is spawned running tail, and since a child process is an EventEmitter with three streams (we use stdout in our case), you can just add a listener with on.
filename: tailServer.js
usage: node tailServer /var/log/filename.log
var http = require("http");
var filename = process.argv[2];

if (!filename)
    return console.log("Usage: node tailServer filename");

var spawn = require('child_process').spawn;
var tail = spawn('tail', ['-f', filename]);

http.createServer(function (request, response) {
    console.log('request starting...');
    response.writeHead(200, {'Content-Type': 'text/plain'});
    tail.stdout.on('data', function (data) {
        response.write('' + data);
    });
}).listen(8088);

console.log('Server running at http://127.0.0.1:8088/');
This module is an implementation of the principle #hasanyasin suggests:
https://github.com/felixge/node-growing-file
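A rough usage sketch (the open() call and its behavior here are assumptions from the module's description; check its README for the actual API):
var GrowingFile = require('growing-file');
// ASSUMPTION: open() returns a readable stream that keeps emitting 'data'
// as the file grows, and ends once the file stops growing for a timeout
var file = GrowingFile.open('/var/log/filename.log');
file.on('data', function (chunk) {
    console.log(chunk.toString());
});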
I took the answer from #hasanyasin and wrapped it up into a modular promise. The basic idea is that you pass a file and a handler function that does something with the stringified-buffer that is read from the file. If the handler function returns true, then the file will stop being read. You can also set a timeout that will kill reading if the handler doesn't return true fast enough.
The promiser will return true if the resolve() was called due to timeout, otherwise it will return false.
See the bottom for usage example.
// https://stackoverflow.com/a/11233045
var fs = require('fs');
var Promise = require('promise');

class liveReaderPromiseMe {
    constructor(file, buffStringHandler, opts) {
        /*
        var opts = {
            starting_position: 0,
            byte_size: 256,
            check_for_bytes_every_ms: 3000,
            no_handler_resolution_timeout_ms: null
        };
        */
        if (file == null) {
            throw new Error("file arg must be present");
        } else {
            this.file = file;
        }
        if (buffStringHandler == null) {
            throw new Error("buffStringHandler arg must be present");
        } else {
            this.buffStringHandler = buffStringHandler;
        }
        if (opts == null) {
            opts = {};
        }
        if (opts.starting_position == null) {
            this.current_position = 0;
        } else {
            this.current_position = opts.starting_position;
        }
        if (opts.byte_size == null) {
            this.byte_size = 256;
        } else {
            this.byte_size = opts.byte_size;
        }
        if (opts.check_for_bytes_every_ms == null) {
            this.check_for_bytes_every_ms = 3000;
        } else {
            this.check_for_bytes_every_ms = opts.check_for_bytes_every_ms;
        }
        if (opts.no_handler_resolution_timeout_ms == null) {
            this.no_handler_resolution_timeout_ms = null;
        } else {
            this.no_handler_resolution_timeout_ms = opts.no_handler_resolution_timeout_ms;
        }
    }

    startHandlerTimeout() {
        if (this.no_handler_resolution_timeout_ms && (this._handlerTimer == null)) {
            var that = this;
            this._handlerTimer = setTimeout(
                function() {
                    that._is_handler_timed_out = true;
                },
                this.no_handler_resolution_timeout_ms
            );
        }
    }

    clearHandlerTimeout() {
        if (this._handlerTimer != null) {
            clearTimeout(this._handlerTimer);
            this._handlerTimer = null;
        }
        this._is_handler_timed_out = false;
    }

    isHandlerTimedOut() {
        return !!this._is_handler_timed_out;
    }

    fsReadCallback(err, bytecount, buff) {
        try {
            if (err) {
                throw err;
            } else {
                this.current_position += bytecount;
                var buff_str = buff.toString('utf-8', 0, bytecount);
                var that = this;
                Promise.resolve().then(function() {
                    return that.buffStringHandler(buff_str);
                }).then(function(is_handler_resolved) {
                    if (is_handler_resolved) {
                        that.resolve(false);
                    } else {
                        process.nextTick(that.doReading.bind(that));
                    }
                }).catch(function(err) {
                    that.reject(err);
                });
            }
        } catch(err) {
            this.reject(err);
        }
    }

    fsRead(bytecount) {
        fs.read(
            this.file,
            new Buffer(bytecount),
            0,
            bytecount,
            this.current_position,
            this.fsReadCallback.bind(this)
        );
    }

    doReading() {
        if (this.isHandlerTimedOut()) {
            return this.resolve(true);
        }
        var max_next_bytes = fs.fstatSync(this.file).size - this.current_position;
        if (max_next_bytes) {
            this.fsRead((this.byte_size > max_next_bytes) ? max_next_bytes : this.byte_size);
        } else {
            setTimeout(this.doReading.bind(this), this.check_for_bytes_every_ms);
        }
    }

    promiser() {
        var that = this;
        return new Promise(function(resolve, reject) {
            that.resolve = resolve;
            that.reject = reject;
            that.doReading();
            that.startHandlerTimeout();
        }).then(function(was_resolved_by_timeout) {
            that.clearHandlerTimeout();
            return was_resolved_by_timeout;
        });
    }
}

module.exports = function(file, buffStringHandler, opts) {
    try {
        var live_reader = new liveReaderPromiseMe(file, buffStringHandler, opts);
        return live_reader.promiser();
    } catch(err) {
        return Promise.reject(err);
    }
};
Then use the above code like this:
var fs = require('fs');
var path = require('path');
var Promise = require('promise');
var liveReadAppendingFilePromiser = require('./path/to/liveReadAppendingFilePromiser');

var ending_str = '_THIS_IS_THE_END_';
var test_path = path.join('E:/tmp/test.txt');

var s_list = [];
var buffStringHandler = function(s) {
    s_list.push(s);
    var tmp = s_list.join('');
    if (-1 !== tmp.indexOf(ending_str)) {
        // if this return never occurs, then the file will be read until no_handler_resolution_timeout_ms
        // by default, no_handler_resolution_timeout_ms is null, so the read will continue forever until this function returns something that evaluates to true
        return true;
        // you can also return a promise:
        // return Promise.resolve().then(function() { return true; });
    }
};

var appender = fs.openSync(test_path, 'a');
try {
    var reader = fs.openSync(test_path, 'r');
    try {
        var options = {
            starting_position: 0,
            byte_size: 256,
            check_for_bytes_every_ms: 3000,
            no_handler_resolution_timeout_ms: 10000,
        };
        liveReadAppendingFilePromiser(reader, buffStringHandler, options)
            .then(function(did_reader_time_out) {
                console.log('reader timed out: ', did_reader_time_out);
                console.log(s_list.join(''));
            }).catch(function(err) {
                console.error('bad stuff: ', err);
            }).then(function() {
                fs.closeSync(appender);
                fs.closeSync(reader);
            });
        fs.write(appender, '\ncheck it out, I am a string');
        fs.write(appender, '\nwho killed kenny');
        //fs.write(appender, ending_str);
    } catch(err) {
        fs.closeSync(reader);
        console.log('err1');
        throw err;
    }
} catch(err) {
    fs.closeSync(appender);
    console.log('err2');
    throw err;
}
