I've searched around, and either can't find the exact question I'm trying to answer, or I need someone to explain it to me like I'm 5.
Basically, I have a Node.js script using the net module. I'm connecting to multiple hosts, sending commands, and listening for return data.
var net = require('net');
var nodes = [
'HOST1,192.168.179.8',
'HOST2,192.168.179.9',
'HOST3,192.168.179.10',
'HOST4,192.168.179.11'
];
function connectToServer(tid, ip) {
var conn = net.createConnection(23, ip);
conn.on('connect', function() {
conn.write(login_string); // login string hidden in pretend variable
});
conn.on('data', function(data) {
var read = data.toString();
if (read.match(/Login Successful/)) {
console.log ("Connected to " + ip);
conn.write(command_string);
}
else if (read.match(/Command OK/)) { // command_string returned successful,
// read until /\r\nEND\r\n/
// First part of data comes in here
console.log("Got a response from " + ip + ':' + read);
}
else {
//rest of data comes in here
console.log("Atonomous message from " + ip + ':' + read);
}
});
conn.on('end', function() {
console.log("Lost conncection to " + ip + "!!");
});
conn.on('error', function(err) {
console.log("Connection error: " + err + " for ip " + ip);
});
}
nodes.forEach(function(node) {
var nodeinfo = node.split(",");
connectToServer(nodeinfo[0], nodeinfo[1]);
});
The data ends up being split into two chunks. Even if I store the data in a hash and append the first part to the remainder when I read the /\r\nEND\r\n/ delimiter, there's a chunk missing out of the middle. How do I properly buffer the data in order to make sure I get the complete message from the stream?
EDIT: Ok, this seems to be working better:
function connectToServer(tid, ip) {
var conn = net.createConnection(23, ip);
var completeData = '';
conn.on('connect', function() {
conn.write(login_string);
});
conn.on('data', function(data) {
var read = data.toString();
if (read.match(/Login Successful/)) {
console.log ("Connected to " + ip);
conn.write(command_string);
}
else {
completeData += read;
}
if (completeData.match(/Command OK/)) {
if (completeData.match(/\r\nEND\r\n/)) {
console.log("Response: " + completeData);
}
}
});
conn.on('end', function() {
console.log("Connection closed to " + ip );
});
conn.on('error', function(err) {
console.log("Connection error: " + err + " for ip " + ip);
});
}
My biggest problem was apparently a logic error. I was waiting for either the chunk that began the reply or the chunk that ended it; I wasn't saving everything in between.
I guess if I wanted to get all Node-ish about it, I should fire an event whenever a complete message came in (beginning with a blank line, ending with 'END' on a line by itself), and do the processing there.
You shouldn't do anything with the data you receive until you get the end event. The end callback means that all data chunks have been sent through the stream to your callbacks. If data comes in more than one chunk, you need to create a variable within your function closure to store it in. Most programs can work just fine ignoring this fact, because data usually arrives in a single chunk. But sometimes it doesn't, and it doesn't even necessarily depend on the amount of data. If you're in a situation where this is happening, I created an example that demos how to handle it. I basically used your code but removed all the fluff... this just demos the logic you need to collect all the data and do work on it.
function connectToServer(tid, ip) {
var conn = net.createConnection(23, ip);
var completeData = '';
conn.on('connect', function() {
conn.write (login_string); // login string hidden in pretend variable
});
conn.on('data', function(data) {
completeData += data;
var dataArray = completeData.split('your delimiter');
if(dataArray.length > 1) { //If our data was split by the delimiter, we have a complete chunk saved in the 0th position in the array
doWorkOnTheFirstHalfOfData(dataArray[0]);
completeData = dataArray[1]; // The second portion of data may yet be incomplete; this may need more thorough logic if you can get more than one delimiter at a time...
}
});
conn.on('end', function() {
//do stuff with the "completeData" variable in here.
});
}
My problem was a logic error. I was looking for either the chunk that began the message or the chunk that ended it, and ignoring everything in between. I guess I expected the entire reply to come in one or two chunks.
Here's the working code, pasted from above. There's probably a more Node-ish way of doing it (I should really emit an event for each complete message; see the sketch after the code below), but I'll mark this as the answer unless someone posts a better version by this time tomorrow.
function connectToServer(tid, ip) {
var conn = net.createConnection(23, ip);
var completeData = '';
conn.on('connect', function() {
conn.write(login_string);
});
conn.on('data', function(data) {
var read = data.toString();
if (read.match(/Login Successful/)) {
console.log ("Connected to " + ip);
conn.write(command_string);
}
else {
completeData += read;
}
if (completeData.match(/Command OK/)) {
if (completeData.match(/\r\nEND\r\n/)) {
console.log("Response: " + completeData);
}
}
});
conn.on('end', function() {
console.log("Connection closed to " + ip );
});
conn.on('error', function(err) {
console.log("Connection error: " + err + " for ip " + ip);
});
}
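A rough sketch of that event-driven idea: buffer the stream in a small EventEmitter wrapper and emit one 'message' event per complete reply. This is only an illustration under my protocol's assumptions (login handshake omitted; /\r\nEND\r\n/ as the terminator), not a drop-in replacement:

var net = require('net');
var EventEmitter = require('events').EventEmitter;

function connectToServer(tid, ip) {
    var conn = net.createConnection(23, ip);
    var messages = new EventEmitter();
    var buffer = '';
    var END = '\r\nEND\r\n';
    conn.on('data', function(data) {
        buffer += data.toString();
        var idx;
        // Emit once per complete message; keep any trailing partial data buffered.
        while ((idx = buffer.indexOf(END)) !== -1) {
            messages.emit('message', buffer.slice(0, idx));
            buffer = buffer.slice(idx + END.length);
        }
    });
    messages.on('message', function(msg) {
        console.log("Complete message from " + ip + ": " + msg);
    });
    return messages;
}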
Similar to this question,
I have a script that downloads a file from a given URL via http.get.
How can I make sure the pipe has finished before continuing to the next iteration, using just the http/https modules?
//nodejs default libs
var fs = require("fs");
var http = require('https');
function dlFile(fullFilePath, dlUrl, fsize, fname){
var file = fs.createWriteStream(fullFilePath); //fullFilePath will dictate where we will save the file + filename.
var rsult ='';
var downloadedFsize;
var stats; //stats of the file will be included here
var request = http.get( dlUrl, function(response) {
let rsult = response.statusCode;
//will respond with a 200 if the file is present
//404 if file is missing
response.pipe(file);
/*pipe writes the file...
how do we stop the iteration while it is not yet finished writing?
*/
console.log(" \n FILE : " + fname);
console.log("File analysis finished : statusCode: " + rsult + " || Saved on " + fullFilePath);
console.log(' \n Downloaded from :' + dlUrl);
console.log(' \n SQL File size is : ' + fsize);
//identify filesize
stats = fs.statSync(fullFilePath);
downloadedFsize = stats["size"]; //0 because the pipe isn't finished yet...
console.log(' actual file size is : ' + downloadedFsize);
}).on('error', function(e) {
console.error(e);
//log that an error happened to the file
}).on('end', function(e){
//tried putting the above script here but nothing happens
});
return rsult;
}
Is there a cleaner approach similar to what I have in mind above, or should I approach this differently? I tried putting the code in .on('end') but nothing happens.
The end event is not triggered on the request; instead, it is triggered on the response (docs):
response.on("end", function() {
console.log("done");
});
As #Jonas Wilms says, the trigger was indeed on response.
//nodejs default libs
var fs = require("fs");
var http = require('https');
function dlFile(fullFilePath, dlUrl, fsize, fname){
var file = fs.createWriteStream(fullFilePath); //fullFilePath will dictate where we will save the file + filename.
var rsult ='';
var downloadedFsize;
var stats; //stats of the file will be included here
var request = http.get( dlUrl, function(response) {
let rsult = response.statusCode;
//will respond with a 200 if the file is present
//404 if file is missing
response.pipe(file).on('finish', function(e){
console.log(" \n FILE : " + fname);
console.log("File analysis finished : statusCode: " + rsult + " || Saved on " + fullFilePath);
console.log(' \n Downloaded from :' + dlUrl);
console.log(' \n SQL File size is : ' + fsize);
//identify filesize
stats = fs.statSync(fullFilePath);
downloadedFsize = stats["size"];
console.log(' actual file size is : ' + downloadedFsize);
});
/*pipe writes the file above, and output the results once it's done */
}).on('error', function(e) {
console.error(e);
//log that an error happened to the file
});
return rsult; // note: rsult is still '' here; report results from the 'finish' handler instead
}
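As an aside, newer Node versions (10+) ship stream.pipeline, which wires the error and completion handling together; here's a minimal sketch of the same download, assuming a done(err, result) callback of my own choosing rather than the original return value:

var https = require('https');
var fs = require('fs');
var { pipeline } = require('stream');

function dlFile(fullFilePath, dlUrl, done) {
    https.get(dlUrl, function (response) {
        // pipeline calls back once the file is fully written (or on any error).
        pipeline(response, fs.createWriteStream(fullFilePath), function (err) {
            if (err) return done(err);
            var stats = fs.statSync(fullFilePath); // size is final here
            done(null, { statusCode: response.statusCode, size: stats.size });
        });
    }).on('error', done);
}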
I'm using the twitter npm package in an attempt to stream tweets from specified accounts.
I'm having trouble navigating the twitter api docs, so I'm a little confused.
I can hit the REST endpoint to get the specified user tweets info with:
var client = new Twitter({});
client.get('statuses/user_timeline', { screen_name: user }, function(error, tweets) {
if(error) throw error;
console.log(tweets);
});
How do I stream the tweets? Is it even possible? If not, how else could I accomplish this? I would like this to be as responsive and immediate as possible.
Figured it out...
var stream = client.stream('statuses/filter', { follow: userId });
stream.on('data', function(event) {
console.log(event && event.text);
});
This client stream reader will display the tweets automatically as they are made.
Unfortunately, the screen_name of the user cannot be used here, so you'll have to find the numeric user ID beforehand.
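If it helps, something like this should let you resolve the numeric ID from a screen name first (a sketch assuming the REST users/show endpoint, which the twitter package can call via client.get):

client.get('users/show', { screen_name: 'damiengold' }, function(error, user) {
    if (error) throw error;
    // user.id_str is the numeric ID as a string, which 'follow' expects
    var stream = client.stream('statuses/filter', { follow: user.id_str });
    stream.on('data', function(event) {
        console.log(event && event.text);
    });
});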
I wrote the code below and was able to get the last 20 tweets of a user and save them in a file, log.txt.
var Twitter = require('twitter');
var fs = require("fs");
//Your keys go inside {}
var client = new Twitter({});
//These are the parameters for the API call
var params = {
screen_name: 'damiengold', //use damiengold instead of @damiengold
count: 20
};
//Perform the API call and return data
client.get('statuses/user_timeline.json', params, function(error, tweets, response) {
var dataToFile;
if (!error) {
for (var i = 0; i < tweets.length; i++) {
console.log(tweets[i].created_at + " " + tweets[i].text);
console.log("-------------------------------");
dataToFile = tweets[i].created_at + " " + tweets[i].text + "\n-----------\n";
//This block of code will append tweets to the file called "log.txt".
fs.appendFile("log.txt", dataToFile, function(err) {
// If the code experiences any errors it will log the error to the console.
if (err) {
return console.log(err);
}
})
}
}
console.log("Last 20 tweets are now displayed on the screen and saved in log.txt.");
});
Can someone please explain why I'm getting an issue with this one line? For some reason, when I run it with node in the console, I receive the Unexpected end of input at Object.parse (native) error.
var profile = JSON.parse(body);
Full code:
//Problem: We need a simple way to look at a user's badge count and Javascript points
//Solution: Use Node.js to connect to Treehouse's API to get profile information to print out
var http = require("http");
var username = "testuser";
//Print out message
function printMessage(username, badgeCount, points) {
var message = username + " has " + badgeCount + " total badge(s) and " + points + " points in Javascript";
console.log(message);
}
//Print out error messages
function printError(error) {
console.error(error.message);
}
//Connect to API URL (http://teamtreehouse.com/username.json)
var request = http.get("http://teamtreehouse.com/" + username + ".json", function(response) {
var body = "";
//Read the data
response.on('data', function(chunk) {
body += chunk;
});
response.on('end', function() {
if(response.statusCode == 200){
try {
var profile = JSON.parse(body);
printMessage(username, profile.badges.length, profile.points.Javascript);
} catch(error) {
//Parse Error
printError(error);
}
} else {
//Status Code Error
printError({message: "There was an error getting the profile for " + username + ". (" + http.STATUS_CODES[response.statusCode] + ")"});
}
});
//Parse the data
//Print the data
});
//Connection Error
request.on('error', printError);
When I try to browse to http://teamtreehouse.com/test.json, it redirects me to the corresponding HTTPS URL. Use the Node.js https module and the HTTPS version of the URL: https://teamtreehouse.com/test.json.
Or use the popular request module, which can handle redirects and HTTPS: https://github.com/request/request. It is easier to use as well.
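For illustration, the minimal change is just the module and the URL; everything else in the request can stay the same (a sketch, trimmed to the changed part):

var https = require("https");

// Same request as before, but over HTTPS, so the body is the actual JSON
// instead of an empty redirect response (which is what made JSON.parse throw).
var request = https.get("https://teamtreehouse.com/" + username + ".json", function(response) {
    var body = "";
    response.on('data', function(chunk) {
        body += chunk;
    });
    response.on('end', function() {
        // ...same status check and JSON.parse logic as above...
    });
});
request.on('error', printError);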
I am writing a backup script that simply downloads all the blobs in all the blob containers of a specific Azure account.
The script uses async.js to make sure only so many tasks can run at the same time, so it doesn't overload the server. When I run this script it works fine, but when it hits large files it runs out of memory. I'm guessing the download runs faster than the disk can write, and it eventually fills up the in-memory buffer so badly that I run out of memory entirely, but debugging the exact cause has been impossible so far.
The specific function which appears to use a lot of memory is called as follows:
blobService.getBlobToStream(
containerName,
blob.name,
fs.createWriteStream(fullPath),
function(error) {
if(error){ //Something went wrong, write it to the console but finish the queue item and continue.
console.log("Failed writing " + blob.name + " (" + error + ")");
callback();
}
else if(!error) { //Write the last modified date and finish the queue item silently
fs.writeFile(fullPath + ".date", blobLastModified, function(err)
{ if(err) console.log("Couldn't write .date file: " + err); });
callback();
}
});
Even a single 700MB download will easily fill up 1GB of memory on my side.
Is there any way around this? Am I missing a parameter which magically prevents the Azure SDK from buffering everything and the kitchen sink?
Full code:
#!/usr/bin/env node
//Requires
var azure = require('azure');
var fs = require('fs');
var mkdirp = require('mkdirp');
var path = require('path');
var async = require('async');
var maxconcurrency = 1; //Max amount of simultaneous running threads of getBlobsAndSaveThem() running through async.js.
var blobService = azure.createBlobService();
var backupPrefix = '/backups/azurebackup/'; //Always end with a '/'!!
//Main flow of the script is near the bottom of the file.
var containerProcessingQueue = async.queue(
function getBlobsAndSaveThem(containerName) {
console.log(containerName); //DEBUG
blobService.listBlobs(containerName,
function(error, blobs) {
if(!error){
var blobProcessingQueue =
async.queue(function(index,callback) {
var blob = blobs[index];
console.log(blob); //DEBUG
var fullPath = backupPrefix + containerName + '/' + blob.name;
var blobLastModified = new Date(blob.properties['last-modified']);
//Only create if the directory doesn't exist.
if(!fs.existsSync(path.dirname(fullPath))){ //And do it sync, because otherwise it'll check 99999 times if the directory exists simultaneously, doesn't find it, then fails to create it 99998 times.
try { mkdirp.sync(path.dirname(fullPath)); } catch (err) { console.log('Failed to create directory ' + path.dirname(fullPath) + " (" + err + ")"); } // mkdirp.sync takes no callback; it throws on failure
}
if(fs.existsSync(fullPath + ".date")){
if(blobLastModified == fs.readFileSync(fullPath + ".date").toString()) {
callback();
return; //If the file is unmodified, return. No this won't exit the program, because it's called within a function definition (async.queue(function ...))
}
}
blobService.getBlobToStream(
containerName,
blob.name,
fs.createWriteStream(fullPath),
function(error) {
if(error){ //Something went wrong, write it to the console but finish the queue item and continue.
console.log("Failed writing " + blob.name + " (" + error + ")");
callback();
}
else if(!error) { //Write the last modified date and finish the queue item silently
fs.writeFile(fullPath + ".date", blobLastModified, function(err)
{ if(err) console.log("Couldn't write .date file: " + err); });
callback();
}
});
},maxconcurrency);
for(var blobindex in blobs){
blobProcessingQueue.push(blobindex);
} //Push new items to the queue for processing
}
else {
console.log("An error occurred listing the blobs: " + error);
}
});
},1);
blobService.listContainers(function(err, result){
for(var i=0;i<result.length;i++) {
containerProcessingQueue.push(result[i].name);
}
});
For all those now curious: the variables for the start and end have changed; they are now just rangeStart and rangeEnd.
Here is the Azure Node documentation for more help:
http://dl.windowsazure.com/nodestoragedocs/BlobService.html
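So with a current SDK, the options object in the answer's sketch below would presumably become (assuming the same inclusive end-byte semantics):

blobService.getBlobToStream(containerName, blobName, stream,
    { rangeStart: startPos, rangeEnd: endPos - 1 }, function (error) {
        // ...same continuation logic as in the answer below...
    });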
One thing that you could possibly do is read only a chunk of data into the stream instead of the whole blob, append that to the file, and then read the next chunk. The Blob Storage service supports that. If you look at the source code for getBlobToStream (https://github.com/WindowsAzure/azure-sdk-for-node/blob/master/lib/services/blob/blobservice.js), you can specify from/to bytes in the options - rangeStartHeader and rangeEndHeader. See if that helps.
I have hacked together some code which does just that (as you can see from my code, my knowledge of node.js is quite primitive :)). [Please use this code just to get an idea of how you can do a chunked download, as I think it still has some glitches.]
var azure = require('azure');
var fs = require('fs');
var blobService = azure.createBlobService("account", "accountkey");
var containerName = "container name";
var blobName = "blob name";
var blobSize;
var chunkSize = 1024 * 512;//chunk size -- we'll read 512 KB at a time.
var startPos = 0;
var fullPath = "D:\\node\\";
var blobProperties = blobService.getBlobProperties(containerName, blobName, null, function (error, blob) {
if (error) {
throw error;
}
else {
blobSize = blob.contentLength;
fullPath = fullPath + blobName;
console.log(fullPath);
doDownload();
}
}
);
function doDownload() {
var stream = fs.createWriteStream(fullPath, {flags: 'a'});
var endPos = startPos + chunkSize;
if (endPos > blobSize) {
endPos = blobSize;
}
console.log("Downloading " + (endPos - startPos) + " bytes starting from " + startPos + " marker.");
blobService.getBlobToStream("test", blobName, stream,
{ "rangeStartHeader": startPos, "rangeEndHeader": endPos-1 }, function(error) {
if (error) {
throw error;
}
else if (!error) {
startPos = endPos;
if (startPos <= blobSize - 1) {
doDownload();
}
}
});
}
I have a large complex web app with thousands of lines of Javascript. There is a small set of intermittent Javascript bugs that are reported by users.
I think these are epiphenomena of race conditions - something has not initialised correctly, and the Javascript crashes, causing 'downstream' JS not to run.
Is there any way to get Javascript execution crashes to log back server side?
All the js logging libraries like Blackbird and Log4JavaScript are client-side only.
I have written a remote error logging function using window.onerror as suggested by #pimvdb
Err = {};
Err.Remoterr = {};
Err.Remoterr.onerror = function (msg, errorfileurl, lineno) {
var jsonstring, response, pageurl, cookies;
// Get some user input
response = prompt("There has been an error. " +
"It has been logged and will be investigated.",
"Put in comments (and e-mail or phone number for" +
" response.)");
// get some context of where and how the error occurred
// to make debugging easier
pageurl = window.location.href;
cookies = document.cookie;
// Make the json message we are going to post
// Could use JSON.stringify() here if you are sure that
// JSON will have run when the error occurs
// http://www.JSON.org/js.html
jsonstring = "{\"set\": {\"jserr\": " +
"{\"msg\": \"" + msg + "\", " +
"\"errorfileurl\": \"" + errorfileurl + "\", " +
"\"pageurl\": \"" + pageurl + "\", " +
"\"cookies\": \"" + cookies + "\", " +
"\"lineno\": \"" + lineno + "\", " +
"\"response\": \"" + response + "\"}}}";
// Use the jquery cross-browser post
// http://api.jquery.com/jQuery.post/
// this assumes that no errors happen before jquery has initialised
$.post("?jserr", jsonstring, null, "json");
// I don't want the page to 'pretend' to work
// so I am going to return 'false' here
// Returning 'true' will clear the error in the browser
return false;
};
window.onerror = Err.Remoterr.onerror;
I deploy this between the head and body tags of the webpage.
You will want to change the JSON and the URL that you post it to depending on how you are going to log the data server side.
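For completeness, the receiving end can be very small; here's a minimal Node sketch of a server-side handler, assuming the same '?jserr' marker as above and a flat log file (both choices are mine, not part of the original setup):

var http = require('http');
var fs = require('fs');

http.createServer(function (req, res) {
    if (req.method === 'POST' && req.url.indexOf('jserr') !== -1) {
        var body = '';
        req.on('data', function (chunk) { body += chunk; });
        req.on('end', function () {
            // Append each error report as one line; rotate/secure this in production.
            fs.appendFile('jserrors.log', body + '\n', function (err) {
                if (err) console.error(err);
            });
            res.end('{"ok": true}');
        });
        return;
    }
    res.end(); // ...serve the rest of the app here...
}).listen(8080);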
Take a look at https://log4sure.com (disclosure: I created it) - it is really useful; check it out and decide for yourself. It allows you to log errors/events and also lets you create your own custom log table. It also allows you to monitor your logs in real time. And the best part: it's free.
You can also use bower to install it: bower install log4sure.
The setup code is really easy too:
// setup
var _logServer;
(function() {
var ls = document.createElement('script');
ls.type = 'text/javascript';
ls.async = true;
ls.src = 'https://log4sure.com/ScriptsExt/log4sure.min.js';
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(ls, s);
ls.onload = function() {
// use your token here.
_logServer = new LogServer("use-your-token-here");
};
})();
// example for logging text
_logServer.logText("your log message goes here.")
//example for logging error
divide = function(numerator, divisor) {
try {
if (!parseFloat(numerator) || !parseFloat(divisor)) {
throw new TypeError("Invalid input", "myfile.js", 12, {
numerator: numerator,
divisor: divisor
});
} else {
if (divisor == 0) {
throw new RangeError("Divide by 0", "myfile.js", 15, {
numerator: numerator,
divisor: divisor
});
}
}
} catch (e) {
_logServer.logError(e.name, e.message, e.stack);
}
}
// another use of logError in window.onerror
// must be careful with window.onerror as you might be overwriting some one else's window.onerror functionality
// also someone else can overwrite window.onerror.
window.onerror = function(msg, url, line, column, err) {
// may want to check if url belongs to your javascript file
var data = {
url: url,
line: line,
column: column,
}
_logServer.logError(err.name, err.message, err.stack, data);
};
// example for custom logs
var foo = "some variable value";
var bar = "another variable value";
var flag = "false";
var temp = "yet another variable value";
_logServer.log(foo, bar, flag, temp);