Hi, I'm having problems performing an HTTP request in Node.js given a large array of JSON objects. The request works fine with a small array of JSON objects. However, if I increase the size of the array, I receive Error: socket hang up {"error":{"code":"ECONNRESET"}}. Is it required to perform multiple writes? Or is something going wrong at the other end?
Thanks in advance for taking your time here!
// data is a json object
var post_data = JSON.stringify(data);
var buf = new Buffer(post_data);
var len = buf.length;

var options = {
    hostname: address,
    port: port,
    path: pathName,
    method: 'PUT',
    headers: {
        'Content-Type': 'application/json',
        'Content-Length': len,
        'Transfer-Encoding': 'chunked'
    }
};

// http call to REST API server
var req = restHttp.request(options, function(res) {
    console.log('server PUT response received.');
    var resData = '';
    res.on('data', function(replyData) {
        // Check reply data for error.
        console.log(replyData.toString('utf8'));
        if (replyData !== 'undefined')
            resData += replyData;
    });
    res.on('end', function() {
        callback(JSON.parse(resData));
    });
});

req.write(buf);
req.end();
You can stream the request body.
If the data in buf were in a readable stream, you could just do buf.pipe(req).
For example, if the current directory contains a file data.json with the JSON, you can do
var buf = fs.createReadStream(__dirname + '/data.json');
to create a ReadStream object. Then you can pipe this to your req:
buf.pipe(req);
The pipe call will invoke req.end once it's done streaming.
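If the JSON only exists in memory, as in the question, the same idea can be applied by first wrapping the buffer in a stream. A minimal sketch, assuming buf and req from the snippet above (PassThrough is just one convenient way to get a readable stream):
var stream = require('stream');

// Wrap the in-memory buffer in a readable stream, then pipe it into the request.
var bodyStream = new stream.PassThrough();
bodyStream.end(buf);   // push the whole payload and signal end-of-stream
bodyStream.pipe(req);  // pipe() will call req.end() when the stream finishes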
Overarching goal is to save some JSON data I create on a webpage to my files locally. I am definitely sending something to the server, but not in a format I seem to be able to access.
JsonData looks like:
{MetaData: {Stock: "UTX", Analysis: "LinearTrend2"}
Projections: [2018-10-12: 127.62, 2018-10-11: 126.36000000000001, 2018-10-10: 132.17, 2018-10-09: 140.12, 2018-10-08: 137.73000000000002, …]}
XMLHttpRequest on my webpage:
function UpdateBackTestJSON(JsonUpdate){ // JsonUpdate being the JSON object from above
    var request = new XMLHttpRequest();
    request.open('POST', 'UpdateBackTestJSON');
    request.setRequestHeader("Content-Type", "application/json;charset=UTF-8");
    // request.setRequestHeader("Content-Type", "text/plain;charset=UTF-8");
    request.onload = function() {
        console.log("Updated JSON File");
    };
    console.log("about to send request");
    console.log(JsonUpdate);
    request.send(JSON.stringify(JsonUpdate));
}
and I handle POSTs on my server (rather carelessly, I realize; just going for functionality as a start here):
var http = require('http')
  , fs = require('fs')
  , url = require('url')
  , port = 8008;

var server = http.createServer(function (req, res) {
    var uri = url.parse(req.url);
    var qs = require('querystring');
    if (req.method == 'POST'){
        var body = '';
        req.on('data', function (data){
            body += data;
            // 1e6 === 1 * Math.pow(10, 6) === 1 * 1000000 ~~~ 1MB
            if (body.length > 1e6){
                // FLOOD ATTACK OR FAULTY CLIENT, NUKE REQUEST
                req.connection.destroy();
            }
        });
        req.on('end', function () {
            var POST = qs.parse(body);
            console.log(POST); // PARSED POST IS NOT THE RIGHT FORMAT... or something, idk whats going on
            UpdateBackTestData(POST);
        });
    }

    function UpdateBackTestData(TheJsonData){
        console.log("UpdateBackTestData");
        console.log(TheJsonData);
        JsonUpdate = JSON.parse(TheJsonData);
        console.log(JsonUpdate["MetaData"]);
        //var Stock = JsonUpdate["MetaData"]["Stock"];
        //var Analysis = JsonUpdate["MetaData"]["Analysis"];
        fs.writeFile("/public/BackTestData/"+Analysis+"/"+Stock+".json", TheJsonData, function(err){
            if(err){
                console.log(err);
            }
            console.log("updated BackTest JSON!!!");
        });
    }
Most confusing to me is that when I run this, the JSON object I am trying to pass does go through to the server, but the entirety of the data ends up as a string used as the key for a blank value in an object. When I parse the body of the POST, I get: {'{MetaData:{'Stock':'UTX','Analysis:'LinearTrend2'},'Projections':[...]}': ''}. So my data is there... but not in a practical format.
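For illustration, a minimal sketch of the difference, assuming body holds the raw JSON text received above (the sample data here is hypothetical): querystring.parse treats the whole text as a single form key with an empty value, while JSON.parse recovers the object.
var qs = require('querystring');

// body is the raw request body, i.e. JSON text
var body = '{"MetaData":{"Stock":"UTX","Analysis":"LinearTrend2"}}';

console.log(qs.parse(body));   // { '{"MetaData":{"Stock":"UTX","Analysis":"LinearTrend2"}}': '' }
console.log(JSON.parse(body)); // { MetaData: { Stock: 'UTX', Analysis: 'LinearTrend2' } }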
I would prefer not to use express or other server tools, as I have a fair amount of other services set up in my server that I don't want to go back and change if I can avoid it.
Thanks for any help
I have two servers that communicate with each other. Server1 requests parts of a file from Server2 and stores the data received into one file. Server2 is supposed to receive each of these requests and create a stream that pipes the data over.
Suppose the files stored (in a directory) on Server 2 are as follows:
bigfile.gz
bigfile.gz.part-0
bigfile.gz.part-1
bigfile.gz.part-2
......
So Server1 will send a request for part-0, then part-1, and so on to Server2. Hence the use of the loop to make requests.
Server 1 (code snippet)
for (var i in requestInfo['blockName']) {
    var blockName = i;
    var IP = requestInfo['blockName'][i][0];
    var fileData = JSON.stringify({
        blockName: blockName,
        fileName: requestInfo['fileName']
    });
    makeRequest(fileData, IP);
    console.log(counter);
}

function makeRequest(fileData, IP) {
    var options = {
        host: IP,
        port: 5000,
        path: '/read',
        method: 'POST',
        headers: {
            'Content-Type': 'application/json'
        }
    };
    var req = http.request(options, function(res) {
        var data = '';
        res.on('data', function(chunk) {
            data += chunk;
        });
        res.on('end', function() {
            console.log(data.length);
            //fs.appendFileSync(fileName, data);
            var writeStream = fs.createWriteStream(fileName, { "flags": 'a' });
            writeStream.write(data);
            writeStream.end();
        });
    });
    req.write(fileData);
    req.end();
}
Server 2 (code snippet)
app.post('/read', function(req, res) {
    var dataBody = req.body;
    fs.createReadStream(dataBody.fileName + '/' + dataBody.blockName).pipe(res);
});
The one above works when I test it with a 100 MB .txt file. But it fails when I have a 1 GB .gz file, or even when I test it with a .zip file: the final .zip generated on the Server1 side has the incorrect size.
I am not sure what I am doing wrong here, or what the alternative solution is.
EDIT:
Also, my Server1 crashes when dealing with the big 1 GB .gz file.
Your main problem here is that you are treating your data as a string by appending chunks to a string.
Rewritten, it should be:
var req = http.request(options, function(res) {
    var data = [];
    res.on('data', function(chunk) {
        data.push(chunk);
    });
    res.on('end', function() {
        fs.writeFile(fileName, Buffer.concat(data), function() {
            console.log("write end");
        });
    });
});
That way we are creating a big array of binary chunks, and when the download is complete we write the concatenation of all the chunks to a file.
But notice the word big.
If you stick with this implementation, you risk running out of memory, especially if you are dealing with large (>500 MB) files.
Streams to the rescue
var req = https.request(options, function(res) {
    res.pipe(fs.createWriteStream(fileName)).on("close", function() {
        console.log("write end");
    });
});
Using the above implementation, the memory footprint should stay low: the moment you get a chunk of data from your download, you write it to the file. That way you never keep the whole file in the program's memory.
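As a small addition (an assumption on my part, not something the snippet above requires), you would probably also want error handlers on the request and on both ends of the pipe, so a failed part does not go unnoticed:
var req = https.request(options, function(res) {
    var out = fs.createWriteStream(fileName);
    res.pipe(out).on("close", function() {
        console.log("write end");
    });
    // surface stream failures instead of silently losing a part
    res.on("error", function(err) { console.error("response error:", err); });
    out.on("error", function(err) { console.error("write error:", err); });
});
req.on("error", function(err) { console.error("request error:", err); });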
I am using a Node JS HTTP request. When my response length exceeds 16101, my response gets truncated, and I receive a limited response like this:
{"id_user":"133696"},{"id_u
This is not a chunked response; it only comes once. I want to receive the whole response instead of a truncated one.
My Node version is v0.10.36.
Here is my code:
var https = require('https');
var querystring = require('querystring');

postData.format = 'json';
postData.apikey = 'abcd';
jsonObject = querystring.stringify(postData);

var postheaders = {
    'Content-Type' : 'application/x-www-form-urlencoded',
    'Content-Length' : Buffer.byteLength(jsonObject, 'utf8')
};

if(callMethod == undefined){
    callMethod = 'POST';
}

var optionspost = {
    host : this.host,
    port : this.port,
    path : this.path,
    method : callMethod,
    headers : postheaders
};

var reqPost = https.request(optionspost, function(res) {
    res.setEncoding('utf-8');
    res.on('data', function(responseData) {
        //---->>> responseData contains truncated response
        if(callFunction != undefined && callFunction != null && callFunction != ''){
            callFunction(responseData, relatedData); //****** calling success function ****
        }
    });
    res.on('end', function() {
    });
});
reqPost.write(jsonObject);
reqPost.end();
reqPost.on('error', function(e) {
    console.error(e);
});
Your code is expecting the data event only once, but Node can fire it more than once. In fact, it can fire it as many times as it damn pleases. :) Every time a data event is emitted, another part of the data is provided to you. You know that there is no more data to be consumed when the end event is fired - that's where you should process the data and/or call your callbacks.
Since the response is basically a readable stream, have a look at the data event for Readable Stream.
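Applied to the snippet above, a minimal sketch (reusing the variable names from the question, which are assumptions about the surrounding code) would accumulate the chunks and only call the success function once end fires:
var reqPost = https.request(optionspost, function(res) {
    res.setEncoding('utf-8');
    var responseData = '';
    res.on('data', function(chunk) {
        responseData += chunk; // collect every part of the body
    });
    res.on('end', function() {
        // the full body is only available here
        if (callFunction != undefined && callFunction != null && callFunction != '') {
            callFunction(responseData, relatedData); // ****** calling success function ******
        }
    });
});
reqPost.write(jsonObject);
reqPost.end();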
For my front-end (angular) app, I need to connect to an external API, which does not support CORS.
So my way around this is to have a simple proxy in Node.js / Express.js to pass the requests through. The additional benefit is that I can set my API credentials at the proxy level and don't have to pass them to the front-end, where a user might steal/abuse them.
This is all working perfectly.
Here's the code, for the record:
var request = require('request');
var config = require('./config');
var url = config.api.endpoint;
var uname = config.api.uname;
var pword = config.api.pword;
var headers = {
    "Authorization" : 'Basic ' + new Buffer(uname + ':' + pword).toString('base64'),
    "Accept" : "application/json"
};

exports.get = function(req, res) {
    var api_url = url + req.url;
    var r = request({url: api_url, headers: headers});
    req.pipe(r).pipe(res);
};
The API endpoint I have to use has XML as its only output format, so I use xml2js on the front-end to convert the XML response to JSON.
This is also working great, but I would like to lighten the load for the client, and do the XML -> JSON parsing step on the server.
I assume I will have to create something like:
req.pipe(r).pipe(<convert_xml_to_json>).pipe(res);
But I don't have any idea how to create something like that.
So basically I'm looking to create an XML to JSON proxy as a layer on top of an already existing API.
There are a lot of questions on SO regarding "how do I make a proxy" and "how do I convert XML to JSON" but I couldn't find any that combine the two.
You need to use a transform stream, and for the XML to JSON conversion you need some library; I use xml2json.
Then you use it like this (simplified, but it should work with request too):
var http = require('http');
var fs = require('fs');
var parser = require('xml2json');
var Transform = require('stream').Transform;

function xmlParser () {
    var transform = new Transform();
    transform._transform = function(chunk, encoding, done) {
        chunk = parser.toJson(chunk.toString());
        console.log(chunk);
        this.push(chunk);
        done();
    };
    transform.on('error', function (err) {
        console.log(err);
    });
    return transform;
}

var server = http.createServer(function (req, res) {
    var stream = fs.createReadStream(__dirname + '/data.xml');
    stream.pipe(xmlParser()).pipe(res);
});

server.listen(8000);
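Plugged into the proxy from the question, the same transform would slot in where the <convert_xml_to_json> placeholder was. A sketch (it assumes each response chunk is a complete XML document on its own, which may not hold for large responses):
exports.get = function(req, res) {
    var api_url = url + req.url;
    var r = request({url: api_url, headers: headers});
    res.setHeader('Content-Type', 'application/json'); // the client now receives JSON
    req.pipe(r).pipe(xmlParser()).pipe(res);
};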
I need to get the document title.
So I try to send a request and parse the response HTML to get the title.
Example (via the Node.js module request):
request.get("http://www.google.com", function(err, res, body) {
    var title = body.match(/<title>(.*?)<\/title>/g)[1];
});
But when the document is particularly large, the request is slow.
Is there a way to get the document title quickly? Please suggest. Thanks.
Request can give you a stream of decompressed data as it is received: http://github.com/request/request#examples (2nd example).
You could keep appending the data received to a buffer and checking whether it contains your desired content yet ("</title>"). As soon as you have it, you can extract the title and ignore the rest of the stream.
var request = require('request');
var buffer = '';
var flag = 0;

request({
    method: 'GET',
    uri: 'http://www.google.com',
    gzip: true
}).on('data', function(data) {
    // Keep collecting until the closing </title> tag has arrived.
    buffer += data;
    if (buffer.indexOf('</title>') !== -1) done();
});

function done() {
    if (flag) return;
    flag++;
    var title = buffer.match(/<title>(.*?)<\/title>/)[1];
    console.log(title);
}
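If the goal is to avoid downloading the rest of a large page once the title is known, the request object can also be kept around and aborted; request exposes an abort() method for this. A sketch of that variation:
var request = require('request');
var buffer = '';
var found = false;

var req = request({
    method: 'GET',
    uri: 'http://www.google.com',
    gzip: true
});

req.on('data', function(data) {
    if (found) return;
    buffer += data;
    var match = buffer.match(/<title>(.*?)<\/title>/);
    if (match) {
        found = true;
        console.log(match[1]);
        req.abort(); // stop downloading the remainder of the document
    }
});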