So I've deployed my server to Heroku with the following:
const pool = new Pool({
  connectionString: process.env.DATABASE_URL,
  ssl: {
    rejectUnauthorized: false
  }
});
const http = require('http').Server(app);
const io = require('socket.io')(http);
I have a function that sends a request to my Raspberry Pi (which is acting as a socket client in this case) asking it to run a scraping function and update a table in my Heroku Postgres database accordingly. The only problem is that on the Pi, the client.query() function is not recognized, and I'm not sure why. Here's my function that sends the request to the Pi:
async function F1() {
  const client = await pool.connect();
  try {
    await client.query('BEGIN')
    //do some database updating here in some table updates
    let date = ("0" + terminationTime.getDate()).slice(-2);
    let month = ("0" + (terminationTime.getMonth() + 1)).slice(-2);
    let year = terminationTime.getFullYear();
    let hours = terminationTime.getHours();
    let minutes = terminationTime.getMinutes();
    let seconds = terminationTime.getSeconds();
    let timestamp = year + "-" + month + "-" + date + " " + hours + ":" + minutes + ":" + seconds
    let data = {
      pool: pool, //here I tried passing in the pool, also tried passing in client
      query: query,
      timestamp: timestamp
    }
    io.to(RPiMiniServerID).emit("requestToPi", data)
  } catch(err) {
    console.log(err);
  }
}
And here's my code on the Pi/Client side:
const io = require("socket.io-client");
const socket = io.connect("MY HEROKU APP URL HERE")
const { Pool, Client } = require("pg");
socket.on("requestToPi", (data) => sync function() {
let client = await data.pool.connect()
let query = data.query
let timestamp = data.timestamp
//a bunch more code and THEN
await client.query('INSERT INTO table (columnnames) VALUES($1)', [value]); //this is the line with the error
}());
I've tried passing in the client directly, but that also doesn't work. Not really sure what to do here--any help is much appreciated!
Let me get this straight: you're trying to send the pg library object instance that was created on your Heroku server to your Pi, and continue operating from that 'pg' object's state?
The first rule of HTTP is that it's stateless. I know this is Socket.IO, but it still relies on HTTP: the data is serialized, recreated on the other end, and its state is lost over the connection. So in effect you're starting a new pg instance on the Pi, and the one on the Heroku server is not the same as the one on the Pi.
await client.query('INSERT INTO table (columnnames) VALUES($1)', [value]); //this is the line with the error
That is because the data has completely lost its state. That means you're not connected to the database, you have not started the transaction yet, etc. You have a new instance of the pg library on your hands, and everything you did on the Heroku server is gone.
One solution to this problem is to use some sort of real-time model/data synchronization, like backbone.io or other similar solutions out there.
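The simpler fix, though, is to send only plain, serializable data over the socket and let the Pi run the query against a Pool it creates itself. Here is a minimal sketch of that idea (the placeholder SQL and values are carried over from the question; how the connection string reaches the Pi's environment is an assumption):

// On the Heroku server: emit only serializable data, never the pool or client
io.to(RPiMiniServerID).emit("requestToPi", {
  query: 'INSERT INTO table (columnnames) VALUES($1)', // placeholder SQL
  values: [value],
  timestamp: timestamp
});

// On the Pi: create a pool once at startup, pointing at the same database
const { Pool } = require("pg");
const localPool = new Pool({
  connectionString: process.env.DATABASE_URL, // the Pi needs its own copy of the connection string
  ssl: { rejectUnauthorized: false }
});

socket.on("requestToPi", async (data) => {
  const client = await localPool.connect();
  try {
    await client.query(data.query, data.values);
  } catch (err) {
    console.log(err);
  } finally {
    client.release();
  }
});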
I'm trying to create a Shared Access Signature client side in my Node app. The reason being that I do not want to stream files through my app. I want the user to be able to upload a file to my Azure Data Lake Gen2 Blob Storage container directly.
I have looked at all the examples I can find, but they are all server side. So I tried to generate a SAS with generateDataLakeSASQueryParameters and use it in the PUT request. The process looks like it works, and I return it to the client.
Server side:
async getFileUploadUrl(path) {
  const now = new Date().toUTCString();
  const startsOn = new Date(now);
  startsOn.setMinutes(startsOn.getMinutes() - 10); // Skip clock skew with server
  const expiresOn = new Date(now);
  expiresOn.setHours(expiresOn.getHours() + 1); // Expires in one hour
  const sharedKeyCredential = new StorageSharedKeyCredential(this.storageAccountName, this.accountKey);
  const sas = generateDataLakeSASQueryParameters({
      fileSystemName: this.fileSystemClient.name,
      ipRange: { start: "0.0.0.0", end: "255.255.255.255" },
      expiresOn,
      protocol: SASProtocol.HttpsAndHttp,
      permissions: DataLakeSASPermissions.parse("c").toString(), // Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u), Process (p)
      resourceTypes: AccountSASResourceTypes.parse("o").toString(), // Service (s), Container (c), Object (o)
      services: AccountSASServices.parse("b").toString(), // Blob (b), Table (t), Queue (q), File (f)
      startsOn,
      version: "2019-12-12"
    },
    sharedKeyCredential);
  const encodedURI = encodeURI(path);
  const filePath = `${this.fileSystemClient.url}/${encodedURI}`;
  return {
    url: filePath,
    signature: sas.signature,
  };
}
Client side:
const { url, signature } = serverResponse;
const file = [file takes from an input tag];
const request = new XMLHttpRequest();
request.open('PUT', url, true);
request.setRequestHeader("x-ms-date", new Date().toUTCString());
request.setRequestHeader("x-ms-version", '2019-12-12');
request.setRequestHeader("x-ms-blob-type", 'BlockBlob');
request.setRequestHeader("Authorization", `SharedKey [storageaccount]:${signature}`);
request.send(file);
And what I keep getting back is a 403 with the following error:
The MAC signature found in the HTTP request '[signature]' is not the
same as any computed signature. Server used following string to sign:
'PUT\n\n\n1762213\n\nimage/png\n\n\n\n\n\n\nx-ms-date:Thu, 24 Sep 2020
12:24:05 GMT\nx-ms-version:2019-12-12\n/[account name]/[container
name]/[folder name]/image.png'.
Obviously I removed the actual signature since I have gotten it to work server side, but it looks something like this: hGhg765+NIGjhgluhuUYG686dnH90HKYFytf6= (I made this up, but it looks as if it's in the correct format).
I have also tried to return the parsed query string and use it in a PUT request, but then I get errors stating there is a required header missing, and I cannot figure out which one that should be. No Authorization header, for instance, should be required.
The method generateDataLakeSASQueryParameters is used to create a service SAS token. After doing that, we can call the Azure Data Lake REST API with the SAS token as a query parameter.
For example
Create a SAS token with the method generateDataLakeSASQueryParameters. When we call it, we should provide a DataLakeSASSignatureValues object: https://learn.microsoft.com/en-us/javascript/api/#azure/storage-file-datalake/datalakesassignaturevalues?view=azure-node-latest
const {
  StorageSharedKeyCredential,
  generateDataLakeSASQueryParameters,
  DataLakeSASPermissions,
} = require("@azure/storage-file-datalake");
const accountName = "testadls05";
const accountKey ="";
const now = new Date().toUTCString();
const startsOn = new Date(now);
startsOn.setMinutes(startsOn.getMinutes() - 10); // Skip clock skew with server
const expiresOn = new Date(now);
expiresOn.setHours(expiresOn.getHours() + 1); // Expires in one hour
const fileSas = generateDataLakeSASQueryParameters(
  {
    fileSystemName: "test",
    pathName: "test.jpg",
    permissions: DataLakeSASPermissions.parse("racwd"),
    startsOn: startsOn,
    expiresOn: expiresOn,
  },
  new StorageSharedKeyCredential(accountName, accountKey)
).toString();
console.log(fileSas);
Test (create file)
PUT https://{accountName}.{dnsSuffix}/{filesystem}/{path}?{sas token you created in step 1}
Headers:
Content-Type:image/jpeg
Content-Length:0
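Tying this back to the question's client side, a minimal sketch (assuming the server returns the fileSas string from step 1 together with the file URL, and file comes from an input tag as in the question) appends the SAS to the URL instead of building a SharedKey Authorization header:

const { url, fileSas } = serverResponse;        // assumed response shape from the server
const request = new XMLHttpRequest();
request.open('PUT', url + '?' + fileSas, true); // the SAS travels in the query string
request.setRequestHeader('x-ms-version', '2019-12-12');
// no Authorization header is needed; the SAS itself authorizes the request
request.send(file);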
I have a simple WebSocket API from api.bitfinex.com/ws that streams changes on the BTC/USD market.
I'm struggling with how to make this update a simple CSV file, i.e. when new data arrives over the WebSocket, update the CSV accordingly.
I tried to use fast-csv, but without success.
Here is my node js code:
const WebSocket = require('ws');
const ws = new WebSocket("wss://api.bitfinex.com/ws");
const fs = require('fs');

ws.onopen = function(){
  ws.send(JSON.stringify({'event':'subscribe', 'channel':'ticker', 'pair':'btcusd'}))
};

ws.onmessage = function(msg){
  var response = JSON.parse(msg.data);
  if (response[1] != "hb"){
    console.log("Bitfin " + response[7]);
    //HERE I need to update existing CSV file,
    //for example
    //BTC,xxxx
    //ETH,xxxx
    //two columns, and n rows..
  }
};
So, any idea how to stream into a CSV (BTCUSD in row 1, column 2; ETH in row 2, column 2, etc.)?
Seems to me that you should be using a writeStream. Something like this:
const fs = require('fs')

let writeStream = fs.createWriteStream(fileName) // fileName: path to your CSV file

ws.onmessage = function(msg){
  //... your code ...
  writeStream.write(msg)
}

writeStream.on('finish', () => {
  console.log(`Finished writing!`)
})
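A write stream only appends, though, and the question asks for fixed rows that get updated in place. One way to do that (a sketch; the ticker.csv file name and the pair labels are my own choices) is to keep the latest price per pair in an object and rewrite the whole file on each message:

const fs = require('fs');

// latest known price per pair; add more pairs/channels as needed
const prices = { BTC: null, ETH: null };

function writeCsv() {
  const lines = Object.keys(prices).map(k => k + ',' + prices[k]);
  fs.writeFileSync('ticker.csv', lines.join('\n') + '\n');
}

ws.onmessage = function (msg) {
  const response = JSON.parse(msg.data);
  if (response[1] != "hb") {
    prices.BTC = response[7]; // last-price field from the ticker payload
    writeCsv();
  }
};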
I have been searching for two days now, looking for a solution that might work for me. Sadly, I have only seen examples and guides on how to set up a websocket server (that sends messages back and forth to clients) and a websocket client (that resides in the browser). None of these really work for me, and I am not sure how to achieve what I want here.
Basically I have the following websocket:
require('dotenv').config()
const WebSocket = require('ws');

var connection = new WebSocket('ws://XXX');

connection.onopen = function () {
  connection.send(JSON.stringify({"authenticate":process.env.API}));
  connection.send(JSON.stringify({"XXX":"YYY"}));
  connection.send(JSON.stringify({
    "db" : "unique_id",
    "query" : {
      "table" : "users"
    }
  }));
};

connection.onerror = function (error) {
  console.log('WebSocket Error ' + error);
};

connection.onmessage = function (e) {
  console.log('Server: ' + e.data);
  var myResponse = JSON.parse(e.data);
  var qList = myResponse.results;
};
What I want to do is have my Node.js script running, for example an Express app with an HTML page, that also includes the response from onmessage. The reason I am complicating this instead of just using the websocket client-side is that I cannot send my auth code publicly.
Hope I have been clear enough, let me know if you are unsure of my question!
PS. If you think I would be better off using another websocket library such as Socket.io - I have been looking at them and have not gotten much wiser, sadly.
You have a lot of options. Probably the easiest is to export the connection at the bottom of the file, e.g. module.exports = connection.
Then in the Express application, import the connection, e.g. const connection = require('./path/connection');
Then make a function that calls itself at a given interval and sends the appropriate message.
Then within the Express app you can use something like connection.on('message', (data, flags) => /* do stuff */);
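A minimal sketch of that first option (the file names, port, and route are my own choices; the connection setup is the question's code):

// ./ws-connection.js — the websocket file from the question, plus one line at the bottom
require('dotenv').config();
const WebSocket = require('ws');
const connection = new WebSocket('ws://XXX');
// ...onopen / onerror handlers as in the question...
module.exports = connection;

// ./app.js — the Express side
const express = require('express');
const connection = require('./ws-connection');

let latest = null;
connection.on('message', (data) => {
  latest = JSON.parse(data).results; // keep the most recent response around
});

const app = express();
app.get('/', (req, res) => res.json(latest)); // serve it without exposing the auth code
app.listen(3000);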
Your other option is to create a store object. E.g.
// ./store.js
class Store {
  constructor() {
    this.wsMaterial = {};
  }
  add(key, wsInfo) {
    this.wsMaterial[key] = wsInfo
  }
  get store() {
    return this.wsMaterial
  }
}

module.exports = new Store()
Then import the store and update it, e.g.

// ./websocket file
const store = require('./store');
...
connection.onmessage = function (e) {
  console.log('Server: ' + e.data);
  var myResponse = JSON.parse(e.data);
  var qList = myResponse.results;
  store.add('results', qList) // store under a key, per the add(key, wsInfo) signature
};
Then from Express...
// ./express.js
const store = require('./store');

store.store // all of your stuff;
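For instance, an Express route could expose whatever the websocket handler has stored (the route path and port are my own choices):

// ./express.js
const express = require('express');
const store = require('./store');

const app = express();
app.get('/results', (req, res) => {
  res.json(store.store); // whatever the websocket handler stored so far
});
app.listen(3000);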
I'm trying to recreate the functionality of a hardware serial server with Node and it's actually working, but I'm getting errors from socket instances that have been closed.
Here's a simplified version of the app to show what I'm doing...
var net = require('net');
var SerialPort = require('serialport');

var connectionCounter = 0;

var port = new SerialPort('/dev/ttyUSB0', function () {
  var server = net.createServer();
  server.on('connection', function(socket) {
    connectionCounter++;
    var connNumber = connectionCounter;
    socket.on('error', function () {
      console.log('socket ' + connNumber + ' errored');
    });
    socket.on('data', function(data) {
      port.write(data);
    });
    port.on('data', function(data) {
      socket.write(data);
    });
  });
  server.listen(8887, '127.0.0.1');
});
So the first chunk of code that's sent into the 8887 port works fine, and it returns the data back out through the socket. The errors start on the second chunk. In the example, I'm keeping a count of the socket instances and outputting the instance number with the error. As the program runs, the number of socket instances keeps going up. The most recent instance will eventually handle the data, but I can't figure out what I need to delete to clean up all of the previous socket instances so they'll stop trying to process the incoming data.
I've tried socket.end() and socket.destroy(), but those don't seem to work. Do I need to go as far as deleting the server itself and recreating it?
If anyone ever finds this and cares about what was going wrong: I was setting an event listener on the serialport object every time a new net socket was created. So even though I was deleting the socket every time it was closed, the serialport listener was still trying to send data to all of the old, deleted sockets. The solution was to remove the listener from the serialport object upon closing the net socket.
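In code, that fix might look something like this (a sketch reconstructed from the description above, using a named handler so exactly that listener can be removed):

server.on('connection', function (socket) {
  // named handler so we can detach this exact listener later
  function forward(data) {
    socket.write(data);
  }
  port.on('data', forward);

  socket.on('data', function (data) {
    port.write(data);
  });

  socket.on('close', function () {
    // stop forwarding serial data to this closed socket
    port.removeListener('data', forward);
  });
});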
You can use an array for storing the sockets; later on you can delete them from it. This is sample code; hope you get the idea:
var net = require('net');
var SerialPort = require('serialport');

var connectionCounter = 0;
var mySockets = [];

var port = new SerialPort('/dev/ttyUSB0', function () {
  var server = net.createServer();
  server.on('connection', function(socket) {
    mySockets.push(socket);
    connectionCounter++;
    var connNumber = connectionCounter;
    socket.on('error', function () {
      console.log('socket ' + connNumber + ' errored');
    });
    socket.on('data', function(data) {
      port.write(data);
    });
    port.on('data', function(data) {
      socket.write(data);
    });
  });
  server.listen(8887, '127.0.0.1');
});

// later, get the sockets you want to delete
var s = mySockets.pop();
s = null;
I'm trying to manage a bunch of socket connections. My app is basically an http server that receives posts and passes these along to a socket. When clients open a socket connection, they send a connect message with an id:
{"m":"connect","id":"1"}
The app then saves this id and socket in the id2socket and socket2id maps. On disconnect, the socket/id pair is deleted from the maps.
A post will also contain an id, which indicates the post data should be sent to the socket with that id.
That's great, and this works fine for a single open socket. However, when I have more than one socket open and then close one, that disconnect wipes everything from the maps. I think my understanding of sockets in Node is incomplete: is there only a single socket object used in the callback? Is there a better way to manage my open socket connections and ids?
start server:
>>node server.js
TCP server listening on 127.0.0.1:5280
HTTP server listening on 127.0.0.1:9002
telnet in:
>>telnet localhost 5280
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^]'.
{"m":"connect","id":"123"}
{"m":"connect","id":"123","success":"true"}
server after connection:
>>Connection from 127.0.0.1:57572
received data: {"m":"connect","id":"123"}
id: 1
m: connect
associating uid 1 with socket [object Object]
do a post:
python post.py {"foo":"bar"}
So this works fine for several open sockets (as long as one device is id 123; the server has this hardwired for now). However, as soon as you close one connection, all the socket connections are removed from the map.
Here's my code:
python script to do post:
import sys
import json
import httplib, urllib, urllib2
values = json.loads('{"foo":"bar"}')
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection('127.0.0.1', 9002)
headers = {"Content-type": "application/json"}
conn.request("POST", "", json.dumps(values), headers)
response = conn.getresponse()
print "response.status: "+response.status
print "response.reason: "+response.reason
print "response.read: "+response.read()
conn.close()
node server (http and tcp), hardwired to send data to device '123' on post:
var net = require('net');       // tcp-server
var http = require("http");     // http-server
var qs = require('querystring'); // http-post

// Map of sockets to devices
var id2socket = new Object;
var socket2id = new Object;

// Setup a tcp server
var server_plug = net.createServer(function(socket) {
  // Event handlers
  socket.addListener("connect", function(conn) {
    console.log("Connection from " + socket.remoteAddress + ":" + socket.remotePort );
  });
  socket.addListener("data", function(data) {
    console.log("received data: " + data);
    try {
      request = JSON.parse(data);
      response = request;
      if(request.m !== undefined && request['id'] !== undefined){ // hack on 'id', id is js obj property
        console.log("id: "+request['id']);
        console.log("m: "+request.m);
        if(request.m == 'connect'){
          console.log("associating uid " + request['id'] + " with socket " + socket);
          id2socket[request['id']] = socket;
          socket2id[socket] = request['id'];
          response.success = 'true';
        } else {
          response.success = 'true';
        }
      }
      socket.write(JSON.stringify(response));
    } catch (SyntaxError) {
      console.log('Invalid JSON:' + data);
      socket.write('{"success":"false","response":"invalid JSON"}');
    }
  });
  socket.on('end', function() {
    id = socket2id[socket]
    console.log("socket disconnect by id " + id);
    // wipe out the stored info
    console.log("removing from map socket:"+socket+" id:"+id);
    delete id2socket[id];
    delete socket2id[socket];
  });
  socket.on('timeout', function() {
    console.log('socket timeout');
  });
});

// Setup http server
var server_http = http.createServer(
  // Function to handle http:post requests, need two parts to it
  // http://jnjnjn.com/113/node-js-for-noobs-grabbing-post-content/
  function onRequest(request, response) {
    request.setEncoding("utf8");
    request.addListener("data", function(chunk) {
      request.content += chunk;
    });
    request.addListener("end", function() {
      console.log("post received!");
      //console.log("Request received: "+request.content);
      if (request.method == 'POST') {
        //var json = qs.parse(request.content);
        //console.log("Post: "+json);
        // HACK TO TEST STUFF:
        // send a message to one of the open sockets
        try {
          var socket = id2socket['123']; //hardwired
          socket.write('{"m":"post"}');
        } catch (Error) {
          console.log("Cannot find socket with id "+'123');
        }
      }
    });
  }
);

// Fire up the servers
var HOST = '127.0.0.1';
var PORT = 5280;
var PORT2 = 9002;

server_plug.listen(PORT, HOST);
console.log("TCP server listening on "+HOST+":"+PORT);
server_http.listen(PORT2);
console.log("HTTP server listening on "+HOST+":"+PORT2);
Objects only take strings as keys for their properties. As your log shows, a socket object is converted into the string "[object Object]". As a result, every socket comes down to the same string key: socket #2 overwrites the id from socket #1, and there is only ever one property in the object. When you try to remove the id for socket #2, that single property is deleted and the object is empty.
You seem to want a custom property for each separate socket when used as a key. You can use WeakMaps for this. WeakMaps do allow objects as keys (as opposed to string-only keys), but as they're relatively new they may contain bugs at the moment.
(Note that the id2socket map can just be a plain object, because numbers are converted into strings just fine, and each number has its own, distinct string representation*.)
Using WeakMaps is as follows:
var socket2id = new WeakMap; // as if you were doing: var socket2id = {};
socket2id.set(socket, id); // as if you were doing: socket2id[socket] = id;
socket2id.get(socket); // as if you were doing: socket2id[socket];
socket2id.delete(socket); // as if you were doing: delete socket2id[socket];
Make sure to run with node --harmony (>= 0.7) or node --harmony_weakmaps (<= 0.6).
* 0 and -0 are exceptions, but you shouldn't be using -0 anyway, because 0 === -0, so it's difficult to distinguish between them.
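Applied to the handlers from the question, that might look like this (a sketch that only swaps the socket2id bookkeeping over to a WeakMap):

var id2socket = {};            // plain object is fine here: ids stringify distinctly
var socket2id = new WeakMap(); // socket objects as keys need a WeakMap

// in the "data" handler, on a connect message:
id2socket[request['id']] = socket;
socket2id.set(socket, request['id']);

// in the "end" handler:
socket.on('end', function() {
  var id = socket2id.get(socket);
  delete id2socket[id];
  socket2id.delete(socket);
});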