I'm new to Node.js and jQuery, and I'm trying to update a single HTML element using a script.
I am using a Raspberry Pi 2 and an ultrasonic sensor to measure distance. I want to measure continuously and update the HTML document with the real-time values as they come in.
When I run my code it behaves like a server and not a client: everything I console.log() is printed in the terminal and not in the browser's console. I currently run the code with "sudo node surveyor.js", but nothing happens in the HTML document, even though it is linked to the script. I have also tried document.getElementsByTagName("h6").innerHTML = distance.toFixed(2), but that fails with "document is not defined".
Is there any easy way to fix this?
My code so far is:
var statistics = require('math-statistics');
var usonic = require('r-pi-usonic');
var fs = require("fs");
var path = require("path");
var jsdom = require("jsdom");
var htmlSource = fs.readFileSync("../index.html", "utf8");
var init = function(config) {
    usonic.init(function (error) {
        if (error) {
            console.log('error');
        } else {
            var sensor = usonic.createSensor(config.echoPin, config.triggerPin, config.timeout);
            //console.log(config);
            var distances;

            (function measure() {
                if (!distances || distances.length === config.rate) {
                    if (distances) {
                        print(distances);
                    }
                    distances = [];
                }

                setTimeout(function() {
                    distances.push(sensor());
                    measure();
                }, config.delay);
            }());
        }
    });
};
var print = function(distances) {
    var distance = statistics.median(distances);

    process.stdout.clearLine();
    process.stdout.cursorTo(0);

    if (distance < 0) {
        process.stdout.write('Error: Measurement timeout.\n');
    } else {
        process.stdout.write('Distance: ' + distance.toFixed(2) + ' cm');
        call_jsdom(htmlSource, function (window) {
            var $ = window.$;
            $("h6").replaceWith(distance.toFixed(2));
            console.log(documentToSource(window.document));
        });
    }
};
function documentToSource(doc) {
    // The non-standard window.document.outerHTML also exists,
    // but currently does not preserve source code structure as well.
    // The following two operations are non-standard.
    return doc.doctype.toString() + doc.innerHTML;
}
function call_jsdom(source, callback) {
    jsdom.env(
        source,
        ['jquery-1.7.1.min.js'],
        function (errors, window) {
            process.nextTick(
                function () {
                    if (errors) {
                        throw new Error("There were errors: " + errors);
                    }
                    callback(window);
                }
            );
        }
    );
}
init({
    echoPin: 15,    // Echo pin
    triggerPin: 14, // Trigger pin
    timeout: 1000,  // Measurement timeout in µs
    delay: 60,      // Measurement delay in ms
    rate: 5         // Measurements per sample
});
Node.js is a server-side implementation of JavaScript. It's fine to do all the sensor operations and calculations on the server side, but you need some mechanism to deliver the results to your clients. If they are going to use your application through a web browser, you must run an HTTP server, for example with Express.js, and create a route (something like http://localhost/surveyor or just http://localhost/) that calls a method you have implemented on the server side and does something with the result. One possible way to return the resulting data to your clients is to render an HTML page that shows it; for that you should use a template engine.
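For illustration only, here is a minimal sketch of that idea using Express (the /distance route, the latestDistance variable, the public folder and the port are assumptions made up for this example, not part of the original code):

var express = require('express'); // npm install express
var app = express();

var latestDistance = null;

// The print() function from the question could call this with the median
// instead of (or in addition to) writing to stdout.
function updateDistance(distance) {
    latestDistance = distance;
}

// Serve the static page that contains the <h6> element.
app.use(express.static(__dirname + '/public'));

// Clients ask this route for the current value whenever they need it.
app.get('/distance', function (req, res) {
    res.json({ distance: latestDistance });
});

app.listen(3000, function () {
    console.log('Listening on http://localhost:3000');
});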
Any DOM manipulation should be done on the client side (you could, for example, include a <script> tag inside your template HTML just to experiment and understand how it works, but it is not recommended to do this in production environments).
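As a rough example of that client-side approach (assuming the hypothetical /distance route from the sketch above and jQuery loaded in the page), the script in your HTML could simply poll the server and update the element:

// Client-side sketch: runs in the browser, not in Node.
setInterval(function () {
    $.getJSON('/distance', function (data) {
        if (data.distance !== null) {
            $('h6').text(data.distance.toFixed(2));
        }
    });
}, 1000); // poll once per second; adjust as needed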
Try searching google for Node.js examples and tutorials and you will get it :)
Related
I am working with pngjs through many of its methods. Most of the time they work fine. However, as in the following example, I sometimes get the error: "Stream is not writable"
var fs = require('fs'),
    PNG = require('pngjs').PNG;

var dst = new PNG({width: 100, height: 50});

fs.createReadStream('http://1.1m.yt/hry7Eby.png') // download this picture in order to examine the code
    .pipe(new PNG())
    .on('parsed', function(data) {
        console.log(data);
    });
This case is not isolated: I get this error on one random PNG image about once a day, across all of pngjs's methods, and that error obviously crashes my app.
(Note: you can't use the HTTP link I gave you with a read stream; you will have to download and rename the file and do something like:)
fs.createReadStream('1.png')
Thank you for your time and effort.
This seems to be a bug in the library, though I'm wary of saying so as I'm no expert in PNGs. The parser seems to complete while the stream is still writing. It encounters the IEND chunk and so calls this:
ParserAsync.prototype._finished = function() {
    if (this.errord) {
        return;
    }

    if (!this._inflate) {
        this.emit('error', 'No Inflate block');
    }
    else {
        // no more data to inflate
        this._inflate.end();
    }

    this.destroySoon();
};
If you comment out the this.destroySoon(); call, it finishes the image correctly instead of eventually calling this function:
ChunkStream.prototype.end = function(data, encoding) {
    if (data) {
        this.write(data, encoding);
    }
    this.writable = false;

    // already destroyed
    if (!this._buffers) {
        return;
    }

    // enqueue or handle end
    if (this._buffers.length === 0) {
        this._end();
    }
    else {
        this._buffers.push(null);
        this._process();
    }
};
...which would otherwise end up setting the stream's writable flag to false or, if you comment that out, pushing a null value into the _buffers array and breaking ChunkStream._processRead.
I'm fairly certain this is a timing problem between when the zlib parser completes and when the stream completes, since if you do it synchronously it works fine:
var data = fs.readFileSync('pic.png');
var png = PNG.sync.read(data);
var buff = PNG.sync.write(png);
fs.writeFileSync('out2.png', buff);
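If patching the library is not an option, a defensive sketch like the following may at least keep the occasional bad image from crashing the whole app. It assumes the failure surfaces as a stream 'error' event rather than a synchronous throw, so treat it as a workaround to try, not a guaranteed fix:

var fs = require('fs'),
    PNG = require('pngjs').PNG;

var png = new PNG();

// Handle parser errors so one bad image does not take the process down.
png.on('error', function (err) {
    console.error('Failed to parse PNG:', err.message);
});

png.on('parsed', function (data) {
    console.log('Parsed OK, pixel data length:', data.length);
});

fs.createReadStream('1.png')
    .on('error', function (err) {
        console.error('Failed to read file:', err.message);
    })
    .pipe(png);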
Our extension (Add-on SDK) looks for new files in the folder C:\scan and sends them to a server. Every second the extension checks for the latest file creation time and marks that file as the newest (comparing the new file's creation time with the one from a second ago).
Files are put into C:\scan by a Brother 7050 scanner on Windows 7.
But sometimes we see this in console.error:
Exception
message: "Component returned failure code: 0x8052000e (NS_ERROR_FILE_IS_LOCKED)
[nsIFileInputStream.init]",
result: 2152857614,
name: "NS_ERROR_FILE_IS_LOCKED"
I think the Brother 7050 application has not had time to unlock the file before our extension starts to read it.
Q: How can we reliably read the latest file in the folder without hitting the file lock error?
/*
  adr    - folder path
  array2 - array for search
  mode   - search or not search in array2 (0-1)
*/
function getfilelist(adr, array2, mode)
{
    filelist2 = [];
    filelist2[0] = "";
    filelist2[1] = 0;
    var file = new FileUtils.File(adr);
    var enumerator = file.directoryEntries;
    while (enumerator.hasMoreElements())
    {
        inner = enumerator.getNext().QueryInterface(Ci.nsIFile);
        if (inner.isFile())
        {
            namearray = inner.leafName.split(".");
            r = namearray[namearray.length - 1];
            if (r == "jpg" || r == "jpeg")
            {
                if (mode == 0)
                {
                    if (inner.lastModifiedTime > filelist2[1])
                    {
                        filelist2[0] = inner.leafName;
                        filelist2[1] = inner.lastModifiedTime;
                    }
                }
                else if (mode == 1)
                {
                    if (inner.lastModifiedTime > array2[1] && inner.isReadable() == true)
                        return inner.leafName;
                }
            }
        }
    }
    if (mode == 0)
    {
        return filelist2;
    }
    return false;
}
The reason why you see NS_ERROR_FILE_IS_LOCKED is most likely that the file is still being written and you are trying to access it too early. However, it is also possible that some other software immediately locks the file to check it, e.g. your anti-virus.
Either way, there is no way to ignore the lock. Even if you could, you might get an incomplete file as a result. What you should do is note the exception and remember to try reading that file again on the next run. Something along these lines:
var {Cr} = require("chrome");
var unaccessible = null;
setInterval(checknewfiles, 1000);
function checknewfiles()
{
var files = getfilelist(...);
if (unaccessible)
{
// Add any files that we failed to read before to the end of the list
files.push.apply(files, unaccessible);
unaccessible = null;
}
for (var file of files)
{
try
{
readfile(file);
}
except(e if e.result == Cr.NS_ERROR_FILE_IS_LOCKED)
{
if (!unaccessible)
unaccessible = [];
unaccessible.push(file);
}
}
}
For reference:
Components.results
Chrome authority
Conditional catch clauses
for..of loop
I'm using node-typekit to create a new, empty Typekit font set in my Yeoman generator for use in my project. I am able to create the kit successfully, but cannot figure out how to return the kit ID value to the Yeoman generator so I can add the necessary Typekit script tag values to my web pages. Here is the relevant part of my generator's index.js:
At the top of index.js:
var kit = require('typekit');
var typekitID = '';

function setTypekitID(theid) {
    typekitID = theid;
}
And the app section:
app: function () {
    var token = 'xxxxxxxxxxxxxxxxxxxxxxxx';
    var split = this.domainname.split('.');
    split.pop();
    var localdomain = split.join('.') + '.dev';

    kit.create(token, {
        name: this.appname,
        badge: false,
        domains: [this.domainname, localdomain],
        families: []
    }, function (err, data) {
        setTypekitID(data.kit.id);
    });
}
If instead of:
setTypekitID(data.kit.id);
I use:
console.log(data.kit.id);
The correct kit ID is displayed in the console. However, I can't quite figure out how to pass the data.kit.id value from the callback back to the generator for further use. With the code above, it comes back as "undefined".
Any ideas? Thanks!
Without any experience with typekit, I would guess that kit.create is an asynchronous call. When you make such a call, the generator needs to know about it, so it can delay invocation of further methods until it knows your asynchronous callback has had a chance to execute.
Try doing this:
app: function () {
    var done = this.async(); // this tells the generator, "hang on, yo."

    // ...

    kit.create(token, { /* ... */ }, function (err, data) {
        setTypekitID(data.kit.id);
        done(); // calling this resumes the generator.
    });
}
An example of how the default generator-generator uses this can be found here.
I am trying to initialize a Breeze manager inside a 'Web Worker'.
RequireJS, Knockout, Q and Breeze are imported inside the worker.
After a call to EntityQuery.from('name').using(manager).execute(),
the following error appears:
Uncaught Error: Q is undefined. Are you missing Q.js? See https://github.com/kriskowal/q.
A live preview is uploaded here http://plnkr.co/edit/meXjKa?p=preview
(the plunk supports downloading for easier debugging).
EDIT -- relevant code
Worker.js
importScripts('knockout.js', 'q.js', 'breeze.js', 'require.js');

define('jquery', function () { return jQuery; });
define('knockout', ko);
define('q', Q); // Just trying to assign q since breeze requests Q as q

require(function () {
    var self = this;
    this.q = this.Q; // Just trying to assign q since breeze requests Q as q

    breeze.NamingConvention.camelCase.setAsDefault();
    var manager = new breeze.EntityManager("breeze/Breeze");
    var EntityQuery = breeze.EntityQuery;

    // Q or q here is defined (TESTED)
    var test = function (name) {
        return EntityQuery.from(name)
            .using(manager).execute() // <-- Here q/Q breaks (I think on execute)
    };

    var primeData = function () {
        return test('Languages')
            .then(test('Lala'))
            .then(test('Lala2'))
    };

    primeData();

    setTimeout(function () { postMessage("TestMan"); }, 500);
});
The worker is initialized on the main page with:
var myWorker = new Worker("worker.js");
OK, here it goes:
Create a modified require.js and change
isBrowser = !!(typeof window !== 'undefined' && typeof navigator !== 'undefined' && window.document)
to
isBrowser = false
Create a modified jQuery so that it uses nothing related to window, or generally anything a Web Worker cannot access. Unfortunately I can't remember where I got this custom jQuery file, but I have uploaded it here: "https://dl.dropboxusercontent.com/u/48132252/jqueydemo.js".
If you find the author or the original, please update the link and give credit.
My worker JS file looks like this:
importScripts('Scripts/test.js', 'Scripts/jqueydemo.js', 'Scripts/q.js', 'Scripts/breeze.debug.js', 'Scripts/require2.js');

define('jquery', function () { return jQuery; });

require(
    {
        baseUrl: "..",
    },
    function () {
        var manager = new breeze.EntityManager("breeze/Breeze");
        var EntityQuery = breeze.EntityQuery;

        var primeData = function () {
            return EntityQuery.from(name)
                .using(manager).execute() // Get my data
                .then(function (data) {
                    console.log("fetched!\n" + ((new Date()).getTime()));
                    var exportData = manager.exportEntities(); // Export my constructed entities
                    console.log("created!\n" + ((new Date()).getTime()));
                    var lala = JSON.stringify(exportData);
                    postMessage(lala); // Send them as a string to the main thread
                })
        };

        primeData();
    });
Finally, in my main JS I have something like:
this.testWorker = function () {
    var myWorker = new Worker("worker.js");  // Init worker
    myWorker.onmessage = function (oEvent) { // On worker job finished
        toastr.success('Worker finished and returned');
        var lala = JSON.parse(oEvent.data);  // Reverse string to JSON
        manager.importEntities(lala);        // Import the pre-constructed entities to breezeManager
        toastr.success('Import done');
        myWorker.terminate();
    };
};
So we have managed to use Breeze in a Web Worker environment to fetch and create all of our entities, and then pass the exported entities to our main Breeze manager on the main thread (import).
I have tested this with 9 tables fully related to each other and about 4MB of raw data.
PROFIT: the UI stays fully responsive the whole time.
No more long-running-script warnings, "application not responding", or out-of-memory errors (at least for Chrome).
*As you would expect, importing entities with Breeze is much faster than creating the entities from a full 4MB of raw data plus running the association process for them.
By having all the heavy work done in the background and only using importEntities on the front, Breeze allows you to handle large datasets 'like a breeze'.
I'm trying to query a large set of results from MongoDB from Python. I do this via JavaScript, because I want to get something like the grandchildren in a tree-like structure. My code looks like the following:
col = db.getCollection(...)
var res = new Array();
col.find( { "type" : ["example"] } ).forEach(
    function(entry)
    {
        v1 = col.find( {"_id" : entry["..."]} )
        ... (walk through the structure) ...
        vn = ...
        res.push([v1["_id"], vn["data"]]);
    }
);
return res;
Now I have the problem that the resulting array becomes very (too) large and memory is exceeded. Is there a way to yield the results instead of pushing them into an array?
Alright, I think I know what you mean. I created a structure like the following:
var bulksize = 1000;
var col = db.getCollection("..");
var queryRes = col.find( { ... } )

process = function(entity) { ... }

nextEntries = function()
{
    var res = new Array();
    for(var i = 0; i < bulksize; i++)
    {
        if(hasNext())
            res.push(process(queryRes.next()));
        else
            break;
    }
    return res;
}

hasNext = function()
{
    return queryRes.hasNext();
}
The script separates the results into bulks of 1000 entries. From the Python side I eval the script above and then do the following:
while database.eval('hasNext()'):
    print "test"
    for res in database.eval('return nextEntries()'):
        doSth(res)
The interesting thing is that the console always prints:
test
test
test
test
test
test
Then I get the error:
pymongo.errors.OperationFailure: command SON([('$eval', Code('return nextEntries()', {})), ('args', ())]) failed: invoke failed: JS Error: ReferenceError: nextEntries is not defined nofile_a:0
This means that the first calls of nextEntries() work, but then the function is no longer there. Could it be that MongoDB does something like clearing the JavaScript cache? The problem does not depend on the bulk size (tested with 10, 100, 1000 and 10000, always with the same result).
Alright, I found a line in the MongoDB source code that clears all JavaScript functions that have been used more than 10 times. So if no changes on the database server are wanted, it is necessary to query the database multiple times and send bulks to the client by selecting slices of items with the skip() and limit() functions. This works surprisingly fast. Thanks for your help.
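For illustration only, here is a minimal mongo-shell-style sketch of that batching idea (the collection name, the filter and the batch size are made up for this example; the same skip()/limit() pattern can be driven from pymongo on the client side):

// Page through a large result set in fixed-size bulks using skip() and limit().
var col = db.getCollection("mycollection");
var batchSize = 1000;
var skipped = 0;

while (true) {
    var batch = col.find({ "type": ["example"] })
                   .skip(skipped)
                   .limit(batchSize)
                   .toArray();
    if (batch.length === 0) {
        break; // no more documents
    }
    // process this bulk / send it back to the client here
    print("got " + batch.length + " documents");
    skipped += batch.length;
}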