So in my js code, the user is making mosaic images. When they press upload, I create a new object and all is good, my code works perfectly. I then save the id to that object that was just uploaded. If the user presses the upload button again, I want to simply use that same object id and clear out the files that were saved before and use the newest version. All of my saves, updates, and everything appear to be working just fine. When I go to the data browser though, I see the files, but when I click, the link is broken. (Which doesn't seem like it should even be possible.) 95% of my code is below; there is some stuff outside of that to generate the mosaic image and to store the id returned. How can I avoid these broken links?
var ins_file = new Parse.File("Instructions.png", { base64: img_ins_InBase64 }, "image/png");
var pretty_file = new Parse.File("Pretty.png", { base64: img_pretty_InBase64 }, "image/png");

// Attach the (already saved) files and their URLs to `obj`, then save it
// once. Because both Parse.File saves have completed before this runs,
// .url() is valid immediately and a single object save suffices
// (the original saved the object twice).
function saveMosaic(obj) {
    obj.set("img_ins", ins_file);
    obj.set("img_pretty", pretty_file);
    obj.set("img_ins_url", ins_file.url());
    obj.set("img_pretty_url", pretty_file.url());
    return obj.save().then(function (saved) {
        console.log("Mosaic saved, id was: " + saved.id);
        parseId = saved.id;
    }, function (error) {
        // The object could not be saved to Parse.
        console.log("Mosaic couldn't be saved: " + error.message);
    });
}

// Wait for BOTH file uploads to finish before attaching them to the
// Mosaics object. Attaching a Parse.File whose save has not completed is
// what produces broken links in the data browser: the object ends up
// referencing a file that was never fully uploaded.
Parse.Promise.when([ins_file.save(), pretty_file.save()]).then(function () {
    var mosaicClass = Parse.Object.extend("Mosaics");
    var query = new Parse.Query(mosaicClass);
    return query.get(parseId).then(function (objToUpdate) {
        // The object was retrieved successfully — SIMPLY UPDATE it,
        // replacing the previously stored files.
        console.log("Initial Image updated");
        return saveMosaic(objToUpdate);
    }, function () {
        // parseId was null / not found, so make a new object.
        console.log("Initial Images uploaded");
        return saveMosaic(new mosaicClass());
    });
}, function (error) {
    // At least one file could not be read or could not be saved to Parse.
    console.log("A file couldn't be saved: " + error.message);
});
It appears you're not waiting for the files to save before trying to assign them to a parse object. Async JS will get you every time.
You should be saving the file object on the parse object only after the save, inside the then. You can put both file saves in an array and use Parse.Promise.when([promise1, promise2]).then(...); too.
Related
I have the JSON data load at the start, and also pull to refresh. During these times a small delay is expected. When going between pages, it should be snappy, so I am looking to use this already requested JSON for my page content. One of the JSON objects is the entire (small enough) html page.
I cannot find a way to use this, and instead am following the examples making a second JSON get request before loading each page (article). I would rather just load the JSON data once at the start and use it until refreshed with pull-to-refresh.
* Currently Working, but using a second JSON get *
{
path: '/article/:article_id/',
// Working route, but it performs a second JSON request for every article
// view instead of reusing the feed JSON already loaded at startup.
// This works after much turmoil.
// Sadly it has to do a second JSON call; the data could have been taken
// from the initial request.
async: function (routeTo, routeFrom, resolve, reject) {
// Earlier attempts at reusing the preloaded window.TodayJsonDB,
// kept for reference:
// import('window.TodayJsonDB['+ routeTo.params.article_id +'][\'html\']');
// window.TodayJsonDB[routeTo.params.article_id]['html'];
// [data['article']['article_html']
// console.log(routeTo);
// Get external data and return a Template7 template.
this.app.request.json('/__php/json1.php', { one: 1, article_id: routeTo.params.article_id }, function (data) {
// console.log(data['article'][0]['article_html']);
resolve(
// The DOM is locked until resolve is called.
// Pass an object with the resolved route content. Must contain one of:
// url, content, template, templateUrl, component or componentUrl.
{
content: data['article'][0]['article_html'],
},
);
});
}
// A day of testing, but couldn't figure out how to use the existing JSON feed.
//asyncComponent: () => import('window.TodayJsonDB['+ params.article_id +'][\'html\']'),
//el: window.TodayJsonDB[params.article_id]['html'],
//el: import('window.TodayJsonDB['+ params.article_id +'][\'html\']'),
//template: import('window.TodayJsonDB['+ params.article_id +'][\'html\']'),
//template: import('window.TodayJsonDB[' + params.article_id +'][html]'),
//asyncComponent: () => import('window.TodayJsonDB[' + params.article_id +'][html]'),
//asyncComponent: () => import('window.TodayJsonDB[' + $route.params.article_id +'][html]'),
//asyncComponent: () => import('window.TodayJsonDB[' + {{article_id}} +'][html]'),
//asyncComponent: () => import('window.TodayJsonDB[11][\'html\']'),
//content: window.TodayJsonDB[':article_id']['html'],
},
I already have this json get already; loaded when the app opens and updated with pull-down: window.TodayJsonDB
which contains:
window.TodayJsonDB[data['article'][i]['article_id']] = new Array();
window.TodayJsonDB[data['article'][i]['article_id']]['article_id'] = data['article'][i]['article_id'];
window.TodayJsonDB[data['article'][i]['article_id']]['title'] = [data['article'][i]['article_title']];
window.TodayJsonDB[data['article'][i]['article_id']]['content'] = [data['article'][i]['article_content']];
window.TodayJsonDB[data['article'][i]['article_id']]['html'] = [data['article'][i]['article_html']];
So my question is; how can I use the content of window.TodayJsonDB[article_id]['html'] to appear as the page content instead of having to do another JSON call when the user clicks a link.
My attempts in the code, commented out. Any other suggestions on how to approach the entire thing differently very much welcome.
Thanks as ever.
n.b. I tagged Vue as I believe closely related with Framework7. I am not using Vue.
Found a solution, my working code snippet is below. I used the same async section and converted the array output to a string using toString(). Only appears to work in the async section.
Can now load up the JSON for everything at the start, one JSON call.
Maybe will help someone else with Framework7. Good Luck!
{
path: '/article/:article_id/',
async: function (routeTo, routeFrom, resolve, reject) {
// Do we already have the JSON for the page?
if (typeof window.TodayJsonDB[routeTo.params.article_id]['html'] != "undefined") {
resolve({
content: (window.TodayJsonDB[routeTo.params.article_id]['html'].toString()),
});
}
else{
// Try and get it
this.app.request.json('/__php/json1.php', { one: 1, article_id: routeTo.params.article_id }, function (data) {
resolve(
{
content: data['article'][0]['article_html'],
},
);
});
}
}
NativeScript core CameraPlus with ML Kit doesn't work if saveToGallery is false. Is there any method to do this without saving the photo in the gallery? It works with the basic camera plugin.
Here is my code:
const HomeViewModel = require("./home-view-model");
const firebase = require("nativescript-plugin-firebase");
// Page setup: cache the Page and its view-model, and subscribe to the
// CameraPlus photoCapturedEvent.
// NOTE(review): `page` and `mv` are implicit globals (no var/let/const);
// declare them at module scope.
exports.onNavigatingTo = function (args) {
page = args.object;
mv = page.bindingContext = new HomeViewModel();
page.getViewById("camPlus")
.addEventListener("photoCapturedEvent", photoCapturedEvent);
};
// Tap handler: trigger a capture on the CameraPlus view.
// NOTE(review): `camera` is an implicit global; prefer a local const.
exports.onCapture = function() {
camera = page.getViewById("camPlus");
// Must be false — the whole point is to avoid saving to the gallery.
camera.takePicture({ saveToGallery: false});
};
// Receives the captured photo (args.data is an ImageAsset) and converts it
// to an ImageSource for ML Kit.
// NOTE(review): `imageSourceModule` is never required in this file — add
// e.g. const imageSourceModule = require("tns-core-modules/image-source").
function photoCapturedEvent(args) {
const source = new imageSourceModule.ImageSource();
source.fromAsset(args.data).then((imageSource) => {
getTextFromPhoto(imageSource);
}).catch(function (err) {
// With saveToGallery: false the plugin reports an asset path that was
// never written to disk, so fromAsset rejects here with
// "Asset ... cannot be found".
console.log("Error -> " + err.message);
});
}
// Run on-device ML Kit text recognition on the image; fall back to the
// online recognizer when no container id can be parsed from the result.
function getTextFromPhoto(imageSource) {
firebase.mlkit.textrecognition.recognizeTextOnDevice({
image: imageSource
}).then(function (result) {
mv.idContainer = getDataFromCameraText(result.text);
if (mv.idContainer == "") {
// Nothing usable found on-device; try the cloud recognizer.
getTextFromPhotoOnline(imageSource);
} else {
containerDataIsValid(true);
}
}).catch(function (errorMessage) {
return console.log("ML Kit error: " + errorMessage);
});
}
The method "photoCapturedEvent" gives me an error of undefined:
JS: Error -> undefined
JS: Asset
'/storage/emulated/0/Android/data/org.nativescript.gScanContainer/files/IMG_1543942676583.jpg'
cannot be found.
How could I get the image without saving it?
I checked the source code of the plugin and it seems to be a bug. As the error says, they never save the image data in the path they pass on to the photo captured event.
So the only option for you would be, always enable saveToGallery and delete the file once you are done with getTextFromPhoto.
That's an older thread but since this hasn't been fixed in the plugin, for everybody who's running into the same problem, here's a solution:
Instead of always saving the image to the gallery and then tediously fetching it from there, storing it somewhere else and delete the image from the gallery, you can simply add this to the index.ios.js:
On line 490 ff you'll find the method MySwifty.prototype.savePhoto
On iOS, this._photoToSave is a UIImage and ImageAsset doesn't seem to like that. Hence the asset is empty.
So I added this to the method:
// iOS-only patch for MySwifty.prototype.savePhoto (index.ios.js, ~line 490):
// write the UIImage to the app's Documents folder ourselves and hand the
// resulting ImageAsset to photoCapturedEvent, so saveToGallery can stay false.
if (isIOS) {
const imageFolder = knownFolders.documents();
// 0.7 is the JPEG compression quality (0..1).
const iosImage = UIImageJPEGRepresentation(this._photoToSave, 0.7);
const result1 = NSFileManager.defaultManager.createFileAtPathContentsAttributes(imageFolder.path + "/cam_capture.jpg", iosImage, null);
// Build an asset from the freshly written file — this path actually
// exists, unlike the one the unpatched plugin reports.
const asset = new ImageAsset(imageFolder.path + "/cam_capture.jpg");
_this._owner.get().sendEvent(CameraPlus.photoCapturedEvent, asset);
_this.resetPreview();
}
This saves the image and returns an image asset to the photoCapturedEvent.
You can of course change the image name to something more generic and change the image-path, too.
If you want to save the image as PNG instead of JPG, then you'd use
const iosImage = UIImagePNGRepresentation(this._photoToSave);
I am using PDFJS to get textual data from PDF files, but occasionally encountering the following error:
Error: Invalid XRef table: unexpected first object.
I would prefer that my code just skip over problem files and continue on to the next file in the list. According to PDFJS documentation, setting stopAtErrors to true for the DocumentInitParameters in PDFJS should result in rejection of getTextContent when the associated PDF data cannot be successfully parsed. I am not finding such to be the case: even after setting stopAtErrors to true, I continue to get the above error and the code seems to be "spinning" on the problem file rather than just moving on to the next in the list. It is possible that I haven't properly set stopAtErrors to true as I think I have. A snippet of my code is below to illustrate what I think I've done (code based on this example):
// Set up the parameters to pass to getDocument, including the PDF's URL.
var obj = {};
// The URL must be a string literal — the original unquoted value was a
// syntax error.
obj.url = 'http://www.whatever.com/thefile.pdf'; // the specific url linked to desired pdf file goes here
// Ask PDF.js to reject instead of limping on when parsing fails.
obj.stopAtErrors = true;
// Now have PDF.js read in the file:
PDFJS.getDocument(obj).then(function (pdf) {
    var pdfDocument = pdf;
    var pagesPromises = [];
    for (var i = 0; i < pdf.pdfInfo.numPages; i++) {
        (function (pageNumber) {
            pagesPromises.push(getPageText(pageNumber, pdfDocument));
        })(i + 1);
    }
    Promise.all(pagesPromises).then(function (pagesText) {
        // Display text of all the pages in the console.
        console.log(pagesText);
    }).catch(function (reason) {
        // A rejected page promise lands here instead of being dropped —
        // the original chain had no rejection handler, so per-page
        // failures were silently lost.
        console.log('Error! ' + reason);
    });
}, function (reason) {
    // getDocument itself failed (e.g. invalid XRef with stopAtErrors).
    console.log('Error! ' + reason);
});
// Return a promise for the concatenated text of one page (items joined
// with trailing spaces).
// The promise chain is returned directly so that a rejection anywhere
// (getPage, getTextContent, or a parse error surfaced by stopAtErrors)
// actually reaches the caller. The original wrapped the chain in
// `new Promise` but never called reject, so a failing page left the
// promise pending forever — the "spinning" described in the question.
function getPageText(pageNum, PDFDocumentInstance) {
    return PDFDocumentInstance.getPage(pageNum).then(function (pdfPage) {
        return pdfPage.getTextContent();
    }).then(function (textContent) {
        var textItems = textContent.items;
        var finalString = '';
        for (var i = 0; i < textItems.length; i++) {
            finalString += textItems[i].str + ' ';
        }
        return finalString;
    });
}
One thing I am wondering is if the stopAtErrors parameter should somehow instead be passed to getTextContent? I have not found any examples illustrating the use of stopAtErrors and the PDFJS documentation does not show a working example, either. Given that I am still at the stage of needing examples to get PDFJS to function, I am at a loss as to how to make PDFJS stop trying to parse a problem PDF file and just move on to the next one.
I am working with pngjs through many of it's methods. Most of the time, they work fine. However, like in the following example, I get an error: "Stream is not writable"
var fs = require('fs'),
PNG = require('pngjs').PNG;
// NOTE(review): `dst` is created but never used below.
var dst = new PNG({width: 100, height: 50});
// NOTE(review): fs.createReadStream cannot open an http:// URL — download
// the picture first and stream the local file instead (see the note below).
fs.createReadStream('http://1.1m.yt/hry7Eby.png') //download this picture in order to examine the code.
.pipe(new PNG())
.on('parsed', function(data) {
// `data` is the decoded RGBA pixel buffer.
console.log(data);
});
This case is not singular, I get this error on 1 random png image once a day, through all of pngjs methods, and that error obviously crashes my app.
(note: you can't use the http link I gave you with a readStream, you will have to download & rename it and do something like):
fs.createReadStream('1.png')
Thank you for your time and effort.
This seems to be a bug in the library, though I'm wary of saying so as I'm no expert in PNGs. The parser seems to complete while the stream is still writing. It encounters the IEND, and so calls this:
// Called when the parser reaches the IEND chunk (end of the PNG).
ParserAsync.prototype._finished = function() {
if (this.errord) {
return;
}
if (!this._inflate) {
// A PNG with no zlib/IDAT data is malformed.
this.emit('error', 'No Inflate block');
}
else {
// no more data to inflate
this._inflate.end();
}
// NOTE(review): destroySoon() can run while the source stream is still
// writing — the inflate teardown races the stream, producing the
// "Stream is not writable" error reported in the question.
this.destroySoon();
};
If you comment out the this.destroySoon(); it finishes the image correctly, instead of eventually calling this function:
// Stream 'end' handler: flush any final chunk, then mark the stream as no
// longer writable and drain whatever is still buffered.
ChunkStream.prototype.end = function(data, encoding) {
if (data) {
this.write(data, encoding);
}
// Any write arriving after this point will fail with
// "Stream is not writable".
this.writable = false;
// already destroyed
if (!this._buffers) {
return;
}
// enqueue or handle end
if (this._buffers.length === 0) {
this._end();
}
else {
// A null sentinel tells _process that no more data will arrive.
this._buffers.push(null);
this._process();
}
};
...which would otherwise end up setting the stream.writeable to false, or, if you comment that out, to pushing a null value into the _buffers array and screwing up the ChunkStream._processRead.
I'm fairly certain this is a synchronicity problem between the time the zlib parser takes to complete and the time the stream takes to complete, since if you do this synchronously it works fine:
// Fully synchronous round-trip: decode the PNG, then immediately
// re-encode it. No streams are involved, so the write-after-end race
// cannot occur here.
var fileBytes = fs.readFileSync('pic.png');
var decoded = PNG.sync.read(fileBytes);
var reencoded = PNG.sync.write(decoded);
fs.writeFileSync('out2.png', reencoded);
I have a server (made with Express in Node.js) that gets notifications of RSS feeds, gets data from their entries (title, date, link) and then "does something" with the data by calling a function defined in another JS file ("article_filter_toDB.js"). The code on the server-side is:
// parts omitted
var article_filter_toDB = require('./article_filter_toDB.js');
// parts omitted
client.on('notification', function (notification) {
    // Handle one batch of RSS entries pushed by the feed client.
    // `var` declarations keep the loop state local to this callback —
    // originally these were implicit globals, so overlapping
    // notifications (and the async work inside article_filter_toDB)
    // could see values belonging to a different entry.
    var entries = notification.entries;
    for (var index = 0; index < entries.length; ++index) {
        var title = entries[index].title;
        var link = entries[index].link.href;
        // FIXME: `date` is deliberately left as an implicit global —
        // article_filter_toDB.js reads it cross-module. Because that read
        // happens later, inside async callbacks, it may pick up the date
        // of a different entry; pass the date as a parameter instead.
        date = entries[index].published;
        // Minimum word count an article must reach to be stored.
        var miniwords = 1000;
        article_filter_toDB(link, title, miniwords);
    }
});
// parts omitted
What the function "article_filter_toDB" does is to get the content of the article given by the link from the RSS feed (using Request), parsing HTML code to count the words of the article, and, if this length is above "miniwords" (here 1000), save the data relative to the article (title, link, date...) to a database (MongoDB, via Mongoose).
Sometimes it works well. But sometimes it computes a length equal to 1 (that is, it was unable to really count the words) although, if I run the function "article_filter_toDB" separately (that is, the separate JS file, applied to same "link", "title", "miniwords" that I copy to it), it is able to correctly count the words.
Do you know what I'm doing wrong? Thanks!
To be more complete, here is the code of the "article_filter_toDB.js" file:
// parts omitted
// Count the words of the article at `link` and store its metadata in
// MongoDB when the count exceeds `miniwords` and no article with the same
// title is already present.
article_filter_toDB = function (link, title, miniwords) {
    Article.findOne({
        title: title
    }, 'title', function (err, articles) {
        if (err) return console.error(err);
        if (articles == null) {
            // ...no article with this title is present in the database yet...
            // parts omitted here, that set the variable "balise" depending on the link
            request(link, function (err, resp, body) {
                // Guard against failed requests: without it `body` is
                // undefined, cheerio yields an empty text, and
                // ''.split(' ') has length 1 — exactly the intermittent
                // "length equal to 1" the question describes.
                if (err || !body) {
                    return console.error(err || new Error('Empty body for ' + link));
                }
                // `var` keeps these local — they used to be implicit
                // globals shared by every in-flight request.
                var $ = cheerio.load(body);
                var texte = $(balise).text();
                var content = texte.split(" ");
                var length = content.length;
                // Save only if the article is longer than `miniwords`.
                if (length > miniwords) {
                    var newArticle = new Article({
                        site: url.parse(link).hostname.replace(/^www\./, ''),
                        // NOTE(review): `date` is an implicit global set by
                        // the server file; by the time this async callback
                        // runs it may belong to another entry — confirm,
                        // and pass it as a parameter instead.
                        date: date,
                        link: link,
                        title: title,
                        length: length,
                    });
                    newArticle.save(function (err, newArticle) {
                        if (err) return console.error(err);
                    });
                }
            });
        }
    });
};
module.exports = article_filter_toDB;
// exported so the server file can require and call it
This how you call functions from another file properly in node.js
// otherfile.js
// ========
// Export the function as a named property of module.exports so callers
// can require this file and invoke it by name.
module.exports = {
article_filter_toDB: function (link, title, miniwords) {
// do stuff here
},
};
Then on your code:
var otherfile = require('./otherfile');
...
otherfile.article_filter_toDB(link, title, miniwords);