My Resources folder contains an XML file. I need to parse it in Titanium. I have written the following code:
try {
    var file = Ti.Filesystem.getFile(Ti.Filesystem.resourcesDirectory, 'Translation.xml');
    var xmltext = file.read().text;
    var doc = Ti.XML.parseString(xmltext);
}
catch (e) {
    alert(e);
    Ti.API.info(e);
}
But I am getting the following error:
- result of expression 'file.read()' is not an object
Any solution? Thanks!
Try checking whether your file exists first:
var file = Ti.Filesystem.getFile(Ti.Filesystem.resourcesDirectory, 'Translation.xml');
if (file.exists()) {
    var xmltext = file.read().text;
    var doc = Ti.XML.parseString(xmltext);
}
It looks like the file can't be found on the system, which is why you are getting the error. Try putting in the whole path, as shown below. Example:
var file = Titanium.Filesystem.getFile("../Resources/tableWindows/CrossRef.xml");
Somebody with the same problem: http://developer.appcelerator.com/question/123246/xml-file-will-not-read
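A minimal sketch (untested on a device) that combines both suggestions, checking exists() and logging the native path Titanium resolves so you can see exactly where it is looking:

var file = Ti.Filesystem.getFile(Ti.Filesystem.resourcesDirectory, 'Translation.xml');
Ti.API.info('Resolved path: ' + file.nativePath); // confirm where Titanium is actually looking
if (file.exists()) {
    var doc = Ti.XML.parseString(file.read().text);
    Ti.API.info('Root element: ' + doc.documentElement.nodeName);
} else {
    Ti.API.warn('Translation.xml was not found in the Resources directory');
}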
(I found some similar questions, but none of them solved my problem, so I don't think this is a duplicate question.)
I want to retrieve the filenames in one of my folders using jQuery. I tried the following method, but I still can't get each filename.
$.get(".", function(data) {
    $("#divID").append(data);
});
But I noticed that 'data' is a string, and it contains the filenames at the end, like this:
<script>addRow("filename.csv","filename.csv",0,238618,"233 kB",1512119177,"12/1/17, 5:06:17 PM");</script>
So is there any way I can retrieve the filenames from 'data' (not by using regex)?
You cannot read files on a client's machine. You may access them for development purposes by modifying a flag; look at my answer here regarding this. After that, proceed with the code below.
Both versions work.
Using pure JavaScript:
var filenames = [], foldernames = [];
var url = "file:///Users/Default/Downloads";
var req = new XMLHttpRequest();
req.open("GET", url, true);
req.onreadystatechange = function() {
    if (req.readyState === 4) {
        document.write(req.responseText);
        getNames();
    }
};
req.send();

function getNames() {
    var files = document.querySelectorAll("a.icon.file");
    var folders = document.querySelectorAll("a.icon.dir");
    files.forEach(function(item) { filenames.push(item.textContent); });
    folders.forEach(function(item) { foldernames.push(item.textContent.slice(0, -1)); });
    console.log(filenames);
    console.log(foldernames);
}
Using jQuery:
var filenames = [], foldernames = [];
$.get("file:///Users/Default/Downloads", function(response) {
    document.write(response);
    getNames();
});

function getNames() {
    var files = document.querySelectorAll("a.icon.file");
    var folders = document.querySelectorAll("a.icon.dir");
    files.forEach(function(item) { filenames.push(item.textContent); });
    folders.forEach(function(item) { foldernames.push(item.textContent.slice(0, -1)); });
    console.log(filenames);
    console.log(foldernames);
}
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
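If you would rather not overwrite the current page with document.write, another option (still no regex) is to parse the response without executing it and replay each addRow(...) call against your own collector. This is only a rough sketch and assumes the response is Chrome's file:// directory listing, where every entry is emitted as a single addRow(...) call inside its own script tag, as in the sample shown in the question:

$.get(".", function(data) {
    var filenames = [];
    var doc = new DOMParser().parseFromString(data, "text/html"); // scripts are parsed, not executed
    doc.querySelectorAll("script").forEach(function(script) {
        var code = script.textContent.trim();
        if (code.indexOf("addRow(") === 0) {
            // Re-run the call with our own addRow: the first argument is the name,
            // the third one flags directories.
            new Function("addRow", code)(function(name, url, isDir) {
                if (!isDir) { filenames.push(name); }
            });
        }
    });
    console.log(filenames);
});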
I've never used JavaScript before, and I've been trying for ages to do this with no luck; I can't find anyone else who has tried it either.
I want to copy the text data straight from this txt document in my Drive. It is possible to do this manually just fine, but I want it to be done automatically every day instead.
The text document:
Boxes Made,3
Target Percentage,34
Hourly Rate,2
If I import this into a spreadsheet with these settings, it's perfect:
Import Settings
And it imports like this:
After Import
Now I need to automate this so that a script imports it automatically.
The script I have so far doesn't work; please help.
Current script:
function AutoImporter(Source)
{
    var Source = DriveApp.getFilesByName('DailyData.txt');
    var TextContents = Source.copyText();
    var Target = SpreadsheetApp.getActiveSheet();
    Target.appendText(TextContents[1]);
}
Edit:
Someone just sent me a script that seems closer, but it still didn't work:
function autoCSV() {
    var ss = SpreadsheetApp.getActiveSpreadsheet();
    var s = ss.getActiveSheet();
    var r = s.getActiveCell();
    var id = "DailyData.txt"; // <<<<< enter the ID of the text file
    var f3 = DriveApp.getFileById(id);
    var lst1 = f3.getBlob().getDataAsString().split('\n').map(function(x) { return x.split(','); });
    var ncols = 1, i, lst2 = [];
    for (i in lst1) { if (lst1[i].length > ncols) ncols = lst1[i].length; }
    for (i = 0; i < ncols; i++) lst2.push('');
    for (i in lst1) lst1[i] = lst1[i].concat(lst2.slice(0, lst2.length - lst1[i].length));
    s.getRange(r.getRow(), r.getColumn(), lst1.length, ncols).setValues(lst1);
}
You may read a text file from Google Drive this way:
'use strict'; // <- Always use strict mode.

function foo() {
    var fileName = 'DailyData.txt';
    var files = DriveApp.getFilesByName(fileName);
    if (!files.hasNext()) {
        throw new Error('No file with name:' + fileName);
    }
    // We take only the first file among all files with such name.
    var file = files.next();
    var text = file.getBlob().getDataAsString('utf8');
    Logger.log(text);
    // Now you have to parse the file.
}
Documentation:
DriveApp.getFilesByName returns a collection of Files.
File.getBlob returns a Blob.
Blob.getDataAsString returns a String.
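To cover the remaining "now you have to parse the file" step, here is a minimal follow-up sketch (untested). It assumes every line of DailyData.txt has the two-column Name,Value shape shown in the question:

function importDailyData() {
    var files = DriveApp.getFilesByName('DailyData.txt');
    if (!files.hasNext()) {
        throw new Error('No file with name DailyData.txt');
    }
    var text = files.next().getBlob().getDataAsString('utf8');
    var rows = text.split('\n')
        .filter(function(line) { return line.trim() !== ''; })  // skip blank lines
        .map(function(line) { return line.split(','); });       // "Boxes Made,3" -> ["Boxes Made", "3"]
    var sheet = SpreadsheetApp.getActiveSheet();
    sheet.getRange(1, 1, rows.length, rows[0].length).setValues(rows);
}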
This is supposed to read in a CSV and then write it to BigQuery. When it runs, however, nothing is written and there are no errors logged. I read that I need to write a CSV and then turn it into an octet stream. I am not sure whether or not this is compatible with Google BigQuery.
function test() {
    try {
        var tableReference = BigQuery.newTableReference();
        tableReference.setProjectId(PROJECT_ID);
        tableReference.setDatasetId(datasetId);
        tableReference.setTableId(tableId);

        var schema = "CUSTOMER:string, CLASSNUM:integer, CLASSDESC:string, CSR:string, CSR2:string, INSURANCE:string, REFERRALGENERAL:string, REFERRALSPECIFIC:string, NOTES:string, INMIN:integer, INHR:integer, OUTMIN:integer, OUTHR:integer, WAITMIN:integer, WAITHR:integer, DATETIMESTAMP:float, DATEYR:integer,DATEMONTH:integer, DATEDAY:integer";

        var load = BigQuery.newJobConfigurationLoad();
        load.setDestinationTable(tableReference);
        load.setSourceUris(URIs);
        load.setSourceFormat('NEWLINE_DELIMITED_JSON');
        load.setSchema(schema);
        load.setMaxBadRecords(0);
        load.setWriteDisposition('WRITE_TRUNCATE');

        var configuration = BigQuery.newJobConfiguration();
        configuration.setLoad(load);

        var newJob = BigQuery.newJob();
        newJob.setConfiguration(configuration);

        var loadr = DriveApp.getFilesByName("test.csv");
        var x = loadr.next().getBlob();
        Logger.log(x.getDataAsString());

        var d = DriveApp.getFilesByName("test.csv");
        var id = d.next().getId();
        Logger.log(id);

        var data = DocsList.getFileById(id).getBlob().getDataAsString();
        var mediaData = Utilities.newBlob(data, 'application/octet-stream');

        BigQuery.Jobs.insert(newJob, PROJECT_ID, mediaData);
    }
    catch (error) {
        Logger.log("A" + error.message);
    }
}
Your sourceFormat is wrong for CSV files:
The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default value is CSV.
https://developers.google.com/bigquery/docs/reference/v2/jobs#configuration.load.sourceUris
On the other hand, I think you don't need load.setSourceUris(URIs) at all, since you are trying to load from a local file and not from Google Cloud Storage. Check this Python example: https://developers.google.com/bigquery/loading-data-into-bigquery
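Putting both points together, the load configuration from the question would look roughly like this. This is only a sketch that reuses the question's own PROJECT_ID, tableReference and schema setup, which is assumed to be unchanged:

var load = BigQuery.newJobConfigurationLoad();
load.setDestinationTable(tableReference);
load.setSourceFormat('CSV');              // was 'NEWLINE_DELIMITED_JSON'
load.setSchema(schema);
load.setMaxBadRecords(0);
load.setWriteDisposition('WRITE_TRUNCATE');
// no load.setSourceUris(...): the data is sent in the request body, not read from Cloud Storage

var configuration = BigQuery.newJobConfiguration();
configuration.setLoad(load);
var newJob = BigQuery.newJob();
newJob.setConfiguration(configuration);

var mediaData = DriveApp.getFilesByName("test.csv").next().getBlob()
    .setContentType('application/octet-stream');
BigQuery.Jobs.insert(newJob, PROJECT_ID, mediaData);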
I'm using openxml in my HTML5 mobile app to generate word documents on the mobile device.
In general openxml works fine and is straightforward to use, but I'm struggling with an annoying problem.
Document generation only works the first time after I've started the app; that time I can open and view the document. Restarting the app means:
- Redeploying from the development machine
- Removing the app from the task pane (pushing it aside; I assume the app is removed then?)
The second time, I get a message that the document is corrupted and I'm unable to view the file.
UPDATE:
I can't reproduce this behaviour when running the app connected to the remote debugger without a breakpoint set. Done this way, I always get a working document.
It doesn't make a difference whether I make any changes to the document or not; simply opening and saving reproduces this error.
After doing some research, I've found that the structure of the docx zip file is the same for the working and the corrupt file. They also have the same file length. But in the corrupt docx I've found some entries with a wrong/invalid CRC. See here an example when trying to extract a corrupt file from the zip. Other files work as expected.
The properties for this file are shown here:
(The CRC in a working version is 44D3906C.)
Code for processing the doc-template:
/*
 * Process the template
 */
function processTemplate(doc64, callback)
{
    "use strict";
    console.log("PROCESS TEMPLATE");

    var XAttribute = Ltxml.XAttribute;
    var XCData = Ltxml.XCData;
    var XComment = Ltxml.XComment;
    var XContainer = Ltxml.XContainer;
    var XDeclaration = Ltxml.XDeclaration;
    var XDocument = Ltxml.XDocument;
    var XElement = Ltxml.XElement;
    var XName = Ltxml.XName;
    var XNamespace = Ltxml.XNamespace;
    var XNode = Ltxml.XNode;
    var XObject = Ltxml.XObject;
    var XProcessingInstruction = Ltxml.XProcessingInstruction;
    var XText = Ltxml.XText;
    var XEntity = Ltxml.XEntity;
    var cast = Ltxml.cast;
    var castInt = Ltxml.castInt;

    var W = openXml.W;
    var NN = openXml.NoNamespace;
    var wNs = openXml.wNs;

    var doc = new openXml.OpenXmlPackage(doc64);

    // add a paragraph to the beginning of the document.
    var body = doc.mainDocumentPart().getXDocument().root.element(W.body);
    var tpl_row = ((doc.mainDocumentPart().getXDocument().descendants(W.tbl)).elementAt(1).descendants(W.tr)).elementAt(2);
    var newrow = new XElement(tpl_row);
    doc.mainDocumentPart().getXDocument().descendants(W.tbl).elementAt(1).add(newrow);

    // callback(doc);

    var mod_file = null;
    var newfile;
    var path;

    if (doc != null && doc != undefined) {
        mod_file = doc.saveToBlob();

        // Start writing document
        path = "Templates";
        newfile = "Templates/Bau.docx";
        console.log("WRITE TEMPLATE DOCUMENT");

        fs.root.getFile("Templates/" + "MyGenerated.docx", {create: true, exclusive: false},
            function(fileEntry)
            {
                fileEntry.createWriter(
                    function(fileWriter)
                    {
                        fileWriter.onwriteend = function(e) {
                            console.log("TEMPLATE DOCUMENT WRITTEN:" + e.target.length);
                        };
                        fileWriter.onerror = function(e) {
                            console.log("ERROR writing DOCUMENT:" + e.code + ";" + e.message);
                        };
                        var blobreader = new FileReader();
                        blobreader.onloadend = function()
                        {
                            fileWriter.write(blobreader.result); // reader.result contains the contents of blob as a typed array
                        };
                        blobreader.readAsArrayBuffer(mod_file);
                    },
                    null);
            }, null);
    }
}
Any ideas what I'm doing wrong?
Thanks for posting about the error. There were some issues with jszip.js that I encountered when I was developing the Open XML SDK for JavaScript.
At the following link, there is a sample javascript app that demonstrates generating a document.
Open XML SDK for JavaScript Demo
In that app you can save multiple DOCXs, one after another, and they are not corrupted.
In order to work on this issue, I need to be able to reproduce it locally. Maybe you can take that little working web app and replace parts with your parts until it is generating invalid files?
Cheers, Eric
P.S. I am traveling and have intermittent access to the internet. If you can continue the thread on OpenXmlDeveloper.org, it will help me to answer quicker. :-)
What made it work for me was changing the way images (Parts) are added to the document. I was using the type "binary" for adding images to the document; I changed this to "base64".
So I changed the source from:
mydoc.addPart( "/word/"+reltarget, openXml.contentTypes.png, "binary", fotodata ); // add Image Part to doc
to:
mydoc.addPart( "/word/"+reltarget, openXml.contentTypes.png, "base64", window.btoa(fotodata) ); // add Image Part to doc
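One thing to watch with window.btoa is that it expects a binary string (every character code below 256). If fotodata arrives as an ArrayBuffer, for example from FileReader.readAsArrayBuffer, it has to be converted first; a small hypothetical helper:

// Hypothetical helper, assuming fotodata is an ArrayBuffer rather than a binary string:
// convert it byte by byte before handing it to window.btoa.
function arrayBufferToBase64(buffer) {
    var bytes = new Uint8Array(buffer);
    var binary = "";
    for (var i = 0; i < bytes.length; i++) {
        binary += String.fromCharCode(bytes[i]);
    }
    return window.btoa(binary);
}

mydoc.addPart("/word/" + reltarget, openXml.contentTypes.png, "base64", arrayBufferToBase64(fotodata));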
I am trying to create a file in my extension directory and I have this code:
AddonManager.getAddonByID(" extension id here ", function(addon)
{
    var uri = addon.getResourceURI("hello.txt");
    var file = Components.classes["@mozilla.org/file/local;1"]
                         .createInstance(Components.interfaces.nsILocalFile);
    var stringUri = uri.asciiSpec;
    stringUri = stringUri.replace(new RegExp(/\//g), '\\');
    stringUri = stringUri.slice(8);
    alert(stringUri);
    try {
        file.initWithPath(stringUri);
    } catch(e) {
        alert(e);
    }
    alert(addon.hasResource("hello.txt"));
});
For some reason, the last alert always shows false and the file doesn't exist. What am I doing wrong?
I also put an <em:unpack>true</em:unpack> tag in the install.rdf so that I can see my extension directory.
initWithPath accepts only local filesystem paths. Assuming uri is a file: URL, you can do the conversion like this:
var path = uri.QueryInterface(Components.interfaces.nsIFileURL).file.path
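Applied to the snippet from the question, the conversion might look roughly like this. This is only a sketch and assumes the add-on is unpacked, so getResourceURI returns a file: URL rather than a jar: URL:

Components.utils.import("resource://gre/modules/FileUtils.jsm");

AddonManager.getAddonByID(" extension id here ", function(addon)
{
    var uri = addon.getResourceURI("hello.txt");
    // Convert the file: URL into an nsIFile instead of rewriting the string by hand.
    var file = uri.QueryInterface(Components.interfaces.nsIFileURL).file;
    if (!file.exists()) {
        file.create(Components.interfaces.nsIFile.NORMAL_FILE_TYPE, FileUtils.PERMS_FILE);
    }
    alert(file.path);
});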