I am very new to coding and JavaScript; just a few days in. I was wondering if there is a way to import objects from a text file (one per line) to use in my array, replyText. Here is what I'm working with:
// Variables
var theButton = document.getElementById("theButton");
var mainText = document.getElementById("mainText");
var replyText = [...,...,...,...,];
var i = 0;
// Functions
function nextText() {
    mainText.innerHTML = replyText[i++ % replyText.length];
}

// MAIN SCRIPT
theButton.onclick = function() {
    nextText();
};
You can use XMLHttpRequest to get the .txt file; just pass its path.
var file = new XMLHttpRequest();
file.open("GET", "file:/../file.txt", false);
file.onreadystatechange = function () {
    if (file.readyState === 4) {
        if (file.status === 200 || file.status === 0) {
            var text = file.responseText;
            alert(text);
        }
    }
};
file.send(null); // don't forget to actually send the request
EDIT: you must pass the absolute path, e.g. file:///C:/your/path/to/file.txt
For client/browser-side file reading:
You cannot easily read a file on the client side, as you are not allowed direct access to the client's file system. However, you can place an input element of type file in your HTML markup, via which the client can load a file for your program to process. For example:
<input type="file" id="file" onchange="readFile()" />
Now when the client selects a file for use, the readFile() function will be called which will read and process the file. Here's an example:
function readFile() {
    var file = document.getElementById('file').files[0]; // get the selected file from the input element
    var fileReader = new FileReader();                    // initialize a new FileReader object
    fileReader.onload = function() {                      // called when the file has been read
        console.log(this.result); // <--- you can access the file data from this variable
        // Do the necessary processing on the file here
    };
    fileReader.readAsText(file); // read the file as text
}
For more information on FileReader, check out the docs.
To add on to Paulo's solution, here is how to split the string on line breaks (the newline character):
var replyText = text.split("\n"); // "\n" is new line character
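Putting the two answers together for the original question, here is a minimal sketch (not a definitive implementation) that reuses the question's theButton and mainText elements plus the file input with id "file" from the answer above, and fills replyText with one reply per line:
var theButton = document.getElementById("theButton");
var mainText = document.getElementById("mainText");
var replyText = [];
var i = 0;

// when the user picks a text file, read it and split it into replies
document.getElementById("file").onchange = function () {
    var reader = new FileReader();
    reader.onload = function () {
        // one reply per line; drop empty lines
        replyText = reader.result.split("\n").filter(function (line) {
            return line.trim() !== "";
        });
        i = 0; // start over from the first reply
    };
    reader.readAsText(this.files[0]);
};

theButton.onclick = function () {
    if (replyText.length > 0) {
        mainText.innerHTML = replyText[i++ % replyText.length];
    }
};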
Related
I'm using the default 'Excel web add-in' template in Visual Studio 2017. I'm trying to create an Excel add-in that inserts a copy of an existing workbook into the current one. The first step is to get the full path and name of the current workbook. I got the code from here. I'm using the beta Excel API. At the line 'var myFile = document.getElementById("file");', myFile is always null. I assume the null value is because the workbook isn't 'loaded', but the workbook does open when I run the program.
Here is the code from Home.js:
'use strict';

(function () {
    Office.onReady(function () {
        // Office is ready
        $(document).ready(function () {
            // The document is ready
            $('#RunMacroButton').click(RunMacro);
        });
    });

    function RunMacro() {
        var myFile = document.getElementById("file");
        var reader = new FileReader();

        reader.onload = (function (event) {
            Excel.run(function (context) {
                // strip off the metadata before the base64-encoded string
                var startIndex = event.target.result.indexOf("base64,");
                var workbookContents = event.target.result.substr(startIndex + 7);

                Excel.createWorkbook(workbookContents);
                return context.sync();
            }).catch(errorHandlerFunction);
        });

        // read in the file as a data URL so we can parse the base64-encoded string
        reader.readAsDataURL(myFile.files[0]);
    }
})();
In the line 'var myFile = document.getElementById("file");', myFile is always null because there is no element with the id "file" in your add-in's markup. It is also the wrong way to get the path of the currently open workbook; use 'Office.context.document.url' instead to get the path.
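For example, a minimal sketch of reading the path inside RunMacro (just a sketch of that one step, not the full add-in) could look like this:
function RunMacro() {
    // Office.context.document.url returns the full path (or URL) of the
    // workbook currently open in the add-in's host application.
    var workbookUrl = Office.context.document.url;
    console.log("Current workbook: " + workbookUrl);

    // ...continue here with inserting the copy of the other workbook...
}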
I have a file upload functionality in my application which can not upload JSON files which are more than 10MB in size. If user uploads a file >= 10 MB , My app should split it into smaller JSON files each less than 10MB. Also, the Proper JSON objects needs to be maintained in the new low-sized files.
Is there a way to do this in Javascript or jQuery?
I propose a solution like this, without any specific library. It uses some fairly modern techniques, but it may be useful to you:
var openFile = function(event, callback) {
    // get the target input
    var input = event.target;

    // create an instance of FileReader
    var reader = new FileReader();

    // define a handler to get the result
    reader.onload = function(e) {
        var contents = e.target.result;
        // you could use a promise here to make this neater
        callback(contents);
    };

    // make sure you tell it to read as text;
    // you may also want to add some validation on the input
    // for correct file types
    reader.readAsText(input.files[0]);
};

var getChunks = function(str) {
    var chunks = [];

    // roughly 1 MB per chunk (1,000,000 characters)
    var chunkSize = 1000000;

    // keep slicing chunkSize-character pieces off the string
    // until what is left fits in a single chunk
    while (str) {
        if (str.length < chunkSize) {
            chunks.push(str);
            break;
        } else {
            chunks.push(str.substr(0, chunkSize));
            str = str.substr(chunkSize);
        }
    }

    return chunks;
};

var fileInput = document.querySelector('#jsonUpload');

fileInput.addEventListener('change', function(event) {
    openFile(event, function(str) {
        console.log(getChunks(str));
    });
});
It then reads the JSON file from this input:
<input type='file' accept='*' id="jsonUpload">
Link to the fiddle
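Note that getChunks above splits at arbitrary character positions, so the individual pieces are not valid JSON on their own. If the file contains a top-level JSON array (an assumption, since the question doesn't say what the structure is), a rough sketch for splitting it into smaller, still-valid JSON strings could look like this:
// Sketch: split a top-level JSON array into serialized chunks, each below
// maxBytes when encoded as UTF-8. Assumes the uploaded file parses to an array.
function splitJsonArray(jsonText, maxBytes) {
    var items = JSON.parse(jsonText); // throws if the file is not valid JSON
    var chunks = [];
    var current = [];

    items.forEach(function(item) {
        var candidate = JSON.stringify(current.concat([item]));
        if (current.length > 0 && new Blob([candidate]).size > maxBytes) {
            // the current chunk is full; start a new one with this item
            chunks.push(JSON.stringify(current));
            current = [item];
        } else {
            current.push(item);
        }
    });

    if (current.length > 0) {
        chunks.push(JSON.stringify(current));
    }

    return chunks; // each entry is a valid JSON array string under maxBytes
}

// usage with the 10 MB limit from the question:
// var pieces = splitJsonArray(str, 10 * 1024 * 1024);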
After some searching I found a handy function that works with iMacros for saving a string to a CSV file. The problem is I can't figure out how to overwrite the file. The following code will only append data to the file. Thanks.
function WriteFile(path, string) {
    // Import FileUtils.jsm
    Components.utils.import("resource://gre/modules/FileUtils.jsm");

    // Declare the file
    var file = new FileUtils.File(path);

    // Set the file path
    file.initWithPath(path);

    // If it exists move on, if not create it
    if (!file.exists()) {
        file.create(file.NORMAL_FILE_TYPE, 0666);
    }

    var charset = 'EUC-JP';

    var fileStream = Components.classes['@mozilla.org/network/file-output-stream;1']
        .createInstance(Components.interfaces.nsIFileOutputStream);

    // 18 = 0x12 = PR_WRONLY | PR_APPEND, which is why the data is appended
    fileStream.init(file, 18, 0x200, false);

    var converterStream = Components
        .classes['@mozilla.org/intl/converter-output-stream;1']
        .createInstance(Components.interfaces.nsIConverterOutputStream);
    converterStream.init(fileStream, charset, string.length,
        Components.interfaces.nsIConverterInputStream.DEFAULT_REPLACEMENT_CHARACTER);

    // Write the string to the file
    converterStream.writeString(string); //+"\r\n"
    converterStream.close();
    fileStream.close();
}
Components.utils.import("resource://gre/modules/NetUtil.jsm");
Components.utils.import("resource://gre/modules/FileUtils.jsm");
// file is nsIFile, data is a string
// You can also optionally pass a flags parameter here. It defaults to
// FileUtils.MODE_WRONLY | FileUtils.MODE_CREATE | FileUtils.MODE_TRUNCATE;
var ostream = FileUtils.openSafeFileOutputStream(file);
var converter = Components.classes["#mozilla.org/intl/scriptableunicodeconverter"].
createInstance(Components.interfaces.nsIScriptableUnicodeConverter);
converter.charset = "UTF-8";
var istream = converter.convertToInputStream(data);
// The last argument (the callback) is optional.
NetUtil.asyncCopy(istream, ostream, function(status) {
if (!Components.isSuccessCode(status)) {
// Handle error!
return;
}
// Data has been written to the file.
});
Read the sample above; it is taken from this link: https://developer.mozilla.org/en-US/Add-ons/Code_snippets/File_I_O. You should specify the flags parameter so the file is truncated instead of appended to.
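If you would rather keep your original WriteFile function, the fix is in the flags passed to fileStream.init. A sketch of the change (the values below are the standard NSPR file-open flags):
// In the original WriteFile function, replace
//     fileStream.init(file, 18, 0x200, false);  // 18 = 0x02 | 0x10 = PR_WRONLY | PR_APPEND
// with flags that truncate the file instead of appending to it:
var PR_WRONLY      = 0x02;
var PR_CREATE_FILE = 0x08;
var PR_TRUNCATE    = 0x20;

fileStream.init(file, PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE, 0x200, false); // overwrites the file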
I'm attempting to upload an array of files to Parse using JavaScript with the following code:
html:
<fieldset>
    <input type="file" name="fileselect" id="fileselect" multiple>
    <input id="uploadbutton" type="button" value="Upload">
</fieldset>
JS:
$('#uploadbutton').click(function () {
    var fileUploadControl = $("#fileselect")[0];

    if (fileUploadControl.files.length > 0) {
        var file = fileUploadControl.files[0];
        var name = "style.css";
        var parseFile = new Parse.File(name, file);
        var filesArray = [parseFile];
    }

    parseFile.save().then(function() {
        // The file has been saved to Parse.
    }, function(error) {
        // The file either could not be read, or could not be saved to Parse.
    });

    var newStore = new Parse.Object("FileStore");
    newStore.set("files", filesArray);
    newStore.save();
});
I am uploading to a class I have called FileStore with a key "files", which is currently set to an array and which I would like to hold an array of files. Is this the best way to go about uploading multiple files to Parse? The code isn't working for me right now. My aim is to have multiple files associated with each object in my class.
Be careful with async code. Currently your code will have a race condition that will most likely fail because you call newStore.save() before parseFile.save() is finished.
Those 3 lines dealing with newStore should be inside the success handler for parseFile.save(), e.g.:
parseFile.save().then(function() {
    // The file has been saved to Parse.
    var newStore = new Parse.Object("FileStore");
    newStore.set("files", filesArray);
    newStore.save();
}, function(error) {
    // The file either could not be read, or could not be saved to Parse.
});
When you get to saving multiple files you'll need to wait for all of them to finish before moving to the next step. You can chain your promises together to run in Series or in Parallel.
For what you want Parallel would work fine:
var fileSavePromises = [];

// assuming some code that creates each file has stored the Parse.File objects in an
// array called "filesToSave" but not yet called save
_.each(filesToSave, function(file) {
    fileSavePromises.push(file.save());
});

Parse.Promise.when(fileSavePromises).then(function() {
    // all files have saved now, do other stuff here
    var newStore = new Parse.Object("FileStore");
    newStore.set("files", filesToSave);
    newStore.save();
});
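If you ever need the saves to run in series instead (one after the other), a sketch of chaining the same promises could look like this, reusing the filesToSave array from above:
// Sketch: save the files one at a time by chaining the promises in series.
var seriesPromise = Parse.Promise.as(); // start with an already-resolved promise

_.each(filesToSave, function(file) {
    seriesPromise = seriesPromise.then(function() {
        return file.save(); // the next save starts only after this one finishes
    });
});

seriesPromise.then(function() {
    // all files have saved, one after the other
});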
I have managed to solve this with the help of @Timothy's code; here is the full code after the edit:
var fileUploadControl = $("input[type=file]")[0];
var filesToSave = fileUploadControl.files;
var fileSavePromises = [];
// assuming some code that creates each file has stored the Parse.File objects in an
// array called "filesToSave" but not yet called save
_.each(filesToSave, function(file) {
var parseFile = new Parse.File("photo.jpg", file);
fileSavePromises.push(
parseFile.save().then(function() {
// The file has been saved to Parse.
$.post("/i", {
file: {
"__type": "File",
"url": parseFile.url(),
"name": parseFile.name()
}
});
})
);
});
Parse.Promise.when(fileSavePromises).then(function() {
// all files have saved now, do other stuff here
window.location.href = "/";
});
I have a local program which writes a JSON object to a file so that a JavaScript program can pick up its data and process it. The file is selected using an <input> element:
<form id = "getfiles">
<input type = "file" multiple id = "files" />
</form>
with the following JS function set to repeat every 300 ms via setInterval. However, when the file changes, only Google Chrome reloads the file and processes the new content; I have to manually reselect the file on the page in IE 10 and Firefox 20.
function speakText()
{
    var thefile = document.getElementById('files').files[0];
    var lastChanged = thefile.lastModifiedDate;
    var reader = new FileReader();

    reader.onload = function(event)
    {
        var lcd = document.getElementById("last_change_date");
        if (!lcd)
        {
            var spanLastChanged = document.createElement("span");
            spanLastChanged.id = "last_change_date";
            spanLastChanged.innerText = lastChanged;
            console.log(lastChanged);
            document.body.appendChild(spanLastChanged);
        }
        else
        {
            // compare lastChanged with last_change_date
            var last_known_change = Date.parse(lcd.innerText);
            // var last_known_change = Date.parse(thefile.lastModifiedDate);
            if (last_known_change !== Date.parse(lastChanged))
            {
                console.log("Something is new since " + lcd.innerText);
                var fileContent = event.target.result;
                var commands = JSON.parse(fileContent);
                handleJSON(fileContent);
                lcd.innerText = lastChanged;
            }
        }
    };

    reader.readAsText(thefile, "UTF-8");
}
Firefox and IE are doing the right thing per spec: the File objects associated with a file input are supposed to be immutable snapshots of a file at the point when the File object was created. It's a known bug in WebKit/Blink that they just store a reference to the file's data, so that mutating the data will change what the File object sees.
In fact, the WebKit/Blink behavior is a privacy bug: when a user selects a file in a file input, they are giving a web page permission to read the data of the file at that time, not for all future versions of the file! Which is why the spec is written as it is.