Is it possible to read a file with a file name like Sometext_ddmmyy.xls in JavaScript?
The file name changes daily; the date part ddmmyy can be any date in that week or the previous one.
But the path of the file never changes.
Can you help me write JavaScript to read the file?
Code:
var filename = /^path\\sometext_.*.xls$/;
....
var neobjectexcel = new ActiveXObject("Excel.Application");
var sheet = neobjectexcel.Workbooks.Open(filename).ActiveSheet;
...
I used a regex, but I am getting the error: "Sorry, we couldn't find /^path\sometext_.*.xls$/ Is it possible it was moved, renamed or deleted?"
Even the regex /^path\\sometext_\d\d\d\d\d\d.xls$/ gives the same error.
Trying \d+ instead of \d\d\d\d\d\d does not work either.
Please help!
Update: The file will be in a folder on a server, along with other files with similar names such as sometext_ddmmyy.xls, randomtext_ddmmyy.xls and randomothertext_ddmmyy.xls, and it will be accessed by a script on the same server.
With FileSystemObject you can filter filenames with a RegExp and find the latest matching file added to a folder, like so:
function getLatestFile(path, filter) {
    var fileIO = new ActiveXObject('Scripting.FileSystemObject'),
        folder = fileIO.GetFolder(path),             // Get a folder object
        files = new Enumerator(folder.files),        // Create files collection
        latest = 0,
        file, date, isTargetFile, temp, target;

    while (!files.atEnd()) {                         // Iterate files collection
        file = files.item();                         // Get a file from the collection
        temp = new String(file);                     // Convert a file object to string
        isTargetFile = filter.test(temp);            // Filter the filename
        date = new Date(file.DateCreated).getTime(); // Get the creation date of the file
        if (isTargetFile && date > latest) {         // Check if this file passed the filter and is newer than the previous filter-passed file
            target = temp;
            latest = date;
        }
        files.moveNext();                            // Continue iteration
    }
    return target;
}
An example call for your case would be:
var filename = getLatestFile(path_to_folder, /sometext_\d{6}\.xls$/);
Arguments: path is the path to the folder in which to search for files; filter is a RegExp object (literal or constructed) used to filter the filenames. When creating the RegExp, note that the stringified file object (temp) uses a backslash as the folder separator, though the RegExp can be written to match the filename only. If the function returns undefined, none of the filenames matched the RegExp.
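For completeness, here is a minimal sketch of feeding the result back into the Excel snippet from the question, assuming an ActiveX-capable host (e.g. WSH/HTA on that server) and that path_to_folder points at the folder described above:

var filename = getLatestFile(path_to_folder, /sometext_\d{6}\.xls$/);
if (filename) {                                          // undefined means no file matched
    var excel = new ActiveXObject("Excel.Application");
    var workbook = excel.Workbooks.Open(filename);
    var sheet = workbook.ActiveSheet;
    // ... read cells here, e.g. sheet.Cells(1, 1).Value ...
    workbook.Close(false);                               // close without saving
    excel.Quit();
}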
I am writing an Automator workflow to work with files and folders. I'm writing it in JavaScript, as I'm more familiar with it.
I would like to receive a folder, and get the folder’s name as well as the files inside.
Here is roughly what I have tried:
1. Window receives current folders in Finder (I'm only interested in the first and only folder)
2. Get Folder Contents
3. JavaScript:
function run(input, parameters) {
    var files = [];
    for (let file of input) files.push(file.toString().replace(/.*\//, ''));
    // etc
}
This works, but I don’t have the folder name. Using this, I get the full path name of each file, which is why I run it through the replace() method.
If I omit step 2 above, I get the folder, but I don’t know how to access the contents of the folder.
I can fake the folder by getting the first file and stripping off the file name, but I wonder whether there is a more direct approach to getting both the folder and its contents.
I’ve got it working. In case anybody has a similar question:
// Window receives current folders in Finder
var app = Application.currentApplication();
app.includeStandardAdditions = true;

function run(input, parameters) {
    let directory = input.toString();
    var directoryItems = app.listFolder(directory, { invisibles: false });
    var files = [];
    for (let file of directoryItems) files.push(file.toString().replace(/.*\//, ''));
    // etc
}
I don’t include the Get Folder Contents step, but iterate through the folder using app.listFolder() instead. The replace() method is to trim off everything up to the last slash, giving the file’s base name.
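If the folder's own name is needed as well (rather than its full path), a similar replace on the directory string should do it. A small sketch, assuming directory holds the full folder path as above:

// Strip a trailing slash (if any), then everything up to the last remaining slash
let folderName = directory.replace(/\/$/, '').replace(/.*\//, '');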
I'm looking for some guidance or ideas on how to create a properly formatted Excel (XLSX) spreadsheet using JavaScript server-side.
I've found multiple sites/libraries (such as SheetJS) which can create the file, but they depend on web functions (i.e. blobs and the like).
Alternatively, a JS library which can create a zip file without using blobs/web functions would work (i.e. I can create the XML files structured within the XLSX file/zip, but cannot compress them server-side).
The reason for this is the need to export these files from server-side scripts within NetSuite/SuiteScript... so far I've come up empty.
You may be able to get what you are looking for using the 'N/file' SuiteScript module. Create a file using file.Type.EXCEL.
Here's a scheduled script code sample that takes saved search results and creates an Excel file saved to the file cabinet. You can reference SuiteAnswers Id 93557. Also, the file.Type enum does allow for the type ZIP; reference SuiteAnswers Id 43530.
define(['N/search', 'N/file'], function(search, file) {
    function execute(scriptContext) {
        // Load saved search
        var mySearch = search.load({ id: '47' });
        // Run saved search
        var mySearchResultSet = mySearch.run();
        // Headers of CSV file, separated by commas and ending with a new line (\r\n)
        var csvFile = 'Internal ID,Item Name,Average Cost\r\n';
        // Iterate through each result
        mySearchResultSet.each(iterator);
        function iterator(resultObject) {
            // Get values
            var internalid = resultObject.getValue(mySearch.columns[0]);
            var itemid = resultObject.getValue(mySearch.columns[1]);
            var formulacolumn = resultObject.getValue(mySearch.columns[2]);
            // Add each result as a new line of the CSV
            csvFile += internalid + ',' + itemid + ',' + formulacolumn + '\r\n';
            return true;
        }
        // Variable for datetime
        var date = new Date();
        // Creation of the file
        var fileObj = file.create({
            name: 'Saved Search Result - ' + date.toLocaleDateString() + '.xlsx',
            fileType: file.Type.EXCEL,
            contents: csvFile,
            description: 'This is a CSV file.',
            encoding: file.Encoding.UTF8,
            folder: 123
        });
        // Save the file
        var fileId = fileObj.save();
    }
    return {
        execute: execute
    };
});
I'm new to the coding game, learning how to code day by day.
Recently I got hooked on Google Apps Script, learning to make a simple database.
I've tried to decode and re-code the script, but I just can't get it working. It was supposed to convert the Excel file and replace the existing Google Sheet; instead, the script just converts and duplicates the Excel file into many versions (1, 2, 3) of sheets from the original Excel file.
// Convert the user's stored excel files to google spreadsheets based on the specified directories.
// There are quota limits on the maximum conversions per day: consumer #gmail = 250.
function convertCollection1()
{
  var user = Session.getActiveUser(); // Used for ownership testing.
  var origin = DriveApp.getFolderById("1dPsDfoqMQLCokZK4RN0C0VRzaRATr9AN");
  var dest = DriveApp.getFolderById("1M6lDfc_xEkR4w61pUOG4P5AXmSGF1hGy");

  // Index the filenames of owned Google Sheets files as object keys (which are hashed).
  // This avoids needing to search and do multiple string comparisons.
  // It takes around 100-200 ms per iteration to advance the iterator, check if the file
  // should be cached, and insert the key-value pair. Depending on the magnitude of
  // the task, this may need to be done separately, and loaded from a storage device instead.
  // Note that there are quota limits on queries per second - 1000 per 100 sec:
  // If the sequence is too large and the loop too fast, Utilities.sleep() usage will be needed.
  var gsi = dest.getFilesByType(MimeType.GOOGLE_SHEETS), gsNames = {};
  while (gsi.hasNext())
  {
    var file = gsi.next();
    if (file.getOwner().getEmail() == user.getEmail())
      gsNames[file.getName()] = true;
  }

  // Find and convert any unconverted .xls, .xlsx files in the given directories.
  var exceltypes = [MimeType.MICROSOFT_EXCEL, MimeType.MICROSOFT_EXCEL_LEGACY];
  for (var mt = 0; mt < exceltypes.length; ++mt)
  {
    var efi = origin.getFilesByType(exceltypes[mt]);
    while (efi.hasNext())
    {
      var file = efi.next();
      // Perform conversions only for owned files that don't have owned gs equivalents.
      // If an excel file does not have a gs file with the same name, gsNames[ ... ] will be undefined, and !undefined -> true
      // If an excel file does have a gs file with the same name, gsNames[ ... ] will be true, and !true -> false
      if (file.getOwner().getEmail() == user.getEmail() && !gsNames[file.getName()])
      {
        Drive.Files.insert(
          {title: file.getName(), parents: [{"id": dest.getId()}]},
          file.getBlob(),
          {convert: true}
        );
        // Do not convert any more spreadsheets with this same name.
        gsNames[file.getName()] = true;
      }
    }
  }
}
The logic of the script is spot on. However, there are subtle nuances in file.getName() and the Drive.Files.insert call you use that cause the unintended behavior in the code.
file.getName() returns the full name of the file, which includes the .xls/.xlsx extension. However, when you convert these files with Drive.Files.insert the extension is dropped. So your gsNames object holds filenames without extensions, but the code looks them up with !gsNames[file.getName()], where the name still has the file extension. The lookup therefore always returns undefined, and !undefined evaluates to true.
Hence, you need to remove the file extension before you check whether the same file has already been converted. The regex to remove the file extension was shamelessly copied from here. Your logic would be modified like so:
if(file.getOwner().getEmail() == user.getEmail() && !gsNames[file.getName().replace(/\.[^/.]+$/, "")])
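For illustration, with a hypothetical filename, the regex strips only the final extension, so the lookup key now matches the name of the converted sheet:

'sometext_120519.xlsx'.replace(/\.[^/.]+$/, "");  // -> 'sometext_120519'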
Your full code will be:
function convertCollection1()
{
  var user = Session.getActiveUser(); // Used for ownership testing.
  var origin = DriveApp.getFolderById("1dPsDfoqMQLCokZK4RN0C0VRzaRATr9AN");
  var dest = DriveApp.getFolderById("1M6lDfc_xEkR4w61pUOG4P5AXmSGF1hGy");

  // Index the filenames of owned Google Sheets files as object keys (which are hashed).
  // This avoids needing to search and do multiple string comparisons.
  // It takes around 100-200 ms per iteration to advance the iterator, check if the file
  // should be cached, and insert the key-value pair. Depending on the magnitude of
  // the task, this may need to be done separately, and loaded from a storage device instead.
  // Note that there are quota limits on queries per second - 1000 per 100 sec:
  // If the sequence is too large and the loop too fast, Utilities.sleep() usage will be needed.
  var gsi = dest.getFilesByType(MimeType.GOOGLE_SHEETS), gsNames = {};
  while (gsi.hasNext())
  {
    var file = gsi.next();
    if (file.getOwner().getEmail() == user.getEmail())
      gsNames[file.getName()] = true;
    Logger.log(JSON.stringify(gsNames));
  }

  // Find and convert any unconverted .xls, .xlsx files in the given directories.
  var exceltypes = [MimeType.MICROSOFT_EXCEL, MimeType.MICROSOFT_EXCEL_LEGACY];
  for (var mt = 0; mt < exceltypes.length; ++mt)
  {
    var efi = origin.getFilesByType(exceltypes[mt]);
    while (efi.hasNext())
    {
      var file = efi.next();
      // Perform conversions only for owned files that don't have owned gs equivalents.
      // If an excel file does not have a gs file with the same name, gsNames[ ... ] will be undefined, and !undefined -> true
      // If an excel file does have a gs file with the same name, gsNames[ ... ] will be true, and !true -> false
      if (file.getOwner().getEmail() == user.getEmail() && !gsNames[file.getName().replace(/\.[^/.]+$/, "")])
      {
        Drive.Files.insert(
          {title: file.getName(), parents: [{"id": dest.getId()}]},
          file.getBlob(),
          {convert: true}
        );
        // Do not convert any more spreadsheets with this same name
        // (store the extension-less name so it matches the lookup above).
        gsNames[file.getName().replace(/\.[^/.]+$/, "")] = true;
      }
    }
  }
  Logger.log(JSON.stringify(gsNames));
}
Note the use of Logger.log(); use it in the future to determine what the program is actually accessing versus what you think it is doing.
Cheers!
I have multiple named spreadsheets in a folder on Google Drive. How would I get their id or url by passing one of the names to a function?
I can get the file using DriveApp, but I don't know how to get the id from that.
function getFile(name)
{
  var folder = DriveApp.getFolderById('id');
  var file = folder.getFilesByName(name);
  // get file id
}
Is there a way to get the id or url using DriveApp or SpreadsheetApp?
The call folder.getFilesByName(name) can potentially retrieve more than one file (as its name says: getFiles), so it gives you a FileIterator that you need to walk with a while loop.
If you are absolutely sure that there is at least one file with that name, you can avoid the loop with a little dirty code like this:
var file = folder.getFilesByName(name).next();
and then retrieve the id with:
var id = file.getId()
-- EDIT:
If you are not sure you will retrieve a file (you don't know if it exists or has the right name...), you'll need to check the result of the file iterator.
Instead of writing:
var file = folder.getFilesByName(name);
prefer to write:
var files = folder.getFilesByName(name); // you could have zero or more than one file
And then you can do a while loop if you believe there is more than one file with that name:
var out = [];
while (files.hasNext()) {
  var file = files.next();
  out.push(file.getId());
  Logger.log(file.getId());
}
return out;
or, if there is only one file or none at all, a simple if will do the job:
if (files.hasNext()) {
  var file = files.next();
  Logger.log(file.getId());
  return file.getId(); // eventually
}
else {
  Logger.log("there is no file with that name");
  return "no id"; // eventually
}
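If you need the URL rather than the id, the File object from DriveApp also exposes getUrl(), and for spreadsheets you can open the file by id with SpreadsheetApp. A small sketch, assuming the file found above is a Google Sheet:

var url = file.getUrl();                        // Drive URL of the file
var ss = SpreadsheetApp.openById(file.getId()); // open it as a spreadsheet
Logger.log(ss.getUrl());                        // spreadsheet URL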
I’m using gulp and nunjucks to automate some basic email templating tasks.
I have a chain of tasks which can be triggered when an image is added to the images folder e.g.:
1. images compressed
2. new image name and dimensions logged to json file
3. json image data then used to populate template when template task is run
So far so good.
I want to be able to define a generic image file path for each template which will then concatenate to each image name (as stored in the json file). So something like:
<img src="{{data.path}}{{data.src}}" >
If I want to nominate a distinct folder to contain the images for each template generated, then Cloudinary requires a mandatory unique version component in the file path, so the image path can never be consistent throughout a template.
if your public ID includes folders (elements divided by '/'), the version component is mandatory (but you can make it shorter).
For example:
http://res.cloudinary.com/demo/image/upload/v1312461204/sample_email/hero_image.jpg
http://res.cloudinary.com/demo/image/upload/v1312461207/sample_email/footer_image.jpg
Same folder. Different path.
So it seems I would now need to create a script/task that logs and stores each distinct file path (with its unique id generated by Cloudinary) for every image, any time an image is uploaded or updated, and then reruns the templating process to publish them.
This just seems like quite a convoluted process, so if there's an easier approach I'd love to know.
Otherwise, if that really is the required route, it would be great if someone could point me to an example of the kind of script that achieves something similar.
Presumably some hosting services will not have the mandatory unique key, which would make life easier. I have spent some time getting to know Cloudinary, and it's a free service with a lot of scope, so I guess I'm reluctant to abandon ship, but I'm open to all suggestions.
Thanks
Note that the version component (e.g., v1312461204) isn't mandatory anymore for most use-cases. The URL could indeed work without it, e.g.:
http://res.cloudinary.com/demo/image/upload/sample_email/hero_image.jpg
Having said that, it is highly recommended to include the version component in the URL in cases where you'd like to update the image with a new one while keeping the exact same public ID. In that case, if you access the exact same URL, you might get a CDN-cached version of the image, which may be the old one.
Therefore, when you upload, you can get the version value from Cloudinary's upload response, and store it in your DB, and the next time you update your image, also update the URL with the new version value.
Alternatively, you can also ask Cloudinary to invalidate the image while uploading. Note that while including the version component "busts" the cache immediately, invalidation may take a while to propagate through the CDN. For more information:
http://cloudinary.com/documentation/image_transformations#image_versions
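As an illustrative sketch (not necessarily the poster's setup), the version value is part of Cloudinary's upload response, so it can be captured at upload time and stored alongside the public_id; here it is simply logged, assuming the cloudinary npm package is configured via environment variables and the local path is hypothetical:

var cloudinary = require('cloudinary').v2;

cloudinary.uploader.upload('images/hero_image.jpg', { folder: 'sample_email' }, function (err, result) {
    if (err) { return console.error(err); }
    console.log(result.version);    // e.g. 1312461204; store this with the public_id
    console.log(result.secure_url); // the returned URL already includes the version component
});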
This is the solution I came up with. It adapts the generic script I use to upload images from a folder to Cloudinary; it now stores the updated file paths returned by Cloudinary and generates a JSON data file that publishes the hosted src details to a template.
I'm sure it could be a lot better semantically, so I welcome any revisions if someone stumbles on this, but it seems to do the job:
// Points to the config file where we are defining file paths
var path = require('./gulp.path')();

// IMAGE HOSTING
var fs = require('fs'); // core module; used below by fs.writeFile
var cloudinary = require('cloudinary').v2;
var uploads = {};
var dotenv = require('dotenv');
dotenv.load();

// Finds the images in a specific folder and returns an array
var read = require('fs-readdir-recursive');
// Set location of images
var imagesInFolder = read(path.images);

// The array that will be populated with image src data
var imgData = new Array();

(function uploadImages() {
    // Loop through all images in the folder and upload them
    for (var i = 0; i < imagesInFolder.length; i++) {
        cloudinary.uploader.upload(path.images + imagesInFolder[i], {folder: path.hosted_folder, use_filename: true, unique_filename: false, tags: 'basic_sample'}, function (err, image) {
            console.log();
            console.log("** Public Id");
            if (err) { console.warn(err); }
            console.log("* Same image, uploaded with a custom public_id");
            console.log("* " + image.public_id);

            // Generate the category title for each image. The category is defined within the image name:
            // it's the first part of the image name, i.e. anything prior to a hyphen.
            var title = image.public_id.substr(image.public_id.lastIndexOf('/') + 1).replace(/\.[^/.]+$/, "").replace(/-.*$/, "");
            console.log("* " + title);
            console.log("* " + image.url);

            // Add the updated src for each image to the output array
            imgData.push({
                [title]: {"src": image.url}
            });

            // Stringify data with no spacing so the .replace regex can easily remove the unwanted curly braces
            var imgDataJson = JSON.stringify(imgData, null, null);
            // Remove the unwanted [] that wraps the json imgData array
            imgDataJson = imgDataJson.substring(1, imgDataJson.length - 1);
            // Replace unwanted braces "},{" with "," otherwise the output is not valid json
            imgDataJson = imgDataJson.replace(/(},{)/g, ',');

            var outputFilename = "images2-hosted.json";
            // Output the hosted image path data to a json file
            // (A separate gulp task is then run to merge and update the new 'src' data into an existing image data json file)
            fs.writeFile(path.image_data_src + outputFilename, imgDataJson, function (err) {
                if (err) {
                    console.log(err);
                } else {
                    console.log("JSON saved to " + outputFilename);
                }
            });
        });
    }
})();
A gulp task is then used to merge the newly generated JSON, overriding the existing JSON data file:
// COMPILE live image hosting data
var gulp = require('gulp');
var merge = require('gulp-merge-json');
var notify = require('gulp-notify');

gulp.task('imageData:comp', function() {
    return gulp
        .src('src/data/images/*.json')
        .pipe(merge('src/data/images.json'))
        .pipe(gulp.dest('./'))
        .pipe(notify({ message: 'imageData:comp task complete' }));
});