Is there a JavaScript InDesign function to get a hard drive ID value?

I used this command to export the hard drive ID to drive C:
var command = "wmic diskdrive get SerialNumber > C:/idhdd.txt";
app.system('cmd.exe /c "' + command + '"');
I get this text file:
SerialNumber
2012062914345300
Is there a JavaScript statement to remove the SerialNumber header line? I just want the ID saved to the text file on drive C.

Here's a ready-to-use getDriveIDs() function that should work in any Adobe app and will return an array of HDD ID strings for you. I hope it can easily be generalized for other scenarios involving Windows scripting inside Adobe scripting ;-)
//----------------------------------------------------------------------//
// Detects IDs (serial numbers) of connected drives and returns them as an array of strings.
var getDriveIDs = function() {
    var idFile = File(Folder.temp + '/saved_hdd_serials.txt');
    var scriptFile = File(Folder.temp + '/dump_hdd_serials.bat');
    var scriptContent = 'wmic diskdrive get SerialNumber > ' + idFile.fsName + '\n';
    var ids = [];
    withTempFile(scriptFile, scriptContent, function() {
        scriptFile.execute();
        $.writeln(idFile.length == 0); // wait for the asynchronous script execution to finish
        $.sleep(1);
        withTempFile(idFile, undefined, function(file, lines) {
            ids = lines.slice(1); // drop the "SerialNumber" header line
        });
    });
    return ids;
};
//----------------------------------------------------------------------//
// utilities
var withTempFile = function(file, content, callback) {
    if (undefined == content) { // read temp file
        file.open('r');
        content = [];
        while (!file.eof)
            content.push(file.readln());
    } else { // write temp file
        file.open('w');
        file.write(content);
        content = undefined;
    }
    file.close();
    callback(file, content);
    file.remove();
};
//----------------------------------------------------------------------//
// main: demo
var ids = getDriveIDs();
alert('Drive IDs:\n\t' + ids.join('\n\t'));
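Note that wmic output often ends with blank or whitespace-only lines, so the returned array may contain empty entries. A small cleanup sketch (a plain ES3-style loop, since ExtendScript lacks Array.prototype.filter):
//----------------------------------------------------------------------//
// Optional cleanup: keep only non-blank serial numbers.
var rawIds = getDriveIDs();
var cleanIds = [];
for (var i = 0; i < rawIds.length; i++) {
    var id = rawIds[i].replace(/^\s+|\s+$/g, ''); // trim surrounding whitespace
    if (id.length > 0) cleanIds.push(id);
}
alert('Clean drive IDs:\n\t' + cleanIds.join('\n\t'));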

Copying Files based on a custom column value between document libraries in SharePoint using javascript

I am new to JavaScript and currently working on a task where I need to copy files, based on a custom column named "PID", from one document library to another.
I was able to get the code below to work, which copies all the files:
$scope.copyFiles = function()
{
    var sourceLib = '/sites/Protocol/ProtocolDocument';
    var destLib = '/sites/Protocol/FinalProtocolDocuments';
    var context = new SP.ClientContext.get_current();
    var web = context.get_web().get_lists();
    var folderSrc = web.getFolderByServerRelativeUrl(sourceLib);
    //var cq = '<Query><Where><Eq><FieldRef Name="ProtocolID" LookupId="TRUE"/><Value Type="Text">466</Value></Eq></Where></Query>';
    context.load(folderSrc, 'Files');
    context.executeQueryAsync(
        function() {
            console.log("Got the source folder right here!");
            var files = folderSrc.get_files();
            var e = files.getEnumerator();
            var dest = [];
            while (e.moveNext()) {
                var file = e.get_current();
                var destLibUrl = destLib + "/" + file.get_name();
                dest.push(destLibUrl); //delete this when we're happy we got the file paths right
                file.copyTo(destLibUrl, true);
            }
            console.log(dest); //delete this when we're happy we got the file paths right
            context.executeQueryAsync(
                function() { console.log("Files moved successfully!"); },
                function(sender, args) { console.log("error: " + args.get_message()); }
            );
        },
        function(sender, args) { console.log("Sorry, something messed up: " + args.get_message()); }
    );
}
I did some research online on getting the filenames based on a custom column value, with no luck.
I also tried to use CAML, but I am not sure how to use it in the code.
I would appreciate it if anyone could help me get the filenames from a document library based on the custom column "PID", so that only the selected/filtered files are moved to the destination library.
UPDATED CODE
$scope.copyFiles = function()
{
    var sourceLib = '/sites/Protocol/ProtocolDocument';
    var destLib = '/sites/Protocol/FinalProtocolDocuments';
    var PID = '466';
    var context = new SP.ClientContext();
    var list = context.get_web().get_lists().getByTitle("ProtocolDocument");
    var cq = new SP.CamlQuery();
    cq.set_viewXml("<View><Query>" +
        "<Where>" +
        "<Eq><FieldRef Name=\"ProtocolID\"/><Value Type=\"Text\">" + PID + "</Value></Eq>" +
        "</Where>" +
        "</Query>" +
        "</View>");
    var items = list.getItems(cq);
    context.load(items);
    context.executeQueryAsync(
        function() {
            var e = items.getEnumerator();
            var dest = [];
            while (e.moveNext())
            {
                var file = e.get_current();
                var destLibUrl = destLib + "/" + file.get_name();
                dest.push(destLibUrl); //delete this when we're happy we got the file paths right
                file.copyTo(destLibUrl, true);
            }
            console.log(dest); //delete this when we're happy we got the file paths right
            context.executeQueryAsync(
                function() { console.log("Files moved successfully!"); },
                function(sender, args) { console.log("error: " + args.get_message()); }
            );
        },
        function(sender, args) { console.log("Sorry, something messed up: " + args.get_message()); }
    );
}
Here is my attempt. I tested it successfully on SharePoint 2013, and it copies files from one document library to another, but only the files with a lookup field set to a specific value. I have included a short summary, but if you only want the code then jump down to Now to the actual code.
Please note that I have used syntax not supported by Internet Explorer, so let me know if you need to support that browser. I also believe that a function or method should only do one thing, so I split the functionality into three separate functions. This also helps keep the code clean and easier to read.
To summarize: The first function, findAndCopyFiles(), will run once and find all the files with the lookup field value you set. It will then send each file to the loadAndCopyFile() function to load the file object. This function will run once for every file that should be copied. When the file object is loaded, it is sent to the third and final function, copyFileTo(), that will actually copy the file to the destination document library. This function will also run once per file.
Now to the actual code
First you need to set these configuration variables according to your setup:
const destinationLibraryPath = 'The path to your destination document library';
const sourceLibraryName = 'The name (not path) of your source document library';
const lookupField = 'The name of your lookup field';
const lookupValue = 'The value your lookup field should equal for files to be copied';
findAndCopyFiles()
This function is responsible for finding all the files in the source document library with the lookup field set to the value of lookupValue. We use what is known as a CAML query to filter the files. You can filter on all available fields and columns, not only lookup fields.
const findAndCopyFiles = () => {
    const clientContext = SP.ClientContext.get_current();
    const sourceList = clientContext.get_web().get_lists().getByTitle(sourceLibraryName);
    const camlQuery = new SP.CamlQuery();
    const whereQuery = `<Eq><FieldRef Name="${lookupField}"/><Value Type="Text">${lookupValue}</Value></Eq>`;
    camlQuery.set_viewXml(`<View><Query><Where>${whereQuery}</Where></Query></View>`);
    const sourceListItems = sourceList.getItems(camlQuery);
    clientContext.load(sourceListItems);
    clientContext.executeQueryAsync(
        () => {
            const filesEnumerator = sourceListItems.getEnumerator();
            while (filesEnumerator.moveNext()) {
                loadAndCopyFile(filesEnumerator.get_current(), clientContext);
            }
        },
        (_sender, args) => {
            console.log(args.get_message());
        }
    );
}
When the query executes, we use the getEnumerator() method to iterate through all the files returned by the query, in other words all the files that will be copied.
loadAndCopyFile()
After finding all the relevant files, we send each file to the next function to continue our process. This function will load the file object (as in the actual file) and construct the destination URL using the path to the destination document library and the filename of the file.
const loadAndCopyFile = (file, clientContext) => {
    const fileRef = file.get_file();
    clientContext.load(fileRef);
    clientContext.executeQueryAsync(
        () => {
            const destinationUrl = `${destinationLibraryPath}/${fileRef.get_name()}`;
            copyFileTo(fileRef, destinationUrl, clientContext);
        },
        (_sender, args) => {
            console.log(args.get_message());
        }
    );
}
copyFileTo()
The final function is responsible for actually copying the file to the destination document library. It is quite simple, and looks like this:
const copyFileTo = (file, destinationUrl, clientContext) => {
    file.copyTo(destinationUrl, true);
    clientContext.executeQueryAsync(
        () => {
            console.log(`File copied to ${destinationUrl}!`);
        },
        (_sender, args) => {
            console.log(args.get_message());
        }
    );
}
Putting it all together
And finally, we execute the findAndCopyFiles() function when all the required libraries are ready:
SP.SOD.executeFunc('sp.js', 'SP.ClientContext', () => {
    findAndCopyFiles();
});
Disclaimer: I wrote this post on another computer than the one where I tested the code, so if something does not work it may be because of a simple syntax error. In that case, add a comment and let me know!

How to convert an HTML template with images to PDF in ServiceNow?

I need to create dynamic PDFs from HTML templates in ServiceNow, but these PDFs must contain images and styles, and I have not been able to get that working.
I tried using ServiceNow's GeneralPDF API and got the template converted to PDF, but only when it contains plain text. When I include images, the following error appears when executing my code:
ExceptionConverter: java.io.IOException: The document has no pages.:
org.mozilla.javascript.JavaScriptException: ExceptionConverter:
java.io.IOException: The document has no pages.:
This is in a Script Include and is called from a UI Action.
My code to convert the HTML to PDF is the following:
create : function (sys_id) {
    var carta = new GlideRecord('x_solsa_casos_plant_doc');
    carta.addQuery('sys_id', '6f1e4ac8db29f300ab7c0f95ca96197a');
    carta.query();
    if (carta.next()) {
        var parsedBody = carta.body;
        var gr = new GlideRecord('x_solsa_casos_x_solsa_casos');
        gr.get('sys_id', sys_id);
        var sampleString = parsedBody.toString();
        var reg = new SNC.Regex('/\\$\\{(.*?)\\}/i');
        var match = reg.match(sampleString);
        var count = 0;
        var variables = [];
        var values = [];
        var tmpValue;
        while (match != null)
        {
            variables.push(match.toString().substring(match.toString().indexOf(',') + 1));
            match = reg.match();
            values.push(variables[count]);
            gs.log("array values : " + values);
            if (gr.getDisplayValue(values[count]) == null || JSUtil.nil(gr.getDisplayValue(values[count])))
            {
                tmpValue = '';
            } else {
                tmpValue = gr.getDisplayValue(values[count]);
                gs.log("tmpValue :" + tmpValue);
            }
            parsedBody = parsedBody.replace('${' + variables[count] + '}', tmpValue);
            count++;
            gs.log("parsedBody : " + parsedBody);
        }
        this.createPDF(parsedBody, 'x_solsa_casos_x_solsa_casos', sys_id, 'carta.pdf');
    }
},
createPDF : function(html, table, sys_id, filename) {
    var pdfDoc = new GeneralPDF.Document(null, null, null, null, null, null);
    this._document = new GeneralPDF(pdfDoc);
    this._document.startHTMLParser();
    this._document.addHTML(html);
    this._document.stopHTMLParser();
    this.saveAs(table, sys_id, filename);
},
saveAs : function (table, sys_id, filename) {
    var att = new GeneralPDF.Attachment();
    att.setTableName(table);
    att.setTableId(sys_id);
    att.setName(filename);
    att.setType('application/pdf');
    att.setBody(this._document.get());
    GeneralPDF.attach(att);
},
Looks like parsedBody is empty or doesn't always contain HTML. According to this answer, parseXHtml (which ServiceNow probably uses under the hood and which should appear in the complete stack trace) expects HTML tags, not just text:
https://stackoverflow.com/a/20902124/2157581
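A minimal sketch of that idea (an assumption, not a verified fix): guard createPDF so the parser is never handed bare text, by wrapping the body in minimal HTML first.
createPDF : function(html, table, sys_id, filename) {
    // Hypothetical guard: if the body doesn't start with a tag,
    // wrap it so the HTML parser has actual elements to lay out.
    if (!/^\s*</.test(String(html))) {
        html = '<html><body><p>' + html + '</p></body></html>';
    }
    var pdfDoc = new GeneralPDF.Document(null, null, null, null, null, null);
    this._document = new GeneralPDF(pdfDoc);
    this._document.startHTMLParser();
    this._document.addHTML(html);
    this._document.stopHTMLParser();
    this.saveAs(table, sys_id, filename);
},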

Is Promise.all not working on the second time through? Why not?

I'm just finishing off this basic web scraper project for a t-shirt website.
It enters through one hardcoded URL, the home page. It searches for any product pages and adds them to urlSet. If it finds another (non-product) link, it stores it as remainder and scrapes that one as well to find any more product pages. It then scrapes the product pages in urlSet, grabs the t-shirt data (price, img, title), converts it, and writes it to a CSV file.
For some reason, this is not working on the second run-through of scrape with remainder.
If I remove the second scrape of url, everything works out fine and the file gets written correctly. But if I want to get the other product pages, it seems to be failing somewhere.
Here is my code. I apologise for posting so much of it, but I don't know how it can be understood properly without the right context; hopefully it's been commented okay:
//TASK: Create a command line application that goes to an ecommerce site to get the latest prices.
//Save the scraped data in a spreadsheet (CSV format).
'use strict';
//Modules being used:
var cheerio = require('cheerio');
var json2csv = require('json2csv');
var request = require('request');
var moment = require('moment');
var fs = require('fs');
//hardcoded url
var url = 'http://shirts4mike.com/';
//urls for tshirt pages
var urlSet = new Set();
var remainder;
var tshirtArray = [];
const requestPromise = function(url) {
    return new Promise(function(resolve, reject) {
        request(url, function(error, response, html) {
            if (error) return reject(error);
            if (!error && response.statusCode == 200) {
                return resolve(html);
            }
        });
    });
}
// Go into webpage via url, load html and grab links with "shirt" in the url
function scrape(url) {
    console.log("Currently scraping " + url);
    return requestPromise(url)
        .then(function(html) {
            var $ = cheerio.load(html);
            var links = [];
            //get all the links
            $('a[href*=shirt]').each(function() {
                var a = $(this).attr('href');
                //add into link array
                links.push(url + a);
            });
            // return array of links
            return links;
        });
}
function nextStep(arrayOfLinks) {
    var promiseArray = [];
    console.log(arrayOfLinks);
    for (var i = 0; i < arrayOfLinks.length; i++) {
        promiseArray.push(requestPromise(arrayOfLinks[i]));
    }
    //return both the html of pages and their urls
    return Promise.all(promiseArray)
        .then(function(arrayOfHtml) {
            return {arrayOfHtml: arrayOfHtml, arrayOfUrls: arrayOfLinks};
        });
}
//go through the html of each url and add to urlSet if there is a checkout button
//add it to remainder otherwise, to rescrape
function lastStep(obj) {
    for (var i = 0; i < obj.arrayOfHtml.length; i++) {
        var $ = cheerio.load(obj.arrayOfHtml[i]);
        //if page has a submit it must be a product page
        if ($('[type=submit]').length !== 0) {
            //add page to set
            urlSet.add(obj.arrayOfUrls[i]);
            console.log(obj.arrayOfUrls[i]);
        } else if (remainder == undefined) {
            //if not a product page, add it to remainder so that another scrape can be performed.
            remainder = obj.arrayOfUrls[i];
            console.log("The remainder is " + remainder);
        }
    }
    //return remainder for second run-through of scrape
    return remainder;
}
//iterate through urlSet (product pages) and grab html
function lastScraperPt1() {
    //scrape the set of product pages
    var promiseArray = [];
    for (var item of urlSet) {
        var url = item;
        promiseArray.push(requestPromise(url));
    }
    return Promise.all(promiseArray)
        .then(function(arrayOfHtml) {
            return arrayOfHtml;
        });
}
//iterate over the html of the product pages and store data as objects
function lastScraperPt2(html) {
    for (var i = 0; i < html.length; i++) {
        var $ = cheerio.load(html[i]);
        //grab data and store as variables
        var price = $('.price').text();
        var imgURL = $('.shirt-picture').find('img').attr('src');
        var title = $('body').find('.shirt-details > h1').text().slice(4);
        var tshirtObject = {};
        //add values into tshirt object
        tshirtObject.Title = title;
        tshirtObject.Price = price;
        tshirtObject.ImageURL = imgURL;
        tshirtObject.URL = url;
        tshirtObject.Date = moment().format('MMMM Do YYYY, h:mm:ss a');
        //add the object into the array of tshirts
        tshirtArray.push(tshirtObject);
    }
    convertJson2Csv();
}
//convert tshirt objects and save as CSV file
function convertJson2Csv() {
    //The scraper should generate a folder called `data` if it doesn’t exist.
    var dir = './data';
    if (!fs.existsSync(dir)) {
        fs.mkdirSync(dir);
    }
    var fields = ['Title', 'Price', 'ImageURL', 'URL', 'Date'];
    //convert tshirt data into CSV and pass in fields
    var csv = json2csv({ data: tshirtArray, fields: fields });
    //Name of file will be the date
    var fileDate = moment().format('MM-DD-YY');
    var fileName = dir + '/' + fileDate + '.csv';
    //Write file
    fs.writeFile(fileName, csv, {overwrite: true}, function(err) {
        console.log('file saved');
        if (err) throw err;
    });
}
scrape(url) //scrape from original entry point
    .then(nextStep)
    .then(lastStep)
    .then(scrape) //scrape again but with remainder url
    .then(nextStep)
    .then(lastStep)
    .then(lastScraperPt1)
    .then(lastScraperPt2)
    .catch(function(err) {
        // handle any error from any request here
        console.log(err);
    });
I'm console logging the arrayOfLinks in nextStep, so I can see that they are being grabbed properly; I just cannot work out why they aren't being passed through to lastStep properly.
Currently scraping http://shirts4mike.com/
[ 'http://shirts4mike.com/shirts.php',
'http://shirts4mike.com/shirts.php',
'http://shirts4mike.com/shirt.php?id=108',
'http://shirts4mike.com/shirt.php?id=107',
'http://shirts4mike.com/shirt.php?id=106',
'http://shirts4mike.com/shirt.php?id=105' ]
The remainder is http://shirts4mike.com/shirts.php
http://shirts4mike.com/shirt.php?id=108
http://shirts4mike.com/shirt.php?id=107
http://shirts4mike.com/shirt.php?id=106
http://shirts4mike.com/shirt.php?id=105
Currently scraping http://shirts4mike.com/shirts.php
[ 'http://shirts4mike.com/shirts.phpshirts.php',
'http://shirts4mike.com/shirts.phpshirt.php?id=101',
'http://shirts4mike.com/shirts.phpshirt.php?id=102',
'http://shirts4mike.com/shirts.phpshirt.php?id=103',
'http://shirts4mike.com/shirts.phpshirt.php?id=104',
'http://shirts4mike.com/shirts.phpshirt.php?id=105',
'http://shirts4mike.com/shirts.phpshirt.php?id=106',
'http://shirts4mike.com/shirts.phpshirt.php?id=107',
'http://shirts4mike.com/shirts.phpshirt.php?id=108' ]
BUT if I choose to only call the first scrape and don't call the second, like this:
scrape(url) //scrape from original entry point
    .then(nextStep)
    .then(lastStep)
    .then(lastScraperPt1)
    .then(lastScraperPt2)
    .catch(function(err) {
        // handle any error from any request here
        console.log(err);
    });
... Then everything works. I just don't get to all the urls.
What is happening here and how can I fix it? Thank you guys
The issue is that tshirtArray is not defined in convertJson2Csv(). In lastScraperPt2, pass tshirtArray to convertJson2Csv():
convertJson2Csv(tshirtArray)
and in convertJson2Csv, accept it as a parameter:
function convertJson2Csv(tshirtArray) {
    // do stuff
}
One problem seems to be in your lastStep. It looks like you mean for remainder to be another array of urls. Correct me if I'm wrong there. However, what's happening is that the first time the if ($('[type=submit]').length !== 0) condition fails, you automatically go down to the next block, because remainder starts undefined. Whatever the current url is, you assign it to remainder. For the rest of the iterations of your for-loop, you will never again hit the condition where remainder == undefined. So you will only ever end up with one url assigned to remainder, and any others that you were hoping to get will simply be passed over.
You might want to define remainder as remainder = [];. Then, instead of saying else if (remainder == undefined), you would just say:
} else {
    remainder.push(obj.arrayOfUrls[i]);
}
However, then you're passing an array of urls to scrape, when scrape is only expecting a single url. If this is what you want, and I am right in assuming that you mean for remainder to be an array of urls, you could define a new function as follows:
function scrapeRemainders(remainders) {
    var promises = [];
    remainders.forEach(function (url) {
        promises.push(requestPromise(url));
    });
    return Promise.all(promises).then(function (results) {
        return _.flattenDeep(results);
    });
}
Then, instead of the second scrape in your promise chain, you would replace it with scrapeRemainders. Also, for the _ in the previous function, you would need to npm install lodash and then var _ = require('lodash'). On a side note, lodash has nothing to do with promises, but it is a great tool for data manipulation. You should look into it when you have the chance.
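For reference, the lodash setup mentioned above, assuming a standard Node.js project:
// Run once in the project directory: npm install lodash
var _ = require('lodash'); // used here only for _.flattenDeep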
Also, in lastScraperPt1, you can change
return Promise.all(promiseArray)
.then(function(arrayOfHtml){
return arrayOfHtml;
});
to
return Promise.all(promiseArray);
It does the same thing.
Hope this helps. If this does not answer your question, comment at me and I can change my answer accordingly.
All fixed; it was grabbing the wrong urls in scrape(). I only discovered this after I logged the status codes to the console:
//TASK: Create a command line application that goes to an ecommerce site to get the latest prices.
//Save the scraped data in a spreadsheet (CSV format).
'use strict';
//Modules being used:
var cheerio = require('cheerio');
var json2csv = require('json2csv');
var request = require('request');
var moment = require('moment');
var fs = require('fs');
//hardcoded url
var urlHome = 'http://shirts4mike.com/';
//urls for tshirt pages
var urlSet = [];
var tshirtArray = [];
const requestPromise = function(url) {
    return new Promise(function(resolve, reject) {
        request(url, function(error, response, html) {
            if (error) {
                errorHandler(error);
                return reject(error);
            }
            if (!error && response.statusCode == 200) {
                return resolve(html);
            }
            if (response.statusCode !== 200) {
                console.log("response code is " + response.statusCode);
            }
            return resolve("");
        });
    });
}
// Go into webpage via url, load html and grab links with "shirt" in the url
function scrape(url) {
    console.log("Currently scraping " + url);
    return requestPromise(url)
        .then(function(html) {
            var $ = cheerio.load(html);
            var links = [];
            var URL = 'http://shirts4mike.com/';
            //get all the links
            $('a[href*=shirt]').each(function() {
                var a = $(this).attr('href');
                //add into link array
                links.push(URL + a);
            });
            // return array of links
            return links;
        });
}
function nextStep(arrayOfLinks) {
    var promiseArray = [];
    console.log(arrayOfLinks);
    for (var i = 0; i < arrayOfLinks.length; i++) {
        promiseArray.push(requestPromise(arrayOfLinks[i]));
    }
    //return both the html of pages and their urls
    return Promise.all(promiseArray)
        .then(function(arrayOfHtml) {
            return {arrayOfHtml: arrayOfHtml, arrayOfUrls: arrayOfLinks};
        });
}
//go through the html of each url and add to urlSet if there is a checkout button
//add it to remainder otherwise, to rescrape
function lastStep(obj) {
    for (var i = 0; i < obj.arrayOfHtml.length; i++) {
        var $ = cheerio.load(obj.arrayOfHtml[i]);
        //if page has a submit it must be a product page
        if ($('[type=submit]').length !== 0) {
            //add page to set
            urlSet.push(obj.arrayOfUrls[i]);
            console.log(obj.arrayOfUrls[i]);
        } else if (remainder == undefined) {
            //if not a product page, add it to remainder so that another scrape can be performed.
            var remainder = obj.arrayOfUrls[i];
            console.log("The remainder is " + remainder);
        }
    }
    //return remainder for second run-through of scrape
    return remainder;
}
//iterate through urlSet (product pages) and grab html
function lastScraperPt1() {
    //scrape the set of product pages
    var promiseArray = [];
    for (var item of urlSet) {
        var url = item;
        promiseArray.push(requestPromise(url));
    }
    return Promise.all(promiseArray)
        .then(function(arrayOfHtml) {
            return arrayOfHtml;
        });
}
//iterate over the html of the product pages and store data as objects
function lastScraperPt2(html) {
    for (var i = 0; i < html.length; i++) {
        var $ = cheerio.load(html[i]);
        //grab data and store as variables
        var price = $('.price').text();
        var imgURL = $('.shirt-picture').find('img').attr('src');
        var title = $('body').find('.shirt-details > h1').text().slice(4);
        var tshirtObject = {};
        //add values into tshirt object
        tshirtObject.Title = title;
        tshirtObject.Price = price;
        tshirtObject.ImageURL = urlHome + imgURL;
        tshirtObject.URL = urlSet[i];
        tshirtObject.Date = moment().format('MMMM Do YYYY, h:mm:ss a');
        //add the object into the array of tshirts
        tshirtArray.push(tshirtObject);
    }
    return tshirtArray;
}
//convert tshirt objects and save as CSV file
function convertJson2Csv(tshirtArray) {
    //The scraper should generate a folder called `data` if it doesn’t exist.
    var dir = './data';
    if (!fs.existsSync(dir)) {
        fs.mkdirSync(dir);
    }
    var fields = ['Title', 'Price', 'ImageURL', 'URL', 'Date'];
    //convert tshirt data into CSV and pass in fields
    var csv = json2csv({ data: tshirtArray, fields: fields });
    //Name of file will be the date
    var fileDate = moment().format('MM-DD-YY');
    var fileName = dir + '/' + fileDate + '.csv';
    //Write file
    fs.writeFile(fileName, csv, {overwrite: true}, function(err) {
        console.log('file saved');
        if (err) errorHandler(err);
    });
}
scrape(urlHome) //scrape from original entry point
    .then(nextStep)
    .then(lastStep)
    .then(scrape)
    .then(nextStep)
    .then(lastStep)
    .then(lastScraperPt1)
    .then(lastScraperPt2)
    .then(convertJson2Csv)
    .catch(function(err) {
        // handle any error from any request here
        console.log(err);
    });
//If the site is down, an error message describing the issue should appear in the console.
//This is to be tested by disabling wifi on your device.
//When an error occurs, log it to the file scraper-error.log. It should append to the bottom of the file with a time stamp and the error.
var errorHandler = function (error) {
    console.log(error.message);
    console.log('The scraper could not scrape data from ' + urlHome + '; there is either a problem with your internet connection or the site may be down');
    /**
     * create new date for log file
     */
    var loggerDate = new Date();
    /**
     * create message as a variable
     */
    var errLog = '[' + loggerDate + '] ' + error.message + '\n';
    /**
     * when the error occurs, log that to the error logger file
     */
    fs.appendFile('scraper-error.log', errLog, function (err) {
        if (err) throw err;
        console.log('There was an error. The error was logged to scraper-error.log');
    });
};

Is there a way of creating a .lnk file using JavaScript?

I would like to give the users of my website the ability to download a .lnk file.
My idea is to generate this file so that it contains an address that can be used only once.
Is there a way to generate this file in JavaScript?
The flow is something like this:
the user presses a button
the JavaScript generates this file and downloads it to the user's machine
the user sends this file to another user, who uses the one-time address from his machine
Is something like this doable in JavaScript from the client side? Or would I need to generate this file server-side in Java?
This is a faithful translation of mslink.sh.
I only tested my answer in Windows 8.1, but I would think that it works in older versions of Windows, too.
function create_lnk_blob(lnk_target) {
    function hex_to_arr(s) {
        var result = Array(s.length / 2);
        for (var i = 0; i < result.length; ++i) {
            result[i] = +('0x' + s.substr(2 * i, 2));
        }
        return result;
    }
    function str_to_arr(s) {
        var result = Array(s.length);
        for (var i = 0; i < s.length; ++i) {
            var c = s.charCodeAt(i);
            if (c >= 128) {
                throw Error("Only ASCII paths are supported :-(");
            }
            result[i] = c;
        }
        return result;
    }
    function convert_CLSID_to_DATA(s) {
        var idx = [[6,2], [4,2], [2,2], [0,2],
                   [11,2], [9,2], [16,2], [14,2],
                   [19,4], [24,12]];
        var parts = idx.map(function (ii) {
            return s.substr(ii[0], ii[1]);
        });
        return hex_to_arr(parts.join(''));
    }
    function gen_IDLIST(s) {
        var item_size = (0x10000 + s.length + 2).toString(16).substr(1);
        return hex_to_arr(item_size.replace(/(..)(..)/, '$2$1')).concat(s);
    }
    var HeaderSize = [0x4c, 0x00,0x00,0x00],
        LinkCLSID = convert_CLSID_to_DATA("00021401-0000-0000-c000-000000000046"),
        LinkFlags = [0x01,0x01,0x00,0x00], // HasLinkTargetIDList ForceNoLinkInfo
        FileAttributes_Directory = [0x10,0x00,0x00,0x00],
        FileAttributes_File = [0x20,0x00,0x00,0x00],
        CreationTime = [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
        AccessTime = [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
        WriteTime = [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
        FileSize = [0x00,0x00,0x00,0x00],
        IconIndex = [0x00,0x00,0x00,0x00],
        ShowCommand = [0x01,0x00,0x00,0x00], // SW_SHOWNORMAL
        Hotkey = [0x00,0x00], // No Hotkey
        Reserved = [0x00,0x00],
        Reserved2 = [0x00,0x00,0x00,0x00],
        Reserved3 = [0x00,0x00,0x00,0x00],
        TerminalID = [0x00,0x00],
        CLSID_Computer = convert_CLSID_to_DATA("20d04fe0-3aea-1069-a2d8-08002b30309d"),
        CLSID_Network = convert_CLSID_to_DATA("208d2c60-3aea-1069-a2d7-08002b30309d"),
        PREFIX_LOCAL_ROOT = [0x2f],
        PREFIX_FOLDER = [0x31,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
        PREFIX_FILE = [0x32,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
        PREFIX_NETWORK_ROOT = [0xc3,0x01,0x81],
        PREFIX_NETWORK_PRINTER = [0xc3,0x02,0xc1],
        END_OF_STRING = [0x00];
    var target_is_folder = /.*\\+$/.test(lnk_target);
    if (target_is_folder) {
        lnk_target = lnk_target.replace(/\\+$/g, '');
    }
    var prefix_root, item_data, target_root, target_leaf;
    if (lnk_target.substr(0, 2) === '\\\\') { // network path
        prefix_root = PREFIX_NETWORK_ROOT;
        item_data = [0x1f, 0x58].concat(CLSID_Network);
        target_root = lnk_target.substr(lnk_target.lastIndexOf('\\'));
        if (/\\\\.*\\.*/.test(lnk_target)) {
            target_leaf = lnk_target.substr(lnk_target.lastIndexOf('\\') + 1);
        }
        if (target_root === '\\') {
            target_root = lnk_target;
        }
    } else { // local path
        prefix_root = PREFIX_LOCAL_ROOT;
        item_data = [0x1f, 0x50].concat(CLSID_Computer);
        target_root = lnk_target.replace(/\\.*$/, '\\');
        if (/.*\\.*/.test(lnk_target)) {
            target_leaf = lnk_target.replace(/^.*?\\/, '');
        }
    }
    var prefix_of_target, file_attributes;
    if (!target_is_folder) {
        prefix_of_target = PREFIX_FILE;
        file_attributes = FileAttributes_File;
    } else {
        prefix_of_target = PREFIX_FOLDER;
        file_attributes = FileAttributes_Directory;
    }
    target_root = str_to_arr(target_root);
    for (var i = 1; i <= 21; ++i) {
        target_root.push(0);
    }
    var id_list_items = gen_IDLIST(item_data);
    id_list_items = id_list_items.concat(
        gen_IDLIST(prefix_root.concat(target_root, END_OF_STRING)));
    if (target_leaf) {
        target_leaf = str_to_arr(target_leaf);
        id_list_items = id_list_items.concat(
            gen_IDLIST(prefix_of_target.concat(target_leaf, END_OF_STRING)));
    }
    var id_list = gen_IDLIST(id_list_items);
    var data = [].concat(HeaderSize,
                         LinkCLSID,
                         LinkFlags,
                         file_attributes,
                         CreationTime,
                         AccessTime,
                         WriteTime,
                         FileSize,
                         IconIndex,
                         ShowCommand,
                         Hotkey,
                         Reserved,
                         Reserved2,
                         Reserved3,
                         id_list,
                         TerminalID);
    return new Blob([new Uint8Array(data)], { type: 'application/x-ms-shortcut' });
}
Use it like:
var blob_to_file = create_lnk_blob('C:\\Windows\\System32\\Calc.exe');
var blob_to_folder = create_lnk_blob('C:\\Users\\Myself\\Desktop\\'); // with a trailing slash
Demo: http://jsfiddle.net/5cjgLyan/2/
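If you also need to trigger the download in the browser (the question's button-press flow), here is a small sketch using the standard Blob/object-URL APIs; the helper name download_lnk and the filename calc.lnk are just placeholders:
function download_lnk(blob, filename) {
    var a = document.createElement('a');
    a.href = URL.createObjectURL(blob);
    a.download = filename; // suggested filename for the download
    document.body.appendChild(a);
    a.click(); // triggers the browser download
    document.body.removeChild(a);
    setTimeout(function () { URL.revokeObjectURL(a.href); }, 0); // release the object URL
}
download_lnk(create_lnk_blob('C:\\Windows\\System32\\Calc.exe'), 'calc.lnk');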
This would be simple if your website allows PHP.
If your script is part of an HTML file, just write the JavaScript as if you were writing it to send a static .lnk file. Then, at the part with the .lnk address, break the JavaScript into two pieces, dropping back into HTML, and at that point put in:
<?php /* PHP code to set a variable */ /* PHP code to generate the proper string */ print /* the PHP variable */ ?>
I think making this purely client-side is impossible.
Even the WebRTC protocol needs at least one ICE server to signal the other client.
And I think the easiest way to do it is to use http://peerjs.com/
You could first create a client token for the room owner:
//room owner side
peer.on('open', function(my_peer_id) {
    console.log('My peer ID is: ' + my_peer_id);
});
Then send the token to whomever you want (by text file, web chat, etc.), and the other client connects using the token above:
//the other one
var conn = peer.connect(other_peer_id);
After the room owner detects that someone has entered the room, disconnect from the signalling server, so the token becomes unusable:
//room owner side
peer.disconnect()
About generating and reading files on the client side, I recommend you read the articles below:
http://www.html5rocks.com/en/tutorials/file/dndfiles/ (reading from a file)
How to use filesaver.js (saving as a file)
I believe the compatibility of the FileReader API and Blob doesn't matter here, since there will never be a browser that supports WebRTC but not the FileReader API.
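For example, a minimal sketch combining the two (assuming FileSaver.js is loaded and exposes the global saveAs): save the room owner's peer ID as the one-time token file.
//room owner side: write the peer ID into a downloadable text file
peer.on('open', function (my_peer_id) {
    var token_blob = new Blob([my_peer_id], { type: 'text/plain;charset=utf-8' });
    saveAs(token_blob, 'room-token.txt'); // FileSaver.js
});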

InDesign ExtendScript script sometimes creates a corrupted PDF during export

I've had this problem for a while now. Close to the end of my "Proofing" script, the currently open document in InDesign is exported to two different .pdf files. The first is password-protected while the second is not. I don't seem to have any problems with the latter, but the former often becomes corrupted somehow and cannot be opened by any PDF reader, including Acrobat itself. Here's the code block that does the exporting (it is not runnable by itself, btw):
/********** BEGIN PDF EXPORTING **********/
// First, let's create and set PDF export preferences.
// This begins with creating a temporary preset if it doesn't already exist.
// This preset will be used for both the Proof page and the Cover sheet.
var tempPreset = app.pdfExportPresets.item("tempPreset");
try
{
    tempPreset.name;
}
catch (eNoSuchPreset)
{
    tempPreset = app.pdfExportPresets.add({name: "tempPreset"});
}
with (tempPreset)
{
    acrobatCompatibility = AcrobatCompatibility.ACROBAT_5;
    bleedMarks = false;
    colorBars = false;
    colorBitmapCompression = BitmapCompression.AUTO_COMPRESSION;
    colorBitmapQuality = CompressionQuality.MAXIMUM;
    colorBitmapSampling = Sampling.BICUBIC_DOWNSAMPLE;
    colorBitmapSamplingDPI = 300;
    compressTextAndLineArt = true;
    cropImagesToFrames = true;
    cropMarks = false;
    exportGuidesAndGrids = false;
    exportNonprintingObjects = false;
    exportReaderSpreads = false;
    exportWhichLayers = ExportLayerOptions.EXPORT_VISIBLE_PRINTABLE_LAYERS;
    generateThumbnails = false;
    grayscaleBitmapCompression = BitmapCompression.AUTO_COMPRESSION;
    grayscaleBitmapQuality = CompressionQuality.MAXIMUM;
    grayscaleBitmapSampling = Sampling.BICUBIC_DOWNSAMPLE;
    grayscaleBitmapSamplingDPI = 300;
    includeBookmarks = false;
    includeHyperlinks = false;
    includeSlugArea = false;
    includeStructure = true;
    monochromeBitmapCompression = MonoBitmapCompression.CCIT4;
    monochromeBitmapSampling = Sampling.BICUBIC_DOWNSAMPLE;
    monochromeBitmapSamplingDPI = 1200;
    omitBitmaps = false;
    omitEPS = false;
    omitPDF = false;
    optimizePDF = true;
    pageInformationMarks = false;
    pageMarksOffset = 0.0833;
    pdfMarkType = MarkTypes.DEFAULT_VALUE;
    printerMarkWeight = PDFMarkWeight.P25PT;
    registrationMarks = false;
    standardsCompliance = PDFXStandards.NONE;
    subsetFontsBelow = 100;
    thresholdToCompressColor = 450;
    thresholdToCompressGray = 450;
    thresholdToCompressMonochrome = 1800;
    useDocumentBleedWithPDF = false;
}
currentProcess.text = "PDF export preferences"; progressWin.show();
progressIndividual.value++; if (aProducts.length > 1) {progressOverall.value++;}
// Now let's actually set the export preferences. These are for the proof page.
with (app.pdfExportPreferences)
{
    pageRange = proofRange;
    useSecurity = true;
    disallowChanging = true;
    disallowCopying = false;
    disallowDocumentAssembly = true;
    disallowExtractionForAccessibility = false;
    disallowFormFillIn = true;
    disallowHiResPrinting = true;
    disallowNotes = true;
    disallowPlaintextMetadata = true;
    disallowPrinting = false;
    changeSecurityPassword = "sky";
    if (multiColor)
    {
        pageRange = colorTable.toString();
    }
    if (currentProduct.pLabel != "")
    {
        pageRange += "," + labelPage.name;
    }
}
currentProcess.text = "Exporting PDF proof page"; progressWin.show();
progressIndividual.value++; if (aProducts.length > 1) {progressOverall.value++;}
// Before exporting the Proof page(s), hide the color bar on multicolor products.
if (multiColor) {document.layers.item("COLOR BAR").visible = false;}
// Then we save the proof page.
document.exportFile(ExportFormat.PDF_TYPE, File(jobFolder.toString() + "/" + saveName + ".pdf"), false, tempPreset);
When that produced corrupted PDFs once in a while, I thought that perhaps it was our less-than-ideal network structure causing the problem, so I instead tried exporting the PDF file to the local hard drive rather than directly to the network, then moving the file to the network afterward. So, the last line in the above code block was replaced with:
// First, to the local HDD.
document.exportFile(ExportFormat.PDF_TYPE, File("~/Documents/" + saveName + ".pdf"), false, tempPreset);
$.sleep(1000);
File("~/Documents/" + saveName + ".pdf").copy(File(jobFolder.toString() + "/" + saveName + ".pdf"));
$.sleep(1000);
File("~/Documents/" + saveName + ".pdf").remove();
I even added in those 1-second delays, just in case. Sadly, this hasn't helped. I am still getting a corrupted PDF every now and then. If there is any pattern to the corrupted files, I haven't been able to discern it. Does anyone have any thoughts?
It finally hit me that, if the corrupted files cannot be opened in Acrobat, then why not just test for that after the file is created? So I created a loop that exports the PDF file and tries to open it in Acrobat. If it opens fine, Acrobat prints and closes the file and returns a "true" message. If it is unable to do so, it returns a "false" message to the script, and the loop repeats for as long as that message is "false". While not a great fix for the underlying cause (whatever it may be), it is at least a workaround that will do just fine for our needs. The trick is that, because we work with Macs, we have to route the message through an AppleScript instead of using BridgeTalk to communicate directly with Acrobat.
Here's the code snippet from the main InDesign script which goes through the PDF-checking loop:
// Then we save the proof page.
// The loop is to make sure that the file was saved properly.
var validFile = false; // Flag that states whether or not the file is corrupted after saving.
var rString;  // String returned from Acrobat that should be either "true" or "false".
var testAndPrintFile = File("~/Documents/testAndPrint.applescript"); // The AppleScript file that calls Acrobat and runs a folder-level script.
var pdfFile;  // A String of the filename & path that will be passed through the AppleScript file to Acrobat.
var pdfArray = new Array(4); // An array to send to Acrobat. [0] is the PDF filename as a String,
                             // [1] is duplex if true, [2] is the printer name, and [3] is to enable printing.
if (multiTwoSided || twoPages) pdfArray[1] = "true";
else pdfArray[1] = "false";
pdfArray[2] = localPrinter;
pdfArray[3] = "true";
while (!validFile)
{
    $.writeln("If this message is seen more than once, then the Proof PDF was corrupted.");
    try
    {
        document.exportFile(ExportFormat.PDF_TYPE, File(jobFolder.toString() + "/" + saveName + ".pdf"), false, tempPreset);
    }
    catch (e)
    {
        alert("Could not save the Proof PDF. Please close any open copies of the Proof PDF, then save and print it manually.");
    }
    pdfFile = jobFolder.toString() + "/" + saveName + ".pdf";
    pdfArray[0] = pdfFile;
    $.writeln("pdfArray contains: " + pdfArray);
    try
    {
        rString = app.doScript(testAndPrintFile, ScriptLanguage.APPLESCRIPT_LANGUAGE, pdfArray);
        validFile = rString == "true";
        $.writeln("validFile is " + validFile);
        if (!validFile)
        {
            alert("It seems that the file " + unescape(pdfArray[0]) + " is corrupted. Will try to export it again.");
        }
    }
    catch (e)
    {
        $.writeln("ERROR at line number " + e.line);
        $.writeln(e.description);
        throw new Error("ERROR at line number " + e.line + "\n" + e.description);
    }
}
The testAndPrint.applescript file that this loop calls:
set pdfFile to item 1 of arguments
set duplexed to item 2 of arguments
set printerName to item 3 of arguments
set printEnabled to item 4 of arguments
tell application "Adobe Acrobat Pro"
    set result to do script ("testAndPrint(\"" & pdfFile & "\", \"" & duplexed & "\", \"" & printerName & "\", \"" & printEnabled & "\");")
end tell
return result
And, finally, the folder-level JavaScript file that is loaded into memory when Acrobat starts, ready to have its function called by the above AppleScript file:
var testAndPrint = app.trustedFunction(function (fName, duplexed, sPrinterName, bEnablePrinting)
{
    var success = true;
    app.beginPriv();
    console.println("fName is " + unescape(fName));
    console.println("sPrinterName is " + sPrinterName);
    try
    {
        var printDoc = app.openDoc(unescape(fName));
        var pp = printDoc.getPrintParams();
        if (duplexed == "true") pp.DuplexType = pp.constants.duplexTypes.DuplexFlipLongEdge;
        else pp.DuplexType = pp.constants.duplexTypes.Simplex;
        pp.printerName = sPrinterName;
        pp.interactive = pp.constants.interactionLevel.silent;
        pp.pageHandling = pp.constants.handling.none;
        if (bEnablePrinting == "true") printDoc.print({bUI: false, bSilent: true, bShrinkToFit: false, printParams: pp});
        printDoc.closeDoc(true);
    }
    catch (e)
    {
        console.println("ERROR at line number " + e.lineNumber);
        console.println(e.message);
        success = false;
    }
    app.endPriv();
    console.println("success is " + success);
    return success;
});
I hope that, perhaps, this information might be useful to anyone else running into a similar problem. It's not pretty, of course, but it certainly gets the job done.
