Generate tests based on the result of a promise in Jest JS - javascript

Right now I have a simple.test.js file that generates calls to test based on simplified call/response files (so we don't need to write a .test.js for each of these simplified cases). For reference I'll include the file here:
'use strict';
const api = require('./api');
const SCRIPT_NAME_KEY = Symbol('script name key'),
fs = require('fs'),
path = require('path');
const generateTests = (dir) => {
const relPath = path.relative(__dirname, dir);
let query, resultScripts = [], resultSqls = [];
for (let entry of fs.readdirSync(dir)) {
if (entry[0] === '-')
continue;
let fqEntry = path.join(dir, entry);
if (fs.statSync(fqEntry).isDirectory()) {
generateTests(fqEntry);
continue;
}
if (entry === 'query.json')
query = fqEntry;
else if (entry.endsWith('.sql'))
resultSqls.push(fqEntry);
else if (entry.endsWith('.js') && !entry.endsWith('.test.js'))
resultScripts.push(fqEntry);
}
if (!query && resultScripts.length === 0 && resultSqls.length === 0)
return;
if (!query)
throw `${relPath} contains result script(s)/sql(s) but no query.json`;
if (resultScripts.length === 0 && resultSqls.length === 0)
throw `${relPath} contains a query.json file but no result script(s)/sql(s)`;
try {
query = require(query);
} catch (ex) {
throw `${relPath} query.json could not be parsed`;
}
for (let x = 0; x < resultScripts.length; x++) {
let scriptName = path.basename(resultScripts[x]);
console.log('scriptName', scriptName);
try {
resultScripts[x] = require(resultScripts[x]);
} catch (ex) {
throw `${relPath} result script ${scriptName} could not be parsed`;
}
resultScripts[x][SCRIPT_NAME_KEY] = scriptName;
}
test(`ST:${relPath}`, () => api.getSqls(query).then(resp => {
if (resultScripts.length === 0) {
expect(resp.err).toBeFalsy();
expect(resp.data).toBeAllValidSql();
} else {
for (const script of resultScripts)
expect({ n: script[SCRIPT_NAME_KEY], r: script(resp, script[SCRIPT_NAME_KEY]) }).toPass();
}
for (const sql of resultSqls)
expect(resp.data).toIncludeSql(fs.readFileSync(sql, 'utf8'));
}));
};
expect.extend({
toPass(actual) {
const pass = actual.r === void 0 || actual.r === null || !!actual.r.pass;
return {
pass: pass,
message: pass ? null : () => actual.r.message || `${actual.n} check failed!`
}
}
});
generateTests(path.join(__dirname, 'SimpleTests'));
This works really great! It runs immediately when the .test.js file is loaded by Jest and generates a test for each folder containing the valid files.
However, I now have a need to generate a test per record in a database. From what I can tell most of the available modules that provide DB functionality work on the premise of promises (and reasonably so!). So now I need to wait for a query to come back BEFORE I generate the tests.
This is what I'm trying:
'use strict';
const api = require('./api');
api.getAllReportsThroughSideChannel().then((reports) => {
for (const report of reports) {
test(`${report.Name} (${report.Id} - ${report.OwnerUsername})`, () => {
// ...
});
}
});
However when I do this I get:
FAIL ./reports.test.js
● Test suite failed to run
Your test suite must contain at least one test.
at ../node_modules/jest/node_modules/jest-cli/build/TestScheduler.js:256:22
As one might expect, the promise gets created but doesn't get a chance to actually trigger the generation of tests until after Jest has already collected the list of tests from the file.
One thing I considered was to have a single test that is itself a promise and checks all the reports, but then it would fail on the first expect that fails, and we want to get a list of all reports that fail their tests. What we really want is a separate test for each.
Ultimately, I guess the question is whether it is possible for the generation of tests to be done via a promise (rather than the tests themselves).
There are a TON of resources for Jest out there; after searching I didn't find anything that applies to my question, so apologies if I just missed it somehow.

Ok, after a few days of looking through docs and code, it's looking more and more like this simply cannot be done in Jest (or, probably more correctly, it goes against Jest's testing philosophy).
As such, I have created a step that runs before the Jest runtime proper and simply downloads the results of the query to a file; I then use that file to synchronously generate the test cases.
I would LOVE it if someone can propose a better solution though.
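For completeness, that pre-step can be formalized with Jest's globalSetup config option, which runs (and may await) async code once before any test file is loaded. Below is a minimal sketch of the idea, assuming the api module and getAllReportsThroughSideChannel from above; jest.globalSetup.js and reports.json are illustrative names, not anything Jest requires:
// jest.config.js
module.exports = {
  globalSetup: './jest.globalSetup.js',
};
// jest.globalSetup.js - runs once, before any test file is loaded, and may return a promise
'use strict';
const fs = require('fs');
const path = require('path');
const api = require('./api');
module.exports = async () => {
  const reports = await api.getAllReportsThroughSideChannel();
  fs.writeFileSync(path.join(__dirname, 'reports.json'), JSON.stringify(reports));
};
// reports.test.js - the data is now available synchronously, so tests can be generated as before
'use strict';
const reports = require('./reports.json');
for (const report of reports) {
  test(`${report.Name} (${report.Id} - ${report.OwnerUsername})`, () => {
    // ...
  });
}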

Related

Speed up node.js when operating with rows from large file

I want to read a file with many rows, then write results.
It's fine with small files <50kb.
But I've got a 15 MB file for a programming competition - as a hard input.
Node.js becomes slow and I can't get the output in time, because I have to send them the output within a few minutes.
And it's not even using full CPU/RAM.
Is the problem in my code, or can I do something about it? Thanks!
const fs = require("fs");
const input = "D:\\Downloads\\example.txt";
const output = input + ".final.txt";
var lineReader = require("readline").createInterface({
input: fs.createReadStream(input),
});
let out = "";
let all = [];
const line_counter = ((i = 0) => () => ++i)();
lineReader.on("line", function (radek, index = line_counter()) {
all.push(radek);
});
all.forEach((v) => {
out += `${v}\n`;
});
fs.writeFile(output, out, (err) => {
if (err) {
console.error(err);
}
});
It seems like you want a better understanding of how to do the following using a streaming technique:
read an input text file stream line-by-line
perform a transform operation on each line of text
write the result of each transform operation to an output file stream
Node supports the web standard streams API — see the list of global objects in the current LTS version of Node (18): https://nodejs.org/docs/latest-v18.x/api/globals.html.
Below, I'll include a complete, minimal example which demonstrates the criteria above — you can use it as a model for learning and adapt it to meet the needs of your program. Because your goal is learning, I've included verbose comments at every step of the program, including links to documentation.
You'll also probably find it helpful to read about the Streams API on MDN and web.dev.
module.mjs:
import {open} from 'node:fs/promises';
import {Writable} from 'node:stream';
// Break string chunks from a ReadableStream into lines. Adapted from Deno's std library:
// See: https://github.com/denoland/deno_std/blob/0.166.0/streams/delimiter.ts#L11-L68
class TextLineStream extends TransformStream {
#buf = "";
constructor() {
super({
transform: (chunk, controller) => this.#handle(chunk, controller),
flush: (controller) => this.#handle("\r\n", controller),
});
}
#handle(chunk, controller) {
chunk = this.#buf + chunk;
while (true) {
const lfIndex = chunk.indexOf("\n");
if (lfIndex !== -1) {
let crOrLfIndex = lfIndex;
if (chunk[lfIndex - 1] === "\r") {
crOrLfIndex--;
}
controller.enqueue(chunk.slice(0, crOrLfIndex));
chunk = chunk.slice(lfIndex + 1);
continue;
}
break;
}
this.#buf = chunk;
}
}
async function main () {
// Paths based on your examples:
const pathIn = 'example.txt';
const pathOut = `${pathIn}.final.txt`;
// Create file handles to the target file paths:
// See: https://nodejs.org/docs/latest-v18.x/api/fs.html#fspromisesopenpath-flags-mode
const fhIn = await open(pathIn);
// The "w" flag means: Open file for writing. The file is created (if it does not exist) or truncated (if it exists).
// See: https://nodejs.org/docs/latest-v18.x/api/fs.html#file-system-flags
const fhOut = await open(pathOut, 'w');
// Create a web-standard WritableStream from the output file handle:
// See: https://nodejs.org/docs/latest-v18.x/api/stream.html#streamwritabletowebstreamwritable
const writable = Writable.toWeb(fhOut.createWriteStream({encoding: 'utf8'}));
const writer = writable.getWriter();
// A function abstraction for writing a text chunk to the output file stream:
const write = (text) => writer.ready.then(() => writer.write(text));
// Create a web-standard ReadableStream from the input file handle,
// then pipe through a text decoder and break/collect the emitted chunks into lines:
// See: https://nodejs.org/docs/latest-v18.x/api/fs.html#filehandlereadablewebstream
const readable = fhIn.readableWebStream()
.pipeThrough(new TextDecoderStream())
.pipeThrough(new TextLineStream());
for await (const line of readable) {
// Handle each text line in here:
// For example: get the length of each line,
// and if it's greater than 0, write it as a line to the output stream:
const {length} = line;
if (length > 0) await write(`${length}\n`);
}
}
main();
Here's the CLI output of using the program on an example text file with some Lorem ipsum lines:
% node --version
v18.12.1
% ls
example.txt module.mjs
% cat example.txt
lorem
ipsum
dolor
sit
amet
% node module.mjs
% cat example.txt.final.txt
5
5
5
3
4
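As a side note on the original readline approach: the 'line' events are asynchronous, so the aggregation and the final write have to happen after the reader's 'close' event fires; in the code as posted, the forEach and writeFile run before any 'line' event has fired. A minimal sketch of that adjustment (same paths as in the question, with the per-line work left as a placeholder):
const fs = require("fs");
const input = "D:\\Downloads\\example.txt";
const output = input + ".final.txt";

const lineReader = require("readline").createInterface({
  input: fs.createReadStream(input),
});

const out = [];
lineReader.on("line", (line) => {
  // Do the per-line work here; collecting line lengths as a placeholder:
  out.push(String(line.length));
});

// 'close' fires once the input stream has been fully consumed:
lineReader.on("close", () => {
  fs.writeFile(output, out.join("\n") + "\n", (err) => {
    if (err) console.error(err);
  });
});
For very large inputs, the streaming version above is still preferable, since it never holds the whole result in memory.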

How do I test the code in a "if (require.main === module)" section of a Node.js script using Jest?

I have written a function like this:
const myFunction = () => {
return 'text';
};
exports.myFunction = myFunction;
if (require.main === module) {
console.log(myFunction());
}
and this is my test:
const { myFunction } = require('../myFunction');
describe('test', () => {
it('should return the text', () => {
expect(myFunction()).toMatch('text');
});
});
According to code coverage tools, every line in the code is covered except for this line:
console.log(myFunction());
Based on comments, I think maybe the reality is that this line cannot be tested, so I'm updating my question:
How can I:
Test this line with Jest, understanding that it may not actually tick the "covered" box, but so I can literally test it. Because not every one of my files has such trivial code in that block. Sometimes I do want to test it for real.
Cause the coverage statistic to show the file as 100% covered? Not because I am pedantic, but I like using the coverage report to find things I need to add tests for, and having dozens of "false negatives" in my report makes that more difficult.
Based on a suggestion in the comments, I found that I can use a child_process exec call within the test to test the output from the command line like this:
const util = require('util');
const exec = util.promisify(require('child_process').exec);
const { myFunction } = require('../myFunction');
describe('test', () => {
it('should return the text', () => {
expect(myFunction()).toBe('text');
});
it('should return the text when called via command line too', async () => {
const { stdout } = await exec('node myFunction', {
encoding: 'utf8',
});
expect(stdout).toBe('text\n');
});
});
Further comments pointed out that without exporting that section of code, Jest can never see it, and hence, never test it, meaning it will never show as "covered". Therefore, once I am satisfied that it is "tested well enough" I can exclude it from my report by adding /* istanbul ignore next */ before the offending line like this:
const myFunction = () => {
return 'text';
};
exports.myFunction = myFunction;
if (require.main === module) {
/* istanbul ignore next */
console.log(myFunction());
}
As explained here, Node.js require wraps script contents with wrapper code specified in Module.wrapper and evaluates it with vm.runInThisContext. This can be implemented in a test. It can be something like:
let Module = require('module');
let fs = require('fs');
let path = require('path');
let vm = require('vm');
...
jest.resetModules();
jest.spyOn(console, 'log');
let myModPath = require.resolve('../myFunction');
let wrapper = Module.wrap(fs.readFileSync(myModPath, 'utf8'));
let compiledWrapper = vm.runInThisContext(wrapper, {});
let mockModule = new Module(myModPath);
let mockExport = mockModule.exports;
let mockRequire = Module.createRequire(myModPath);
mockRequire.main = mockModule;
// Call the compiled wrapper with the usual (exports, require, module, __filename, __dirname) arguments:
compiledWrapper(mockExport, mockRequire, mockModule, path.basename(myModPath), path.dirname(myModPath));
expect(console.log).toBeCalledWith('text');

fs.createWriteStream doesn't use back-pressure when writing data to a file, causing high memory usage

Problem
I'm trying to scan a drive directory (recursively walk all the paths) and write all the paths to a file (as it's finding them) using fs.createWriteStream in order to keep the memory usage low, but it doesn't work: the memory usage reaches 2 GB during the scan.
Expected
I was expecting fs.createWriteStream to automatically handle memory/disk usage at all times, keeping memory usage at a minimum with back-pressure.
Code
const fs = require('fs')
const walkdir = require('walkdir')
let dir = 'C:/'
let options = {
"max_depth": 0,
"track_inodes": true,
"return_object": false,
"no_return": true,
}
const wstream = fs.createWriteStream("C:/Users/USERNAME/Desktop/paths.txt")
let walker = walkdir(dir, options)
walker.on('path', (path) => {
wstream.write(path + '\n')
})
walker.on('end', (path) => {
wstream.end()
})
Is it because I'm not using .pipe()? I tried creating a new Stream.Readable({ read() {} }) and then pushing paths into it from the .on('path') handler with readable.push(path), but that didn't really work.
UPDATE:
Method 2:
I tried the drain method proposed in the answers, but it doesn't help much; it does reduce memory usage to ~500 MB (which is still too much for a stream), but it slows down the code significantly (from seconds to minutes)
Method 3:
I also tried using readdirp; it uses even less memory (~400 MB) and is faster, but I don't know how to pause it and use the drain method there to reduce the memory usage further:
const readdirp = require('readdirp')
let dir = 'C:/'
const wstream = fs.createWriteStream("C:/Users/USERNAME/Desktop/paths.txt")
readdirp(dir, {alwaysStat: false, type: 'files_directories'})
.on('data', (entry) => {
wstream.write(`${entry.fullPath}\n`)
})
Method 4:
I also tried doing this operation with a custom recursive walker, and even though it uses only 30 MB of memory, which is what I wanted, it is about 10 times slower than the readdirp method, and it is synchronous, which is undesirable:
const fs = require('fs')
const path = require('path')
let dir = 'C:/'
function customRecursiveWalker(dir) {
fs.readdirSync(dir).forEach(file => {
let fullPath = path.join(dir, file)
// Folders
if (fs.lstatSync(fullPath).isDirectory()) {
fs.appendFileSync("C:/Users/USERNAME/Desktop/paths.txt", `${fullPath}\n`)
customRecursiveWalker(fullPath)
}
// Files
else {
fs.appendFileSync("C:/Users/USERNAME/Desktop/paths.txt", `${fullPath}\n`)
}
})
}
customRecursiveWalker(dir)
Preliminary observation: you've attempted to get the results you want using multiple approaches. One complication when comparing the approaches is that they do not all do the same work. If you run tests on a file tree that contains only regular files and no mount points, you can probably compare the approaches fairly, but once you start adding mount points, symbolic links, etc., you may get different memory and time statistics merely because one approach excludes files that another approach includes.
I initially attempted a solution using readdirp, but unfortunately that library appears buggy to me. Running it on my system here, I got inconsistent results: one run would output 10 MB of data, another run with the same input parameters would output 22 MB, then I'd get another number, etc. I looked at the code and found that it does not respect the return value of push:
_push(entry) {
if (this.readable) {
this.push(entry);
}
}
As per the documentation the push method may return a false value, in which case the Readable stream should stop producing data and wait until _read is called again. readdirp entirely ignores that part of the specification. It is crucial to pay attention to the return value of push to get proper handling of back-pressure. There are also other things that seemed questionable in that code.
So I abandoned that and worked on a proof of concept showing how it could be done. The crucial parts are:
When the push method returns false it is imperative to stop adding data to the stream. Instead, we record where we were, and stop.
We start again only when _read is called.
If you uncomment the console.log statements that print START and STOP, you'll see them printed out in succession on the console. We start, produce data until Node tells us to stop, and then we stop, until Node tells us to start again, and so on.
const stream = require("stream");
const fs = require("fs");
const { readdir, lstat } = fs.promises;
const path = require("path");
class Walk extends stream.Readable {
constructor(root, maxDepth = Infinity) {
super();
this._maxDepth = maxDepth;
// These fields allow us to remember where we were when we have to pause our
// work.
// The path of the directory to process when we resume processing, and the
// depth of this directory.
this._curdir = [root, 1];
// The directories still to process.
this._dirs = [this._curdir];
// The list of files to process when we resume processing.
this._files = [];
// The location in `this._files` were to continue processing when we resume.
this._ix = 0;
// A flag recording whether or not the fetching of files is currently going
// on.
this._started = false;
}
async _fetch() {
// Recall where we were by loading the state in local variables.
let files = this._files;
let dirs = this._dirs;
let [dir, depth] = this._curdir;
let ix = this._ix;
while (true) {
// If we've gone past the end of the files we were processing, then
// just forget about them. This simplifies the code that follows a bit.
if (ix >= files.length) {
ix = 0;
files = [];
}
// Read directories until we have files to process.
while (!files.length) {
// We've read everything, end the stream.
if (dirs.length === 0) {
// This is how the stream API requires us to indicate the stream has
// ended.
this.push(null);
// We're no longer running.
this._started = false;
return;
}
// Here, we get the next directory to process and get the list of
// files in it.
[dir, depth] = dirs.pop();
try {
files = await readdir(dir, { withFileTypes: true });
}
catch (ex) {
// This is a proof-of-concept. In a real application, you should
// determine what exceptions you want to ignore (e.g. EPERM).
}
}
// Process each file.
for (; ix < files.length; ++ix) {
const dirent = files[ix];
// Don't include in the results those files that are not directories,
// files or symbolic links.
if (!(dirent.isFile() || dirent.isDirectory() || dirent.isSymbolicLink())) {
continue;
}
const fullPath = path.join(dir, dirent.name);
if (dirent.isDirectory() && depth < this._maxDepth) {
// Keep track that we need to walk this directory.
dirs.push([fullPath, depth + 1]);
}
// Finally, we can put the data into the stream!
if (!this.push(`${fullPath}\n`)) {
// If the push returned false, we have to stop pushing results to the
// stream until _read is called again, so we have to stop.
// Uncomment this if you want to see when the stream stops.
// console.log("STOP");
// Record where we were in our processing.
this._files = files;
// The element at ix *has* been processed, so ix + 1.
this._ix = ix + 1;
this._curdir = [dir, depth];
// We're stopping, so indicate that!
this._started = false;
return;
}
}
}
}
async _read() {
// Do not start the process that puts data on the stream over and over
// again.
if (this._started) {
return;
}
this._started = true; // Yep, we've started.
// Uncomment this if you want to see when the stream starts.
// console.log("START");
await this._fetch();
}
}
// Change the paths to something that makes sense for you.
stream.pipeline(new Walk("/home/", 5),
fs.createWriteStream("/tmp/paths3.txt"),
(err) => console.log("ended with", err));
When I run the first attempt you made with walkdir here, I get the following statistics:
Elapsed time (wall clock): 59 sec
Maximum resident set size: 2.90 GB
When I use the code I've shown above:
Elapsed time (wall clock): 35 sec
Maximum resident set size: 0.1 GB
The file tree I use for the tests produces a file listing of 792 MB
You could exploit the return value of WritableStream.write(): it essentially tells you whether you should continue reading or not. A WritableStream has an internal property that stores the threshold after which the buffer should be processed by the OS. The drain event is emitted when the buffer has been flushed, i.e. you can safely call WritableStream.write() again without risking excessively filling the buffer (which means the RAM). Luckily for you, walkdir lets you control the process: the walker object it returns exposes pause (pause the walk; no more events will be emitted until resume) and resume (resume the walk), so you can pause and resume the walk according to the state of your write stream. Try this:
let walker = walkdir(dir, options);
let is_emitter_paused = false;

wstream.on('drain', (evt) => {
  if (is_emitter_paused) {
    walker.resume();
  }
});

walker.on('path', function (path, stat) {
  // write() returns false when the writable's internal buffer is full:
  is_emitter_paused = !wstream.write(path + '\n');
  if (is_emitter_paused) {
    walker.pause();
  }
});
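The same back-pressure idea can be applied to the readdirp attempt from the question, since readdirp() returns a regular Readable stream that can be paused and resumed. A rough sketch (untested; entry.fullPath and the options come from the question's Method 3):
const fs = require('fs')
const readdirp = require('readdirp')

let dir = 'C:/'
const wstream = fs.createWriteStream("C:/Users/USERNAME/Desktop/paths.txt")
const entries = readdirp(dir, {alwaysStat: false, type: 'files_directories'})

entries.on('data', (entry) => {
  // write() returns false once the writable's buffer is over its highWaterMark:
  if (!wstream.write(`${entry.fullPath}\n`)) {
    entries.pause()                                // stop producing entries
    wstream.once('drain', () => entries.resume())  // continue once the buffer has flushed
  }
})
entries.on('end', () => wstream.end())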
Here's an implementation inspired by @Louis's answer. I think it's a bit easier to follow and in my minimal testing it performs about the same.
const fs = require('fs');
const path = require('path');
const stream = require('stream');
class Walker extends stream.Readable {
constructor(root = process.cwd(), maxDepth = Infinity) {
super();
// Dirs to process
this._dirs = [{ path: root, depth: 0 }];
// Max traversal depth
this._maxDepth = maxDepth;
// Files to flush
this._files = [];
}
_drain() {
while (this._files.length > 0) {
const file = this._files.pop();
if (file.isFile() || file.isDirectory() || file.isSymbolicLink()) {
const filePath = path.join(this._dir.path, file.name);
if (file.isDirectory() && this._maxDepth > this._dir.depth) {
// Add directory to be walked at a later time
this._dirs.push({ path: filePath, depth: this._dir.depth + 1 });
}
if (!this.push(`${filePath}\n`)) {
// Halt walking
return false;
}
}
}
if (this._dirs.length === 0) {
// Walking complete
this.push(null);
return false;
}
// Continue walking
return true;
}
async _step() {
try {
this._dir = this._dirs.pop();
this._files = await fs.promises.readdir(this._dir.path, { withFileTypes: true });
} catch (e) {
this.emit('error', e); // Uh oh...
}
}
async _walk() {
this.walking = true;
while (this._drain()) {
await this._step();
}
this.walking = false;
}
_read() {
if (!this.walking) {
this._walk();
}
}
}
stream.pipeline(new Walker('some/dir/path', 5),
fs.createWriteStream('output.txt'),
(err) => console.log('ended with', err));

Cypress request wait by default?

I need Cypress to wait for any XHR requests to complete by default before performing any operations. Is there any way to make this the default, or is there any other alternative? The application I am testing is slow and makes a lot of API calls.
Edit: Writing a separate wait statement for every API request is getting messy and is unnecessary work. I need a way to make this easier.
If what you want is to wait for a specific XHR, you can do it by making use of cy.route(). I use this in some scenarios and it is really useful. The general steps to use it are:
cy.server()
cy.route('GET','**/api/my-call/**').as('myXHR');
Do things in the UI such as clicking on a button that will trigger such api calls
cy.wait('@myXHR')
This way, if such a call isn't triggered, your test will fail. You can find extensive documentation about this in the Cypress docs.
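For reference, newer Cypress versions deprecate cy.server()/cy.route() in favour of cy.intercept(); the equivalent of the steps above looks roughly like this:
cy.intercept('GET', '**/api/my-call/**').as('myXHR');
// Do things in the UI that trigger the call, then:
cy.wait('@myXHR');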
Found something that works for me here https://github.com/PinkyJie/cypress-auto-stub-example
Look for cy.waitUntilAllAPIFinished
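I don't know the exact implementation in that repository, but the idea behind a cy.waitUntilAllAPIFinished-style command can be sketched with a counter of in-flight requests maintained via cy.intercept (the names, URL matcher, and numbers below are illustrative, not the repo's API):
let pendingCalls = 0;

beforeEach(() => {
  pendingCalls = 0;
  // Count every request going out and every response coming back:
  cy.intercept({ url: /.*/ }, (req) => {
    pendingCalls += 1;
    req.continue(() => {
      pendingCalls -= 1;
    });
  });
});

Cypress.Commands.add('waitUntilAllAPIFinished', (timeoutMs = 30000) => {
  const started = Date.now();
  const check = () => {
    if (pendingCalls === 0) return;
    if (Date.now() - started > timeoutMs) {
      throw new Error(`Still ${pendingCalls} pending API call(s) after ${timeoutMs}ms`);
    }
    // Poll again shortly:
    return cy.wait(100, { log: false }).then(check);
  };
  return cy.wrap(null, { log: false }).then(check);
});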
I partially solved the problem by adding a waitAll command and overwriting the route command in the support folder:
const routeCallArr = [];

// Helper assumed by the snippet below: drop a call from the outstanding list once its response arrives.
const clearCall = (alias) => {
  const index = routeCallArr.findIndex((routeCall) => routeCall.alias === alias);
  if (index !== -1) routeCallArr.splice(index, 1);
};

Cypress.Commands.overwrite('route', (route, ...params) => {
  const localRoute = route(...params);
  if (localRoute.alias === undefined) return;
  localRoute.onRequest = function () {
    routeCallArr.push({alias: `@${localRoute.alias}`, starTime: Date.now()});
  };
  localRoute.onResponse = function () {
    clearCall(`@${localRoute.alias}`);
  };
});

const waitAll = (timeOut = 50000, options = {verbose: false, waitNested: false}) => {
  const filterRouteCallArr = [];
  const date = Date.now();
  for (const routeCall of routeCallArr) {
    if ((date - routeCall.starTime) > timeOut) continue;
    filterRouteCallArr.push(routeCall.alias);
  }
  if (options.verbose) {
    console.table(routeCallArr.map(routeCall => ({
      deltaTime: date - routeCall.starTime,
      alias: routeCall.alias,
      starTime: routeCall.starTime,
    })));
    console.log(routeCallArr, filterRouteCallArr);
  }
  routeCallArr.length = 0;
  if (filterRouteCallArr.length > 0) {
    const waiter = cy.wait(filterRouteCallArr, {timeout: timeOut});
    options.waitNested && waiter.then(() => {
      if (routeCallArr.length > 0) {
        waitAll(timeOut, options);
      }
    });
  }
};

Cypress.Commands.add('waitAll', waitAll);
And in the test, instead of using cy.wait(['@call01', ..., '@callN']); I use cy.waitAll();
The problem with this implementation arises when there are nested calls in a time interval relatively separate from the original calls. In that case you can use a recursive wait: cy.waitAll(50000, {waitNested: true});

Permissions error when running one JS AppleScript from another JS AppleScript

I am trying to separate out my .applescript files into different ones to tidy things up.
I have a JS AppleScript file called Test.applescript that tries to run the JS AppleScript file Group Tracks Dependency.applescript, and what I want to do is pass a parameter into the dependency script and get a return value out of it. (It creates an array of arrays of iTunes tracks).
Test.applescript
(function() {
var app = Application('iTunes');
app.includeStandardAdditions = true;
app.doShellScript('Group Tracks Dependency.applescript');
return "Done";
})();
// For quick logging
function log(obj) {
this.console.log(obj);
}
Group Tracks Dependency.applescript
(function(selection) {
return getGroupsOfTracks(selection);
function getGroupsOfTracks(originalTracksArray) {
if (originalTracksArray == null || originalTracksArray.length == 0)
return null;
var tracks = originalTracksArray.slice();
var groups = [];
while (true) {
var group = [];
group.push(tracks[0]);
tracks = tracks.slice(1);
while (true) {
if (!tracks[0]) break;
if (tracks[0].album() != group[0].album())
break;
if (tracks[0].artist() != group[0].artist())
break;
if (tracks[0].discNumber() != group[0].discNumber())
break;
group.push(tracks[0]);
tracks = tracks.slice(1);
}
groups.push(group);
if (!tracks[0]) break;
}
return groups;
}
})();
When I try to run the Test script I get this error (line 5 is the app.doShellScript line):
Error on line 5: Error: A privilege violation occurred.
Is there any way to get around this? I should also note that I want other people to be able to download these scripts and run them on their own iTunes libraries in the future (currently it's not user-friendly though).
If there's no way to get around this then would importing another JS AppleScript file work?
I think you may be fighting a battle that you can’t win using .doShellScript.
The Apple way is to use a Script Library as defined on https://developer.apple.com/library/mac/releasenotes/InterapplicationCommunication/RN-JavaScriptForAutomation/Articles/OSX10-11.html#//apple_ref/doc/uid/TP40014508-CH110-SW1
Unfortunately a script library has constraints where you can only pass simple variables.
Another way is to use require, which can be defined with code like https://github.com/dtinth/JXA-Cookbook/wiki/Importing-Scripts
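To sketch the require-style idea (not the cookbook's exact code): the core of it is reading the dependency file and evaluating it in the current JXA context. The path and helper name below are illustrative, and it assumes the dependency file is changed to evaluate to a function rather than invoking itself immediately:
ObjC.import('Foundation');

// Read a .js file and evaluate it in the current JXA context, returning whatever it evaluates to.
function importScript(posixPath) {
  var contents = $.NSString.stringWithContentsOfFileEncodingError(
    posixPath, $.NSUTF8StringEncoding, null);
  return eval(ObjC.unwrap(contents));
}

var iTunes = Application('iTunes');
// Assumes 'Group Tracks Dependency.js' now evaluates to the getGroupsOfTracks function:
var getGroupsOfTracks = importScript('/Users/you/Scripts/Group Tracks Dependency.js');
var groups = getGroupsOfTracks(iTunes.selection());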
I'm not sure what you are trying to accomplish, but this works for me using Script Editor 2.8.1 (183.1) on OSX 10.11.4:
Create a main JXA Script file
Create a JXA Script Library file
BOTH of these MUST be saved as compiled script files (.scpt)
It is INCORRECT that "Unfortunately a script library has constraints where you can only pass simple variables."
You can call any of the functions in the Script Library file from any JXA script.
In your MAIN script file, which I will call "Get iTunes Group Selection.scpt":
var app = Application('iTunes');
app.includeStandardAdditions = true;
var myLib = Library("My JXA Lib")
var selectionArr = app.selection() // ### Change as needed ###
var groupArr = myLib.getGroupsOfTracks(selectionArr)
groupArr
~~~~~~~~~~~~~~~~~~~~~
And then in a separate script file, saved as:
~/Library/Script Libraries/My JXA Lib.scpt
function getGroupsOfTracks(originalTracksArray) {
if (originalTracksArray == null || originalTracksArray.length == 0)
return null;
var tracks = originalTracksArray.slice();
var groups = [];
while (true) {
var group = [];
group.push(tracks[0]);
tracks = tracks.slice(1);
while (true) {
if (!tracks[0]) break;
if (tracks[0].album() != group[0].album())
break;
if (tracks[0].artist() != group[0].artist())
break;
if (tracks[0].discNumber() != group[0].discNumber())
break;
group.push(tracks[0]);
tracks = tracks.slice(1);
}
groups.push(group);
if (!tracks[0]) break;
}
return groups;
}
Well, it's been a few years...
I ran into errors with JXA and doShellScript when I tried to run with Application("Finder"). These errors went away when I instead ran the script from Application.currentApplication(). So for my script, I used const finder = Application("Finder") for Finder specific stuff, then const app = Application.currentApplication() for running the script.
For example:
//test1.scpt
function run() {
const app = Application.currentApplication()
app.includeStandardAdditions = true
app.doShellScript("osascript ~/Desktop/test2.scpt")
}
//test2.scpt
function run() {
const app = Application.currentApplication()
app.includeStandardAdditions = true
app.displayDialog("foo")
app.doShellScript("osascript -e 'display dialog \"bar\"'")
}
As expected, running test1.scpt gives me two dialogs: foo and bar.
