Name html blob urls for easy reference - javascript

Within our web application we load a lot of content from package files (zipped packages containing html, js, css, images and so on.) The module loader (client side JS) processes the packages and makes the content available to the DOM using blob urls.
While this works very nicely, it's sometimes tedious to find the right JavaScript file for debugging.
I.e.: in Chrome, in the developer console -> Sources, all blob URLs are listed under (no domain) and have random names such as:
blob:https://example.com/0613efd7-6977-4872-981f-519eea0bc911
In a normal production environment there are roughly 100 lines like this, so finding the right one might take some time.
I'd very much like to name the blob URLs, or do something else to make them easier to find for debugging purposes. This seems possible, since webpack does something like this, but I can't seem to find out how. Can anybody point me in the right direction?

Ok, the way I would do it is to have some global that keeps track of the URLs, using a simple reverse map.
One problem with this, of course, is that references to blobs that no longer exist will be kept in memory, but if, say, you were only enabling this for debugging purposes, that might not be a problem.
var namedblobs = {};

function addNamedBlob(name, uri) {
  namedblobs[uri] = name;
}

function getNamedBlob(uri) {
  return namedblobs[uri];
}

function createSomeBlob() {
  // for testing, just a random number will do
  return Math.random().toString();
}

var blob = createSomeBlob();
addNamedBlob("test1", blob);
addNamedBlob("test2", createSomeBlob());

console.log(getNamedBlob(blob)); // should be "test1"
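In practice you would record the name at the point where the blob URL is created, and drop it again when the URL is revoked. A minimal sketch, assuming a wrapper around URL.createObjectURL (the wrapper names here are hypothetical):

function createNamedBlobUrl(name, blob) {
  // create the real blob URL and remember a friendly name for it
  var uri = URL.createObjectURL(blob);
  addNamedBlob(name, uri);
  return uri;
}

function revokeNamedBlobUrl(uri) {
  // revoking is also the moment to drop the map entry,
  // which addresses the memory concern mentioned above
  URL.revokeObjectURL(uri);
  delete namedblobs[uri];
}

var js = new Blob(["console.log('hi');"], { type: 'application/javascript' });
var uri = createNamedBlobUrl("module-a.js", js);
console.log(getNamedBlob(uri)); // "module-a.js"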

Finally I have found a solution that works to my liking. Our application already used a service worker with caching active, so I ended up writing the module files into the service worker cache whenever somebody has debug mode turned on.
Since the URL portion of the resource files is static this way, all the nice browser features such as breakpoints are usable again.
Below I've posted the relevant code of the service worker. The rest of the code is just plain service worker caching.
api.serveScript = function(module, script, content) {
  try {
    // content may arrive base64-encoded; decode it if so
    content = atob(content);
  } catch (err) {
    // not base64, use the content as-is
  }
  return new Promise(function(resolve, reject) {
    var init = {
      status: 200,
      statusText: "OK",
      headers: { 'Content-Type': 'text/javascript' }
    };
    caches.open("modulecache-1").then(function(cache) {
      console.log('[ServiceWorker] Caching ' + module + "/" + script);
      cache.put("/R/" + module + "/script/" + script, new Response(content, init));
      resolve("/R/" + module + "/script/" + script);
    });
  });
};
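For completeness, the "plain service worker caching" mentioned above is essentially the standard fetch-handler pattern; a minimal sketch of that part (assumed, not the application's actual code):

// serve module files from the cache first, falling back to the network
self.addEventListener('fetch', function(event) {
  event.respondWith(
    caches.match(event.request).then(function(cached) {
      return cached || fetch(event.request);
    })
  );
});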
Thanks for your answers and help. I hope this solution is going to help some others too.

@Keith's option is probably the best one (create a Map of your blob URIs and easy-to-read file names).
You could also build a dynamic router that points nice URLs to the blob URIs, but if you are open to doing that, then just don't use blob URIs at all.
Another hackish workaround, less clean than the Map, would be to append a fragment identifier to your blob URI: blob:https://example.com/0613efd7-6977-4872-981f-519eea0bc911#script_name.js.
Beware: this should work for application/javascript Blobs and some other resource types, but not for documents (HTML/SVG/...), where the fragment identifier has a special meaning.
var hello = new Blob(["alert('hello')"], {type:'application/javascript'});
var script = document.createElement('script');
script.src = URL.createObjectURL(hello) + '#hello.js';
document.head.appendChild(script);
console.log(script.src);
var css = new Blob(["body{background:red}"], {type:'text/css'});
var style = document.createElement('link');
style.href = URL.createObjectURL(css) + '#style.css';
style.rel = 'stylesheet';
document.head.appendChild(style);
console.log(style.href);
And here is a fiddle for browsers that don't like StackSnippet's null-origined iframes.

Related

Open a directory of images into separate layers using Adobe extension

I am developing an Adobe extension; from within the extension I want to load a directory of images into separate layers within a document. I have no preference for how this is done, so if there is a better approach, please share it with me. My current working method involves using the open() method, which opens a file in a new document, then duplicating the layer of the new document into the original document. An example of this can be seen below.
// open new document
var originalDoc = app.activeDocument;
var doc = open( new File( filePath ) );
// duplicate to original document
var layer = doc.activeLayer;
var newLayer = layer.duplicate(originalDoc, ElementPlacement.PLACEATBEGINNING);
// close new document
doc.close(SaveOptions.DONOTSAVECHANGES);
This method is extraordinarily slow, especially for large images. After doing some Googling I discovered that Photoshop has a built-in method for creating an image stack. This feature uses a .jsx script itself, and it can be found on GitHub. Looking around online I found a few people trying to load a folder's contents as layers, perfect. The main code I was interested in is below.
var folder = new Folder('~/Desktop/MyFolder');

function runLoadStack(folderPath) {
  var loadLayersFromScript = true;
  //@include 'Load Files into Stack.jsx'
  var fList = folder.getFiles('*.png');
  var aFlag = true;
  loadLayers.intoStack(fList, aFlag);
}

runLoadStack(folder);
I immediately noticed the @include method of importing the stack methods; I cannot find any official documentation for it (it's also not friendly with minification). Also, if the script is not placed in the same directory as Load Files into Stack.jsx, it will throw the error Unable to open file: anonymous. And even after solving all of these issues, when I run the .jsx script from within my extension using $.evalFile() I get the same error as if the script were not in the correct directory: Unable to open file: anonymous. The error is thrown on line 762 of an imported jsx.
Any help resolving the error I am experiencing or simply on how to load an array of image paths into layers (faster method) will be greatly appreciated!
Here is the code I am using within my extension:
var loadLayersFromScript = true;
var strPresets = localize("$$$/ApplicationPresetsFolder/Presets=Presets");
var strScripts = localize("$$$/PSBI/Automate/ImageProcessor/Photoshop/Scripts=Scripts");
var jsxFilePath = app.path + "/" + strPresets + "/" + strScripts + "/Load Files into Stack.jsx";
$.evalFile( new File( jsxFilePath ) );
loadLayers.intoStack( new Folder("/c/Users/Me/teststack").getFiles(), true );
Photoshop's built-in scripts include one that does this; here's the GitHub link:
https://github.com/ES-Collection/Photoshop-Scripts/blob/master/Import%20Folder%20As%20Layers.jsx
Use this script inside your CEP extension.

Unity WebGL External Assets

I'm developing a webGL project in Unity that has to load some external images from a directory. It all runs fine in the editor; however, when I build it, it throws a Directory Not Found exception in the web console. I am putting the images in the Assets/StreamingAssets folder, which becomes the StreamingAssets folder in the built project (at root, same as index.html). The images are located there, yet the browser still complains about not being able to find that directory. (I'm opening it on my own computer, no running web server.)
I guess I'm missing something very obvious, but it seems like I could use some help. I just started learning Unity a week ago, and I'm not that great with C# or JavaScript (I'm trying to get better...). Is this somehow related to some JavaScript security issues?
Could someone please point me in the right direction on how I should be reading images (no writing needs to be done) in Unity WebGL?
string appPath = Application.dataPath;
string[] filePaths = Directory.GetFiles(appPath, "*.jpg");
According to unity3d.com, in webGL builds everything except threading and reflection is supported, so IO should be working - or so I thought :S
I worked on it a bit more, and now I'm trying to load a text file containing the paths of the images (separated by ';'):
TextAsset ta = Resources.Load<TextAsset>("texManifest");
string[] lines = ta.text.Split(';');
Then I convert all lines to proper path, and add them to a list:
string temp = Application.streamingAssetsPath + "/textures/" + s;
filePaths.Add(temp);
Debug.Log tells me it looks like this:
file://////Downloads/FurnitureDresser/build/StreamingAssets/textures/79.jpg
So that seems to be all right, except for all those slashes (that looks a bit odd to me).
And finally create the texture:
WWW www = new WWW("file://" + filePaths[i]);
yield return www;
Texture2D new_texture = new Texture2D(120, 80);
www.LoadImageIntoTexture(new_texture);
And around this last part (unsure: webGL projects do not seem easily debuggable) it tells me: NS_ERROR_DOM_BAD_URI: Access to restricted URI denied.
Can someone please enlighten me what is happening? And most of all, what would be proper to solution to create a directory from where I can load images during runtime?
I realise this question is now a couple of years old but, since this still appears to be a commonly asked question, here is one solution (sorry, the code is C#, but I am guessing the JavaScript implementation is similar). Basically, you need to use UnityWebRequest and coroutines to access a file from the StreamingAssets folder.
1) Create a new Loading scene (which does nothing but query the files; you could have it display some status text or a progress bar to let the user knows what is happening).
2) Add a script called Loader to the Main Camera in the Loading scene.
3) In the Loader script, add a variable to indicate whether the asset has been read successfully:
private bool isAssetRead;
4) In the Start() method of the Loading script:
void Start()
{
    // if webGL, this will be something like "http://..."
    string assetPath = Application.streamingAssetsPath;
    bool isWebGl = assetPath.Contains("://") ||
                   assetPath.Contains(":///");

    try
    {
        if (isWebGl)
        {
            StartCoroutine(
                SendRequest(
                    Path.Combine(assetPath, "myAsset")));
        }
        else // desktop app
        {
            // do whatever you need if the app is not WebGL
        }
    }
    catch
    {
        // handle failure
    }
}
5) In the Update() method of the Loading script:
void Update()
{
    // check to see if asset has been successfully read yet
    if (isAssetRead)
    {
        // once asset is successfully read,
        // load the next screen (e.g. main menu or gameplay)
        SceneManager.LoadScene("NextScene");
    }

    // need to consider what happens if
    // asset fails to be read for some reason
}
6) In the SendRequest() method of the Loading script:
private IEnumerator SendRequest(string url)
{
    using (UnityWebRequest request = UnityWebRequest.Get(url))
    {
        yield return request.SendWebRequest();

        if (request.isNetworkError || request.isHttpError)
        {
            // handle failure
        }
        else
        {
            try
            {
                // entire file is returned via downloadHandler
                string fileContents = request.downloadHandler.text;
                // or: byte[] fileContents = request.downloadHandler.data;

                // do whatever you need to do with the file contents
                if (loadAsset(fileContents))
                    isAssetRead = true;
            }
            catch (Exception x)
            {
                // handle failure
            }
        }
    }
}
Put your image in the Resources folder and use Resources.Load to open the file and use it.
For example:
Texture2D texture = Resources.Load("images/Texture") as Texture2D;
if (texture != null)
{
    GetComponent<Renderer>().material.mainTexture = texture;
}
The directory listing and file APIs are not available in WebGL builds; basically, no low-level IO operations are supported.

Do we need a web server (like Apache) to access a .json file?

I was trying to read an info.json file, using the jQuery API. Please find the code below, which is part of test.html.
$.getJSON('info.json', function(data) {
  var items = [];
  $.each(data, function(key, val) {
    items.push('<li id="' + key + '">' + val + '</li>');
  });
});
The test.html file resides on my local machine and when I try to open it in the browser, the Ajax call is not getting triggered and the info.json file is not read.
Is it not working because I don't have a web server? Or am I doing anything wrong in the code? (I don't see any errors in the Firebug console though).
Thanks in advance.
You will always have to serve your site from a web server when you are making AJAX calls. Otherwise it will throw this exception:
origin null is not allowed by access-control-allow-origin
Host your page on a localhost server and I guess everything will work nicely.
While technically you don't need a web server for this, some of the libraries you use to abstract network access may not work with local files and some browsers don't let local files do a lot, so something like a little test web server for static files would be very useful for your development and testing.
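For example, a minimal static file server in Node.js is enough for local testing; this is only a sketch (no Content-Type handling, no path sanitising), and any small static server works just as well:

// save as server.js, run with: node server.js
var http = require('http');
var fs = require('fs');
var path = require('path');

http.createServer(function (req, res) {
  // map the request path to a file under the current directory
  var file = path.join(process.cwd(), req.url.split('?')[0]);
  fs.readFile(file, function (err, data) {
    if (err) {
      res.writeHead(404);
      res.end('Not found');
      return;
    }
    res.writeHead(200);
    res.end(data);
  });
}).listen(8000);

// then open http://localhost:8000/test.html instead of the file:// URL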
Install a small web server like Jetty (http://jetty.codehaus.org/jetty/): easy to install, and a small download ;)
By putting your JSON string into a text file and loading it in an iframe, you can extract the data. (Most browsers can load .txt files in iframes.)
var frame = document.createElement("IFRAME"); // create new iframe
var body = document.body;
frame.onload = function() { // extract the JSON once loaded
  data = JSON.parse(frame.contentDocument.documentElement.innerText); // loads as a global
  body.removeChild(frame); // remove the frame once it is no longer necessary
};
frame.style.display = "none"; // because the frame has to be appended to the body
body.appendChild(frame);
frame.src = "your JSON.txt"; // set the source after the onload handler is assigned

Mozilla (Firefox, Thunderbird) Extension: How to get extension id (from install.rdf)?

If you are developing an extension for one of the Mozilla applications (e.g. Firefox, Thunderbird, etc.), you define an extension id in install.rdf.
If for some reason you need to know the extension id, e.g. to retrieve the extension dir in the local file system (1) or to send it to a web service (usage statistics) etc., it would be nice to get it from install.rdf rather than hardcoding it in your JavaScript code.
But how do I access the extension id from within my extension?
1) example code:
var extId = "myspecialthunderbirdextid@mydomain.com";
var filename = "install.rdf";
var file = extManager.getInstallLocation(extId).getItemFile(extId, filename);
var fullPathToFile = file.path;
I'm fairly sure the 'hard-coded ID' should never change throughout the lifetime of an extension. That's the entire purpose of the ID: it's unique to that extension, permanently. Just store it as a constant and use that constant in your libraries. There's nothing wrong with that.
What IS bad practice is using the install.rdf, which exists for the sole purpose of... well, installing. Once the extension is developed, the install.rdf file's state is irrelevant and could well be inconsistent.
"An Install Manifest is the file an Add-on Manager-enabled XUL application uses to determine information about an add-on as it is being installed" [1]
To give it an analogy, it's like accessing the memory of a deleted object via an overflow. That object may still exist in memory, but it's no longer logically relevant, and using its data is a really, really bad idea.
[1] https://developer.mozilla.org/en/install_manifests
Like lwburk, I don't think it's available through Mozilla's APIs, but I have an idea that works, though it seems like a complex hack. The basic steps are:
Set up a custom resource url to point to your extension's base directory
Read the file and parse it into XML
Pull the id out using XPath
Add the following line to your chrome.manifest file
resource packagename-base-dir chrome/../
Then we can grab and parse the file with the following code:
function myId() {
  var req = new XMLHttpRequest();
  // synchronous request
  req.open('GET', "resource://packagename-base-dir/install.rdf", false);
  req.send(null);
  if (req.status !== 0) {
    throw("file not found");
  }
  var data = req.responseText;
  // this is so that we can query xpath with namespaces
  var nsResolver = function(prefix) {
    var ns = {
      "rdf" : "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
      "em" : "http://www.mozilla.org/2004/em-rdf#"
    };
    return ns[prefix] || null;
  };
  var parser = CCIN("@mozilla.org/xmlextras/domparser;1", Ci.nsIDOMParser);
  var doc = parser.parseFromString(data, "text/xml");
  // you might have to change this xpath expression a bit to fit your setup
  var myExtId = doc.evaluate("//em:targetApplication//em:id", doc, nsResolver,
                             Ci.nsIDOMXPathResult.FIRST_ORDERED_NODE_TYPE, null);
  return myExtId.singleNodeValue.textContent;
}
I chose to use an XMLHttpRequest (as opposed to simply reading from a file) to retrieve the contents since, in Firefox 4, extensions aren't necessarily unzipped. However, XMLHttpRequest will still work if the extension remains packed (I haven't tested this, but have read about it).
Please note that resource URLs are shared by all installed extensions, so if packagename-base-dir isn't unique, you'll run into problems. You might be able to leverage Programmatically adding aliases to solve this problem.
This question prompted me to join StackOverflow tonight, and I'm looking forward to participating more... I'll be seeing you guys around!
As Firefox now just uses Chrome's WebExtension API, you can use @serg's answer at How to get my extension's id from JavaScript?:
You can get it like this (no extra permissions required) in two different ways:
Using the runtime api: var myid = chrome.runtime.id;
Using the i18n api: var myid = chrome.i18n.getMessage("@@extension_id");
I can't prove a negative, but I've done some research and I don't think this is possible. Evidence:
This question, which shows that the nsIExtensionManager interface expects you to retrieve extension information by ID
The full nsIExtensionManager interface description, which shows no method that helps
The interface does allow you to retrieve a full list of installed extensions, so it's possible to retrieve information about your extension using something other than the ID. See this code, for example:
var em = Cc['@mozilla.org/extensions/manager;1']
           .getService(Ci.nsIExtensionManager);
const nsIUpdateItem = Ci.nsIUpdateItem;
var extension_type = nsIUpdateItem.TYPE_EXTENSION;
items = em.getItemList(extension_type, {});
items.forEach(function(item, index, array) {
  alert(item.name + " / " + item.id + " version: " + item.version);
});
But you'd still be relying on hardcoded properties, of which the ID is the only one guaranteed to be unique.
Take a look at this add-on; maybe its author could help you, or you can figure it out yourself:
[Extension Manager] Extended is very simple to use. After installing, just open the extension manager by going to Tools and then clicking Extensions. You will now see next to each extension the id of that extension.
(Not compatible yet with Firefox 4.0)
https://addons.mozilla.org/firefox/addon/2195

WIX: Where and how should my CustomAction create and read a temporary file?

I have a script CustomAction (Yes, I know all about the opinions that say don't use script CustomActions. I have a different opinion.)
I'd like to run a command, and capture the output. I can do this using the WScript.Shell COM object, then invoking shell.Exec(). But, this flashes a visible console window for the executed command.
To avoid that, I understand I can use the shell.Run() call, and specify "hidden" for the window appearance. But .Run() doesn't give me access to the StdOut of the executed process, so that means I'd need to create a temporary file and redirect the exe output to the temp file, then later read that temp file in script.
Some questions:
is this gonna work?
How do I generate a name for the temporary file? In .NET I could use a static method in the System.IO namespace, but I am using script here. I need to ensure that the user has RW access, and also that no anti-virus program is going to puke on this.
Better ideas? I am trying very hard to avoid C/C++.
I could avoid all this if there were a way to query websites in IIS7 from script, without resorting to the IIS6 Compatibility pack, without using .NET (Microsoft.Web.Administration.ServerManager), and without execing a process (appcmd list sites).
I already asked a separate question on that topic; any suggestions on that would also be appreciated.
Answering my own question...
yes, this is going to work.
Use the Scripting.FileSystemObject thing from JavaScript. There's a GetTempName() method that produces a file name suitable for temporary use, and a GetSpecialFolder() method that gets the location of the temp folder. There's even a BuildPath() method to combine them.
so far I don't have any better ideas.
Here's the code I used:
function GetWebSites_IIS7_B()
{
    var ParseOneLine = function(oneLine) {
        ...regex parsing of output...
    };

    LogMessage("GetWebSites_IIS7_B() ENTER");

    var shell = new ActiveXObject("WScript.Shell");
    var fso = new ActiveXObject("Scripting.FileSystemObject");
    var tmpdir = fso.GetSpecialFolder(SpecialFolders.TemporaryFolder);
    var tmpFileName = fso.BuildPath(tmpdir, fso.GetTempName());
    var windir = fso.GetSpecialFolder(SpecialFolders.WindowsFolder);
    var appcmd = fso.BuildPath(windir, "system32\\inetsrv\\appcmd.exe") + " list sites";

    // use cmd.exe to redirect the output
    // (WindowStyle.Hidden == 0)
    var rc = shell.Run("%comspec% /c " + appcmd + "> " + tmpFileName, WindowStyle.Hidden, true);

    var ts = fso.OpenTextFile(tmpFileName, OpenMode.ForReading);
    var sites = [];
    // Read from the file and parse the results.
    while (!ts.AtEndOfStream) {
        var oneLine = ts.ReadLine();
        var line = ParseOneLine(oneLine);
        LogMessage("  site: " + line.name);
        sites.push(line);
    }
    ts.Close();
    fso.DeleteFile(tmpFileName);
    return sites;
}
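For reference, the enum-style constants used above (SpecialFolders, WindowStyle, OpenMode) are not built into JScript; they are defined elsewhere in my script. Definitions along these lines would work, with the numeric values taken from the FileSystemObject and WScript.Shell documentation:

// numeric values per the Scripting.FileSystemObject / WScript.Shell docs
var SpecialFolders = { WindowsFolder: 0, SystemFolder: 1, TemporaryFolder: 2 };
var WindowStyle    = { Hidden: 0, Normal: 1 };
var OpenMode       = { ForReading: 1, ForWriting: 2, ForAppending: 8 };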
