Microsoft released update KB4088776 in the past couple of days, and it has had a devastating effect on the performance of IndexedDB's openCursor in Edge.
The simple fiddle here shows the problem. With the update, the "retrieval" time is 40 seconds or more. Prior to the update, it is around 1 second.
https://jsfiddle.net/L7q55ad6/23/
The relevant retrieval portion is here:
var _currentVer = 1;

function _openDatabase(fnSuccess) {
    var _custDb = window.indexedDB.open("MyDatabase", _currentVer);
    _custDb.onsuccess = function (event) {
        var db = event.target.result;
        fnSuccess(db);
    }
    _custDb.onerror = function (event) {
        _custDb = null;
        fnSuccess(null); // should use localData
    }
    _custDb.onupgradeneeded = function (event) {
        var db = event.target.result;
        var txn = event.target.transaction;
        // Create an objectStore for this database
        if (event.oldVersion < _currentVer) {
            var customer = db.createObjectStore("customer", { keyPath: "guid" });
            var index = customer.createIndex("by_id", "id", { unique: false });
        }
    };
}
function _retrieveCustomers(fn) {
    _openDatabase(function (db) {
        if (db == null) {
            alert("not supported");
            return;
        }
        var customers = [];
        var transaction = db.transaction("customer", "readonly");
        var objectStore = transaction.objectStore("customer");
        if (typeof objectStore.getAll === 'function') {
            console.log("using getAll");
            objectStore.getAll().onsuccess = function (event) {
                fn(event.target.result);
            };
        }
        else {
            console.log("using openCursor");
            objectStore.openCursor().onsuccess = function (event) {
                var cursor = event.target.result;
                if (cursor) {
                    customers.push(cursor.value);
                    cursor.continue();
                }
                else {
                    fn(customers);
                }
            };
        }
    });
}
The time to create and add the customers is basically normal; only the retrieval is slow. Edge has never supported the getAll method, and it still doesn't after the update.
The only workaround I can think of would be to use localStorage instead, but unfortunately our data set is too large to fit into the 10MB limit. It is actually faster now to retrieve the data from our servers and convert the text to JavaScript objects, which defeats the main purpose of IndexedDB.
I don't have Edge so I can't test this, but does it happen with get too, or just openCursor? If get still performs well, you could store an index (in your example, the list of primary keys; in your real app, maybe something more complicated) in localStorage, and then use that to call get on each one.
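For illustration, a rough sketch of that approach against the question's schema (the localStorage key-list name "customerKeys" is an assumption, not code from the question):

// Sketch: retrieve records one by one with get(), driven by a key list kept in localStorage.
function _retrieveCustomersByKeys(db, fn) {
    var keys = JSON.parse(localStorage.getItem("customerKeys") || "[]"); // assumed key list
    var customers = [];
    var remaining = keys.length;
    if (remaining === 0) { fn(customers); return; }
    var objectStore = db.transaction("customer", "readonly").objectStore("customer");
    keys.forEach(function (key) {
        objectStore.get(key).onsuccess = function (event) {
            if (event.target.result) customers.push(event.target.result);
            if (--remaining === 0) fn(customers); // all gets resolved
        };
    });
}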
Related
I have a long script which is designed to edit a specific row in a Cloud SQL table. The code is long, so I will shorten it.
Client Side:
function build_profile(){
    var cbid = sessionStorage.getItem("client_id");
    var self = this;
    var createSuccess = function(data){
        var statuse = ["Active", "Waiting", "Discharged"];
        if(data !== false){
            data = data.split(",");
            var dec = app.pages.Profile.descendants;
            dec.fname.text = data[1];
            dec.sname.text = data[3];
            sessionStorage.setItem("school_id", data[9]);
            app.popups.Loading.visible = false;
        }
    };
    var init = function() {google.script.run.withSuccessHandler(createSuccess).get_user_data(cbid);};
    app.popups.Loading.visible = true;
    init();
}
function save_profile() {
    var createSuccess = function(data){
        var dec = app.pages.Profile.descendants;
        console.log(data);
        if(data !== -1){
            var ds = app.datasources.Clients;
            ds.load(function(){
                ds.selectIndex(data);
                console.log("editing:" + ds.item.CBID);
                ds.item.fname = dec.fname_edit.value;
                ds.item.sname = dec.sname_edit.value;
                ds.load(function(){build_profile();});
            });
        }
    };
    var init = function() {google.script.run.withSuccessHandler(createSuccess).update_client(sessionStorage.getItem("client_id"));};
    init();
}
Server Side:
function get_user_data(cbid){
    try{
        var query = app.models.Clients.newQuery();
        query.filters.CBID._equals = parseInt(cbid);
        var results = query.run();
        if(results.length > 0){
            var arr = [
                results[0].Id,    //0
                results[0].fname, //1
                results[0].sname  //3
            ];
            return arr.join(",");
        }else{
            return false;
        }
    }catch(e){
        console.error(e);
        console.log("function get_user_data");
        return false;
    }
}
function update_client(cbid) {
    try{
        var ds = app.models.Clients;
        var query = ds.newQuery();
        query.filters.CBID._equals = parseInt(cbid);
        var results = query.run();
        if(results.length > 0){
            var id = results[0]._key;
            return id + 1;
        }else{
            return -1;
        }
    }catch(e){
        console.error(e);
        return -1;
    }
}
This gets the Clients table and updates the row for the selected client, then rebuilds the profile with the new information.
EDIT: I have managed to get to a point where it's telling me that I cannot run the query (ds.load()) while it is processing its results. There does not seem to be a way to manually check whether it has finished processing.
Note: datasource.saveChanges() does not work as it saves automatically.
Your error is being produced by the client-side function save_profile(), exactly in this block:
ds.load(function(){
    ds.selectIndex(data);
    console.log("editing:" + ds.item.CBID);
    ds.item.fname = dec.fname_edit.value;
    ds.item.sname = dec.sname_edit.value;
    ds.load(function(){build_profile();});
});
So what you are doing is reloading the datasource almost immediately, before it finishes loading, hence you are getting that error:
cannot run the query (ds.load()) while processing its results
This is just a matter of timing. A setTimeout can take care of the issue. Just do the following:
ds.load(function(){
    ds.selectIndex(data);
    console.log("editing:" + ds.item.CBID);
    ds.item.fname = dec.fname_edit.value;
    ds.item.sname = dec.sname_edit.value;
    setTimeout(function(){
        ds.load(function(){build_profile();});
    }, 1000);
});
I have managed to find a solution to this particular issue. It requires manual saving, but it saves a lot of hassle, as one of the built-in solutions can be used rather than relying on dealing with errors or timeouts.
function client_query_and_result(){
    var createSuccess = function(data){ // callback function
        console.log(data);
    };
    app.datasources.SomeTable.saveChanges(function(){ // ensures all changes have been saved
        app.datasources.SomeTable.load(function(){ // makes sure to reload the datasource
            google.script.run.withSuccessHandler(createSuccess).server_query_and_result(); // at this point all data has been saved and reloaded
        });
    });
}
The server-side code uses the exact same methods. To enable manual saving, select the table in App Maker -> Datasources -> check "Manual save mode".
Hope this can be useful to someone else.
I have a simple presence user-count set up for Firebase, based on their example. The problem is that it relies on removing counts on disconnect. However, Firebase seems to go down every two months, which removes the onDisconnect handlers. This means that over time the counts get more and more wrong. Is there any way to fix this?
ty.Presence = function() {
    this.rooms = {}
    this.presence = fb.child('presence')
    this.connectedRef = fb.child('.info/connected');
    if (!localStorage.fb_presence_id) {
        localStorage.fb_presence_id = Math.random().toString(36).slice(2)
    }
    this.browserID = localStorage.fb_presence_id
    var first = false
}

ty.Presence.prototype.add = function(roomID, userobj) {
    var self = this
    var userListRef = this.presence.child(roomID)
    // Generate a reference to a new location for my user with push.
    var obj = {
        s: "on",
        id: this.browserID
    }
    if (userobj) {
        obj.u = {
            _id: userobj._id,
            n: userobj.username
        }
        if (userobj.a) {
            obj.u.a = userobj.a
        }
    }
    var myUserRef = userListRef.push(obj)
    this.rooms[roomID] = myUserRef
    this.connectedRef.on("value", function(isOnline) {
        if (isOnline.val()) {
            // If we lose our internet connection, we want ourselves removed from the list.
            myUserRef.onDisconnect().remove();
        }
    });
};

ty.Presence.prototype.count = function(roomID, cb) {
    var self = this
    var userListRef = this.presence.child(roomID)
    var count = 0
    function res () {
        var usersArr = _.pluck(users, 'id')
        usersArr = _.uniq(usersArr)
        count = usersArr.length
        if (cb) cb(count)
    }
    var users = {}
    userListRef.on("child_added", function(css) {
        users[css.name()] = css.val();
        res()
    });
    userListRef.on("child_removed", function(css) {
        delete users[css.name()]
        res()
    });
    cb(count)
};

ty.Presence.prototype.get = function(ref) {
    return this[ref]
};

ty.Presence.prototype.setGlobal = function(object) {
    var self = this
    _.each(this.rooms, function (myUserRef) {
        myUserRef.set(object)
    })
};

ty.Presence.prototype.remove = function(roomID) {
    if (this.rooms[roomID])
        this.rooms[roomID].remove();
};

ty.Presence.prototype.off = function(roomID) {
    var userListRef = this.presence.child(roomID)
    userListRef.off()
};

ty.presence = new ty.Presence()
ty.presence.add('all')
The onDisconnect handlers can be lost if a Firebase is restarted (e.g. when a new release is pushed live). One simple approach is to attach a timestamp as a priority to the records when they are stored. As long as the client remains online, have it update the timestamp occasionally.
setInterval(function() {
    connectedRef.setPriority(Date.now());
}, 1000*60*60*4 /* every 4 hours */ );
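For the initial write, the record itself can be stored with its timestamp as the priority; a minimal sketch against the question's add() method, assuming the legacy setWithPriority() API:

// Store the presence record with a timestamp priority instead of a plain push(obj).
var myUserRef = userListRef.push();
myUserRef.setWithPriority(obj, Date.now());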
Thus, any record which reaches, say, 24 hours old is obviously an orphan. The check could be performed by clients (e.g. when a new client receives the list for the first time) or by a server process (e.g. a Node.js script with a setInterval() that checks for records older than X).
presenceRef.endAt(Date.now()-24*60*60*1000 /* 24 hours ago */).remove();
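A rough sketch of that server process, assuming the legacy Firebase Node client and a hypothetical database URL (here each orphan is removed individually from the query snapshot):

// Hypothetical cleanup script; the URL and interval are assumptions.
var Firebase = require('firebase');
var presenceRef = new Firebase('https://example.firebaseio.com/presence');

setInterval(function () {
    // Priorities hold the last-heartbeat timestamps, so everything up to
    // (now - 24h) in priority order is an orphan.
    presenceRef.endAt(Date.now() - 24*60*60*1000).once('value', function (snap) {
        snap.forEach(function (child) {
            child.ref().remove(); // drop the stale entry
        });
    });
}, 60*60*1000 /* hourly */);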
Less than ideal, sure, but a functional workaround I've utilized in apps.
I am using Lawnchair.js on a mobile app I am building at work, targeting iOS, Android, and Windows phones. My question is about a relatively simple function (see below) that reads data from an object and saves it in the IndexedDB database. It's about 4MB of data, and on the first go-round, when I inspect in Internet Explorer (via Internet Options), I can see the database is about 7MB. If I reload the page and re-run the same function with the same data, it increases to 14MB and then 20MB. I'm using the same keys, so my understanding is that this should just update the records, but it's almost as if it's inserting all new records every time. I have also seen similar behavior using Lawnchair on mobile Safari with the websql adapter. Has anyone seen this before, or does anyone have any suggestions as to why this might be happening?
The following code is from a function I am using to populate the database.
populateDatabase: function(database, callback) {
    'use strict';
    var key;
    try {
        for (key in MasterData) {
            if (MasterData.hasOwnProperty(key)) {
                var itemInfo = DataConfig.checkForDataUpdates[DataConfig.keyMap[key]];
                database.save({ key: itemInfo["name"], hash: itemInfo["version"], url: itemInfo["url"], data: MasterData[key] });
            }
        }
        callback(true);
    } catch (e) {
        callback(false);
    }
}
MasterData is the large data file, and itemInfo contains the key name, a hash that is later used to check an API for updates, and the relative URL to update from. After I create the database, I pass it into this function and then pass back true if the inserts are successful and false otherwise.
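For context, the shapes involved might look roughly like this (reconstructed from the description above, not actual code from the app):

// Assumed shapes, for illustration only:
var MasterData = {
    customers: [ /* several MB of records */ ]
};
var DataConfig = {
    keyMap: { customers: "customersInfo" },
    checkForDataUpdates: {
        customersInfo: { name: "customers", version: "abc123", url: "/api/customers" }
    }
};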
As previously mentioned, I have seen similar issues on iOS, where calling database.save() was allocating a lot of memory but not releasing it, eventually causing a crash when it populated the database and then tried to update some records. Removing Lawnchair from the equation has kept it from crashing, but it is still allocating a lot of memory when saving data. I'm not sure if this is normal for persistent storage on mobile devices, a bug in Lawnchair, or me being a noob and doing something terribly wrong, but I could use some pointers on this, as well as on why the IndexedDB database just keeps getting larger on every save (at least during initial testing in IE10).
EDIT: The source code for the indexed-db adapter is here:
https://github.com/brianleroux/lawnchair/blob/master/src/adapters/indexed-db.js
and here is the code for the save function I am using:
save: function(obj, callback) {
    var self = this;
    if (!this.store) {
        this.waiting.push(function() {
            this.save(obj, callback);
        });
        return;
    }
    var objs = (this.isArray(obj) ? obj : [obj]).map(function(o) { if (!o.key) { o.key = self.uuid() } return o })
    var win = function (e) {
        if (callback) { self.lambda(callback).call(self, self.isArray(obj) ? objs : objs[0]) }
    };
    var trans = this.db.transaction(this.record, READ_WRITE);
    var store = trans.objectStore(this.record);
    for (var i = 0; i < objs.length; i++) {
        var o = objs[i];
        store.put(o, o.key);
    }
    store.transaction.oncomplete = win;
    store.transaction.onabort = fail;
    return this;
},
When creating a new instance, Lawnchair uses the init function from the indexed-db adapter, which is the following:
init: function(options, callback) {
    this.idb = getIDB();
    this.waiting = [];
    this.useAutoIncrement = useAutoIncrement();
    var request = this.idb.open(this.name, STORE_VERSION);
    var self = this;
    var cb = self.fn(self.name, callback);
    if (cb && typeof cb != 'function') throw 'callback not valid';
    var win = function() {
        // manually clean up event handlers on request; this helps on chrome
        request.onupgradeneeded = request.onsuccess = request.error = null;
        if (cb) return cb.call(self, self);
    };
    var upgrade = function(from, to) {
        // don't try to migrate dbs, just recreate
        try {
            self.db.deleteObjectStore('teststore'); // old adapter
        } catch (e1) { /* ignore */ }
        try {
            self.db.deleteObjectStore(self.record);
        } catch (e2) { /* ignore */ }
        // ok, create object store.
        var params = {};
        if (self.useAutoIncrement) { params.autoIncrement = true; }
        self.db.createObjectStore(self.record, params);
        self.store = true;
    };
    request.onupgradeneeded = function(event) {
        self.db = request.result;
        self.transaction = request.transaction;
        upgrade(event.oldVersion, event.newVersion);
        // will end up in onsuccess callback
    };
    request.onsuccess = function(event) {
        self.db = event.target.result;
        if (self.db.version != ('' + STORE_VERSION)) {
            // DEPRECATED API: modern implementations will fire the
            // upgradeneeded event instead.
            var oldVersion = self.db.version;
            var setVrequest = self.db.setVersion('' + STORE_VERSION);
            // onsuccess is the only place we can create Object Stores
            setVrequest.onsuccess = function(event) {
                var transaction = setVrequest.result;
                setVrequest.onsuccess = setVrequest.onerror = null;
                // can't upgrade w/o versionchange transaction.
                upgrade(oldVersion, STORE_VERSION);
                transaction.oncomplete = function() {
                    for (var i = 0; i < self.waiting.length; i++) {
                        self.waiting[i].call(self);
                    }
                    self.waiting = [];
                    win();
                };
            };
            setVrequest.onerror = function(e) {
                setVrequest.onsuccess = setVrequest.onerror = null;
                console.error("Failed to create objectstore " + e);
                fail(e);
            };
        } else {
            self.store = true;
            for (var i = 0; i < self.waiting.length; i++) {
                self.waiting[i].call(self);
            }
            self.waiting = [];
            win();
        }
    };
    request.onerror = function(ev) {
        if (request.errorCode === getIDBDatabaseException().VERSION_ERR) {
            // xxx blow it away
            self.idb.deleteDatabase(self.name);
            // try it again.
            return self.init(options, callback);
        }
        console.error('Failed to open database');
    };
},
I think you keep adding data instead of updating the existing data.
Can you provide some more information about the configuration of the store? Are you using an inline or external (out-of-line) key? If it's inline, what is the keyPath?
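To illustrate why that configuration matters, a minimal sketch (the store names and open db handle are assumptions):

// db is an open IDBDatabase with stores "items" (out-of-line keys) and "auto" (autoIncrement).

// Out-of-line key passed to put(): the second put overwrites the first record.
var items = db.transaction("items", "readwrite").objectStore("items");
items.put({ name: "a" }, "item-1");
items.put({ name: "b" }, "item-1"); // still one record

// autoIncrement with no explicit key: every put() generates a fresh key,
// so repeated "saves" insert new records and the database keeps growing.
var auto = db.transaction("auto", "readwrite").objectStore("auto");
auto.put({ name: "a" }); // stored under key 1
auto.put({ name: "a" }); // stored under key 2 -- a duplicate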
I have a for loop like this:
for (var name in myperson.firstname){
    var myphone = new phone(myperson, firstname);
    myphone.get(function(phonenumbers){
        if(myphone.phonearray){
            myperson.save();
            //Can I put a break here?;
        }
    });
}
What it does is search for phone numbers in a database based on various first names. What I want to achieve is that once it finds a number associated with any of the first names, it performs myperson.save() and then stops all the iterations, so that no duplicates get saved. Sometimes, none of the names return any phone numbers.
myphone.get contains a server request, and the callback is triggered on success.
If I put a break inside the response, what will happen with the other iterations of the loop? Most likely the other HTTP requests have already been initiated. I don't want them to perform the save. One solution I have thought of is to put a variable outside of the for loop, set it when saving, and then check it when the other callbacks get triggered, but I'm not sure if that's the best way to go.
You could write a helper function to restrict invocations:
function callUntilTrue(cb) {
    var done = false;
    return function () {
        if (done) {
            log("previous callback succeeded. not calling others.");
            return;
        }
        var res = cb.apply(null, arguments);
        done = !!res;
    };
}
var myperson = {
    firstname: {
        "tom": null,
        "jerry": null,
        "micky": null
    },
    save: function () {
        log("save " + JSON.stringify(this, null, 2));
    }
};

var cb = function (myperson_, phonenumbers) {
    if (myperson_.phonearray) {
        log("person already has phone numbers. returning.");
        return false;
    }
    if (phonenumbers.length < 1) {
        log("response has no phone numbers. returning.");
        return false;
    }
    log("person has no existing phone numbers. saving ", phonenumbers);
    myperson_.phonearray = phonenumbers;
    myperson_.save();
    return true;
};

var restrictedCb = callUntilTrue(cb.bind(null, myperson));

for (var name in myperson.firstname) {
    var myphone = new phone(myperson, name);
    myphone.get(restrictedCb);
}
Sample Console:
results for tom-0 after 1675 ms
response has no phone numbers. returning.
results for jerry-1 after 1943 ms
person has no existing phone numbers. saving , [
  "jerry-1-0-number"
]
save {
  "firstname": {
    "tom": null,
    "jerry": null,
    "micky": null
  },
  "phonearray": [
    "jerry-1-0-number"
  ]
}
results for micky-2 after 4440 ms
previous callback succeeded. not calling others.
EDIT: Added HTML output as well as console.log.
The first result callback will only ever happen after the loop, because of the single-threaded nature of JavaScript and because running code isn't interrupted when events arrive.
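A quick illustration of that ordering, using setTimeout in place of a real request:

for (var i = 0; i < 3; i++) {
    setTimeout(function () { console.log("callback"); }, 0);
}
console.log("loop done"); // always logs before any "callback"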
If you still want requests to happen in parallel, you may use a flag:
var saved = false;
for (var name in myperson.firstname){
    var myphone = new phone(myperson, firstname /* name? */);
    myphone.get(function(phonenumbers){
        if (!saved && myphone.phonearray){
            saved = true;
            myperson.save();
        }
    });
}
This will not cancel any pending requests, however, just prevent the save once they return.
It would be better if your .get() returned something cancelable (the request itself, maybe):
var saved = false;
var requests = [];
for (var name in myperson.firstname){
    var myphone = new phone(myperson, firstname /* name? */);
    var r;
    requests.push(r = myphone.get(function(phonenumbers){
        // Remove current request.
        requests = requests.filter(function(i) {
            return r !== i;
        });
        if (saved || !myphone.phonearray) {
            return;
        }
        saved = true;
        // Kill other pending/unfinished requests.
        requests.forEach(function(r) {
            r.abort();
        });
        myperson.save();
    }));
}
Even better, don't start all requests at once. Instead, construct an array of all possible combinations, keep a counter (a semaphore), and only start X requests at a time:
var saved = false;
var requests = [];
// Use requests.length as the implicit counter.
var waiting = []; // Wait queue.
for (var name in myperson.firstname){
    var myphone = new phone(myperson, firstname /* name? */);
    var r;
    if (requests.length >= 4) {
        // Put in wait queue instead.
        waiting.push(myphone);
        continue;
    }
    requests.push(r = myphone.get(function cb(phonenumbers){
        // Remove current request.
        requests = requests.filter(function(i) {
            return r !== i;
        });
        if (saved) {
            return;
        }
        if (!myphone.phonearray) {
            // Start next request.
            var w = waiting.shift();
            if (w) {
                requests.push(w.get(cb));
            }
            return;
        }
        saved = true;
        // Kill other pending/unfinished requests.
        requests.forEach(function(r) {
            r.abort();
        });
        myperson.save();
    }));
}
Update: I guess the subject gave the wrong notion that I'm looking for an existing add-on. This is a custom problem, and I do NOT want an existing solution.
I wish to WRITE (or, more appropriately, modify an existing) add-on.
Here's my requirement:
- I want my addon to work for a particular site only
- The data on the pages is encoded using a 2-way hash
- A good deal of info is loaded by XHR requests, and sometimes displayed in animated bubbles etc.
- The current version of my addon parses the page via XPath expressions, decodes the data, and replaces it
- The issue comes in with those bubblified boxes that are displayed on mouse-over events
- Thus, I realized that it might be a good idea to create an XHR bridge that could listen to all the data and decode/encode on the fly
After a couple of searches, I came across nsITraceableChannel [1][2][3].
Just wanted to know if I am on the correct path. If "yes", then kindly provide any extra pointers and suggestions that may be appropriate; and if "no", then... well, please help with correct pointers :)
Thanks,
Bipin.
[1]. https://developer.mozilla.org/en/NsITraceableChannel
[2]. http://www.softwareishard.com/blog/firebug/nsitraceablechannel-intercept-http-traffic/
[3]. http://www.ashita.org/howto-xhr-listening-by-a-firefox-addon/
nsITraceableChannel is indeed the way to go here. The blog posts by Jan Odvarko (softwareishard.com) and myself (ashita.org) show how to do this. You may also want to see http://www.ashita.org/implementing-an-xpcom-firefox-interface-and-creating-observers/; however, it isn't really necessary to do this in an XPCOM component.
The steps are basically:
1. Create an object prototype implementing nsITraceableChannel, and create an observer to listen for http-on-modify-request and http-on-examine-response
2. Register the observer
3. The observer listening for those two topics adds our nsITraceableChannel object into the chain of listeners and makes sure that our nsITC knows who is next in the chain
4. The nsITC object provides three callbacks, each of which is called at the appropriate stage: onStartRequest, onDataAvailable, and onStopRequest
5. In each of the callbacks above, our nsITC object must pass the data on to the next item in the chain
Below is actual code from a site-specific add-on I wrote that behaves very similarly to yours from what I can tell.
function TracingListener() {
    //this.receivedData = [];
}

TracingListener.prototype = {
    originalListener: null,
    receivedData: null, // array for incoming data.

    onDataAvailable: function(request, context, inputStream, offset, count) {
        var binaryInputStream = CCIN("@mozilla.org/binaryinputstream;1", "nsIBinaryInputStream");
        var storageStream = CCIN("@mozilla.org/storagestream;1", "nsIStorageStream");
        binaryInputStream.setInputStream(inputStream);
        storageStream.init(8192, count, null);
        var binaryOutputStream = CCIN("@mozilla.org/binaryoutputstream;1", "nsIBinaryOutputStream");
        binaryOutputStream.setOutputStream(storageStream.getOutputStream(0));
        // Copy received data as they come.
        var data = binaryInputStream.readBytes(count);
        //var data = inputStream.readBytes(count);
        this.receivedData.push(data);
        binaryOutputStream.writeBytes(data, count);
        this.originalListener.onDataAvailable(request, context, storageStream.newInputStream(0), offset, count);
    },

    onStartRequest: function(request, context) {
        this.receivedData = [];
        this.originalListener.onStartRequest(request, context);
    },

    onStopRequest: function(request, context, statusCode) {
        try {
            request.QueryInterface(Ci.nsIHttpChannel);
            if (request.originalURI && piratequesting.baseURL == request.originalURI.prePath && request.originalURI.path.indexOf("/index.php?ajax=") == 0) {
                var data = null;
                if (request.requestMethod.toLowerCase() == "post") {
                    var postText = this.readPostTextFromRequest(request, context);
                    if (postText)
                        data = ((String)(postText)).parseQuery();
                }
                var date = Date.parse(request.getResponseHeader("Date"));
                var responseSource = this.receivedData.join('');
                //fix leading spaces bug
                responseSource = responseSource.replace(/^\s+(\S[\s\S]+)/, "$1");
                piratequesting.ProcessRawResponse(request.originalURI.spec, responseSource, date, data);
            }
        } catch (e) {
            dumpError(e);
        }
        this.originalListener.onStopRequest(request, context, statusCode);
    },

    QueryInterface: function (aIID) {
        if (aIID.equals(Ci.nsIStreamListener) ||
            aIID.equals(Ci.nsISupports)) {
            return this;
        }
        throw Components.results.NS_NOINTERFACE;
    },

    readPostTextFromRequest: function(request, context) {
        try {
            var is = request.QueryInterface(Ci.nsIUploadChannel).uploadStream;
            if (is) {
                var ss = is.QueryInterface(Ci.nsISeekableStream);
                var prevOffset;
                if (ss) {
                    prevOffset = ss.tell();
                    ss.seek(Ci.nsISeekableStream.NS_SEEK_SET, 0);
                }
                // Read data from the stream..
                var charset = "UTF-8";
                var text = this.readFromStream(is, charset, true);
                // Seek locks the file so, seek to the beginning only if necko hasn't read it yet,
                // since necko doesn't seek to 0 before reading (at least not till 459384 is fixed).
                if (ss && prevOffset == 0)
                    ss.seek(Ci.nsISeekableStream.NS_SEEK_SET, 0);
                return text;
            } else {
                dump("Failed to Query Interface for upload stream.\n");
            }
        } catch (exc) {
            dumpError(exc);
        }
        return null;
    },

    readFromStream: function(stream, charset, noClose) {
        var sis = CCSV("@mozilla.org/binaryinputstream;1", "nsIBinaryInputStream");
        sis.setInputStream(stream);
        var segments = [];
        for (var count = stream.available(); count; count = stream.available())
            segments.push(sis.readBytes(count));
        if (!noClose)
            sis.close();
        var text = segments.join("");
        return text;
    }
}
hRO = {
    observe: function(request, aTopic, aData) {
        try {
            if (typeof Cc == "undefined") {
                var Cc = Components.classes;
            }
            if (typeof Ci == "undefined") {
                var Ci = Components.interfaces;
            }
            if (aTopic == "http-on-examine-response") {
                request.QueryInterface(Ci.nsIHttpChannel);
                if (request.originalURI && piratequesting.baseURL == request.originalURI.prePath && request.originalURI.path.indexOf("/index.php?ajax=") == 0) {
                    var newListener = new TracingListener();
                    request.QueryInterface(Ci.nsITraceableChannel);
                    newListener.originalListener = request.setNewListener(newListener);
                }
            }
        } catch (e) {
            dump("\nhRO error: \n\tMessage: " + e.message + "\n\tFile: " + e.fileName + " line: " + e.lineNumber + "\n");
        }
    },

    QueryInterface: function(aIID) {
        if (typeof Cc == "undefined") {
            var Cc = Components.classes;
        }
        if (typeof Ci == "undefined") {
            var Ci = Components.interfaces;
        }
        if (aIID.equals(Ci.nsIObserver) ||
            aIID.equals(Ci.nsISupports)) {
            return this;
        }
        throw Components.results.NS_NOINTERFACE;
    },
};
var observerService = Cc["@mozilla.org/observer-service;1"]
                        .getService(Ci.nsIObserverService);
observerService.addObserver(hRO, "http-on-examine-response", false);
In the above code, originalListener is the listener we are inserting ourselves before in the chain. It is vital that you keep that reference when creating the TracingListener, and that you pass on the data in all three callbacks. Otherwise nothing will work (pages won't even load; Firefox itself is last in the chain).
Note: there are some functions called in the code above which are part of the piratequesting add-on, e.g. parseQuery() and dumpError().
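For reference, hypothetical stand-ins for those two helpers (assumptions for illustration, not the add-on's actual code):

// Hypothetical stand-in: log an error with its stack.
function dumpError(e) {
    dump("Error: " + e.message + "\n" + (e.stack || "") + "\n");
}

// Hypothetical stand-in: parseQuery() is called on an "a=1&b=2" style string.
String.prototype.parseQuery = function () {
    var result = {};
    this.split("&").forEach(function (pair) {
        var kv = pair.split("=");
        result[decodeURIComponent(kv[0])] = decodeURIComponent(kv[1] || "");
    });
    return result;
};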
The Tamper Data add-on. See also its How to Use it page.
You could try making a Greasemonkey script and overwriting XMLHttpRequest.
The code would look something like:
function request() {
};

request.prototype.open = function (type, path, block) {
    GM_xmlhttpRequest({
        method: type,
        url: path,
        onload: function (response) {
            // some code here
        }
    });
};

unsafeWindow.XMLHttpRequest = request;
Also note that you can turn a GM script into an addon for Firefox.