I have a spinner element in my view whose visibility is bound to vm.wuDataLoading: the spinner is displayed while vm.wuDataLoading is true. I want to stop displaying the spinner once the data has been loaded via reportDataService.getStandardReportGridData. Is the code below a correct way to do it, or do I need to somehow ensure that vm.wuDataLoading is not set to false before the data is fully loaded?
function changeTitle(title) {
    /////some code
    function getData() {
        vm.wuDataLoading = true;
        reportDataService.getStandardReportGridData(projectId, webreportIdWuSet).then(function(data3) {
            vm.setRowWu01 = (formatNumber(data3.Values[0][1]));
            vm.setRowWu02 = (formatNumber(data3.Values[0][2]));
        });
        ////some code
        vm.wuDataLoading = false;
    }
    getData();
};
Presumably reportDataService.getStandardReportGridData returns a promise for a reason: It's asynchronous. So naturally, you don't want to clear the loading flag until that asynchronous work is complete (e.g., within a then callback). Something like this:
function getData() {
    vm.wuDataLoading = true;
    reportDataService.getStandardReportGridData(projectId, webreportIdWuSet).then(function(data3) {
        vm.setRowWu01 = (formatNumber(data3.Values[0][1]));
        vm.setRowWu02 = (formatNumber(data3.Values[0][2]));
    })
    // "finally"
    .catch(e => {
        // Do something about the fact the promise was rejected, but don't throw
        // and don't return a rejected promise
    })
    .then(() => {
        // Because our `catch` above converts the rejection to resolution, this gets
        // called regardless of whether the original promise rejects or resolves
        vm.wuDataLoading = false;
    });
    ////some code <== This may or may not need to be in a callback above, depending on
    // whether you want it to run before or after the work is complete
}
or I need to somehow ensure that vm.wuDataLoading is not changed to false before the data is fully loaded
If other things are also going to use the same flag, I'd suggest using a counter instead:
function getData() {
    // Increment the counter, where >0 means "loading"
    ++vm.wuDataLoading;
    reportDataService.getStandardReportGridData(projectId, webreportIdWuSet).then(function(data3) {
        vm.setRowWu01 = (formatNumber(data3.Values[0][1]));
        vm.setRowWu02 = (formatNumber(data3.Values[0][2]));
    })
    // "finally"
    .catch(e => {
        // Do something about the fact the promise was rejected, but don't throw
        // and don't return a rejected promise
    })
    .then(() => {
        // Because our `catch` above converts the rejection to resolution, this gets
        // called regardless of whether the original promise rejects or resolves
        // Decrement the counter again
        --vm.wuDataLoading;
    });
    ////some code <== This may or may not need to be in a callback above, depending on
    // whether you want it to run before or after the work is complete
}
Note that you would only use the pattern above if, as in getData, you weren't going to pass on the promise to the calling code. If you did (by returning the result of calling then), it would be inappropriate to convert rejection to resolution via a catch handler like that.
In that case, isolate any code you want to run when the promise settles (regardless of how) in a local function, and use it both in your then handler and a catch handler that propagates the error:
function getData() {
    function cleanup() {
        // Decrement the counter
        --vm.wuDataLoading;
    }
    // Increment the counter, where >0 means "loading"
    ++vm.wuDataLoading;
    return reportDataService.getStandardReportGridData(projectId, webreportIdWuSet).then(function(data3) {
        vm.setRowWu01 = (formatNumber(data3.Values[0][1]));
        vm.setRowWu02 = (formatNumber(data3.Values[0][2]));
        cleanup();
    })
    .catch(e => {
        cleanup();
        throw e;
    });
}
I'm removing content (a div) but first waiting for an animation (if none is provided, then the animation is just a resolved Promise by default). Check this out:
clearContent = (animation = Promise.resolve()) => {
    return new Promise((resolve, reject) => {
        const child = $('#child');
        animation.then(animationEvent => {
            const eventPackage = {
                'divRemoved': 'divIDPlaceholder',
                'itemRemoved': 'contentIDPlaceholder',
                'remainingItemsCount': 2,
                'remainingItems': 1
            };
            child.remove();
            const contentRemovedEvent = new CustomEvent('contentRemovedFromPlaceholder', {
                'detail': eventPackage
            });
            window.dispatchEvent(contentRemovedEvent);
            console.log('Removed!');
            return resolve(eventPackage);
        });
    });
}
const testAnimationThatTakesASecond = () => {
    return new Promise((resolve, reject) => {
        setTimeout(() => {
            return resolve()
        }, 1000);
    });
}
$('#child').on('click', () => {
    clearContent(testAnimationThatTakesASecond());
});
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<div id="child">Remove me! The removal process can be animated as well!</div>
My constraints are that I'm waiting on an animation by anime.js, and I'd like the whole removal process (that is, the animation plus the removal of the div itself) to be something I can depend on, because I might chain further work off it.
The problem is that, although this is a wrapper function, I sometimes find it doesn't work properly on low-CPU devices. I suspect it's because the .remove() itself happens after the promise has already resolved.
Am I wrong to think that a Promise will always wait for whatever code it has inside to finish?
Your function is fine, but from your last sentence and comments it seems like your expectation about promises is not quite correct.
If you are calling the code like this:
clearContent(testAnimationThatTakesASecond());
console.log('something after resolved');
The console.log will be executed before the animation finishes. The execution will not be halted in the calling context.
If you need to do something after the animation is resolved you need to use it like this:
clearContent(testAnimationThatTakesASecond())
.then(() => console.log('something after resolved'));
Or using async:
$('#child').on('click', async () => {
    await clearContent(testAnimationThatTakesASecond());
    console.log('something after resolved');
});
When you put async in front of a function, you're saying that it will return a promise that resolves with the value of its return statement.
When you await a promise, you're waiting for it to be fulfilled before continuing.
$('#child').on('click', async () => {
    await testAnimationThatTakesASecond();
    clearContent();
});
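To make the first point concrete, here is a minimal stand-alone sketch (the function name is illustrative, not from the question) showing that an async function always returns a promise that resolves with its return value:
// an async function always returns a promise
async function getAnswer() {
    return 42; // the promise returned by getAnswer() resolves with 42
}

getAnswer().then(value => console.log(value)); // logs 42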
Edit: With plain promises we can also use then to make sure the first promise is resolved, and only then run the second phase.
Depending on the order we want our asynchronous callbacks to execute in, if they were doSomething() and doSomethingElse() and we wanted them to run in order, we could use
doSomething().then(doSomethingElse)
This is the only way we can use the result of doSomething inside doSomethingElse; it's as if we ran:
doSomethingElse(resultOfDoSomething)
It's also then-able: if we had a third operation, it would be called after doSomethingElse finishes, like finalHandler(resultOfDoSomethingElse).
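As a runnable sketch of that chain, using the same hypothetical names (each step just passes a value along):
function doSomething() {
    return Promise.resolve('result of doSomething');
}

function doSomethingElse(resultOfDoSomething) {
    console.log(resultOfDoSomething); // "result of doSomething"
    return 'result of doSomethingElse';
}

function finalHandler(resultOfDoSomethingElse) {
    console.log(resultOfDoSomethingElse); // "result of doSomethingElse"
}

doSomething().then(doSomethingElse).then(finalHandler);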
var ing_data = savedata.ingredients.split(',');

for(var i = 0; i < ing_data.length; i++){
    var d = {
        content_name: ing_data[i],
        dogFoodId: dogId
    }
    db.dog_ingredients.create(d).then(function(data){
    }, function(e){
        console.log(e);
        res.status(403).send('Error');
        //break for loop this point
    });
}
How do I break out of the for loop when a promise rejects?
I'm using Node with Express and the Sequelize module.
The loop will be over before the first then callback is triggered; this is one of the guarantees of promises (assuming that create operation returns a proper promise, not just a thenable; or at least that the thenable it returns completes asynchronously).
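A minimal illustration of that guarantee (using Promise.resolve() in place of the real create call): the synchronous loop always finishes before any then callback runs.
for (var i = 0; i < 3; i++) {
    Promise.resolve(i).then(function (n) {
        console.log("then callback for", n);
    });
}
console.log("loop is over"); // always logged before any of the lines above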
You can use the reduce trick to loop through adding those ingredients serially (one at a time); a promise rejection along the way will skip the remaining ingredients:
savedata.ingredients.split(',').reduce(function(p, ing) {
    // Chain this ingredient on the end of the promise, return
    // the new promise `then` returns, which gets passed to the
    // next iteration
    return p.then(function() {
        var d = {
            content_name: ing,
            dogFoodId: dogId
        };
        // Return the promise from `create`
        return db.dog_ingredients.create(d);
    });
}, Promise.resolve()/* Seeds the loop above */)
.catch(function(e) {
    // We got a rejection, which bypasses any pending resolution
    // handlers we set up above; process the rejection.
    console.log(e);
    res.status(403).send('Error');
    return Promise.reject(e); // Only need to propagate the rejection like this
                              // if something will use the return value of
                              // this overall structure
});
That looks massive, but that's mostly comments and the object initializer; we could also write it like this (assuming we didn't need to propagate the rejection):
savedata.ingredients.split(',').reduce(function(p, ing) {
    return p.then(function() {
        return db.dog_ingredients.create({ content_name: ing, dogFoodId: dogId });
    });
}, Promise.resolve())
.catch(function(e) {
    res.status(403).send('Error');
});
(Or you can even get smaller, but for me debugging suffers — leave minifying to the minifier.)
I assume you don't want to add the ingredients in parallel since you've indicated you want to stop on the "first" error. But if you did, the code would be simpler:
Promise.all(savedata.ingredients.split(',').map(function(ing) {
    return db.dog_ingredients.create({ content_name: ing, dogFoodId: dogId });
})).catch(function(e) {
    res.status(403).send('Error');
    return Promise.reject(e);
});
(Assumes we don't need to propagate the rejection.)
Again, though, that's parallel.
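If your environment supports async functions, the serial version that stops at the first failure can also be written as a plain loop with break. A sketch under that assumption (the wrapper name addIngredients is made up for illustration; savedata, db, dogId and res come from your existing code):
async function addIngredients() {
    for (const ing of savedata.ingredients.split(',')) {
        try {
            await db.dog_ingredients.create({ content_name: ing, dogFoodId: dogId });
        } catch (e) {
            console.log(e);
            res.status(403).send('Error');
            break; // skip the remaining ingredients
        }
    }
}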
I found the term "The Ghost Promise" here, which looks like my case.
I have the code like this:
return Q.Promise(function(resolve, reject) {
    firstFunctionThatReturnPromise()
        .then(function(firstResult) {
            return _check(firstResult) ? resolve(firstResult) : secondFunctionThatReturnPromise();
        })
        .then(function(secondResult) {
            console.log(secondResult);
            return thirdFunctionThatReturnPromise(secondResult);
        })
        .then(function(thirdResult) {
            resolve(thirdResult);
        })
        .catch(function(e) {
            reject(e);
        });
});
The problem is, even though the _check returns true, it still proceeds to the console.log command (which results in undefined).
In case the _check returns false, things work as expected.
So my questions are:
Is the behavior described above normal?
Is there a more elegant way to handle this case?
Update 1: Many have asked why I use Q.Promise instead of returning the result directly. It's because this is a generic function, shared by other functions.
// Usage in other functions
genericFunction()
    .then(function(finalResult) {
        doSomething(finalResult);
    })
    .catch(function(err) {
        handleError(err);
    });
First off, there's no reason to wrap a new promise around any of this. Your operations already return promises, so it is an error-prone anti-pattern to rewrap them in a new promise.
Second off, as others have said, a .then() handler has these choices:
It can return a result which will be passed to the next .then() handler. Not returning anything passes undefined to the next .then() handler.
It can return a promise whose resolved value will be passed to the next .then() handler or rejected value will be passed to the next reject handler.
It can throw which will reject the current promise.
There is no way from a .then() handler to tell a promise chain to conditionally skip some following .then() handlers other than rejecting.
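A compact sketch of those three choices in one handler (somePromise and anotherAsyncOperation are placeholders, not from the question):
somePromise.then(function (value) {
    if (value === 'plain') {
        return 'a plain value';              // next .then() receives "a plain value"
    } else if (value === 'async') {
        return anotherAsyncOperation();      // next .then() waits for this promise's result
    } else {
        throw new Error('something failed'); // the chain is rejected; next rejection handler runs
    }
});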
So, if you want to branch your promise based on some condition logic, then you need to actually nest your .then() handlers according to your branching logic:
a().then(function(result1) {
    if (result1) {
        return result1;
    } else {
        // b() is only executed here in this conditional
        return b().then(...);
    }
}).then(function(result2) {
    // as long as no rejection, this is executed for both branches of the above conditional
    // result2 will either be result1 or the resolved value of b()
    // depending upon your conditional
})
So, when you want to branch, you make a new nested chain that lets you control what happens based on the conditional branching.
Using your pseudo-code, it would look something like this:
firstFunctionThatReturnPromise().then(function (firstResult) {
    if (_check(firstResult)) {
        return firstResult;
    } else {
        return secondFunctionThatReturnPromise().then(function (secondResult) {
            console.log(secondResult);
            return thirdFunctionThatReturnPromise(secondResult);
        })
    }
}).then(function (finalResult) {
    console.log(finalResult);
    return finalResult;
}).catch(function (err) {
    console.log(err);
    throw err;
});
Even if this is inside a genericFunction, you can still just return the promise you already have:
function genericFunction() {
    return firstFunctionThatReturnPromise().then(function (firstResult) {
        if (_check(firstResult)) {
            return firstResult;
        } else {
            return secondFunctionThatReturnPromise().then(function (secondResult) {
                console.log(secondResult);
                return thirdFunctionThatReturnPromise(secondResult);
            })
        }
    }).then(function (finalResult) {
        console.log(finalResult);
        return finalResult;
    }).catch(function (err) {
        console.log(err);
        throw err;
    });
}
// usage
genericFunction().then(...).catch(...)
The behavior is expected. When you chain your .then() statements, you cannot break out of the chain early except by throwing an error.
Your top-level promise (the one returned by Q.Promise()) gets resolved after _check(); but you actually have an inner promise chain that continues to execute.
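To illustrate that with a sketch (all function names here are hypothetical): rejecting is the only way to skip later .then() handlers, and you can recover in a catch when the early exit wasn't really an error.
var EARLY_EXIT = {}; // sentinel used to signal a deliberate early exit

firstStep()
    .then(function (result) {
        if (isAlreadyDone(result)) {
            EARLY_EXIT.value = result;
            throw EARLY_EXIT; // rejecting is the only way to skip the handlers below
        }
        return secondStep(result);
    })
    .then(function (secondResult) {
        return thirdStep(secondResult); // skipped when we threw above
    })
    .catch(function (e) {
        if (e === EARLY_EXIT) {
            return e.value; // not a real error; recover with the early value
        }
        throw e; // a real error; keep the rejection going
    });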
By specification, then() returns a promise: https://promisesaplus.com/#point-40
You can see for yourself in the source code of Q: https://github.com/kriskowal/q/blob/v1/q.js#L899
For your desired behavior, you'll actually need another nested promise chain.
return Q.Promise(function(resolve, reject) {
    firstFunctionThatReturnPromise().then(function(firstResult) {
        if (_check(firstResult)) {
            resolve(firstResult);
        } else {
            return secondFunctionThatReturnPromise().then(function(secondResult) {
                console.log(secondResult);
                return thirdFunctionThatReturnPromise(secondResult);
            }).then(resolve);
        }
    }).catch(reject);
});
I have never used Q, but everything a .then() handler returns is wrapped in a promise and passed to the next .then(). Here, your first .then() doesn't return anything, so it returns undefined. So undefined is wrapped in a new promise and passed to the next handler, which is where you get secondResult == undefined.
You can see it in action in the following CodePen : http://codepen.io/JesmoDrazik/pen/xOaXKE
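A minimal stand-alone illustration of the same thing:
Promise.resolve('first')
    .then(function (value) {
        console.log(value); // "first"
        // no return statement here, so the handler returns undefined
    })
    .then(function (value) {
        console.log(value); // undefined
    });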
I'm writing a background job function on Parse.com CloudCode. The job needs to call the same function (which includes a Parse.Query.each() call) several times with different parameters, and I want to chain these calls with promises. Here's what I have so far:
Parse.Cloud.job("threadAutoReminders", function(request, response) {
    processThreads(parameters1).then(function() {
        return processThreads(parameters2);
    }).then(function() {
        return processThreads(parameters3);
    }).then(function() {
        return processThreads(parameters4);
    }).then(function() {
        response.success("Success");
    }, function(error) {
        response.error(JSON.stringify(error));
    });
});
Below is the processThreads() function:
function processThreads(parameters) {
    var threadQuery = new Parse.Query("Thread");
    threadQuery... // set up query using parameters
    return threadQuery.each(function(thread) {
        console.log("Hello");
        // do something
    });
}
My questions are:
Am I chaining function calls using promises correctly?
What happens if threadQuery.each() returns zero results? Will the promise chain continue executing? I'm asking because at the moment "Hello" never gets logged.
Am I chaining function calls using promises correctly?
Yes.
What happens if threadQuery.each() returns zero results? Will the promise chain continue executing? I'm asking because at the moment "Hello" never gets logged.
I think I'm right in saying that, if "do something" is synchronous, then zero "Hello" messages can only happen if:
an uncaught error occurs in "do something" before a would-be "Hello" is logged, or
every stage gives no results (suspect your data, your query or your expectation).
You can immunise yourself against uncaught errors by catching them. As Parse promises are not throw-safe, you need to catch errors manually:
function processThreads(parameters) {
    var threadQuery = new Parse.Query("Thread");
    threadQuery... // set up query using parameters
    return threadQuery.each(function(thread) {
        console.log("Hello");
        try {
            doSomething(); // synchronous
        } catch(e) {
            //do nothing
        }
    });
}
That should ensure that the iteration continues and that a fulfilled promise is returned.
The following example shows how to use promises inside your function, using the web browser's Promise implementation.
function processThreads(parameters) {
    return new Promise(function(resolve, reject) {
        var threadQuery = new Parse.Query("Thread");
        threadQuery... // set up query using parameters
        try {
            threadQuery.each(function(thread) {
                console.log("Hello");
                if (condition) {
                    throw "Something was wrong with the thread with id " + thread.id;
                }
            });
        } catch (e) {
            reject(e);
            return;
        }
        resolve();
    });
}
Implementations of promise:
Web Browser https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise
jQuery https://api.jquery.com/promise/
Angular https://docs.angularjs.org/api/ng/service/$q
I was trying to use promises to force serialization of a series of Ajax calls. These Ajax calls are made one for each time a user presses a button. I can successfully serialize the operations like this:
// sample async function
// real-world this is an Ajax call
function delay(val) {
    log("start: ", val);
    return new Promise(function(resolve) {
        setTimeout(function() {
            log("end: ", val);
            resolve();
        }, 500);
    });
}

// initialize p to a resolved promise
var p = Promise.resolve();
var v = 1;

// each click adds a new task to
// the serially executed queue
$("#run").click(function() {
    // How to detect here that there are no other unresolved .then()
    // handlers on the current value of p?
    p = p.then(function() {
        return delay(v++);
    });
});
Working demo: http://jsfiddle.net/jfriend00/4hfyahs3/
But, this builds a potentially never ending promise chain since the variable p that stores the last promise is never cleared. Every new operation just chains onto the prior promise. So, I was thinking that for good memory management, I should be able to detect when there are no more .then() handlers left to run on the current value of p and I can then reset the value of p, making sure that any objects that the previous chain of promise handlers might have held in closures will be eligible for garbage collection.
So, I was wondering how I would know in a given .then() handler that there are no more .then() handlers to be called in this chain and thus, I can just do p = Promise.resolve() to reset p and release the previous promise chain rather than just continually adding onto it.
I'm being told that a "good" promise implementation would not cause accumulating memory from an indefinitely growing promise chain. But, there is apparently no standard that requires or describes this (other than good programming practices) and we have lots of newbie Promise implementations out there so I have not yet decided if it's wise to rely on this good behavior.
My years of coding experience suggest that when implementations are new, facts are lacking that all implementations behave a certain way and there's no specification that says they should behave that way, then it might be wise to write your code in as "safe" a way as possible. In fact, it's often less work to just code around an uncertain behavior than it is to go test all relevant implementations to find out how they behave.
In that vein, here's an implementation of my code that seems to be "safe" in this regard. It just saves a local copy of the global last promise variable for each .then() handler and when that .then() handler runs, if the global promise variable still has the same value, then my code has not chained any more items onto it so this must be the currently last .then() handler. It seems to work in this jsFiddle:
// sample async function
// real-world this is an Ajax call
function delay(val) {
    log("start: ", val);
    return new Promise(function(resolve) {
        setTimeout(function() {
            log("end: ", val);
            resolve();
        }, 500);
    });
}

// initialize p to a resolved promise
var p = Promise.resolve();
var v = 1;

// each click adds a new task to
// the serially executed queue
$("#run").click(function() {
    var origP = p = p.then(function() {
        return delay(v++);
    }).then(function() {
        if (p === origP) {
            // no more are chained by my code
            log("no more chained - resetting promise head");
            // set fresh promise head so no chance of GC leaks
            // on prior promises
            p = Promise.resolve();
            v = 1;
        }
        // clear promise reference in case this closure is leaked
        origP = null;
    }, function() {
        origP = null;
    });
});
… so that I can then reset the value of p, making sure that any objects that the previous chain of promise handlers might have held in closures will be eligible for garbage collection.
No. A promise handler that has been executed (when the promise has settled) is no more needed and implicitly eligible for garbage collection. A resolved promise does not hold onto anything but the resolution value.
You don't need to do "good memory management" for promises (asynchronous values), your promise library does take care of that itself. It has to "release the previous promise chain" automatically, if it doesn't then that's a bug. Your pattern works totally fine as is.
How do you know when the promise chain has completely finished?
I would take a pure, recursive approach for this:
function extendedChain(p, stream, action) {
    // chains a new action to p on every stream event
    // until the chain ends before the next event comes
    // resolves with the result of the chain and the advanced stream
    return Promise.race([
        p.then(res => ({res})), // wrap in object to distinguish from event
        stream // a promise that resolves with a .next promise
    ]).then(({next, res}) =>
        next
            ? extendedChain(p.then(action), next, action) // a stream event happened first
            : {res, next: stream} // the chain fulfilled first
    );
}

function rec(stream, action, partDone) {
    return stream.then(({next}) =>
        extendedChain(action(), next, action).then(({res, next}) => {
            partDone(res);
            return rec(next, action, partDone);
        })
    );
}
var v = 1;
rec(getEvents($("#run"), "click"), () => delay(v++), res => {
    console.log("all current done, none waiting");
    console.log("last result", res);
}); // forever
with a helper function for event streams like
function getEvents(emitter, name) {
    var next;
    function get() {
        return new Promise((res) => {
            next = res;
        });
    }
    emitter.on(name, function() {
        next({next: get()});
    });
    return get();
}
(Demo at jsfiddle.net)
It is impossible to detect when no more handlers are added.
It is in fact an undecidable problem. It is not very hard to show a reduction from the halting problem (or the A_TM problem). I can add a formal reduction if you'd like, but hand-waving: given an input program, put a promise at its first line and chain to it at every return or throw. Assuming we have a program that solves the problem you describe in this question, apply it to the input program; we would then know whether it runs forever or not, solving the halting problem. That is, your problem is at least as hard as the halting problem.
You can detect when a promise is "resolved" and update it on new ones.
This is common in "last" or in "flatMap". A good use case is autocomplete search where you only want the latest results. Here is an implementation by Domenic (https://github.com/domenic/last):
function last(operation) {
    var latestPromise = null; // keep track of the latest
    var pending = new Promise(function () {}); // a promise that never settles
    return function () {
        // call the operation
        var promiseForResult = operation.apply(this, arguments);
        // it is now the latest operation, so set it to that.
        latestPromise = promiseForResult;
        return promiseForResult.then(
            function (value) {
                // if we are _still_ the last value when it resolved
                if (latestPromise === promiseForResult) {
                    return value; // the operation is done, you can set it to Promise.resolve here
                } else {
                    return pending; // wait for more time
                }
            },
            function (reason) {
                if (latestPromise === promiseForResult) { // same as above
                    throw reason;
                } else {
                    return pending;
                }
            }
        );
    };
}
I adapted Domenic's code and documented it for your problem.
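For example, a sketch of how it might be used with the delay and log helpers from your question (this assumes pending is a never-settling promise, as declared in the snippet above):
var lastDelay = last(delay);
var v = 1;

$("#run").click(function () {
    lastDelay(v++).then(function () {
        // only reached if no newer click superseded this call
        log("the most recent delay finished; nothing newer is running");
    });
});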
You can safely not optimize this
Sane promise implementations do not keep promises which are "up the chain", so setting it to Promise.resolve() will not save memory. If a promise does not do this it is a memory leak and you should file a bug against it.
I tried to check if we can see the promise's state in code; apparently that is only possible from the console, not from code, so I used a flag to monitor the status. Not sure if there is a loophole somewhere:
var p
  , v = 1
  , promiseFulfilled = true;

function addPromise() {
    if(!p || promiseFulfilled){
        console.log('resetting promise...');
        p = Promise.resolve();
    }
    p = p.then(function() {
        promiseFulfilled = false;
        return delay(v++);
    }).then(function(){
        promiseFulfilled = true;
    });
}
fiddle demo
You could push the promises onto an array and use Promise.all:
var p = Promise.resolve(),
    promiseArray = [],
    allFinishedPromise;

function cleanup(promise, resolvedValue) {
    // You have to do this funkiness to check if more promises
    // were pushed since you registered the callback, though.
    var wereMorePromisesPushed = allFinishedPromise !== promise;
    if (!wereMorePromisesPushed) {
        // do cleanup
        promiseArray.splice(0, promiseArray.length);
        p = Promise.resolve(); // reset promise
    }
}

$("#run").click(function() {
    p = p.then(function() {
        return delay(v++);
    });
    promiseArray.push(p);
    allFinishedPromise = Promise.all(promiseArray);
    allFinishedPromise.then(cleanup.bind(null, allFinishedPromise));
});
Alternatively, since you know they are executed sequentially, you could have each completion callback remove that promise from the array and just reset the promise when the array is empty.
var p = Promise.resolve(),
    promiseArray = [];

function onPromiseComplete() {
    promiseArray.shift();
    if (!promiseArray.length) {
        p = Promise.resolve();
    }
}

$("#run").click(function() {
    p = p.then(function() {
        onPromiseComplete();
        return delay(v++);
    });
    promiseArray.push(p);
});
Edit: If the array is likely to get very long, though, you should go with the first option because shifting the array is O(N).
Edit: As you noted, there's no reason to keep the array around. A counter will work just fine.
var p = Promise.resolve(),
    promiseCounter = 0;

function onPromiseComplete() {
    promiseCounter--;
    if (!promiseCounter) {
        p = Promise.resolve();
    }
}

$("#run").click(function() {
    p = p.then(function() {
        onPromiseComplete();
        return delay(v++);
    });
    promiseCounter++;
});