author		Peter Zijlstra <peterz@infradead.org>	2008-10-08 09:16:04 +0200
committer	Ingo Molnar <mingo@elte.hu>		2008-10-08 12:20:26 +0200
commit		2fb7635c4cea310992a39580133099dd99ad151c (patch)
tree		e898c690d05c3dfeaa733fb3472eff400669be3a /kernel
parent		990d0f2ced23052abc7efa09bd05bff34e00cf73 (diff)
sched: sync wakeups vs avg_overlap
While looking at the code I wondered why we always do:

	sync && avg_overlap < migration_cost

which is a bit odd, since the overlap test was meant to detect sync
wakeups, so using it to specialize sync wakeups doesn't make much sense.

Hence change the code to do:

	sync || avg_overlap < migration_cost

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
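In effect the patch relaxes the predicate from a conjunction to a disjunction: a short avg_overlap no longer merely narrows sync wakeups, it counts as evidence of one. A minimal sketch of the two forms in plain C (the function names and scalar parameters here are illustrative stand-ins for the kernel's per-task statistics, not the kernel code itself):

	#include <stdbool.h>

	/* Before: the overlap test only narrowed sync wakeups. */
	static bool overlap_wakeup_old(bool sync, unsigned long avg_overlap,
				       unsigned long migration_cost)
	{
		return sync && avg_overlap < migration_cost;
	}

	/* After: a short avg_overlap is itself taken as evidence of a
	 * sync wakeup, so either condition suffices. */
	static bool overlap_wakeup_new(bool sync, unsigned long avg_overlap,
				       unsigned long migration_cost)
	{
		return sync || avg_overlap < migration_cost;
	}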
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	| 18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fcbe850a5a90..18fd17172eb6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1103,6 +1103,11 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
+	if (!sync && sched_feat(SYNC_WAKEUPS) &&
+	    curr->se.avg_overlap < sysctl_sched_migration_cost &&
+	    p->se.avg_overlap < sysctl_sched_migration_cost)
+		sync = 1;
+
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1127,11 +1132,8 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && balanced) {
-		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-		    p->se.avg_overlap < sysctl_sched_migration_cost)
-			return 1;
-	}
+	if (sync && balanced)
+		return 1;
 
 	schedstat_inc(p, se.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
@@ -1268,9 +1270,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && sync &&
-	    se->avg_overlap < sysctl_sched_migration_cost &&
-	    pse->avg_overlap < sysctl_sched_migration_cost) {
+	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+	    (se->avg_overlap < sysctl_sched_migration_cost &&
+	     pse->avg_overlap < sysctl_sched_migration_cost))) {
 		resched_task(curr);
 		return;
 	}
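The wake_affine() half of the change can be read the same way: instead of requiring both a sync hint and a short overlap, a non-sync wakeup whose tasks barely overlap is promoted to sync before the balanced check. A condensed sketch of that control flow (simplified stand-ins for the kernel's task fields and signatures, not the actual function):

	#include <stdbool.h>

	/* Condensed flow of the patched wake_affine() decision: a non-sync
	 * wakeup is promoted to sync when both the currently running task
	 * and the woken task historically overlap very little. */
	static bool affine_wakeup(bool sync, bool balanced,
				  unsigned long curr_overlap,
				  unsigned long woken_overlap,
				  unsigned long migration_cost)
	{
		if (!sync &&
		    curr_overlap < migration_cost &&
		    woken_overlap < migration_cost)
			sync = true;	/* implied sync wakeup */

		/* an (implied) sync wakeup onto a balanced CPU attracts
		 * the woken task to this CPU */
		return sync && balanced;
	}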