author     Siddha, Suresh B <suresh.b.siddha@intel.com>  2005-09-10 00:26:21 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>         2005-09-10 10:06:24 -0700
commit     fa3b6ddc3f4a8eadba52234134cdb59c28b5332d (patch)
tree       9aa1b8211adb63bb2983be0d4ab2afdfab88e1a3 /kernel
parent     5927ad78ec75870b1bdfa65a10ad1300cd664d36 (diff)
[PATCH] sched: don't kick ALB in the presence of pinned task
Jack Steiner brought up this issue at my OLS talk.

Take a scenario where two tasks are pinned to the two HT threads in a
physical package. Idle packages in the system will keep kicking
migration_thread on the busy package without any success.

We will run into similar scenarios in the presence of CMP/NUMA.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
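For context, the pinning described above is typically set up from user space
with sched_setaffinity(), which is what restricts a task's cpus_allowed mask
in the kernel. The following is a minimal, illustrative userspace sketch, not
part of this patch; the choice of CPU 0 is arbitrary:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);	/* allow the task to run only on CPU 0 */

	/* pid 0 means "the calling task"; after this call the scheduler can
	 * never move this task to another CPU, which is the situation the
	 * patch below has to cope with (illustrative example only) */
	if (sched_setaffinity(0, sizeof(mask), &mask) == -1) {
		perror("sched_setaffinity");
		exit(EXIT_FAILURE);
	}

	return 0;
}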
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  |  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 103f705b245c..1dc29dec38a9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2125,6 +2125,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 		spin_lock(&busiest->lock);
+
+		/* don't kick the migration_thread, if the curr
+		 * task on busiest cpu can't be moved to this_cpu
+		 */
+		if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+			spin_unlock(&busiest->lock);
+			all_pinned = 1;
+			goto out_one_pinned;
+		}
+
 		if (!busiest->active_balance) {
 			busiest->active_balance = 1;
 			busiest->push_cpu = this_cpu;
@@ -2165,6 +2175,8 @@ out_balanced:
 	schedstat_inc(sd, lb_balanced[idle]);
 	sd->nr_balance_failed = 0;
+
+out_one_pinned:
 	/* tune up the balancing interval */
 	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
 			(sd->balance_interval < sd->max_interval))
@@ -2357,7 +2369,8 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
 		if (j - sd->last_balance >= interval) {
 			if (load_balance(this_cpu, this_rq, sd, idle)) {
-				/* We've pulled tasks over so either we're no
+				/*
+				 * We've pulled tasks over so either we're no
 				 * longer idle, or one of our SMT siblings is
 				 * not idle.
 				 */
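
Taken together, the two load_balance() hunks give roughly the control flow
sketched below. This is a simplified paraphrase of the patched function's
tail, not the literal kernel source; details unrelated to the pinned-task
case are omitted:

	/* sketch of load_balance() after this patch (paraphrase, not verbatim) */
	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
		spin_lock(&busiest->lock);

		/* the task running on the busiest CPU is not allowed on
		 * this_cpu, so waking migration_thread could never move it:
		 * give up instead of kicking active load balancing */
		if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
			spin_unlock(&busiest->lock);
			all_pinned = 1;
			goto out_one_pinned;
		}
		/* ... otherwise kick migration_thread as before ... */
	}
	/* ... */

out_balanced:
	schedstat_inc(sd, lb_balanced[idle]);
	sd->nr_balance_failed = 0;

out_one_pinned:
	/* the pinned case skips the lb_balanced accounting and the
	 * nr_balance_failed reset above, but still backs off by widening
	 * the balancing interval, so idle packages stop hammering a busy
	 * package whose tasks cannot move anyway */
	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
			(sd->balance_interval < sd->max_interval))
		sd->balance_interval *= 2;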