author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-01-25 11:50:51 +0100
committer	Ingo Molnar <mingo@elte.hu>	2012-01-26 19:38:09 +0100
commit		4ca9b72b71f10147bd21969c1805f5b2c4ca7b7b (patch)
tree		05d6072a28029ffca57ba6c7d30036698348b59a /kernel/sched
parent		87f71ae2dd7471c1b4c94100be1f218e91dc64c3 (diff)
sched: Fix rq->nr_uninterruptible update race
KOSAKI Motohiro noticed the following race:

 > CPU0                                    CPU1
 > --------------------------------------------------------
 > deactivate_task()
 >                                         task->state = TASK_UNINTERRUPTIBLE;
 > activate_task()
 >   rq->nr_uninterruptible--;
 >
 >                                         schedule()
 >                                           deactivate_task()
 >                                             rq->nr_uninterruptible++;

Kosaki-San's scenario is possible when CPU0 runs __sched_setscheduler()
against CPU1's current @task.

__sched_setscheduler() does a dequeue/enqueue in order to move the task
to its new queue (position) to reflect the newly provided scheduling
parameters.

However, it should be completely invariant to nr_uninterruptible
accounting; sched_setscheduler() doesn't affect readiness to run, merely
the policy on when to run.

So convert the inappropriate activate/deactivate_task usage to
enqueue/dequeue_task, which avoids the nr_uninterruptible accounting.

Also convert the two other sites, __migrate_task() and normalize_task(),
that still use activate/deactivate_task. These sites aren't really a
problem since __migrate_task() will only be called on a non-running task
(and is therefore immune to the described problem) and normalize_task()
isn't ever used on regular systems.

Also remove the comments from activate/deactivate_task since they're
misleading at best.

Reported-by: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1327486224.2614.45.camel@laptop
Signed-off-by: Ingo Molnar <mingo@elte.hu>
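For illustration only (not part of this patch): the interleaving above can
be replayed in a few lines of userspace C against the accounting scheme
visible in the diff below. contributes_to_load() is a simplified stand-in
for the kernel's task_contributes_to_load(); runqueue locking and the real
enqueue/dequeue work are elided.

  #include <stdio.h>

  #define TASK_RUNNING          0
  #define TASK_UNINTERRUPTIBLE  2

  struct rq { long nr_uninterruptible; };
  struct task_struct { long state; };

  /* stand-in for the kernel's task_contributes_to_load() */
  static int contributes_to_load(struct task_struct *p)
  {
          return p->state == TASK_UNINTERRUPTIBLE;
  }

  static void activate_task(struct rq *rq, struct task_struct *p)
  {
          if (contributes_to_load(p))
                  rq->nr_uninterruptible--;
          /* enqueue_task() proper elided */
  }

  static void deactivate_task(struct rq *rq, struct task_struct *p)
  {
          if (contributes_to_load(p))
                  rq->nr_uninterruptible++;
          /* dequeue_task() proper elided */
  }

  int main(void)
  {
          struct rq rq = { 0 };
          struct task_struct t = { TASK_RUNNING };

          deactivate_task(&rq, &t);       /* CPU0: state still RUNNING, no ++ */
          t.state = TASK_UNINTERRUPTIBLE; /* CPU1: task marks itself */
          activate_task(&rq, &t);         /* CPU0: spurious -- */
          deactivate_task(&rq, &t);       /* CPU1: schedule() sleeps, ++ */

          /* one task now sleeps uninterruptibly, yet the counter reads 0 */
          printf("nr_uninterruptible = %ld (expected 1)\n",
                 rq.nr_uninterruptible);
          return 0;
  }

Since rq->nr_uninterruptible feeds the loadavg calculation, the spurious
decrement leaves the reported load average off by one sleeper.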
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	18
1 file changed, 6 insertions, 12 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df00cb09263e..e067df1fd01a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -723,9 +723,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -734,9 +731,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -4134,7 +4128,7 @@ recheck:
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
 
@@ -4147,7 +4141,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4992,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		deactivate_task(rq_src, p, 0);
+		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		enqueue_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7032,10 +7026,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
 	on_rq = p->on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
 
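After this conversion, all three sites share the same shape: the task's
properties are changed between a bare dequeue and enqueue, and
nr_uninterruptible is touched only on the genuine sleep/wakeup paths.
Schematically (a sketch of the pattern, not verbatim kernel code):

  on_rq = p->on_rq;
  if (on_rq)
          dequeue_task(rq, p, 0);   /* off the queue, no load accounting */
  /* ... change policy / priority / cpu ... */
  if (on_rq)
          enqueue_task(rq, p, 0);   /* requeue at the new position */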