author Peter Zijlstra <a.p.zijlstra@chello.nl> 2011-04-05 17:23:48 +0200
committer Ingo Molnar <mingo@elte.hu> 2011-04-14 08:52:37 +0200
commit 3fe1698b7fe05aeb063564e71e40d09f28d8e80c (patch)
tree d068bd6a075cd85c64a040cf9fc8e661f8be00a8
parent 74f8e4b2335de45485b8d5b31a504747f13c8070 (diff)
sched: Deal with non-atomic min_vruntime reads on 32bits
In order to avoid reading partially updated min_vruntime values on 32bit, implement a seqcount-like solution.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.111378493@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
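The scheme in this patch is a "value plus copy" retry protocol: the writer updates min_vruntime, issues a write barrier, then updates min_vruntime_copy; a 32-bit reader loads the copy, issues a read barrier, loads the value, and retries until the two agree, so it never acts on a torn 64-bit read. Below is a minimal userspace sketch of the same idea, not the kernel code itself: C11 relaxed atomics plus explicit fences stand in for the kernel's plain accesses with smp_wmb()/smp_rmb(), and all names in it (val, val_copy, publish, snapshot) are illustrative.

/*
 * Userspace sketch of the seqcount-like scheme used in this patch.
 * Writer: store value, write barrier, store copy.
 * Reader: load copy, read barrier, load value, retry until they match.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t val;       /* stands in for cfs_rq->min_vruntime      */
static _Atomic uint64_t val_copy;  /* stands in for cfs_rq->min_vruntime_copy */

/* Writer side: mirrors the hunk added to update_min_vruntime(). */
static void publish(uint64_t v)
{
	atomic_store_explicit(&val, v, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);           /* cf. smp_wmb() */
	atomic_store_explicit(&val_copy, v, memory_order_relaxed);
}

/* Reader side: mirrors the retry loop added to task_waking_fair(). */
static uint64_t snapshot(void)
{
	uint64_t v, copy;

	do {
		copy = atomic_load_explicit(&val_copy, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);    /* cf. smp_rmb() */
		v = atomic_load_explicit(&val, memory_order_relaxed);
	} while (v != copy);   /* mismatch means we raced with publish() */

	return v;
}

int main(void)
{
	publish(42);
	printf("read back %llu\n", (unsigned long long)snapshot());
	return 0;
}

On 64-bit kernels none of this is needed, since a u64 load cannot tear there; that is why both hunks below are guarded by #ifndef CONFIG_64BIT.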
-rw-r--r--  kernel/sched.c       3
-rw-r--r--  kernel/sched_fair.c 19
2 files changed, 20 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 46f42cac4eb..7a5eb262078 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -312,6 +312,9 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
+#ifndef CONFIG_64BIT
+	u64 min_vruntime_copy;
+#endif
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ad4c414f456..054cebb81f7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -358,6 +358,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 	}
 
 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+#ifndef CONFIG_64BIT
+	smp_wmb();
+	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 /*
@@ -1376,10 +1380,21 @@ static void task_waking_fair(struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 min_vruntime;
 
-	lockdep_assert_held(&task_rq(p)->lock);
+#ifndef CONFIG_64BIT
+	u64 min_vruntime_copy;
 
-	se->vruntime -= cfs_rq->min_vruntime;
+	do {
+		min_vruntime_copy = cfs_rq->min_vruntime_copy;
+		smp_rmb();
+		min_vruntime = cfs_rq->min_vruntime;
+	} while (min_vruntime != min_vruntime_copy);
+#else
+	min_vruntime = cfs_rq->min_vruntime;
+#endif
+
+	se->vruntime -= min_vruntime;
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED