author	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-04 15:15:43 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-04 15:15:43 -0700
commit	f2809d61d6cf47c2ed2963ba3b4c59e709144ccb (patch)
tree	4cfc2aadc16b52ef4562833ea838b34723e6b3fb
parent	d93ac51c7a129db7a1431d859a3ef45a0b1f3fc5 (diff)
parent	8b46f880841aac821af8efa6581bb0e46b8b9845 (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: Fix RCU lockdep splat on freezer_fork path
  rcu: Fix RCU lockdep splat in set_task_cpu on fork path
  mutex: Don't spin when the owner CPU is offline or other weird cases
-rw-r--r--	kernel/cgroup_freezer.c	|  5 ++++-
-rw-r--r--	kernel/sched.c	| 18 ++++++++++++++----
2 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index da5e1397553..e5c0244962b 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
* No lock is needed, since the task isn't on tasklist yet,
* so it can't be moved to another cgroup, which means the
* freezer won't be removed and will be valid during this
- * function call.
+ * function call. Nevertheless, apply RCU read-side critical
+ * section to suppress RCU lockdep false positives.
*/
+ rcu_read_lock();
freezer = task_freezer(task);
+ rcu_read_unlock();
/*
* The root cgroup is non-freezable, so we can skip the
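
The reasoning in the comment above is the whole story: the freezer cannot go away here, but task_freezer() reaches it through an rcu_dereference()-based accessor, so CONFIG_PROVE_RCU complains unless the access sits in an RCU read-side critical section. Below is a minimal sketch of that pattern only; the names (example_state, get_example_state) are hypothetical stand-ins for the real freezer structures.

#include <linux/rcupdate.h>

struct example_state {
        int frozen;
};

static struct example_state __rcu *global_state;

/* Accessor built on rcu_dereference(), like task_freezer()/task_subsys_state(). */
static struct example_state *get_example_state(void)
{
        return rcu_dereference(global_state);
}

static int example_is_frozen(void)
{
        struct example_state *st;
        int frozen = 0;

        /*
         * Not strictly needed for correctness when the object cannot
         * disappear, but it keeps RCU lockdep quiet, exactly as the
         * freezer_fork() hunk above does.
         */
        rcu_read_lock();
        st = get_example_state();
        if (st)
                frozen = st->frozen;
        rcu_read_unlock();

        return frozen;
}
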
diff --git a/kernel/sched.c b/kernel/sched.c
index 6af210a7de7..3c2a54f70ff 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
+ /*
+ * Strictly speaking this rcu_read_lock() is not needed since the
+ * task_group is tied to the cgroup, which in turn can never go away
+ * as long as there are tasks attached to it.
+ *
+ * However since task_group() uses task_subsys_state() which is an
+ * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+ */
+ rcu_read_lock();
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
p->rt.rt_rq = task_group(p)->rt_rq[cpu];
p->rt.parent = task_group(p)->rt_se[cpu];
#endif
+ rcu_read_unlock();
}
#else
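
For context on why the splat fires at all: under CONFIG_PROVE_RCU, the rcu_dereference() family of accessors (including rcu_dereference_check(), which can additionally list locks that make the access legal) complains when the caller is neither inside a read-side critical section nor holding a listed lock. The sketch below illustrates that check being satisfied, with hypothetical names (example_obj, example_lock, get_example_obj); it is not the scheduler code.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_obj {
        int data;
};

static struct example_obj __rcu *example_ptr;
static DEFINE_SPINLOCK(example_lock);

/* Legal under rcu_read_lock() or with example_lock held; anything else splats. */
static struct example_obj *get_example_obj(void)
{
        return rcu_dereference_check(example_ptr,
                                     lockdep_is_held(&example_lock));
}

static int read_example_data(void)
{
        struct example_obj *obj;
        int val = 0;

        rcu_read_lock();        /* quiets CONFIG_PROVE_RCU, as in set_task_rq() */
        obj = get_example_obj();
        if (obj)
                val = obj->data;
        rcu_read_unlock();

        return val;
}
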
@@ -3780,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
* the mutex owner just released it and exited.
*/
if (probe_kernel_address(&owner->cpu, cpu))
- goto out;
+ return 0;
#else
cpu = owner->cpu;
#endif
@@ -3790,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
* the cpu field may no longer be valid.
*/
if (cpu >= nr_cpumask_bits)
- goto out;
+ return 0;
/*
* We need to validate that we can do a
* get_cpu() and that we have the percpu area.
*/
if (!cpu_online(cpu))
- goto out;
+ return 0;
rq = cpu_rq(cpu);
@@ -3816,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
cpu_relax();
}
-out:
+
return 1;
}
#endif
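
The mutex change flips what the early exits mean: the "weird" cases (owner struct possibly freed, bogus cpu value, cpu offline) now return 0 so the caller stops optimistic spinning and blocks, instead of falling through to the old out: label, which returned 1 ("owner still running, keep spinning"). A simplified sketch of the resulting decision logic follows; the function name is hypothetical and this is not the kernel's mutex_spin_on_owner() body.

#include <linux/cpumask.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>

/* Decide whether optimistic spinning on a lock owner is still worthwhile. */
static int owner_still_worth_spinning(struct thread_info *owner)
{
        unsigned int cpu;

        if (probe_kernel_address(&owner->cpu, cpu))
                return 0;       /* owner may have been freed/exited: stop spinning */

        if (cpu >= nr_cpumask_bits)
                return 0;       /* garbage cpu value: stop spinning */

        if (!cpu_online(cpu))
                return 0;       /* cpu went offline: stop spinning */

        return 1;               /* owner looks alive: keep spinning */
}
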