Commit f2809d61 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'core-fixes-for-linus' of...

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: Fix RCU lockdep splat on freezer_fork path
  rcu: Fix RCU lockdep splat in set_task_cpu on fork path
  mutex: Don't spin when the owner CPU is offline or other weird cases
parents d93ac51c 8b46f880
@@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 	 * No lock is needed, since the task isn't on tasklist yet,
 	 * so it can't be moved to another cgroup, which means the
 	 * freezer won't be removed and will be valid during this
-	 * function call.
+	 * function call. Nevertheless, apply RCU read-side critical
+	 * section to suppress RCU lockdep false positives.
 	 */
+	rcu_read_lock();
 	freezer = task_freezer(task);
+	rcu_read_unlock();

 	/*
 	 * The root cgroup is non-freezable, so we can skip the
...
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }

 #else
@@ -3780,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the mutex owner just released it and exited.
 	 */
 	if (probe_kernel_address(&owner->cpu, cpu))
-		goto out;
+		return 0;
 #else
 	cpu = owner->cpu;
 #endif
@@ -3790,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the cpu field may no longer be valid.
 	 */
 	if (cpu >= nr_cpumask_bits)
-		goto out;
+		return 0;

 	/*
 	 * We need to validate that we can do a
 	 * get_cpu() and that we have the percpu area.
 	 */
 	if (!cpu_online(cpu))
-		goto out;
+		return 0;

 	rq = cpu_rq(cpu);

@@ -3816,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 		cpu_relax();
 	}
-out:
+
 	return 1;
 }
 #endif
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment