Commit 7332e524 authored by Peter Zijlstra, committed by Thomas Gleixner

sched: cleanup wake_idle

A more readable version, with a few differences:

 - don't check against the root domain, but instead check
   SD_LOAD_BALANCE

 - don't re-iterate the cpus already iterated on the previous SD
   (a sketch of this pattern follows the diff)

 - use rcu_read_lock() around the sd iteration
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Dinakar Guniguntala <dino@in.ibm.com>
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Darren Hart <dvhltc@us.ibm.com>
Cc: John Kacur <jkacur@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 674134b9
@@ -1080,14 +1080,13 @@ static int wake_idle_power_save(int cpu, struct task_struct *p)
  * not idle and an idle cpu is available. The span of cpus to
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
- * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_mask)
  *
  * Returns the CPU we should wake onto.
  */
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	struct sched_domain *sd;
+	struct rq *task_rq = task_rq(p);
+	struct sched_domain *sd, *child = NULL;
 	int i;
 
 	i = wake_idle_power_save(cpu, p);
@@ -1106,24 +1105,34 @@ static int wake_idle(int cpu, struct task_struct *p)
 	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
 		return cpu;
 
+	rcu_read_lock();
 	for_each_domain(cpu, sd) {
-		if ((sd->flags & SD_WAKE_IDLE)
-		    || ((sd->flags & SD_WAKE_IDLE_FAR)
-			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			for_each_cpu_and(i, sched_domain_span(sd),
-					 &p->cpus_allowed) {
-				if (cpu_active(i) && idle_cpu(i)) {
-					if (i != task_cpu(p)) {
-						schedstat_inc(p,
-						       se.nr_wakeups_idle);
-					}
-					return i;
-				}
-			}
-		} else {
+		if (!(sd->flags & SD_LOAD_BALANCE))
 			break;
-		}
+
+		if (!(sd->flags & SD_WAKE_IDLE) &&
+		    (task_hot(p, task_rq->clock, sd) ||
+		     !(sd->flags & SD_WAKE_IDLE_FAR)))
+			break;
+
+		for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+			if (child && cpumask_test_cpu(i, sched_domain_span(child)))
+				continue;
+
+			if (!idle_cpu(i))
+				continue;
+
+			if (task_cpu(p) != i)
+				schedstat_inc(p, se.nr_wakeups_idle);
+
+			cpu = i;
+			goto unlock;
+		}
+
+		child = sd;
 	}
+unlock:
+	rcu_read_unlock();
 	return cpu;
 }
 #else /* !ARCH_HAS_SCHED_WAKE_IDLE*/
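The second bullet — skipping CPUs already covered by the previous, smaller domain — is what the new child pointer and the cpumask_test_cpu(i, sched_domain_span(child)) test implement: each sched_domain's span is a superset of its child's, so re-checking the child's CPUs at every level would be wasted work. Below is a minimal standalone C sketch of that iteration pattern, with domain spans modeled as plain bitmasks; the toy topology and all names are illustrative, not kernel code.

#include <stdio.h>

#define NR_CPUS 8

/* Pretend only CPU 5 is idle. */
static int is_idle(int cpu)
{
	return cpu == 5;
}

int main(void)
{
	/*
	 * Nested spans, innermost first: {0,1} -> {0..3} -> {0..7},
	 * like an SMT -> MC -> NODE sched_domain hierarchy.
	 */
	unsigned int span[] = { 0x03, 0x0f, 0xff };
	unsigned int child = 0;	/* span searched at the previous level */
	int level, cpu, found = -1;

	for (level = 0; level < 3; level++) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!(span[level] & (1u << cpu)))
				continue;	/* not in this domain */
			if (child & (1u << cpu))
				continue;	/* already checked below */
			if (is_idle(cpu)) {
				found = cpu;
				goto out;
			}
		}
		child = span[level];	/* next level skips this span */
	}
out:
	printf("idle cpu: %d\n", found);	/* prints: idle cpu: 5 */
	return 0;
}

Without the child mask the inner loop would revisit CPUs 0-1 three times and CPUs 2-3 twice; with it, every CPU in the outermost span is tested exactly once across the whole walk.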