Commit 56e91477 authored by Peter Zijlstra, committed by Thomas Gleixner

sched: dynamic cpu_power

Recompute the cpu_power for each cpu during load-balance.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Dinakar Guniguntala <dino@in.ibm.com>
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Darren Hart <dvhltc@us.ibm.com>
Cc: John Kacur <jkacur@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 581d1c64
@@ -3780,14 +3780,46 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-static void update_sched_power(struct sched_domain *sd)
+unsigned long __weak arch_smt_gain(struct sched_domain *sd, int cpu)
+{
+	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+	unsigned long smt_gain = sd->smt_gain;
+
+	smt_gain /= weight;
+
+	return smt_gain;
+}
+
+static void update_cpu_power(struct sched_domain *sd, int cpu)
+{
+	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+	unsigned long power = SCHED_LOAD_SCALE;
+	struct sched_group *sdg = sd->groups;
+	unsigned long old = sdg->__cpu_power;
+
+	/* here we could scale based on cpufreq */
+
+	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+		power *= arch_smt_gain(sd, cpu);
+		power >>= SCHED_LOAD_SHIFT;
+	}
+
+	/* here we could scale based on RT time */
+
+	if (power != old) {
+		sdg->__cpu_power = power;
+		sdg->reciprocal_cpu_power = reciprocal_value(power);
+	}
+}
+
+static void update_group_power(struct sched_domain *sd, int cpu)
 {
 	struct sched_domain *child = sd->child;
 	struct sched_group *group, *sdg = sd->groups;
 	unsigned long power = sdg->__cpu_power;
 
 	if (!child) {
-		/* compute cpu power for this cpu */
+		update_cpu_power(sd, cpu);
 		return;
 	}
@@ -3830,7 +3862,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	if (local_group) {
 		balance_cpu = group_first_cpu(group);
 		if (balance_cpu == this_cpu)
-			update_sched_power(sd);
+			update_group_power(sd, this_cpu);
 	}
 
 	/* Tally up the load of all CPUs in the group */
...
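For illustration, here is a minimal userspace sketch of the new scaling arithmetic. It is not part of the commit; it assumes the kernel defaults of the era, SCHED_LOAD_SCALE = 1024 and sd->smt_gain = 1178 (an SMT pair worth roughly 1.15 cores), and inlines the steps that arch_smt_gain() and update_cpu_power() perform:

#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)	/* 1024 */

int main(void)
{
	unsigned long smt_gain = 1178;	/* assumed default: ~1.15 * 1024 per core */
	unsigned long weight = 2;	/* two hardware threads share the core */
	unsigned long power = SCHED_LOAD_SCALE;

	/* as in arch_smt_gain(): split the core's gain across its siblings */
	unsigned long gain = smt_gain / weight;		/* 1178 / 2 = 589 */

	/* as in update_cpu_power(): scale the default power by that gain */
	power *= gain;
	power >>= SCHED_LOAD_SHIFT;			/* (1024 * 589) >> 10 = 589 */

	printf("cpu_power per SMT sibling: %lu\n", power);
	return 0;
}

Each sibling thus advertises a cpu_power of 589 rather than 1024, so the load-balancer sees the two threads of one core as about 1.15 CPUs in total instead of two full CPUs. Since arch_smt_gain() is declared __weak, an architecture can override it to report a different per-sibling gain.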