Commit 02644174 authored by Peter Zijlstra, committed by Thomas Gleixner

Provide an arch-specific hook for cpufreq-based scaling of cpu_power.

[ dino: backport to 31-rt ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Dinakar Guniguntala <dino@in.ibm.com>
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Darren Hart <dvhltc@us.ibm.com>
Cc: John Kacur <jkacur@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 9c8871b3
@@ -3793,7 +3793,18 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return SCHED_LOAD_SCALE;
+}
+
+unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return default_scale_freq_power(sd, cpu);
+}
+
+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 {
 	unsigned long weight = cpumask_weight(sched_domain_span(sd));
 	unsigned long smt_gain = sd->smt_gain;
@@ -3803,6 +3814,11 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 	return smt_gain;
 }
 
+unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+{
+	return default_scale_smt_power(sd, cpu);
+}
+
 unsigned long scale_rt_power(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
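Note: default_scale_freq_power() returns SCHED_LOAD_SCALE, i.e. "no scaling", and arch_scale_freq_power() is declared __weak so an architecture can supply its own version while every other architecture keeps the default. As a rough, hypothetical sketch (not part of this commit), an override could scale cpu_power by the CPU's current cpufreq frequency relative to its maximum; arch_get_cur_freq()/arch_get_max_freq() below are made-up placeholders for whatever the platform actually exposes:

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	unsigned long cur = arch_get_cur_freq(cpu);	/* hypothetical helper, kHz */
	unsigned long max = arch_get_max_freq(cpu);	/* hypothetical helper, kHz */

	if (!max)
		return default_scale_freq_power(sd, cpu);

	/* SCHED_LOAD_SCALE (1024) means "running at full capacity" */
	return SCHED_LOAD_SCALE * cur / max;
}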
@@ -3827,7 +3843,8 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 	unsigned long power = SCHED_LOAD_SCALE;
 	struct sched_group *sdg = sd->groups;
 
-	/* here we could scale based on cpufreq */
+	power *= arch_scale_freq_power(sd, cpu);
+	power >>= SCHED_LOAD_SHIFT;
 
 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
 		power *= arch_scale_smt_power(sd, cpu);
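The scaling in update_cpu_power() is plain fixed-point arithmetic: SCHED_LOAD_SCALE is 1 << SCHED_LOAD_SHIFT (1024), each hook returns a factor in that scale, and the right shift after every multiplication brings power back into range so the factors compose. A standalone illustration with assumed example factors (512 for a CPU at half its maximum frequency, 589 for e.g. an smt_gain of 1178 shared by two SMT siblings):

#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)

int main(void)
{
	unsigned long power = SCHED_LOAD_SCALE;		/* 1024 */
	unsigned long freq_factor = 512;		/* assumed: CPU at half of max frequency */
	unsigned long smt_factor = 589;			/* assumed: smt_gain 1178 / 2 siblings */

	power *= freq_factor;				/* 1024 * 512 = 524288 */
	power >>= SCHED_LOAD_SHIFT;			/* back to fixed point: 512 */

	power *= smt_factor;				/* 512 * 589 = 301568 */
	power >>= SCHED_LOAD_SHIFT;			/* 294 */

	printf("scaled cpu_power = %lu\n", power);	/* prints 294 */
	return 0;
}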