Commit 9c8871b3 authored by Dinakar Guniguntala, committed by Thomas Gleixner

x86: Add generic aperf/mperf code

Move some of the aperf/mperf code out of the cpufreq driver so that
other code can use it as well.
Signed-off-by: Dinakar Guniguntala <dino@in.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 1755a782
arch/x86/include/asm/processor.h
@@ -27,6 +27,7 @@ struct mm_struct;
 #include <linux/cpumask.h>
 #include <linux/cache.h>
 #include <linux/threads.h>
+#include <linux/math64.h>
 #include <linux/init.h>
 
 /*
@@ -1000,4 +1001,33 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 extern int get_tsc_mode(unsigned long adr);
 extern int set_tsc_mode(unsigned int val);
 
+struct aperfmperf {
+	u64 aperf, mperf;
+};
+
+static inline void get_aperfmperf(struct aperfmperf *am)
+{
+	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
+
+	rdmsrl(MSR_IA32_APERF, am->aperf);
+	rdmsrl(MSR_IA32_MPERF, am->mperf);
+}
+
+#define APERFMPERF_SHIFT 10
+
+static inline
+unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
+				    struct aperfmperf *new)
+{
+	u64 aperf = new->aperf - old->aperf;
+	u64 mperf = new->mperf - old->mperf;
+	unsigned long ratio = aperf;
+
+	mperf >>= APERFMPERF_SHIFT;
+	if (mperf)
+		ratio = div64_u64(aperf, mperf);
+
+	return ratio;
+}
+
 #endif /* _ASM_X86_PROCESSOR_H */
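The new calc_aperfmperf_ratio() helper returns aperf/mperf as a 10-bit
fixed-point fraction rather than a percentage. A rough user-space sketch
of the same arithmetic follows; the function name ratio_of() and the
counter deltas are made up for illustration and are not part of the patch:

	/* User-space mock-up of the fixed-point ratio math above (not kernel code). */
	#include <stdint.h>
	#include <stdio.h>

	#define APERFMPERF_SHIFT 10

	/* Same idea as calc_aperfmperf_ratio(): roughly (aperf / mperf) << 10. */
	static uint64_t ratio_of(uint64_t aperf_delta, uint64_t mperf_delta)
	{
		uint64_t ratio = aperf_delta;

		mperf_delta >>= APERFMPERF_SHIFT;
		if (mperf_delta)
			ratio = aperf_delta / mperf_delta;

		return ratio;
	}

	int main(void)
	{
		/* Hypothetical counter deltas: the CPU ran at half its reference clock. */
		uint64_t aperf_delta = 1000000, mperf_delta = 2000000;

		/* Prints roughly 512, i.e. 0.5 in 10-bit fixed point. */
		printf("ratio = %llu\n", (unsigned long long)ratio_of(aperf_delta, mperf_delta));
		return 0;
	}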
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -70,11 +70,7 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
-struct acpi_msr_data {
-	u64 saved_aperf, saved_mperf;
-};
-
-static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+static DEFINE_PER_CPU(struct aperfmperf, old_perf);
 
 DEFINE_TRACE(power_mark);
 
@@ -243,23 +239,12 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_pair {
-	union {
-		struct {
-			u32 lo;
-			u32 hi;
-		} split;
-		u64 whole;
-	} aperf, mperf;
-};
-
 /* Called via smp_call_function_single(), on the target CPU */
 static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_pair *cur = _cur;
+	struct aperfmperf *am = _cur;
 
-	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
+	get_aperfmperf(am);
 }
 
 /*
@@ -278,63 +263,17 @@ static void read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_pair readin, cur;
-	unsigned int perf_percent;
+	struct aperfmperf perf;
+	unsigned long ratio;
 	unsigned int retval;
 
-	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
 		return 0;
 
-	cur.aperf.whole = readin.aperf.whole -
-				per_cpu(msr_data, cpu).saved_aperf;
-	cur.mperf.whole = readin.mperf.whole -
-				per_cpu(msr_data, cpu).saved_mperf;
-	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
-	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
-
-#ifdef __i386__
-	/*
-	 * We dont want to do 64 bit divide with 32 bit kernel
-	 * Get an approximate value. Return failure in case we cannot get
-	 * an approximate value.
-	 */
-	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
-		int shift_count;
-		u32 h;
-
-		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
-		shift_count = fls(h);
-
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
-	}
-
-	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
-		int shift_count = 7;
-		cur.aperf.split.lo >>= shift_count;
-		cur.mperf.split.lo >>= shift_count;
-	}
-
-	if (cur.aperf.split.lo && cur.mperf.split.lo)
-		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
-	else
-		perf_percent = 0;
-#else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
-		int shift_count = 7;
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
-	}
-
-	if (cur.aperf.whole && cur.mperf.whole)
-		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
-	else
-		perf_percent = 0;
-#endif
-
-	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
+	ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
+	per_cpu(old_perf, cpu) = perf;
+
+	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
 	return retval;
 }
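With the helpers in place, the driver's old percent-based scaling (and its
32-bit special case) collapses into a single shift: the effective frequency
is max_freq * ratio >> APERFMPERF_SHIFT. A small worked example, continuing
the user-space sketch above with hypothetical numbers:

	/* Continuing the sketch: convert the fixed-point ratio back to a frequency. */
	#include <stdint.h>
	#include <stdio.h>

	#define APERFMPERF_SHIFT 10

	int main(void)
	{
		uint64_t max_freq_khz = 3000000;	/* hypothetical cpuinfo.max_freq: 3 GHz in kHz */
		uint64_t ratio = 512;			/* aperf/mperf of 0.5 in 10-bit fixed point */

		/* Mirrors: retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; */
		uint64_t measured_khz = (max_freq_khz * ratio) >> APERFMPERF_SHIFT;

		/* Prints 1500000, i.e. the CPU effectively ran at 1.5 GHz. */
		printf("measured freq = %llu kHz\n", (unsigned long long)measured_khz);
		return 0;
	}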