Commit c619b8ff authored by Robert Richter, committed by Ingo Molnar

perf_counter, x86: introduce max_period variable

The counter period that can be programmed differs between x86 PMU models. This
introduces a max_period value and allows the generic implementation
to check the maximum period for all models.

[ Impact: generalize code ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-27-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4b7bfd0d
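
The clamp this patch moves into the generic path is small enough to model outside the kernel. The following is a minimal userspace sketch, assuming only what the diff below shows: the field name and the two max_period values are taken from the patch, while the struct, main() and the sample period are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/* Stripped-down model of the x86_pmu descriptor: each PMU
 * implementation declares the largest period it can program. */
struct pmu_model {
	const char *name;
	uint64_t max_period;
};

/* Generic clamp mirroring __hw_perf_counter_init() after the patch:
 * a non-positive or too-large requested period is forced to max_period. */
static uint64_t clamp_irq_period(uint64_t irq_period, const struct pmu_model *pmu)
{
	if ((int64_t)irq_period <= 0 || irq_period > pmu->max_period)
		return pmu->max_period;
	return irq_period;
}

int main(void)
{
	const struct pmu_model intel = { "intel", (1ULL << 31) - 1 };
	const struct pmu_model amd   = { "amd",   (1ULL << 47) - 1 };
	/* A period that fits AMD's 48-bit counters but not Intel's 32-bit view. */
	uint64_t period = 1ULL << 40;

	printf("%s: %llu\n", intel.name,
	       (unsigned long long)clamp_irq_period(period, &intel));
	printf("%s: %llu\n", amd.name,
	       (unsigned long long)clamp_irq_period(period, &amd));
	return 0;
}

With these values the Intel model clamps the 2^40 period down to 2^31 - 1 while the AMD model accepts it unchanged, which is exactly the per-model behavior the generic check gains over the old Intel-only special case.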
@@ -54,6 +54,7 @@ struct x86_pmu {
 	int		num_counters_fixed;
 	int		counter_bits;
 	u64		counter_mask;
+	u64		max_period;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -279,14 +280,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	hwc->nmi = 1;
 
 	hwc->irq_period		= hw_event->irq_period;
-	/*
-	 * Intel PMCs cannot be accessed sanely above 32 bit width,
-	 * so we install an artificial 1<<31 period regardless of
-	 * the generic counter period:
-	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
-			hwc->irq_period = 0x7FFFFFFF;
+	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
+		hwc->irq_period = x86_pmu.max_period;
 
 	atomic64_set(&hwc->period_left, hwc->irq_period);
 
@@ -910,6 +905,12 @@ static struct x86_pmu intel_pmu = {
 	.event_map		= intel_pmu_event_map,
 	.raw_event		= intel_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	/*
+	 * Intel PMCs cannot be accessed sanely above 32 bit width,
+	 * so we install an artificial 1<<31 period regardless of
+	 * the generic counter period:
+	 */
+	.max_period		= (1ULL << 31) - 1,
 };
 
 static struct x86_pmu amd_pmu = {
@@ -927,6 +928,8 @@ static struct x86_pmu amd_pmu = {
 	.num_counters		= 4,
 	.counter_bits		= 48,
 	.counter_mask		= (1ULL << 48) - 1,
+	/* use highest bit to detect overflow */
+	.max_period		= (1ULL << 47) - 1,
 };
 
 static int intel_pmu_init(void)
@@ -999,6 +1002,7 @@ void __init init_hw_perf_counters(void)
 	perf_max_counters = x86_pmu.num_counters;
 
 	pr_info("... value mask:      %016Lx\n", x86_pmu.counter_mask);
+	pr_info("... max period:      %016Lx\n", x86_pmu.max_period);
 
 	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
 		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
...
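
A note on the AMD value: the "use highest bit to detect overflow" comment relies on how perf arms a counter, namely by programming the negated period so the PMC counts up and wraps after 'period' events. Below is a sketch of that arithmetic under that assumption; the macros and main() are invented for illustration, only the 48-bit width and the (1ULL << 47) - 1 limit come from the patch.

#include <stdio.h>
#include <stdint.h>

#define COUNTER_BITS	48
#define COUNTER_MASK	((1ULL << COUNTER_BITS) - 1)
#define MAX_PERIOD	((1ULL << (COUNTER_BITS - 1)) - 1)	/* (1ULL << 47) - 1 */

int main(void)
{
	/* Arm the counter with the negated period: it counts up
	 * and wraps to zero after 'period' events. */
	uint64_t period  = MAX_PERIOD;
	uint64_t counter = (0 - period) & COUNTER_MASK;

	/* Because period <= 2^47 - 1, the armed value is always >= 2^47,
	 * so bit 47 is set and stays set right up to the wrap. */
	printf("armed:      %016llx (bit 47 = %llu)\n",
	       (unsigned long long)counter,
	       (unsigned long long)((counter >> 47) & 1));

	counter = (counter + period) & COUNTER_MASK;	/* simulate 'period' events */
	printf("after wrap: %016llx (bit 47 = %llu)\n",
	       (unsigned long long)counter,
	       (unsigned long long)((counter >> 47) & 1));
	return 0;
}

Capping max_period at 2^47 - 1 therefore keeps a cleared bit 47 as a reliable overflow indicator; a larger period would arm the counter below 2^47 and make the high bit useless for that test.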