Commit 9e350de3 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Accurate period data

We currently log hw.sample_period for PERF_SAMPLE_PERIOD, however this is
incorrect. When we adjust the period, it will only take effect the next
cycle but report it for the current cycle. So when we adjust the period
for every cycle, we're always wrong.

Solve this by keeping track of the last_period.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent df1a132b
...@@ -767,6 +767,7 @@ static void power_pmu_unthrottle(struct perf_counter *counter) ...@@ -767,6 +767,7 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
perf_disable(); perf_disable();
power_pmu_read(counter); power_pmu_read(counter);
left = counter->hw.sample_period; left = counter->hw.sample_period;
counter->hw.last_period = left;
val = 0; val = 0;
if (left < 0x80000000L) if (left < 0x80000000L)
val = 0x80000000L - left; val = 0x80000000L - left;
...@@ -937,7 +938,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) ...@@ -937,7 +938,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
counter->hw.config = events[n]; counter->hw.config = events[n];
counter->hw.counter_base = cflags[n]; counter->hw.counter_base = cflags[n];
atomic64_set(&counter->hw.period_left, counter->hw.sample_period); counter->hw.last_period = counter->hw.sample_period;
atomic64_set(&counter->hw.period_left, counter->hw.last_period);
/* /*
* See if we need to reserve the PMU. * See if we need to reserve the PMU.
...@@ -1002,8 +1004,9 @@ static void record_and_restart(struct perf_counter *counter, long val, ...@@ -1002,8 +1004,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
*/ */
if (record) { if (record) {
struct perf_sample_data data = { struct perf_sample_data data = {
.regs = regs, .regs = regs,
.addr = 0, .addr = 0,
.period = counter->hw.last_period,
}; };
if (counter->attr.sample_type & PERF_SAMPLE_ADDR) { if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
......
...@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) ...@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
if (!hwc->sample_period) { if (!hwc->sample_period) {
hwc->sample_period = x86_pmu.max_period; hwc->sample_period = x86_pmu.max_period;
hwc->last_period = hwc->sample_period;
atomic64_set(&hwc->period_left, hwc->sample_period); atomic64_set(&hwc->period_left, hwc->sample_period);
} }
...@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter, ...@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter,
if (unlikely(left <= -period)) { if (unlikely(left <= -period)) {
left = period; left = period;
atomic64_set(&hwc->period_left, left); atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1; ret = 1;
} }
if (unlikely(left <= 0)) { if (unlikely(left <= 0)) {
left += period; left += period;
atomic64_set(&hwc->period_left, left); atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1; ret = 1;
} }
/* /*
...@@ -1257,9 +1260,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) ...@@ -1257,9 +1260,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
if (val & (1ULL << (x86_pmu.counter_bits - 1))) if (val & (1ULL << (x86_pmu.counter_bits - 1)))
continue; continue;
/* counter overflow */ /*
handled = 1; * counter overflow
inc_irq_stat(apic_perf_irqs); */
handled = 1;
data.period = counter->hw.last_period;
if (!x86_perf_counter_set_period(counter, hwc, idx)) if (!x86_perf_counter_set_period(counter, hwc, idx))
continue; continue;
...@@ -1267,6 +1273,9 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) ...@@ -1267,6 +1273,9 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
amd_pmu_disable_counter(hwc, idx); amd_pmu_disable_counter(hwc, idx);
} }
if (handled)
inc_irq_stat(apic_perf_irqs);
return handled; return handled;
} }
......
...@@ -366,6 +366,7 @@ struct hw_perf_counter { ...@@ -366,6 +366,7 @@ struct hw_perf_counter {
}; };
atomic64_t prev_count; atomic64_t prev_count;
u64 sample_period; u64 sample_period;
u64 last_period;
atomic64_t period_left; atomic64_t period_left;
u64 interrupts; u64 interrupts;
...@@ -606,8 +607,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, ...@@ -606,8 +607,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
extern void perf_counter_update_userpage(struct perf_counter *counter); extern void perf_counter_update_userpage(struct perf_counter *counter);
struct perf_sample_data { struct perf_sample_data {
struct pt_regs *regs; struct pt_regs *regs;
u64 addr; u64 addr;
u64 period;
}; };
extern int perf_counter_overflow(struct perf_counter *counter, int nmi, extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
......
...@@ -2495,7 +2495,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, ...@@ -2495,7 +2495,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
perf_output_put(&handle, cpu_entry); perf_output_put(&handle, cpu_entry);
if (sample_type & PERF_SAMPLE_PERIOD) if (sample_type & PERF_SAMPLE_PERIOD)
perf_output_put(&handle, counter->hw.sample_period); perf_output_put(&handle, data->period);
/* /*
* XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult. * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
...@@ -3040,11 +3040,13 @@ static void perf_swcounter_set_period(struct perf_counter *counter) ...@@ -3040,11 +3040,13 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
if (unlikely(left <= -period)) { if (unlikely(left <= -period)) {
left = period; left = period;
atomic64_set(&hwc->period_left, left); atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
} }
if (unlikely(left <= 0)) { if (unlikely(left <= 0)) {
left += period; left += period;
atomic64_add(period, &hwc->period_left); atomic64_add(period, &hwc->period_left);
hwc->last_period = period;
} }
atomic64_set(&hwc->prev_count, -left); atomic64_set(&hwc->prev_count, -left);
...@@ -3086,8 +3088,9 @@ static void perf_swcounter_overflow(struct perf_counter *counter, ...@@ -3086,8 +3088,9 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
int nmi, struct pt_regs *regs, u64 addr) int nmi, struct pt_regs *regs, u64 addr)
{ {
struct perf_sample_data data = { struct perf_sample_data data = {
.regs = regs, .regs = regs,
.addr = addr, .addr = addr,
.period = counter->hw.last_period,
}; };
perf_swcounter_update(counter); perf_swcounter_update(counter);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment