Commit 8a7b8cb9 authored by Paul Mackerras, committed by Ingo Molnar

perf_counter: powerpc: Implement interrupt throttling

This implements interrupt throttling on powerpc.  Since we don't have
individual count enable/disable or interrupt enable/disable controls
per counter, this simply sets the hardware counter to 0, meaning that
it will not interrupt again until it has counted 2^31 counts, which
will take at least 2^30 cycles assuming a maximum of 2 counts per
cycle.  Also, we set counter->hw.period_left to the maximum possible
value (2^63 - 1), so we won't report overflows for this counter for
the foreseeable future.
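
For reference, a minimal standalone sketch of the arithmetic above; it is
illustrative only and not part of the patch:

/* Illustrative only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* PMC starts at 0 and interrupts when it reaches 2^31 counts. */
	uint64_t counts_until_irq = 1ULL << 31;

	/* At most 2 counts per cycle means at least 2^30 quiet cycles. */
	uint64_t min_quiet_cycles = counts_until_irq / 2;

	/* period_left parked at the largest positive s64 value, 2^63 - 1. */
	int64_t parked_period_left = (int64_t)(~0ULL >> 1);

	printf("counts until next interrupt: %llu\n",
	       (unsigned long long)counts_until_irq);
	printf("minimum cycles until next interrupt: %llu\n",
	       (unsigned long long)min_quiet_cycles);
	printf("parked period_left: %lld\n",
	       (long long)parked_period_left);
	return 0;
}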

The unthrottle operation restores counter->hw.period_left and the
hardware counter so that we will once again report a counter overflow
after counter->hw.irq_period counts.
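
As a rough sketch of the restart value that the unthrottle path computes
(the helper name pmc_restart_value is illustrative, not from the patch):
the PMC interrupts when it reaches 2^31, so a period shorter than 2^31 is
programmed as 2^31 minus the period, while longer periods start the PMC
from 0 and rely on the period_left accounting:

/* Illustrative helper, not part of the patch. */
#include <stdint.h>

/*
 * Value to program into a PMC so that it next interrupts after
 * "irq_period" more counts; mirrors the calculation done in
 * power_pmu_unthrottle() below.
 */
static int64_t pmc_restart_value(int64_t irq_period)
{
	int64_t val = 0;	/* default: count a full 2^31 before interrupting */

	if (irq_period < 0x80000000L)
		val = 0x80000000L - irq_period;	/* interrupt after irq_period counts */
	return val;
}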

[ Impact: new perfcounters robustness feature on PowerPC ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <18971.35823.643362.446774@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0127c3ea
@@ -740,10 +740,37 @@ static void power_pmu_disable(struct perf_counter *counter)
 	local_irq_restore(flags);
 }
 
+/*
+ * Re-enable interrupts on a counter after they were throttled
+ * because they were coming too fast.
+ */
+static void power_pmu_unthrottle(struct perf_counter *counter)
+{
+	s64 val, left;
+	unsigned long flags;
+
+	if (!counter->hw.idx || !counter->hw.irq_period)
+		return;
+	local_irq_save(flags);
+	perf_disable();
+	power_pmu_read(counter);
+	left = counter->hw.irq_period;
+	val = 0;
+	if (left < 0x80000000L)
+		val = 0x80000000L - left;
+	write_pmc(counter->hw.idx, val);
+	atomic64_set(&counter->hw.prev_count, val);
+	atomic64_set(&counter->hw.period_left, left);
+	perf_counter_update_userpage(counter);
+	perf_enable();
+	local_irq_restore(flags);
+}
+
 struct pmu power_pmu = {
 	.enable		= power_pmu_enable,
 	.disable	= power_pmu_disable,
 	.read		= power_pmu_read,
+	.unthrottle	= power_pmu_unthrottle,
 };
 
 /*
@@ -957,10 +984,6 @@ static void record_and_restart(struct perf_counter *counter, long val,
 		if (left < 0x80000000L)
 			val = 0x80000000L - left;
 	}
-	write_pmc(counter->hw.idx, val);
-	atomic64_set(&counter->hw.prev_count, val);
-	atomic64_set(&counter->hw.period_left, left);
-	perf_counter_update_userpage(counter);
 
 	/*
 	 * Finally record data if requested.
@@ -983,8 +1006,23 @@ static void record_and_restart(struct perf_counter *counter, long val,
 		if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
 			addr = mfspr(SPRN_SDAR);
 	}
-	perf_counter_overflow(counter, nmi, regs, addr);
+	if (perf_counter_overflow(counter, nmi, regs, addr)) {
+		/*
+		 * Interrupts are coming too fast - throttle them
+		 * by setting the counter to 0, so it will be
+		 * at least 2^30 cycles until the next interrupt
+		 * (assuming each counter counts at most 2 counts
+		 * per cycle).
+		 */
+		val = 0;
+		left = ~0ULL >> 1;
+	}
 	}
+
+	write_pmc(counter->hw.idx, val);
+	atomic64_set(&counter->hw.prev_count, val);
+	atomic64_set(&counter->hw.period_left, left);
+	perf_counter_update_userpage(counter);
 }
 
 /*