Commit 89713ed1 authored by Anton Blanchard, committed by Benjamin Herrenschmidt

powerpc: Add timer, performance monitor and machine check counts to /proc/interrupts

With NO_HZ it is useful to know how often the decrementer is going off. The
patch below adds an entry for it to /proc/interrupts and also adds it into the
/proc/stat summaries.

While here, I added counts for performance monitoring and machine check
exceptions. I found it useful to keep an eye on the PMU exception rate
when using the perf tool. Since it's possible to take a completely
handled machine check on a System p box, it also seems like a good idea to
keep a machine check summary.

The event naming matches x86 to keep gratuitous differences to a minimum.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent fc380c0c
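
For reference, the new rows appear in /proc/interrupts roughly as follows (illustrative output for a hypothetical two-CPU machine; the counts are invented and the exact column widths depend on the number of interrupt sources):

               CPU0       CPU1
    LOC:      98765      87654   Local timer interrupts
    CNT:        321        123   Performance monitoring interrupts
    MCE:          0          0   Machine check exceptions
    BAD:          0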
@@ -6,6 +6,9 @@
 typedef struct {
 	unsigned int __softirq_pending;
+	unsigned int timer_irqs;
+	unsigned int pmu_irqs;
+	unsigned int mce_exceptions;
 } ____cacheline_aligned irq_cpustat_t;
 DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
@@ -19,4 +22,10 @@ static inline void ack_bad_irq(unsigned int irq)
 	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
 }
+extern u64 arch_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu	arch_irq_stat_cpu
+extern u64 arch_irq_stat(void);
+#define arch_irq_stat		arch_irq_stat
 #endif /* _ASM_POWERPC_HARDIRQ_H */
@@ -196,6 +196,21 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	}
 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
+	seq_printf(p, "%*s: ", prec, "LOC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
+	seq_printf(p, "  Local timer interrupts\n");
+	seq_printf(p, "%*s: ", prec, "CNT");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
+	seq_printf(p, "  Performance monitoring interrupts\n");
+	seq_printf(p, "%*s: ", prec, "MCE");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
+	seq_printf(p, "  Machine check exceptions\n");
 	seq_printf(p, "%*s: %10u\n", prec, "BAD", ppc_spurious_interrupts);
 	return 0;
@@ -258,6 +273,26 @@ out:
 	return 0;
 }
+/*
+ * /proc/stat helpers
+ */
+u64 arch_irq_stat_cpu(unsigned int cpu)
+{
+	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+	sum += per_cpu(irq_stat, cpu).pmu_irqs;
+	sum += per_cpu(irq_stat, cpu).mce_exceptions;
+	return sum;
+}
+u64 arch_irq_stat(void)
+{
+	u64 sum = ppc_spurious_interrupts;
+	return sum;
+}
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(cpumask_t map)
 {
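
The arch_irq_stat_cpu() and arch_irq_stat() helpers added above are the hooks the generic /proc/stat code looks for: because hardirq.h also defines the matching arch_irq_stat_cpu/arch_irq_stat macros, the generic code folds these counts into the total interrupt count. Below is a minimal sketch of the consumer side, paraphrased for illustration rather than quoted from the kernel; the helper name proc_stat_irq_total is made up here.

/*
 * Rough sketch of how the generic /proc/stat code consumes the arch hooks.
 * Paraphrased for illustration; not the literal fs/proc/stat.c source.
 */
#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu)	0	/* arch has no extra per-CPU counts */
#endif
#ifndef arch_irq_stat
#define arch_irq_stat()		0	/* arch has no extra global counts */
#endif

static u64 proc_stat_irq_total(void)	/* hypothetical helper name */
{
	u64 sum = 0;
	int cpu, irq;

	for_each_possible_cpu(cpu) {
		for (irq = 0; irq < nr_irqs; irq++)
			sum += kstat_irqs_cpu(irq, cpu);	/* ordinary device interrupts */
		sum += arch_irq_stat_cpu(cpu);	/* timer_irqs + pmu_irqs + mce_exceptions on powerpc */
	}
	sum += arch_irq_stat();			/* ppc_spurious_interrupts on powerpc */

	return sum;
}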
@@ -575,6 +575,8 @@ void timer_interrupt(struct pt_regs * regs)
 	trace_timer_interrupt_entry(regs);
+	__get_cpu_var(irq_stat).timer_irqs++;
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continuue to take decrementer exceptions */
 	set_dec(DECREMENTER_MAX);
@@ -483,6 +483,8 @@ void machine_check_exception(struct pt_regs *regs)
 {
 	int recover = 0;
+	__get_cpu_var(irq_stat).mce_exceptions++;
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
@@ -965,6 +967,8 @@ void vsx_unavailable_exception(struct pt_regs *regs)
 void performance_monitor_exception(struct pt_regs *regs)
 {
+	__get_cpu_var(irq_stat).pmu_irqs++;
 	perf_irq(regs);
 }