Commit 3f731ca6 authored by Paul Mackerras, committed by Ingo Molnar

perf_counter: Fix cpu migration counter

This fixes the cpu migration software counter to count
correctly even when contexts get swapped from one task to
another.  Previously the cpu migration counts reported by perf
stat were bogus, ranging from negative to several thousand for
a single "lat_ctx 2 8 32" run.  With this patch the cpu
migration count reported for "lat_ctx 2 8 32" is almost always
between 35 and 44.

This fixes the problem by adding a call into the perf_counter
code from set_task_cpu when tasks are migrated.  This enables
us to use the generic swcounter code (with some modifications)
for the cpu migration counter.

This modifies the swcounter code to allow a NULL regs pointer
to be passed in to perf_swcounter_ctx_event() etc.  The cpu
migration counter does this because there isn't necessarily a
pt_regs struct for the task available.  In this case, the
counter will not have interrupt capability - but the migration
counter didn't have interrupt capability before, so this is no
loss.
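
After this change the migration counter runs in pure counting mode: it can still be read with a plain read() even though it never generates sample interrupts. For reference, a minimal user-space sketch of such counting-mode use, written against today's perf_event_open(2) interface (the descendant of the perf_counter syscall this patch predates; PERF_COUNT_SW_CPU_MIGRATIONS is the modern spelling of the PERF_COUNT_CPU_MIGRATIONS event below):

/*
 * Hedged sketch: count migrations of the calling task in plain
 * counting mode (read(), no sampling interrupts), mirroring how
 * the fixed software counter is consumed.  Uses the modern
 * perf_event_open(2) ABI, not the 2009-era perf_counter syscall.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_CPU_MIGRATIONS;

	/* pid == 0, cpu == -1: this task, on whatever CPU it runs. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);		/* stand-in for real work that may migrate */

	if (read(fd, &count, sizeof(count)) != sizeof(count)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("cpu migrations: %lld\n", count);
	close(fd);
	return 0;
}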
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18979.35006.819769.416327@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f38b0820
include/linux/perf_counter.h
@@ -615,6 +615,8 @@ extern void perf_counter_munmap(unsigned long addr, unsigned long len,
 
 extern void perf_counter_comm(struct task_struct *tsk);
 
+extern void perf_counter_task_migration(struct task_struct *task, int cpu);
+
 #define MAX_STACK_DEPTH		255
 
 struct perf_callchain_entry {
@@ -668,6 +670,8 @@ perf_counter_munmap(unsigned long addr, unsigned long len,
 
 static inline void perf_counter_comm(struct task_struct *tsk)		{ }
 static inline void perf_counter_init(void)				{ }
+static inline void perf_counter_task_migration(struct task_struct *task,
+					       int cpu)			{ }
 #endif
 
 #endif /* __KERNEL__ */
kernel/perf_counter.c
@@ -2921,11 +2921,13 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	if (counter->hw_event.config != event_config)
 		return 0;
 
-	if (counter->hw_event.exclude_user && user_mode(regs))
-		return 0;
+	if (regs) {
+		if (counter->hw_event.exclude_user && user_mode(regs))
+			return 0;
 
-	if (counter->hw_event.exclude_kernel && !user_mode(regs))
-		return 0;
+		if (counter->hw_event.exclude_kernel && !user_mode(regs))
+			return 0;
+	}
 
 	return 1;
 }
@@ -2935,7 +2937,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 				 int nmi, struct pt_regs *regs, u64 addr)
 {
 	int neg = atomic64_add_negative(nr, &counter->hw.count);
 
-	if (counter->hw.irq_period && !neg)
+	if (counter->hw.irq_period && !neg && regs)
 		perf_swcounter_overflow(counter, nmi, regs, addr);
 }
@@ -3151,55 +3153,24 @@ static const struct pmu perf_ops_task_clock = {
 /*
  * Software counter: cpu migrations
  */
-
-static inline u64 get_cpu_migrations(struct perf_counter *counter)
-{
-	struct task_struct *curr = counter->ctx->task;
-
-	if (curr)
-		return curr->se.nr_migrations;
-	return cpu_nr_migrations(smp_processor_id());
-}
-
-static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
-{
-	u64 prev, now;
-	s64 delta;
-
-	prev = atomic64_read(&counter->hw.prev_count);
-	now = get_cpu_migrations(counter);
-
-	atomic64_set(&counter->hw.prev_count, now);
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-}
-
-static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
+void perf_counter_task_migration(struct task_struct *task, int cpu)
 {
-	cpu_migrations_perf_counter_update(counter);
-}
+	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_counter_context *ctx;
 
-static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
-{
-	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-		atomic64_set(&counter->hw.prev_count,
-			     get_cpu_migrations(counter));
-	return 0;
-}
+	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
+				 PERF_COUNT_CPU_MIGRATIONS,
+				 1, 1, NULL, 0);
 
-static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
-{
-	cpu_migrations_perf_counter_update(counter);
+	ctx = perf_pin_task_context(task);
+	if (ctx) {
+		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
+					 PERF_COUNT_CPU_MIGRATIONS,
+					 1, 1, NULL, 0);
+		perf_unpin_context(ctx);
+	}
 }
 
-static const struct pmu perf_ops_cpu_migrations = {
-	.enable		= cpu_migrations_perf_counter_enable,
-	.disable	= cpu_migrations_perf_counter_disable,
-	.read		= cpu_migrations_perf_counter_read,
-};
-
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
@@ -3272,11 +3243,8 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_PAGE_FAULTS_MIN:
 	case PERF_COUNT_PAGE_FAULTS_MAJ:
 	case PERF_COUNT_CONTEXT_SWITCHES:
-		pmu = &perf_ops_generic;
-		break;
 	case PERF_COUNT_CPU_MIGRATIONS:
-		if (!counter->hw_event.exclude_kernel)
-			pmu = &perf_ops_cpu_migrations;
+		pmu = &perf_ops_generic;
 		break;
 	}
kernel/sched.c
@@ -1977,6 +1977,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
+		perf_counter_task_migration(p, new_cpu);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;