Commit a96bbc16 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Fix race in counter initialization

We need the PID namespace and counter ID to be available when the
counter overflows and a sample event has to be generated.

[ Impact: fix kernel crash with high-frequency sampling ]
Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
[ fixed a further crash and cleaned up the initialization a bit ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8229289b
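
The race being fixed is an ordering problem: counter->ns and counter->id were previously filled in by sys_perf_counter_open() only after the counter had already been hooked up, so a high-frequency counter could overflow and try to emit a sample while those fields were still unset. The patch moves the initialization into perf_counter_alloc(), before the counter becomes visible. Below is a minimal user-space sketch of that "initialize identity before publishing" pattern; the names (counter_alloc, overflow_handler, published) and the use of C11 atomics plus pthreads are illustrative assumptions, not the kernel code.

/*
 * Hypothetical, simplified sketch of the race: if id/ns are set only
 * after the counter is published to the sampling path, an overflow can
 * observe them uninitialized. Setting them during allocation avoids it.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct counter {
	long id;	/* drawn from a global sequence, like perf_counter_id */
	void *ns;	/* stands in for the pid namespace reference */
};

static atomic_long counter_id_seq;
static struct counter *_Atomic published;	/* "visible to overflow" slot */

/* Overflow/sampling side: may run as soon as the counter is published. */
static void *overflow_handler(void *arg)
{
	struct counter *c;

	while ((c = atomic_load(&published)) == NULL)
		;	/* spin until a counter appears */
	printf("sample for counter id=%ld ns=%p\n", c->id, c->ns);
	return NULL;
}

/* Fixed allocation path: identity is set before anyone can see the counter. */
static struct counter *counter_alloc(void)
{
	struct counter *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->id = atomic_fetch_add(&counter_id_seq, 1) + 1;
	c->ns = &counter_id_seq;	/* placeholder for get_pid_ns(...) */
	return c;
}

int main(void)
{
	pthread_t t;
	struct counter *c;

	pthread_create(&t, NULL, overflow_handler, NULL);

	c = counter_alloc();		/* id/ns are valid here ...         */
	atomic_store(&published, c);	/* ... before the counter is public */

	/*
	 * The buggy ordering would publish first and set c->id/c->ns
	 * afterwards, letting the handler sample a half-built counter.
	 */
	pthread_join(t, NULL);
	free(c);
	return 0;
}

Built with cc -pthread, the sketch always prints a valid id because the counter is stored to the shared slot only after its fields are set; swapping the two marked lines reproduces the half-initialized observation the commit message describes.
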
@@ -48,6 +48,8 @@ int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
 
+static atomic64_t perf_counter_id;
+
 /*
  * Lock for (sysadmin-configurable) counter reservations:
  */
@@ -3358,7 +3360,11 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	counter->ctx		= ctx;
 	counter->oncpu		= -1;
 
+	counter->ns	= get_pid_ns(current->nsproxy->pid_ns);
+	counter->id	= atomic64_inc_return(&perf_counter_id);
+
 	counter->state		= PERF_COUNTER_STATE_INACTIVE;
+
 	if (attr->disabled)
 		counter->state = PERF_COUNTER_STATE_OFF;
@@ -3402,6 +3408,8 @@ done:
 	err = PTR_ERR(pmu);
 	if (err) {
+		if (counter->ns)
+			put_pid_ns(counter->ns);
 		kfree(counter);
 		return ERR_PTR(err);
 	}
@@ -3419,8 +3427,6 @@ done:
 	return counter;
 }
 
-static atomic64_t perf_counter_id;
-
 /**
  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
  *
@@ -3515,9 +3521,6 @@ SYSCALL_DEFINE5(perf_counter_open,
 	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
 	mutex_unlock(&current->perf_counter_mutex);
 
-	counter->ns = get_pid_ns(current->nsproxy->pid_ns);
-	counter->id = atomic64_inc_return(&perf_counter_id);
-
 	fput_light(counter_file, fput_needed2);
 out_fput: