Commit fccc714b authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Sanitize counter->mutex

s/counter->mutex/counter->child_mutex/ and make sure it's only
used to protect child_list.

The usage in __perf_counter_exit_task() doesn't appear to be
problematic since ctx->mutex also covers anything related to fd
tear-down.
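
In short, the rule after this patch: counter->child_mutex serializes only
additions to, removals from, and walks of counter->child_list, while
attaching a counter to or detaching it from a context stays under
ctx->mutex. A minimal sketch of that rule (simplified struct; the
for_each_child() helper name is hypothetical, modeled on
perf_counter_for_each_child() in the diff below):

#include <linux/list.h>
#include <linux/mutex.h>

struct perf_counter {
	/* child_mutex protects child_list, and nothing else */
	struct mutex		child_mutex;
	struct list_head	child_list;	/* inherited child counters */
	struct perf_counter	*parent;	/* counter we were cloned from */
	/* attach/detach to a context is covered by ctx->mutex instead */
};

/* Walk a counter and all of its inherited children under child_mutex. */
static void for_each_child(struct perf_counter *counter,
			   void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	mutex_lock(&counter->child_mutex);
	func(counter);
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);
	mutex_unlock(&counter->child_mutex);
}

The field layout follows the patch; the helper name does not.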
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163012.533186528@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e220d2dc
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -452,9 +452,6 @@ struct perf_counter {
 	struct perf_counter_context	*ctx;
 	struct file			*filp;
 
-	struct perf_counter		*parent;
-	struct list_head		child_list;
-
 	/*
 	 * These accumulate total time (in nanoseconds) that children
 	 * counters have been enabled and running, respectively.
@@ -465,7 +462,9 @@ struct perf_counter {
 	/*
 	 * Protect attach/detach and child_list:
 	 */
-	struct mutex			mutex;
+	struct mutex			child_mutex;
+	struct list_head		child_list;
+	struct perf_counter		*parent;
 
 	int				oncpu;
 	int				cpu;
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -111,6 +111,10 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
+/*
+ * Add a counter to the lists for its context.
+ * Must be called with ctx->mutex and ctx->lock held.
+ */
 static void
 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
@@ -136,7 +140,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 
 /*
  * Remove a counter from the lists for its context.
- * Must be called with counter->mutex and ctx->mutex held.
+ * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
@@ -276,7 +280,7 @@ static void __perf_counter_remove_from_context(void *info)
 /*
  * Remove the counter from a task's (or a CPU's) list of counters.
  *
- * Must be called with counter->mutex and ctx->mutex held.
+ * Must be called with ctx->mutex held.
  *
  * CPU counters are removed with a smp call. For task counters we only
  * call when the task is on a CPU.
@@ -1407,11 +1411,7 @@ static int perf_release(struct inode *inode, struct file *file)
 	file->private_data = NULL;
 
 	mutex_lock(&ctx->mutex);
-	mutex_lock(&counter->mutex);
-
 	perf_counter_remove_from_context(counter);
-
-	mutex_unlock(&counter->mutex);
 	mutex_unlock(&ctx->mutex);
 
 	free_counter(counter);
@@ -1437,7 +1437,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
-	mutex_lock(&counter->mutex);
+	mutex_lock(&counter->child_mutex);
 	values[0] = perf_counter_read(counter);
 	n = 1;
 	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
@@ -1446,7 +1446,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
-	mutex_unlock(&counter->mutex);
+	mutex_unlock(&counter->child_mutex);
 
 	if (count < n * sizeof(u64))
 		return -EINVAL;
@@ -1510,11 +1510,11 @@ static void perf_counter_for_each_child(struct perf_counter *counter,
 {
 	struct perf_counter *child;
 
-	mutex_lock(&counter->mutex);
+	mutex_lock(&counter->child_mutex);
 	func(counter);
 	list_for_each_entry(child, &counter->child_list, child_list)
 		func(child);
-	mutex_unlock(&counter->mutex);
+	mutex_unlock(&counter->child_mutex);
 }
 
 static void perf_counter_for_each(struct perf_counter *counter,
@@ -1522,11 +1522,11 @@ static void perf_counter_for_each(struct perf_counter *counter,
 {
 	struct perf_counter *child;
 
-	mutex_lock(&counter->mutex);
+	mutex_lock(&counter->child_mutex);
 	perf_counter_for_each_sibling(counter, func);
 	list_for_each_entry(child, &counter->child_list, child_list)
 		perf_counter_for_each_sibling(child, func);
-	mutex_unlock(&counter->mutex);
+	mutex_unlock(&counter->child_mutex);
 }
 
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -3106,7 +3106,9 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	if (!group_leader)
 		group_leader = counter;
 
-	mutex_init(&counter->mutex);
+	mutex_init(&counter->child_mutex);
+	INIT_LIST_HEAD(&counter->child_list);
+
 	INIT_LIST_HEAD(&counter->list_entry);
 	INIT_LIST_HEAD(&counter->event_entry);
 	INIT_LIST_HEAD(&counter->sibling_list);
@@ -3114,8 +3116,6 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 
 	mutex_init(&counter->mmap_mutex);
 
-	INIT_LIST_HEAD(&counter->child_list);
-
 	counter->cpu = cpu;
 	counter->hw_event = *hw_event;
 	counter->group_leader = group_leader;
@@ -3346,10 +3346,9 @@ inherit_counter(struct perf_counter *parent_counter,
 	/*
 	 * Link this into the parent counter's child list
 	 */
-	mutex_lock(&parent_counter->mutex);
+	mutex_lock(&parent_counter->child_mutex);
 	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
-
-	mutex_unlock(&parent_counter->mutex);
+	mutex_unlock(&parent_counter->child_mutex);
 
 	return child_counter;
 }
@@ -3396,9 +3395,9 @@ static void sync_child_counter(struct perf_counter *child_counter,
 	/*
 	 * Remove this counter from the parent's list
 	 */
-	mutex_lock(&parent_counter->mutex);
+	mutex_lock(&parent_counter->child_mutex);
 	list_del_init(&child_counter->child_list);
-	mutex_unlock(&parent_counter->mutex);
+	mutex_unlock(&parent_counter->child_mutex);
 
 	/*
 	 * Release the parent counter, if this was the last
@@ -3414,17 +3413,9 @@ __perf_counter_exit_task(struct task_struct *child,
 {
 	struct perf_counter *parent_counter;
 
-	/*
-	 * Protect against concurrent operations on child_counter
-	 * due its fd getting closed, etc.
-	 */
-	mutex_lock(&child_counter->mutex);
-
 	update_counter_times(child_counter);
 	list_del_counter(child_counter, child_ctx);
 
-	mutex_unlock(&child_counter->mutex);
-
 	parent_counter = child_counter->parent;
 	/*
 	 * It can happen that parent exits first, and has counters