Commit 6ab423e0 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Propagate inheritance failures down the fork() path

Fail fork() when we fail inheritance for some reason (-ENOMEM most likely).
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525124600.324656474@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 771d7cde
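
The effect of the change is that an inheritance failure during fork() is reported to the caller instead of being silently ignored, so a child is never created with a partially inherited counter context. As a rough, hypothetical userspace illustration (not part of this commit), assuming the failure is the -ENOMEM case mentioned above, the error surfaces as a failed fork():

/* hypothetical illustration only -- not part of the kernel patch below */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		/*
		 * If perf_counter_init_task() fails inside copy_process(),
		 * the error (most likely -ENOMEM) propagates out of the
		 * clone/fork system call and is visible here via errno.
		 */
		if (errno == ENOMEM)
			fprintf(stderr, "fork: out of memory\n");
		else
			perror("fork");
		return 1;
	}

	if (pid == 0)
		_exit(0);	/* child does nothing */

	return 0;		/* parent */
}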
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -566,7 +566,7 @@ extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
 extern void perf_counter_task_sched_out(struct task_struct *task,
 					struct task_struct *next, int cpu);
 extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern void perf_counter_init_task(struct task_struct *child);
+extern int perf_counter_init_task(struct task_struct *child);
 extern void perf_counter_exit_task(struct task_struct *child);
 extern void perf_counter_do_pending(void);
 extern void perf_counter_print_debug(void);
@@ -631,7 +631,7 @@ perf_counter_task_sched_out(struct task_struct *task,
 			     struct task_struct *next, int cpu)		{ }
 static inline void
 perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
-static inline void perf_counter_init_task(struct task_struct *child)	{ }
+static inline int perf_counter_init_task(struct task_struct *child)	{ }
 static inline void perf_counter_exit_task(struct task_struct *child)	{ }
 static inline void perf_counter_do_pending(void)			{ }
 static inline void perf_counter_print_debug(void)			{ }
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1095,7 +1095,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
-	perf_counter_init_task(p);
+	retval = perf_counter_init_task(p);
+	if (retval)
+		goto bad_fork_cleanup_policy;
+
 	if ((retval = audit_alloc(p)))
 		goto bad_fork_cleanup_policy;
 
@@ -1295,6 +1298,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
 	audit_free(p);
 bad_fork_cleanup_policy:
+	perf_counter_exit_task(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
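
The new perf_counter_exit_task() call under bad_fork_cleanup_policy keeps copy_process()'s goto-based unwinding consistent: once perf_counter_init_task() may have set up state, the failure path itself and every later failure (audit_alloc() onwards) must tear that state down again. A self-contained, hypothetical sketch of the unwind idiom, using stand-in names and userspace malloc instead of kernel allocations:

#include <stdlib.h>

/* Hypothetical stand-in for a two-step setup with cleanup labels. */
struct ctx {
	void *sched_state;	/* stands in for earlier setup steps */
	void *perf_state;	/* stands in for perf_counter_init_task() */
};

static int ctx_init(struct ctx *c)
{
	int err = -1;		/* stands in for -ENOMEM */

	c->sched_state = malloc(64);
	if (!c->sched_state)
		goto out;

	c->perf_state = malloc(64);
	if (!c->perf_state)
		goto cleanup_sched;	/* fail the whole operation */

	return 0;

cleanup_sched:
	/* later failures would also jump here, like bad_fork_cleanup_policy */
	free(c->sched_state);
out:
	return err;
}

int main(void)
{
	struct ctx c;

	if (ctx_init(&c))
		return 1;

	free(c.perf_state);
	free(c.sched_state);
	return 0;
}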
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3434,18 +3434,23 @@ again:
 /*
  * Initialize the perf_counter context in task_struct
  */
-void perf_counter_init_task(struct task_struct *child)
+int perf_counter_init_task(struct task_struct *child)
 {
 	struct perf_counter_context *child_ctx, *parent_ctx;
 	struct perf_counter *counter;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	int ret = 0;
 
 	child->perf_counter_ctxp = NULL;
 
 	mutex_init(&child->perf_counter_mutex);
 	INIT_LIST_HEAD(&child->perf_counter_list);
 
+	parent_ctx = parent->perf_counter_ctxp;
+	if (likely(!parent_ctx || !parent_ctx->nr_counters))
+		return 0;
+
 	/*
 	 * This is executed from the parent task context, so inherit
 	 * counters that have been marked for cloning.
@@ -3454,11 +3459,7 @@ void perf_counter_init_task(struct task_struct *child)
 
 	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
 	if (!child_ctx)
-		return;
-
-	parent_ctx = parent->perf_counter_ctxp;
-	if (likely(!parent_ctx || !parent_ctx->nr_counters))
-		return;
+		return -ENOMEM;
 
 	__perf_counter_init_context(child_ctx, child);
 	child->perf_counter_ctxp = child_ctx;
@@ -3482,8 +3483,9 @@ void perf_counter_init_task(struct task_struct *child)
 			continue;
 		}
 
-		if (inherit_group(counter, parent,
-				  parent_ctx, child, child_ctx)) {
+		ret = inherit_group(counter, parent, parent_ctx,
+				    child, child_ctx);
+		if (ret) {
 			inherited_all = 0;
 			break;
 		}
@@ -3505,6 +3507,8 @@ void perf_counter_init_task(struct task_struct *child)
 	}
 
 	mutex_unlock(&parent_ctx->mutex);
+
+	return ret;
 }
 
 static void __cpuinit perf_counter_init_cpu(int cpu)
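
Besides returning an error code, the function above is reordered so that the cheap "parent has no counters" check runs before the kmalloc(), so the common case returns 0 without allocating anything. A hypothetical, self-contained sketch of that shape, with stand-in names and userspace allocation:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in type; not the kernel structure. */
struct parent_state {
	int nr_items;
};

/* Check the cheap precondition first, allocate only when needed. */
static int init_child_state(const struct parent_state *parent, char **child_buf)
{
	*child_buf = NULL;

	/* common case: nothing to inherit, succeed without allocating */
	if (!parent || !parent->nr_items)
		return 0;

	*child_buf = malloc(128);
	if (!*child_buf)
		return -1;	/* stands in for -ENOMEM */

	memset(*child_buf, 0, 128);
	return 0;
}

int main(void)
{
	struct parent_state p = { .nr_items = 0 };
	char *buf;
	int err = init_child_state(&p, &buf);

	free(buf);		/* free(NULL) is a no-op */
	return err ? 1 : 0;
}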