Commit 1dce8d99 authored by Ingo Molnar

perf_counter: convert perf_resource_mutex to a spinlock

Now percpu counters can be initialized very early. But the init
sequence uses mutex_lock(). Fortunately, perf_resource_mutex should
be a spinlock anyway, so convert it.

[ Impact: fix crash due to early init mutex use ]

LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0d905bca
@@ -46,9 +46,9 @@ static atomic_t nr_comm_tracking __read_mostly;
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 
 /*
- * Mutex for (sysadmin-configurable) counter reservations:
+ * Lock for (sysadmin-configurable) counter reservations:
  */
-static DEFINE_MUTEX(perf_resource_mutex);
+static DEFINE_SPINLOCK(perf_resource_lock);
 
 /*
  * Architecture provided APIs - weak aliases:
@@ -3207,9 +3207,9 @@ static void __cpuinit perf_counter_init_cpu(int cpu)
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
 	__perf_counter_init_context(&cpuctx->ctx, NULL);
 
-	mutex_lock(&perf_resource_mutex);
+	spin_lock(&perf_resource_lock);
 	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
-	mutex_unlock(&perf_resource_mutex);
+	spin_unlock(&perf_resource_lock);
 
 	hw_perf_counter_setup(cpu);
 }
@@ -3292,7 +3292,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	if (val > perf_max_counters)
 		return -EINVAL;
 
-	mutex_lock(&perf_resource_mutex);
+	spin_lock(&perf_resource_lock);
 	perf_reserved_percpu = val;
 	for_each_online_cpu(cpu) {
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
@@ -3302,7 +3302,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 		cpuctx->max_pertask = mpt;
 		spin_unlock_irq(&cpuctx->ctx.lock);
 	}
-	mutex_unlock(&perf_resource_mutex);
+	spin_unlock(&perf_resource_lock);
 
 	return count;
 }
@@ -3324,9 +3324,9 @@ perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
 	if (val > 1)
 		return -EINVAL;
 
-	mutex_lock(&perf_resource_mutex);
+	spin_lock(&perf_resource_lock);
 	perf_overcommit = val;
-	mutex_unlock(&perf_resource_mutex);
+	spin_unlock(&perf_resource_lock);
 
 	return count;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment