Commit ba77813a authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: x86: fixup nmi_watchdog vs perf_counter boo-boo

Invert the atomic_inc_not_zero() test so that we will indeed detect the
first activation.

Also rename the global num_counters to active_counters, since it's easy
to confuse with x86_pmu.num_counters.

[ Impact: fix non-working perfcounters on AMD CPUs, cleanup ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241455664.7620.4938.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b82914ce
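For illustration, here is a minimal user-space sketch of the reserve-on-first-use
refcounting that the fixed test restores. This is not the kernel code: the kernel
primitives are approximated with C11 atomics and a pthread mutex
(atomic_inc_not_zero() is rebuilt from compare-exchange, and dec_and_mutex_lock()
is a simplified stand-in for the kernel's atomic_dec_and_mutex_lock()), and
counter_init()/counter_destroy() are hypothetical wrappers, not the kernel
functions of similar names.

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_counters;      /* number of live counters */
static pthread_mutex_t pmc_reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

static bool reserve_pmc_hardware(void) { puts("PMC reserved"); return true; }
static void release_pmc_hardware(void) { puts("PMC released"); }

/* Increment v only if it is already non-zero; false means "first user". */
static bool atomic_inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0)
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;
        return false;
}

/*
 * Simplified stand-in for the kernel's atomic_dec_and_mutex_lock():
 * decrement v and return true, with the mutex held, iff it hit zero.
 */
static bool dec_and_mutex_lock(atomic_int *v, pthread_mutex_t *m)
{
        pthread_mutex_lock(m);
        if (atomic_fetch_sub(v, 1) == 1)
                return true;
        pthread_mutex_unlock(m);
        return false;
}

static int counter_init(void)
{
        int err = 0;

        /*
         * The boo-boo: without the '!', the slow path below only ran when
         * some counter was *already* active, so the very first counter
         * never reserved the hardware.
         */
        if (!atomic_inc_not_zero(&active_counters)) {
                pthread_mutex_lock(&pmc_reserve_mutex);
                if (atomic_load(&active_counters) == 0 && !reserve_pmc_hardware())
                        err = -EBUSY;
                else
                        atomic_fetch_add(&active_counters, 1);
                pthread_mutex_unlock(&pmc_reserve_mutex);
        }
        return err;
}

static void counter_destroy(void)
{
        /* The last counter out releases the hardware. */
        if (dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                pthread_mutex_unlock(&pmc_reserve_mutex);
        }
}

int main(void)
{
        counter_init();         /* slow path: reserves the PMCs */
        counter_init();         /* fast path: just bumps the count */
        counter_destroy();
        counter_destroy();      /* count hits zero: releases the PMCs */
        return 0;
}

The design point the fix preserves: once at least one counter is active, further
counters take the lock-free increment path, and the mutex is only needed on the
0 -> 1 and 1 -> 0 transitions that reserve and release the hardware.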
@@ -171,7 +171,7 @@ again:
        return new_raw_count;
 }
 
-static atomic_t num_counters;
+static atomic_t active_counters;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
 static bool reserve_pmc_hardware(void)
@@ -224,7 +224,7 @@ static void release_pmc_hardware(void)
 
 static void hw_perf_counter_destroy(struct perf_counter *counter)
 {
-       if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
+       if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
@@ -248,12 +248,12 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
                return -ENODEV;
 
        err = 0;
-       if (atomic_inc_not_zero(&num_counters)) {
+       if (!atomic_inc_not_zero(&active_counters)) {
                mutex_lock(&pmc_reserve_mutex);
-               if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
+               if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
                        err = -EBUSY;
                else
-                       atomic_inc(&num_counters);
+                       atomic_inc(&active_counters);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
@@ -280,7 +280,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
        if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
                hwc->nmi = 1;
 
        hwc->irq_period = hw_event->irq_period;
        if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
                hwc->irq_period = x86_pmu.max_period;
@@ -871,7 +871,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
        struct pt_regs *regs;
        int ret;
 
-       if (!atomic_read(&num_counters))
+       if (!atomic_read(&active_counters))
                return NOTIFY_DONE;
 
        switch (cmd) {