Commit f833bab8 authored by Suresh Siddha, committed by Thomas Gleixner

clockevent: Prevent deadlock on clockevents_lock

Currently clockevents_notify() is called with interrupts enabled at some
call sites and with interrupts disabled at others.

This results in a deadlock in this scenario.

cpu A holds clockevents_lock in clockevents_notify() with irqs enabled
cpu B waits for clockevents_lock in clockevents_notify() with irqs disabled
cpu C does set_mtrr(), which tries to rendezvous all the cpus.

As a result, C and A reach the rendezvous point and wait for B, while B
is stuck forever waiting for the spinlock and thus never reaches the
rendezvous point.
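
A minimal sketch of the two conflicting lock paths (illustrative only;
caller_on_cpu_a() and caller_on_cpu_b() are hypothetical names, not the
actual call sites):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(clockevents_lock);

void caller_on_cpu_a(void)              /* call site with interrupts enabled */
{
	spin_lock(&clockevents_lock);   /* holds the lock, irqs still on */
	/*
	 * The MTRR rendezvous IPI from cpu C can fire here; cpu A then sits
	 * in the rendezvous handler waiting for cpu B, still owning the lock.
	 */
	spin_unlock(&clockevents_lock);
}

void caller_on_cpu_b(void)              /* call site with interrupts disabled */
{
	spin_lock(&clockevents_lock);   /* spins with irqs off: never sees the
					 * rendezvous IPI, so it never reaches
					 * the rendezvous point -> deadlock */
	spin_unlock(&clockevents_lock);
}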

Fix the clockevents code so that clockevents_lock is always taken with
interrupts disabled, thus avoiding the above deadlock.
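
In sketch form, the resulting locking pattern is (the lock is now always
held with interrupts off, whatever the caller's context):

	unsigned long flags;

	spin_lock_irqsave(&clockevents_lock, flags);    /* irqs off while held */
	/* ... notify the devices / walk the notifier chain ... */
	spin_unlock_irqrestore(&clockevents_lock, flags);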

Also call lapic_timer_propagate_broadcast() on the destination cpu so
that we avoid calling smp_call_function() in the clockevents notifier
chain.
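
Concretely, the propagation now runs on the target CPU via
smp_call_function_single(), mirroring the acpi_processor_power_verify()
hunk below:

	/* Run lapic_timer_propagate_broadcast() on cpu pr->id rather than on
	 * the current cpu; wait=1 blocks until it has finished there. */
	smp_call_function_single(pr->id, lapic_timer_propagate_broadcast, pr, 1);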

This issue left us wondering whether we need to change the MTRR rendezvous
logic to use stop machine logic (instead of smp_call_function), or add a
check in the spinlock debug code for spinlocks which get taken under both
interrupts-enabled and interrupts-disabled conditions.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: "Pallipadi Venkatesh" <venkatesh.pallipadi@intel.com>
Cc: "Brown Len" <len.brown@intel.com>
LKML-Reference: <1250544899.2709.210.camel@sbs-t61.sc.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent de809347
@@ -519,16 +519,12 @@ static void c1e_idle(void)
 		if (!cpumask_test_cpu(cpu, c1e_mask)) {
 			cpumask_set_cpu(cpu, c1e_mask);
 			/*
-			 * Force broadcast so ACPI can not interfere. Needs
-			 * to run with interrupts enabled as it uses
-			 * smp_function_call.
+			 * Force broadcast so ACPI can not interfere.
 			 */
-			local_irq_enable();
 			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
 					   &cpu);
 			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
 			       cpu);
-			local_irq_disable();
 		}
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
...
@@ -162,8 +162,9 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 		pr->power.timer_broadcast_on_state = state;
 }
 
-static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
+static void lapic_timer_propagate_broadcast(void *arg)
 {
+	struct acpi_processor *pr = (struct acpi_processor *) arg;
 	unsigned long reason;
 
 	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
@@ -635,7 +636,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 		working++;
 	}
 
-	lapic_timer_propagate_broadcast(pr);
+	smp_call_function_single(pr->id, lapic_timer_propagate_broadcast,
+				 pr, 1);
 
 	return (working);
 }
...
@@ -137,11 +137,12 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int clockevents_register_notifier(struct notifier_block *nb)
 {
+	unsigned long flags;
 	int ret;
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 	ret = raw_notifier_chain_register(&clockevents_chain, nb);
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 
 	return ret;
 }
@@ -178,16 +179,18 @@ static void clockevents_notify_released(void)
  */
 void clockevents_register_device(struct clock_event_device *dev)
 {
+	unsigned long flags;
+
 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
 	BUG_ON(!dev->cpumask);
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 
 	list_add(&dev->list, &clockevent_devices);
 	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
 	clockevents_notify_released();
 
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
@@ -235,8 +238,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
 void clockevents_notify(unsigned long reason, void *arg)
 {
 	struct list_head *node, *tmp;
+	unsigned long flags;
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 	clockevents_do_notify(reason, arg);
 
 	switch (reason) {
@@ -251,7 +255,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 	default:
 		break;
 	}
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 #endif
@@ -205,11 +205,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
  */
-static void tick_do_broadcast_on_off(void *why)
+static void tick_do_broadcast_on_off(unsigned long *reason)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
-	unsigned long flags, *reason = why;
+	unsigned long flags;
 	int cpu, bc_stopped;
 
 	spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -276,8 +276,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu)
 		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
 		       "offline CPU #%d\n", *oncpu);
 	else
-		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
-					 &reason, 1);
+		tick_do_broadcast_on_off(&reason);
 }
 
 /*
...