Commit 64aa234c authored by Jeremy Fitzhardinge's avatar Jeremy Fitzhardinge Committed by Greg Kroah-Hartman

xen: only enable interrupts while actually blocking for spinlock

commit 4d576b57 upstream.

Where possible we enable interrupts while waiting for a spinlock to
become free, in order to reduce big latency spikes in interrupt handling.

However, at present if we manage to pick up the spinlock just before
blocking, we'll end up holding the lock with interrupts enabled for a
while.  This will cause a deadlock if we receive an interrupt in that
window, and the interrupt handler tries to take the lock too.

Solve this by shrinking the interrupt-enabled region to just around the
blocking call.

[ Impact: avoid race/deadlock when using Xen PV spinlocks ]
Reported-by: "Yang, Xiaowei" <xiaowei.yang@intel.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent b04a12a2
...@@ -187,7 +187,6 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl ...@@ -187,7 +187,6 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
struct xen_spinlock *prev; struct xen_spinlock *prev;
int irq = __get_cpu_var(lock_kicker_irq); int irq = __get_cpu_var(lock_kicker_irq);
int ret; int ret;
unsigned long flags;
u64 start; u64 start;
/* If kicker interrupts not initialized yet, just spin */ /* If kicker interrupts not initialized yet, just spin */
...@@ -199,16 +198,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl ...@@ -199,16 +198,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
/* announce we're spinning */ /* announce we're spinning */
prev = spinning_lock(xl); prev = spinning_lock(xl);
flags = __raw_local_save_flags();
if (irq_enable) {
ADD_STATS(taken_slow_irqenable, 1);
raw_local_irq_enable();
}
ADD_STATS(taken_slow, 1); ADD_STATS(taken_slow, 1);
ADD_STATS(taken_slow_nested, prev != NULL); ADD_STATS(taken_slow_nested, prev != NULL);
do { do {
unsigned long flags;
/* clear pending */ /* clear pending */
xen_clear_irq_pending(irq); xen_clear_irq_pending(irq);
...@@ -228,6 +223,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl ...@@ -228,6 +223,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
goto out; goto out;
} }
flags = __raw_local_save_flags();
if (irq_enable) {
ADD_STATS(taken_slow_irqenable, 1);
raw_local_irq_enable();
}
/* /*
* Block until irq becomes pending. If we're * Block until irq becomes pending. If we're
* interrupted at this point (after the trylock but * interrupted at this point (after the trylock but
...@@ -238,13 +239,15 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl ...@@ -238,13 +239,15 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
* pending. * pending.
*/ */
xen_poll_irq(irq); xen_poll_irq(irq);
raw_local_irq_restore(flags);
ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq)); ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */ } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
out: out:
raw_local_irq_restore(flags);
unspinning_lock(xl, prev); unspinning_lock(xl, prev);
spin_time_accum_blocked(start); spin_time_accum_blocked(start);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment