Commit 5af8888b authored by Harry Fearnhamm, committed by Catalin Marinas

Spinlocks using LDREX and STREX instructions can livelock

According to the ARM11MPCore Erratum 351422 (r0p0), under extremely
rare conditions, in an MPCore node consisting of at least 3 CPUs, two
CPUs trying to perform a STREX to data on the same shared cache line
can enter a livelock situation. This patch adds variable spinning time
to the locking routines.
Signed-off-by: Harry Fearnhamm <Harry.Fearnhamm@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 2d62afe3
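
The workaround staggers retries by CPU number, so CPUs contending on the same cache line attempt their STREX at different times. A minimal C sketch of the delay used throughout the diff below (the function name is illustrative; the busy-wait length of CPU number times 256 iterations matches the assembler sequences added by the patch):

/*
 * Sketch only: per-CPU backoff before retrying a failed exclusive store.
 * CP15 c0,c0,5 is the CPU ID register on ARM11MPCore; its low four bits
 * give the CPU number, so each CPU busy-waits roughly CPU_number * 256
 * iterations before trying again.
 */
static inline void erratum_351422_backoff(void)
{
	unsigned int delay;

	__asm__ __volatile__(
	"	mrc	p15, 0, %0, c0, c0, 5	@ read the CPU ID register\n"
	"	and	%0, %0, #0xf		@ CPU number (0..15)\n"
	"	mov	%0, %0, lsl #8		@ delay = cpu * 256\n"
	"1:	subs	%0, %0, #1\n"
	"	bpl	1b			@ spin until the count goes negative\n"
	: "=&r" (delay)
	:
	: "cc");
}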
@@ -488,6 +488,18 @@ config ARM_ERRATA_411920
does not affect the MPCore. This option enables the ARM Ltd.
recommended workaround.
config ARM_ERRATA_351422
bool "Spinlocks using LDREX and STREX instructions can livelock"
depends on CPU_V6 && SMP
default n
help
According to the ARM11MPCore Erratum 351422 (r0p0), under
extremely rare conditions, in an MPCore node consisting of
at least 3 CPUs, two CPUs trying to perform a STREX to data
on the same shared cache line can enter a livelock
situation. This option adds variable spinning time to the
locking routines.
endmenu
source "arch/arm/common/Kconfig"
......
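
The option defaults to n, so a kernel for an affected ARM11MPCore system has to enable it explicitly, for example:

# Workaround is only offered on ARMv6 SMP kernels (depends on CPU_V6 && SMP)
CONFIG_CPU_V6=y
CONFIG_SMP=y
CONFIG_ARM_ERRATA_351422=y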
@@ -5,6 +5,13 @@
and r3, r0, #7 @ Get bit offset
add r1, r1, r0, lsr #3 @ Get byte offset
mov r3, r2, lsl r3
#ifdef CONFIG_ARM_ERRATA_351422
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #0xf
mov r0, r0, lsl #8
3: subs r0, r0, #1
bpl 3b
#endif
1: ldrexb r2, [r1]
\instr r2, r2, r3
strexb r0, r2, [r1]
@@ -18,6 +25,13 @@
mov r2, #1
add r1, r1, r0, lsr #3 @ Get byte offset
mov r3, r2, lsl r3 @ create mask
#ifdef CONFIG_ARM_ERRATA_351422
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #0xf
mov r0, r0, lsl #8
3: subs r0, r0, #1
bpl 3b
#endif
1: ldrexb r2, [r1]
ands r0, r2, r3 @ save old value of bit
\instr r2, r2, r3 @ toggle bit
......
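
In the bitops macros above, the CPU-number delay sits before label 1, so it runs once per call: the backward branch that retries after a failed STREXB (the branch itself falls outside the hunks shown) targets label 1, not the delay loop at label 3. r0 is free to hold the delay counter at that point because the bit number it carried has already been folded into r1 and r3. A simplified C model of the resulting sequence, with illustrative names (modeled_set_bit and modeled_backoff are not part of the patch):

/*
 * C model of a modified bit operation: delay once, then the usual
 * LDREXB/ORR/STREXB retry loop.  Assumes an ARMv6 target.
 */
static inline void modeled_backoff(void)
{
	/* per-CPU busy-wait as in the earlier sketch */
}

static inline void modeled_set_bit(unsigned int bit, volatile unsigned char *p)
{
	unsigned int mask = 1U << (bit & 7);
	unsigned long old, failed;

	p += bit >> 3;				/* byte containing the bit */
	modeled_backoff();			/* staggers CPUs once, before the loop */
	do {
		__asm__ __volatile__(
		"	ldrexb	%0, [%2]\n"
		"	orr	%0, %0, %3\n"
		"	strexb	%1, %0, [%2]\n"
		: "=&r" (old), "=&r" (failed)
		: "r" (p), "r" (mask)
		: "memory");
	} while (failed);			/* retry if the exclusive store failed */
}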
@@ -23,6 +23,26 @@ typedef struct { volatile int counter; } atomic_t;
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_ARM_ERRATA_351422
static inline int atomic_backoff_delay(void)
{
unsigned int delay;
__asm__ __volatile__(
" mrc p15, 0, %0, c0, c0, 5\n"
" and %0, %0, #0xf\n"
" mov %0, %0, lsl #8\n"
"1: subs %0, %0, #1\n"
" bpl 1b\n"
: "=&r" (delay)
:
: "cc" );
return 1;
}
#else
#define atomic_backoff_delay() 1
#endif
/*
* ARMv6 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
@@ -34,14 +54,14 @@ static inline void atomic_set(atomic_t *v, int i)
{
unsigned long tmp;
do {
__asm__ __volatile__("@ atomic_set\n"
"1: ldrex %0, [%1]\n"
" strex %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
} while (tmp && atomic_backoff_delay());
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -49,15 +69,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
unsigned long tmp;
int result;
do {
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
} while (tmp && atomic_backoff_delay());
return result;
}
@@ -67,15 +87,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
unsigned long tmp;
int result;
do {
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
} while (tmp && atomic_backoff_delay());
return result;
}
@@ -93,7 +113,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "=&r" (res), "=&r" (oldval)
: "r" (&ptr->counter), "Ir" (old), "r" (new)
: "cc");
} while (res);
} while (res && atomic_backoff_delay());
return oldval;
}
@@ -102,15 +122,15 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long tmp, tmp2;
do {
__asm__ __volatile__("@ atomic_clear_mask\n"
"1: ldrex %0, [%2]\n"
" bic %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (addr), "Ir" (mask)
: "cc");
} while (tmp && atomic_backoff_delay());
}
#else /* ARM_ARCH_6 */
......
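
Each atomic routine above is now wrapped in do { ... } while (status && atomic_backoff_delay()). Since atomic_backoff_delay() always returns a non-zero value, the condition collapses to a plain while (status) when the option is disabled; with it enabled, every retry is preceded by the per-CPU delay (the unchanged hunk sizes suggest the assembler's own teq/bne retry gives way to this C-level loop). A simplified C model of the idiom, with illustrative names that are not from the patch:

/*
 * Model of the retry-with-backoff idiom used for the atomic operations.
 * modeled_backoff_delay() stands in for atomic_backoff_delay(): it always
 * returns 1 so that it can sit on the right-hand side of "&&".
 */
static inline int modeled_backoff_delay(void)
{
	/* per-CPU busy-wait as in the earlier sketch */
	return 1;
}

static inline void modeled_atomic_set(volatile int *counter, int i)
{
	unsigned long failed;

	do {
		/* one LDREX/STREX attempt; "failed" is the STREX status:
		 * 0 on success, 1 if exclusive access was lost */
		__asm__ __volatile__(
		"	ldrex	%0, [%1]\n"
		"	strex	%0, %2, [%1]\n"
		: "=&r" (failed)
		: "r" (counter), "r" (i)
		: "memory");
	} while (failed && modeled_backoff_delay());
}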
@@ -23,12 +23,32 @@
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#ifdef CONFIG_ARM_ERRATA_351422
#define spinlock_backoff_delay() \
{ \
unsigned int delay; \
__asm__ __volatile__( \
"1: mrc p15, 0, %0, c0, c0, 5\n" \
" and %0, %0, #0xf\n" \
" mov %0, %0, lsl #8\n" \
"2: subs %0, %0, #1\n" \
" bpl 2b\n" \
: "=&r" (delay) \
: \
: "cc" ); \
}
#else
#define spinlock_backoff_delay() \
__asm__ __volatile__("1: \n");
#endif
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
unsigned long tmp;
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" wfene\n"
@@ -47,6 +67,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
unsigned long tmp;
spinlock_backoff_delay();
__asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -90,8 +111,9 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
{
unsigned long tmp;
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" wfene\n"
@@ -110,6 +132,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
unsigned long tmp;
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -160,8 +183,9 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long tmp, tmp2;
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
@@ -182,8 +206,9 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
smp_mb();
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
......
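
In spinlock.h, spinlock_backoff_delay() is invoked at the top of each locking routine and, with the erratum workaround enabled, carries the label 1 at the start of its delay sequence; each routine's first ldrex (shown above both with and without its old "1:" label) no longer defines that label itself, so the backward branch that retries a contended lock re-enters the delay on every attempt. That is also why the non-erratum variant of the macro still emits a bare "1:". A simplified C model of the resulting lock loop, with illustrative names (barriers and the WFE/SEV handling of the real code are omitted):

/*
 * Model of a spin lock whose retry path runs the per-CPU delay each
 * time round.  Assumes an ARMv6 target; not the patch's actual code.
 */
static inline void modeled_backoff(void)
{
	/* per-CPU busy-wait as in the earlier sketch */
}

static inline void modeled_spin_lock(volatile unsigned long *lock)
{
	unsigned long failed;

	do {
		modeled_backoff();		/* runs again on every retry */
		__asm__ __volatile__(
		"	ldrex	%0, [%1]	@ read the lock word\n"
		"	teq	%0, #0		@ already held?\n"
		"	strexeq	%0, %2, [%1]	@ free: try to claim it\n"
		: "=&r" (failed)
		: "r" (lock), "r" (1UL)
		: "cc", "memory");
	} while (failed);			/* held or STREX lost: delay and retry */
}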