Commit 3fe656e0 authored by Harry Fearnhamm, committed by Catalin Marinas

MPCore r0p0 Errata workaround

RealView/EB MPCore revB and revC contain r0p0 silicon, which needs
this workaround.
Signed-off-by: Harry Fearnhamm <Harry.Fearnhamm@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent ce79d9be
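
The pattern applied throughout the diff below is a per-CPU back-off: read the CPU number from the CP15 CPU ID register (mrc p15, 0, <Rd>, c0, c0, 5), scale it by 256, and busy-wait that many iterations before retrying a failed ldrex/strex sequence, so the cores on the affected r0p0 silicon hit the exclusive monitor at staggered times rather than in lock-step. Below is a minimal sketch of the helper and of how it gates a retry loop, restated from the atomic.h hunks that follow; the backoff_delay and example_atomic_add_return names are illustrative only, not part of the patch, and the code builds only for ARMv6+ targets.

/* Per-CPU back-off: spin for (CPU number read from CP15 c0,c0,5) * 256
 * iterations.  Returning 1 lets the helper sit in a retry condition. */
static inline int backoff_delay(void)
{
	unsigned int delay;

	__asm__ __volatile__(
	"	mrc	p15, 0, %0, c0, c0, 5\n"	/* read the CPU ID register */
	"	and	%0, %0, #0xf\n"			/* keep the CPU number */
	"	mov	%0, %0, lsl #8\n"		/* delay = cpu * 256 */
	"1:	subs	%0, %0, #1\n"			/* count the delay down */
	"	bpl	1b\n"
	: "=&r" (delay) : : "cc");

	return 1;
}

/* Illustrative caller with the same shape as the patched atomic_add_return(). */
static inline int example_atomic_add_return(int i, volatile int *counter)
{
	unsigned long tmp;
	int result;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"		/* exclusive load of *counter */
		"	add	%0, %0, %3\n"
		"	strex	%1, %0, [%2]\n"		/* tmp = 0 iff the store succeeded */
		: "=&r" (result), "=&r" (tmp)
		: "r" (counter), "Ir" (i)
		: "cc");
	} while (tmp && backoff_delay());		/* back off before the next attempt */

	return result;
}
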
@@ -5,6 +5,11 @@
and r3, r0, #7 @ Get bit offset
add r1, r1, r0, lsr #3 @ Get byte offset
mov r3, r2, lsl r3
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #0xf
mov r0, r0, lsl #8
3: subs r0, r0, #1
bpl 3b
1: ldrexb r2, [r1]
\instr r2, r2, r3
strexb r0, r2, [r1]
@@ -18,9 +23,14 @@
mov r2, #1
add r1, r1, r0, lsr #3 @ Get byte offset
mov r3, r2, lsl r3 @ create mask
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #0xf
mov r0, r0, lsl #8
3: subs r0, r0, #1
bpl 3b
1: ldrexb r2, [r1]
ands r0, r2, r3 @ save old value of bit
\instr r2, r2, r3 @ toggle bit
\instr r2, r2, r3 @ toggle bit
strexb ip, r2, [r1]
cmp ip, #0
bne 1b
......
@@ -23,6 +23,29 @@ typedef struct { volatile int counter; } atomic_t;
#if __LINUX_ARM_ARCH__ >= 6
#if defined(CONFIG_SMP) && defined(CONFIG_REALVIEW_MPCORE)
/* RevB and RevC MPCore contains r0p0 silicon, which needs this workaround */
static inline int atomic_backoff_delay(void)
{
unsigned int delay;
__asm__ __volatile__(
" mrc p15, 0, %0, c0, c0, 5\n"
" and %0, %0, #0xf\n"
" mov %0, %0, lsl #8\n"
"1: subs %0, %0, #1\n"
" bpl 1b\n"
: "=&r" (delay)
:
: "cc" );
return 1;
}
#else
#define atomic_backoff_delay() 1
#endif
/*
* ARMv6 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
@@ -34,14 +57,14 @@ static inline void atomic_set(atomic_t *v, int i)
{
unsigned long tmp;
do {
__asm__ __volatile__("@ atomic_set\n"
"1: ldrex %0, [%1]\n"
" ldrex %0, [%1]\n"
" strex %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
} while (tmp && atomic_backoff_delay());
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -49,15 +72,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
unsigned long tmp;
int result;
do {
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%2]\n"
" ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
} while (tmp && atomic_backoff_delay());
return result;
}
@@ -67,15 +90,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
unsigned long tmp;
int result;
do {
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%2]\n"
" ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
} while (tmp && atomic_backoff_delay());
return result;
}
@@ -84,16 +107,16 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long oldval, res;
do {
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%2]\n"
" ldrex %1, [%2]\n"
"mov %0, #0\n"
"teq %1, %3\n"
"strexeq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (&ptr->counter), "Ir" (old), "r" (new)
: "cc");
} while (res);
} while (res && atomic_backoff_delay());
return oldval;
}
@@ -102,15 +125,15 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long tmp, tmp2;
do {
__asm__ __volatile__("@ atomic_clear_mask\n"
"1: ldrex %0, %2\n"
" ldrex %0, %2\n"
" bic %0, %0, %3\n"
" strex %1, %0, %2\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (addr), "Ir" (mask)
: "cc");
} while (tmp && atomic_backoff_delay());
}
#else /* ARM_ARCH_6 */
......
@@ -23,10 +23,34 @@
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#if defined(CONFIG_SMP) && defined(CONFIG_REALVIEW_MPCORE)
/* RevB and RevC MPCore contains r0p0 silicon, which needs this workaround */
static inline int spinlock_backoff_delay(void)
{
unsigned int delay;
__asm__ __volatile__(
"2: mrc p15, 0, %0, c0, c0, 5\n"
" and %0, %0, #0xf\n"
" mov %0, %0, lsl #8\n"
"1: subs %0, %0, #1\n"
" bpl 1b\n"
: "=&r" (delay)
:
: "cc" );
return 1;
}
#else
#define spinlock_backoff_delay()
#endif
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
unsigned long tmp;
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -35,7 +59,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
#endif
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
" bne 2b"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
: "cc");
@@ -47,6 +71,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
unsigned long tmp;
spinlock_backoff_delay();
__asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -91,6 +116,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
{
unsigned long tmp;
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -99,7 +125,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
#endif
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
" bne 2b"
: "=&r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc");
@@ -111,6 +137,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
unsigned long tmp;
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -161,6 +188,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long tmp, tmp2;
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
@@ -169,7 +197,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
" wfemi\n"
#endif
" rsbpls %0, %1, #0\n"
" bmi 1b"
" bmi 2b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
: "cc");
@@ -183,12 +211,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
smp_mb();
spinlock_backoff_delay();
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
" bne 2b"
#ifdef CONFIG_CPU_32v6K
"\n cmp %0, #0\n"
" mcreq p15, 0, %0, c7, c10, 4\n"
......
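
Note how the spinlock/rwlock side is wired in: spinlock_backoff_delay() starts its delay sequence at local label 2:, and the retry branches in the lock routines are changed from bne 1b / bmi 1b to bne 2b / bmi 2b, while the trylock variants only gain the one-shot delay before their single attempt. Because the helper is an asm volatile block emitted immediately before each lock's own asm block, a failed strex branches back into the back-off loop and then falls through to the ldrex for the next attempt; this relies on the compiler keeping the two blocks adjacent, which is what the patch assumes. A sketch of the resulting __raw_spin_lock, restated from the hunks above with the CONFIG_CPU_32v6K wfe/sev lines left out for brevity (not a verbatim copy of the patched file):

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	/* Emits "2: mrc ...; and ...; mov ...; 1: subs ...; bpl 1b". */
	spinlock_backoff_delay();

	__asm__ __volatile__(
	"1:	ldrex	%0, [%1]\n"		/* read the lock word exclusively */
	"	teq	%0, #0\n"
	"	strexeq	%0, %2, [%1]\n"		/* claim the lock if it was free */
	"	teqeq	%0, #0\n"
	"	bne	2b"			/* lock busy or strex failed: jump back
						 * to the 2: label in the delay block,
						 * run the back-off, then fall through
						 * to the ldrex for the next attempt */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}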