Commit 1d082a19 authored by Catalin Marinas

Thumb-2: Implement unified locking support

This patch adds ARM/Thumb-2 unified support for spinlocks, mutexes,
semaphores and the atomic operations.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent ac04c319
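
Note: the change most of the hunks below have in common is that, in Thumb-2, a conditionally executed instruction is only valid inside an IT (If-Then) block, so every conditional instruction in the inline assembler gains an explicit it/itt/ite/itet prefix. With the unified assembler syntax used by the Thumb-2 series, the same source still assembles for ARM, where the IT instruction is accepted but produces no code. A minimal sketch of the pattern (hypothetical helper, not part of the patch):

static inline int example_clamp_to_zero(int x)
{
	__asm__ (
	"	cmp	%0, #0\n"
	"	it	le\n"		/* required for Thumb-2, no code for ARM */
	"	movle	%0, #0\n"	/* x <= 0: clamp the result to zero      */
	: "+r" (x)
	:
	: "cc");

	return x;
}
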
@@ -182,6 +182,7 @@ int __down_trylock(struct semaphore * sem)
asm(" .section .sched.text,\"ax\",%progbits \n\
.align 5 \n\
.globl __down_failed \n\
.type __down_failed, %function \n\
__down_failed: \n\
stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
@@ -190,6 +191,7 @@ __down_failed: \n\
\n\
.align 5 \n\
.globl __down_interruptible_failed \n\
.type __down_interruptible_failed, %function \n\
__down_interruptible_failed: \n\
stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
@@ -199,6 +201,7 @@ __down_interruptible_failed: \n\
\n\
.align 5 \n\
.globl __down_trylock_failed \n\
.type __down_trylock_failed, %function\n\
__down_trylock_failed: \n\
stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
@@ -208,6 +211,7 @@ __down_trylock_failed: \n\
\n\
.align 5 \n\
.globl __up_wakeup \n\
.type __up_wakeup, %function \n\
__up_wakeup: \n\
stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
......
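
Note on the .type directives added above: they matter once these asm-defined symbols can be assembled as Thumb-2 code, because an untyped label gives the toolchain no indication that it is a function entry point, so ARM/Thumb interworking for calls to it (e.g. setting the Thumb bit on its address) cannot be handled reliably. A minimal sketch with hypothetical symbol names:

void __example_slowpath(void) { }	/* hypothetical out-of-line helper */

asm("	.section .sched.text, \"ax\", %progbits\n"
"	.align	5\n"
"	.globl	__example_failed\n"
"	.type	__example_failed, %function\n"	/* mark the label as a function */
"__example_failed:\n"
"	stmfd	sp!, {r0 - r4, lr}\n"
"	mov	r0, ip\n"
"	bl	__example_slowpath\n"
"	ldmfd	sp!, {r0 - r4, pc}\n"
"	.previous");
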
@@ -90,6 +90,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
"ldrex %1, [%2]\n"
"mov %0, #0\n"
"teq %1, %3\n"
"it eq\n"
"strexeq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (&ptr->counter), "Ir" (old), "r" (new)
......
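
For reference, the compare-and-exchange helper touched above looks roughly like the sketch below (illustrative type and function names); the new "it eq" simply makes the existing strexeq legal when the file is built as Thumb-2:

typedef struct { volatile int counter; } example_atomic_t;

static inline int example_cmpxchg(example_atomic_t *ptr, int old, int new)
{
	int oldval, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%1, [%2]\n"	/* oldval = ptr->counter          */
		"	mov	%0, #0\n"	/* default: no retry needed       */
		"	teq	%1, %3\n"	/* still holding the "old" value? */
		"	it	eq\n"		/* cover the conditional store    */
		"	strexeq	%0, %4, [%2]\n"	/* res = 1 if the store was lost  */
		: "=&r" (res), "=&r" (oldval)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
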
@@ -24,6 +24,7 @@
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" itt mi\n" \
" movmi ip, %0\n" \
" blmi " #fail \
: \
@@ -43,6 +44,7 @@
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" itet mi\n" \
" movmi ip, %1\n" \
" movpl ip, #0\n" \
" blmi " #fail "\n" \
@@ -65,6 +67,7 @@
" teq ip, #0\n" \
" bne 1b\n" \
" cmp lr, #0\n" \
" itt le\n" \
" movle ip, %0\n" \
" blle " #wake \
: \
@@ -91,6 +94,7 @@
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" itt ne\n" \
" movne ip, %0\n" \
" blne " #fail \
: \
@@ -150,6 +154,7 @@
" subs lr, lr, %1\n" \
" str lr, [%0]\n" \
" msr cpsr_c, ip\n" \
" itt mi\n" \
" movmi ip, %0\n" \
" blmi " #fail \
: \
@@ -170,6 +175,7 @@
" subs lr, lr, %2\n" \
" str lr, [%1]\n" \
" msr cpsr_c, ip\n" \
" itet mi\n" \
" movmi ip, %1\n" \
" movpl ip, #0\n" \
" blmi " #fail "\n" \
@@ -193,6 +199,7 @@
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
" msr cpsr_c, ip\n" \
" itt le\n" \
" movle ip, %0\n" \
" blle " #wake \
: \
@@ -220,6 +227,7 @@
" subs lr, lr, %1\n" \
" str lr, [%0]\n" \
" msr cpsr_c, ip\n" \
" itt ne\n" \
" movne ip, %0\n" \
" blne " #fail \
: \
@@ -239,6 +247,7 @@
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
" msr cpsr_c, ip\n" \
" itt cs\n" \
" movcs ip, %0\n" \
" blcs " #wake \
: \
@@ -262,6 +271,7 @@
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
" msr cpsr_c, ip\n" \
" itt eq\n" \
" moveq ip, %0\n" \
" bleq " #wake \
: \
......
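
The suffix letters on the IT instructions above encode the conditions of the following one to four instructions: the leading condition applies to the first, and each extra T repeats it while each E inverts it. So "itt mi" covers a movmi/blmi pair, "itet mi" covers movmi/movpl/blmi, and "itt cs", "itt le", etc. work the same way for their conditions. A toy example of the two-instruction form (hypothetical function, kept deliberately simple):

static inline int example_abs(int x)
{
	int ret;

	__asm__ (
	"	cmp	%1, #0\n"
	"	ite	mi\n"		/* next two instructions: mi, then pl */
	"	rsbmi	%0, %1, #0\n"	/* x < 0:  ret = -x                   */
	"	movpl	%0, %1\n"	/* x >= 0: ret = x                    */
	: "=&r" (ret)
	: "r" (x)
	: "cc");

	return ret;
}
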
@@ -111,8 +111,11 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
"1: ldrex %0, [%3] \n\t"
"subs %1, %0, #1 \n\t"
"it eq\n\t"
"strexeq %2, %1, [%3] \n\t"
"it lt\n\t"
"movlt %0, #0 \n\t"
"it eq\n\t"
"cmpeq %2, #0 \n\t"
"bgt 1b "
......
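
In the trylock fastpath above, the conditions of the three conditional instructions (eq, lt, eq) are neither all the same nor complementary pairs, so they cannot be folded into a single itt/ite/itet block; each one gets its own single-instruction "it". An annotated sketch of the whole fastpath, reusing example_atomic_t from the earlier sketch (names are illustrative):

static inline int example_mutex_trylock(example_atomic_t *count)
{
	int __ex_flag, __res, __orig;

	__asm__ __volatile__(
	"1:	ldrex	%0, [%3]\n\t"		/* __orig = count->counter       */
	"	subs	%1, %0, #1\n\t"		/* flags from counter - 1        */
	"	it	eq\n\t"
	"	strexeq	%2, %1, [%3]\n\t"	/* was 1 (unlocked): try to take */
	"	it	lt\n\t"
	"	movlt	%0, #0\n\t"		/* was <= 0: report failure      */
	"	it	eq\n\t"
	"	cmpeq	%2, #0\n\t"		/* did the exclusive store fail? */
	"	bgt	1b"			/* yes: retry                    */
	: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
	: "r" (&count->counter)
	: "cc", "memory");

	return __orig;			/* non-zero: the mutex was taken */
}
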
@@ -31,7 +31,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" itee ne\n"
" wfene\n"
#else
" itt eq\n"
#endif
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
@@ -50,6 +53,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
__asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
" it eq\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
@@ -94,7 +98,10 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" itee ne\n"
" wfene\n"
#else
" itt eq\n"
#endif
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
@@ -113,6 +120,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" it eq\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
@@ -163,6 +171,11 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
#ifdef CONFIG_CPU_32v6K
" itet pl\n"
#else
" itt pl\n"
#endif
" strexpl %1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
" wfemi\n"
@@ -190,6 +203,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
" bne 1b"
#ifdef CONFIG_CPU_32v6K
"\n cmp %0, #0\n"
" itt eq\n"
" mcreq p15, 0, %0, c7, c10, 4\n"
" seveq"
#endif
......
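
The spinlock and rwlock paths above come in two shapes because of the optional WFE/SEV support: on CONFIG_CPU_32v6K the ne-conditioned wfene is folded into the same block as the eq-conditioned strexeq/teqeq ("itee ne"), while without it only the two eq instructions remain ("itt eq"); __raw_read_lock likewise switches between "itet pl" and "itt pl" depending on whether the mi-conditioned wfemi is present. A sketch of the resulting lock loop for the v6K case, with illustrative names and the memory barrier that follows it omitted:

typedef struct { volatile unsigned int lock; } example_spinlock_t;

static inline void example_spin_lock(example_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
	"1:	ldrex	%0, [%1]\n"
	"	teq	%0, #0\n"
	"	itee	ne\n"			/* ne: wfene; eq: strexeq, teqeq */
	"	wfene\n"			/* lock held: wait for an event  */
	"	strexeq	%0, %2, [%1]\n"		/* free: try to claim it         */
	"	teqeq	%0, #0\n"		/* did the exclusive store work? */
	"	bne	1b"			/* contended or lost: try again  */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");
}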