Commit 7ee81321 authored by Catalin Marinas

Clear the exclusive monitor in the vector stub

The patch adds a CLREX (or a dummy STREX) to the vector stub and removes
it from the __switch_to function. This allows atomic_set() to be a
simple STR instruction.
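
For illustration only (the helper name below is ours, not the kernel's),
the two atomic_set() flavours being contrasted look roughly like this;
the LDREX/STREX loop is the one removed from the atomic.h hunks at the
end of the diff, and the plain store is the #define that replaces it:

	typedef struct { volatile int counter; } atomic_t;

	/* Old ARMv6 atomic_set(): loop until the STREX paired with
	 * the LDREX succeeds. */
	static inline void atomic_set_old(atomic_t *v, int i)
	{
		unsigned long tmp;

		__asm__ __volatile__("@ atomic_set\n"
	"1:	ldrex	%0, [%1]\n"
	"	strex	%0, %2, [%1]\n"
	"	teq	%0, #0\n"
	"	bne	1b"
		: "=&r" (tmp)
		: "r" (&v->counter), "r" (i)
		: "cc");
	}

	/* New atomic_set(): a plain store.  Safe only because the vector
	 * stub now clears the exclusive monitor on every exception entry,
	 * forcing any interrupted LDREX/STREX sequence to retry. */
	#define atomic_set(v,i)	(((v)->counter) = (i))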

Linux currently clears the exclusive monitor at every context switch,
allowing threads to use a plain STR for atomic store operations.
However, this is not enough for atomic operations performed in a signal
handler. The patch fixes this case as well.
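
A sketch of the failure mode, assuming atomic_set() is a plain STR and
the interrupted thread is inside an LDREX/STREX read-modify-write; the
interleaving, register names and values are hypothetical:

	/*
	 *   interrupted thread                 signal handler
	 *   ------------------                 --------------
	 *   ldrex   r0, [counter]   @ r0 = 5
	 *   add     r0, r0, #1      @ r0 = 6
	 *                           <signal>   str  r1, [counter]  @ counter = 100
	 *                           <sigreturn>
	 *   strex   r1, r0, [counter]
	 */

The handler's plain STR is not guaranteed to clear the local exclusive
monitor, so the STREX may still succeed and write 6, silently discarding
the store of 100. With the monitor cleared in the vector stub, the
exception that delivers the signal invalidates the reservation, the
STREX fails, and the loop retries with the updated value.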

Note that in the SMP case a plain STR clears the state of the global
exclusive monitor, but its effect on the local exclusive monitor is
implementation defined; a CLREX is therefore needed when taking an
exception.
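
Pre-v6K cores lack the CLREX instruction, so the stub uses a dummy STREX
instead. A minimal C rendering of the idiom (the helper name and the
scratch variable are ours, for illustration):

	/* Clear the local exclusive monitor.  On v6K and later, CLREX
	 * does this directly; earlier ARMv6 cores issue a dummy STREX,
	 * whose side effect is to clear the monitor whether or not the
	 * store itself succeeds. */
	static inline void clear_exclusive_monitor(void)
	{
	#ifdef CONFIG_CPU_32v6K
		__asm__ __volatile__("clrex" : : : "memory");
	#else
		unsigned long tmp, scratch;

		__asm__ __volatile__(
		"	strex	%0, %1, [%2]\n"	/* dummy store-exclusive */
		: "=&r" (tmp)
		: "r" (0), "r" (&scratch)
		: "memory");
	#endif
	}
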
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 08ed90d4
@@ -709,13 +709,6 @@ ENTRY(__switch_to)
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
-#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
-	strex	r5, r4, [ip]			@ Clear exclusive monitor
-#endif
-#endif
 #if defined(CONFIG_HAS_TLS_REG)
 	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
 #elif !defined(CONFIG_TLS_REG_EMUL)
@@ -1050,6 +1043,13 @@ vector_\name:
 	@ (parent CPSR)
 	@
 	stmia	sp, {r0, lr}		@ save r0, lr
+#if __LINUX_ARM_ARCH__ >= 6
+#ifdef CONFIG_CPU_32v6K
+	clrex				@ clear the exclusive monitor
+#else
+	strex	lr, r0, [sp]		@ clear the exclusive monitor
+#endif
+#endif
 	mrs	lr, spsr
 	str	lr, [sp, #8]		@ save spsr
@@ -20,11 +20,6 @@
  */
 	.align	5
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
-	strex	r0, r1, [sp]			@ Clear the exclusive monitor
-#endif
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 /*
@@ -16,12 +16,6 @@
  */
 	.align	5
 ENTRY(v7_early_abort)
-	/*
-	 * The effect of data aborts on the exclusive access monitor is
-	 * UNPREDICTABLE. Do a CLREX to clear the state.
-	 */
-	clrex
-
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
@@ -21,6 +21,7 @@ typedef struct { volatile int counter; } atomic_t;
 #ifdef __KERNEL__
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 #if __LINUX_ARM_ARCH__ >= 6
@@ -31,20 +32,6 @@ typedef struct { volatile int counter; } atomic_t;
  * without using the following operations WILL break the atomic
  * nature of these ops.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -123,8 +110,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;