Commit d475f3f4 authored by Ivan Kokshaysky, committed by Linus Torvalds

[PATCH] alpha: additional smp barriers

As stated in Documentation/atomic_ops.txt, atomic functions that return
values must have memory barriers both before and after the operation.

Thanks to DaveM for pointing that out.
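
To make the requirement concrete, here is a minimal sketch of the classic
reference-counting pattern from Documentation/atomic_ops.txt that relies on
both barriers (the object and its fields are hypothetical;
atomic_dec_and_test() and kfree() are the real interfaces):

	/* CPU 0 drops what may be the last reference. */
	obj->status = OBJ_DEAD;				/* (A) plain store before the atomic op  */
	if (atomic_dec_and_test(&obj->refcount))	/* value-returning atomic operation      */
		kfree(obj);				/* (B) runs only for the final reference */

The barrier before the operation ensures that store (A) is visible to other
CPUs before the counter can be observed at zero; the barrier after it ensures
the decrement is visible before any of the accesses in (B). On Alpha the
ldx_l/stx_c sequence provides neither ordering by itself, hence the explicit
smp_mb() calls in this patch.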
Signed-off-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4595f251
@@ -100,18 +100,19 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 
 static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	addl %0,%3,%2\n"
 	"	addl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
@@ -120,54 +121,57 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	addq %0,%3,%2\n"
 	"	addq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	subl %0,%3,%2\n"
 	"	subl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	subq %0,%3,%2\n"
 	"	subq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
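
One design note: the "mb" removed from each asm body was emitted
unconditionally, even in uniprocessor kernels, while smp_mb() is a real
barrier only on SMP builds. A paraphrased sketch of the Alpha definitions of
that era (assumed to live in include/asm-alpha/system.h):

	#define barrier()	__asm__ __volatile__("" : : : "memory")
	#define mb()		__asm__ __volatile__("mb" : : : "memory")

	#ifdef CONFIG_SMP
	#define smp_mb()	mb()		/* real MB instruction on SMP  */
	#else
	#define smp_mb()	barrier()	/* compiler-only barrier on UP */
	#endif

So in addition to adding the missing barrier before each operation, the patch
lets UP builds skip the hardware barrier after it entirely.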