Commit b2d28b7e authored by Ralf Baechle

MIPS: Get rid of atomic_lock.

    
It was resulting in build errors for some configurations.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 2d5e7b9f
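
For readers skimming the diff below: on MIPS CPUs without LL/SC instructions the atomic helpers fall back to a plain read-modify-write, which this commit now guards by disabling local interrupts instead of taking the global atomic_lock spinlock. A minimal sketch of the resulting shape of atomic_add(), with the LL/SC branches elided and assuming the usual kernel definitions of cpu_has_llsc, local_irq_save() and local_irq_restore() (an illustration, not the verbatim file contents):

static __inline__ void atomic_add(int i, atomic_t * v)
{
	if (cpu_has_llsc) {
		/* LL/SC based implementation, unchanged by this commit */
	} else {
		unsigned long flags;

		/* was: spin_lock_irqsave(&atomic_lock, flags); */
		local_irq_save(flags);
		v->counter += i;
		local_irq_restore(flags);
		/* was: spin_unlock_irqrestore(&atomic_lock, flags); */
	}
}

Systems built around such CPUs run uniprocessor kernels in practice, so masking interrupts on the local CPU is enough to keep the update atomic, and the shared spinlock (and its extern declaration in this header) can go away.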
@@ -24,10 +24,9 @@
 #define _ASM_ATOMIC_H
 
 #include <asm/cpu-features.h>
+#include <asm/interrupt.h>
 #include <asm/war.h>
 
-extern spinlock_t atomic_lock;
-
 typedef struct { volatile int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)    { (i) }
@@ -85,9 +84,9 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		v->counter += i;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 }
@@ -127,9 +126,9 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		v->counter -= i;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 }
@@ -173,11 +172,11 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result += i;
 		v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
 	return result;
@@ -220,11 +219,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
 	return result;
@@ -277,12 +276,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		if (result >= 0)
 			v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
 	return result;
@@ -433,9 +432,9 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		v->counter += i;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 }
@@ -475,9 +474,9 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		v->counter -= i;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 }
@@ -521,11 +520,11 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result += i;
 		v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
 	return result;
@@ -568,11 +567,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
 	return result;
@@ -625,12 +624,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		if (result >= 0)
 			v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
 	return result;