Commit a57004e1 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] atomic: dec_and_lock use atomic primitives

Convert atomic_dec_and_lock to use new atomic primitives.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 8382bf2e
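For context, atomic_dec_and_lock() (the wrapper around the _atomic_dec_and_lock() routine in lib/dec_and_lock.c changed below) is typically used on a "put" path that must free an object under the lock protecting the structure it is looked up in. A minimal illustrative sketch, assuming a hypothetical struct obj with an embedded reference count and list node (none of this is part of the patch):

	/* Hypothetical example object, for illustration only. */
	struct obj {
		atomic_t refcount;
		struct list_head node;
	};

	void put_obj(struct obj *obj, spinlock_t *list_lock)
	{
		/* Drop a reference; list_lock is taken only if the count hits zero. */
		if (atomic_dec_and_lock(&obj->refcount, list_lock)) {
			list_del(&obj->node);	/* unlink while the lock is held */
			spin_unlock(list_lock);
			kfree(obj);
		}
	}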
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
-#ifdef __HAVE_ARCH_CMPXCHG
 /*
  * This is an implementation of the notion of "decrement a
  * reference count, and return locked if it decremented to zero".
  *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
  * NOTE NOTE NOTE! This is _not_ equivalent to
  *
  *	if (atomic_dec_and_test(&atomic)) {
@@ -52,21 +16,20 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
  *
  * because the spin-lock and the decrement must be
  * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
 	spin_unlock(lock);
 	return 0;
 }
-#endif
 EXPORT_SYMBOL(_atomic_dec_and_lock);
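The new SMP fast path relies on atomic_add_unless(v, a, u), which adds a to v unless v currently equals u, and returns non-zero if the addition was performed. A rough sketch of those semantics as a cmpxchg loop (illustrative only, not the code of any particular architecture):

	static int atomic_add_unless_sketch(atomic_t *v, int a, int u)
	{
		int c, old;

		c = atomic_read(v);
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;	/* another CPU changed v; retry with its value */
		return c != u;
	}

With a = -1 and u = 1, the call in _atomic_dec_and_lock() decrements the reference count unless that would take it to zero; the spinlock is then taken only in the final-reference case, which is what lets the old cmpxchg loop and the unconditional-lock fallback be removed.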