Commit bb7bed08 authored by Heiko Carstens, committed by Ingo Molnar

locking: Simplify spinlock inlining

For !DEBUG_SPINLOCK && !PREEMPT && SMP builds, the spin_unlock()
functions were always inlined via special defines that called
the __raw_* functions directly.

The out-of-line variants of these functions were generated
anyway.

Use the new per-variant mechanism (one __always_inline__*
define per unlock function) to force inlining of the unlock
functions, as before. This is not a functional change; it just
removes one additional way of forcing inlining.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124418.848735034@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 892a7c67
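
For context: the per-variant mechanism referred to above was introduced
by the parent commit (892a7c67). A minimal sketch of how it fits
together, with simplified bodies (this is an illustration of the
spinlock_api_smp.h / kernel/spinlock.c split, not the verbatim source):

/*
 * spinlock_api_smp.h: an always-inlinable body exists per unlock
 * variant (sketch; the real body also carries sparse annotations).
 */
static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}

/*
 * When __always_inline__spin_unlock is defined, the _spin_unlock()
 * entry point is remapped onto the inline body:
 */
#ifdef __always_inline__spin_unlock
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

/*
 * kernel/spinlock.c: the out-of-line variant is compiled only when
 * _spin_unlock() was not remapped above, so the exported symbol
 * still exists for the non-inlined configurations.
 */
#ifndef _spin_unlock
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	__spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);
#endif

This patch simply defines the six __always_inline__* symbols for the
nondebug, non-preempt case, which makes the old special-case #else
branch in spinlock.h redundant, as the diff below shows.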
include/linux/spinlock.h
@@ -259,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #define spin_lock_irq(lock)		_spin_lock_irq(lock)
 #define spin_lock_bh(lock)		_spin_lock_bh(lock)
 #define read_lock_irq(lock)		_read_lock_irq(lock)
 #define read_lock_bh(lock)		_read_lock_bh(lock)
 #define write_lock_irq(lock)		_write_lock_irq(lock)
 #define write_lock_bh(lock)		_write_lock_bh(lock)
-/*
- * We inline the unlock functions in the nondebug case:
- */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
-	!defined(CONFIG_SMP)
-# define spin_unlock(lock)		_spin_unlock(lock)
-# define read_unlock(lock)		_read_unlock(lock)
-# define write_unlock(lock)		_write_unlock(lock)
-# define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
-# define read_unlock_irq(lock)		_read_unlock_irq(lock)
-# define write_unlock_irq(lock)	_write_unlock_irq(lock)
-#else
-# define spin_unlock(lock) \
-	do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define read_unlock(lock) \
-	do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define write_unlock(lock) \
-	do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define spin_unlock_irq(lock) \
-do { \
-	__raw_spin_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-# define read_unlock_irq(lock) \
-do { \
-	__raw_read_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-# define write_unlock_irq(lock) \
-do { \
-	__raw_write_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-#endif
+#define spin_unlock(lock)		_spin_unlock(lock)
+#define read_unlock(lock)		_read_unlock(lock)
+#define write_unlock(lock)		_write_unlock(lock)
+#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
+#define read_unlock_irq(lock)		_read_unlock_irq(lock)
+#define write_unlock_irq(lock)		_write_unlock_irq(lock)
 
 #define spin_unlock_irqrestore(lock, flags) \
 do { \
...
include/linux/spinlock_api_smp.h
@@ -60,6 +60,18 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 							__releases(lock);
 
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
+#define __always_inline__spin_unlock
+#define __always_inline__read_unlock
+#define __always_inline__write_unlock
+#define __always_inline__spin_unlock_irq
+#define __always_inline__read_unlock_irq
+#define __always_inline__write_unlock_irq
+#endif
+
 #ifndef CONFIG_DEBUG_SPINLOCK
 #ifndef CONFIG_GENERIC_LOCKBREAK
...
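
Net effect for a !DEBUG_SPINLOCK && !PREEMPT && SMP build, as a rough
compiler's-eye sketch (my_lock is a placeholder spinlock_t, and the
expansions are abbreviated, not verbatim):

/* Before: the special-case #define expanded spin_unlock() directly: */
spin_unlock(&my_lock);
	/* -> do { __raw_spin_unlock(&(&my_lock)->raw_lock);
	           __release(&my_lock); } while (0) */

/* After: spin_unlock() -> _spin_unlock() -> inline __spin_unlock().
 * With !PREEMPT, preempt_enable() is a no-op, and with lockdep off
 * spin_release() compiles away, so this reduces to the same raw
 * unlock as before -- just via the generic per-variant path. */
spin_unlock(&my_lock);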