Commit 2f0c8457 authored by Thomas Gleixner

rt: Add the preempt-rt lock replacement APIs

Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
based locking functions for preempt-rt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 2c237cf3
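The mapping is transparent at the call sites: existing spin_lock()/mutex_lock()/down()/down_read() users keep compiling unchanged, but with CONFIG_PREEMPT_RT=y they resolve to the sleeping rt_mutex based variants introduced below, while atomic_spinlock_t and the atomic_spin_* calls keep the raw, non-sleeping, interrupt-disabling behaviour. A minimal usage sketch (illustrative only, not part of the patch; the device structure and function are hypothetical, and the atomic_spin_* names are assumed from earlier patches in this series):

struct hypo_device {
	spinlock_t lock;		/* becomes an rt_mutex based sleeping lock on -rt */
	atomic_spinlock_t hw_lock;	/* stays a raw, spinning lock on -rt */
	int counter;
};

static void hypo_device_update(struct hypo_device *dev)
{
	unsigned long flags;

	spin_lock(&dev->lock);		/* maps to rt_spin_lock() under PREEMPT_RT */
	dev->counter++;
	spin_unlock(&dev->lock);	/* maps to rt_spin_unlock() under PREEMPT_RT */

	atomic_spin_lock_irqsave(&dev->hw_lock, flags);		/* really disables interrupts */
	/* ... program hardware registers ... */
	atomic_spin_unlock_irqrestore(&dev->hw_lock, flags);
}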
@@ -173,6 +173,8 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
return (sem->count != 0);
}
#ifndef CONFIG_PREEMPT_RT
struct rw_semaphore {
/* XXX this should be able to be an atomic_t -- paulus */
signed int count;
@@ -213,6 +215,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
}
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_RWSEM_H */
@@ -261,6 +261,8 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
return (sem->count != 0);
}
#ifndef CONFIG_PREEMPT_RT
struct rw_semaphore {
signed long count;
spinlock_t wait_lock;
@@ -301,6 +303,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
}
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */
@@ -12,11 +12,85 @@
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/rt_lock.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
#ifdef CONFIG_PREEMPT_RT
#include <linux/rtmutex.h>
struct mutex {
struct rt_mutex lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#define __MUTEX_INITIALIZER(mutexname) \
{ \
.lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
__DEP_MAP_MUTEX_INITIALIZER(mutexname) \
}
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
extern void
__mutex_init(struct mutex *lock, char *name, struct lock_class_key *key);
extern void __lockfunc _mutex_lock(struct mutex *lock);
extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
extern int __lockfunc
_mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
extern int __lockfunc
_mutex_lock_killable_nested(struct mutex *lock, int subclass);
extern int __lockfunc _mutex_trylock(struct mutex *lock);
extern void __lockfunc _mutex_unlock(struct mutex *lock);
#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
#define mutex_lock(l) _mutex_lock(l)
#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
#define mutex_lock_killable(l) _mutex_lock_killable(l)
#define mutex_trylock(l) _mutex_trylock(l)
#define mutex_unlock(l) _mutex_unlock(l)
#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
# define mutex_lock_interruptible_nested(l, s) \
_mutex_lock_interruptible_nested(l, s)
# define mutex_lock_killable_nested(l, s) \
_mutex_lock_killable_nested(l, s)
#else
# define mutex_lock_nested(l, s) _mutex_lock(l)
# define mutex_lock_interruptible_nested(l, s) \
_mutex_lock_interruptible(l)
# define mutex_lock_killable_nested(l, s) \
_mutex_lock_killable(l)
#endif
# define mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
\
__mutex_init((mutex), #mutex, &__key); \
} while (0)
#else /* PREEMPT_RT */
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -87,13 +161,6 @@ do { \
# define mutex_destroy(mutex) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -150,6 +217,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
#endif /* !PREEMPT_RT */
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#endif
@@ -75,14 +75,16 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
struct spinlock;
struct atomic_spinlock;
struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
atomic_spinlock_t *alock;
spinlock_t *slock;
struct atomic_spinlock *alock;
struct spinlock *slock;
#endif
};
@@ -142,7 +144,7 @@ struct plist_node {
* @lock: list spinlock, remembered for debugging
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
plist_head_init(struct plist_head *head, struct spinlock *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
@@ -158,7 +160,7 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
* @lock: list atomic_spinlock, remembered for debugging
*/
static inline void
plist_head_init_atomic(struct plist_head *head, atomic_spinlock_t *lock)
plist_head_init_atomic(struct plist_head *head, struct atomic_spinlock *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifndef __LINUX_RT_LOCK_H
#define __LINUX_RT_LOCK_H
/*
* Real-Time Preemption Support
*
* started by Ingo Molnar:
*
* Copyright (C) 2004, 2005 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* This file contains the main data structure definitions.
*/
#include <linux/rtmutex.h>
#include <asm/atomic.h>
#include <linux/spinlock_types.h>
#ifdef CONFIG_PREEMPT_RT
static inline int preempt_rt(void) { return 1; }
/*
* spinlocks - an RT mutex plus lock-break field:
*/
typedef struct spinlock {
struct rt_mutex lock;
unsigned int break_lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} spinlock_t;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __RT_SPIN_INITIALIZER(name) \
{ \
.wait_lock = __ATOMIC_SPIN_LOCK_UNLOCKED(name), \
.save_state = 1, \
.file = __FILE__, \
.line = __LINE__ , \
}
#else
# define __RT_SPIN_INITIALIZER(name) \
{ .wait_lock = __ATOMIC_SPIN_LOCK_UNLOCKED(name) }
#endif
#define __SPIN_LOCK_UNLOCKED(name) \
{ .lock = __RT_SPIN_INITIALIZER(name), \
SPIN_DEP_MAP_INIT(name) }
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(spin_old_style)
#define __DEFINE_SPINLOCK(name) \
spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
#define DEFINE_SPINLOCK(name) \
spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
#define spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__rt_spin_lock_init(lock, #lock, &__key); \
} while (0)
extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
extern int __lockfunc
rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
/*
* lockdep-less calls, for derived types like rwlock:
* (for trylock they can use rt_mutex_trylock() directly).
*/
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
/*
* rwlocks - an RW semaphore plus lock-break field:
*/
typedef struct {
struct rt_mutex lock;
int read_depth;
unsigned int break_lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} rwlock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
#define __RW_LOCK_UNLOCKED(name) \
{ .lock = __RT_SPIN_INITIALIZER(name), \
RW_DEP_MAP_INIT(name) }
#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(rw_old_style)
#define DEFINE_RWLOCK(name) \
rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock,
unsigned long *flags);
extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
extern void
__rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
#define rwlock_init(rwl) \
do { \
static struct lock_class_key __key; \
\
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
/*
* RW-semaphores are a spinlock plus a reader-depth count.
*
* Note that the semantics are different from the usual
* Linux rw-sems, in PREEMPT_RT mode we do not allow
* multiple readers to hold the lock at once, we only allow
* a read-lock owner to read-lock recursively. This is
* better for latency, makes the implementation inherently
* fair and makes it simpler as well:
*/
struct rw_semaphore {
struct rt_mutex lock;
int read_depth;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#define __RWSEM_INITIALIZER(name) \
{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
RW_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(lockname) \
struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
struct lock_class_key *key);
# define rt_init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
__rt_rwsem_init((sem), #sem, &__key); \
} while (0)
extern void rt_down_write(struct rw_semaphore *rwsem);
extern void
rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
extern void
rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_read(struct rw_semaphore *rwsem);
extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
extern void rt_up_read(struct rw_semaphore *rwsem);
extern void rt_up_write(struct rw_semaphore *rwsem);
extern void rt_downgrade_write(struct rw_semaphore *rwsem);
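/*
 * Illustrative usage sketch (not part of this patch), showing the
 * recursion rule described above. "hypo_sem" is a made-up name; the
 * rt_down_* calls are the ones declared in this file:
 *
 *	static DECLARE_RWSEM(hypo_sem);
 *
 *	rt_down_write(&hypo_sem);
 *	rt_down_read(&hypo_sem);	// same owner: read_depth becomes 1
 *	rt_up_read(&hypo_sem);		// only drops read_depth back to 0
 *	rt_up_write(&hypo_sem);		// releases the underlying rt_mutex
 *
 * A different task calling rt_down_read() while the lock is held (for
 * reading or writing) blocks until the single owner releases it; unlike
 * mainline rw-semaphores, concurrent readers are never allowed.
 */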
/*
* Semaphores - a spinlock plus the semaphore count:
*/
struct semaphore {
atomic_t count;
struct rt_mutex lock;
};
#define DEFINE_SEMAPHORE(name) \
struct semaphore name = \
{ .count = { 1 }, .lock = __RT_MUTEX_INITIALIZER(name.lock) }
extern void
__sema_init(struct semaphore *sem, int val, char *name, char *file, int line);
#define rt_sema_init(sem, val) \
__sema_init(sem, val, #sem, __FILE__, __LINE__)
/*
* No locked initialization for RT semaphores
*/
extern void rt_down(struct semaphore *sem);
extern int rt_down_interruptible(struct semaphore *sem);
extern int rt_down_timeout(struct semaphore *sem, long jiffies);
extern int rt_down_trylock(struct semaphore *sem);
extern void rt_up(struct semaphore *sem);
#define rt_sem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
#define rt_sema_count(s) atomic_read(&(s)->count)
#else
static inline int preempt_rt(void) { return 0; }
#endif /* CONFIG_PREEMPT_RT */
#endif
@@ -5,6 +5,60 @@
# error "please don't include this file directly"
#endif
#ifdef CONFIG_PREEMPT_RT
#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
#define write_trylock_irqsave(lock, flags) \
__cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
#define write_lock(lock) rt_write_lock(lock)
#define read_lock(lock) rt_read_lock(lock)
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = rt_read_lock_irqsave(lock); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = rt_write_lock_irqsave(lock); \
} while (0)
#define read_lock_irq(lock) rt_read_lock(lock)
#define read_lock_bh(lock) rt_read_lock(lock)
#define write_lock_irq(lock) rt_write_lock(lock)
#define write_lock_bh(lock) rt_write_lock(lock)
#define read_unlock(lock) rt_read_unlock(lock)
#define write_unlock(lock) rt_write_unlock(lock)
#define read_unlock_irq(lock) rt_read_unlock(lock)
#define write_unlock_irq(lock) rt_write_unlock(lock)
#define read_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
(void) flags; \
rt_read_unlock(lock); \
} while (0)
#define read_unlock_bh(lock) rt_read_unlock(lock)
#define write_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
(void) flags; \
rt_write_unlock(lock); \
} while (0)
#define write_unlock_bh(lock) rt_write_unlock(lock)
#else
/*
* rwlock related methods
*
@@ -147,5 +201,6 @@ do { \
write_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
#endif
#endif /* __LINUX_RWLOCK_H */
@@ -77,6 +77,7 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
return (sem->activity != 0);
}
#ifndef CONFIG_PREEMPT_RT
/*
* Non preempt-rt implementation of rw_semaphore. Same as above, but
* restricted vs. ownership. i.e. ownerless locked state and non owner
@@ -129,6 +130,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->activity != 0);
}
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rt_lock.h>
#include <asm/system.h>
#include <asm/atomic.h>
@@ -89,6 +90,59 @@ extern void anon_up_read_non_owner(struct rw_anon_semaphore *sem);
# define anon_up_read_non_owner(sem) anon_up_read(sem)
#endif
#ifdef CONFIG_PREEMPT_RT
#include <linux/rt_lock.h>
#define init_rwsem(sem) rt_init_rwsem(sem)
#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
static inline void down_read(struct rw_semaphore *sem)
{
rt_down_read(sem);
}
static inline int down_read_trylock(struct rw_semaphore *sem)
{
return rt_down_read_trylock(sem);
}
static inline void down_write(struct rw_semaphore *sem)
{
rt_down_write(sem);
}
static inline int down_write_trylock(struct rw_semaphore *sem)
{
return rt_down_write_trylock(sem);
}
static inline void up_read(struct rw_semaphore *sem)
{
rt_up_read(sem);
}
static inline void up_write(struct rw_semaphore *sem)
{
rt_up_write(sem);
}
static inline void downgrade_write(struct rw_semaphore *sem)
{
rt_downgrade_write(sem);
}
static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
{
return rt_down_read_nested(sem, subclass);
}
static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
{
rt_down_write_nested(sem, subclass);
}
#else
/*
* Non preempt-rt implementations
*/
@@ -136,5 +190,6 @@ static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
{
anon_down_write_nested((struct rw_anon_semaphore *)sem, subclass);
}
#endif
#endif /* _LINUX_RWSEM_H */
@@ -58,6 +58,46 @@ extern int __must_check anon_down_trylock(struct anon_semaphore *sem);
extern int __must_check anon_down_timeout(struct anon_semaphore *sem, long jiffies);
extern void anon_up(struct anon_semaphore *sem);
#ifdef CONFIG_PREEMPT_RT
static inline void sema_init(struct semaphore *sem, int val)
{
rt_sema_init(sem, val);
}
static inline void semaphore_init(struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void down(struct semaphore *sem)
{
rt_down(sem);
}
static inline int __must_check down_interruptible(struct semaphore *sem)
{
return rt_down_interruptible(sem);
}
static inline int __must_check down_trylock(struct semaphore *sem)
{
return rt_down_trylock(sem);
}
static inline int __must_check
down_timeout(struct semaphore *sem, long jiffies)
{
return rt_down_timeout(sem, jiffies);
}
static inline void up(struct semaphore *sem)
{
rt_up(sem);
}
#else
/*
* Non preempt-rt maps semaphores to anon semaphores
*/
@@ -125,5 +165,6 @@ static inline void up(struct semaphore *sem)
{
anon_up((struct anon_semaphore *)sem);
}
#endif
#endif /* __LINUX_SEMAPHORE_H */
@@ -57,23 +57,6 @@
#include <asm/system.h>
/*
* Must define these before including other files, inline functions need them
*/
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
extra \
".ifndef " LOCK_SECTION_NAME "\n\t" \
LOCK_SECTION_NAME ":\n\t" \
".endif\n"
#define LOCK_SECTION_END \
".previous\n\t"
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
* Pull the raw_spinlock_t and raw_rwlock_t definitions:
*/
@@ -283,6 +266,98 @@ _atomic_dec_and_atomic_lock(atomic_t *atomic, atomic_spinlock_t *lock);
#define atomic_dec_and_atomic_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_atomic_lock(atomic, lock))
#ifdef CONFIG_PREEMPT_RT
#include <linux/rt_lock.h>
#define spin_lock(lock) rt_spin_lock(lock)
#define spin_lock_bh(lock) rt_spin_lock(lock)
#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
#ifdef CONFIG_LOCKDEP
# define spin_lock_nested(lock, subclass) \
rt_spin_lock_nested(lock, subclass)
# define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = 0; \
rt_spin_lock_nested(lock, subclass); \
} while (0)
#else
# define spin_lock_nested(lock, subclass) \
rt_spin_lock(lock)
# define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = 0; \
rt_spin_lock(lock); \
} while (0)
#endif
#define spin_lock_irq(lock) rt_spin_lock(lock)
#define spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = 0; \
rt_spin_lock(lock); \
} while (0)
/* FIXME: we need rt_spin_lock_nested */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
#define spin_unlock(lock) rt_spin_unlock(lock)
#define spin_unlock_bh(lock) rt_spin_unlock(lock)
#define spin_unlock_irq(lock) rt_spin_unlock(lock)
#define spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
(void) flags; \
rt_spin_unlock(lock); \
} while (0)
#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock(lock))
#define spin_trylock_irq(lock) __cond_lock(lock, rt_spin_trylock(lock))
#define spin_trylock_irqsave(lock, flags) \
({ \
typecheck(unsigned long, flags); \
flags = 0; \
__cond_lock(lock, rt_spin_trylock(lock)); \
})
#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
# define spin_is_contended(lock) ((lock)->break_lock)
#else
# define spin_is_contended(lock) (((void)(lock), 0))
#endif
static inline int spin_can_locked(spinlock_t *lock)
{
return !rt_mutex_is_locked(&lock->lock);
}
static inline int spin_is_locked(spinlock_t *lock)
{
return rt_mutex_is_locked(&lock->lock);
}
static inline void assert_spin_locked(spinlock_t *lock)
{
BUG_ON(!spin_is_locked(lock));
}
#define atomic_dec_and_lock(atomic, lock) \
atomic_dec_and_spin_lock(atomic, lock)
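/*
 * Illustrative sketch (not part of the original patch): under PREEMPT_RT
 * the _irq/_irqsave variants above do not disable hardware interrupts;
 * "flags" is kept only for API compatibility and is set to 0.
 * "hypo_lock" is a made-up name:
 *
 *	spinlock_t hypo_lock;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&hypo_lock, flags);	// rt_spin_lock(), may sleep,
 *						// interrupts remain enabled
 *	...
 *	spin_unlock_irqrestore(&hypo_lock, flags); // rt_spin_unlock(), flags unused
 *
 * Code that genuinely needs interrupts disabled has to use
 * atomic_spinlock_t and the atomic_spin_* variants instead.
 */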
#else
/*
* Map spin* to atomic_spin* for PREEMPT_RT=n
*/
@@ -420,6 +495,8 @@ do { \
atomic_dec_and_atomic_lock(atomic, (atomic_spinlock_t *)lock); \
})
#endif /* !PREEMPT_RT */
/*
* Get the rwlock part
*/
@@ -58,7 +58,7 @@ void __lockfunc
_atomic_spin_unlock_irqrestore(atomic_spinlock_t *lock, unsigned long flags)
__releases(lock);
#ifndef CONFIG_PREEMPT_RT
void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
@@ -85,5 +85,6 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
@@ -9,6 +9,23 @@
* Released under the General Public License (GPL).
*/
/*
* Must define these before including other files, inline functions need them
*/
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
extra \
".ifndef " LOCK_SECTION_NAME "\n\t" \
LOCK_SECTION_NAME ":\n\t" \
".endif\n"
#define LOCK_SECTION_END \
".previous\n\t"
#define __lockfunc __attribute__((section(".spinlock.text")))
#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
@@ -17,7 +34,7 @@
#include <linux/lockdep.h>
typedef struct {
typedef struct atomic_spinlock {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
@@ -64,11 +81,12 @@ typedef struct {
#define DEFINE_ATOMIC_SPINLOCK(x) \
atomic_spinlock_t x = __ATOMIC_SPIN_LOCK_UNLOCKED(x)
#ifndef CONFIG_PREEMPT_RT
/*
* For PREEMPT_RT=n we use the same data structures and the spinlock
* functions are mapped to the atomic_spinlock functions
*/
typedef struct {
typedef struct spinlock {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
@@ -109,4 +127,6 @@ typedef struct {
#include <linux/rwlock_types.h>
#endif
#endif /* __LINUX_SPINLOCK_TYPES_H */
@@ -7,7 +7,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
async.o
@@ -28,7 +28,10 @@ obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
ifneq ($(CONFIG_PREEMPT_RT),y)
obj-y += mutex.o
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
endif
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -40,6 +43,7 @@ endif
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_PREEMPT_RT) += rt.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
ifneq ($(CONFIG_SMP),y)
@@ -82,7 +82,11 @@ int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
#ifdef CONFIG_PREEMPT_RT
DEFINE_RWLOCK(tasklist_lock); /* outer */
#else
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
#endif
int nr_processes(void)
{
/*
* kernel/rt.c
*
* Real-Time Preemption Support
*
* started by Ingo Molnar:
*
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* historic credit for proving that Linux spinlocks can be implemented via
* RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
* and others) who prototyped it on 2.4 and did lots of comparative
* research and analysis; TimeSys, for proving that you can implement a
* fully preemptible kernel via the use of IRQ threading and mutexes;
* Bill Huey for persuasively arguing on lkml that the mutex model is the
* right one; and to MontaVista, who ported pmutexes to 2.6.
*
* This code is a from-scratch implementation and is not based on pmutexes,
* but the idea of converting spinlocks to mutexes is used here too.
*
* lock debugging, locking tree, deadlock detection:
*
* Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
* Released under the General Public License (GPL).
*
* Includes portions of the generic R/W semaphore implementation from:
*
* Copyright (c) 2001 David Howells (dhowells@redhat.com).
* - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
* - Derived also from comments by Linus
*
* Pending ownership of locks and ownership stealing:
*
* Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
*
* (also by Steven Rostedt)
* - Converted single pi_lock to individual task locks.
*
* By Esben Nielsen:
* Doing priority inheritance with help of the scheduler.
*
* Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
* - major rework based on Esben Nielsen's initial patch
* - replaced thread_info references by task_struct refs
* - removed task->pending_owner dependency
* - BKL drop/reacquire for semaphore style locks to avoid deadlocks
* in the scheduler return path as discussed with Steven Rostedt
*
* Copyright (C) 2006, Kihon Technologies Inc.
* Steven Rostedt <rostedt@goodmis.org>
* - debugged and patched Thomas Gleixner's rework.
* - added back the cmpxchg to the rework.
* - turned atomic require back on for SMP.
*/
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/plist.h>
#include <linux/fs.h>
#include <linux/futex.h>
#include <linux/hrtimer.h>
#include "rtmutex_common.h"
#ifdef CONFIG_PREEMPT_RT
/*
* Unlock these on crash:
*/
void zap_rt_locks(void)
{
//trace_lock_init();
}
#endif
/*
* struct mutex functions
*/
void __mutex_init(struct mutex *lock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
__rt_mutex_init(&lock->lock, name);
}
EXPORT_SYMBOL(__mutex_init);
void __lockfunc _mutex_lock(struct mutex *lock)
{
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_mutex_lock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_lock);
int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
{
int ret;
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_lock_interruptible(&lock->lock, 0);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(_mutex_lock_interruptible);
int __lockfunc _mutex_lock_killable(struct mutex *lock)
{
int ret;
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_lock_killable(&lock->lock, 0);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(_mutex_lock_killable);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
{
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_mutex_lock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_lock_nested);
int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
{
int ret;
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
ret = rt_mutex_lock_interruptible(&lock->lock, 0);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
{
int ret;
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
ret = rt_mutex_lock_killable(&lock->lock, 0);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(_mutex_lock_killable_nested);
#endif
int __lockfunc _mutex_trylock(struct mutex *lock)
{
int ret = rt_mutex_trylock(&lock->lock);
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(_mutex_trylock);
void __lockfunc _mutex_unlock(struct mutex *lock)
{
mutex_release(&lock->dep_map, 1, _RET_IP_);
rt_mutex_unlock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_unlock);
/*
* rwlock_t functions
*/
int __lockfunc rt_write_trylock(rwlock_t *rwlock)
{
int ret = rt_mutex_trylock(&rwlock->lock);
if (ret)
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(rt_write_trylock);
int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
{
*flags = 0;
return rt_write_trylock(rwlock);
}
EXPORT_SYMBOL(rt_write_trylock_irqsave);
int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
struct rt_mutex *lock = &rwlock->lock;
unsigned long flags;
int ret;
/*
* Read locks within the self-held write lock succeed.
*/
spin_lock_irqsave(&lock->wait_lock, flags);
if (rt_mutex_real_owner(lock) == current) {
spin_unlock_irqrestore(&lock->wait_lock, flags);
rwlock->read_depth++;
rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
return 1;
}
spin_unlock_irqrestore(&lock->wait_lock, flags);
ret = rt_mutex_trylock(lock);
if (ret)
rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(rt_read_trylock);
void __lockfunc rt_write_lock(rwlock_t *rwlock)
{
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
__rt_spin_lock(&rwlock->lock);
}
EXPORT_SYMBOL(rt_write_lock);
void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
unsigned long flags;
struct rt_mutex *lock = &rwlock->lock;
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
/*
* Read locks within the write lock succeed.
*/
spin_lock_irqsave(&lock->wait_lock, flags);
if (rt_mutex_real_owner(lock) == current) {
spin_unlock_irqrestore(&lock->wait_lock, flags);
rwlock->read_depth++;
return;
}
spin_unlock_irqrestore(&lock->wait_lock, flags);
__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_read_lock);
void __lockfunc rt_write_unlock(rwlock_t *rwlock)
{
/* NOTE: we always pass in '1' for nested, for simplicity */
rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
__rt_spin_unlock(&rwlock->lock);
}
EXPORT_SYMBOL(rt_write_unlock);
void __lockfunc rt_read_unlock(rwlock_t *rwlock)
{
struct rt_mutex *lock = &rwlock->lock;
unsigned long flags;
rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
// TRACE_WARN_ON(lock->save_state != 1);
/*
* Read locks within the self-held write lock succeed.
*/
spin_lock_irqsave(&lock->wait_lock, flags);
if (rt_mutex_real_owner(lock) == current && rwlock->read_depth) {
spin_unlock_irqrestore(&lock->wait_lock, flags);
rwlock->read_depth--;
return;
}
spin_unlock_irqrestore(&lock->wait_lock, flags);
__rt_spin_unlock(&rwlock->lock);
}
EXPORT_SYMBOL(rt_read_unlock);
unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
{
rt_write_lock(rwlock);
return 0;
}
EXPORT_SYMBOL(rt_write_lock_irqsave);
unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
{
rt_read_lock(rwlock);
return 0;
}
EXPORT_SYMBOL(rt_read_lock_irqsave);
void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
lockdep_init_map(&rwlock->dep_map, name, key, 0);
#endif
__rt_mutex_init(&rwlock->lock, name);
rwlock->read_depth = 0;
}
EXPORT_SYMBOL(__rt_rwlock_init);
/*
* rw_semaphores
*/
void rt_up_write(struct rw_semaphore *rwsem)
{
rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
rt_mutex_unlock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_up_write);
void rt_up_read(struct rw_semaphore *rwsem)
{
unsigned long flags;
rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
/*
* Read locks within the self-held write lock succeed.
*/
spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) {
spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
rwsem->read_depth--;
return;
}
spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
rt_mutex_unlock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_up_read);
/*
* downgrade a write lock into a read lock
* - just wake up any readers at the front of the queue
*/
void rt_downgrade_write(struct rw_semaphore *rwsem)
{
BUG();
}
EXPORT_SYMBOL(rt_downgrade_write);
int rt_down_write_trylock(struct rw_semaphore *rwsem)
{
int ret = rt_mutex_trylock(&rwsem->lock);
if (ret)
rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(rt_down_write_trylock);
void rt_down_write(struct rw_semaphore *rwsem)
{
rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
rt_mutex_lock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_down_write);
void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
{
rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
rt_mutex_lock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_down_write_nested);
int rt_down_read_trylock(struct rw_semaphore *rwsem)
{
unsigned long flags;
int ret;
/*
* Read locks within the self-held write lock succeed.
*/
spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
if (rt_mutex_real_owner(&rwsem->lock) == current) {
spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
rwsem_acquire_read(&rwsem->dep_map, 0, 1, _RET_IP_);
rwsem->read_depth++;
return 1;
}
spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
ret = rt_mutex_trylock(&rwsem->lock);
if (ret)
rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(rt_down_read_trylock);
static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
unsigned long flags;
rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
/*
* Read locks within the write lock succeed.
*/
spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
if (rt_mutex_real_owner(&rwsem->lock) == current) {
spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
rwsem->read_depth++;
return;
}
spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
rt_mutex_lock(&rwsem->lock);
}
void rt_down_read(struct rw_semaphore *rwsem)
{
__rt_down_read(rwsem, 0);
}
EXPORT_SYMBOL(rt_down_read);
void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
{
__rt_down_read(rwsem, subclass);
}
EXPORT_SYMBOL(rt_down_read_nested);
void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
lockdep_init_map(&rwsem->dep_map, name, key, 0);
#endif
__rt_mutex_init(&rwsem->lock, name);
rwsem->read_depth = 0;
}
EXPORT_SYMBOL(__rt_rwsem_init);
/*
* Semaphores
*/
/*
* Linux Semaphores implemented via RT-mutexes.
*
* In the down() variants we use the mutex as the semaphore blocking
* object: we always acquire it, decrease the counter and keep the lock
* locked if we did the 1->0 transition. The next down() will then block.
*
* In the up() path we atomically increase the counter and do the
* unlock if we were the one doing the 0->1 transition.
*/
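/*
 * Worked example (illustrative, not part of the original patch) of the
 * counter/lock interplay described above, for a semaphore initialized
 * with val = 2:
 *
 *	down():	mutex acquired, count 2 -> 1; count is still > 0, so
 *		__down_complete() releases the mutex again
 *	down():	mutex acquired, count 1 -> 0; this is the 1 -> 0
 *		transition, so the mutex stays locked
 *	down():	(another task) blocks on the mutex
 *	up():	(by the holder) count 0 -> 1; this is the 0 -> 1
 *		transition, so the mutex is unlocked and the blocked
 *		down() gets the semaphore next
 */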
static inline void __down_complete(struct semaphore *sem)
{
int count = atomic_dec_return(&sem->count);
if (unlikely(count > 0))
rt_mutex_unlock(&sem->lock);
}
void rt_down(struct semaphore *sem)
{
rt_mutex_lock(&sem->lock);
__down_complete(sem);
}
EXPORT_SYMBOL(rt_down);
int rt_down_interruptible(struct semaphore *sem)
{
int ret;
ret = rt_mutex_lock_interruptible(&sem->lock, 0);
if (ret)
return ret;
__down_complete(sem);
return 0;
}
EXPORT_SYMBOL(rt_down_interruptible);
int rt_down_timeout(struct semaphore *sem, long jiff)
{
struct hrtimer_sleeper t;
struct timespec ts;
unsigned long expires = jiffies + jiff + 1;
int ret;
/*
* rt_mutex_slowlock() can use an interruptible sleep, but that requires
* TASK_INTERRUPTIBLE, while down_timeout() uses TASK_UNINTERRUPTIBLE.
* To handle this we loop if a signal caused the timeout and then
* recalculate the remaining timeout.
* Yes Thomas, this is a hack! But we can fix it right later.
*/
do {
jiffies_to_timespec(jiff, &ts);
hrtimer_init_on_stack(&t.timer, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
t.timer._expires = timespec_to_ktime(ts);
ret = rt_mutex_timed_lock(&sem->lock, &t, 0);
if (ret != -EINTR)
break;
/* a signal occurred, but down_timeout() does not handle signals */
jiff = expires - jiffies;
} while (jiff > 0);
if (!ret)
__down_complete(sem);
else
ret = -ETIME;
return ret;
}
EXPORT_SYMBOL(rt_down_timeout);
/*
* try to down the semaphore, 0 on success and 1 on failure. (inverted)
*/
int rt_down_trylock(struct semaphore *sem)
{
/*
* Here we are a tiny bit different from ordinary Linux semaphores,
* because we can get 'transient' locking-failures when say a
* process decreases the count from 9 to 8 and locks/releases the
* embedded mutex internally. It would be quite complex to remove
* these transient failures, so let's try it the simple way first:
*/
if (rt_mutex_trylock(&sem->lock)) {
__down_complete(sem);
return 0;
}
return 1;
}
EXPORT_SYMBOL(rt_down_trylock);
void rt_up(struct semaphore *sem)
{
int count;
/*
* Disable preemption to make sure a highprio trylock-er cannot
* preempt us here and get into an infinite loop:
*/
preempt_disable();
count = atomic_inc_return(&sem->count);
/*
* If we did the 0 -> 1 transition then we are the ones to unlock it:
*/
if (likely(count == 1))
rt_mutex_unlock(&sem->lock);
preempt_enable();
}
EXPORT_SYMBOL(rt_up);
void __sema_init(struct semaphore *sem, int val,
char *name, char *file, int line)
{
atomic_set(&sem->count, val);
switch (val) {
case 0:
__rt_mutex_init(&sem->lock, name);
rt_mutex_lock(&sem->lock);
break;
default:
__rt_mutex_init(&sem->lock, name);
break;
}
}
EXPORT_SYMBOL(__sema_init);
@@ -14,6 +14,8 @@
* frame contact the architecture maintainers.
*/
#ifndef CONFIG_PREEMPT_RT
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
@@ -221,3 +223,4 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
}
EXPORT_SYMBOL(_write_unlock_bh);
#endif
@@ -31,6 +31,7 @@ void __atomic_spin_lock_init(atomic_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__atomic_spin_lock_init);
#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
@@ -46,8 +47,8 @@ void __rwlock_init(rwlock_t *lock, const char *name,
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);
#endif
static void spin_bug(atomic_spinlock_t *lock, const char *msg)
{
@@ -154,6 +155,8 @@ void _raw_spin_unlock(atomic_spinlock_t *lock)
__raw_spin_unlock(&lock->raw_lock);
}
#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
@@ -295,3 +298,4 @@ void _raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
__raw_write_unlock(&lock->raw_lock);
}
#endif