Commit 2f0c8457 authored by Thomas Gleixner

rt: Add the preempt-rt lock replacement APIs

Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
based locking functions for preempt-rt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 2c237cf3
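
Under PREEMPT_RT each of these primitives becomes a thin wrapper around struct rt_mutex, so the formerly spinning locks turn into priority-inheriting sleeping locks while call sites stay source-compatible. The out-of-line wrappers live in a new kernel/rt.c added by this patch (its diff is collapsed further down); as a rough, illustrative sketch only -- not the literal kernel/rt.c code -- the mutex side of the mapping boils down to something like:

void __lockfunc _mutex_lock(struct mutex *lock)
{
	/* lockdep annotation omitted; the embedded rt_mutex provides PI */
	rt_mutex_lock(&lock->lock);	/* may sleep on contention */
}

void __lockfunc _mutex_unlock(struct mutex *lock)
{
	rt_mutex_unlock(&lock->lock);
}
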
@@ -173,6 +173,8 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
return (sem->count != 0);
}
#ifndef CONFIG_PREEMPT_RT
struct rw_semaphore {
/* XXX this should be able to be an atomic_t -- paulus */
signed int count;
@@ -213,6 +215,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
}
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_RWSEM_H */
@@ -261,6 +261,8 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
return (sem->count != 0);
}
#ifndef CONFIG_PREEMPT_RT
struct rw_semaphore {
signed long count;
spinlock_t wait_lock;
@@ -301,6 +303,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
}
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */
@@ -12,11 +12,85 @@
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/rt_lock.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
#ifdef CONFIG_PREEMPT_RT
#include <linux/rtmutex.h>
struct mutex {
struct rt_mutex lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#define __MUTEX_INITIALIZER(mutexname) \
{ \
.lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
__DEP_MAP_MUTEX_INITIALIZER(mutexname) \
}
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
extern void
__mutex_init(struct mutex *lock, char *name, struct lock_class_key *key);
extern void __lockfunc _mutex_lock(struct mutex *lock);
extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
extern int __lockfunc
_mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
extern int __lockfunc
_mutex_lock_killable_nested(struct mutex *lock, int subclass);
extern int __lockfunc _mutex_trylock(struct mutex *lock);
extern void __lockfunc _mutex_unlock(struct mutex *lock);
#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
#define mutex_lock(l) _mutex_lock(l)
#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
#define mutex_lock_killable(l) _mutex_lock_killable(l)
#define mutex_trylock(l) _mutex_trylock(l)
#define mutex_unlock(l) _mutex_unlock(l)
#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
# define mutex_lock_interruptible_nested(l, s) \
_mutex_lock_interruptible_nested(l, s)
# define mutex_lock_killable_nested(l, s) \
_mutex_lock_killable_nested(l, s)
#else
# define mutex_lock_nested(l, s) _mutex_lock(l)
# define mutex_lock_interruptible_nested(l, s) \
_mutex_lock_interruptible(l)
# define mutex_lock_killable_nested(l, s) \
_mutex_lock_killable(l)
#endif
# define mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
\
__mutex_init((mutex), #mutex, &__key); \
} while (0)
#else /* PREEMPT_RT */
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -87,13 +161,6 @@ do { \
# define mutex_destroy(mutex) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -150,6 +217,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
#endif /* !PREEMPT_RT */
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#endif
@@ -75,14 +75,16 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
struct spinlock;
struct atomic_spinlock;
struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
atomic_spinlock_t *alock;
struct atomic_spinlock *alock;
spinlock_t *slock;
struct spinlock *slock;
#endif
};
@@ -142,7 +144,7 @@ struct plist_node {
* @lock: list spinlock, remembered for debugging
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
plist_head_init(struct plist_head *head, struct spinlock *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
@@ -158,7 +160,7 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
* @lock: list atomic_spinlock, remembered for debugging
*/
static inline void
plist_head_init_atomic(struct plist_head *head, atomic_spinlock_t *lock)
plist_head_init_atomic(struct plist_head *head, struct atomic_spinlock *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifndef __LINUX_RT_LOCK_H
#define __LINUX_RT_LOCK_H
/*
* Real-Time Preemption Support
*
* started by Ingo Molnar:
*
* Copyright (C) 2004, 2005 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* This file contains the main data structure definitions.
*/
#include <linux/rtmutex.h>
#include <asm/atomic.h>
#include <linux/spinlock_types.h>
#ifdef CONFIG_PREEMPT_RT
static inline int preempt_rt(void) { return 1; }
/*
* spinlocks - an RT mutex plus lock-break field:
*/
typedef struct spinlock {
struct rt_mutex lock;
unsigned int break_lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} spinlock_t;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __RT_SPIN_INITIALIZER(name) \
{ \
.wait_lock = __ATOMIC_SPIN_LOCK_UNLOCKED(name), \
.save_state = 1, \
.file = __FILE__, \
.line = __LINE__ , \
}
#else
# define __RT_SPIN_INITIALIZER(name) \
{ .wait_lock = __ATOMIC_SPIN_LOCK_UNLOCKED(name) }
#endif
#define __SPIN_LOCK_UNLOCKED(name) \
{ .lock = __RT_SPIN_INITIALIZER(name), \
SPIN_DEP_MAP_INIT(name) }
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(spin_old_style)
#define __DEFINE_SPINLOCK(name) \
spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
#define DEFINE_SPINLOCK(name) \
spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
#define spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__rt_spin_lock_init(lock, #lock, &__key); \
} while (0)
extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
extern int __lockfunc
rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
/*
* lockdep-less calls, for derived types like rwlock:
* (for trylock they can use rt_mutex_trylock() directly).
*/
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
/*
* rwlocks - an RW semaphore plus lock-break field:
*/
typedef struct {
struct rt_mutex lock;
int read_depth;
unsigned int break_lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} rwlock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
#define __RW_LOCK_UNLOCKED(name) \
{ .lock = __RT_SPIN_INITIALIZER(name), \
RW_DEP_MAP_INIT(name) }
#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(rw_old_style)
#define DEFINE_RWLOCK(name) \
rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock,
unsigned long *flags);
extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
extern void
__rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
#define rwlock_init(rwl) \
do { \
static struct lock_class_key __key; \
\
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
/*
* RW-semaphores are a spinlock plus a reader-depth count.
*
* Note that the semantics are different from the usual
* Linux rw-sems, in PREEMPT_RT mode we do not allow
* multiple readers to hold the lock at once, we only allow
* a read-lock owner to read-lock recursively. This is
* better for latency, makes the implementation inherently
* fair and makes it simpler as well:
*/
struct rw_semaphore {
struct rt_mutex lock;
int read_depth;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#define __RWSEM_INITIALIZER(name) \
{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
RW_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(lockname) \
struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
struct lock_class_key *key);
# define rt_init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
__rt_rwsem_init((sem), #sem, &__key); \
} while (0)
extern void rt_down_write(struct rw_semaphore *rwsem);
extern void
rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
extern void
rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_read(struct rw_semaphore *rwsem);
extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
extern void rt_up_read(struct rw_semaphore *rwsem);
extern void rt_up_write(struct rw_semaphore *rwsem);
extern void rt_downgrade_write(struct rw_semaphore *rwsem);
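
Because only the current read-lock owner may recurse, one task can nest down_read() calls on the same rw_semaphore while any second reader blocks until the owner drops the lock completely. A hedged illustration (the lock name and function below are hypothetical, not part of this patch; recursion is presumably tracked via the read_depth field above):

static DECLARE_RWSEM(my_sem);	/* hypothetical example lock */

static void reader_path(void)
{
	down_read(&my_sem);	/* maps to rt_down_read() */
	down_read(&my_sem);	/* same owner: recursive read-lock is allowed */
	up_read(&my_sem);
	up_read(&my_sem);	/* other readers/writers stay blocked until here */
}
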
/*
* Semaphores - a spinlock plus the semaphore count:
*/
struct semaphore {
atomic_t count;
struct rt_mutex lock;
};
#define DEFINE_SEMAPHORE(name) \
struct semaphore name = \
{ .count = { 1 }, .lock = __RT_MUTEX_INITIALIZER(name.lock) }
extern void
__sema_init(struct semaphore *sem, int val, char *name, char *file, int line);
#define rt_sema_init(sem, val) \
__sema_init(sem, val, #sem, __FILE__, __LINE__)
/*
* No locked initialization for RT semaphores
*/
extern void rt_down(struct semaphore *sem);
extern int rt_down_interruptible(struct semaphore *sem);
extern int rt_down_timeout(struct semaphore *sem, long jiffies);
extern int rt_down_trylock(struct semaphore *sem);
extern void rt_up(struct semaphore *sem);
#define rt_sem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
#define rt_sema_count(s) atomic_read(&(s)->count)
#else
static inline int preempt_rt(void) { return 0; }
#endif /* CONFIG_PREEMPT_RT */
#endif
@@ -5,6 +5,60 @@
# error "please don't include this file directly"
#endif
#ifdef CONFIG_PREEMPT_RT
#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
#define write_trylock_irqsave(lock, flags) \
__cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
#define write_lock(lock) rt_write_lock(lock)
#define read_lock(lock) rt_read_lock(lock)
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = rt_read_lock_irqsave(lock); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = rt_write_lock_irqsave(lock); \
} while (0)
#define read_lock_irq(lock) rt_read_lock(lock)
#define read_lock_bh(lock) rt_read_lock(lock)
#define write_lock_irq(lock) rt_write_lock(lock)
#define write_lock_bh(lock) rt_write_lock(lock)
#define read_unlock(lock) rt_read_unlock(lock)
#define write_unlock(lock) rt_write_unlock(lock)
#define read_unlock_irq(lock) rt_read_unlock(lock)
#define write_unlock_irq(lock) rt_write_unlock(lock)
#define read_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
(void) flags; \
rt_read_unlock(lock); \
} while (0)
#define read_unlock_bh(lock) rt_read_unlock(lock)
#define write_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
(void) flags; \
rt_write_unlock(lock); \
} while (0)
#define write_unlock_bh(lock) rt_write_unlock(lock)
#else
/*
* rwlock related methods
*
@@ -147,5 +201,6 @@ do { \
write_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
#endif
#endif /* __LINUX_RWLOCK_H */
@@ -77,6 +77,7 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
return (sem->activity != 0);
}
#ifndef CONFIG_PREEMPT_RT
/*
* Non preempt-rt implementation of rw_semaphore. Same as above, but
* restricted vs. ownership. i.e. ownerless locked state and non owner
@@ -129,6 +130,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->activity != 0);
}
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rt_lock.h>
#include <asm/system.h>
#include <asm/atomic.h>
@@ -89,6 +90,59 @@ extern void anon_up_read_non_owner(struct rw_anon_semaphore *sem);
# define anon_up_read_non_owner(sem) anon_up_read(sem)
#endif
#ifdef CONFIG_PREEMPT_RT
#include <linux/rt_lock.h>
#define init_rwsem(sem) rt_init_rwsem(sem)
#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
static inline void down_read(struct rw_semaphore *sem)
{
rt_down_read(sem);
}
static inline int down_read_trylock(struct rw_semaphore *sem)
{
return rt_down_read_trylock(sem);
}
static inline void down_write(struct rw_semaphore *sem)
{
rt_down_write(sem);
}
static inline int down_write_trylock(struct rw_semaphore *sem)
{
return rt_down_write_trylock(sem);
}
static inline void up_read(struct rw_semaphore *sem)
{
rt_up_read(sem);
}
static inline void up_write(struct rw_semaphore *sem)
{
rt_up_write(sem);
}
static inline void downgrade_write(struct rw_semaphore *sem)
{
rt_downgrade_write(sem);
}
static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
{
rt_down_read_nested(sem, subclass);
}
static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
{
rt_down_write_nested(sem, subclass);
}
#else
/*
* Non preempt-rt implementations
*/
@@ -136,5 +190,6 @@ static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
{
anon_down_write_nested((struct rw_anon_semaphore *)sem, subclass);
}
#endif
#endif /* _LINUX_RWSEM_H */
@@ -58,6 +58,46 @@ extern int __must_check anon_down_trylock(struct anon_semaphore *sem);
extern int __must_check anon_down_timeout(struct anon_semaphore *sem, long jiffies);
extern void anon_up(struct anon_semaphore *sem);
#ifdef CONFIG_PREEMPT_RT
static inline void sema_init(struct semaphore *sem, int val)
{
rt_sema_init(sem, val);
}
static inline void semaphore_init(struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void down(struct semaphore *sem)
{
rt_down(sem);
}
static inline int __must_check down_interruptible(struct semaphore *sem)
{
return rt_down_interruptible(sem);
}
static inline int __must_check down_trylock(struct semaphore *sem)
{
return rt_down_trylock(sem);
}
static inline int __must_check
down_timeout(struct semaphore *sem, long jiffies)
{
return rt_down_timeout(sem, jiffies);
}
static inline void up(struct semaphore *sem)
{
rt_up(sem);
}
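
The semaphore wrappers above keep the usual counting semantics: the count is held in the atomic_t of struct semaphore while contention is handled through the embedded rt_mutex. A small usage sketch (the semaphore name and functions are hypothetical, for illustration only):

static struct semaphore my_sem;	/* hypothetical example semaphore */

static void my_init(void)
{
	sema_init(&my_sem, 1);	/* maps to rt_sema_init() */
}

static void my_consumer(void)
{
	down(&my_sem);		/* maps to rt_down(), may sleep */
	/* ... protected work ... */
	up(&my_sem);		/* maps to rt_up() */
}
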
#else
/*
* Non preempt-rt maps semaphores to anon semaphores
*/
@@ -125,5 +165,6 @@ static inline void up(struct semaphore *sem)
{
anon_up((struct anon_semaphore *)sem);
}
#endif
#endif /* __LINUX_SEMAPHORE_H */
@@ -57,23 +57,6 @@
#include <asm/system.h>
/*
* Must define these before including other files, inline functions need them
*/
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
extra \
".ifndef " LOCK_SECTION_NAME "\n\t" \
LOCK_SECTION_NAME ":\n\t" \
".endif\n"
#define LOCK_SECTION_END \
".previous\n\t"
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
* Pull the raw_spinlock_t and raw_rwlock_t definitions:
*/
@@ -283,6 +266,98 @@ _atomic_dec_and_atomic_lock(atomic_t *atomic, atomic_spinlock_t *lock);
#define atomic_dec_and_atomic_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_atomic_lock(atomic, lock))
#ifdef CONFIG_PREEMPT_RT
#include <linux/rt_lock.h>
#define spin_lock(lock) rt_spin_lock(lock)
#define spin_lock_bh(lock) rt_spin_lock(lock)
#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
#ifdef CONFIG_LOCKDEP
# define spin_lock_nested(lock, subclass) \
rt_spin_lock_nested(lock, subclass)
# define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = 0; \
rt_spin_lock_nested(lock, subclass); \
} while (0)
#else
# define spin_lock_nested(lock, subclass) \
rt_spin_lock(lock)
# define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = 0; \
rt_spin_lock(lock); \
} while (0)
#endif
#define spin_lock_irq(lock) rt_spin_lock(lock)
#define spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = 0; \
rt_spin_lock(lock); \
} while (0)
/* FIXME: we need rt_spin_lock_nested */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
#define spin_unlock(lock) rt_spin_unlock(lock)
#define spin_unlock_bh(lock) rt_spin_unlock(lock)
#define spin_unlock_irq(lock) rt_spin_unlock(lock)
#define spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
(void) flags; \
rt_spin_unlock(lock); \
} while (0)
#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock(lock))
#define spin_trylock_irq(lock) __cond_lock(lock, rt_spin_trylock(lock))
#define spin_trylock_irqsave(lock, flags) \
({ \
typecheck(unsigned long, flags); \
flags = 0; \
__cond_lock(lock, rt_spin_trylock(lock)); \
})
#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
# define spin_is_contended(lock) ((lock)->break_lock)
#else
# define spin_is_contended(lock) (((void)(lock), 0))
#endif
static inline int spin_can_locked(spinlock_t *lock)
{
return !rt_mutex_is_locked(&lock->lock);
}
static inline int spin_is_locked(spinlock_t *lock)
{
return rt_mutex_is_locked(&lock->lock);
}
static inline void assert_spin_locked(spinlock_t *lock)
{
BUG_ON(!spin_is_locked(lock));
}
#define atomic_dec_and_lock(atomic, lock) \
atomic_dec_and_spin_lock(atomic, lock)
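
With this mapping spin_lock() may sleep, and the _irqsave/_irqrestore variants keep their prototypes without actually disabling interrupts: flags is only type-checked, forced to 0 on acquire and ignored on release. A minimal usage sketch (the lock and function below are hypothetical, not from this patch):

static DEFINE_SPINLOCK(my_lock);	/* hypothetical example lock */

static void my_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);	/* RT: rt_spin_lock(), flags set to 0 */
	/* critical section; may sleep on contention under PREEMPT_RT */
	spin_unlock_irqrestore(&my_lock, flags);	/* RT: rt_spin_unlock(), flags unused */
}
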
#else
/*
* Map spin* to atomic_spin* for PREEMPT_RT=n
*/
@@ -420,6 +495,8 @@ do { \
atomic_dec_and_atomic_lock(atomic, (atomic_spinlock_t *)lock); \
})
#endif /* !PREEMPT_RT */
/*
* Get the rwlock part
*/
@@ -58,7 +58,7 @@ void __lockfunc
_atomic_spin_unlock_irqrestore(atomic_spinlock_t *lock, unsigned long flags)
__releases(lock);
#ifndef CONFIG_PREEMPT_RT
void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
@@ -85,5 +85,6 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
@@ -9,6 +9,23 @@
* Released under the General Public License (GPL).
*/
/*
* Must define these before including other files, inline functions need them
*/
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
extra \
".ifndef " LOCK_SECTION_NAME "\n\t" \
LOCK_SECTION_NAME ":\n\t" \
".endif\n"
#define LOCK_SECTION_END \
".previous\n\t"
#define __lockfunc __attribute__((section(".spinlock.text")))
#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
@@ -17,7 +34,7 @@
#include <linux/lockdep.h>
typedef struct {
typedef struct atomic_spinlock {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
@@ -64,11 +81,12 @@ typedef struct {
#define DEFINE_ATOMIC_SPINLOCK(x) \
atomic_spinlock_t x = __ATOMIC_SPIN_LOCK_UNLOCKED(x)
#ifndef CONFIG_PREEMPT_RT
/*
* For PREEMPT_RT=n we use the same data structures and the spinlock
* functions are mapped to the atomic_spinlock functions
*/
typedef struct {
typedef struct spinlock {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
@@ -109,4 +127,6 @@ typedef struct {
#include <linux/rwlock_types.h>
#endif
#endif /* __LINUX_SPINLOCK_TYPES_H */
@@ -7,7 +7,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
async.o
@@ -28,7 +28,10 @@ obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
ifneq ($(CONFIG_PREEMPT_RT),y)
obj-y += mutex.o
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
endif
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -40,6 +43,7 @@ endif
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_PREEMPT_RT) += rt.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
ifneq ($(CONFIG_SMP),y)
@@ -82,7 +82,11 @@ int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
#ifdef CONFIG_PREEMPT_RT
DEFINE_RWLOCK(tasklist_lock); /* outer */
#else
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
#endif
int nr_processes(void)
{
This diff is collapsed.
@@ -14,6 +14,8 @@
* frame contact the architecture maintainers.
*/
#ifndef CONFIG_PREEMPT_RT
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
@@ -221,3 +223,4 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
}
EXPORT_SYMBOL(_write_unlock_bh);
#endif
@@ -31,6 +31,7 @@ void __atomic_spin_lock_init(atomic_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__atomic_spin_lock_init);
#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
@@ -46,8 +47,8 @@ void __rwlock_init(rwlock_t *lock, const char *name,
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);
#endif
static void spin_bug(atomic_spinlock_t *lock, const char *msg)
{
@@ -154,6 +155,8 @@ void _raw_spin_unlock(atomic_spinlock_t *lock)
__raw_spin_unlock(&lock->raw_lock);
}
#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
@@ -295,3 +298,4 @@ void _raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
__raw_write_unlock(&lock->raw_lock);
}
#endif