Commit 34ca9f9b authored by Thomas Gleixner

spinlocks: Create atomic_spinlock and convert rq->lock

atomic_spinlock_t will be used to annotate locks which are not
converted to sleeping spinlocks on preempt-rt.

rq->lock must be converted right away because it is accessed by a few
raw lock operations.

Also fix the plist implementation so the debug code can handle both
spinlocks and atomic_spinlocks.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent f7c0160a
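
For orientation, here is a minimal usage sketch (not part of the commit) of the atomic_spinlock API this patch introduces; the lock, data and function names (my_lock, my_data, my_update) are invented for illustration. A lock declared this way stays a busy-waiting raw lock even when preempt-rt later turns ordinary spinlocks into sleeping locks.

/* Sketch only: uses the API added by this patch, invented names. */
static DEFINE_ATOMIC_SPINLOCK(my_lock);	/* remains a spinning lock on -rt */
static unsigned long my_data;

static void my_update(unsigned long val)
{
	unsigned long flags;

	atomic_spin_lock_irqsave(&my_lock, flags);	/* disable irqs and spin */
	my_data = val;
	atomic_spin_unlock_irqrestore(&my_lock, flags);
}

This mirrors the rq->lock conversion below, where every spin_lock*(&rq->lock) call becomes the corresponding atomic_spin_lock*() call.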
......@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
spinlock_t *lock;
atomic_spinlock_t *alock;
spinlock_t *slock;
#endif
};
......@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
# define PLIST_HEAD_LOCK_INIT(_lock) .slock = _lock
# define PLIST_HEAD_LOCK_INIT_ATOMIC(_lock) .alock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
# define PLIST_HEAD_LOCK_INIT_ATOMIC(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
......@@ -111,6 +114,17 @@ struct plist_node {
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
* PLIST_HEAD_INIT_ATOMIC - static struct plist_head initializer
* @head: struct plist_head variable name
* @_lock: lock to initialize for this list
*/
#define PLIST_HEAD_INIT_ATOMIC(head, _lock) \
{ \
_PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT_ATOMIC(&(_lock)) \
}
/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
......@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
head->lock = lock;
head->slock = lock;
head->alock = NULL;
#endif
}
/**
* plist_head_init_atomic - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
* @lock: list atomic_spinlock, remembered for debugging
*/
static inline void
plist_head_init_atomic(struct plist_head *head, atomic_spinlock_t *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
head->alock = lock;
head->slock = NULL;
#endif
}
......
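
The plist hunk above gives the debug code two lock pointers and an _atomic init variant. A hedged sketch of how a caller would use it (the struct and function names are hypothetical); this is the same pattern init_rt_rq() follows further down for rt_rq->pushable_tasks:

/* Sketch: a priority list protected by an atomic_spinlock. */
struct my_queue {
	atomic_spinlock_t lock;
	struct plist_head head;
};

static void my_queue_init(struct my_queue *q)
{
	atomic_spin_lock_init(&q->lock);
	/*
	 * Records the lock in head->alock so CONFIG_DEBUG_PI_LIST can
	 * check atomic_spin_is_locked() on every list operation.
	 */
	plist_head_init_atomic(&q->head, &q->lock);
}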
......@@ -91,30 +91,32 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __spin_lock_init(spinlock_t *lock, const char *name,
extern void __atomic_spin_lock_init(atomic_spinlock_t *lock,
const char *name,
struct lock_class_key *key);
# define spin_lock_init(lock) \
# define atomic_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__spin_lock_init((lock), #lock, &__key); \
__atomic_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
# define spin_lock_init(lock) \
do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
# define atomic_spin_lock_init(lock) \
do { *(lock) = __ATOMIC_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
#define atomic_spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#define atomic_spin_is_contended(lock) ((lock)->break_lock)
#else
#ifdef __raw_spin_is_contended
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
#define atomic_spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
#else
#define spin_is_contended(lock) (((void)(lock), 0))
#define atomic_spin_is_contended(lock) (((void)(lock), 0))
#endif /*__raw_spin_is_contended*/
#endif
......@@ -127,7 +129,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
#define atomic_spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
......@@ -139,10 +141,10 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void _raw_spin_lock(spinlock_t *lock);
extern void _raw_spin_lock(atomic_spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
extern int _raw_spin_trylock(atomic_spinlock_t *lock);
extern void _raw_spin_unlock(atomic_spinlock_t *lock);
#else
# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
......@@ -152,75 +154,79 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/*
* Define the various spin_lock and rw_lock methods. Note we define these
* Define the various spin_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
* various methods are defined as nops in the case they are not
* required.
*/
#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
#define atomic_spin_trylock(lock) __cond_lock(lock, _atomic_spin_trylock(lock))
#define spin_lock(lock) _spin_lock(lock)
#define atomic_spin_lock(lock) _atomic_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
# define spin_lock_nest_lock(lock, nest_lock) \
# define atomic_spin_lock_nested(lock, subclass) \
_atomic_spin_lock_nested(lock, subclass)
# define atomic_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
_atomic_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);\
} while (0)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
# define atomic_spin_lock_nested(lock, subclass) _atomic_spin_lock(lock)
# define atomic_spin_lock_nest_lock(lock, nest_lock) _atomic_spin_lock(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define spin_lock_irqsave(lock, flags) \
#define atomic_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave(lock); \
flags = _atomic_spin_lock_irqsave(lock);\
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define spin_lock_irqsave_nested(lock, flags, subclass) \
#define atomic_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave_nested(lock, subclass); \
flags = _atomic_spin_lock_irqsave_nested(lock, subclass);\
} while (0)
#else
#define spin_lock_irqsave_nested(lock, flags, subclass) \
#define atomic_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave(lock); \
flags = _atomic_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
#define spin_lock_irqsave(lock, flags) \
#define atomic_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_spin_lock_irqsave(lock, flags); \
_atomic_spin_lock_irqsave(lock, flags); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
spin_lock_irqsave(lock, flags)
#define atomic_spin_lock_irqsave_nested(lock, flags, subclass) \
atomic_spin_lock_irqsave(lock, flags)
#endif
#define spin_lock_irq(lock) _spin_lock_irq(lock)
#define spin_lock_bh(lock) _spin_lock_bh(lock)
#define atomic_spin_lock_irq(lock) _atomic_spin_lock_irq(lock)
#define atomic_spin_lock_bh(lock) _atomic_spin_lock_bh(lock)
/*
* We inline the unlock functions in the nondebug case:
*/
#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
!defined(CONFIG_SMP)
# define spin_unlock(lock) _spin_unlock(lock)
# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
# define atomic_spin_unlock(lock) _atomic_spin_unlock(lock)
# define atomic_spin_unlock_irq(lock) _atomic_spin_unlock_irq(lock)
#else
# define spin_unlock(lock) \
# define atomic_spin_unlock(lock) \
do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define spin_unlock_irq(lock) \
# define atomic_spin_unlock_irq(lock) \
do { \
__raw_spin_unlock(&(lock)->raw_lock); \
__release(lock); \
......@@ -228,29 +234,36 @@ do { \
} while (0)
#endif
#define spin_unlock_irqrestore(lock, flags) \
#define atomic_spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_spin_unlock_irqrestore(lock, flags); \
_atomic_spin_unlock_irqrestore(lock, flags);\
} while (0)
#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
#define atomic_spin_unlock_bh(lock) _atomic_spin_unlock_bh(lock)
#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
#define atomic_spin_trylock_bh(lock) \
__cond_lock(lock, _atomic_spin_trylock_bh(lock))
#define spin_trylock_irq(lock) \
#define atomic_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
spin_trylock(lock) ? \
atomic_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
#define spin_trylock_irqsave(lock, flags) \
#define atomic_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
spin_trylock(lock) ? \
atomic_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
/**
* spin_can_lock - would spin_trylock() succeed?
* @lock: the spinlock in question.
*/
#define atomic_spin_can_lock(lock) (!atomic_spin_is_locked(lock))
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
......@@ -264,16 +277,152 @@ do { \
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
extern int
_atomic_dec_and_atomic_lock(atomic_t *atomic, atomic_spinlock_t *lock);
#define atomic_dec_and_atomic_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_atomic_lock(atomic, lock))
/**
* spin_can_lock - would spin_trylock() succeed?
* @lock: the spinlock in question.
*/
#define spin_can_lock(lock) (!spin_is_locked(lock))
/*
* Map spin* to atomic_spin* for PREEMPT_RT=n
*/
static inline void spin_lockcheck(spinlock_t *lock) { }
#define spin_lock_init(lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_lock_init((atomic_spinlock_t *)lock); \
} while (0)
#define spin_lock(lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_lock((atomic_spinlock_t *)lock); \
} while (0)
#define spin_lock_bh(lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_lock_bh((atomic_spinlock_t *)lock); \
} while (0)
#define spin_trylock(lock) \
({ \
spin_lockcheck(lock); \
atomic_spin_trylock((atomic_spinlock_t *)lock); \
})
#define spin_lock_nested(lock, subclass) \
do { \
spin_lockcheck(lock); \
atomic_spin_lock_nested((atomic_spinlock_t *)lock, subclass); \
} while (0)
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_lock_nest_lock((atomic_spinlock_t *)lock, nest_lock); \
} while (0)
#define spin_lock_irq(lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_lock_irq((atomic_spinlock_t *)lock); \
} while (0)
#define spin_lock_irqsave(lock, flags) \
do { \
spin_lockcheck(lock); \
atomic_spin_lock_irqsave((atomic_spinlock_t *)lock, flags); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
spin_lockcheck(lock); \
atomic_spin_lock_irqsave_nested((atomic_spinlock_t *)lock, flags, subclass); \
} while (0)
#define spin_unlock(lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_unlock((atomic_spinlock_t *)lock); \
} while (0)
#define spin_unlock_bh(lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_unlock_bh((atomic_spinlock_t *)lock); \
} while (0)
#define spin_unlock_irq(lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_unlock_irq((atomic_spinlock_t *)lock); \
} while (0)
#define spin_unlock_irqrestore(lock, flags) \
do { \
spin_lockcheck(lock); \
atomic_spin_unlock_irqrestore((atomic_spinlock_t *)lock, flags); \
} while (0)
#define spin_trylock_bh(lock) \
({ \
spin_lockcheck(lock); \
atomic_spin_trylock_bh((atomic_spinlock_t *)lock); \
})
#define spin_trylock_irq(lock) \
({ \
spin_lockcheck(lock); \
atomic_spin_trylock_irq((atomic_spinlock_t *)lock); \
})
#define spin_trylock_irqsave(lock, flags) \
({ \
spin_lockcheck(lock); \
atomic_spin_trylock_irqsave((atomic_spinlock_t *)lock, flags); \
})
#define spin_unlock_wait(lock) \
do { \
spin_lockcheck(lock); \
atomic_spin_unlock_wait((atomic_spinlock_t *)lock); \
} while (0)
#define spin_is_locked(lock) \
({ \
spin_lockcheck(lock); \
atomic_spin_is_locked((atomic_spinlock_t *)lock); \
})
#define spin_is_contended(lock) \
({ \
spin_lockcheck(lock); \
atomic_spin_is_contended((atomic_spinlock_t *)lock); \
})
#define spin_can_lock(lock) \
({ \
spin_lockcheck(lock); \
atomic_spin_can_lock((atomic_spinlock_t *)lock); \
})
#define assert_spin_locked(lock) \
do { \
spin_lockcheck(lock); \
assert_atomic_spin_locked((atomic_spinlock_t *)lock); \
} while (0)
#define atomic_dec_and_lock(atomic, lock) \
({ \
spin_lockcheck(lock); \
atomic_dec_and_atomic_lock(atomic, (atomic_spinlock_t *)lock); \
})
/*
* Get the rwlock part
*/
#include <linux/rwlock.h>
#endif /* __LINUX_SPINLOCK_H */
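
To make the PREEMPT_RT=n mapping above concrete, here is a small illustrative example (example() and the two lock names are made up). spin_lockcheck() compiles to nothing; its only job is to force a compile-time diagnostic if a caller mixes the two lock types before the cast to atomic_spinlock_t * is applied, and the cast itself is only safe because both typedefs share the same layout when PREEMPT_RT is off.

static DEFINE_SPINLOCK(s_lock);
static DEFINE_ATOMIC_SPINLOCK(a_lock);

static void example(void)
{
	spin_lock(&s_lock);		/* expands to spin_lockcheck() + atomic_spin_lock() */
	spin_unlock(&s_lock);

	atomic_spin_lock(&a_lock);	/* native atomic_spinlock API */
	atomic_spin_unlock(&a_lock);

	/*
	 * spin_lock(&a_lock) would trigger an incompatible-pointer-type
	 * warning from spin_lockcheck(), so mixing the two lock families
	 * is caught at build time instead of being silently cast away.
	 */
}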
......@@ -17,44 +17,70 @@
int in_lock_functions(unsigned long addr);
#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
#define assert_atomic_spin_locked(x) BUG_ON(!atomic_spin_is_locked(x))
void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
void __lockfunc
_atomic_spin_lock(atomic_spinlock_t *lock) __acquires(lock);
void __lockfunc
_atomic_spin_lock_nested(atomic_spinlock_t *lock, int subclass)
__acquires(lock);
void __lockfunc
_atomic_spin_lock_nest_lock(atomic_spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
void __lockfunc
_atomic_spin_lock_bh(atomic_spinlock_t *lock) __acquires(lock);
void __lockfunc
_atomic_spin_lock_irq(atomic_spinlock_t *lock) __acquires(lock);
unsigned long __lockfunc
_atomic_spin_lock_irqsave(atomic_spinlock_t *lock) __acquires(lock);
unsigned long __lockfunc
_atomic_spin_lock_irqsave_nested(atomic_spinlock_t *lock, int subclass)
__acquires(lock);
int __lockfunc _atomic_spin_trylock(atomic_spinlock_t *lock);
int __lockfunc _atomic_spin_trylock_bh(atomic_spinlock_t *lock);
void __lockfunc
_atomic_spin_unlock(atomic_spinlock_t *lock) __releases(lock);
void __lockfunc
_atomic_spin_unlock_bh(atomic_spinlock_t *lock) __releases(lock);
void __lockfunc
_atomic_spin_unlock_irq(atomic_spinlock_t *lock) __releases(lock);
void __lockfunc
_atomic_spin_unlock_irqrestore(atomic_spinlock_t *lock, unsigned long flags)
__releases(lock);
void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
__acquires(lock);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
__releases(lock);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
......
......@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
#define assert_spin_locked(lock) do { (void)(lock); } while (0)
#define assert_atomic_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
......@@ -48,33 +48,35 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
#define _spin_lock(lock) __LOCK(lock)
#define _spin_lock_nested(lock, subclass) __LOCK(lock)
#define _atomic_spin_lock(lock) __LOCK(lock)
#define _atomic_spin_lock_nested(lock, subclass) \
__LOCK(lock)
#define _read_lock(lock) __LOCK(lock)
#define _write_lock(lock) __LOCK(lock)
#define _spin_lock_bh(lock) __LOCK_BH(lock)
#define _atomic_spin_lock_bh(lock) __LOCK_BH(lock)
#define _read_lock_bh(lock) __LOCK_BH(lock)
#define _write_lock_bh(lock) __LOCK_BH(lock)
#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
#define _atomic_spin_lock_irq(lock) __LOCK_IRQ(lock)
#define _read_lock_irq(lock) __LOCK_IRQ(lock)
#define _write_lock_irq(lock) __LOCK_IRQ(lock)
#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _atomic_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
#define _atomic_spin_trylock(lock) ({ __LOCK(lock); 1; })
#define _read_trylock(lock) ({ __LOCK(lock); 1; })
#define _write_trylock(lock) ({ __LOCK(lock); 1; })
#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
#define _spin_unlock(lock) __UNLOCK(lock)
#define _atomic_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
#define _atomic_spin_unlock(lock) __UNLOCK(lock)
#define _read_unlock(lock) __UNLOCK(lock)
#define _write_unlock(lock) __UNLOCK(lock)
#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _atomic_spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _atomic_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
#define _atomic_spin_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
......
......@@ -29,7 +29,7 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} spinlock_t;
} atomic_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
......@@ -41,6 +41,47 @@ typedef struct {
# define SPIN_DEP_MAP_INIT(lockname)
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
# define __ATOMIC_SPIN_LOCK_UNLOCKED(lockname) \
(atomic_spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
SPIN_DEP_MAP_INIT(lockname) }
#else
# define __ATOMIC_SPIN_LOCK_UNLOCKED(lockname) \
(atomic_spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
SPIN_DEP_MAP_INIT(lockname) }
#endif
/*
* SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
* deprecated.
*
* Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
* appropriate.
*/
#define DEFINE_ATOMIC_SPINLOCK(x) \
atomic_spinlock_t x = __ATOMIC_SPIN_LOCK_UNLOCKED(x)
/*
* For PREEMPT_RT=n we use the same data structures and the spinlock
* functions are mapped to the atomic_spinlock functions
*/
typedef struct {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} spinlock_t;
#ifdef CONFIG_DEBUG_SPINLOCK
# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
......@@ -57,6 +98,7 @@ typedef struct {
/*
* SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
* deprecated.
*
* Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
* appropriate.
*/
......
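
Note that the new spinlock_t above is, for now, field-for-field identical to atomic_spinlock_t; the spin_* wrappers in spinlock.h rely on exactly that when they cast. The patch does not assert this, but the assumption could be documented with a build-time check along these lines (purely illustrative, not part of the commit):

static inline void spinlock_layout_check(void)
{
	/* The casts in the spin_* wrappers require identical layout. */
	BUILD_BUG_ON(sizeof(spinlock_t) != sizeof(atomic_spinlock_t));
	BUILD_BUG_ON(offsetof(spinlock_t, raw_lock) !=
		     offsetof(atomic_spinlock_t, raw_lock));
}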
......@@ -999,7 +999,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
q->list.plist.lock = &hb2->lock;
q->list.plist.slock = &hb2->lock;
#endif
}
get_futex_key_refs(key2);
......@@ -1337,7 +1337,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
q->list.plist.lock = &hb->lock;
q->list.plist.slock = &hb->lock;
#endif
plist_add(&q->list, &hb->chain);
q->task = current;
......
......@@ -564,7 +564,7 @@ static struct root_domain def_root_domain;
*/
struct rq {
/* runqueue lock: */
spinlock_t lock;
atomic_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
......@@ -721,7 +721,7 @@ int runqueue_is_locked(void)
struct rq *rq = cpu_rq(cpu);
int ret;
ret = spin_is_locked(&rq->lock);
ret = atomic_spin_is_locked(&rq->lock);
put_cpu();
return ret;
}
......@@ -922,7 +922,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
......@@ -978,10 +978,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
{
for (;;) {
struct rq *rq = task_rq(p);
spin_lock(&rq->lock);
atomic_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
spin_unlock(&rq->lock);
atomic_spin_unlock(&rq->lock);
}
}
......@@ -998,10 +998,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
for (;;) {
local_irq_save(*flags);
rq = task_rq(p);
spin_lock(&rq->lock);
atomic_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
spin_unlock_irqrestore(&rq->lock, *flags);
atomic_spin_unlock_irqrestore(&rq->lock, *flags);
}
}
......@@ -1010,19 +1010,19 @@ void task_rq_unlock_wait(struct task_struct *p)
struct rq *rq = task_rq(p);
smp_mb(); /* spin-unlock-wait is not a full memory barrier */
spin_unlock_wait(&rq->lock);
atomic_spin_unlock_wait(&rq->lock);
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
spin_unlock(&rq->lock);
atomic_spin_unlock(&rq->lock);
}
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
__releases(rq->lock)
{
spin_unlock_irqrestore(&rq->lock, *flags);
atomic_spin_unlock_irqrestore(&rq->lock, *flags);
}
/*
......@@ -1035,7 +1035,7 @@ static struct rq *this_rq_lock(void)
local_irq_disable();
rq = this_rq();
spin_lock(&rq->lock);
atomic_spin_lock(&rq->lock);
return rq;
}
......@@ -1082,10 +1082,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
spin_lock(&rq->lock);
atomic_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
spin_unlock(&rq->lock);
atomic_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
......@@ -1098,10 +1098,10 @@ static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
spin_lock(&rq->lock);
atomic_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
spin_unlock(&rq->lock);
atomic_spin_unlock(&rq->lock);
}
/*
......@@ -1208,7 +1208,7 @@ static void resched_task(struct task_struct *p)
{
int cpu;
assert_spin_locked(&task_rq(p)->lock);
assert_atomic_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
......@@ -1230,10 +1230,10 @@ static void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
if (!spin_trylock_irqsave(&rq->lock, flags))
if (!atomic_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
......@@ -1281,7 +1281,7 @@ void wake_up_idle_cpu(int cpu)
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
assert_spin_locked(&task_rq(p)->lock);
assert_atomic_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */
......@@ -1544,11 +1544,11 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->shares = shares;
__set_se_shares(tg->se[cpu], shares);
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
}
......@@ -1627,9 +1627,9 @@ static void update_shares(struct sched_domain *sd)
static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
{
spin_unlock(&rq->lock);
atomic_spin_unlock(&rq->lock);
update_shares(sd);
spin_lock(&rq->lock);
atomic_spin_lock(&rq->lock);
}
static void update_h_load(long cpu)
......@@ -1664,7 +1664,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
spin_unlock(&this_rq->lock);
atomic_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;
......@@ -1685,14 +1685,14 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
int ret = 0;
if (unlikely(!spin_trylock(&busiest->lock))) {
if (unlikely(!atomic_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
atomic_spin_unlock(&this_rq->lock);
atomic_spin_lock(&busiest->lock);
atomic_spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
ret = 1;
} else
spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
atomic_spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
}
return ret;
}
......@@ -1706,7 +1706,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
spin_unlock(&this_rq->lock);
atomic_spin_unlock(&this_rq->lock);
BUG_ON(1);
}
......@@ -1716,7 +1716,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
spin_unlock(&busiest->lock);
atomic_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
#endif
......@@ -3091,15 +3091,17 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
spin_lock(&rq1->lock);
atomic_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
spin_lock(&rq1->lock);
spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
atomic_spin_lock(&rq1->lock);
atomic_spin_lock_nested(&rq2->lock,
SINGLE_DEPTH_NESTING);
} else {
spin_lock(&rq2->lock);
spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
atomic_spin_lock(&rq2->lock);
atomic_spin_lock_nested(&rq1->lock,
SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);
......@@ -3116,9 +3118,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
spin_unlock(&rq1->lock);
atomic_spin_unlock(&rq1->lock);
if (rq1 != rq2)
spin_unlock(&rq2->lock);
atomic_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}
......@@ -4105,14 +4107,15 @@ redo:
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
spin_lock_irqsave(&busiest->lock, flags);
atomic_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread, if the curr
* task on busiest cpu can't be moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
&busiest->curr->cpus_allowed)) {
spin_unlock_irqrestore(&busiest->lock, flags);
atomic_spin_unlock_irqrestore(&busiest->lock,
flags);
all_pinned = 1;
goto out_one_pinned;
}
......@@ -4122,7 +4125,7 @@ redo:
busiest->push_cpu = this_cpu;
active_balance = 1;
}
spin_unlock_irqrestore(&busiest->lock, flags);
atomic_spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance)
wake_up_process(busiest->migration_thread);
......@@ -4304,10 +4307,10 @@ redo:
/*
* Should not call ttwu while holding a rq->lock
*/
spin_unlock(&this_rq->lock);
atomic_spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
spin_lock(&this_rq->lock);
atomic_spin_lock(&this_rq->lock);
} else
sd->nr_balance_failed = 0;
......@@ -5139,11 +5142,11 @@ void scheduler_tick(void)
sched_clock_tick();
spin_lock(&rq->lock);
atomic_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
spin_unlock(&rq->lock);
atomic_spin_unlock(&rq->lock);
perf_counter_task_tick(curr, cpu);
......@@ -5337,7 +5340,7 @@ need_resched_nonpreemptible:
if (sched_feat(HRTICK))
hrtick_clear(rq);
spin_lock_irq(&rq->lock);
atomic_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
clear_tsk_need_resched(prev);
......@@ -5376,7 +5379,7 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
......@@ -6870,7 +6873,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
idle->se.exec_start = sched_clock();
......@@ -6883,7 +6886,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
......@@ -7069,10 +7072,10 @@ static int migration_thread(void *data)
struct migration_req *req;
struct list_head *head;
spin_lock_irq(&rq->lock);
atomic_spin_lock_irq(&rq->lock);
if (cpu_is_offline(cpu)) {
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
break;
}
......@@ -7084,7 +7087,7 @@ static int migration_thread(void *data)
head = &rq->migration_queue;
if (list_empty(head)) {
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
continue;
......@@ -7092,7 +7095,7 @@ static int migration_thread(void *data)
req = list_entry(head->next, struct migration_req, list);
list_del_init(head->next);
spin_unlock(&rq->lock);
atomic_spin_unlock(&rq->lock);
__migrate_task(req->task, cpu, req->dest_cpu);
local_irq_enable();
......@@ -7214,14 +7217,14 @@ void sched_idle_next(void)
* Strictly not necessary since rest of the CPUs are stopped by now
* and interrupts disabled on the current cpu.
*/
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
activate_task(rq, p, 0);
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
......@@ -7257,9 +7260,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
spin_lock_irq(&rq->lock);
atomic_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
......@@ -7526,13 +7529,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
......@@ -7557,14 +7560,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
spin_lock_irq(&rq->lock);
atomic_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
rq->idle->static_prio = MAX_PRIO;
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
......@@ -7574,30 +7577,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* they didn't take sched_hotcpu_mutex. Just wake up
* the requestors.
*/
spin_lock_irq(&rq->lock);
atomic_spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
req = list_entry(rq->migration_queue.next,
struct migration_req, list);
list_del_init(&req->list);
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
complete(&req->done);
spin_lock_irq(&rq->lock);
atomic_spin_lock_irq(&rq->lock);
}
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
rq = cpu_rq(cpu);
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
break;
#endif
}
......@@ -7818,7 +7821,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
struct root_domain *old_rd = NULL;
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;
......@@ -7844,7 +7847,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
set_rq_online(rq);
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
free_rootdomain(old_rd);
......@@ -9097,7 +9100,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
plist_head_init_atomic(&rt_rq->pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
......@@ -9263,7 +9266,7 @@ void __init sched_init(void)
struct rq *rq;
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
atomic_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
......@@ -9839,9 +9842,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
struct rq *rq = cfs_rq->rq;
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
__set_se_shares(se, shares);
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);
......@@ -10412,9 +10415,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
spin_lock_irq(&cpu_rq(cpu)->lock);
atomic_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
spin_unlock_irq(&cpu_rq(cpu)->lock);
atomic_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif
......@@ -10430,9 +10433,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
spin_lock_irq(&cpu_rq(cpu)->lock);
atomic_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
spin_unlock_irq(&cpu_rq(cpu)->lock);
atomic_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif
......
......@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
......@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
......
......@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
spin_lock_irq(&rq->lock);
atomic_spin_lock_irq(&rq->lock);
}
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
......
......@@ -441,9 +441,9 @@ static void disable_runtime(struct rq *rq)
{
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
__disable_runtime(rq);
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
static void __enable_runtime(struct rq *rq)
......@@ -473,9 +473,9 @@ static void enable_runtime(struct rq *rq)
{
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
atomic_spin_lock_irqsave(&rq->lock, flags);
__enable_runtime(rq);
spin_unlock_irqrestore(&rq->lock, flags);
atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
......@@ -511,7 +511,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
spin_lock(&rq->lock);
atomic_spin_lock(&rq->lock);
if (rt_rq->rt_time) {
u64 runtime;
......@@ -532,7 +532,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
spin_unlock(&rq->lock);
atomic_spin_unlock(&rq->lock);
}
return idle;
......@@ -1244,7 +1244,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
task_running(rq, task) ||
!task->se.on_rq)) {
spin_unlock(&lowest_rq->lock);
atomic_spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
break;
}
......@@ -1480,9 +1480,9 @@ static void post_schedule_rt(struct rq *rq)
* This is only called if needs_post_schedule_rt() indicates that
* we need to push tasks away
*/
spin_lock_irq(&rq->lock);
atomic_spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
spin_unlock_irq(&rq->lock);
atomic_spin_unlock_irq(&rq->lock);
}
/*
......
......@@ -23,7 +23,7 @@
#include "lock-internals.h"
int __lockfunc _spin_trylock(spinlock_t *lock)
int __lockfunc _atomic_spin_trylock(atomic_spinlock_t *lock)
{
preempt_disable();
if (_raw_spin_trylock(lock)) {
......@@ -33,7 +33,7 @@ int __lockfunc _spin_trylock(spinlock_t *lock)
preempt_enable();
return 0;
}
EXPORT_SYMBOL(_spin_trylock);
EXPORT_SYMBOL(_atomic_spin_trylock);
/*
* If lockdep is enabled then we use the non-preemption spin-ops
......@@ -42,7 +42,7 @@ EXPORT_SYMBOL(_spin_trylock);
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
unsigned long __lockfunc _atomic_spin_lock_irqsave(atomic_spinlock_t *lock)
{
unsigned long flags;
......@@ -61,33 +61,33 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
#endif
return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
EXPORT_SYMBOL(_atomic_spin_lock_irqsave);
void __lockfunc _spin_lock_irq(spinlock_t *lock)
void __lockfunc _atomic_spin_lock_irq(atomic_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
EXPORT_SYMBOL(_atomic_spin_lock_irq);
void __lockfunc _spin_lock_bh(spinlock_t *lock)
void __lockfunc _atomic_spin_lock_bh(atomic_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
EXPORT_SYMBOL(_atomic_spin_lock_bh);
void __lockfunc _spin_lock(spinlock_t *lock)
void __lockfunc _atomic_spin_lock(atomic_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock);
EXPORT_SYMBOL(_atomic_spin_lock);
#else /* CONFIG_PREEMPT: */
......@@ -95,26 +95,27 @@ EXPORT_SYMBOL(_spin_lock);
* Build preemption-friendly versions of the following
* lock-spinning functions:
*
* _spin_lock()
* _spin_lock_irq()
* _spin_lock_irqsave()
* _spin_lock_bh()
* _atomic_spin_lock()
* _atomic_spin_lock_irq()
* _atomic_spin_lock_irqsave()
* _atomic_spin_lock_bh()
*/
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(atomic_spin, atomic_spinlock);
#endif /* CONFIG_PREEMPT */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
void __lockfunc _atomic_spin_lock_nested(atomic_spinlock_t *lock, int subclass)
{
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
EXPORT_SYMBOL(_atomic_spin_lock_nested);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
unsigned long __lockfunc
_atomic_spin_lock_irqsave_nested(atomic_spinlock_t *lock, int subclass)
{
unsigned long flags;
......@@ -125,55 +126,56 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
_raw_spin_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
EXPORT_SYMBOL(_atomic_spin_lock_irqsave_nested);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
void __lockfunc _atomic_spin_lock_nest_lock(atomic_spinlock_t *lock,
struct lockdep_map *nest_lock)
{
preempt_disable();
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);
EXPORT_SYMBOL(_atomic_spin_lock_nest_lock);
#endif
void __lockfunc _spin_unlock(spinlock_t *lock)
void __lockfunc _atomic_spin_unlock(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
EXPORT_SYMBOL(_atomic_spin_unlock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
void __lockfunc
_atomic_spin_unlock_irqrestore(atomic_spinlock_t *lock, unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
EXPORT_SYMBOL(_atomic_spin_unlock_irqrestore);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
void __lockfunc _atomic_spin_unlock_irq(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
EXPORT_SYMBOL(_atomic_spin_unlock_irq);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
void __lockfunc _atomic_spin_unlock_bh(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);
EXPORT_SYMBOL(_atomic_spin_unlock_bh);
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
int __lockfunc _atomic_spin_trylock_bh(atomic_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
......@@ -186,7 +188,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
EXPORT_SYMBOL(_atomic_spin_trylock_bh);
notrace int in_lock_functions(unsigned long addr)
{
......
......@@ -17,18 +17,18 @@
* because the spin-lock and the decrement must be
* "atomic".
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
int _atomic_dec_and_atomic_lock(atomic_t *atomic, atomic_spinlock_t *lock)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
/* Otherwise do it the slow way */
spin_lock(lock);
atomic_spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
atomic_spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
EXPORT_SYMBOL(_atomic_dec_and_atomic_lock);
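
For context, a hedged example of the classic atomic_dec_and_lock() pattern this file implements (the object and lock names are invented): drop a reference and, only if it was the last one, take the list lock, unlink and free. After this patch the spinlock_t variant is mapped onto _atomic_dec_and_atomic_lock() via the cast macro in spinlock.h.

struct my_obj {
	atomic_t refcount;
	struct list_head entry;
};

static DEFINE_SPINLOCK(my_list_lock);	/* protects the list of my_obj */

static void my_obj_put(struct my_obj *obj)
{
	/* Returns 1 with my_list_lock held iff the count dropped to zero. */
	if (atomic_dec_and_lock(&obj->refcount, &my_list_lock)) {
		list_del(&obj->entry);
		spin_unlock(&my_list_lock);
		kfree(obj);
	}
}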
......@@ -20,7 +20,7 @@
*
* Don't use in new code.
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
static __cacheline_aligned_in_smp DEFINE_ATOMIC_SPINLOCK(kernel_flag);
/*
......@@ -79,7 +79,7 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
while (spin_is_locked(&kernel_flag))
while (atomic_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
} while (!_raw_spin_trylock(&kernel_flag));
......
......@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head)
{
WARN_ON(!head->lock);
if (head->lock)
WARN_ON_SMP(!spin_is_locked(head->lock));
WARN_ON(!head->alock && !head->slock);
if (head->alock)
WARN_ON_SMP(!atomic_spin_is_locked(head->alock));
if (head->slock)
WARN_ON_SMP(!spin_is_locked(head->slock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
......
......@@ -13,7 +13,7 @@
#include <linux/delay.h>
#include <linux/module.h>
void __spin_lock_init(spinlock_t *lock, const char *name,
void __atomic_spin_lock_init(atomic_spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
......@@ -29,7 +29,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__spin_lock_init);
EXPORT_SYMBOL(__atomic_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
......@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
static void spin_bug(spinlock_t *lock, const char *msg)
static void spin_bug(atomic_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
......@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
debug_spin_lock_before(spinlock_t *lock)
debug_spin_lock_before(atomic_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
......@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
lock, "cpu recursion");
}
static inline void debug_spin_lock_after(spinlock_t *lock)
static inline void debug_spin_lock_after(atomic_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
static inline void debug_spin_unlock(spinlock_t *lock)
static inline void debug_spin_unlock(atomic_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(!atomic_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
......@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
lock->owner_cpu = -1;
}
static void __spin_lock_debug(spinlock_t *lock)
static void __spin_lock_debug(atomic_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
......@@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t *lock)
}
}
void _raw_spin_lock(spinlock_t *lock)
void _raw_spin_lock(atomic_spinlock_t *lock)
{
debug_spin_lock_before(lock);
if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
......@@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock)
debug_spin_lock_after(lock);
}
int _raw_spin_trylock(spinlock_t *lock)
int _raw_spin_trylock(atomic_spinlock_t *lock)
{
int ret = __raw_spin_trylock(&lock->raw_lock);
......@@ -148,7 +148,7 @@ int _raw_spin_trylock(spinlock_t *lock)
return ret;
}
void _raw_spin_unlock(spinlock_t *lock)
void _raw_spin_unlock(atomic_spinlock_t *lock)
{
debug_spin_unlock(lock);
__raw_spin_unlock(&lock->raw_lock);
......