Commit 34ca9f9b authored by Thomas Gleixner

spinlocks: Create atomic_spinlock and convert rq->lock

atomic_spinlock_t will be used to annotate locks which are not
converted to sleeping spinlocks on preempt-rt.

rq->lock must be converted right away because it is accessed by a few
raw lock operations.

Also fix the plist implementation so that the debug code can handle
both spinlocks and atomic_spinlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent f7c0160a
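
For orientation, a minimal usage sketch of the two lock flavours this patch introduces. DEFINE_ATOMIC_SPINLOCK() and the atomic_spin_*() operations are added by the diff below; spin_*() keeps its current behaviour while PREEMPT_RT=n. The variable and function names here are invented for illustration:

#include <linux/spinlock.h>

/* Stays a busy-waiting lock even once preempt-rt converts spinlock_t: */
static DEFINE_ATOMIC_SPINLOCK(my_raw_lock);

/* May become a sleeping lock on preempt-rt: */
static DEFINE_SPINLOCK(my_lock);

static void example_critical_sections(void)
{
	unsigned long flags;

	/* rq->lock style usage: must never sleep, so use the new API */
	atomic_spin_lock_irqsave(&my_raw_lock, flags);
	/* ... critical section which may not sleep ... */
	atomic_spin_unlock_irqrestore(&my_raw_lock, flags);

	/* unconverted code keeps using the spin_*() API unchanged */
	spin_lock(&my_lock);
	/* ... ordinary critical section ... */
	spin_unlock(&my_lock);
}
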
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
-spinlock_t *lock;
+atomic_spinlock_t *alock;
+spinlock_t *slock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .slock = _lock
+# define PLIST_HEAD_LOCK_INIT_ATOMIC(_lock) .alock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_ATOMIC(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,10 +110,21 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
-_PLIST_HEAD_INIT(head), \
+_PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
+/**
+ * PLIST_HEAD_INIT_ATOMIC - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_ATOMIC(head, _lock) \
+{ \
+_PLIST_HEAD_INIT(head), \
+PLIST_HEAD_LOCK_INIT_ATOMIC(&(_lock)) \
+}
/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
@@ -119,7 +133,7 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
-.plist = { _PLIST_HEAD_INIT((node).plist) }, \
+.plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
-head->lock = lock;
+head->slock = lock;
+head->alock = NULL;
#endif
}
+/**
+ * plist_head_init_atomic - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: list atomic_spinlock, remembered for debugging
+ */
+static inline void
+plist_head_init_atomic(struct plist_head *head, atomic_spinlock_t *lock)
+{
+INIT_LIST_HEAD(&head->prio_list);
+INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+head->alock = lock;
+head->slock = NULL;
+#endif
+}
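
A short sketch of how the new initializers pair up with the lock flavours (illustrative only; the lock and list names are invented). Under CONFIG_DEBUG_PI_LIST the recorded alock/slock pointer is what plist_check_head() in lib/plist.c below tests:

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_ATOMIC_SPINLOCK(waiter_alock);
static DEFINE_SPINLOCK(waiter_slock);

/* Static initialization, one macro per lock flavour: */
static struct plist_head head_a = PLIST_HEAD_INIT_ATOMIC(head_a, waiter_alock);
static struct plist_head head_s = PLIST_HEAD_INIT(head_s, waiter_slock);

static void init_heads(struct plist_head *a, struct plist_head *s)
{
	/* Dynamic initialization records the lock for the debug checks: */
	plist_head_init_atomic(a, &waiter_alock);
	plist_head_init(s, &waiter_slock);
}
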
......
This diff is collapsed.
@@ -17,44 +17,70 @@
int in_lock_functions(unsigned long addr);
#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
+#define assert_atomic_spin_locked(x) BUG_ON(!atomic_spin_is_locked(x))
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc
+_atomic_spin_lock(atomic_spinlock_t *lock) __acquires(lock);
+void __lockfunc
+_atomic_spin_lock_nested(atomic_spinlock_t *lock, int subclass)
__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+void __lockfunc
+_atomic_spin_lock_nest_lock(atomic_spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
+void __lockfunc
+_atomic_spin_lock_bh(atomic_spinlock_t *lock) __acquires(lock);
+void __lockfunc
+_atomic_spin_lock_irq(atomic_spinlock_t *lock) __acquires(lock);
+unsigned long __lockfunc
+_atomic_spin_lock_irqsave(atomic_spinlock_t *lock) __acquires(lock);
+unsigned long __lockfunc
+_atomic_spin_lock_irqsave_nested(atomic_spinlock_t *lock, int subclass)
+__acquires(lock);
+int __lockfunc _atomic_spin_trylock(atomic_spinlock_t *lock);
+int __lockfunc _atomic_spin_trylock_bh(atomic_spinlock_t *lock);
+void __lockfunc
+_atomic_spin_unlock(atomic_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_atomic_spin_unlock_bh(atomic_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_atomic_spin_unlock_irq(atomic_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_atomic_spin_unlock_irqrestore(atomic_spinlock_t *lock, unsigned long flags)
+__releases(lock);
void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
-__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
-__acquires(lock);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-__releases(lock);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
......
@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
-#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+#define assert_atomic_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
@@ -48,33 +48,35 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
-#define _spin_lock(lock) __LOCK(lock)
-#define _spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _atomic_spin_lock(lock) __LOCK(lock)
+#define _atomic_spin_lock_nested(lock, subclass) \
+__LOCK(lock)
#define _read_lock(lock) __LOCK(lock)
#define _write_lock(lock) __LOCK(lock)
-#define _spin_lock_bh(lock) __LOCK_BH(lock)
+#define _atomic_spin_lock_bh(lock) __LOCK_BH(lock)
#define _read_lock_bh(lock) __LOCK_BH(lock)
#define _write_lock_bh(lock) __LOCK_BH(lock)
-#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _atomic_spin_lock_irq(lock) __LOCK_IRQ(lock)
#define _read_lock_irq(lock) __LOCK_IRQ(lock)
#define _write_lock_irq(lock) __LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _atomic_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _atomic_spin_trylock(lock) ({ __LOCK(lock); 1; })
#define _read_trylock(lock) ({ __LOCK(lock); 1; })
#define _write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock) __UNLOCK(lock)
+#define _atomic_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _atomic_spin_unlock(lock) __UNLOCK(lock)
#define _read_unlock(lock) __UNLOCK(lock)
#define _write_unlock(lock) __UNLOCK(lock)
-#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _atomic_spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _atomic_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
+#define _atomic_spin_unlock_irqrestore(lock, flags) \
+__UNLOCK_IRQRESTORE(lock, flags)
#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
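
On UP, the atomic_spin_*() operations thus compile down to preemption (and, for the _irq/_irqsave variants, interrupt) control with no lock word touched. Roughly, paraphrasing the __LOCK_IRQSAVE() helper defined earlier in this file (a sketch, not the literal expansion):

/* Approximate UP-nondebug expansion of _atomic_spin_lock_irqsave(): */
#define atomic_spin_lock_irqsave_sketch(lock, flags)	\
	do {						\
		local_irq_save(flags);			\
		preempt_disable();			\
		(void)(lock);	/* no real locking on UP */ \
	} while (0)
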
......
@@ -29,7 +29,7 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+} atomic_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
@@ -42,21 +42,63 @@ typedef struct {
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
-(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+# define __ATOMIC_SPIN_LOCK_UNLOCKED(lockname) \
+(atomic_spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
SPIN_DEP_MAP_INIT(lockname) }
#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
-(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+# define __ATOMIC_SPIN_LOCK_UNLOCKED(lockname) \
+(atomic_spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
SPIN_DEP_MAP_INIT(lockname) }
#endif
/*
* SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
* deprecated.
*
* Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
* appropriate.
*/
+#define DEFINE_ATOMIC_SPINLOCK(x) \
+atomic_spinlock_t x = __ATOMIC_SPIN_LOCK_UNLOCKED(x)
+/*
+ * For PREEMPT_RT=n we use the same data structures and the spinlock
+ * functions are mapped to the atomic_spinlock functions
+ */
+typedef struct {
+raw_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+unsigned int magic, owner_cpu;
+void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+.magic = SPINLOCK_MAGIC, \
+.owner = SPINLOCK_OWNER_INIT, \
+.owner_cpu = -1, \
+SPIN_DEP_MAP_INIT(lockname) }
+#else
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+SPIN_DEP_MAP_INIT(lockname) }
+#endif
/*
* SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
* deprecated.
*
* Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
* appropriate.
*/
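
While PREEMPT_RT=n both types therefore have identical layout and cost, and they can be declared side by side. A small sketch with invented names (DEFINE_SPINLOCK() is the existing counterpart of the DEFINE_ATOMIC_SPINLOCK() added above):

#include <linux/spinlock.h>

/* Explicit static initializers: */
static atomic_spinlock_t boot_alock = __ATOMIC_SPIN_LOCK_UNLOCKED(boot_alock);
static spinlock_t data_slock = __SPIN_LOCK_UNLOCKED(data_slock);

/* Equivalent shorthand via the DEFINE_* helpers: */
static DEFINE_ATOMIC_SPINLOCK(hw_alock);
static DEFINE_SPINLOCK(list_slock);
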
......
@@ -999,7 +999,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
-q->list.plist.lock = &hb2->lock;
+q->list.plist.slock = &hb2->lock;
#endif
}
get_futex_key_refs(key2);
@@ -1337,7 +1337,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
-q->list.plist.lock = &hb->lock;
+q->list.plist.slock = &hb->lock;
#endif
plist_add(&q->list, &hb->chain);
q->task = current;
......
This diff is collapsed.
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
-spin_lock_irqsave(&rq->lock, flags);
+atomic_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
-spin_unlock_irqrestore(&rq->lock, flags);
+atomic_spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
......
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
-spin_unlock_irq(&rq->lock);
+atomic_spin_unlock_irq(&rq->lock);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
-spin_lock_irq(&rq->lock);
+atomic_spin_lock_irq(&rq->lock);
}
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
......
@@ -441,9 +441,9 @@ static void disable_runtime(struct rq *rq)
{
unsigned long flags;
-spin_lock_irqsave(&rq->lock, flags);
+atomic_spin_lock_irqsave(&rq->lock, flags);
__disable_runtime(rq);
-spin_unlock_irqrestore(&rq->lock, flags);
+atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
static void __enable_runtime(struct rq *rq)
@@ -473,9 +473,9 @@ static void enable_runtime(struct rq *rq)
{
unsigned long flags;
-spin_lock_irqsave(&rq->lock, flags);
+atomic_spin_lock_irqsave(&rq->lock, flags);
__enable_runtime(rq);
-spin_unlock_irqrestore(&rq->lock, flags);
+atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
@@ -511,7 +511,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
-spin_lock(&rq->lock);
+atomic_spin_lock(&rq->lock);
if (rt_rq->rt_time) {
u64 runtime;
@@ -532,7 +532,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
-spin_unlock(&rq->lock);
+atomic_spin_unlock(&rq->lock);
}
return idle;
@@ -1244,7 +1244,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
task_running(rq, task) ||
!task->se.on_rq)) {
-spin_unlock(&lowest_rq->lock);
+atomic_spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
break;
}
@@ -1480,9 +1480,9 @@ static void post_schedule_rt(struct rq *rq)
* This is only called if needs_post_schedule_rt() indicates that
* we need to push tasks away
*/
-spin_lock_irq(&rq->lock);
+atomic_spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
-spin_unlock_irq(&rq->lock);
+atomic_spin_unlock_irq(&rq->lock);
}
/*
......
@@ -23,7 +23,7 @@
#include "lock-internals.h"
-int __lockfunc _spin_trylock(spinlock_t *lock)
+int __lockfunc _atomic_spin_trylock(atomic_spinlock_t *lock)
{
preempt_disable();
if (_raw_spin_trylock(lock)) {
@@ -33,7 +33,7 @@ int __lockfunc _spin_trylock(spinlock_t *lock)
preempt_enable();
return 0;
}
-EXPORT_SYMBOL(_spin_trylock);
+EXPORT_SYMBOL(_atomic_spin_trylock);
/*
* If lockdep is enabled then we use the non-preemption spin-ops
@@ -42,7 +42,7 @@ EXPORT_SYMBOL(_spin_trylock);
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _atomic_spin_lock_irqsave(atomic_spinlock_t *lock)
{
unsigned long flags;
@@ -61,33 +61,33 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
#endif
return flags;
}
-EXPORT_SYMBOL(_spin_lock_irqsave);
+EXPORT_SYMBOL(_atomic_spin_lock_irqsave);
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
+void __lockfunc _atomic_spin_lock_irq(atomic_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_irq);
+EXPORT_SYMBOL(_atomic_spin_lock_irq);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
+void __lockfunc _atomic_spin_lock_bh(atomic_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_bh);
+EXPORT_SYMBOL(_atomic_spin_lock_bh);
-void __lockfunc _spin_lock(spinlock_t *lock)
+void __lockfunc _atomic_spin_lock(atomic_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock);
+EXPORT_SYMBOL(_atomic_spin_lock);
#else /* CONFIG_PREEMPT: */
@@ -95,26 +95,27 @@ EXPORT_SYMBOL(_spin_lock);
* Build preemption-friendly versions of the following
* lock-spinning functions:
*
-* _spin_lock()
-* _spin_lock_irq()
-* _spin_lock_irqsave()
-* _spin_lock_bh()
+* _atomic_spin_lock()
+* _atomic_spin_lock_irq()
+* _atomic_spin_lock_irqsave()
+* _atomic_spin_lock_bh()
*/
-BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(atomic_spin, atomic_spinlock);
#endif /* CONFIG_PREEMPT */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _atomic_spin_lock_nested(atomic_spinlock_t *lock, int subclass)
{
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_nested);
+EXPORT_SYMBOL(_atomic_spin_lock_nested);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc
+_atomic_spin_lock_irqsave_nested(atomic_spinlock_t *lock, int subclass)
{
unsigned long flags;
@@ -125,55 +126,56 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
_raw_spin_lock_flags, &flags);
return flags;
}
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+EXPORT_SYMBOL(_atomic_spin_lock_irqsave_nested);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+void __lockfunc _atomic_spin_lock_nest_lock(atomic_spinlock_t *lock,
struct lockdep_map *nest_lock)
{
preempt_disable();
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_nest_lock);
+EXPORT_SYMBOL(_atomic_spin_lock_nest_lock);
#endif
-void __lockfunc _spin_unlock(spinlock_t *lock)
+void __lockfunc _atomic_spin_unlock(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
preempt_enable();
}
-EXPORT_SYMBOL(_spin_unlock);
+EXPORT_SYMBOL(_atomic_spin_unlock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc
+_atomic_spin_unlock_irqrestore(atomic_spinlock_t *lock, unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
+EXPORT_SYMBOL(_atomic_spin_unlock_irqrestore);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+void __lockfunc _atomic_spin_unlock_irq(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
-EXPORT_SYMBOL(_spin_unlock_irq);
+EXPORT_SYMBOL(_atomic_spin_unlock_irq);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+void __lockfunc _atomic_spin_unlock_bh(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-EXPORT_SYMBOL(_spin_unlock_bh);
+EXPORT_SYMBOL(_atomic_spin_unlock_bh);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+int __lockfunc _atomic_spin_trylock_bh(atomic_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
@@ -186,7 +188,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
return 0;
}
-EXPORT_SYMBOL(_spin_trylock_bh);
+EXPORT_SYMBOL(_atomic_spin_trylock_bh);
notrace int in_lock_functions(unsigned long addr)
{
......
@@ -17,18 +17,18 @@
* because the spin-lock and the decrement must be
* "atomic".
*/
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+int _atomic_dec_and_atomic_lock(atomic_t *atomic, atomic_spinlock_t *lock)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
/* Otherwise do it the slow way */
-spin_lock(lock);
+atomic_spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
-spin_unlock(lock);
+atomic_spin_unlock(lock);
return 0;
}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
+EXPORT_SYMBOL(_atomic_dec_and_atomic_lock);
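
The typical caller is a refcount drop that only needs the lock when the count can reach zero. A hedged sketch; struct cache_obj, cache_lock and cache_put() are invented here, and only _atomic_dec_and_atomic_lock() and the atomic_spin_*() ops come from this patch:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_ATOMIC_SPINLOCK(cache_lock);

struct cache_obj {
	atomic_t refcnt;
	struct list_head node;
};

static void cache_put(struct cache_obj *obj)
{
	/* Fast path: the count stays >= 1 and no lock is taken. */
	if (!_atomic_dec_and_atomic_lock(&obj->refcnt, &cache_lock))
		return;

	/* Count hit zero and cache_lock is now held: safe to unlink. */
	list_del(&obj->node);
	atomic_spin_unlock(&cache_lock);
	kfree(obj);
}
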
@@ -20,7 +20,7 @@
*
* Don't use in new code.
*/
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_ATOMIC_SPINLOCK(kernel_flag);
/*
@@ -79,7 +79,7 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
-while (spin_is_locked(&kernel_flag))
+while (atomic_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
} while (!_raw_spin_trylock(&kernel_flag));
......
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head)
{
-WARN_ON(!head->lock);
-if (head->lock)
-WARN_ON_SMP(!spin_is_locked(head->lock));
+WARN_ON(!head->alock && !head->slock);
+if (head->alock)
+WARN_ON_SMP(!atomic_spin_is_locked(head->alock));
+if (head->slock)
+WARN_ON_SMP(!spin_is_locked(head->slock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
......
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
-void __spin_lock_init(spinlock_t *lock, const char *name,
-struct lock_class_key *key)
+void __atomic_spin_lock_init(atomic_spinlock_t *lock, const char *name,
+struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -29,7 +29,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
lock->owner_cpu = -1;
}
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__atomic_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(atomic_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(atomic_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
lock, "cpu recursion");
}
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(atomic_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(atomic_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
-SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+SPIN_BUG_ON(!atomic_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
lock->owner_cpu = -1;
}
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(atomic_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
@@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t *lock)
}
}
-void _raw_spin_lock(spinlock_t *lock)
+void _raw_spin_lock(atomic_spinlock_t *lock)
{
debug_spin_lock_before(lock);
if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
@@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock)
debug_spin_lock_after(lock);
}
-int _raw_spin_trylock(spinlock_t *lock)
+int _raw_spin_trylock(atomic_spinlock_t *lock)
{
int ret = __raw_spin_trylock(&lock->raw_lock);
@@ -148,7 +148,7 @@ int _raw_spin_trylock(spinlock_t *lock)
return ret;
}
-void _raw_spin_unlock(spinlock_t *lock)
+void _raw_spin_unlock(atomic_spinlock_t *lock)
{
debug_spin_unlock(lock);
__raw_spin_unlock(&lock->raw_lock);
......