Commit 36cf3b5c authored by Thomas Gleixner, committed by Linus Torvalds

FUTEX: Tidy up the code

The recent PRIVATE and REQUEUE_PI changes to the futex code made it hard to
read.  Tidy it up.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0746aec3
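
The heart of the tidy-up is three small helpers that the rest of the diff then calls from every futex operation. For easier reading, here is a sketch of those helpers as they appear in the new code below; the #include lines are shown only for context (an assumption about the surrounding file, not part of this commit):

#include <linux/rwsem.h>    /* down_read()/up_read() - assumed, for context */
#include <linux/uaccess.h>  /* pagefault_disable()/pagefault_enable() - assumed */
#include <asm/futex.h>      /* futex_atomic_cmpxchg_inatomic() - assumed */

/* Take mm->mmap_sem when the futex is shared; fshared is NULL for private futexes. */
static inline void futex_lock_mm(struct rw_semaphore *fshared)
{
        if (fshared)
                down_read(fshared);
}

/* Release mm->mmap_sem when the futex is shared. */
static inline void futex_unlock_mm(struct rw_semaphore *fshared)
{
        if (fshared)
                up_read(fshared);
}

/* cmpxchg on the user-space futex word with page faults disabled. */
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
        u32 curval;

        pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
        pagefault_enable();

        return curval;
}

Call sites shrink accordingly: futex_wake(), futex_wake_op(), futex_requeue(), futex_wait(), futex_lock_pi() and futex_unlock_pi() now call futex_lock_mm(fshared) / futex_unlock_mm(fshared) instead of the open-coded "if (fshared) down_read(fshared);" pairs, and the PI paths call cmpxchg_futex_value_locked() instead of the pagefault_disable()/cmpxchg/pagefault_enable() sequence.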
@@ -120,6 +120,24 @@ static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
 /* Futex-fs vfsmount entry: */
 static struct vfsmount *futex_mnt;
 
+/*
+ * Take mm->mmap_sem, when futex is shared
+ */
+static inline void futex_lock_mm(struct rw_semaphore *fshared)
+{
+        if (fshared)
+                down_read(fshared);
+}
+
+/*
+ * Release mm->mmap_sem, when the futex is shared
+ */
+static inline void futex_unlock_mm(struct rw_semaphore *fshared)
+{
+        if (fshared)
+                up_read(fshared);
+}
+
 /*
  * We hash on the keys returned from get_futex_key (see below).
  */
@@ -287,7 +305,18 @@ void drop_futex_key_refs(union futex_key *key)
 }
 EXPORT_SYMBOL_GPL(drop_futex_key_refs);
 
-static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
+static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
+{
+        u32 curval;
+
+        pagefault_disable();
+        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+        pagefault_enable();
+
+        return curval;
+}
+
+static int get_futex_value_locked(u32 *dest, u32 __user *from)
 {
         int ret;
@@ -620,9 +649,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 
         newval = FUTEX_WAITERS | new_owner->pid;
 
-        pagefault_disable();
-        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-        pagefault_enable();
+        curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
         if (curval == -EFAULT)
                 ret = -EFAULT;
@@ -659,9 +686,7 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
          * There is no waiter, so we unlock the futex. The owner died
          * bit has not to be preserved here. We are the owner:
          */
-        pagefault_disable();
-        oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
-        pagefault_enable();
+        oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
 
         if (oldval == -EFAULT)
                 return oldval;
@@ -700,8 +725,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
         union futex_key key;
         int ret;
 
-        if (fshared)
-                down_read(fshared);
+        futex_lock_mm(fshared);
 
         ret = get_futex_key(uaddr, fshared, &key);
         if (unlikely(ret != 0))
@@ -725,8 +749,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
 
         spin_unlock(&hb->lock);
 out:
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
 
         return ret;
 }
@@ -746,8 +769,7 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
         int ret, op_ret, attempt = 0;
 
 retryfull:
-        if (fshared)
-                down_read(fshared);
+        futex_lock_mm(fshared);
 
         ret = get_futex_key(uaddr1, fshared, &key1);
         if (unlikely(ret != 0))
@@ -793,7 +815,7 @@ retry:
                  */
                 if (attempt++) {
                         ret = futex_handle_fault((unsigned long)uaddr2,
-                                                fshared, attempt);
+                                                 fshared, attempt);
                         if (ret)
                                 goto out;
                         goto retry;
@@ -803,8 +825,7 @@ retry:
                  * If we would have faulted, release mmap_sem,
                  * fault it in and start all over again.
                  */
-                if (fshared)
-                        up_read(fshared);
+                futex_unlock_mm(fshared);
 
                 ret = get_user(dummy, uaddr2);
                 if (ret)
@@ -841,8 +862,8 @@ retry:
         if (hb1 != hb2)
                 spin_unlock(&hb2->lock);
 out:
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
+
         return ret;
 }
@@ -861,8 +882,7 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
         int ret, drop_count = 0;
 
 retry:
-        if (fshared)
-                down_read(fshared);
+        futex_lock_mm(fshared);
 
         ret = get_futex_key(uaddr1, fshared, &key1);
         if (unlikely(ret != 0))
@@ -890,8 +910,7 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
                          * If we would have faulted, release mmap_sem, fault
                          * it in and start all over again.
                          */
-                        if (fshared)
-                                up_read(fshared);
+                        futex_unlock_mm(fshared);
 
                         ret = get_user(curval, uaddr1);
@@ -944,8 +963,7 @@ out_unlock:
                 drop_futex_key_refs(&key1);
 
 out:
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
         return ret;
 }
@@ -1113,10 +1131,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
         while (!ret) {
                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
 
-                pagefault_disable();
-                curval = futex_atomic_cmpxchg_inatomic(uaddr,
-                                                       uval, newval);
-                pagefault_enable();
+                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
                 if (curval == -EFAULT)
                         ret = -EFAULT;
@@ -1134,6 +1149,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 #define ARG3_SHARED  1
 
 static long futex_wait_restart(struct restart_block *restart);
+
 static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
                       u32 val, ktime_t *abs_time)
 {
@@ -1148,8 +1164,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
         q.pi_state = NULL;
 retry:
-        if (fshared)
-                down_read(fshared);
+        futex_lock_mm(fshared);
 
         ret = get_futex_key(uaddr, fshared, &q.key);
         if (unlikely(ret != 0))
@@ -1186,8 +1201,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
                  * If we would have faulted, release mmap_sem, fault it in and
                  * start all over again.
                  */
-                if (fshared)
-                        up_read(fshared);
+                futex_unlock_mm(fshared);
 
                 ret = get_user(uval, uaddr);
@@ -1206,8 +1220,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
          * Now the futex is queued and we have checked the data, we
          * don't want to hold mmap_sem while we sleep.
          */
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
 
         /*
          * There might have been scheduling since the queue_me(), as we
@@ -1285,8 +1298,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
         queue_unlock(&q, hb);
 
 out_release_sem:
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
         return ret;
 }
@@ -1333,8 +1345,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
         q.pi_state = NULL;
 retry:
-        if (fshared)
-                down_read(fshared);
+        futex_lock_mm(fshared);
 
         ret = get_futex_key(uaddr, fshared, &q.key);
         if (unlikely(ret != 0))
@@ -1353,9 +1364,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
          */
         newval = current->pid;
 
-        pagefault_disable();
-        curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
-        pagefault_enable();
+        curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
 
         if (unlikely(curval == -EFAULT))
                 goto uaddr_faulted;
@@ -1398,9 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                 lock_taken = 1;
         }
 
-        pagefault_disable();
-        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-        pagefault_enable();
+        curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
         if (unlikely(curval == -EFAULT))
                 goto uaddr_faulted;
@@ -1428,8 +1435,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                          * exit to complete.
                          */
                         queue_unlock(&q, hb);
-                        if (fshared)
-                                up_read(fshared);
+                        futex_unlock_mm(fshared);
                         cond_resched();
                         goto retry;
@@ -1465,8 +1471,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
          * Now the futex is queued and we have checked the data, we
          * don't want to hold mmap_sem while we sleep.
          */
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
 
         WARN_ON(!q.pi_state);
         /*
@@ -1480,8 +1485,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                 ret = ret ? 0 : -EWOULDBLOCK;
         }
 
-        if (fshared)
-                down_read(fshared);
+        futex_lock_mm(fshared);
         spin_lock(q.lock_ptr);
 
         if (!ret) {
@@ -1518,8 +1522,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
         /* Unqueue and drop the lock */
         unqueue_me_pi(&q);
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
 
         return ret != -EINTR ? ret : -ERESTARTNOINTR;
@@ -1527,8 +1530,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
         queue_unlock(&q, hb);
 
 out_release_sem:
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
         return ret;
 
 uaddr_faulted:
@@ -1550,8 +1552,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                 goto retry_unlocked;
         }
 
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
 
         ret = get_user(uval, uaddr);
         if (!ret && (uval != -EFAULT))
@@ -1585,8 +1586,7 @@ retry:
         /*
          * First take all the futex related locks:
          */
-        if (fshared)
-                down_read(fshared);
+        futex_lock_mm(fshared);
 
         ret = get_futex_key(uaddr, fshared, &key);
         if (unlikely(ret != 0))
@@ -1601,11 +1601,9 @@ retry_unlocked:
          * again. If it succeeds then we can return without waking
          * anyone else up:
          */
-        if (!(uval & FUTEX_OWNER_DIED)) {
-                pagefault_disable();
-                uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-                pagefault_enable();
-        }
+        if (!(uval & FUTEX_OWNER_DIED))
+                uval = cmpxchg_futex_value_locked(uaddr, current->pid, 0);
+
 
         if (unlikely(uval == -EFAULT))
                 goto pi_faulted;
@@ -1647,8 +1645,7 @@ retry_unlocked:
 out_unlock:
         spin_unlock(&hb->lock);
 out:
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
 
         return ret;
@@ -1671,8 +1668,7 @@ pi_faulted:
                 goto retry_unlocked;
         }
 
-        if (fshared)
-                up_read(fshared);
+        futex_unlock_mm(fshared);
 
         ret = get_user(uval, uaddr);
         if (!ret && (uval != -EFAULT))
@@ -1729,8 +1725,8 @@ static int futex_fd(u32 __user *uaddr, int signal)
         if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
                 printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
-                        "will be removed from the kernel in June 2007\n",
-                        current->comm);
+                       "will be removed from the kernel in June 2007\n",
+                       current->comm);
         }
 
         ret = -EINVAL;
@@ -1908,10 +1904,8 @@ retry:
                  * Wake robust non-PI futexes here. The wakeup of
                  * PI futexes happens in exit_pi_state():
                  */
-                if (!pi) {
-                        if (uval & FUTEX_WAITERS)
-                                futex_wake(uaddr, &curr->mm->mmap_sem, 1);
-                }
+                if (!pi && (uval & FUTEX_WAITERS))
+                        futex_wake(uaddr, &curr->mm->mmap_sem, 1);
         }
         return 0;
 }
......
@@ -29,12 +29,6 @@
 
 #include "rtmutex_common.h"
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# include "rtmutex-debug.h"
-#else
-# include "rtmutex.h"
-#endif
-
 # define TRACE_WARN_ON(x)                WARN_ON(x)
 # define TRACE_BUG_ON(x)                 BUG_ON(x)
......
@@ -17,12 +17,6 @@
 
 #include "rtmutex_common.h"
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# include "rtmutex-debug.h"
-#else
-# include "rtmutex.h"
-#endif
-
 /*
  * lock->owner state tracking:
  *
......
@@ -103,7 +103,7 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
 {
         return (struct task_struct *)
-        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+                ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
@@ -120,4 +120,11 @@ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                 struct task_struct *proxy_owner);
 extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                                 struct task_struct *proxy_owner);
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# include "rtmutex-debug.h"
+#else
+# include "rtmutex.h"
+#endif
+
 #endif