Commit edeb8fe6 authored by Thomas Gleixner's avatar Thomas Gleixner

rtmutex: cleanup the adaptive spin code

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c3c6a61d
@@ -708,41 +708,22 @@ update_current(unsigned long new_state, unsigned long *saved_state)
static int adaptive_wait(struct rt_mutex_waiter *waiter, static int adaptive_wait(struct rt_mutex_waiter *waiter,
struct task_struct *orig_owner) struct task_struct *orig_owner)
{ {
int sleep = 0;
for (;;) { for (;;) {
/* we are the owner? */ /* we are the owner? */
if (!waiter->task) if (!waiter->task)
break; return 0;
/*
* We need to read the owner of the lock and then check
* its state. But we can't let the owner task be freed
* while we read the state. We grab the rcu_lock and
* this makes sure that the owner task wont disappear
* between testing that it still has the lock, and checking
* its state.
*/
rcu_read_lock();
/* Owner changed? Then lets update the original */ /* Owner changed? Then lets update the original */
if (orig_owner != rt_mutex_owner(waiter->lock)) { if (orig_owner != rt_mutex_owner(waiter->lock))
rcu_read_unlock(); return 0;
break;
}
/* Owner went to bed, so should we */ /* Owner went to bed, so should we */
if (!task_is_current(orig_owner)) { if (!task_is_current(orig_owner))
sleep = 1; return 1;
rcu_read_unlock();
break;
}
rcu_read_unlock();
cpu_relax(); cpu_relax();
} }
return sleep;
} }
#else #else
static int adaptive_wait(struct rt_mutex_waiter *waiter, static int adaptive_wait(struct rt_mutex_waiter *waiter,
@@ -820,11 +801,13 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
*/ */
current->lock_depth = -1; current->lock_depth = -1;
orig_owner = rt_mutex_owner(lock); orig_owner = rt_mutex_owner(lock);
get_task_struct(orig_owner);
atomic_spin_unlock_irqrestore(&lock->wait_lock, flags); atomic_spin_unlock_irqrestore(&lock->wait_lock, flags);
debug_rt_mutex_print_deadlock(&waiter); debug_rt_mutex_print_deadlock(&waiter);
if (adaptive_wait(&waiter, orig_owner)) { if (adaptive_wait(&waiter, orig_owner)) {
put_task_struct(orig_owner);
update_current(TASK_UNINTERRUPTIBLE, &saved_state); update_current(TASK_UNINTERRUPTIBLE, &saved_state);
/* /*
* The xchg() in update_current() is an implicit * The xchg() in update_current() is an implicit
@@ -833,7 +816,8 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
*/ */
if (waiter.task) if (waiter.task)
schedule_rt_mutex(lock); schedule_rt_mutex(lock);
} } else
put_task_struct(orig_owner);
atomic_spin_lock_irqsave(&lock->wait_lock, flags); atomic_spin_lock_irqsave(&lock->wait_lock, flags);
current->lock_depth = saved_lock_depth; current->lock_depth = saved_lock_depth;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment