Commit 090e2db9 authored by Gregory Haskins, committed by Thomas Gleixner

rtmutex: Rearrange the code

The current logic makes rather coarse adjustments to current->state since
it is planning on sleeping anyway.  We want to eventually move to an
adaptive (e.g. optional sleep) algorithm, so we tighten the scope of the
adjustments to bracket the schedule().  This should yield correct behavior
with or without the adaptive features that are added later in the series.
We add it here as a separate patch for greater review clarity on smaller
changes.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 74804766
...@@ -656,6 +656,14 @@ rt_spin_lock_fastunlock(struct rt_mutex *lock, ...@@ -656,6 +656,14 @@ rt_spin_lock_fastunlock(struct rt_mutex *lock,
slowfn(lock); slowfn(lock);
} }
/*
 * Atomically switch current->state to @new_state without losing a
 * concurrent real wakeup.
 *
 * xchg() returns the previous task state; if that state was already
 * TASK_RUNNING, some non-lock wakeup raced with us, so we latch
 * TASK_RUNNING into *saved_state.  The caller later restores
 * *saved_state, ensuring a genuinely woken task ends up runnable
 * rather than being put back into its pre-sleep sleeping state.
 *
 * NOTE(review): relies on xchg() being a full atomic swap on
 * current->state — do not weaken to a plain store.
 */
static inline void
update_current(unsigned long new_state, unsigned long *saved_state)
{
unsigned long state = xchg(&current->state, new_state);
if (unlikely(state == TASK_RUNNING))
*saved_state = TASK_RUNNING;
}
/* /*
* Slow path lock function spin_lock style: this variant is very * Slow path lock function spin_lock style: this variant is very
* careful not to miss any non-lock wakeups. * careful not to miss any non-lock wakeups.
...@@ -695,7 +703,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock) ...@@ -695,7 +703,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
* saved_state accordingly. If we did not get a real wakeup * saved_state accordingly. If we did not get a real wakeup
* then we return with the saved state. * then we return with the saved state.
*/ */
saved_state = xchg(&current->state, TASK_UNINTERRUPTIBLE); saved_state = current->state;
for (;;) { for (;;) {
int saved_lock_depth = current->lock_depth; int saved_lock_depth = current->lock_depth;
...@@ -725,13 +733,14 @@ rt_spin_lock_slowlock(struct rt_mutex *lock) ...@@ -725,13 +733,14 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
debug_rt_mutex_print_deadlock(&waiter); debug_rt_mutex_print_deadlock(&waiter);
schedule_rt_mutex(lock); update_current(TASK_UNINTERRUPTIBLE, &saved_state);
if (waiter.task)
schedule_rt_mutex(lock);
else
update_current(TASK_RUNNING_MUTEX, &saved_state);
atomic_spin_lock_irqsave(&lock->wait_lock, flags); atomic_spin_lock_irqsave(&lock->wait_lock, flags);
current->lock_depth = saved_lock_depth; current->lock_depth = saved_lock_depth;
state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
if (unlikely(state == TASK_RUNNING))
saved_state = TASK_RUNNING;
} }
state = xchg(&current->state, saved_state); state = xchg(&current->state, saved_state);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment