Commit a22d7fc1 authored by Gregory Haskins, committed by Ingo Molnar

sched: wake-balance fixes

We have logic to detect whether the system has migratable tasks, but we are
not using it when deciding whether to push tasks away.  Make the push paths
consult that information before doing any work.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6e1254d2
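
For readers skimming the diff below, here is a minimal standalone C sketch of the idea (userspace illustration, not kernel code; the struct and function names only mirror the kernel's, and the real update_rt_migration() condition is simplified here): pushing is attempted only when the runqueue has been flagged as overloaded, i.e. it actually holds RT tasks that are allowed to run elsewhere.

#include <stdio.h>

struct rt_rq {
	unsigned long rt_nr_migratory;	/* RT tasks allowed to run on other CPUs */
	int highest_prio;		/* highest queued RT task prio */
	int overloaded;			/* set when pushing could actually help */
};

struct rq {
	struct rt_rq rt;
};

/* Simplified stand-in for the kernel's update_rt_migration(). */
static void update_rt_migration(struct rq *rq)
{
	rq->rt.overloaded = rq->rt.rt_nr_migratory > 0;
}

/* Returns 1 if a task would be pushed, 0 if there is nothing to do. */
static int push_rt_task(struct rq *rq)
{
	if (!rq->rt.overloaded)
		return 0;	/* no migratable RT tasks: skip the work */

	/* ... would pick the next highest migratable RT task and push it ... */
	return 1;
}

int main(void)
{
	struct rq rq = { .rt = { .rt_nr_migratory = 0, .highest_prio = 100 } };

	update_rt_migration(&rq);
	printf("no migratable RT tasks -> push_rt_task() = %d\n", push_rt_task(&rq));

	rq.rt.rt_nr_migratory = 2;
	update_rt_migration(&rq);
	printf("migratable RT tasks    -> push_rt_task() = %d\n", push_rt_task(&rq));
	return 0;
}
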
@@ -346,6 +346,7 @@ struct rt_rq {
 	unsigned long rt_nr_migratory;
 	/* highest queued rt task prio */
 	int highest_prio;
+	int overloaded;
 };
 
 /*
@@ -6770,6 +6771,7 @@ void __init sched_init(void)
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq->rt.highest_prio = MAX_RT_PRIO;
+		rq->rt.overloaded = 0;
 #endif
 		atomic_set(&rq->nr_iowait, 0);
...
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
 }
 
 static inline void rt_set_overload(struct rq *rq)
 {
+	rq->rt.overloaded = 1;
 	cpu_set(rq->cpu, rt_overload_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
 	/* the order here really doesn't matter */
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
+	rq->rt.overloaded = 0;
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)
 	assert_spin_locked(&rq->lock);
 
+	if (!rq->rt.overloaded)
+		return 0;
+
 	next_task = pick_next_highest_task_rt(rq, -1);
 	if (!next_task)
 		return 0;
@@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
 	 * the lock was owned by prev, we need to release it
 	 * first via finish_lock_switch and then reaquire it here.
 	 */
-	if (unlikely(rq->rt.rt_nr_running > 1)) {
+	if (unlikely(rq->rt.overloaded)) {
 		spin_lock_irq(&rq->lock);
 		push_rt_tasks(rq);
 		spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 {
 	if (unlikely(rt_task(p)) &&
 	    !task_running(rq, p) &&
-	    (p->prio >= rq->curr->prio))
+	    (p->prio >= rq->rt.highest_prio) &&
+	    rq->rt.overloaded)
 		push_rt_tasks(rq);
 }
...