Commit 7ebefa8c authored by Dmitry Adamushko, committed by Ingo Molnar

sched: rework of "prioritize non-migratable tasks over migratable ones"

(1) handle in a generic way all cases in which a newly woken-up task is
not migratable (not just the corner case where "rt_se->nr_cpus_allowed == 1")

(2) if current is to be preempted, make sure that "p" will be picked up
by pick_next_task_rt(), i.e. move the task's group to the head of its
list as well.

Currently, this is not the case for group scheduling, as described here:
http://www.ussg.iu.edu/hypermail/linux/kernel/0807.0/0134.html

Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 13b40c1e
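
For context on point (2): pick_next_task_rt() ends up taking the first
entry of the highest-priority non-empty queue, which is why a
non-migratable wakeup must sit at the head of its per-priority list,
and why its group entity has to be moved to the head as well. A
simplified paraphrase of that selection step from kernel/sched_rt.c of
this era (not part of this patch; error handling elided):

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct list_head *queue;
	int idx;

	/* highest priority level with queued entities */
	idx = sched_find_first_bit(array->bitmap);

	/* the head of that per-priority list wins the tie */
	queue = array->queue + idx;
	return list_entry(queue->next, struct sched_rt_entity, run_list);
}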
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -599,11 +599,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	if (rt_se->nr_cpus_allowed == 1)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
+	list_add_tail(&rt_se->run_list, queue);
 
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +684,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+static void
+requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-	struct rt_prio_array *array = &rt_rq->active;
-
 	if (on_rt_rq(rt_se)) {
-		list_del_init(&rt_se->run_list);
-		list_add_tail(&rt_se->run_list,
-			      array->queue + rt_se_prio(rt_se));
+		struct rt_prio_array *array = &rt_rq->active;
+		struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+		if (head)
+			list_move(&rt_se->run_list, queue);
+		else
+			list_move_tail(&rt_se->run_list, queue);
 	}
 }
 
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 	struct rt_rq *rt_rq;
 
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
-		requeue_rt_entity(rt_rq, rt_se);
+		requeue_rt_entity(rt_rq, rt_se, head);
 	}
 }
 
 static void yield_task_rt(struct rq *rq)
 {
-	requeue_task_rt(rq, rq->curr);
+	requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
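
A side note on the requeue_rt_entity() rework above: list_move() and
list_move_tail() from <linux/list.h> unlink an entry and re-link it at
the head or the tail of a queue in one step, replacing the old
list_del_init() + list_add_tail() pair. A minimal userspace sketch of
the same head/tail requeue on a circular doubly linked list
(illustrative only, not kernel code):

#include <stdio.h>

struct node { struct node *prev, *next; int id; };

static void list_init(struct node *head) { head->prev = head->next = head; }

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add(struct node *n, struct node *head)	/* at head */
{
	n->next = head->next; n->prev = head;
	head->next->prev = n; head->next = n;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev; n->next = head;
	head->prev->next = n; head->prev = n;
}

/* one-step requeue, as used by the reworked requeue_rt_entity(): */
static void list_move(struct node *n, struct node *head)
{ list_del(n); list_add(n, head); }

static void list_move_tail(struct node *n, struct node *head)
{ list_del(n); list_add_tail(n, head); }

int main(void)
{
	struct node queue, a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct node *p;

	list_init(&queue);
	list_add_tail(&a, &queue);
	list_add_tail(&b, &queue);
	list_add_tail(&c, &queue);

	list_move(&c, &queue);		/* head != 0 in requeue_rt_entity() */
	list_move_tail(&a, &queue);	/* head == 0 */

	for (p = queue.next; p != &queue; p = p->next)
		printf("%d ", p->id);	/* prints: 3 2 1 */
	putchar('\n');
	return 0;
}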
@@ -753,6 +751,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 	 */
 	return task_cpu(p);
 }
 
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+	cpumask_t mask;
+
+	if (rq->curr->rt.nr_cpus_allowed == 1)
+		return;
+
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+		return;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+		return;
+
+	/*
+	 * There appears to be other cpus that can accept
+	 * current and none to run 'p', so lets reschedule
+	 * to try and push current away:
+	 */
+	requeue_task_rt(rq, p, 1);
+	resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */
 
 /*
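
To make the three early returns of the new check_preempt_equal_prio()
easier to follow, here is a condensed, runnable model of its gating
logic; the booleans are stand-ins for the real affinity and
cpupri_find() checks (which answer "could some other CPU take this
task?"):

#include <stdbool.h>
#include <stdio.h>

static bool curr_is_pinned;		/* rq->curr->rt.nr_cpus_allowed == 1 */
static bool p_can_go_elsewhere;		/* p not pinned && cpupri_find(p) */
static bool curr_can_go_elsewhere;	/* cpupri_find(curr) */

static bool should_push_current(void)
{
	if (curr_is_pinned)		/* current cannot move: bail */
		return false;
	if (p_can_go_elsewhere)		/* push logic can move p instead */
		return false;
	if (!curr_can_go_elsewhere)	/* nowhere to push current */
		return false;
	/*
	 * p is stuck here and current is not: requeue p at the head and
	 * resched, so pick_next_task_rt() runs p while current gets
	 * pushed to another CPU.
	 */
	return true;
}

int main(void)
{
	curr_is_pinned = false;
	p_can_go_elsewhere = false;
	curr_can_go_elsewhere = true;
	printf("push current? %s\n", should_push_current() ? "yes" : "no");
	return 0;
}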
@@ -778,18 +800,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 	 * to move current somewhere else, making room for our non-migratable
 	 * task.
 	 */
-	if((p->prio == rq->curr->prio)
-	   && p->rt.nr_cpus_allowed == 1
-	   && rq->curr->rt.nr_cpus_allowed != 1) {
-		cpumask_t mask;
-
-		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-			/*
-			 * There appears to be other cpus that can accept
-			 * current, so lets reschedule to try and push it away
-			 */
-			resched_task(rq->curr);
-	}
+	if (p->prio == rq->curr->prio && !need_resched())
+		check_preempt_equal_prio(rq, p);
 #endif
 }
@@ -1415,7 +1427,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	 * on the queue:
 	 */
 	if (p->rt.run_list.prev != p->rt.run_list.next) {
-		requeue_task_rt(rq, p);
+		requeue_task_rt(rq, p, 0);
 		set_tsk_need_resched(p);
 	}
 }