Commit 9a897c5a authored by Steven Rostedt, committed by Ingo Molnar

sched: RT-balance, replace hooks with pre/post schedule and wakeup methods

To make the main sched.c code more agnostic to the scheduling classes, the RT-specific balancing hooks in the schedule code are replaced with pre_schedule, post_schedule and task_wake_up methods. These methods may be used by any of the classes, but currently only the sched_rt class implements them.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4bf0b771
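
The core of the diff below is a standard optional-callback pattern: the scheduler core no longer calls RT-specific helpers directly, it checks whether a task's class provides a method pointer and invokes it only if one is set. The following is a minimal userspace sketch (not kernel code) of that pattern; the struct and hook names mirror the patch, but the types are simplified stand-ins and the hook bodies only print what the real RT implementations decide to do.

/*
 * Minimal userspace sketch (NOT kernel code) of the optional per-class
 * hook pattern this commit introduces.  Names mirror the patch, but the
 * types are simplified stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct rq;			/* stand-in for the per-CPU runqueue */
struct task_struct;		/* stand-in for a task */

struct sched_class {
	void (*pre_schedule)(struct rq *rq, struct task_struct *prev);
	void (*post_schedule)(struct rq *rq);
	void (*task_wake_up)(struct rq *rq, struct task_struct *p);
};

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	(void)rq; (void)prev;
	printf("pre_schedule_rt: pull RT tasks if this rq's prio dropped\n");
}

static void post_schedule_rt(struct rq *rq)
{
	(void)rq;
	printf("post_schedule_rt: push queued RT tasks to other CPUs\n");
}

static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	(void)rq; (void)p;
	printf("task_wake_up_rt: push RT tasks if the rq is overloaded\n");
}

/* Only the RT class fills these in; other classes leave them NULL. */
static const struct sched_class rt_sched_class = {
	.pre_schedule	= pre_schedule_rt,
	.post_schedule	= post_schedule_rt,
	.task_wake_up	= task_wake_up_rt,
};

static const struct sched_class fair_sched_class = { 0 };	/* no hooks */

/* What the scheduler core now does at each call site: guard the pointer. */
static void call_hooks(const struct sched_class *class,
		       struct rq *rq, struct task_struct *task)
{
	if (class->pre_schedule)
		class->pre_schedule(rq, task);
	if (class->post_schedule)
		class->post_schedule(rq);
	if (class->task_wake_up)
		class->task_wake_up(rq, task);
}

int main(void)
{
	call_hooks(&rt_sched_class, NULL, NULL);	/* all three hooks fire */
	call_hooks(&fair_sched_class, NULL, NULL);	/* nothing happens */
	return 0;
}

In the actual patch the call sites are additionally wrapped in #ifdef CONFIG_SMP, since the new members of struct sched_class are only declared in the SMP portion of the structure.
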
@@ -843,6 +843,9 @@ struct sched_class {
 	int (*move_one_task) (struct rq *this_rq, int this_cpu,
 			      struct rq *busiest, struct sched_domain *sd,
 			      enum cpu_idle_type idle);
+	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	void (*post_schedule) (struct rq *this_rq);
+	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 #endif
 
 	void (*set_curr_task) (struct rq *rq);

@@ -1625,7 +1625,10 @@ out_activate:
 out_running:
 	p->state = TASK_RUNNING;
-	wakeup_balance_rt(rq, p);
+#ifdef CONFIG_SMP
+	if (p->sched_class->task_wake_up)
+		p->sched_class->task_wake_up(rq, p);
+#endif
 out:
 	task_rq_unlock(rq, &flags);

@@ -1748,7 +1751,10 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		inc_nr_running(p, rq);
 	}
 	check_preempt_curr(rq, p);
-	wakeup_balance_rt(rq, p);
+#ifdef CONFIG_SMP
+	if (p->sched_class->task_wake_up)
+		p->sched_class->task_wake_up(rq, p);
+#endif
 	task_rq_unlock(rq, &flags);
 }

@@ -1869,7 +1875,10 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	prev_state = prev->state;
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
-	schedule_tail_balance_rt(rq);
+#ifdef CONFIG_SMP
+	if (current->sched_class->post_schedule)
+		current->sched_class->post_schedule(rq);
+#endif
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)

@@ -3638,7 +3647,10 @@ need_resched_nonpreemptible:
 		switch_count = &prev->nvcsw;
 	}
 
-	schedule_balance_rt(rq, prev);
+#ifdef CONFIG_SMP
+	if (prev->sched_class->pre_schedule)
+		prev->sched_class->pre_schedule(rq, prev);
+#endif
 
 	if (unlikely(!rq->nr_running))
 		idle_balance(cpu, rq);

@@ -689,14 +689,14 @@ static int pull_rt_task(struct rq *this_rq)
 	return ret;
 }
 
-static void schedule_balance_rt(struct rq *rq, struct task_struct *prev)
+static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
 	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
 		pull_rt_task(rq);
 }
 
-static void schedule_tail_balance_rt(struct rq *rq)
+static void post_schedule_rt(struct rq *rq)
 {
 	/*
 	 * If we have more than one rt_task queued, then

@@ -713,10 +713,9 @@ static void schedule_tail_balance_rt(struct rq *rq)
 	}
 }
 
-static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
+static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
-	if (unlikely(rt_task(p)) &&
-	    !task_running(rq, p) &&
+	if (!task_running(rq, p) &&
 	    (p->prio >= rq->rt.highest_prio) &&
 	    rq->rt.overloaded)
 		push_rt_tasks(rq);

@@ -780,11 +779,6 @@ static void leave_domain_rt(struct rq *rq)
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
 }
-#else /* CONFIG_SMP */
-# define schedule_tail_balance_rt(rq)	do { } while (0)
-# define schedule_balance_rt(rq, prev)	do { } while (0)
-# define wakeup_balance_rt(rq, p)	do { } while (0)
 #endif /* CONFIG_SMP */
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)

@@ -840,6 +834,9 @@ const struct sched_class rt_sched_class = {
 	.set_cpus_allowed	= set_cpus_allowed_rt,
 	.join_domain		= join_domain_rt,
 	.leave_domain		= leave_domain_rt,
+	.pre_schedule		= pre_schedule_rt,
+	.post_schedule		= post_schedule_rt,
+	.task_wake_up		= task_wake_up_rt,
 #endif
 
 	.set_curr_task		= set_curr_task_rt,