Commit 464b7527 authored by Peter Zijlstra, committed by Ingo Molnar

sched: re-instate vruntime based wakeup preemption

The advantage is that vruntime based wakeup preemption has a better
conceptual model. Here wakeup_gran = 0 means: preempt when 'fair'.
Therefore wakeup_gran is the granularity of unfairness we allow in order
to make progress.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0d13033b
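
In vruntime terms the model is: preempt the running entity iff the wakee's vruntime lags behind curr's by more than wakeup_gran. With wakeup_gran = 0 that degenerates to preempting as soon as the wakee has the smaller vruntime, i.e. as soon as preemption is 'fair'; a nonzero value is exactly the amount of unfairness tolerated before a reschedule is forced. A minimal restatement of that rule (the helper name is illustrative, not part of the patch; the patch's real implementation is wakeup_preempt_entity() in the diff below):

	typedef long long s64;	/* stand-in for the kernel's s64 */

	/* Preempt curr iff the wakee is owed more than 'gran' worth of
	 * vruntime; gran == 0 means: preempt whenever the wakee is behind. */
	static inline int wakee_preempts(s64 curr_vruntime, s64 wakee_vruntime,
					 s64 gran)
	{
		return curr_vruntime - wakee_vruntime > gran;
	}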
@@ -143,6 +143,49 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return se->parent;
 }
 
+/* return depth at which a sched entity is present in the hierarchy */
+static inline int depth_se(struct sched_entity *se)
+{
+	int depth = 0;
+
+	for_each_sched_entity(se)
+		depth++;
+
+	return depth;
+}
+
+static void
+find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+{
+	int se_depth, pse_depth;
+
+	/*
+	 * preemption test can be made between sibling entities who are in the
+	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
+	 * both tasks until we find their ancestors who are siblings of common
+	 * parent.
+	 */
+
+	/* First walk up until both entities are at same depth */
+	se_depth = depth_se(*se);
+	pse_depth = depth_se(*pse);
+
+	while (se_depth > pse_depth) {
+		se_depth--;
+		*se = parent_entity(*se);
+	}
+
+	while (pse_depth > se_depth) {
+		pse_depth--;
+		*pse = parent_entity(*pse);
+	}
+
+	while (!is_same_group(*se, *pse)) {
+		*se = parent_entity(*se);
+		*pse = parent_entity(*pse);
+	}
+}
+
 #else	/* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
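
The walk above only makes the two entities comparable; the actual preemption test happens later on the common cfs_rq. A standalone userspace sketch of the same depth-matching walk, with a mocked hierarchy in place of the kernel's sched_entity (all names here are stand-ins, and two NULL parents play the role of is_same_group() at the root):

	#include <stdio.h>
	#include <stddef.h>

	/* Mock entity: only the parent link matters for the walk. */
	struct entity {
		struct entity *parent;
		const char *name;
	};

	static int depth(struct entity *e)
	{
		int d = 0;

		for (; e; e = e->parent)
			d++;
		return d;
	}

	/* Mirror of find_matching_se(), with parent equality standing in
	 * for is_same_group(). */
	static void find_matching(struct entity **a, struct entity **b)
	{
		int da = depth(*a), db = depth(*b);

		while (da > db) {
			da--;
			*a = (*a)->parent;
		}
		while (db > da) {
			db--;
			*b = (*b)->parent;
		}
		while ((*a)->parent != (*b)->parent) {
			*a = (*a)->parent;
			*b = (*b)->parent;
		}
	}

	int main(void)
	{
		struct entity group_a = { NULL, "group A" };	/* root level */
		struct entity group_b = { NULL, "group B" };	/* root level */
		struct entity task_a  = { &group_a, "task a" };	/* one level down */
		struct entity *a = &task_a, *b = &group_b;	/* depths 2 and 1 */

		find_matching(&a, &b);
		printf("compare %s vs %s\n", a->name, b->name);	/* group A vs group B */
		return 0;
	}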
@@ -193,6 +236,11 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return NULL;
 }
 
+static inline void
+find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+{
+}
+
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
@@ -1244,12 +1292,41 @@ static unsigned long wakeup_gran(struct sched_entity *se)
 	 * More easily preempt - nice tasks, while not making it harder for
 	 * + nice tasks.
 	 */
-	if (sched_feat(ASYM_GRAN))
-		gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
+	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
+		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
 
 	return gran;
 }
 
+/*
+ * Should 'se' preempt 'curr'.
+ *
+ *             |s1
+ *        |s2
+ *   |s3
+ *         g
+ *      |<--->|c
+ *
+ *  w(c, s1) = -1
+ *  w(c, s2) =  0
+ *  w(c, s3) =  1
+ *
+ */
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+	s64 gran, vdiff = curr->vruntime - se->vruntime;
+
+	if (vdiff <= 0)
+		return -1;
+
+	gran = wakeup_gran(curr);
+	if (vdiff > gran)
+		return 1;
+
+	return 0;
+}
+
 /*
  * Preempt the current task with a newly woken task if needed:
  */
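
Two things in this hunk can be checked numerically: the three-way return of wakeup_preempt_entity() against the s1/s2/s3 diagram, and the asymmetric scaling in wakeup_gran(), where calc_delta_fair() boils down to gran * NICE_0_LOAD / se->load.weight. A standalone sketch (weights 3121 and 335 are the kernel's prio_to_weight[] values for nice -5 and nice +5; the 10 ms nominal granularity is an assumed figure, not a value from this patch):

	#include <stdio.h>

	typedef long long s64;

	#define NICE_0_LOAD 1024LL

	/* Same decision logic as wakeup_preempt_entity(), with gran passed
	 * in so the example is self-contained. */
	static int decision(s64 curr_vruntime, s64 se_vruntime, s64 gran)
	{
		s64 vdiff = curr_vruntime - se_vruntime;

		if (vdiff <= 0)
			return -1;
		if (vdiff > gran)
			return 1;
		return 0;
	}

	/* calc_delta_fair(gran, se) reduces to gran * NICE_0_LOAD / weight. */
	static s64 scale_gran(s64 gran, s64 weight)
	{
		return gran * NICE_0_LOAD / weight;
	}

	int main(void)
	{
		s64 c = 100, gran = 10;		/* arbitrary vruntime units */
		s64 nominal = 10000000;		/* assumed 10 ms, in ns */

		/* The diagram's cases: s1 ahead of c, s2 within gran, s3 beyond. */
		printf("w(c, s1) = %d\n", decision(c, 105, gran));	/* -1 */
		printf("w(c, s2) = %d\n", decision(c,  95, gran));	/*  0 */
		printf("w(c, s3) = %d\n", decision(c,  85, gran));	/*  1 */

		/* nice -5 curr (weight 3121 > NICE_0_LOAD): gran shrinks to
		 * ~3.3 ms, so a heavy current task is easier to preempt. */
		printf("nice -5 gran: %lld ns\n", scale_gran(nominal, 3121));

		/* nice +5 curr (weight 335): scaling would inflate gran to
		 * ~30 ms; the new condition skips it, keeping the nominal. */
		printf("nice +5 gran: %lld ns\n", nominal);
		return 0;
	}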
@@ -1258,7 +1335,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	s64 delta_exec;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1296,9 +1372,19 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 	}
 
-	delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
-	if (delta_exec > wakeup_gran(pse))
-		resched_task(curr);
+	find_matching_se(&se, &pse);
+
+	while (se) {
+		BUG_ON(!pse);
+
+		if (wakeup_preempt_entity(se, pse) == 1) {
+			resched_task(curr);
+			break;
+		}
+
+		se = parent_entity(se);
+		pse = parent_entity(pse);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
...
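
Putting the pieces together: vruntime values are only comparable between entities on the same cfs_rq, which is why check_preempt_wakeup() first normalizes both entities with find_matching_se() and only then applies wakeup_preempt_entity(). A mock of that composition (structs, names, and numbers are invented stand-ins, not kernel code):

	#include <stdio.h>
	#include <stddef.h>

	typedef long long s64;

	struct entity {
		struct entity *parent;
		s64 vruntime;
		const char *name;
	};

	static int depth(struct entity *e)
	{
		int d = 0;

		for (; e; e = e->parent)
			d++;
		return d;
	}

	/* As in find_matching_se(): bring both to a common level. */
	static void find_matching(struct entity **a, struct entity **b)
	{
		int da = depth(*a), db = depth(*b);

		while (da > db) { da--; *a = (*a)->parent; }
		while (db > da) { db--; *b = (*b)->parent; }
		while ((*a)->parent != (*b)->parent) {
			*a = (*a)->parent;
			*b = (*b)->parent;
		}
	}

	/* As in wakeup_preempt_entity(): -1 / 0 / 1 verdict. */
	static int decision(struct entity *curr, struct entity *se, s64 gran)
	{
		s64 vdiff = curr->vruntime - se->vruntime;

		return vdiff <= 0 ? -1 : (vdiff > gran ? 1 : 0);
	}

	int main(void)
	{
		struct entity curr_task = { NULL, 100, "curr" };
		struct entity group     = { NULL,  80, "wakee's group" };
		struct entity wakee     = { &group, 999, "wakee" };
		struct entity *se = &curr_task, *pse = &wakee;
		s64 gran = 10;

		/* The wakee is represented by its group at curr's level. */
		find_matching(&se, &pse);
		printf("compare %s vs %s -> %d\n",
		       se->name, pse->name, decision(se, pse, gran));
		/* vdiff = 100 - 80 = 20 > gran -> 1: preempt. The wakee's own
		 * vruntime (999, on its group's cfs_rq clock) is never compared
		 * against curr's directly. */
		return 0;
	}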