Commit 02e0431a authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

sched: better min_vruntime tracking

Better min_vruntime tracking: update it every time 'curr' is
updated - not just when a task is enqueued into the tree.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent 35a6ff54
...@@ -116,22 +116,28 @@ static inline struct task_struct *task_of(struct sched_entity *se) ...@@ -116,22 +116,28 @@ static inline struct task_struct *task_of(struct sched_entity *se)
* Scheduling class tree data structure manipulation methods: * Scheduling class tree data structure manipulation methods:
*/ */
/*
 * Return the later of two vruntimes, treating the u64 values as a
 * wrapping (modular) clock.
 *
 * The comparison is done on the signed difference: unsigned
 * subtraction wraps, and interpreting the delta as s64 is positive
 * iff @vruntime is "ahead" of @min_vruntime on the wrapping clock
 * (i.e. within 2^63 of it).  This stays correct across u64 overflow
 * and replaces the old magic-constant heuristic
 * (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)), which
 * only caught a narrow window around the wrap point and mis-ordered
 * values outside it.
 */
static inline u64
max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);

	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
static inline void static inline void
set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost) set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
{ {
struct sched_entity *se; struct sched_entity *se;
cfs_rq->rb_leftmost = leftmost; cfs_rq->rb_leftmost = leftmost;
if (leftmost) { if (leftmost)
se = rb_entry(leftmost, struct sched_entity, run_node); se = rb_entry(leftmost, struct sched_entity, run_node);
if ((se->vruntime > cfs_rq->min_vruntime) ||
(cfs_rq->min_vruntime > (1ULL << 61) &&
se->vruntime < (1ULL << 50)))
cfs_rq->min_vruntime = se->vruntime;
}
} }
s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) static inline s64
entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{ {
return se->fair_key - cfs_rq->min_vruntime; return se->fair_key - cfs_rq->min_vruntime;
} }
...@@ -254,6 +260,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, ...@@ -254,6 +260,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
unsigned long delta_exec) unsigned long delta_exec)
{ {
unsigned long delta_exec_weighted; unsigned long delta_exec_weighted;
u64 next_vruntime, min_vruntime;
schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max)); schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
...@@ -265,6 +272,25 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, ...@@ -265,6 +272,25 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
&curr->load); &curr->load);
} }
curr->vruntime += delta_exec_weighted; curr->vruntime += delta_exec_weighted;
/*
* maintain cfs_rq->min_vruntime to be a monotonic increasing
* value tracking the leftmost vruntime in the tree.
*/
if (first_fair(cfs_rq)) {
next_vruntime = __pick_next_entity(cfs_rq)->vruntime;
/* min_vruntime() := !max_vruntime() */
min_vruntime = max_vruntime(curr->vruntime, next_vruntime);
if (min_vruntime == next_vruntime)
min_vruntime = curr->vruntime;
else
min_vruntime = next_vruntime;
} else
min_vruntime = curr->vruntime;
cfs_rq->min_vruntime =
max_vruntime(cfs_rq->min_vruntime, min_vruntime);
} }
static void update_curr(struct cfs_rq *cfs_rq) static void update_curr(struct cfs_rq *cfs_rq)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment