Commit d281918d authored by Ingo Molnar

sched: remove 'now' use from assignments

change all 'now' timestamp uses in assignments to rq->clock.

( this is an identity transformation that causes no functionality change:
  every such new rq->clock use is necessarily preceded by an update_rq_clock()
  call. )
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent eb594494
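For context, a minimal, self-contained sketch of the pattern this commit converges on: the caller refreshes rq->clock once via update_rq_clock(), and the callee reads rq->clock directly instead of receiving a 'now' argument. This is not kernel code; the struct, sched_clock_stub(), and the update_curr_*() helpers below are simplified stand-ins invented for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct rq {
        u64 clock;                      /* per-runqueue timestamp, in nanoseconds */
};

/* Stand-in for the real sched_clock(): pretend 1 ms passes per call. */
static u64 sched_clock_stub(void)
{
        static u64 t;
        t += 1000000;
        return t;
}

static void update_rq_clock(struct rq *rq)
{
        rq->clock = sched_clock_stub(); /* refresh the cached timestamp */
}

/* Before this commit: the timestamp was threaded through as 'now'. */
static void update_curr_old(u64 *exec_start, u64 now)
{
        u64 delta_exec = now - *exec_start;

        *exec_start = now;
        printf("old style: ran for %llu ns\n", (unsigned long long)delta_exec);
}

/* After this commit: read rq->clock, which the caller has just updated. */
static void update_curr_new(struct rq *rq, u64 *exec_start)
{
        u64 delta_exec = rq->clock - *exec_start;

        *exec_start = rq->clock;
        printf("new style: ran for %llu ns\n", (unsigned long long)delta_exec);
}

int main(void)
{
        struct rq rq = { 0 };
        u64 exec_start = 0;

        update_rq_clock(&rq);                   /* always precedes a timestamp read */
        update_curr_old(&exec_start, rq.clock);

        update_rq_clock(&rq);                   /* always precedes an rq->clock read */
        update_curr_new(&rq, &exec_start);
        return 0;
}

Because every rq->clock read in the patched functions is preceded by an update_rq_clock() call in its caller, dropping the 'now' argument does not change which timestamp is observed.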
@@ -788,8 +788,8 @@ static void update_curr_load(struct rq *rq, u64 now)
 	u64 start;

 	start = ls->load_update_start;
-	ls->load_update_start = now;
-	ls->delta_stat += now - start;
+	ls->load_update_start = rq->clock;
+	ls->delta_stat += rq->clock - start;
 	/*
 	 * Stagger updates to ls->delta_fair. Very frequent updates
 	 * can be expensive.
@@ -1979,8 +1979,8 @@ static void update_cpu_load(struct rq *this_rq)
 	exec_delta64 = ls->delta_exec + 1;
 	ls->delta_exec = 0;

-	sample_interval64 = now - ls->load_update_last;
-	ls->load_update_last = now;
+	sample_interval64 = this_rq->clock - ls->load_update_last;
+	ls->load_update_last = this_rq->clock;

 	if ((s64)sample_interval64 < (s64)TICK_NSEC)
 		sample_interval64 = TICK_NSEC;
@@ -333,7 +333,7 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
 	 * since the last time we changed load (this cannot
 	 * overflow on 32 bits):
 	 */
-	delta_exec = (unsigned long)(now - curr->exec_start);
+	delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);

 	curr->delta_exec += delta_exec;

@@ -341,14 +341,14 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
 		__update_curr(cfs_rq, curr, now);
 		curr->delta_exec = 0;
 	}
-	curr->exec_start = now;
+	curr->exec_start = rq_of(cfs_rq)->clock;
 }

 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
 	se->wait_start_fair = cfs_rq->fair_clock;
-	schedstat_set(se->wait_start, now);
+	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }

 /*
@@ -421,7 +421,8 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
 	unsigned long delta_fair = se->delta_fair_run;

-	schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));
+	schedstat_set(se->wait_max, max(se->wait_max,
+			rq_of(cfs_rq)->clock - se->wait_start));

 	if (unlikely(se->load.weight != NICE_0_LOAD))
 		delta_fair = calc_weighted(delta_fair, se->load.weight,
@@ -470,7 +471,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 	/*
 	 * We are starting a new run period:
 	 */
-	se->exec_start = now;
+	se->exec_start = rq_of(cfs_rq)->clock;
 }

 /*
@@ -545,7 +546,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)

 #ifdef CONFIG_SCHEDSTATS
 	if (se->sleep_start) {
-		u64 delta = now - se->sleep_start;
+		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

 		if ((s64)delta < 0)
 			delta = 0;
@@ -557,7 +558,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 		se->sum_sleep_runtime += delta;
 	}
 	if (se->block_start) {
-		u64 delta = now - se->block_start;
+		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

 		if ((s64)delta < 0)
 			delta = 0;
@@ -599,9 +600,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			struct task_struct *tsk = task_of(se);

 			if (tsk->state & TASK_INTERRUPTIBLE)
-				se->sleep_start = now;
+				se->sleep_start = rq_of(cfs_rq)->clock;
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
-				se->block_start = now;
+				se->block_start = rq_of(cfs_rq)->clock;
 		}
 		cfs_rq->wait_runtime -= se->wait_runtime;
 #endif
@@ -15,14 +15,14 @@ static inline void update_curr_rt(struct rq *rq, u64 now)
 	if (!task_has_rt_policy(curr))
 		return;

-	delta_exec = now - curr->se.exec_start;
+	delta_exec = rq->clock - curr->se.exec_start;
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;

 	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

 	curr->se.sum_exec_runtime += delta_exec;
-	curr->se.exec_start = now;
+	curr->se.exec_start = rq->clock;
 }

 static void
@@ -89,7 +89,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
 	queue = array->queue + idx;
 	next = list_entry(queue->next, struct task_struct, run_list);

-	next->se.exec_start = now;
+	next->se.exec_start = rq->clock;

 	return next;
 }