Commit a25707f3 authored by Ingo Molnar

sched: remove precise CPU load

CPU load calculations are statistical anyway, and there's little benefit
from having them recalculated on every scheduling event. So remove this
code; it gets rid of a divide in the scheduler wakeup and context-switch
fastpath.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent 8ebc91d9
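For reference, the arithmetic being removed can be modeled outside the kernel. The block below is a minimal standalone sketch, not kernel code: the TICK_NSEC and SCHED_LOAD_SCALE values are illustrative stand-ins and precise_cpu_load() is a hypothetical helper name. It shows the two 64-bit divisions (div64_64() in the removed code) that sat on the fastpath:

	#include <stdint.h>
	#include <stdio.h>

	#define TICK_NSEC        1000000ULL	/* illustrative: 1000 Hz tick */
	#define SCHED_LOAD_SCALE 1024ULL	/* illustrative load unit */

	/* Standalone model of the removed "precise" load computation. */
	static uint64_t precise_cpu_load(uint64_t delta_fair, uint64_t delta_exec,
					 uint64_t sample_interval)
	{
		uint64_t fair = delta_fair + 1;	/* +1 avoids division by zero */
		uint64_t exec = delta_exec + 1;
		uint64_t tmp;

		if (sample_interval < TICK_NSEC)
			sample_interval = TICK_NSEC;
		if (exec > sample_interval)
			exec = sample_interval;

		tmp = (SCHED_LOAD_SCALE * exec) / fair;	/* div64_64() in the kernel */
		tmp = (tmp * exec) / sample_interval;	/* div64_64() in the kernel */

		return tmp;
	}

	int main(void)
	{
		/* A fully busy interval yields roughly SCHED_LOAD_SCALE. */
		printf("%llu\n", (unsigned long long)
		       precise_cpu_load(TICK_NSEC, TICK_NSEC, TICK_NSEC));
		return 0;
	}

Both divisions ran on every wakeup and context switch, which is the cost the commit message refers to.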
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1972,42 +1972,11 @@ unsigned long nr_active(void)
  */
 static void update_cpu_load(struct rq *this_rq)
 {
-	u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
 	unsigned long total_load = this_rq->ls.load.weight;
 	unsigned long this_load = total_load;
-	struct load_stat *ls = &this_rq->ls;
 	int i, scale;
 
 	this_rq->nr_load_updates++;
-	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
-		goto do_avg;
-
-	/* Update delta_fair/delta_exec fields first */
-	update_curr_load(this_rq);
-
-	fair_delta64 = ls->delta_fair + 1;
-	ls->delta_fair = 0;
-
-	exec_delta64 = ls->delta_exec + 1;
-	ls->delta_exec = 0;
-
-	sample_interval64 = this_rq->clock - ls->load_update_last;
-	ls->load_update_last = this_rq->clock;
-
-	if ((s64)sample_interval64 < (s64)TICK_NSEC)
-		sample_interval64 = TICK_NSEC;
-
-	if (exec_delta64 > sample_interval64)
-		exec_delta64 = sample_interval64;
-
-	idle_delta64 = sample_interval64 - exec_delta64;
-
-	tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
-	tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
-
-	this_load = (unsigned long)tmp64;
-
-do_avg:
 
 	/* Update our load: */
 	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
@@ -2017,7 +1986,13 @@ do_avg:
 
 		old_load = this_rq->cpu_load[i];
 		new_load = this_load;
-
+		/*
+		 * Round up the averaging division if load is increasing. This
+		 * prevents us from getting stuck on 9 if the load is 10, for
+		 * example.
+		 */
+		if (new_load > old_load)
+			new_load += scale-1;
 		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
 	}
 }
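The effect of the round-up added above can be checked in isolation. Below is a minimal sketch (a hypothetical harness, not kernel code; avg_step() is an illustrative name) of one cpu_load index with scale == 2: with the fix the average reaches a constant load of 10, while without it the average sticks at 9, exactly as the new comment describes:

	#include <stdio.h>

	/* One step of the cpu_load decaying average for index i (scale == 1 << i). */
	static unsigned long avg_step(unsigned long old_load, unsigned long new_load,
				      int i, int round_up)
	{
		unsigned long scale = 1UL << i;

		if (round_up && new_load > old_load)
			new_load += scale - 1;	/* round up while load is increasing */
		return (old_load * (scale - 1) + new_load) >> i;
	}

	int main(void)
	{
		unsigned long fixed = 0, old = 0;
		int i = 1;	/* scale == 2 */

		/* Feed a constant load of 10 into averages that start at 0. */
		for (int step = 0; step < 16; step++) {
			fixed = avg_step(fixed, 10, i, 1);
			old = avg_step(old, 10, i, 0);
		}
		printf("with rounding: %lu, without: %lu\n", fixed, old);	/* 10 vs 9 */
		return 0;
	}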
@@ -6484,7 +6459,6 @@ static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 
 void __init sched_init(void)
 {
-	u64 now = sched_clock();
 	int highest_cpu = 0;
 	int i, j;
 
@@ -6509,8 +6483,6 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 #endif
-		rq->ls.load_update_last = now;
-		rq->ls.load_update_start = now;
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
 			rq->cpu_load[j] = 0;
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,8 +145,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(nr_running);
 	SEQ_printf(m, "  .%-30s: %lu\n", "load",
 		   rq->ls.load.weight);
-	P(ls.delta_fair);
-	P(ls.delta_exec);
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -94,16 +94,14 @@ enum {
 	SCHED_FEAT_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_SLEEPER_AVG		= 2,
 	SCHED_FEAT_SLEEPER_LOAD_AVG	= 4,
-	SCHED_FEAT_PRECISE_CPU_LOAD	= 8,
-	SCHED_FEAT_START_DEBIT		= 16,
-	SCHED_FEAT_SKIP_INITIAL		= 32,
+	SCHED_FEAT_START_DEBIT		= 8,
+	SCHED_FEAT_SKIP_INITIAL		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
-		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_SKIP_INITIAL		*0;
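The renumbering above keeps the remaining feature bits contiguous powers of two. As a rough sketch of the idiom (illustrative feature names, not the kernel's): each bit is kept in or dropped from the compile-time default by multiplying with 1 or 0, and code tests it at runtime the same way the removed SCHED_FEAT_PRECISE_CPU_LOAD check in update_cpu_load() did:

	#include <stdio.h>

	/* Same idiom as sched_fair.c: each feature is a power-of-two bit. */
	enum {
		FEAT_FAIR_SLEEPERS = 1,
		FEAT_START_DEBIT   = 2,
	};

	/* Multiplying by 1 keeps a bit in the default mask, by 0 drops it. */
	static const unsigned int features =
		FEAT_FAIR_SLEEPERS * 1 |
		FEAT_START_DEBIT   * 0;

	int main(void)
	{
		if (features & FEAT_FAIR_SLEEPERS)
			printf("fair sleepers enabled by default\n");
		if (!(features & FEAT_START_DEBIT))
			printf("start debit disabled by default\n");
		return 0;
	}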