Commit b6a86c74 authored by Peter Zijlstra, committed by Ingo Molnar

sched: fix sched_domain aggregation

Keeping the aggregate on the first cpu of the sched domain has two problems:
 - it could collide: different sched domains can share the same first cpu
 - it could slow things down, because every other cpu in the domain must make remote accesses to that cpu's data
Keep a per-cpu copy of the aggregate instead, as sketched below.
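A minimal user-space sketch of the indexing change this commit makes. All
struct and field names below are illustrative stand-ins, not the kernel's
exact layout:

#include <stdio.h>

#define NR_CPUS 4

/* stand-in for the per-group aggregate the scheduler maintains */
struct aggregate_struct {
	unsigned long task_weight;	/* weight of the group's runnable tasks */
	unsigned long rq_weight;	/* total runqueue weight of the group */
	unsigned long load;		/* load the group contributes */
};

struct sched_domain_sketch {
	int first_cpu;			/* the cache this commit removes */
};

struct task_group_sketch {
	struct aggregate_struct aggregate[NR_CPUS];	/* one slot per cpu */
};

/*
 * Before: every cpu in the domain indexed through sd->first_cpu, so two
 * domains starting at the same cpu shared (and clobbered) one slot, and
 * all but one cpu paid for a remote access.
 */
static struct aggregate_struct *
aggregate_old(struct task_group_sketch *tg, struct sched_domain_sketch *sd)
{
	return &tg->aggregate[sd->first_cpu];
}

/*
 * After: each cpu uses its own slot, so there is no cross-domain
 * collision and the access is always local.
 */
static struct aggregate_struct *
aggregate_new(struct task_group_sketch *tg, int cpu)
{
	return &tg->aggregate[cpu];
}

int main(void)
{
	struct task_group_sketch tg = { 0 };
	struct sched_domain_sketch sd = { .first_cpu = 0 };

	/* cpu 2 balancing: the old scheme touches cpu 0's slot, the new its own */
	aggregate_old(&tg, &sd)->load = 10;
	aggregate_new(&tg, 2)->load = 20;

	printf("slot0=%lu slot2=%lu\n",
	       tg.aggregate[0].load, tg.aggregate[2].load);
	return 0;
}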
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 32df2ee8
include/linux/sched.h
@@ -765,7 +765,6 @@ struct sched_domain {
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
-	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
...
kernel/sched.c (this diff is collapsed)
kernel/sched_fair.c
@@ -1429,11 +1429,11 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		/*
 		 * empty group
 		 */
-		if (!aggregate(tg, sd)->task_weight)
+		if (!aggregate(tg, this_cpu)->task_weight)
 			continue;
 
-		rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
-		rem_load /= aggregate(tg, sd)->load + 1;
+		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
+		rem_load /= aggregate(tg, this_cpu)->load + 1;
 
 		this_weight = tg->cfs_rq[this_cpu]->task_weight;
 		busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
@@ -1451,10 +1451,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!moved_load)
 			continue;
 
-		move_group_shares(tg, sd, busiest_cpu, this_cpu);
+		move_group_shares(tg, this_cpu, sd, busiest_cpu, this_cpu);
 
-		moved_load *= aggregate(tg, sd)->load;
-		moved_load /= aggregate(tg, sd)->rq_weight + 1;
+		moved_load *= aggregate(tg, this_cpu)->load;
+		moved_load /= aggregate(tg, this_cpu)->rq_weight + 1;
 
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
...
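The two divisions in this hunk convert between units: rem_load_move is
expressed in the parent level's load units, while the group-internal move
works in the group's own task-weight units. As I read it, the code scales
by rq_weight/load on the way in and by load/rq_weight on the way out, with
the "+ 1" guarding against division by zero. A standalone sketch of that
arithmetic (the helper names are hypothetical, not kernel functions):

#include <stdio.h>

/* parent-level load -> group-internal weight (hypothetical helper) */
static unsigned long to_group_weight(unsigned long load_units,
				     unsigned long rq_weight,
				     unsigned long load)
{
	return load_units * rq_weight / (load + 1);	/* + 1: avoid div by 0 */
}

/* group-internal weight -> parent-level load (hypothetical helper) */
static unsigned long to_parent_load(unsigned long weight_units,
				    unsigned long rq_weight,
				    unsigned long load)
{
	return weight_units * load / (rq_weight + 1);	/* + 1: avoid div by 0 */
}

int main(void)
{
	unsigned long rq_weight = 2048, load = 1024;
	unsigned long rem_load_move = 512;

	unsigned long rem_load = to_group_weight(rem_load_move, rq_weight, load);
	unsigned long moved = to_parent_load(rem_load, rq_weight, load);

	/* the round trip is approximately the identity: 512 -> 1023 -> 511 */
	printf("rem_load=%lu moved=%lu\n", rem_load, moved);
	return 0;
}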