Commit 0eab9146 authored by Ingo Molnar

sched: style cleanup, #2

style cleanup of various changes that were done recently.

no code changed:

      text    data     bss     dec     hex filename
     26399    2578      48   29025    7161 sched.o.before
     26399    2578      48   29025    7161 sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d7876a08
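
The text/data/bss comparison above is the standard output of the binutils size(1) tool; assuming the pre- and post-patch objects were saved under the names shown, it can be reproduced with:

    size sched.o.before sched.o.after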
@@ -235,17 +235,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares);
  * Every task in system belong to this group at bootup.
  */
 struct task_group init_task_group = {
 	.se	= init_sched_entity_p,
 	.cfs_rq	= init_cfs_rq_p,
 };

 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD	2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif

 #define MIN_GROUP_SHARES	2

 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
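
The parentheses added around 2*NICE_0_LOAD are the usual macro-hygiene fix: macro expansion is textual, so an unparenthesized body can bind to neighboring operators at the use site. A hypothetical use (not from this patch) shows the hazard:

    /* hypothetical example, not part of this patch */
    unsigned long share = total / INIT_TASK_GROUP_LOAD;
    /*
     * Unparenthesized, this expands to total / 2*NICE_0_LOAD, which
     * groups as (total / 2) * NICE_0_LOAD; with the parentheses it is
     * total / (2*NICE_0_LOAD), the intended meaning.
     */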
@@ -352,8 +352,8 @@ struct rt_rq {
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
  * fully partitioning the member cpus from any other cpuset. Whenever a new
  * exclusive cpuset is created, we also create and attach a new root-domain
  * object.
  *
@@ -365,12 +365,12 @@ struct root_domain {
 	cpumask_t span;
 	cpumask_t online;

 	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
 	 */
 	cpumask_t rto_mask;
 	atomic_t rto_count;
 };

 static struct root_domain def_root_domain;
@@ -434,7 +434,7 @@ struct rq {
 	atomic_t nr_iowait;

 #ifdef CONFIG_SMP
 	struct root_domain *rd;
 	struct sched_domain *sd;

 	/* For active balancing */
@@ -5066,7 +5066,7 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, &new_mask);
 	else {
 		p->cpus_allowed = new_mask;
 		p->nr_cpus_allowed = cpus_weight(new_mask);
 	}
@@ -5847,9 +5847,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;

-		for (class = sched_class_highest; class; class = class->next)
+		for (class = sched_class_highest; class; class = class->next) {
 			if (class->leave_domain)
 				class->leave_domain(rq);
+		}

 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
@@ -5858,9 +5859,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;

-	for (class = sched_class_highest; class; class = class->next)
+	for (class = sched_class_highest; class; class = class->next) {
 		if (class->join_domain)
 			class->join_domain(rq);
+	}

 	spin_unlock_irqrestore(&rq->lock, flags);
 }
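
The braces added around both loops match kernel coding style for loop bodies that span multiple lines: without them, the nested if is the sole body, and a statement appended later at the same indentation would silently run after the loop instead of inside it. A sketch of that failure mode (illustrative only; update_stats() is a made-up helper):

    for (class = sched_class_highest; class; class = class->next)
        if (class->join_domain)
            class->join_domain(rq);
        update_stats(rq); /* indented like loop body, but runs once, after the loop */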
@@ -5895,11 +5897,11 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map)
 }

 /*
  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
  */
-static void cpu_attach_domain(struct sched_domain *sd,
-			      struct root_domain *rd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
@@ -7095,7 +7097,7 @@ static int rebalance_shares(struct sched_domain *sd, int this_cpu)
 		for_each_cpu_mask(i, sdspan)
 			total_load += tg->cfs_rq[i]->load.weight;

 		/* Nothing to do if this group has no load */
 		if (!total_load)
 			continue;
...