Commit 738ddd30 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: run_rebalance_domains: s/SCHED_IDLE/CPU_IDLE/
  sched: fix sleeper bonus
  sched: make global code static
parents cc75b92d de0cf899
@@ -41,8 +41,6 @@ extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
 extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
 extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
 
-extern struct sysdev_attribute attr_sched_mc_power_savings;
-extern struct sysdev_attribute attr_sched_smt_power_savings;
 extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
 
 #ifdef CONFIG_HOTPLUG_CPU
...
@@ -3106,7 +3106,7 @@ static void run_rebalance_domains(struct softirq_action *h)
                 if (need_resched())
                         break;
 
-                rebalance_domains(balance_cpu, SCHED_IDLE);
+                rebalance_domains(balance_cpu, CPU_IDLE);
 
                 rq = cpu_rq(balance_cpu);
                 if (time_after(this_rq->next_balance, rq->next_balance))
@@ -6328,7 +6328,7 @@ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-int arch_reinit_sched_domains(void)
+static int arch_reinit_sched_domains(void)
 {
         int err;
 
@@ -6357,24 +6357,6 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
         return ret ? ret : count;
 }
 
-int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
-{
-        int err = 0;
-
-#ifdef CONFIG_SCHED_SMT
-        if (smt_capable())
-                err = sysfs_create_file(&cls->kset.kobj,
-                                        &attr_sched_smt_power_savings.attr);
-#endif
-#ifdef CONFIG_SCHED_MC
-        if (!err && mc_capable())
-                err = sysfs_create_file(&cls->kset.kobj,
-                                        &attr_sched_mc_power_savings.attr);
-#endif
-        return err;
-}
-#endif
-
 #ifdef CONFIG_SCHED_MC
 static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
 {
@@ -6385,7 +6367,7 @@ static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
 {
         return sched_power_savings_store(buf, count, 0);
 }
-SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
+static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
             sched_mc_power_savings_store);
 #endif
 
@@ -6399,10 +6381,28 @@ static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
 {
         return sched_power_savings_store(buf, count, 1);
 }
-SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
             sched_smt_power_savings_store);
 #endif
 
+int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+{
+        int err = 0;
+
+#ifdef CONFIG_SCHED_SMT
+        if (smt_capable())
+                err = sysfs_create_file(&cls->kset.kobj,
+                                        &attr_sched_smt_power_savings.attr);
+#endif
+#ifdef CONFIG_SCHED_MC
+        if (!err && mc_capable())
+                err = sysfs_create_file(&cls->kset.kobj,
+                                        &attr_sched_mc_power_savings.attr);
+#endif
+        return err;
+}
+#endif
+
 /*
  * Force a reinitialization of the sched domains hierarchy. The domains
  * and groups cannot be updated in place without racing with the balancing
...
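The hunks above make the sysfs power-savings plumbing file-local: the SYSDEV_ATTR() attribute definitions and arch_reinit_sched_domains() become static, the matching extern declarations are dropped from the header, and sched_create_sysfs_power_savings_entries() moves below the attribute definitions it references (presumably because, once the externs are gone, the static objects must be defined before their first use in the file). A minimal user-space sketch of that pattern follows; struct attribute, DEFINE_ATTR() and create_power_savings_entries() are illustrative stand-ins, not the kernel's real definitions.

/*
 * Stand-alone sketch (not kernel code): once the extern declarations
 * leave the header, the attribute objects are file-local, so the one
 * function that stays globally visible must be defined after them in
 * the same translation unit.  All names are illustrative stand-ins.
 */
#include <stdio.h>

struct attribute {
        const char *name;
};

/* stand-in for SYSDEV_ATTR(name, ...), which defines a static attr_##name */
#define DEFINE_ATTR(name) \
        static struct attribute attr_##name = { .name = #name }

DEFINE_ATTR(sched_mc_power_savings);
DEFINE_ATTR(sched_smt_power_savings);

/* the one symbol that remains globally visible via the header */
int create_power_savings_entries(void)
{
        /* these references resolve to the static objects defined above */
        printf("registering %s\n", attr_sched_mc_power_savings.name);
        printf("registering %s\n", attr_sched_smt_power_savings.name);
        return 0;
}

int main(void)
{
        return create_power_savings_entries();
}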
@@ -75,7 +75,7 @@ enum {
 
 unsigned int sysctl_sched_features __read_mostly =
                 SCHED_FEAT_FAIR_SLEEPERS        *1 |
-                SCHED_FEAT_SLEEPER_AVG          *1 |
+                SCHED_FEAT_SLEEPER_AVG          *0 |
                 SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
                 SCHED_FEAT_PRECISE_CPU_LOAD     *1 |
                 SCHED_FEAT_START_DEBIT          *1 |
@@ -304,11 +304,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
         delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
         if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-                delta = calc_delta_mine(cfs_rq->sleeper_bonus,
-                                        curr->load.weight, lw);
-                if (unlikely(delta > cfs_rq->sleeper_bonus))
-                        delta = cfs_rq->sleeper_bonus;
-
+                delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
+                delta = calc_delta_mine(delta, curr->load.weight, lw);
+                delta = min((u64)delta, cfs_rq->sleeper_bonus);
                 cfs_rq->sleeper_bonus -= delta;
                 delta_mine -= delta;
         }
@@ -521,6 +519,8 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
          * Track the amount of bonus we've given to sleepers:
          */
         cfs_rq->sleeper_bonus += delta_fair;
+        if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+                cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
 
         schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
...
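The hunks above tighten the sleeper-bonus accounting: SCHED_FEAT_SLEEPER_AVG is switched off by default, __update_curr() now consumes at most delta_exec worth of bonus per update (and never more than what has actually accumulated), and __enqueue_sleeper() caps the accumulated bonus at sysctl_sched_runtime_limit. A small user-space sketch of that clamping follows; the numbers are made-up example values and the weight scaling done by calc_delta_mine() is omitted.

/*
 * Stand-alone sketch (not kernel code) of the sleeper-bonus clamping:
 * the bonus consumed in one update is bounded by the time actually run
 * and by the bonus still available, and the accumulated bonus itself is
 * capped at the runtime limit.  Example values only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
        return a < b ? a : b;
}

int main(void)
{
        uint64_t sleeper_bonus = 50000;         /* accumulated bonus (ns) */
        const uint64_t runtime_limit = 40000;   /* stand-in for sysctl_sched_runtime_limit */
        const uint64_t delta_exec = 12000;      /* time the current task just ran (ns) */

        /* __enqueue_sleeper(): cap what can accumulate */
        if (sleeper_bonus > runtime_limit)
                sleeper_bonus = runtime_limit;

        /* __update_curr(): never hand out more than delta_exec per update */
        uint64_t delta = min_u64(sleeper_bonus, delta_exec);
        /* (calc_delta_mine() weight scaling omitted in this sketch) */
        delta = min_u64(delta, sleeper_bonus);
        sleeper_bonus -= delta;

        printf("consumed %llu ns of bonus, %llu ns left\n",
               (unsigned long long)delta, (unsigned long long)sleeper_bonus);
        return 0;
}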