Commit 54e840dd authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: reduce schedstat variable overhead a bit
  sched: add KERN_CONT annotation
  sched: cleanup, make struct rq comments more consistent
  sched: cleanup, fix spacing
  sched: fix return value of wait_for_completion_interruptible()
parents 32c15bb9 480b9434
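Of the queued changes, the wait_for_completion_interruptible() one is the only fix with a caller-visible effect: as the hunk further down shows, the function now returns 0 once the completion is signalled and -ERESTARTSYS if the sleep was interrupted, instead of passing wait_for_common()'s leftover-timeout value back to the caller. A minimal caller sketch under that assumption (hypothetical my_dev_* names, not taken from this diff):

```c
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static DECLARE_COMPLETION(my_dev_ready);

static int my_dev_wait_ready(void)
{
	/*
	 * With the fixed return value a plain "if (ret)" error check works:
	 * 0 means the completion fired, -ERESTARTSYS means a signal
	 * interrupted the wait.  Before the fix a successful wait returned
	 * the (nonzero) remaining-timeout value instead of 0.
	 */
	int ret = wait_for_completion_interruptible(&my_dev_ready);

	if (ret)
		return ret;	/* -ERESTARTSYS: let the caller restart */

	return 0;		/* device signalled completion */
}
```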
@@ -569,7 +569,7 @@ struct sched_info {
 			last_queued;	/* when we were last queued to run */
 #ifdef CONFIG_SCHEDSTATS
 	/* BKL stats */
-	unsigned long bkl_count;
+	unsigned int bkl_count;
 #endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -705,34 +705,34 @@ struct sched_domain {
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_count[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
 	/* Active load balancing */
-	unsigned long alb_count;
-	unsigned long alb_failed;
-	unsigned long alb_pushed;
+	unsigned int alb_count;
+	unsigned int alb_failed;
+	unsigned int alb_pushed;
 	/* SD_BALANCE_EXEC stats */
-	unsigned long sbe_count;
-	unsigned long sbe_balanced;
-	unsigned long sbe_pushed;
+	unsigned int sbe_count;
+	unsigned int sbe_balanced;
+	unsigned int sbe_pushed;
 	/* SD_BALANCE_FORK stats */
-	unsigned long sbf_count;
-	unsigned long sbf_balanced;
-	unsigned long sbf_pushed;
+	unsigned int sbf_count;
+	unsigned int sbf_balanced;
+	unsigned int sbf_pushed;
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_wake_remote;
-	unsigned long ttwu_move_affine;
-	unsigned long ttwu_move_balance;
+	unsigned int ttwu_wake_remote;
+	unsigned int ttwu_move_affine;
+	unsigned int ttwu_move_balance;
 #endif
 };
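For the "reduce schedstat variable overhead" part above: on an LP64 kernel an unsigned long is 8 bytes while an unsigned int is 4, so each counter converted in struct sched_info and struct sched_domain shrinks by half, at the cost of wrapping at 32 bits. A trivial userspace sketch of the size difference (illustration only, not part of the diff):

```c
#include <stdio.h>

int main(void)
{
	/* On an LP64 build this prints 8 and 4: the schedstat counters
	 * converted above take half the space per field after the change. */
	printf("unsigned long: %zu bytes\n", sizeof(unsigned long));
	printf("unsigned int : %zu bytes\n", sizeof(unsigned int));
	return 0;
}
```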
@@ -266,7 +266,8 @@ struct rt_rq {
  * acquire operations must be ordered by ascending &runqueue.
  */
 struct rq {
-	spinlock_t lock;	/* runqueue lock */
+	/* runqueue lock: */
+	spinlock_t lock;
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
@@ -279,13 +280,15 @@ struct rq {
 #ifdef CONFIG_NO_HZ
 	unsigned char in_nohz_recently;
 #endif
-	struct load_weight load;	/* capture load from *all* tasks on this cpu */
+	/* capture load from *all* tasks on this cpu: */
+	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
 	struct cfs_rq cfs;
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
+	/* list of leaf cfs_rq on this cpu: */
+	struct list_head leaf_cfs_rq_list;
 #endif
 	struct rt_rq rt;
@@ -317,7 +320,8 @@ struct rq {
 	/* For active balancing */
 	int active_balance;
 	int push_cpu;
-	int cpu;	/* cpu of this runqueue */
+	/* cpu of this runqueue: */
+	int cpu;
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
@@ -328,22 +332,22 @@ struct rq {
 	struct sched_info rq_sched_info;
 	/* sys_sched_yield() stats */
-	unsigned long yld_exp_empty;
-	unsigned long yld_act_empty;
-	unsigned long yld_both_empty;
-	unsigned long yld_count;
+	unsigned int yld_exp_empty;
+	unsigned int yld_act_empty;
+	unsigned int yld_both_empty;
+	unsigned int yld_count;
 	/* schedule() stats */
-	unsigned long sched_switch;
-	unsigned long sched_count;
-	unsigned long sched_goidle;
+	unsigned int sched_switch;
+	unsigned int sched_count;
+	unsigned int sched_goidle;
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_count;
-	unsigned long ttwu_local;
+	unsigned int ttwu_count;
+	unsigned int ttwu_local;
 	/* BKL stats */
-	unsigned long bkl_count;
+	unsigned int bkl_count;
 #endif
 	struct lock_class_key rq_lock_key;
 };
@@ -449,12 +453,12 @@ enum {
 };
 const_debug unsigned int sysctl_sched_features =
-		SCHED_FEAT_NEW_FAIR_SLEEPERS *1 |
-		SCHED_FEAT_START_DEBIT *1 |
-		SCHED_FEAT_TREE_AVG *0 |
-		SCHED_FEAT_APPROX_AVG *0 |
-		SCHED_FEAT_WAKEUP_PREEMPT *1 |
-		SCHED_FEAT_PREEMPT_RESTRICT *1;
+		SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
+		SCHED_FEAT_START_DEBIT * 1 |
+		SCHED_FEAT_TREE_AVG * 0 |
+		SCHED_FEAT_APPROX_AVG * 0 |
+		SCHED_FEAT_WAKEUP_PREEMPT * 1 |
+		SCHED_FEAT_PREEMPT_RESTRICT * 1;
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
@@ -3880,7 +3884,10 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
-	return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible);
@@ -4815,18 +4822,18 @@ static void show_task(struct task_struct *p)
 	unsigned state;
 	state = p->state ? __ffs(p->state) + 1 : 0;
-	printk("%-13.13s %c", p->comm,
+	printk(KERN_INFO "%-13.13s %c", p->comm,
 		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
 	if (state == TASK_RUNNING)
-		printk(" running ");
+		printk(KERN_CONT " running ");
 	else
-		printk(" %08lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
 #else
 	if (state == TASK_RUNNING)
-		printk(" running task ");
+		printk(KERN_CONT " running task ");
 	else
-		printk(" %016lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	{
@@ -4836,7 +4843,7 @@ static void show_task(struct task_struct *p)
 		free = (unsigned long)n - (unsigned long)end_of_stack(p);
 	}
 #endif
-	printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+	printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
 	if (state != TASK_RUNNING)
 		show_stack(p, NULL);
@@ -5385,7 +5392,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 	return table;
 }
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
 {
 	struct ctl_table *entry, *table;
 	struct sched_domain *sd;
@@ -5619,20 +5626,20 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		}
 		if (!group->__cpu_power) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
 			break;
 		}
 		if (!cpus_weight(group->cpumask)) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 		if (cpus_intersects(groupmask, group->cpumask)) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
@@ -5640,11 +5647,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		cpus_or(groupmask, groupmask, group->cpumask);
 		cpumask_scnprintf(str, NR_CPUS, group->cpumask);
-		printk(" %s", str);
+		printk(KERN_CONT " %s", str);
 		group = group->next;
 	} while (group != sd->groups);
-	printk("\n");
+	printk(KERN_CONT "\n");
 	if (!cpus_equal(sd->span, groupmask))
 		printk(KERN_ERR "ERROR: groups don't span "
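The show_task() and sched_domain_debug() hunks above only tag continuation printk()s. As a reminder of what the annotation does: fragments printed with KERN_CONT are appended to the message started by the preceding printk() rather than being logged as new lines at the default loglevel. A minimal, hypothetical module sketch in the same spirit (not code from this commit):

```c
#include <linux/kernel.h>
#include <linux/module.h>

/* Print a handful of numbers on one log line.  The first printk() opens
 * the line at KERN_INFO; every KERN_CONT fragment is appended to it
 * instead of being treated as a separate, default-loglevel message. */
static int __init kern_cont_demo_init(void)
{
	int i;

	printk(KERN_INFO "kern_cont_demo: values:");
	for (i = 0; i < 4; i++)
		printk(KERN_CONT " %d", i);
	printk(KERN_CONT "\n");

	return 0;
}

static void __exit kern_cont_demo_exit(void)
{
}

module_init(kern_cont_demo_init);
module_exit(kern_cont_demo_exit);
MODULE_LICENSE("GPL");
```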
@@ -137,7 +137,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SCHEDSTATS
-	SEQ_printf(m, " .%-30s: %ld\n", "bkl_count",
+	SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
 			rq->bkl_count);
 #endif
 	SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
@@ -21,7 +21,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		/* runqueue-specific stats */
 		seq_printf(seq,
-		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
+		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
 		    cpu, rq->yld_both_empty,
 		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
 		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
@@ -42,8 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 			seq_printf(seq, "domain%d %s", dcount++, mask_str);
 			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 					itype++) {
-				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
-						"%lu",
+				seq_printf(seq, " %u %u %u %u %u %u %u %u",
 				    sd->lb_count[itype],
 				    sd->lb_balanced[itype],
 				    sd->lb_failed[itype],
@@ -53,8 +52,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				    sd->lb_nobusyq[itype],
 				    sd->lb_nobusyg[itype]);
 			}
-			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
-					" %lu %lu %lu\n",
+			seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
 			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
 			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
 			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
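The show_schedstat() hunks only narrow the format specifiers to match the new unsigned int counters; the field layout of the output is unchanged. For eyeballing the per-cpu lines from userspace, a small sketch, assuming the seq file is exposed as /proc/schedstat as on mainline kernels of this era:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f) {
		perror("fopen /proc/schedstat");
		return 1;
	}

	/* Dump only the "cpu%d ..." lines produced by show_schedstat(). */
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "cpu", 3) == 0)
			fputs(line, stdout);

	fclose(f);
	return 0;
}
```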