Commit db818536 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: fix style in kernel/sched.c
  sched: fix style of swap() macro in kernel/sched_fair.c
  sched: report CPU usage in CFS cgroup directories
  sched: move rcu_head to task_group struct
  sched: fix incorrect assumption that cpu 0 exists
  sched: keep utime/stime monotonic
  sched: make kernel/sched.c:account_guest_time() static
parents 6a22c57b 38605cae
...@@ -358,7 +358,8 @@ static cputime_t task_utime(struct task_struct *p) ...@@ -358,7 +358,8 @@ static cputime_t task_utime(struct task_struct *p)
} }
utime = (clock_t)temp; utime = (clock_t)temp;
return clock_t_to_cputime(utime); p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
return p->prev_utime;
} }
static cputime_t task_stime(struct task_struct *p) static cputime_t task_stime(struct task_struct *p)
......
...@@ -1009,6 +1009,7 @@ struct task_struct { ...@@ -1009,6 +1009,7 @@ struct task_struct {
unsigned int rt_priority; unsigned int rt_priority;
cputime_t utime, stime, utimescaled, stimescaled; cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime; cputime_t gtime;
cputime_t prev_utime;
unsigned long nvcsw, nivcsw; /* context switch counts */ unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time; /* monotonic time */ struct timespec start_time; /* monotonic time */
struct timespec real_start_time; /* boot based time */ struct timespec real_start_time; /* boot based time */
......
...@@ -1056,6 +1056,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, ...@@ -1056,6 +1056,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->gtime = cputime_zero; p->gtime = cputime_zero;
p->utimescaled = cputime_zero; p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero; p->stimescaled = cputime_zero;
p->prev_utime = cputime_zero;
#ifdef CONFIG_TASK_XACCT #ifdef CONFIG_TASK_XACCT
p->rchar = 0; /* I/O counter: bytes read */ p->rchar = 0; /* I/O counter: bytes read */
......
...@@ -172,6 +172,7 @@ struct task_group { ...@@ -172,6 +172,7 @@ struct task_group {
unsigned long shares; unsigned long shares;
/* spinlock to serialize modification to shares */ /* spinlock to serialize modification to shares */
spinlock_t lock; spinlock_t lock;
struct rcu_head rcu;
}; };
/* Default task group's sched entity on each cpu */ /* Default task group's sched entity on each cpu */
...@@ -258,7 +259,6 @@ struct cfs_rq { ...@@ -258,7 +259,6 @@ struct cfs_rq {
*/ */
struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */ struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
struct task_group *tg; /* group that "owns" this runqueue */ struct task_group *tg; /* group that "owns" this runqueue */
struct rcu_head rcu;
#endif #endif
}; };
...@@ -3355,7 +3355,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime) ...@@ -3355,7 +3355,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
* @p: the process that the cpu time gets accounted to * @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update * @cputime: the cpu time spent in virtual machine since the last update
*/ */
void account_guest_time(struct task_struct *p, cputime_t cputime) static void account_guest_time(struct task_struct *p, cputime_t cputime)
{ {
cputime64_t tmp; cputime64_t tmp;
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
...@@ -5365,7 +5365,7 @@ static struct ctl_table sd_ctl_dir[] = { ...@@ -5365,7 +5365,7 @@ static struct ctl_table sd_ctl_dir[] = {
.procname = "sched_domain", .procname = "sched_domain",
.mode = 0555, .mode = 0555,
}, },
{0,}, {0, },
}; };
static struct ctl_table sd_ctl_root[] = { static struct ctl_table sd_ctl_root[] = {
...@@ -5375,7 +5375,7 @@ static struct ctl_table sd_ctl_root[] = { ...@@ -5375,7 +5375,7 @@ static struct ctl_table sd_ctl_root[] = {
.mode = 0555, .mode = 0555,
.child = sd_ctl_dir, .child = sd_ctl_dir,
}, },
{0,}, {0, },
}; };
static struct ctl_table *sd_alloc_ctl_entry(int n) static struct ctl_table *sd_alloc_ctl_entry(int n)
...@@ -7019,8 +7019,8 @@ err: ...@@ -7019,8 +7019,8 @@ err:
/* rcu callback to free various structures associated with a task group */ /* rcu callback to free various structures associated with a task group */
static void free_sched_group(struct rcu_head *rhp) static void free_sched_group(struct rcu_head *rhp)
{ {
struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu); struct task_group *tg = container_of(rhp, struct task_group, rcu);
struct task_group *tg = cfs_rq->tg; struct cfs_rq *cfs_rq;
struct sched_entity *se; struct sched_entity *se;
int i; int i;
...@@ -7041,7 +7041,7 @@ static void free_sched_group(struct rcu_head *rhp) ...@@ -7041,7 +7041,7 @@ static void free_sched_group(struct rcu_head *rhp)
/* Destroy runqueue etc associated with a task group */ /* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg) void sched_destroy_group(struct task_group *tg)
{ {
struct cfs_rq *cfs_rq; struct cfs_rq *cfs_rq = NULL;
int i; int i;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
...@@ -7049,10 +7049,10 @@ void sched_destroy_group(struct task_group *tg) ...@@ -7049,10 +7049,10 @@ void sched_destroy_group(struct task_group *tg)
list_del_rcu(&cfs_rq->leaf_cfs_rq_list); list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
} }
cfs_rq = tg->cfs_rq[0]; BUG_ON(!cfs_rq);
/* wait for possible concurrent references to cfs_rqs complete */ /* wait for possible concurrent references to cfs_rqs complete */
call_rcu(&cfs_rq->rcu, free_sched_group); call_rcu(&tg->rcu, free_sched_group);
} }
/* change task's runqueue when it moves between groups. /* change task's runqueue when it moves between groups.
...@@ -7211,25 +7211,53 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) ...@@ -7211,25 +7211,53 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
return (u64) tg->shares; return (u64) tg->shares;
} }
static struct cftype cpu_shares = { static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
.name = "shares", {
.read_uint = cpu_shares_read_uint, struct task_group *tg = cgroup_tg(cgrp);
.write_uint = cpu_shares_write_uint, unsigned long flags;
u64 res = 0;
int i;
for_each_possible_cpu(i) {
/*
* Lock to prevent races with updating 64-bit counters
* on 32-bit arches.
*/
spin_lock_irqsave(&cpu_rq(i)->lock, flags);
res += tg->se[i]->sum_exec_runtime;
spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
}
/* Convert from ns to ms */
do_div(res, 1000000);
return res;
}
static struct cftype cpu_files[] = {
{
.name = "shares",
.read_uint = cpu_shares_read_uint,
.write_uint = cpu_shares_write_uint,
},
{
.name = "usage",
.read_uint = cpu_usage_read,
},
}; };
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{ {
return cgroup_add_file(cont, ss, &cpu_shares); return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
} }
struct cgroup_subsys cpu_cgroup_subsys = { struct cgroup_subsys cpu_cgroup_subsys = {
.name = "cpu", .name = "cpu",
.create = cpu_cgroup_create, .create = cpu_cgroup_create,
.destroy = cpu_cgroup_destroy, .destroy = cpu_cgroup_destroy,
.can_attach = cpu_cgroup_can_attach, .can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach, .attach = cpu_cgroup_attach,
.populate = cpu_cgroup_populate, .populate = cpu_cgroup_populate,
.subsys_id = cpu_cgroup_subsys_id, .subsys_id = cpu_cgroup_subsys_id,
.early_init = 1, .early_init = 1,
}; };
......
...@@ -1025,7 +1025,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr) ...@@ -1025,7 +1025,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
} }
} }
#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0) #define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
/* /*
* Share the fairness runtime between parent and child, thus the * Share the fairness runtime between parent and child, thus the
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment