Commit fcd05809 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: mark CONFIG_FAIR_GROUP_SCHED as !EXPERIMENTAL
  sched: isolate SMP balancing code a bit more
  sched: reduce balance-tasks overhead
  sched: make cpu_shares_{show,store}() static
  sched: clean up some control group code
  sched: constify sched.h
  sched: document profile=sleep requiring CONFIG_SCHEDSTATS
  sched: use show_regs() to improve __schedule_bug() output
  sched: clean up sched_domain_debug()
  sched: fix fastcall mismatch in completion APIs
  sched: fix sched_domain sysctl registration again
parents f9e83489 8ef93cf1
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1444,7 +1444,8 @@ and is between 256 and 4096 characters. It is defined in the file
 			Param: "schedule" - profile schedule points.
 			Param: <number> - step/bucket size as a power of 2 for
 				statistical time based profiling.
-			Param: "sleep" - profile D-state sleeping (millisecs)
+			Param: "sleep" - profile D-state sleeping (millisecs).
+				Requires CONFIG_SCHEDSTATS
 			Param: "kvm" - profile VM exits.
 
 	processor.max_cstate=	[HW,ACPI]
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -42,15 +42,15 @@ static inline void init_completion(struct completion *x)
 	init_waitqueue_head(&x->wait);
 }
 
-extern void FASTCALL(wait_for_completion(struct completion *));
-extern int FASTCALL(wait_for_completion_interruptible(struct completion *x));
-extern unsigned long FASTCALL(wait_for_completion_timeout(struct completion *x,
-						   unsigned long timeout));
-extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout(
-			struct completion *x, unsigned long timeout));
-extern void FASTCALL(complete(struct completion *));
-extern void FASTCALL(complete_all(struct completion *));
+extern void wait_for_completion(struct completion *);
+extern int wait_for_completion_interruptible(struct completion *x);
+extern unsigned long wait_for_completion_timeout(struct completion *x,
+						   unsigned long timeout);
+extern unsigned long wait_for_completion_interruptible_timeout(
+			struct completion *x, unsigned long timeout);
+extern void complete(struct completion *);
+extern void complete_all(struct completion *);
 
 #define INIT_COMPLETION(x)	((x).done = 0)
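For context, the behavior of the completion API is unchanged by the FASTCALL removal; only the calling-convention annotation goes away, so existing callers build as before. A minimal sketch of a typical caller (hypothetical module code, not part of this commit):

	#include <linux/completion.h>
	#include <linux/kthread.h>

	static DECLARE_COMPLETION(setup_done);

	static int worker_fn(void *unused)
	{
		/* ... perform the deferred setup ... */
		complete(&setup_done);		/* wake one waiter */
		return 0;
	}

	static int __init example_init(void)
	{
		kthread_run(worker_fn, NULL, "example-worker");
		/* sleeps uninterruptibly until worker_fn() calls complete() */
		wait_for_completion(&setup_done);
		return 0;
	}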
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -828,12 +828,17 @@ struct sched_class {
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
+#ifdef CONFIG_SMP
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest,
-			unsigned long max_nr_move, unsigned long max_load_move,
+			struct rq *busiest, unsigned long max_load_move,
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *all_pinned, int *this_best_prio);
 
+	int (*move_one_task) (struct rq *this_rq, int this_cpu,
+			      struct rq *busiest, struct sched_domain *sd,
+			      enum cpu_idle_type idle);
+#endif
+
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
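With this change both balancing hooks exist only on SMP builds. The fair, idle, and rt class initializers further down add matching #ifdef CONFIG_SMP guards around their .load_balance and .move_one_task members, so UP kernels compile none of the per-class balancing code ("sched: isolate SMP balancing code a bit more").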
@@ -1196,7 +1201,7 @@ static inline int rt_prio(int prio)
 	return 0;
 }
 
-static inline int rt_task(struct task_struct *p)
+static inline int rt_task(const struct task_struct *p)
 {
 	return rt_prio(p->prio);
 }
@@ -1211,22 +1216,22 @@ static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
 	tsk->signal->__pgrp = pgrp;
 }
 
-static inline struct pid *task_pid(struct task_struct *task)
+static inline struct pid *task_pid(const struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
 }
 
-static inline struct pid *task_tgid(struct task_struct *task)
+static inline struct pid *task_tgid(const struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_PID].pid;
 }
 
-static inline struct pid *task_pgrp(struct task_struct *task)
+static inline struct pid *task_pgrp(const struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_PGID].pid;
 }
 
-static inline struct pid *task_session(struct task_struct *task)
+static inline struct pid *task_session(const struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_SID].pid;
 }
@@ -1255,7 +1260,7 @@ struct pid_namespace;
  * see also pid_nr() etc in include/linux/pid.h
  */
 
-static inline pid_t task_pid_nr(struct task_struct *tsk)
+static inline pid_t task_pid_nr(const struct task_struct *tsk)
 {
 	return tsk->pid;
 }
@@ -1268,7 +1273,7 @@ static inline pid_t task_pid_vnr(struct task_struct *tsk)
 }
 
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
+static inline pid_t task_tgid_nr(const struct task_struct *tsk)
 {
 	return tsk->tgid;
 }
@@ -1281,7 +1286,7 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 }
 
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr(const struct task_struct *tsk)
 {
 	return tsk->signal->__pgrp;
 }
@@ -1294,7 +1299,7 @@ static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
 }
 
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr(const struct task_struct *tsk)
 {
 	return tsk->signal->__session;
 }
@@ -1321,7 +1326,7 @@ static inline pid_t task_ppid_nr_ns(struct task_struct *tsk,
  * If pid_alive fails, then pointers within the task structure
  * can be stale and must not be dereferenced.
  */
-static inline int pid_alive(struct task_struct *p)
+static inline int pid_alive(const struct task_struct *p)
 {
 	return p->pids[PIDTYPE_PID].pid != NULL;
 }
@@ -1332,7 +1337,7 @@ static inline int pid_alive(struct task_struct *p)
  *
 * Check if a task structure is the first user space task the kernel created.
  */
-static inline int is_global_init(struct task_struct *tsk)
+static inline int is_global_init(const struct task_struct *tsk)
 {
 	return tsk->pid == 1;
 }
@@ -1469,7 +1474,7 @@ extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
 #else
-static inline int rt_mutex_getprio(struct task_struct *p)
+static inline int rt_mutex_getprio(const struct task_struct *p)
 {
 	return p->normal_prio;
 }
@@ -1721,7 +1726,7 @@ extern void wait_task_inactive(struct task_struct * p);
  * all we care about is that we have a task with the appropriate
  * pid, we don't actually care if we have the right task.
  */
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline int has_group_leader_pid(const struct task_struct *p)
 {
 	return p->pid == p->tgid;
 }
@@ -1738,7 +1743,7 @@ static inline struct task_struct *next_thread(const struct task_struct *p)
 		struct task_struct, thread_group);
 }
 
-static inline int thread_group_empty(struct task_struct *p)
+static inline int thread_group_empty(const struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }
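The const-ification above ("sched: constify sched.h") lets inspection-only code take a const task pointer and still use these accessors without casts. A minimal hypothetical caller, assuming <linux/sched.h>; not part of this commit:

	/* Illustration only: every accessor used here now takes const. */
	static pid_t report_task(const struct task_struct *p)
	{
		if (!pid_alive(p) || thread_group_empty(p))
			return 0;
		return rt_task(p) ? task_tgid_nr(p) : task_pid_nr(p);
	}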
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -322,7 +322,6 @@ config CPUSETS
 config FAIR_GROUP_SCHED
 	bool "Fair group CPU scheduler"
 	default y
-	depends on EXPERIMENTAL
 	help
 	  This feature lets CPU scheduler recognize task groups and control CPU
 	  bandwidth allocation to such task groups.
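With the EXPERIMENTAL dependency gone ("sched: mark CONFIG_FAIR_GROUP_SCHED as !EXPERIMENTAL"), the option is selectable in any configuration and still defaults to y, so a typical .config simply carries CONFIG_FAIR_GROUP_SCHED=y with no CONFIG_EXPERIMENTAL prerequisite.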
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -60,6 +60,7 @@ static int __init profile_setup(char * str)
 	int par;
 
 	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+#ifdef CONFIG_SCHEDSTATS
 		prof_on = SLEEP_PROFILING;
 		if (str[strlen(sleepstr)] == ',')
 			str += strlen(sleepstr) + 1;
@@ -68,6 +69,10 @@ static int __init profile_setup(char * str)
 		printk(KERN_INFO
 			"kernel sleep profiling enabled (shift: %ld)\n",
 			prof_shift);
+#else
+		printk(KERN_WARNING
+			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
+#endif /* CONFIG_SCHEDSTATS */
 	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
 		prof_on = SCHED_PROFILING;
 		if (str[strlen(schedstr)] == ',')
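Usage note, matching the documentation hunk above: booting with, e.g., profile=sleep,8 enables D-state sleep profiling with a bucket shift of 8 on kernels built with CONFIG_SCHEDSTATS; without that option, boot now prints the KERN_WARNING added here instead of silently enabling a profiling mode that can never collect samples.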
kernel/sched.c: diff collapsed in this view (not shown).
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -876,6 +876,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 	}
 }
 
+#ifdef CONFIG_SMP
 /**************************************************
  * Fair scheduling class load-balancing methods:
  */
@@ -936,12 +937,11 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
 
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_nr_move, unsigned long max_load_move,
+		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
 		  int *all_pinned, int *this_best_prio)
 {
 	struct cfs_rq *busy_cfs_rq;
-	unsigned long load_moved, total_nr_moved = 0, nr_moved;
 	long rem_load_move = max_load_move;
 	struct rq_iterator cfs_rq_iterator;
 
@@ -969,25 +969,48 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #else
 # define maxload rem_load_move
 #endif
-		/* pass busy_cfs_rq argument into
+		/*
+		 * pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators
 		 */
 		cfs_rq_iterator.arg = busy_cfs_rq;
-		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
-				max_nr_move, maxload, sd, idle, all_pinned,
-				&load_moved, this_best_prio, &cfs_rq_iterator);
-
-		total_nr_moved += nr_moved;
-		max_nr_move -= nr_moved;
-		rem_load_move -= load_moved;
+		rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+					       maxload, sd, idle, all_pinned,
+					       this_best_prio,
+					       &cfs_rq_iterator);
 
-		if (max_nr_move <= 0 || rem_load_move <= 0)
+		if (rem_load_move <= 0)
 			break;
 	}
 
 	return max_load_move - rem_load_move;
 }
+
+static int
+move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	struct cfs_rq *busy_cfs_rq;
+	struct rq_iterator cfs_rq_iterator;
+
+	cfs_rq_iterator.start = load_balance_start_fair;
+	cfs_rq_iterator.next = load_balance_next_fair;
+
+	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+		/*
+		 * pass busy_cfs_rq argument into
+		 * load_balance_[start|next]_fair iterators
+		 */
+		cfs_rq_iterator.arg = busy_cfs_rq;
+		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+				       &cfs_rq_iterator))
+			return 1;
+	}
+
+	return 0;
+}
+#endif
 
 /*
  * scheduler tick hitting a task of our scheduling class:
 */
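Two things stand out in the hunks above. First, balance_tasks() (in kernel/sched.c) now returns the weighted load it moved rather than a task count, so the caller subtracts its result directly from rem_load_move and the old nr_moved/total_nr_moved/max_nr_move bookkeeping disappears along with the max_nr_move parameter. Second, the one caller that genuinely needed a task-count cap, active balancing, only ever moves a single task; that case becomes the dedicated move_one_task hook built on iter_move_one_task(), which is what the "reduce balance-tasks overhead" and "isolate SMP balancing code" commits in this merge refer to.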
@@ -1063,7 +1086,10 @@ static const struct sched_class fair_sched_class = {
 	.pick_next_task		= pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_fair,
+	.move_one_task		= move_one_task_fair,
+#endif
 
 	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -37,15 +37,24 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
+#ifdef CONFIG_SMP
 static unsigned long
 load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_nr_move, unsigned long max_load_move,
+		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
 		  int *all_pinned, int *this_best_prio)
 {
 	return 0;
 }
 
+static int
+move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	return 0;
+}
+#endif
+
 static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
 }
@@ -69,7 +78,10 @@ const struct sched_class idle_sched_class = {
 	.pick_next_task		= pick_next_task_idle,
 	.put_prev_task		= put_prev_task_idle,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_idle,
+	.move_one_task		= move_one_task_idle,
+#endif
 
 	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
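The idle class keeps both hooks as no-op stubs returning 0: it never has tasks to pull, but filling in the methods lets the core SMP balancing code invoke every class's hooks uniformly without special-casing it.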
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -98,6 +98,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	p->se.exec_start = 0;
 }
 
+#ifdef CONFIG_SMP
 /*
  * Load-balancing iterator. Note: while the runqueue stays locked
  * during the whole iteration, the current task might be
@@ -172,13 +173,11 @@ static struct task_struct *load_balance_next_rt(void *arg)
 
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_nr_move, unsigned long max_load_move,
+		unsigned long max_load_move,
 		struct sched_domain *sd, enum cpu_idle_type idle,
 		int *all_pinned, int *this_best_prio)
 {
-	int nr_moved;
 	struct rq_iterator rt_rq_iterator;
-	unsigned long load_moved;
 
 	rt_rq_iterator.start = load_balance_start_rt;
 	rt_rq_iterator.next = load_balance_next_rt;
@@ -187,12 +186,24 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	 */
 	rt_rq_iterator.arg = busiest;
 
-	nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
-			max_load_move, sd, idle, all_pinned, &load_moved,
-			this_best_prio, &rt_rq_iterator);
+	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
+			     idle, all_pinned, this_best_prio, &rt_rq_iterator);
+}
+
+static int
+move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		 struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	struct rq_iterator rt_rq_iterator;
+
+	rt_rq_iterator.start = load_balance_start_rt;
+	rt_rq_iterator.next = load_balance_next_rt;
+	rt_rq_iterator.arg = busiest;
 
-	return load_moved;
+	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+				  &rt_rq_iterator);
 }
+#endif
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
@@ -236,7 +247,10 @@ const struct sched_class rt_sched_class = {
 	.pick_next_task		= pick_next_task_rt,
 	.put_prev_task		= put_prev_task_rt,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_rt,
+	.move_one_task		= move_one_task_rt,
+#endif
 
 	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -129,7 +129,7 @@ static inline void uids_mutex_unlock(void)
 }
 
 /* return cpu shares held by the user */
-ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+static ssize_t cpu_shares_show(struct kset *kset, char *buffer)
 {
 	struct user_struct *up = container_of(kset, struct user_struct, kset);
 
@@ -137,7 +137,8 @@ ssize_t cpu_shares_show(struct kset *kset, char *buffer)
 }
 
 /* modify cpu shares held by the user */
-ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
+static ssize_t cpu_shares_store(struct kset *kset, const char *buffer,
+				size_t size)
 {
 	struct user_struct *up = container_of(kset, struct user_struct, kset);
 	unsigned long shares;
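Making the two handlers static matches "sched: make cpu_shares_{show,store}() static" in the merge summary: they are reached only through the kset attribute wiring elsewhere in kernel/user.c, so nothing outside the file needs the symbols.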