Commit fcd05809 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: mark CONFIG_FAIR_GROUP_SCHED as !EXPERIMENTAL
  sched: isolate SMP balancing code a bit more
  sched: reduce balance-tasks overhead
  sched: make cpu_shares_{show,store}() static
  sched: clean up some control group code
  sched: constify sched.h
  sched: document profile=sleep requiring CONFIG_SCHEDSTATS
  sched: use show_regs() to improve __schedule_bug() output
  sched: clean up sched_domain_debug()
  sched: fix fastcall mismatch in completion APIs
  sched: fix sched_domain sysctl registration again
parents f9e83489 8ef93cf1
@@ -1444,7 +1444,8 @@ and is between 256 and 4096 characters. It is defined in the file
 Param: "schedule" - profile schedule points.
 Param: <number> - step/bucket size as a power of 2 for
 statistical time based profiling.
-Param: "sleep" - profile D-state sleeping (millisecs)
+Param: "sleep" - profile D-state sleeping (millisecs).
+Requires CONFIG_SCHEDSTATS
 Param: "kvm" - profile VM exits.
 processor.max_cstate= [HW,ACPI]
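For context, profile= is a boot-time option given on the kernel command line; a few illustrative invocations (values chosen arbitrarily, not taken from this commit; the sleep variant only works on a CONFIG_SCHEDSTATS=y kernel):

    profile=sleep,2      D-state sleep profiling, 2^2 step/bucket size
    profile=schedule,4   schedule-point profiling, 2^4 step/bucket size
    profile=2            plain time-based profiling, 2^2 step/bucket size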
......
@@ -42,15 +42,15 @@ static inline void init_completion(struct completion *x)
 init_waitqueue_head(&x->wait);
 }
-extern void FASTCALL(wait_for_completion(struct completion *));
-extern int FASTCALL(wait_for_completion_interruptible(struct completion *x));
-extern unsigned long FASTCALL(wait_for_completion_timeout(struct completion *x,
-unsigned long timeout));
-extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout(
-struct completion *x, unsigned long timeout));
-extern void FASTCALL(complete(struct completion *));
-extern void FASTCALL(complete_all(struct completion *));
+extern void wait_for_completion(struct completion *);
+extern int wait_for_completion_interruptible(struct completion *x);
+extern unsigned long wait_for_completion_timeout(struct completion *x,
+unsigned long timeout);
+extern unsigned long wait_for_completion_interruptible_timeout(
+struct completion *x, unsigned long timeout);
+extern void complete(struct completion *);
+extern void complete_all(struct completion *);
 #define INIT_COMPLETION(x) ((x).done = 0)
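The hunk above only drops the FASTCALL() annotations, so callers are unaffected. A minimal usage sketch of the declared API (the object and function names here are made up for illustration, not taken from this merge):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(work_done);          /* illustrative completion */

    /* signalling side, e.g. run from an interrupt handler or a worker */
    static void finish_work(void)
    {
            complete(&work_done);                  /* wake up one waiter */
    }

    /* waiting side: block until finish_work() runs, give up after 1 second */
    static int wait_for_work(void)
    {
            if (!wait_for_completion_timeout(&work_done, HZ))
                    return -ETIMEDOUT;
            return 0;
    }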
......
@@ -828,12 +828,17 @@ struct sched_class {
 struct task_struct * (*pick_next_task) (struct rq *rq);
 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
+#ifdef CONFIG_SMP
 unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-struct rq *busiest,
-unsigned long max_nr_move, unsigned long max_load_move,
+struct rq *busiest, unsigned long max_load_move,
 struct sched_domain *sd, enum cpu_idle_type idle,
 int *all_pinned, int *this_best_prio);
+int (*move_one_task) (struct rq *this_rq, int this_cpu,
+struct rq *busiest, struct sched_domain *sd,
+enum cpu_idle_type idle);
+#endif
 void (*set_curr_task) (struct rq *rq);
 void (*task_tick) (struct rq *rq, struct task_struct *p);
 void (*task_new) (struct rq *rq, struct task_struct *p);
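These two hooks are driven from the core scheduler, not from the classes themselves: load_balance() moves up to max_load_move of weighted load (the old max_nr_move task cap is gone), while move_one_task() tries to pull exactly one task. A rough, illustrative sketch of a core-side dispatcher (this is not code from this merge; the class-list walk and the sched_class_highest name are assumptions):

    static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
                             struct sched_domain *sd, enum cpu_idle_type idle)
    {
            const struct sched_class *class;

            /* ask each scheduling class, highest priority first, to pull
             * a single task from busiest over to this_rq */
            for (class = sched_class_highest; class; class = class->next)
                    if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
                            return 1;

            return 0;
    }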
@@ -1196,7 +1201,7 @@ static inline int rt_prio(int prio)
 return 0;
 }
-static inline int rt_task(struct task_struct *p)
+static inline int rt_task(const struct task_struct *p)
 {
 return rt_prio(p->prio);
 }
@@ -1211,22 +1216,22 @@ static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
 tsk->signal->__pgrp = pgrp;
 }
-static inline struct pid *task_pid(struct task_struct *task)
+static inline struct pid *task_pid(const struct task_struct *task)
 {
 return task->pids[PIDTYPE_PID].pid;
 }
-static inline struct pid *task_tgid(struct task_struct *task)
+static inline struct pid *task_tgid(const struct task_struct *task)
 {
 return task->group_leader->pids[PIDTYPE_PID].pid;
 }
-static inline struct pid *task_pgrp(struct task_struct *task)
+static inline struct pid *task_pgrp(const struct task_struct *task)
 {
 return task->group_leader->pids[PIDTYPE_PGID].pid;
 }
-static inline struct pid *task_session(struct task_struct *task)
+static inline struct pid *task_session(const struct task_struct *task)
 {
 return task->group_leader->pids[PIDTYPE_SID].pid;
 }
@@ -1255,7 +1260,7 @@ struct pid_namespace;
 * see also pid_nr() etc in include/linux/pid.h
 */
-static inline pid_t task_pid_nr(struct task_struct *tsk)
+static inline pid_t task_pid_nr(const struct task_struct *tsk)
 {
 return tsk->pid;
 }
@@ -1268,7 +1273,7 @@ static inline pid_t task_pid_vnr(struct task_struct *tsk)
 }
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
+static inline pid_t task_tgid_nr(const struct task_struct *tsk)
 {
 return tsk->tgid;
 }
@@ -1281,7 +1286,7 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 }
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr(const struct task_struct *tsk)
 {
 return tsk->signal->__pgrp;
 }
@@ -1294,7 +1299,7 @@ static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
 }
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr(const struct task_struct *tsk)
 {
 return tsk->signal->__session;
 }
@@ -1321,7 +1326,7 @@ static inline pid_t task_ppid_nr_ns(struct task_struct *tsk,
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
-static inline int pid_alive(struct task_struct *p)
+static inline int pid_alive(const struct task_struct *p)
 {
 return p->pids[PIDTYPE_PID].pid != NULL;
 }
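The comment's warning is about use-after-exit: callers typically hold rcu_read_lock() (or tasklist_lock) and test pid_alive() before following pointers out of the task_struct. An illustrative pattern only, not code from this merge (the helper name is made up; real_parent and task_tgid_nr are assumed from the same era's tree):

    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    /* report a task's parent tgid, or 0 if the task has already exited */
    static pid_t parent_tgid(struct task_struct *p)
    {
            pid_t ppid = 0;

            rcu_read_lock();
            if (pid_alive(p))
                    ppid = task_tgid_nr(rcu_dereference(p->real_parent));
            rcu_read_unlock();
            return ppid;
    }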
@@ -1332,7 +1337,7 @@ static inline int pid_alive(struct task_struct *p)
 *
 * Check if a task structure is the first user space task the kernel created.
 */
-static inline int is_global_init(struct task_struct *tsk)
+static inline int is_global_init(const struct task_struct *tsk)
 {
 return tsk->pid == 1;
 }
@@ -1469,7 +1474,7 @@ extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
 #else
-static inline int rt_mutex_getprio(struct task_struct *p)
+static inline int rt_mutex_getprio(const struct task_struct *p)
 {
 return p->normal_prio;
 }
@@ -1721,7 +1726,7 @@ extern void wait_task_inactive(struct task_struct * p);
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline int has_group_leader_pid(const struct task_struct *p)
 {
 return p->pid == p->tgid;
 }
@@ -1738,7 +1743,7 @@ static inline struct task_struct *next_thread(const struct task_struct *p)
 struct task_struct, thread_group);
 }
-static inline int thread_group_empty(struct task_struct *p)
+static inline int thread_group_empty(const struct task_struct *p)
 {
 return list_empty(&p->thread_group);
 }
......
@@ -322,7 +322,6 @@ config CPUSETS
 config FAIR_GROUP_SCHED
 bool "Fair group CPU scheduler"
 default y
-depends on EXPERIMENTAL
 help
 This feature lets CPU scheduler recognize task groups and control CPU
 bandwidth allocation to such task groups.
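With the EXPERIMENTAL dependency gone and the default left at y, a kernel configured after this change typically just carries the option enabled, e.g. this illustrative .config fragment:

    CONFIG_FAIR_GROUP_SCHED=y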
......
@@ -60,6 +60,7 @@ static int __init profile_setup(char * str)
 int par;
 if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+#ifdef CONFIG_SCHEDSTATS
 prof_on = SLEEP_PROFILING;
 if (str[strlen(sleepstr)] == ',')
 str += strlen(sleepstr) + 1;
@@ -68,6 +69,10 @@ static int __init profile_setup(char * str)
 printk(KERN_INFO
 "kernel sleep profiling enabled (shift: %ld)\n",
 prof_shift);
+#else
+printk(KERN_WARNING
+"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
+#endif /* CONFIG_SCHEDSTATS */
 } else if (!strncmp(str, schedstr, strlen(schedstr))) {
 prof_on = SCHED_PROFILING;
 if (str[strlen(schedstr)] == ',')
......
This diff is collapsed.
@@ -876,6 +876,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 }
 }
+#ifdef CONFIG_SMP
 /**************************************************
 * Fair scheduling class load-balancing methods:
 */
@@ -936,12 +937,11 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-unsigned long max_nr_move, unsigned long max_load_move,
+unsigned long max_load_move,
 struct sched_domain *sd, enum cpu_idle_type idle,
 int *all_pinned, int *this_best_prio)
 {
 struct cfs_rq *busy_cfs_rq;
-unsigned long load_moved, total_nr_moved = 0, nr_moved;
 long rem_load_move = max_load_move;
 struct rq_iterator cfs_rq_iterator;
@@ -969,25 +969,48 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #else
 # define maxload rem_load_move
 #endif
-/* pass busy_cfs_rq argument into
+/*
+ * pass busy_cfs_rq argument into
 * load_balance_[start|next]_fair iterators
 */
 cfs_rq_iterator.arg = busy_cfs_rq;
-nr_moved = balance_tasks(this_rq, this_cpu, busiest,
-max_nr_move, maxload, sd, idle, all_pinned,
-&load_moved, this_best_prio, &cfs_rq_iterator);
-total_nr_moved += nr_moved;
-max_nr_move -= nr_moved;
-rem_load_move -= load_moved;
+rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+maxload, sd, idle, all_pinned,
+this_best_prio,
+&cfs_rq_iterator);
-if (max_nr_move <= 0 || rem_load_move <= 0)
+if (rem_load_move <= 0)
 break;
 }
 return max_load_move - rem_load_move;
 }
+static int
+move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+struct sched_domain *sd, enum cpu_idle_type idle)
+{
+struct cfs_rq *busy_cfs_rq;
+struct rq_iterator cfs_rq_iterator;
+cfs_rq_iterator.start = load_balance_start_fair;
+cfs_rq_iterator.next = load_balance_next_fair;
+for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+/*
+ * pass busy_cfs_rq argument into
+ * load_balance_[start|next]_fair iterators
+ */
+cfs_rq_iterator.arg = busy_cfs_rq;
+if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+&cfs_rq_iterator))
+return 1;
+}
+return 0;
+}
+#endif
 /*
 * scheduler tick hitting a task of our scheduling class:
 */
@@ -1063,7 +1086,10 @@ static const struct sched_class fair_sched_class = {
 .pick_next_task = pick_next_task_fair,
 .put_prev_task = put_prev_task_fair,
+#ifdef CONFIG_SMP
 .load_balance = load_balance_fair,
+.move_one_task = move_one_task_fair,
+#endif
 .set_curr_task = set_curr_task_fair,
 .task_tick = task_tick_fair,
......
@@ -37,15 +37,24 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
+#ifdef CONFIG_SMP
 static unsigned long
 load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-unsigned long max_nr_move, unsigned long max_load_move,
+unsigned long max_load_move,
 struct sched_domain *sd, enum cpu_idle_type idle,
 int *all_pinned, int *this_best_prio)
 {
 return 0;
 }
+static int
+move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+struct sched_domain *sd, enum cpu_idle_type idle)
+{
+return 0;
+}
+#endif
 static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
 }
@@ -69,7 +78,10 @@ const struct sched_class idle_sched_class = {
 .pick_next_task = pick_next_task_idle,
 .put_prev_task = put_prev_task_idle,
+#ifdef CONFIG_SMP
 .load_balance = load_balance_idle,
+.move_one_task = move_one_task_idle,
+#endif
 .set_curr_task = set_curr_task_idle,
 .task_tick = task_tick_idle,
......
@@ -98,6 +98,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 p->se.exec_start = 0;
 }
+#ifdef CONFIG_SMP
 /*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
@@ -172,13 +173,11 @@ static struct task_struct *load_balance_next_rt(void *arg)
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-unsigned long max_nr_move, unsigned long max_load_move,
+unsigned long max_load_move,
 struct sched_domain *sd, enum cpu_idle_type idle,
 int *all_pinned, int *this_best_prio)
 {
-int nr_moved;
 struct rq_iterator rt_rq_iterator;
-unsigned long load_moved;
 rt_rq_iterator.start = load_balance_start_rt;
 rt_rq_iterator.next = load_balance_next_rt;
@@ -187,12 +186,24 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 */
 rt_rq_iterator.arg = busiest;
-nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
-max_load_move, sd, idle, all_pinned, &load_moved,
-this_best_prio, &rt_rq_iterator);
+return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
+idle, all_pinned, this_best_prio, &rt_rq_iterator);
+}
+static int
+move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+struct sched_domain *sd, enum cpu_idle_type idle)
+{
+struct rq_iterator rt_rq_iterator;
+rt_rq_iterator.start = load_balance_start_rt;
+rt_rq_iterator.next = load_balance_next_rt;
+rt_rq_iterator.arg = busiest;
-return load_moved;
+return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+&rt_rq_iterator);
 }
+#endif
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
@@ -236,7 +247,10 @@ const struct sched_class rt_sched_class = {
 .pick_next_task = pick_next_task_rt,
 .put_prev_task = put_prev_task_rt,
+#ifdef CONFIG_SMP
 .load_balance = load_balance_rt,
+.move_one_task = move_one_task_rt,
+#endif
 .set_curr_task = set_curr_task_rt,
 .task_tick = task_tick_rt,
......
@@ -129,7 +129,7 @@ static inline void uids_mutex_unlock(void)
 }
 /* return cpu shares held by the user */
-ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+static ssize_t cpu_shares_show(struct kset *kset, char *buffer)
 {
 struct user_struct *up = container_of(kset, struct user_struct, kset);
@@ -137,7 +137,8 @@ ssize_t cpu_shares_show(struct kset *kset, char *buffer)
 }
 /* modify cpu shares held by the user */
-ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
+static ssize_t cpu_shares_store(struct kset *kset, const char *buffer,
+size_t size)
 {
 struct user_struct *up = container_of(kset, struct user_struct, kset);
 unsigned long shares;
......