Commit 8c60bfb0 authored by Linus Torvalds


Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  cpuset: fix regression when failed to generate sched domains
  sched, signals: fix the racy usage of ->signal in account_group_xxx/run_posix_cpu_timers
  sched: fix kernel warning on /proc/sched_debug access
  sched: correct sched-rt-group.txt pathname in init/Kconfig
parents b6584065 700018e0
@@ -354,7 +354,7 @@ config RT_GROUP_SCHED
 	  setting below. If enabled, it will also make it impossible to
 	  schedule realtime tasks for non-root users until you allocate
 	  realtime bandwidth for them.
-	  See Documentation/sched-rt-group.txt for more information.
+	  See Documentation/scheduler/sched-rt-group.txt for more information.
 
 choice
 	depends on GROUP_SCHED
@@ -587,7 +587,6 @@ static int generate_sched_domains(cpumask_t **domains,
 	int ndoms;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
-	ndoms = 0;
 	doms = NULL;
 	dattr = NULL;
 	csa = NULL;
@@ -674,10 +673,8 @@ restart:
 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 	 */
 	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
-	if (!doms) {
-		ndoms = 0;
+	if (!doms)
 		goto done;
-	}
 
 	/*
 	 * The rest of the code, including the scheduler, can deal with
@@ -732,6 +729,13 @@ restart:
 done:
 	kfree(csa);
 
+	/*
+	 * Fallback to the default domain if kmalloc() failed.
+	 * See comments in partition_sched_domains().
+	 */
+	if (doms == NULL)
+		ndoms = 1;
+
 	*domains    = doms;
 	*attributes = dattr;
 	return ndoms;
@@ -1308,9 +1308,10 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
  */
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
-	struct signal_struct *sig = tsk->signal;
+	struct signal_struct *sig;
 
-	if (unlikely(!sig))
+	/* tsk == current, ensure it is safe to use ->signal/sighand */
+	if (unlikely(tsk->exit_state))
 		return 0;
 
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
@@ -1323,6 +1324,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
 			return 1;
 	}
+
+	sig = tsk->signal;
 	if (!task_cputime_zero(&sig->cputime_expires)) {
 		struct task_cputime group_sample;
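For illustration, a minimal userspace sketch of the pattern this fix adopts: the helpers only ever run on current, so testing the task's exit state up front means the later ->signal dereference cannot race with the exit path tearing that pointer down. The struct and function names below are illustrative stand-ins, not kernel code.

#include <stdio.h>

struct signal_stats {
	unsigned long long cputime_expires;
};

struct task {
	int exit_state;                 /* nonzero once the task is exiting */
	struct signal_stats *signal;    /* cleared and freed by the exit path */
};

/* Mirrors fastpath_timer_check(): bail out early instead of testing ->signal. */
static int timers_pending(struct task *tsk)
{
	struct signal_stats *sig;

	/* tsk is the caller's own task; if it is already exiting, do nothing */
	if (tsk->exit_state)
		return 0;

	sig = tsk->signal;              /* safe: the exit path has not run yet */
	return sig->cputime_expires != 0;
}

int main(void)
{
	struct signal_stats stats = { .cputime_expires = 42 };
	struct task me = { .exit_state = 0, .signal = &stats };

	printf("timers pending: %d\n", timers_pending(&me));
	return 0;
}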
@@ -7789,13 +7789,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
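The comment spells out the contract the cpuset hunk above now honours: the caller hands over a kmalloc'd array, and on allocation failure it passes doms_new == NULL with ndoms_new == 1 so the callee falls back to a single default partition. A small userspace sketch of that contract; build_domains(), apply_domains() and fallback_doms are illustrative names, not kernel symbols.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long cpumask_t;        /* stand-in for the kernel type */

static cpumask_t fallback_doms;         /* single static fallback partition */

/* Takes ownership of doms_new; NULL && ndoms_new == 1 means "fall back". */
static void apply_domains(cpumask_t *doms_new, int ndoms_new)
{
	if (doms_new == NULL) {         /* the caller's allocation failed */
		ndoms_new = 1;
		fallback_doms = ~0UL;   /* default: one domain spanning all CPUs */
		doms_new = &fallback_doms;
	}

	printf("rebuilding %d sched domain(s)\n", ndoms_new);

	if (doms_new != &fallback_doms)
		free(doms_new);         /* we own the caller's array */
}

/* Mirrors generate_sched_domains(): on failure return NULL and *ndoms == 1. */
static cpumask_t *build_domains(int *ndoms)
{
	cpumask_t *doms = malloc(2 * sizeof(cpumask_t));

	if (doms == NULL) {
		*ndoms = 1;             /* report 1, not 0, as the fix does */
		return NULL;
	}
	doms[0] = 0x0fUL;               /* e.g. CPUs 0-3 */
	doms[1] = 0xf0UL;               /* e.g. CPUs 4-7 */
	*ndoms = 2;
	return doms;
}

int main(void)
{
	int ndoms;
	cpumask_t *doms = build_domains(&ndoms);

	/* the same call works whether or not the allocation succeeded */
	apply_domains(doms, ndoms);
	return 0;
}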
@@ -423,10 +423,11 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #undef __P
 
 	{
+		unsigned int this_cpu = raw_smp_processor_id();
 		u64 t0, t1;
 
-		t0 = sched_clock();
-		t1 = sched_clock();
+		t0 = cpu_clock(this_cpu);
+		t1 = cpu_clock(this_cpu);
 		SEQ_printf(m, "%-35s:%21Ld\n",
 			   "clock-delta", (long long)(t1-t0));
 	}
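This probe simply reads the same clock twice back to back and prints the difference. A rough userspace analogue of the measurement, assuming POSIX clock_gettime() (link with -lrt on older glibc); it uses a monotonic userspace clock, not the kernel's cpu_clock().

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec t0, t1;
	long long delta;

	/* two back-to-back reads of the same monotonic clock */
	clock_gettime(CLOCK_MONOTONIC, &t0);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	delta = (long long)(t1.tv_sec - t0.tv_sec) * 1000000000LL
		+ (t1.tv_nsec - t0.tv_nsec);
	printf("%-35s:%21lld\n", "clock-delta", delta);
	return 0;
}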
@@ -298,9 +298,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
 {
 	struct signal_struct *sig;
 
-	sig = tsk->signal;
-	if (unlikely(!sig))
+	/* tsk == current, ensure it is safe to use ->signal */
+	if (unlikely(tsk->exit_state))
 		return;
 
+	sig = tsk->signal;
 	if (sig->cputime.totals) {
 		struct task_cputime *times;
@@ -325,9 +327,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
 {
 	struct signal_struct *sig;
 
-	sig = tsk->signal;
-	if (unlikely(!sig))
+	/* tsk == current, ensure it is safe to use ->signal */
+	if (unlikely(tsk->exit_state))
 		return;
 
+	sig = tsk->signal;
 	if (sig->cputime.totals) {
 		struct task_cputime *times;
@@ -353,8 +357,11 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	struct signal_struct *sig;
 
 	sig = tsk->signal;
+	/* see __exit_signal()->task_rq_unlock_wait() */
+	barrier();
 	if (unlikely(!sig))
 		return;
 
 	if (sig->cputime.totals) {
 		struct task_cputime *times;
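Here the added barrier() keeps the compiler from folding the NULL check into a second read of tsk->signal, so only the value snapshotted into sig is ever tested and dereferenced. A minimal userspace sketch of that load-once-then-test idiom; the types and the account_runtime() name are illustrative, and barrier() is spelled out as the usual GCC memory clobber rather than the kernel macro.

#include <stdio.h>

#define barrier() __asm__ __volatile__("" : : : "memory")

struct signal_stats {
	unsigned long long sum_exec_runtime;
};

struct task {
	struct signal_stats *signal;    /* may be set to NULL by another path */
};

static void account_runtime(struct task *tsk, unsigned long long ns)
{
	struct signal_stats *sig;

	sig = tsk->signal;              /* snapshot the pointer exactly once */
	barrier();                      /* forbid re-reading tsk->signal below */
	if (sig == NULL)
		return;
	sig->sum_exec_runtime += ns;    /* only the snapshot is dereferenced */
}

int main(void)
{
	struct signal_stats stats = { 0 };
	struct task t = { .signal = &stats };

	account_runtime(&t, 1000);
	printf("runtime: %llu\n", stats.sum_exec_runtime);
	return 0;
}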