Commit 54898cb3 authored by Thomas Gleixner

Merge branch 'rt/head' into rt/2.6.31

parents b07514b6 f39bec65
@@ -991,8 +991,6 @@ NORET_TYPE void do_exit(long code)
 	tsk->mempolicy = NULL;
 #endif
 #ifdef CONFIG_FUTEX
-	if (unlikely(!list_empty(&tsk->pi_state_list)))
-		exit_pi_state_list(tsk);
 	if (unlikely(current->pi_state_cache))
 		kfree(current->pi_state_cache);
 #endif
...
@@ -575,12 +575,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	/* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
-	if (unlikely(tsk->robust_list))
+	if (unlikely(tsk->robust_list)) {
 		exit_robust_list(tsk);
+		tsk->robust_list = NULL;
+	}
 #ifdef CONFIG_COMPAT
-	if (unlikely(tsk->compat_robust_list))
+	if (unlikely(tsk->compat_robust_list)) {
 		compat_exit_robust_list(tsk);
+		tsk->compat_robust_list = NULL;
+	}
 #endif
+	if (unlikely(!list_empty(&tsk->pi_state_list)))
+		exit_pi_state_list(tsk);
 #endif

 	/* Get rid of any cached register state */
...
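For reference, a condensed sketch of the futex teardown in mm_release() once the two hunks above are applied. The helper name futex_mm_release() is hypothetical (in the tree this code sits inline in mm_release()); the calls themselves are taken from the diff. The net effect is that exit_pi_state_list() is driven from the mm-release path instead of do_exit(), and the robust-list pointers are cleared so the lists cannot be walked twice:

/* Hypothetical helper: shows only the futex-related part of mm_release(). */
static void futex_mm_release(struct task_struct *tsk)
{
#ifdef CONFIG_FUTEX
	/* Walk the robust list once, then clear the pointer so a later
	 * call cannot walk a stale list. */
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	/* PI state cleanup, previously done from do_exit(), now runs here
	 * together with the robust-list teardown. */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif
}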
@@ -2139,7 +2139,6 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 	 * Unqueue the futex_q and determine which it was.
 	 */
 	plist_del(&q->list, &q->list.plist);
-	drop_futex_key_refs(&q->key);
 	if (timeout && !timeout->task)
 		ret = -ETIMEDOUT;
...
@@ -1161,6 +1161,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 			per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
 		}
 		for (i = 0; i < NR_SOFTIRQS; i++) {
+			if (!softirq_names[i])
+				continue;
 			p = kthread_create(ksoftirqd,
 					   &per_cpu(ksoftirqd, hotcpu)[i],
 					   "sirq-%s/%d", softirq_names[i],
@@ -1177,8 +1179,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		for (i = 0; i < NR_SOFTIRQS; i++)
-			wake_up_process(per_cpu(ksoftirqd, hotcpu)[i].tsk);
+		for (i = 0; i < NR_SOFTIRQS; i++) {
+			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
+			if (p)
+				wake_up_process(p);
+		}
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
@@ -1192,10 +1197,12 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 		for (i = 0; i < NR_SOFTIRQS; i++) {
 			param.sched_priority = MAX_RT_PRIO-1;
 			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
+			if (p) {
 				sched_setscheduler(p, SCHED_FIFO, &param);
 				per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
 				kthread_stop(p);
+			}
 		}
 		takeover_tasklets(hotcpu);
 		break;
 	}
...
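In this tree each softirq has its own ksoftirqd thread, and the first softirq hunk above skips thread creation for any slot whose softirq_names[] entry is NULL; the remaining hunks therefore guard every use of the per-softirq ->tsk pointer against NULL. A condensed sketch of the resulting pattern (simplified: the case labels of the teardown path and the surrounding notifier plumbing are omitted, and the struct sched_param param declaration is assumed from the original function):

	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		for (i = 0; i < NR_SOFTIRQS; i++) {
			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
			if (p)		/* slot may never have had a thread */
				wake_up_process(p);
		}
		break;

	/* hot-unplug teardown path (case labels omitted) */
		for (i = 0; i < NR_SOFTIRQS; i++) {
			param.sched_priority = MAX_RT_PRIO-1;
			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
			if (p) {	/* only stop threads that exist */
				sched_setscheduler(p, SCHED_FIFO, &param);
				per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
				kthread_stop(p);
			}
		}
		takeover_tasklets(hotcpu);
		break;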
@@ -231,6 +231,13 @@ void tick_nohz_stop_sched_tick(int inidle)
 	if (!inidle && !ts->inidle)
 		goto end;
+	/*
+	 * Set ts->inidle unconditionally. Even if the system did not
+	 * switch to NOHZ mode the cpu frequency governors rely on the
+	 * update of the idle time accounting in tick_nohz_start_idle().
+	 */
+	ts->inidle = 1;
+
 	now = tick_nohz_start_idle(ts);
 	/*
@@ -248,8 +255,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
 		goto end;
-	ts->inidle = 1;
 	if (need_resched())
 		goto end;
...
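The two tick-sched hunks move the ts->inidle assignment ahead of the NOHZ_MODE_INACTIVE bail-out, so the flag and the idle-time accounting done by tick_nohz_start_idle() are updated even when the tick is never actually stopped. A condensed sketch of the entry of tick_nohz_stop_sched_tick() after the change (heavily simplified: several intermediate checks are omitted, and the per-cpu setup via tick_cpu_sched/smp_processor_id() is recalled from the surrounding 2.6.31 code rather than taken from the diff):

void tick_nohz_stop_sched_tick(int inidle)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, smp_processor_id());
	ktime_t now;

	if (!inidle && !ts->inidle)
		goto end;

	/* Mark the cpu idle before any early return below: cpufreq
	 * governors rely on the idle time accounting updated in
	 * tick_nohz_start_idle() even if NOHZ mode stays inactive. */
	ts->inidle = 1;

	now = tick_nohz_start_idle(ts);

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;		/* inidle is already set above */

	if (need_resched())
		goto end;

	/* ... tick-stop logic using 'now' omitted ... */
end:
	return;
}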