Commit c2171f66 authored by Thomas Gleixner

Merge branch 'rt/head' into rt/2.6.31

parents 476ced36 d99f9884
@@ -1492,7 +1492,6 @@ struct task_struct {
 #endif
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
-	struct task_struct *futex_wakeup;
 #endif
 #ifdef CONFIG_PERF_COUNTERS
 	struct perf_counter_context *perf_counter_ctxp;
...
@@ -1190,7 +1190,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 	INIT_LIST_HEAD(&p->pi_state_list);
 	p->pi_state_cache = NULL;
-	p->futex_wakeup = NULL;
 #endif
 	/*
 	 * sigaltstack should be cleared when sharing the same VM
...
@@ -713,7 +713,7 @@ retry:
  * The hash bucket lock must be held when this is called.
  * Afterwards, the futex_q must not be accessed.
  */
-static void wake_futex(struct task_struct **wake_list, struct futex_q *q)
+static void wake_futex(struct futex_q *q)
 {
 	struct task_struct *p = q->task;
@@ -736,51 +736,8 @@ static void wake_futex(struct task_struct **wake_list, struct futex_q *q)
 	smp_wmb();
 	q->lock_ptr = NULL;
-	/*
-	 * Atomically grab the task, if ->futex_wakeup is !0 already it means
-	 * its already queued (either by us or someone else) and will get the
-	 * wakeup due to that.
-	 *
-	 * This cmpxchg() implies a full barrier, which pairs with the write
-	 * barrier implied by the wakeup in wake_futex_list().
-	 */
-	if (cmpxchg(&p->futex_wakeup, 0, p) != 0) {
-		/*
-		 * It was already queued, drop the extra ref and we're done.
-		 */
-		put_task_struct(p);
-		return;
-	}
-	/*
-	 * Put the task on our wakeup list by atomically switching it with
-	 * the list head. (XXX its a local list, no possible concurrency,
-	 * this could be written without cmpxchg).
-	 */
-	do {
-		p->futex_wakeup = *wake_list;
-	} while (cmpxchg(wake_list, p->futex_wakeup, p) != p->futex_wakeup);
-}
-/*
- * For each task on the list, deliver the pending wakeup and release the
- * task reference obtained in wake_futex().
- */
-static void wake_futex_list(struct task_struct *head)
-{
-	while (head != &init_task) {
-		struct task_struct *next = head->futex_wakeup;
-		head->futex_wakeup = NULL;
-		/*
-		 * wake_up_state() implies a wmb() to pair with the queueing
-		 * in wake_futex() so as to not miss wakeups.
-		 */
-		wake_up_state(head, TASK_NORMAL);
-		put_task_struct(head);
-		head = next;
-	}
+	wake_up_state(p, TASK_NORMAL);
+	put_task_struct(p);
 }
@@ -894,7 +851,6 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 	struct futex_q *this, *next;
 	struct plist_head *head;
 	union futex_key key = FUTEX_KEY_INIT;
-	struct task_struct *wake_list = &init_task;
 	int ret;
 	if (!bitset)
@@ -919,7 +875,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 		if (!(this->bitset & bitset))
 			continue;
-		wake_futex(&wake_list, this);
+		wake_futex(this);
 		if (++ret >= nr_wake)
 			break;
 	}
@@ -927,8 +883,6 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 	spin_unlock(&hb->lock);
 	put_futex_key(fshared, &key);
-	wake_futex_list(wake_list);
 out:
 	return ret;
 }
@@ -945,7 +899,6 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	struct futex_hash_bucket *hb1, *hb2;
 	struct plist_head *head;
 	struct futex_q *this, *next;
-	struct task_struct *wake_list = &init_task;
 	int ret, op_ret;
 retry:
@@ -996,7 +949,7 @@ retry_private:
 	plist_for_each_entry_safe(this, next, head, list) {
 		if (match_futex (&this->key, &key1)) {
-			wake_futex(&wake_list, this);
+			wake_futex(this);
 			if (++ret >= nr_wake)
 				break;
 		}
@@ -1008,7 +961,7 @@ retry_private:
 		op_ret = 0;
 		plist_for_each_entry_safe(this, next, head, list) {
 			if (match_futex (&this->key, &key2)) {
-				wake_futex(&wake_list, this);
+				wake_futex(this);
 				if (++op_ret >= nr_wake2)
 					break;
 			}
@@ -1021,8 +974,6 @@ out_put_keys:
 	put_futex_key(fshared, &key2);
 out_put_key1:
 	put_futex_key(fshared, &key1);
-	wake_futex_list(wake_list);
 out:
 	return ret;
 }
@@ -1177,7 +1128,6 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	struct futex_hash_bucket *hb1, *hb2;
 	struct plist_head *head1;
 	struct futex_q *this, *next;
-	struct task_struct *wake_list = &init_task;
 	u32 curval2;
 	if (requeue_pi) {
@@ -1322,7 +1272,7 @@ retry_private:
 		 * woken by futex_unlock_pi().
 		 */
 		if (++task_count <= nr_wake && !requeue_pi) {
-			wake_futex(&wake_list, this);
+			wake_futex(this);
 			continue;
 		}
@@ -1368,8 +1318,6 @@ out_put_keys:
 	put_futex_key(fshared, &key2);
 out_put_key1:
 	put_futex_key(fshared, &key1);
-	wake_futex_list(wake_list);
 out:
 	if (pi_state != NULL)
 		free_pi_state(pi_state);
@@ -1805,6 +1753,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 					     current->timer_slack_ns);
 	}
+retry:
 	/* Prepare to wait on uaddr. */
 	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
 	if (ret)
@@ -1822,9 +1771,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		goto out_put_key;
 	/*
-	 * We expect signal_pending(current), but another thread may
-	 * have handled it for us already.
+	 * We expect signal_pending(current), but we might be the
+	 * victim of a spurious wakeup as well.
 	 */
+	if (!signal_pending(current)) {
+		put_futex_key(fshared, &q.key);
+		goto retry;
+	}
 	ret = -ERESTARTSYS;
 	if (!abs_time)
 		goto out_put_key;
@@ -2131,9 +2085,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 		 */
 		plist_del(&q->list, &q->list.plist);
+		/* Handle spurious wakeups gracefully */
+		ret = -EAGAIN;
 		if (timeout && !timeout->task)
 			ret = -ETIMEDOUT;
-		else
+		else if (signal_pending(current))
 			ret = -ERESTARTNOINTR;
 	}
 	return ret;
@@ -2215,6 +2171,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
 	q.bitset = bitset;
 	q.rt_waiter = &rt_waiter;
+retry:
 	key2 = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
 	if (unlikely(ret != 0))
@@ -2307,6 +2264,9 @@ out_put_keys:
 out_key2:
 	put_futex_key(fshared, &key2);
+	/* Spurious wakeup ? */
+	if (ret == -EAGAIN)
+		goto retry;
 out:
 	if (to) {
 		hrtimer_cancel(&to->timer);
...
@@ -1139,9 +1139,7 @@ static const char *softirq_names [] =
 	[NET_RX_SOFTIRQ] = "net-rx",
 	[BLOCK_SOFTIRQ] = "block",
 	[TASKLET_SOFTIRQ] = "tasklet",
-#ifdef CONFIG_HIGH_RES_TIMERS
 	[HRTIMER_SOFTIRQ] = "hrtimer",
-#endif
 	[RCU_SOFTIRQ] = "rcu",
 };
@@ -1161,8 +1159,6 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 			per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
 		}
 		for (i = 0; i < NR_SOFTIRQS; i++) {
-			if (!softirq_names[i])
-				continue;
 			p = kthread_create(ksoftirqd,
 					   &per_cpu(ksoftirqd, hotcpu)[i],
 					   "sirq-%s/%d", softirq_names[i],
@@ -1179,11 +1175,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		for (i = 0; i < NR_SOFTIRQS; i++) {
-			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
-			if (p)
-				wake_up_process(p);
-		}
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			wake_up_process(per_cpu(ksoftirqd, hotcpu)[i].tsk);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
@@ -1197,12 +1190,10 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 		for (i = 0; i < NR_SOFTIRQS; i++) {
 			param.sched_priority = MAX_RT_PRIO-1;
 			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
-			if (p) {
 			sched_setscheduler(p, SCHED_FIFO, &param);
 			per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
 			kthread_stop(p);
 		}
-		}
 		takeover_tasklets(hotcpu);
 		break;
 	}
...
@@ -164,6 +164,9 @@ static void slab_irq_disable_GFP_WAIT(gfp_t flags, int *cpu)
 		local_irq_disable();
 }
+#define slab_spin_trylock_irq(lock, cpu) \
+	({ int __l = spin_trylock_irq(lock); if (__l) (cpu) = smp_processor_id(); __l; })
 # define slab_spin_lock_irq(lock, cpu) \
 	do { spin_lock_irq(lock); (cpu) = smp_processor_id(); } while (0)
 # define slab_spin_unlock_irq(lock, cpu) spin_unlock_irq(lock)
@@ -241,10 +244,26 @@ static void slab_irq_disable_GFP_WAIT(gfp_t flags, int *cpu)
 		slab_irq_disable(*cpu);
 }
+static int _slab_spin_trylock_irq(spinlock_t *lock, int *cpu)
+{
+	int locked;
+	slab_irq_disable(*cpu);
+	locked = spin_trylock(lock);
+	if (!locked)
+		slab_irq_enable(*cpu);
+	return locked;
+}
+# define slab_spin_trylock_irq(lock, cpu) \
+	_slab_spin_trylock_irq((lock), &(cpu))
 # define slab_spin_lock_irq(lock, cpu) \
 	do { slab_irq_disable(cpu); spin_lock(lock); } while (0)
 # define slab_spin_unlock_irq(lock, cpu) \
 	do { spin_unlock(lock); slab_irq_enable(cpu); } while (0)
 # define slab_spin_lock_irqsave(lock, flags, cpu) \
 	do { slab_irq_disable(cpu); spin_lock_irqsave(lock, flags); } while (0)
 # define slab_spin_unlock_irqrestore(lock, flags, cpu) \
@@ -1063,7 +1082,7 @@ static int transfer_objects(struct array_cache *to,
 #ifndef CONFIG_NUMA
 #define drain_alien_cache(cachep, alien) do { } while (0)
-#define reap_alien(cachep, l3, this_cpu) 0
+#define reap_alien(cachep, l3) 0
 static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
@@ -1161,16 +1180,17 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  * Called from cache_reap() to regularly drain alien caches round robin.
  */
 static int
-reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3, int *this_cpu)
+reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = per_cpu(reap_node, *this_cpu);
+	int node = __get_cpu_var(reap_node);
+	int this_cpu;
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-			__drain_alien_cache(cachep, ac, node, this_cpu);
-			spin_unlock_irq(&ac->lock);
+		if (ac && ac->avail && slab_spin_trylock_irq(&ac->lock, this_cpu)) {
+			__drain_alien_cache(cachep, ac, node, &this_cpu);
+			slab_spin_unlock_irq(&ac->lock, this_cpu);
 			return 1;
 		}
 	}
@@ -4274,7 +4294,7 @@ int drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  */
 static void cache_reap(struct work_struct *w)
 {
-	int this_cpu = raw_smp_processor_id(), node = cpu_to_node(this_cpu);
+	int this_cpu = smp_processor_id(), node = cpu_to_node(this_cpu);
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
 	struct delayed_work *work = to_delayed_work(w);
@@ -4294,7 +4314,7 @@ static void cache_reap(struct work_struct *w)
 	 */
 	l3 = searchp->nodelists[node];
-	work_done += reap_alien(searchp, l3, &this_cpu);
+	work_done += reap_alien(searchp, l3);
 	node = cpu_to_node(this_cpu);
...