Commit 2b3582fb authored by Ingo Molnar's avatar Ingo Molnar Committed by Thomas Gleixner

tasklet: redesign: make it saner and make it easier to thread.

Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>

----
 include/linux/interrupt.h |   33 ++++----
 kernel/softirq.c          |  184 ++++++++++++++++++++++++++++++++--------------
 2 files changed, 149 insertions(+), 68 deletions(-)
parent 5a90027b
...@@ -422,8 +422,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu, ...@@ -422,8 +422,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
to be executed on some cpu at least once after this. to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not * If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once. started, it will be executed only once.
* If this tasklet is already running on another CPU (or schedule is called * If this tasklet is already running on another CPU, it is rescheduled
from tasklet itself), it is rescheduled for later. for later.
* Schedule must not be called from the tasklet itself (a lockup occurs)
* Tasklet is strictly serialized wrt itself, but not * Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization, wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks. he makes it with spinlocks.
...@@ -448,15 +449,25 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } ...@@ -448,15 +449,25 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum enum
{ {
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
TASKLET_STATE_PENDING /* Tasklet is pending */
}; };
#ifdef CONFIG_SMP #define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t) static inline int tasklet_trylock(struct tasklet_struct *t)
{ {
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
} }
static inline int tasklet_tryunlock(struct tasklet_struct *t)
{
return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
}
static inline void tasklet_unlock(struct tasklet_struct *t) static inline void tasklet_unlock(struct tasklet_struct *t)
{ {
smp_mb__before_clear_bit(); smp_mb__before_clear_bit();
...@@ -469,6 +480,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t) ...@@ -469,6 +480,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
} }
#else #else
#define tasklet_trylock(t) 1 #define tasklet_trylock(t) 1
#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0) #define tasklet_unlock(t) do { } while (0)
#endif #endif
...@@ -517,17 +529,8 @@ static inline void tasklet_disable(struct tasklet_struct *t) ...@@ -517,17 +529,8 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb(); smp_mb();
} }
static inline void tasklet_enable(struct tasklet_struct *t) extern void tasklet_enable(struct tasklet_struct *t);
{ extern void tasklet_hi_enable(struct tasklet_struct *t);
smp_mb__before_atomic_dec();
atomic_dec(&t->count);
}
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
smp_mb__before_atomic_dec();
atomic_dec(&t->count);
}
extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
......
...@@ -398,15 +398,45 @@ struct tasklet_head ...@@ -398,15 +398,45 @@ struct tasklet_head
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
static void inline
__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
{
if (tasklet_trylock(t)) {
again:
/* We may have been preempted before tasklet_trylock
* and __tasklet_action may have already run.
* So double check the sched bit while the tasklet
* is locked before adding it to the list.
*/
if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
t->next = NULL;
*head->tail = t;
head->tail = &(t->next);
raise_softirq_irqoff(nr);
tasklet_unlock(t);
} else {
/* This is subtle. If we hit the corner case above
* It is possible that we get preempted right here,
* and another task has successfully called
* tasklet_schedule(), then this function, and
* failed on the trylock. Thus we must be sure
* before releasing the tasklet lock, that the
* SCHED_BIT is clear. Otherwise the tasklet
* may get its SCHED_BIT set, but not added to the
* list
*/
if (!tasklet_tryunlock(t))
goto again;
}
}
}
void __tasklet_schedule(struct tasklet_struct *t) void __tasklet_schedule(struct tasklet_struct *t)
{ {
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
t->next = NULL; __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
*__get_cpu_var(tasklet_vec).tail = t;
__get_cpu_var(tasklet_vec).tail = &(t->next);
raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -417,10 +447,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) ...@@ -417,10 +447,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
t->next = NULL; __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), HI_SOFTIRQ);
*__get_cpu_var(tasklet_hi_vec).tail = t;
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -437,74 +464,125 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) ...@@ -437,74 +464,125 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
EXPORT_SYMBOL(__tasklet_hi_schedule_first); EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a) void tasklet_enable(struct tasklet_struct *t)
{ {
struct tasklet_struct *list; if (!atomic_dec_and_test(&t->count))
return;
if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
tasklet_schedule(t);
}
local_irq_disable(); EXPORT_SYMBOL(tasklet_enable);
list = __get_cpu_var(tasklet_vec).head;
__get_cpu_var(tasklet_vec).head = NULL; void tasklet_hi_enable(struct tasklet_struct *t)
__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; {
local_irq_enable(); if (!atomic_dec_and_test(&t->count))
return;
if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
tasklet_hi_schedule(t);
}
EXPORT_SYMBOL(tasklet_hi_enable);
static void
__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
{
int loops = 1000000;
while (list) { while (list) {
struct tasklet_struct *t = list; struct tasklet_struct *t = list;
list = list->next; list = list->next;
if (tasklet_trylock(t)) { /*
if (!atomic_read(&t->count)) { * Should always succeed - after a tasklet got on the
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) * list (after getting the SCHED bit set from 0 to 1),
BUG(); * nothing but the tasklet softirq it got queued to can
t->func(t->data); * lock it:
tasklet_unlock(t); */
continue; if (!tasklet_trylock(t)) {
} WARN_ON(1);
tasklet_unlock(t); continue;
} }
local_irq_disable();
t->next = NULL; t->next = NULL;
*__get_cpu_var(tasklet_vec).tail = t;
__get_cpu_var(tasklet_vec).tail = &(t->next); /*
__do_raise_softirq_irqoff(TASKLET_SOFTIRQ); * If we cannot handle the tasklet because it's disabled,
local_irq_enable(); * mark it as pending. tasklet_enable() will later
* re-schedule the tasklet.
*/
if (unlikely(atomic_read(&t->count))) {
out_disabled:
/* implicit unlock: */
wmb();
t->state = TASKLET_STATEF_PENDING;
continue;
}
/*
* After this point on the tasklet might be rescheduled
* on another CPU, but it can only be added to another
* CPU's tasklet list if we unlock the tasklet (which we
* dont do yet).
*/
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
WARN_ON(1);
again:
t->func(t->data);
/*
* Try to unlock the tasklet. We must use cmpxchg, because
* another CPU might have scheduled or disabled the tasklet.
* We only allow the STATE_RUN -> 0 transition here.
*/
while (!tasklet_tryunlock(t)) {
/*
* If it got disabled meanwhile, bail out:
*/
if (atomic_read(&t->count))
goto out_disabled;
/*
* If it got scheduled meanwhile, re-execute
* the tasklet function:
*/
if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
goto again;
if (!--loops) {
printk("hm, tasklet state: %08lx\n", t->state);
WARN_ON(1);
tasklet_unlock(t);
break;
}
}
} }
} }
static void tasklet_hi_action(struct softirq_action *a) static void tasklet_action(struct softirq_action *a)
{ {
struct tasklet_struct *list; struct tasklet_struct *list;
local_irq_disable(); local_irq_disable();
list = __get_cpu_var(tasklet_hi_vec).head; list = __get_cpu_var(tasklet_vec).head;
__get_cpu_var(tasklet_hi_vec).head = NULL; __get_cpu_var(tasklet_vec).head = NULL;
__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head; __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
local_irq_enable(); local_irq_enable();
while (list) { __tasklet_action(a, list);
struct tasklet_struct *t = list; }
list = list->next; static void tasklet_hi_action(struct softirq_action *a)
{
struct tasklet_struct *list;
if (tasklet_trylock(t)) { local_irq_disable();
if (!atomic_read(&t->count)) { list = __get_cpu_var(tasklet_hi_vec).head;
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) __get_cpu_var(tasklet_hi_vec).head = NULL;
BUG(); __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_vec).head;
t->func(t->data); local_irq_enable();
tasklet_unlock(t);
continue;
}
tasklet_unlock(t);
}
local_irq_disable(); __tasklet_action(a, list);
t->next = NULL;
*__get_cpu_var(tasklet_hi_vec).tail = t;
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
__do_raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
}
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment