Commit 2b3582fb authored by Ingo Molnar, committed by Thomas Gleixner

tasklet: redesign: make it saner and make it easier to thread.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
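
The core of the redesign is that a tasklet's RUN bit is taken with a test-and-set and released only through a cmpxchg that requires the state to be exactly TASKLET_STATEF_RUN, so a schedule or disable that races with the handler can never be lost. Below is a minimal userspace sketch of that state machine (illustrative only: the demo_* names and the GCC __atomic builtins stand in for the kernel's test_and_set_bit()/cmpxchg() bitops and are not part of the patch):

/*
 * Illustrative userspace model of the new tasklet locking scheme.
 * The demo_* names are invented for this sketch.
 */
#include <stdio.h>

enum {
	SCHED   = 1 << 0,	/* tasklet is queued for execution */
	RUN     = 1 << 1,	/* tasklet is currently running */
	PENDING = 1 << 2,	/* found disabled by the softirq (not modeled here) */
};

struct demo_tasklet {
	unsigned long state;
	int runs;
};

/* tasklet_trylock(): grab RUN, fail if someone else is running it */
static int demo_trylock(struct demo_tasklet *t)
{
	return !(__atomic_fetch_or(&t->state, RUN, __ATOMIC_ACQUIRE) & RUN);
}

/* tasklet_tryunlock(): only the exact RUN -> 0 transition may drop the lock */
static int demo_tryunlock(struct demo_tasklet *t)
{
	unsigned long expect = RUN;

	return __atomic_compare_exchange_n(&t->state, &expect, 0, 0,
					   __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

/* tasklet_schedule(): mark it SCHED; a concurrent runner will notice */
static void demo_schedule(struct demo_tasklet *t)
{
	__atomic_fetch_or(&t->state, SCHED, __ATOMIC_RELEASE);
}

/* core loop of __tasklet_action(): rerun as long as SCHED reappears */
static void demo_run(struct demo_tasklet *t)
{
	if (!demo_trylock(t))
		return;		/* already running elsewhere */
	__atomic_fetch_and(&t->state, ~(unsigned long)SCHED, __ATOMIC_ACQUIRE);
again:
	t->runs++;		/* stands in for t->func(t->data) */
	while (!demo_tryunlock(t)) {
		/* rescheduled while running: clear SCHED and run once more */
		if (__atomic_fetch_and(&t->state, ~(unsigned long)SCHED,
				       __ATOMIC_ACQUIRE) & SCHED)
			goto again;
	}
}

int main(void)
{
	struct demo_tasklet t = { 0, 0 };

	demo_schedule(&t);
	demo_run(&t);		/* executes once */
	demo_schedule(&t);
	demo_schedule(&t);	/* coalesced with the previous schedule */
	demo_run(&t);		/* executes once more */
	printf("ran %d times (expected 2)\n", t.runs);
	return 0;
}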

----
 include/linux/interrupt.h |   33 ++++----
 kernel/softirq.c          |  184 ++++++++++++++++++++++++++++++++--------------
 2 files changed, 149 insertions(+), 68 deletions(-)
parent 5a90027b
include/linux/interrupt.h
@@ -422,8 +422,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
* to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
* started, it will be executed only once.
* If this tasklet is already running on another CPU (or schedule is called
* from tasklet itself), it is rescheduled for later.
* If this tasklet is already running on another CPU, it is rescheduled
* for later.
* Schedule must not be called from the tasklet itself (a lockup occurs).
* Tasklet is strictly serialized wrt itself, but not
* wrt other tasklets. If a client needs some intertask synchronization,
* it must do it with spinlocks.
@@ -448,15 +449,25 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
TASKLET_STATE_PENDING /* Tasklet is pending */
};
#ifdef CONFIG_SMP
#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
static inline int tasklet_tryunlock(struct tasklet_struct *t)
{
return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
}
static inline void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_clear_bit();
@@ -469,6 +480,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
}
#else
#define tasklet_trylock(t) 1
#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
@@ -517,17 +529,8 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}
static inline void tasklet_enable(struct tasklet_struct *t)
{
smp_mb__before_atomic_dec();
atomic_dec(&t->count);
}
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
smp_mb__before_atomic_dec();
atomic_dec(&t->count);
}
extern void tasklet_enable(struct tasklet_struct *t);
extern void tasklet_hi_enable(struct tasklet_struct *t);
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
kernel/softirq.c
@@ -398,15 +398,45 @@ struct tasklet_head
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
static inline void
__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
{
if (tasklet_trylock(t)) {
again:
/* We may have been preempted before tasklet_trylock
* and __tasklet_action may have already run.
* So double check the sched bit while the tasklet
* is locked before adding it to the list.
*/
if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
t->next = NULL;
*head->tail = t;
head->tail = &(t->next);
raise_softirq_irqoff(nr);
tasklet_unlock(t);
} else {
/* This is subtle. If we hit the corner case above,
* it is possible that we get preempted right here,
* and another task has successfully called
* tasklet_schedule(), then this function, and
* failed on the trylock. Thus we must be sure
* before releasing the tasklet lock, that the
* SCHED_BIT is clear. Otherwise the tasklet
* may get its SCHED_BIT set, but not added to the
* list.
*/
if (!tasklet_tryunlock(t))
goto again;
}
}
}
void __tasklet_schedule(struct tasklet_struct *t)
{
unsigned long flags;
local_irq_save(flags);
t->next = NULL;
*__get_cpu_var(tasklet_vec).tail = t;
__get_cpu_var(tasklet_vec).tail = &(t->next);
raise_softirq_irqoff(TASKLET_SOFTIRQ);
__tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
@@ -417,10 +447,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
unsigned long flags;
local_irq_save(flags);
t->next = NULL;
*__get_cpu_var(tasklet_hi_vec).tail = t;
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
raise_softirq_irqoff(HI_SOFTIRQ);
__tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
local_irq_restore(flags);
}
@@ -437,39 +464,112 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
void tasklet_enable(struct tasklet_struct *t)
{
struct tasklet_struct *list;
if (!atomic_dec_and_test(&t->count))
return;
if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
tasklet_schedule(t);
}
local_irq_disable();
list = __get_cpu_var(tasklet_vec).head;
__get_cpu_var(tasklet_vec).head = NULL;
__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
local_irq_enable();
EXPORT_SYMBOL(tasklet_enable);
void tasklet_hi_enable(struct tasklet_struct *t)
{
if (!atomic_dec_and_test(&t->count))
return;
if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
tasklet_hi_schedule(t);
}
EXPORT_SYMBOL(tasklet_hi_enable);
static void
__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
{
int loops = 1000000;
while (list) {
struct tasklet_struct *t = list;
list = list->next;
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
/*
* Should always succeed - after a tasklet got on the
* list (after getting the SCHED bit set from 0 to 1),
* nothing but the tasklet softirq it got queued to can
* lock it:
*/
if (!tasklet_trylock(t)) {
WARN_ON(1);
continue;
}
t->next = NULL;
/*
* If we cannot handle the tasklet because it's disabled,
* mark it as pending. tasklet_enable() will later
* re-schedule the tasklet.
*/
if (unlikely(atomic_read(&t->count))) {
out_disabled:
/* implicit unlock: */
wmb();
t->state = TASKLET_STATEF_PENDING;
continue;
}
/*
* From this point on the tasklet might be rescheduled
* on another CPU, but it can only be added to another
* CPU's tasklet list if we unlock the tasklet (which we
* don't do yet).
*/
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
BUG();
WARN_ON(1);
again:
t->func(t->data);
/*
* Try to unlock the tasklet. We must use cmpxchg, because
* another CPU might have scheduled or disabled the tasklet.
* We only allow the STATE_RUN -> 0 transition here.
*/
while (!tasklet_tryunlock(t)) {
/*
* If it got disabled meanwhile, bail out:
*/
if (atomic_read(&t->count))
goto out_disabled;
/*
* If it got scheduled meanwhile, re-execute
* the tasklet function:
*/
if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
goto again;
if (!--loops) {
printk("hm, tasklet state: %08lx\n", t->state);
WARN_ON(1);
tasklet_unlock(t);
continue;
break;
}
}
tasklet_unlock(t);
}
}
static void tasklet_action(struct softirq_action *a)
{
struct tasklet_struct *list;
local_irq_disable();
t->next = NULL;
*__get_cpu_var(tasklet_vec).tail = t;
__get_cpu_var(tasklet_vec).tail = &(t->next);
__do_raise_softirq_irqoff(TASKLET_SOFTIRQ);
list = __get_cpu_var(tasklet_vec).head;
__get_cpu_var(tasklet_vec).head = NULL;
__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
local_irq_enable();
}
__tasklet_action(a, list);
}
static void tasklet_hi_action(struct softirq_action *a)
@@ -479,32 +579,10 @@ static void tasklet_hi_action(struct softirq_action *a)
local_irq_disable();
list = __get_cpu_var(tasklet_hi_vec).head;
__get_cpu_var(tasklet_hi_vec).head = NULL;
__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
local_irq_enable();
while (list) {
struct tasklet_struct *t = list;
list = list->next;
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
BUG();
t->func(t->data);
tasklet_unlock(t);
continue;
}
tasklet_unlock(t);
}
local_irq_disable();
t->next = NULL;
*__get_cpu_var(tasklet_hi_vec).tail = t;
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
__do_raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
}
__tasklet_action(a, list);
}
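
For context, a minimal, hypothetical module showing the driver-side tasklet API touched by this patch; the module and callback names are invented, while tasklet_init()/tasklet_schedule()/tasklet_disable()/tasklet_enable()/tasklet_kill() are the existing kernel interfaces. With this patch, a tasklet that the softirq finds disabled is parked as PENDING and tasklet_enable() puts it back on the list, instead of the softirq requeueing it over and over:

#include <linux/module.h>
#include <linux/interrupt.h>

static void demo_work(unsigned long data)
{
	printk(KERN_INFO "demo tasklet ran, data=%lu\n", data);
}

static struct tasklet_struct demo_tasklet;

static int __init demo_init(void)
{
	tasklet_init(&demo_tasklet, demo_work, 42);

	tasklet_schedule(&demo_tasklet);	/* queue it for TASKLET_SOFTIRQ */

	tasklet_disable(&demo_tasklet);		/* hold the callback off */
	tasklet_schedule(&demo_tasklet);	/* if the softirq sees it disabled,
						 * it is marked PENDING rather than
						 * being requeued repeatedly */
	tasklet_enable(&demo_tasklet);		/* re-schedules it if it went PENDING */

	return 0;
}

static void __exit demo_exit(void)
{
	tasklet_kill(&demo_tasklet);		/* wait for the last run, then drop it */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");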