Commit c2a1643b authored by Ingo Molnar, committed by Thomas Gleixner

softirq: Full preempt-rt support

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 254dc502
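
Overview of the change (annotation, not part of the commit): the patch introduces a per-CPU `softirq_running` bitmask so that each softirq vector is serialized individually per CPU -- a vector already executing in another context is skipped and left pending rather than entered twice. `___do_softirq()` gains a `same_prio_only` argument so a same-priority hardirq thread only runs softirqs whose thread matches its own priority, and skipped vectors are handed back via `or_softirq_pending()`. Below is a minimal user-space model of the skip-and-re-raise idea; it is not kernel code -- only the names `softirq_running`, `pending` and `available_mask` mirror the patch, everything else is invented for illustration.

/*
 * User-space model of the per-vector serialization this patch adds.
 * NOT kernel code; handlers and main() are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_SOFTIRQS 2

static uint32_t pending;         /* models local_softirq_pending()        */
static uint32_t softirq_running; /* models per_cpu(softirq_running, cpu)  */

static void run_pending(void)
{
        uint32_t available_mask = (uint32_t)-1;
        int softirq;

        for (softirq = 0; softirq < NR_SOFTIRQS; softirq++) {
                uint32_t mask = 1u << softirq;

                if (!(pending & mask))
                        continue;
                /* Already running in another context? Leave it pending: */
                if (softirq_running & mask) {
                        available_mask &= ~mask;
                        continue;
                }
                softirq_running |= mask;   /* claim the vector  */
                pending &= ~mask;
                printf("running softirq %d\n", softirq);
                softirq_running &= ~mask;  /* release it again  */
        }
        /* Only vectors still in available_mask would justify a restart: */
        printf("still pending & available: %#x\n",
               (unsigned)(pending & available_mask));
}

int main(void)
{
        pending = 0x3;          /* both vectors raised                    */
        softirq_running = 0x2;  /* vector 1 claimed by "another context"  */
        run_pending();          /* runs vector 0, leaves vector 1 pending */
        return 0;
}
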
@@ -107,7 +107,7 @@ static void trigger_softirqs(void)
 	}
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifndef CONFIG_PREEMPT_HARDIRQS
 
 /*
  * This one is for softirq.c-internal use,
@@ -161,7 +161,6 @@ EXPORT_SYMBOL(local_bh_disable);
  */
 void _local_bh_enable(void)
 {
-	WARN_ON_ONCE(in_irq());
 	WARN_ON_ONCE(!irqs_disabled());
 
 	if (softirq_count() == SOFTIRQ_OFFSET)
@@ -171,42 +170,67 @@ void _local_bh_enable(void)
 
 EXPORT_SYMBOL(_local_bh_enable);
 
-static inline void _local_bh_enable_ip(unsigned long ip)
+void local_bh_enable(void)
 {
-	WARN_ON_ONCE(in_irq() || irqs_disabled());
 #ifdef CONFIG_TRACE_IRQFLAGS
-	local_irq_disable();
+	unsigned long flags;
+
+	WARN_ON_ONCE(in_irq());
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	local_irq_save(flags);
 #endif
 	/*
 	 * Are softirqs going to be turned on now:
 	 */
 	if (softirq_count() == SOFTIRQ_OFFSET)
-		trace_softirqs_on(ip);
+		trace_softirqs_on((unsigned long)__builtin_return_address(0));
 	/*
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
- 	 */
- 	sub_preempt_count(SOFTIRQ_OFFSET - 1);
+	 */
+	sub_preempt_count(SOFTIRQ_OFFSET - 1);
 
 	if (unlikely(!in_interrupt() && local_softirq_pending()))
 		do_softirq();
 
 	dec_preempt_count();
 #ifdef CONFIG_TRACE_IRQFLAGS
-	local_irq_enable();
+	local_irq_restore(flags);
 #endif
 	preempt_check_resched();
 }
-
-void local_bh_enable(void)
-{
-	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
 EXPORT_SYMBOL(local_bh_enable);
 
 void local_bh_enable_ip(unsigned long ip)
 {
-	_local_bh_enable_ip(ip);
+#ifdef CONFIG_TRACE_IRQFLAGS
+	unsigned long flags;
+
+	WARN_ON_ONCE(in_irq());
+
+	local_irq_save(flags);
+#endif
+	/*
+	 * Are softirqs going to be turned on now:
+	 */
+	if (softirq_count() == SOFTIRQ_OFFSET)
+		trace_softirqs_on(ip);
+	/*
+	 * Keep preemption disabled until we are done with
+	 * softirq processing:
+	 */
+	sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+	if (unlikely(!in_interrupt() && local_softirq_pending()))
+		do_softirq();
+
+	dec_preempt_count();
+#ifdef CONFIG_TRACE_IRQFLAGS
+	local_irq_restore(flags);
+#endif
+	preempt_check_resched();
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
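
Note on this hunk (annotation): the shared `_local_bh_enable_ip()` helper is dropped and both exported functions are open-coded. Under CONFIG_TRACE_IRQFLAGS the interrupt state is now saved and restored (`local_irq_save()`/`local_irq_restore()`) instead of being forced off and back on, and the unconditional `WARN_ON_ONCE(in_irq() || irqs_disabled())` is gone, so the functions tolerate being called with interrupts disabled, as happens under RT. The preempt-count arithmetic is unchanged: `sub_preempt_count(SOFTIRQ_OFFSET - 1)` removes the softirq-disable bias but keeps one plain preemption reference held while pending softirqs are run, and `dec_preempt_count()` drops it. A small user-space model of that arithmetic (not kernel code; SOFTIRQ_OFFSET's value matches the kernel's):

/* Model of the preempt-count bookkeeping in local_bh_enable(). */
#include <stdio.h>

#define SOFTIRQ_OFFSET 0x100

int main(void)
{
        unsigned int count = 0;              /* stands in for preempt count */

        count += SOFTIRQ_OFFSET;             /* local_bh_disable()          */
        printf("after bh_disable:    %#x\n", count);

        /* Step 1 of local_bh_enable(): drop the softirq-disable part but
         * keep one preemption reference while running pending softirqs:  */
        count -= SOFTIRQ_OFFSET - 1;
        printf("while in do_softirq: %#x\n", count);  /* 0x1: preempt off */

        count -= 1;                          /* dec_preempt_count()         */
        printf("after bh_enable:     %#x\n", count);  /* 0: preemptible   */
        return 0;
}
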
@@ -221,56 +245,118 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  * we want to handle softirqs as soon as possible, but they
  * should not be able to lock up the box.
  */
-#define MAX_SOFTIRQ_RESTART 10
+#define MAX_SOFTIRQ_RESTART 20
 
-static void ___do_softirq(void)
+static DEFINE_PER_CPU(u32, softirq_running);
+
+/*
+ * Debug check for leaking preempt counts in h->action handlers:
+ */
+static inline void debug_check_preempt_count_start(__u32 *preempt_count)
 {
-	struct softirq_action *h;
-	__u32 pending;
+#ifdef CONFIG_DEBUG_PREEMPT
+	*preempt_count = preempt_count();
+#endif
+}
+
+static inline void
+debug_check_preempt_count_stop(__u32 *preempt_count, struct softirq_action *h)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+	if (*preempt_count == preempt_count())
+		return;
+
+	print_symbol("BUG: %Ps exited with wrong preemption count!\n",
+		     (unsigned long)h->action);
+	printk("=> enter: %08x, exit: %08x.\n", *preempt_count, preempt_count());
+	preempt_count() = *preempt_count;
+#endif
+}
+
+/*
+ * Execute softirq handlers:
+ */
+static void ___do_softirq(const int same_prio_only)
+{
+	__u32 pending, available_mask, same_prio_skipped, preempt_count;
 	int max_restart = MAX_SOFTIRQ_RESTART;
-	int cpu;
+	struct softirq_action *h;
+	int cpu, softirq;
 
 	pending = local_softirq_pending();
 	account_system_vtime(current);
 
 	cpu = smp_processor_id();
 restart:
+	available_mask = -1;
+	softirq = 0;
+	same_prio_skipped = 0;
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
 
-	local_irq_enable();
-
 	h = softirq_vec;
 
 	do {
-		if (pending & 1) {
-			int prev_count = preempt_count();
-			kstat_incr_softirqs_this_cpu(h - softirq_vec);
-
-			trace_softirq_entry(h, softirq_vec);
-			h->action(h);
-			trace_softirq_exit(h, softirq_vec);
-			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %td %s %p"
-				       "with preempt_count %08x,"
-				       " exited with %08x?\n", h - softirq_vec,
-				       softirq_to_name[h - softirq_vec],
-				       h->action, prev_count, preempt_count());
-				preempt_count() = prev_count;
-			}
+		u32 softirq_mask = 1 << softirq;
 
-			rcu_bh_qsctr_inc(cpu);
-			cond_resched_softirq_context();
-		}
+		if (!(pending & 1))
+			goto next;
+
+		debug_check_preempt_count_start(&preempt_count);
+
+#if defined(CONFIG_PREEMPT_SOFTIRQS) && defined(CONFIG_PREEMPT_HARDIRQS)
+		/*
+		 * If executed by a same-prio hardirq thread
+		 * then skip pending softirqs that belong
+		 * to softirq threads with different priority:
+		 */
+		if (same_prio_only) {
+			struct task_struct *tsk;
+
+			tsk = __get_cpu_var(ksoftirqd)[softirq].tsk;
+			if (tsk && tsk->normal_prio != current->normal_prio) {
+				same_prio_skipped |= softirq_mask;
+				available_mask &= ~softirq_mask;
+				goto next;
+			}
+		}
+#endif
+		/*
+		 * Is this softirq already being processed?
+		 */
+		if (per_cpu(softirq_running, cpu) & softirq_mask) {
+			available_mask &= ~softirq_mask;
+			goto next;
+		}
+		per_cpu(softirq_running, cpu) |= softirq_mask;
+		kstat_incr_softirqs_this_cpu(h - softirq_vec);
+		local_irq_enable();
+
+		trace_softirq_entry(h, softirq_vec);
+		h->action(h);
+		trace_softirq_exit(h, softirq_vec);
+
+		debug_check_preempt_count_stop(&preempt_count, h);
+
+		rcu_bh_qsctr_inc(cpu);
+		cond_resched_softirq_context();
+		local_irq_disable();
+		per_cpu(softirq_running, cpu) &= ~softirq_mask;
+next:
 		h++;
+		softirq++;
 		pending >>= 1;
 	} while (pending);
 
-	local_irq_disable();
-
+	or_softirq_pending(same_prio_skipped);
 	pending = local_softirq_pending();
-	if (pending && --max_restart)
-		goto restart;
+	if (pending & available_mask) {
+		if (--max_restart)
+			goto restart;
+	}
 
 	if (pending)
 		trigger_softirqs();
@@ -294,7 +380,7 @@ asmlinkage void __do_softirq(void)
 	__local_bh_disable((unsigned long)__builtin_return_address(0));
 	lockdep_softirq_enter();
 
-	___do_softirq();
+	___do_softirq(0);
 
 	lockdep_softirq_exit();
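
Annotation: `__do_softirq()` passes `same_prio_only = 0`, so the ordinary softirq path never filters by priority. The filter only matters when CONFIG_PREEMPT_SOFTIRQS and CONFIG_PREEMPT_HARDIRQS are both enabled and a hardirq thread runs softirqs inline; those callers live elsewhere in the RT tree (not shown in this diff) and would pass 1. A user-space sketch of the filter -- the task structure and priority values are invented stand-ins for `ksoftirqd[softirq].tsk->normal_prio` and `current->normal_prio`:

/* Sketch of the same_prio_only filter; NOT kernel code. */
#include <stdio.h>

struct task { int normal_prio; };

/* per-vector softirq threads on this CPU (hypothetical priorities) */
static struct task ksoftirqd[2] = { { .normal_prio = 49 },
                                    { .normal_prio = 30 } };

static int should_skip(int softirq, int same_prio_only,
                       const struct task *current_task)
{
        if (!same_prio_only)
                return 0;       /* normal path: run everything pending    */
        /* A same-prio hardirq thread only runs softirqs whose thread has
         * exactly its own priority; others stay with their own thread:   */
        return ksoftirqd[softirq].normal_prio != current_task->normal_prio;
}

int main(void)
{
        struct task hardirq_thread = { .normal_prio = 49 };
        int i;

        for (i = 0; i < 2; i++)
                printf("softirq %d: %s\n", i,
                       should_skip(i, 1, &hardirq_thread) ?
                       "skip (re-raise for its thread)" : "run inline");
        return 0;
}
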
@@ -453,7 +539,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), HI_SOFTIRQ);
+	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -461,11 +547,7 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
-	BUG_ON(!irqs_disabled());
-
-	t->next = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = t;
-	__raise_softirq_irqoff(HI_SOFTIRQ);
+	__tasklet_hi_schedule(t);
 }
 
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
@@ -585,7 +667,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 	local_irq_disable();
 	list = __get_cpu_var(tasklet_hi_vec).head;
 	__get_cpu_var(tasklet_hi_vec).head = NULL;
-	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_vec).head;
+	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
 	local_irq_enable();
 
 	__tasklet_action(a, list);
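
Annotation: the two one-line tasklet changes fix copy-paste bugs -- `__tasklet_hi_schedule()` queued HI-priority tasklets onto the normal `tasklet_vec` list, and `tasklet_hi_action()` reset `tasklet_hi_vec.tail` to point at `tasklet_vec.head`, cross-linking the two per-CPU lists. `__tasklet_hi_schedule_first()` is also reduced to a plain `__tasklet_hi_schedule()` call instead of open-coded head insertion. The head/tail idiom both fixes rely on is shown below in a user-space model (not kernel code): after detaching the whole list, `tail` must be reset to the address of *this* list's own `head`.

/* Model of the kernel's head/tail tasklet-list idiom; NOT kernel code. */
#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; int id; };

struct list {
        struct node *head;
        struct node **tail;     /* points at the last ->next (or at head) */
};

static void list_init(struct list *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void list_append(struct list *l, struct node *n)
{
        n->next = NULL;
        *l->tail = n;           /* link after the current last element */
        l->tail = &n->next;
}

/* Detach everything, as tasklet_hi_action() does with irqs off: */
static struct node *list_take_all(struct list *l)
{
        struct node *all = l->head;

        l->head = NULL;
        l->tail = &l->head;     /* the fixed line: OUR head, not another list's */
        return all;
}

int main(void)
{
        struct list hi;
        struct node a = { .id = 1 }, b = { .id = 2 };
        struct node *n;

        list_init(&hi);
        list_append(&hi, &a);
        list_append(&hi, &b);

        for (n = list_take_all(&hi); n; n = n->next)
                printf("tasklet %d\n", n->id);

        list_append(&hi, &a);   /* list is still consistent after take_all */
        printf("head after re-append: %d\n", hi.head->id);
        return 0;
}
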
@@ -846,8 +928,9 @@ static int ksoftirqd(void * __data)
 	/* Priority needs to be below hardirqs */
 	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2 - 1};
 	struct softirqdata *data = __data;
-	u32 mask = (1 << data->nr);
+	u32 softirq_mask = (1 << data->nr);
 	struct softirq_action *h;
+	int cpu = data->cpu;
 
 	sys_sched_setscheduler(current->pid, SCHED_FIFO, &param);
 	current->flags |= PF_SOFTIRQ;
@@ -855,7 +938,8 @@ static int ksoftirqd(void * __data)
 
 	while (!kthread_should_stop()) {
 		preempt_disable();
-		if (!(local_softirq_pending() & mask)) {
+		if (!(local_softirq_pending() & softirq_mask)) {
+sleep_more:
 			preempt_enable_and_schedule();
 			preempt_disable();
 		}
@@ -863,16 +947,26 @@ static int ksoftirqd(void * __data)
 		__set_current_state(TASK_RUNNING);
 		data->running = 1;
 
-		while (local_softirq_pending() & mask) {
+		while (local_softirq_pending() & softirq_mask) {
 			/* Preempt disable stops cpu going offline.
 			   If already offline, we'll be on wrong CPU:
 			   don't process */
-			if (cpu_is_offline(data->cpu))
+			if (cpu_is_offline(cpu))
 				goto wait_to_die;
+
+			/*
+			 * Is the softirq already being executed by
+			 * a hardirq context?
+			 */
+			local_irq_disable();
+			if (per_cpu(softirq_running, cpu) & softirq_mask) {
+				local_irq_enable();
+				set_current_state(TASK_INTERRUPTIBLE);
+				goto sleep_more;
+			}
+			per_cpu(softirq_running, cpu) |= softirq_mask;
 			__preempt_enable_no_resched();
-			set_softirq_pending(local_softirq_pending() & ~mask);
+			set_softirq_pending(local_softirq_pending() & ~softirq_mask);
 			local_bh_disable();
 			local_irq_enable();
@@ -882,6 +976,7 @@ static int ksoftirqd(void * __data)
 			rcu_bh_qsctr_inc(data->cpu);
 
 			local_irq_disable();
+			per_cpu(softirq_running, cpu) &= ~softirq_mask;
 			_local_bh_enable();
 			local_irq_enable();
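
Annotation: the ksoftirqd loop now takes part in the same `softirq_running` protocol. With interrupts disabled it checks whether a hardirq context already owns its vector; if so it marks itself TASK_INTERRUPTIBLE and jumps back to `sleep_more` instead of busy-waiting, otherwise it claims the bit, which it clears again (interrupts off, the hunk above) before `_local_bh_enable()`. A user-space model of the claim-or-sleep step (not kernel code; the irq stubs stand in for local_irq_disable()/local_irq_enable(), and "sleep" is just a printf):

/* Model of the ksoftirqd claim-or-sleep step; NOT kernel code. */
#include <stdio.h>
#include <stdint.h>

static uint32_t softirq_running;        /* models the per-CPU mask */

static void irq_disable(void) { /* would mask local interrupts   */ }
static void irq_enable(void)  { /* would unmask local interrupts */ }

/* Returns 1 if the vector was claimed, 0 if the thread must sleep: */
static int try_claim(uint32_t softirq_mask)
{
        irq_disable();
        if (softirq_running & softirq_mask) {
                /*
                 * A hardirq context is executing this vector: go back to
                 * sleep and let it finish instead of busy-waiting. The
                 * kernel marks itself TASK_INTERRUPTIBLE *before* going
                 * back around, so a wakeup cannot be lost in between.
                 */
                irq_enable();
                return 0;
        }
        softirq_running |= softirq_mask;        /* claimed: we own it now */
        irq_enable();
        return 1;
}

int main(void)
{
        uint32_t mask = 1u << 0;

        softirq_running = mask; /* pretend a hardirq context owns it */
        printf("claim attempt 1: %s\n", try_claim(mask) ? "run" : "sleep");

        softirq_running = 0;    /* the owner released the vector */
        printf("claim attempt 2: %s\n", try_claim(mask) ? "run" : "sleep");
        return 0;
}
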
@@ -1000,7 +1095,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 		for (i = 0; i < NR_SOFTIRQS; i++) {
 			p = kthread_create(ksoftirqd,
 					   &per_cpu(ksoftirqd, hotcpu)[i],
-					   "softirq-%s/%d", softirq_names[i],
+					   "sirq-%s/%d", softirq_names[i],
 					   hotcpu);
 			if (IS_ERR(p)) {
 				printk("ksoftirqd %d for %i failed\n", i,
@@ -1020,22 +1115,17 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-#if 0
-		for (i = 0; i < NR_SOFTIRQS; i++) {
-			if (!per_cpu(ksoftirqd, hotcpu)[i].tsk)
-				continue;
-			kthread_bind(per_cpu(ksoftirqd, hotcpu)[i].tsk,
-				     cpumask_any(cpu_online_mask));
-		}
-#endif
-		/* Fall trough */
+
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN: {
-		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+		struct sched_param param;
 
 		for (i = 0; i < NR_SOFTIRQS; i++) {
+			param.sched_priority = MAX_RT_PRIO-1;
 			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
-			sched_setscheduler(p, SCHED_FIFO, &param);
 			per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
+			sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 			kthread_stop(p);
 		}
 		takeover_tasklets(hotcpu);
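
Annotation: on CPU_UP_CANCELED/CPU_DEAD each per-softirq thread is boosted to `MAX_RT_PRIO-1` before `kthread_stop()`, so it is guaranteed CPU time to drain and exit promptly even on a loaded system, and `sched_setscheduler_nocheck()` is used because this in-kernel caller should bypass the permission and rlimit checks that `sched_setscheduler()` applies on behalf of user space; the dead `#if 0` rebinding block is dropped. A rough user-space analogue of the boost-then-stop pattern (pthreads, not kernel code; needs CAP_SYS_NICE to actually succeed):

/* Boost a worker, then stop it -- compile with -lpthread. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
        (void)arg;
        /* ... would drain any remaining work, then exit ... */
        return NULL;
}

int main(void)
{
        pthread_t t;
        struct sched_param param = { .sched_priority = 99 }; /* like MAX_RT_PRIO-1 */
        int err;

        pthread_create(&t, NULL, worker, NULL);

        /* Boost the thread so it is guaranteed to get CPU time to finish: */
        err = pthread_setschedparam(t, SCHED_FIFO, &param);
        if (err)
                fprintf(stderr, "pthread_setschedparam: error %d (need root?)\n", err);

        pthread_join(t, NULL);  /* analogous to kthread_stop() */
        return 0;
}
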