Commit 403d8efc authored by Thomas Gleixner, committed by Ingo Molnar

x86: irq_32 move 4kstacks code to one place

Move the 4KSTACKS related code to one place. This allows us to un-#ifdef
do_IRQ() and to share the execute-on-stack helper between the stack
overflow printk and the softirq call.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent de9b10af
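For readers who want to see the stack-switch trick that call_on_stack() now centralizes, here is a minimal, self-contained userspace sketch of the same pattern. It is illustrative only and not part of the patch; it assumes a 32-bit x86 build (gcc -m32, non-PIC), and the file and function names are made up. The old stack pointer is parked in %ebx, %esp is pointed at the replacement stack, the target function is called through %edi, and %esp is restored afterwards.

/* demo_call_on_stack.c - illustrative sketch only; 32-bit x86, gcc -m32, non-PIC */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_STACK_SIZE	16384

/* Runs on the replacement stack; printing a local's address shows the switch. */
static void on_new_stack(void)
{
	int marker;

	printf("running with stack near %p\n", (void *)&marker);
}

/*
 * Same pattern as the kernel helper: swap %esp with the new stack top held
 * in %ebx, call the function through %edi, then restore %esp from %ebx.
 */
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D" (func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

int main(void)
{
	int marker;
	/* 16-byte aligned replacement stack; x86 stacks grow down, so pass its top. */
	char *area = aligned_alloc(16, DEMO_STACK_SIZE);

	if (!area)
		return 1;

	printf("main stack near %p\n", (void *)&marker);
	call_on_stack(on_new_stack, area + DEMO_STACK_SIZE);
	free(area);
	return 0;
}

In the patch below, these same three instructions back both paths: the softirq path simply calls call_on_stack(__do_softirq, isp), while the hardirq path open-codes them so that irq and desc can be handed to desc->handle_irq in registers.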
@@ -83,26 +83,28 @@ union irq_ctx {
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 
-static inline void call_on_stack(void *func, void *stack,
-				 unsigned long arg1, void *arg2)
+static char softirq_stack[NR_CPUS * THREAD_SIZE]
+		__attribute__((__section__(".bss.page_aligned")));
+
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+		__attribute__((__section__(".bss.page_aligned")));
+
+static void call_on_stack(void *func, void *stack)
 {
-	unsigned long bx;
-
-	asm volatile(
-		"	xchgl	%%ebx,%%esp	\n"
-		"	call	*%%edi		\n"
-		"	movl	%%ebx,%%esp	\n"
-		: "=a" (arg1), "=d" (arg2), "=b" (bx)
-		: "0" (arg1), "1" (arg2), "2" (stack),
-		  "D" (func)
-		: "memory", "cc", "ecx");
+	asm volatile("xchgl	%%ebx,%%esp	\n"
+		     "call	*%%edi		\n"
+		     "movl	%%ebx,%%esp	\n"
+		     : "=b" (stack)
+		     : "0" (stack),
+		       "D"(func)
+		     : "memory", "cc", "edx", "ecx", "eax");
 }
 
 static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 {
 	union irq_ctx *curctx, *irqctx;
-	u32 *isp;
+	u32 *isp, arg1, arg2;
 
 	curctx = (union irq_ctx *) current_thread_info();
 	irqctx = hardirq_ctx[smp_processor_id()];
@@ -130,64 +132,22 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
 
 	if (unlikely(overflow))
-		call_on_stack(print_stack_overflow, isp, 0, NULL);
+		call_on_stack(print_stack_overflow, isp);
 
-	call_on_stack(desc->handle_irq, isp, irq, desc);
-
-	return 1;
-}
-
-#else
-static inline int
-execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
-#endif
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-unsigned int do_IRQ(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	/* high bit used in ret_from_ code */
-	int overflow, irq = ~regs->orig_ax;
-	struct irq_desc *desc = irq_desc + irq;
-
-	if (unlikely((unsigned)irq >= NR_IRQS)) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-					__func__, irq);
-		BUG();
-	}
-
-	old_regs = set_irq_regs(regs);
-	irq_enter();
-
-	overflow = check_stack_overflow();
-
-	if (!execute_on_irq_stack(overflow, desc, irq)) {
-		if (unlikely(overflow))
-			print_stack_overflow();
-		desc->handle_irq(irq, desc);
-	}
-
-	irq_exit();
-	set_irq_regs(old_regs);
+	asm volatile("xchgl	%%ebx,%%esp	\n"
+		     "call	*%%edi		\n"
+		     "movl	%%ebx,%%esp	\n"
+		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
+		     : "0" (irq), "1" (desc), "2" (isp),
+		       "D" (desc->handle_irq)
+		     : "memory", "cc", "ecx");
 	return 1;
 }
 
-#ifdef CONFIG_4KSTACKS
-
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__section__(".bss.page_aligned")));
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__section__(".bss.page_aligned")));
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
-void irq_ctx_init(int cpu)
+void __cpuinit irq_ctx_init(int cpu)
 {
 	union irq_ctx *irqctx;
 
@@ -212,7 +172,7 @@ void irq_ctx_init(int cpu)
 
 	softirq_ctx[cpu] = irqctx;
 
-	printk("CPU %u irqstacks, hard=%p soft=%p\n",
+	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
 		cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
 }
 
@@ -242,14 +202,7 @@ asmlinkage void do_softirq(void)
 		/* build the stack frame on the softirq stack */
 		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
 
-		asm volatile(
-			"	xchgl	%%ebx,%%esp	\n"
-			"	call	__do_softirq	\n"
-			"	movl	%%ebx,%%esp	\n"
-			: "=b"(isp)
-			: "0"(isp)
-			: "memory", "cc", "edx", "ecx", "eax"
-		);
+		call_on_stack(__do_softirq, isp);
 		/*
 		 * Shouldnt happen, we returned above if in_interrupt():
 		 */
@@ -258,8 +211,46 @@ asmlinkage void do_softirq(void)
 
 	local_irq_restore(flags);
 }
+
+#else
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
 #endif
 
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	/* high bit used in ret_from_ code */
+	int overflow, irq = ~regs->orig_ax;
+	struct irq_desc *desc = irq_desc + irq;
+
+	if (unlikely((unsigned)irq >= NR_IRQS)) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+					__func__, irq);
+		BUG();
+	}
+
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+
+	overflow = check_stack_overflow();
+
+	if (!execute_on_irq_stack(overflow, desc, irq)) {
+		if (unlikely(overflow))
+			print_stack_overflow();
+		desc->handle_irq(irq, desc);
+	}
+
+	irq_exit();
+	set_irq_regs(old_regs);
+	return 1;
+}
+
 /*
  * Interrupt statistics:
  */