author | Thomas Gleixner <tglx@linutronix.de> | 2008-05-05 15:58:15 +0200
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-05-12 21:28:06 +0200
commit | de9b10af1287bf25b9c0433de53a2e95ef611aa7 (patch) |
tree | 9bc83f349f9ca651ce9d7c56aee9c3c3c7ae79e0 /arch/x86/kernel/irq_32.c |
parent | 04b361abfdc522239e3a071f3afdebf5787d9f03 (diff) |
x86: janitor stack overflow warning patch
Add KERN_WARNING to the printk, as this could not be done in the
original patch, which allegedly only moves code around.

Un-#ifdef do_IRQ(): the CONFIG_DEBUG_STACKOVERFLOW and CONFIG_4KSTACKS
blocks are factored out into helpers (check_stack_overflow(),
print_stack_overflow(), execute_on_irq_stack()) with inline no-op
fallbacks, so do_IRQ() itself no longer needs conditional compilation.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
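
The un-#ifdef works because each config-dependent helper gains a no-op `static inline` fallback in the `#else` branch, so the caller compiles the same way whether or not the option is set. Below is a minimal userspace sketch of that pattern; the helper names mirror the patch, but `do_irq_sketch()`, `handle_irq()` and the function bodies are illustrative stand-ins, not kernel code:

```c
#include <stdio.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* "real" versions, compiled only when the option is enabled
 * (build with -DCONFIG_DEBUG_STACKOVERFLOW to select them) */
static int check_stack_overflow(void)  { return 1; /* pretend low stack */ }
static void print_stack_overflow(void) { printf("low stack detected\n"); }
#else
/* no-op fallbacks: they inline away, so the caller needs
 * no conditional compilation of its own */
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/* stand-in for desc->handle_irq() */
static void handle_irq(int irq)
{
	printf("handling irq %d\n", irq);
}

/* stand-in for do_IRQ(): note there is no #ifdef anywhere in the flow */
static void do_irq_sketch(int irq)
{
	int overflow = check_stack_overflow();

	if (overflow)
		print_stack_overflow();
	handle_irq(irq);
}

int main(void)
{
	do_irq_sketch(7);
	return 0;
}
```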
Diffstat (limited to 'arch/x86/kernel/irq_32.c')

-rw-r--r-- | arch/x86/kernel/irq_32.c | 136
1 file changed, 75 insertions, 61 deletions
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 3f76561da815..1c470d2e5af7 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -48,6 +48,29 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+/* Debugging check for stack overflow: is there less than 1KB free? */
+static int check_stack_overflow(void)
+{
+	long sp;
+
+	__asm__ __volatile__("andl %%esp,%0" :
+			     "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+	return sp < (sizeof(struct thread_info) + STACK_WARN);
+}
+
+static void print_stack_overflow(void)
+{
+	printk(KERN_WARNING "low stack detected by irq handler\n");
+	dump_stack();
+}
+
+#else
+static inline int check_stack_overflow(void) { return 0; }
+static inline void print_stack_overflow(void) { }
+#endif
+
 #ifdef CONFIG_4KSTACKS
 /*
  * per-CPU IRQ handling contexts (thread information and stack)
@@ -59,18 +82,12 @@ union irq_ctx {
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 
-#endif
-
-static void stack_overflow(void)
-{
-	printk("low stack detected by irq handler\n");
-	dump_stack();
-}
 
-static inline void call_on_stack2(void *func, void *stack,
-		unsigned long arg1, unsigned long arg2)
+static inline void call_on_stack(void *func, void *stack,
+				 unsigned long arg1, void *arg2)
 {
 	unsigned long bx;
+
 	asm volatile(
 		     "	xchgl	%%ebx,%%esp	\n"
 		     "	call	*%%edi		\n"
@@ -81,22 +98,61 @@ static inline void call_on_stack2(void *func, void *stack,
 		     : "memory", "cc", "ecx");
 }
 
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+{
+	union irq_ctx *curctx, *irqctx;
+	u32 *isp;
+
+	curctx = (union irq_ctx *) current_thread_info();
+	irqctx = hardirq_ctx[smp_processor_id()];
+
+	/*
+	 * this is where we switch to the IRQ stack. However, if we are
+	 * already using the IRQ stack (because we interrupted a hardirq
+	 * handler) we can't do that and just have to keep using the
+	 * current stack (which is the irq stack already after all)
+	 */
+	if (unlikely(curctx == irqctx))
+		return 0;
+
+	/* build the stack frame on the IRQ stack */
+	isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
+	irqctx->tinfo.task = curctx->tinfo.task;
+	irqctx->tinfo.previous_esp = current_stack_pointer;
+
+	/*
+	 * Copy the softirq bits in preempt_count so that the
+	 * softirq checks work in the hardirq context.
+	 */
+	irqctx->tinfo.preempt_count =
+		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
+	if (unlikely(overflow))
+		call_on_stack(print_stack_overflow, isp, 0, NULL);
+
+	call_on_stack(desc->handle_irq, isp, irq, desc);
+
+	return 1;
+}
+
+#else
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
+#endif
+
 /*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
  * handlers).
  */
 unsigned int do_IRQ(struct pt_regs *regs)
-{	
+{
 	struct pt_regs *old_regs;
 	/* high bit used in ret_from_ code */
-	int irq = ~regs->orig_ax;
+	int overflow, irq = ~regs->orig_ax;
 	struct irq_desc *desc = irq_desc + irq;
-#ifdef CONFIG_4KSTACKS
-	union irq_ctx *curctx, *irqctx;
-	u32 *isp;
-#endif
-	int overflow = 0;
 
 	if (unlikely((unsigned)irq >= NR_IRQS)) {
 		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
@@ -106,54 +162,12 @@ unsigned int do_IRQ(struct pt_regs *regs)
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 1KB free? */
-	{
-		long sp;
-
-		__asm__ __volatile__("andl %%esp,%0" :
-					"=r" (sp) : "0" (THREAD_SIZE - 1));
-		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN)))
-			overflow = 1;
-	}
-#endif
-
-#ifdef CONFIG_4KSTACKS
-	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = hardirq_ctx[smp_processor_id()];
+	overflow = check_stack_overflow();
 
-	/*
-	 * this is where we switch to the IRQ stack. However, if we are
-	 * already using the IRQ stack (because we interrupted a hardirq
-	 * handler) we can't do that and just have to keep using the
-	 * current stack (which is the irq stack already after all)
-	 */
-	if (curctx != irqctx) {
-		/* build the stack frame on the IRQ stack */
-		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-		irqctx->tinfo.task = curctx->tinfo.task;
-		irqctx->tinfo.previous_esp = current_stack_pointer;
-
-		/*
-		 * Copy the softirq bits in preempt_count so that the
-		 * softirq checks work in the hardirq context.
-		 */
-		irqctx->tinfo.preempt_count =
-			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-
-		/* Execute warning on interrupt stack */
+	if (!execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
-			call_on_stack2(stack_overflow, isp, 0, 0);
-
-		call_on_stack2(desc->handle_irq, isp, irq, (unsigned long)desc);
-	} else
-#endif
-	{
-		/* AK: Slightly bogus here */
-		if (overflow)
-			stack_overflow();
+			print_stack_overflow();
 		desc->handle_irq(irq, desc);
 	}
 
 	irq_exit();
 	set_irq_regs(old_regs);
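
For reference, the check factored into check_stack_overflow() works because the 32-bit kernel stack is a THREAD_SIZE-aligned block with struct thread_info at its base: masking ESP with THREAD_SIZE - 1 yields the stack pointer's offset inside that block, and an offset below sizeof(struct thread_info) + STACK_WARN means fewer than STACK_WARN bytes of stack remain before thread_info would be overwritten. Below is a runnable userspace simulation of just that arithmetic; the constants are assumed, illustrative values, not taken from any particular kernel configuration:

```c
#include <stdio.h>

#define THREAD_SIZE	8192UL	/* stack block size; must be a power of two */
#define STACK_WARN	1024UL	/* warn when less than 1KB is left */
#define THREAD_INFO_SZ	  64UL	/* assumed size of struct thread_info */

static int would_warn(unsigned long esp)
{
	/* equivalent of the patch's "andl %%esp,%0" with %0 preloaded
	 * with THREAD_SIZE - 1: the offset of ESP within the block */
	unsigned long sp = esp & (THREAD_SIZE - 1);

	return sp < (THREAD_INFO_SZ + STACK_WARN);
}

int main(void)
{
	unsigned long base = 0xc1000000UL;	/* pretend THREAD_SIZE-aligned block */

	/* the stack grows down, so a small offset means little room is left */
	printf("%d\n", would_warn(base + 4096));	/* mid-stack: prints 0 */
	printf("%d\n", would_warn(base + 512));		/* near thread_info: prints 1 */
	return 0;
}
```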