arm64: remove irq_count and do_softirq_own_stack()
authorJames Morse <james.morse@arm.com>
Fri, 18 Dec 2015 16:01:47 +0000 (16:01 +0000)
committerWill Deacon <will.deacon@arm.com>
Mon, 21 Dec 2015 17:26:01 +0000 (17:26 +0000)
sysrq_handle_reboot() re-enables interrupts while on the irq stack. The
irq_stack implementation wrongly assumed this would only ever happen
via the softirq path, allowing it to update irq_count late, in
do_softirq_own_stack().

This means if an irq occurs in sysrq_handle_reboot(), during
emergency_restart() the stack will be corrupted, as irq_count wasn't
updated.

Lose the optimisation, and instead of moving the adding/subtracting of
irq_count into irq_stack_entry/irq_stack_exit, remove it, and compare
sp_el0 (struct thread_info) with sp & ~(THREAD_SIZE - 1). This tells us
whether we are on a task stack; if so, we can safely switch to the irq
stack. Finally, remove do_softirq_own_stack(), as we don't need it anymore.

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use get_thread_info macro]
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/include/asm/irq.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/irq.c

index 3bece43..b77197d 100644 (file)
@@ -11,8 +11,6 @@
 #include <asm-generic/irq.h>
 #include <asm/thread_info.h>
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 struct pt_regs;
 
 DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
index 0667fb7..c0db321 100644 (file)
@@ -181,19 +181,20 @@ alternative_endif
        .macro  irq_stack_entry
        mov     x19, sp                 // preserve the original sp
 
-       this_cpu_ptr irq_stack, x25, x26
-
        /*
-        * Check the lowest address on irq_stack for the irq_count value,
-        * incremented by do_softirq_own_stack if we have re-enabled irqs
-        * while on the irq_stack.
+        * Compare sp with the current thread_info, if the top
+        * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
+        * should switch to the irq stack.
         */
-       ldr     x26, [x25]
-       cbnz    x26, 9998f              // recursive use?
+       and     x25, x19, #~(THREAD_SIZE - 1)
+       cmp     x25, tsk
+       b.ne    9998f
 
-       /* switch to the irq stack */
+       this_cpu_ptr irq_stack, x25, x26
        mov     x26, #IRQ_STACK_START_SP
        add     x26, x25, x26
+
+       /* switch to the irq stack */
        mov     sp, x26
 
        /*
@@ -405,10 +406,10 @@ el1_irq:
        bl      trace_hardirqs_off
 #endif
 
+       get_thread_info tsk
        irq_handler
 
 #ifdef CONFIG_PREEMPT
-       get_thread_info tsk
        ldr     w24, [tsk, #TI_PREEMPT]         // get preempt count
        cbnz    w24, 1f                         // preempt count != 0
        ldr     x0, [tsk, #TI_FLAGS]            // get flags
index ff7ebb7..2386b26 100644 (file)
 #include <linux/irq.h>
 #include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
 #include <linux/irqchip.h>
 #include <linux/seq_file.h>
 
 unsigned long irq_err_count;
 
-/*
- * irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned.
- * irq_stack[0] is used as irq_count, a non-zero value indicates the stack
- * is in use, and el?_irq() shouldn't switch to it. This is used to detect
- * recursive use of the irq_stack, it is lazily updated by
- * do_softirq_own_stack(), which is called on the irq_stack, before
- * re-enabling interrupts to process softirqs.
- */
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
 DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
 
-#define IRQ_COUNT()    (*per_cpu(irq_stack, smp_processor_id()))
-
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
        show_ipi_list(p, prec);
@@ -66,29 +56,3 @@ void __init init_IRQ(void)
        if (!handle_arch_irq)
                panic("No interrupt controller found.");
 }
-
-/*
- * do_softirq_own_stack() is called from irq_exit() before __do_softirq()
- * re-enables interrupts, at which point we may re-enter el?_irq(). We
- * increase irq_count here so that el1_irq() knows that it is already on the
- * irq stack.
- *
- * Called with interrupts disabled, so we don't worry about moving cpu, or
- * being interrupted while modifying irq_count.
- *
- * This function doesn't actually switch stack.
- */
-void do_softirq_own_stack(void)
-{
-       int cpu = smp_processor_id();
-
-       WARN_ON_ONCE(!irqs_disabled());
-
-       if (on_irq_stack(current_stack_pointer, cpu)) {
-               IRQ_COUNT()++;
-               __do_softirq();
-               IRQ_COUNT()--;
-       } else {
-               __do_softirq();
-       }
-}