.macro irq_stack_entry
mov x19, sp // preserve the original sp
- this_cpu_ptr irq_stack, x25, x26
-
/*
- * Check the lowest address on irq_stack for the irq_count value,
- * incremented by do_softirq_own_stack if we have re-enabled irqs
- * while on the irq_stack.
+ * Compare sp with the current thread_info; if the top
+ * ~(THREAD_SIZE - 1) bits match, we are on a task stack and
+ * should switch to the irq stack.
*/
- ldr x26, [x25]
- cbnz x26, 9998f // recursive use?
+ and x25, x19, #~(THREAD_SIZE - 1)
+ cmp x25, tsk
+ b.ne 9998f
- /* switch to the irq stack */
+ this_cpu_ptr irq_stack, x25, x26
mov x26, #IRQ_STACK_START_SP
add x26, x25, x26
+
+ /* switch to the irq stack */
mov sp, x26
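
For readers who do not follow the assembly: task stacks are THREAD_SIZE aligned with struct thread_info at the lowest address, so masking the low bits off sp recovers the thread_info address exactly when we are running on a task stack, which is what the three new instructions test. A minimal userspace sketch of that test (the THREAD_SIZE value, the addresses and the on_task_stack() helper are illustrative, not something the patch adds):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 16384UL	/* illustrative; the real task stack is THREAD_SIZE aligned */

/* C equivalent of: and x25, x19, #~(THREAD_SIZE - 1); cmp x25, tsk */
static bool on_task_stack(uintptr_t sp, uintptr_t thread_info)
{
	return (sp & ~(uintptr_t)(THREAD_SIZE - 1)) == thread_info;
}

int main(void)
{
	/* Pretend addresses: a THREAD_SIZE-aligned task stack with
	 * thread_info at its base, and an unrelated irq stack. */
	uintptr_t thread_info = 0x8000UL * THREAD_SIZE;
	uintptr_t irq_sp      = thread_info + 5 * THREAD_SIZE + 0x1010;

	printf("sp on task stack: %d\n",
	       on_task_stack(thread_info + THREAD_SIZE - 64, thread_info));
	printf("sp elsewhere:     %d\n", on_task_stack(irq_sp, thread_info));
	return 0;
}

Because irq_stack_entry now reads tsk to make this comparison, the el1_irq hunk below moves get_thread_info ahead of irq_handler so the register is valid by the time the macro runs.
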
@@ ... @@ el1_irq:
bl trace_hardirqs_off
#endif
+ get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT
- get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags

--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ ... @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/seq_file.h>
unsigned long irq_err_count;
-/*
- * irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned.
- * irq_stack[0] is used as irq_count, a non-zero value indicates the stack
- * is in use, and el?_irq() shouldn't switch to it. This is used to detect
- * recursive use of the irq_stack, it is lazily updated by
- * do_softirq_own_stack(), which is called on the irq_stack, before
- * re-enabling interrupts to process softirqs.
- */
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
-#define IRQ_COUNT() (*per_cpu(irq_stack, smp_processor_id()))
-
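
The shortened comment is the point: only the task stack is ever located by masking sp, so only it needs THREAD_SIZE alignment; the irq stack is always reached through the per-cpu pointer (this_cpu_ptr in the assembly), so the ABI's 16-byte stack alignment is enough. And with recursion now detected by the sp/tsk comparison above, the irq_count slot at irq_stack[0] and its IRQ_COUNT() accessor can simply go. A rough userspace stand-in for the declaration and the top-of-stack calculation (the IRQ_STACK_SIZE and IRQ_STACK_START_SP values here are assumptions for illustration, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define IRQ_STACK_SIZE		16384UL			/* assumed size */
#define IRQ_STACK_START_SP	(IRQ_STACK_SIZE - 16)	/* assumed offset of the initial sp */

/* 16-byte alignment is all that is required: the base is always reached
 * through this symbol (a per-cpu pointer in the kernel), never by masking sp. */
static _Alignas(16) unsigned long irq_stack[IRQ_STACK_SIZE / sizeof(long)];

int main(void)
{
	uintptr_t base = (uintptr_t)irq_stack;
	/* Mirrors: mov x26, #IRQ_STACK_START_SP; add x26, x25, x26 */
	uintptr_t sp = base + IRQ_STACK_START_SP;

	printf("irq stack base %#lx, initial sp %#lx\n",
	       (unsigned long)base, (unsigned long)sp);
	return 0;
}
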
int arch_show_interrupts(struct seq_file *p, int prec)
{
show_ipi_list(p, prec);

@@ ... @@ void __init init_IRQ(void)
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
-
-/*
- * do_softirq_own_stack() is called from irq_exit() before __do_softirq()
- * re-enables interrupts, at which point we may re-enter el?_irq(). We
- * increase irq_count here so that el1_irq() knows that it is already on the
- * irq stack.
- *
- * Called with interrupts disabled, so we don't worry about moving cpu, or
- * being interrupted while modifying irq_count.
- *
- * This function doesn't actually switch stack.
- */
-void do_softirq_own_stack(void)
-{
- int cpu = smp_processor_id();
-
- WARN_ON_ONCE(!irqs_disabled());
-
- if (on_irq_stack(current_stack_pointer, cpu)) {
- IRQ_COUNT()++;
- __do_softirq();
- IRQ_COUNT()--;
- } else {
- __do_softirq();
- }
-}
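
The removed function existed only to bump irq_count before __do_softirq() re-enabled interrupts, so that a nested el1_irq would know the irq stack was already in use and not reset sp to its top. The sp/tsk comparison gives the same answer without any bookkeeping: while on the irq stack, sp can never mask down to the current thread_info. A toy model of the two schemes side by side for the interesting sequence (all names and addresses here are illustrative, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 16384UL	/* illustrative */

/* Old scheme: a per-cpu counter, bumped by do_softirq_own_stack(). */
static int irq_count;

static bool old_should_switch(void)
{
	return irq_count == 0;
}

/* New scheme: derive the same answer from sp alone. */
static bool new_should_switch(uintptr_t sp, uintptr_t thread_info)
{
	return (sp & ~(uintptr_t)(THREAD_SIZE - 1)) == thread_info;
}

int main(void)
{
	uintptr_t thread_info = 0x4000UL * THREAD_SIZE;		/* task stack base */
	uintptr_t task_sp     = thread_info + 0x3000;
	uintptr_t irq_sp      = 0x9000UL * THREAD_SIZE + 0x3ff0;

	/* 1. Interrupt arrives on the task stack. */
	printf("irq on task stack: old=%d new=%d\n",
	       old_should_switch(), new_should_switch(task_sp, thread_info));

	/* 2. irq_exit() runs softirqs; do_softirq_own_stack() used to bump
	 *    the counter here, before interrupts were re-enabled. */
	irq_count++;

	/* 3. A second interrupt arrives while still on the irq stack:
	 *    both schemes say "do not switch". */
	printf("irq on irq stack:  old=%d new=%d\n",
	       old_should_switch(), new_should_switch(irq_sp, thread_info));

	irq_count--;
	return 0;
}

That is also why <linux/interrupt.h> is dropped above: nothing in this file calls __do_softirq() any more.
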