/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * ARM64 Cortex-A ISRs wrapper
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/drivers/interrupt_controller/gic.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

GDATA(_sw_isr_table)

/*
 * Wrapper around ISRs when inserted in software ISR table
 *
 * When inserted in the vector table, _isr_wrapper() demuxes the ISR table
 * using the running interrupt number as the index, and invokes the registered
 * ISR with its corresponding argument. When returning from the ISR, it
 * determines if a context switch needs to happen.
 *
 * Register use on entry (established below, not an external contract):
 *   x0 - _current_cpu pointer, later the active INTID, later the ISR arg
 *   x1 - scratch: nested count, IRQ stack pointer, ISR table entry
 *   x3 - ISR function pointer
 */

GTEXT(_isr_wrapper)
SECTION_FUNC(TEXT, _isr_wrapper)

	/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
	get_cpu	x0
	ldr	w1, [x0, #___cpu_t_nested_OFFSET]
	add	w2, w1, #1
	str	w2, [x0, #___cpu_t_nested_OFFSET]

	/*
	 * If not nested (previous count in w1 was 0): switch to the per-CPU
	 * IRQ stack and save the interrupted sp on it.  The 16-byte push
	 * keeps sp 16-byte aligned as AArch64 requires for sp-based accesses.
	 */
	cbnz	w1, 1f
	ldr	x1, [x0, #___cpu_t_irq_stack_OFFSET]
	mov	x2, sp
	mov	sp, x1
	str	x2, [sp, #-16]!
#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/*
	 * Record the bottom of the IRQ stack (top minus its size) as the
	 * CPU's current stack limit while we run on the IRQ stack.
	 */
	sub	x1, x1, #CONFIG_ISR_STACK_SIZE
	str	x1, [x0, #_cpu_offset_to_current_stack_limit]
#endif
1:
#ifdef CONFIG_SCHED_THREAD_USAGE
	/* Stop charging CPU time to the interrupted thread's usage stats. */
	bl	z_sched_usage_stop
#endif

#ifdef CONFIG_TRACING
	bl	sys_trace_isr_enter
#endif

	/* Get active IRQ number from the interrupt controller (INTID in x0) */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl	arm_gic_get_active
#else
	bl	z_soc_irq_get_active
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#if CONFIG_GIC_VER >= 3
	/*
	 * Ignore Special INTIDs 1020..1023, see 2.2.1 of Arm Generic Interrupt
	 * Controller Architecture Specification, GIC architecture version 3
	 * and version 4.
	 *
	 * INTIDs <= 1019 and > 1023 fall through to the normal bounds check
	 * at "oob"; only 1020..1023 skip straight to EOI as spurious.
	 */
	cmp	x0, 1019
	b.le	oob
	cmp	x0, 1023
	b.gt	oob
	b	spurious_continue

oob:
#endif
	/* IRQ out of bounds: treat as spurious, only signal EOI */
	mov	x1, #(CONFIG_NUM_IRQS - 1)
	cmp	x0, x1
	b.hi	spurious_continue

	/* Preserve the INTID across the ISR call for the later EOI */
	stp	x0, xzr, [sp, #-16]!

	/* Retrieve the interrupt service routine */
	ldr	x1, =_sw_isr_table
	add	x1, x1, x0, lsl #4	/* table is 16-byte wide */
	ldp	x0, x3, [x1]		/* arg in x0, ISR in x3 */

	/*
	 * Call the ISR. Unmask and mask again the IRQs to support nested
	 * exception handlers
	 */
	msr	daifclr, #(DAIFCLR_IRQ_BIT)
	blr	x3
	msr	daifset, #(DAIFSET_IRQ_BIT)

	/* Signal end-of-interrupt: restore the saved INTID into x0 */
	ldp	x0, xzr, [sp], #16

spurious_continue:
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl	arm_gic_eoi
#else
	bl	z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#ifdef CONFIG_TRACING
	bl	sys_trace_isr_exit
#endif

GTEXT(z_arm64_irq_done)
z_arm64_irq_done:
	/* if (--_current_cpu->nested != 0) exit */
	get_cpu	x0
	ldr	w1, [x0, #___cpu_t_nested_OFFSET]
	subs	w1, w1, #1
	str	w1, [x0, #___cpu_t_nested_OFFSET]
	/* str does not touch flags; condition is from the subs above */
	bne	exit

	/* No more nested: retrieve the task's stack saved on IRQ-stack entry. */
	ldr	x1, [sp]
	mov	sp, x1

	/* retrieve pointer to the current thread */
	ldr	x1, [x0, #___cpu_t_current_OFFSET]

#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* arch_curr_cpu()->arch.current_stack_limit = thread->arch.stack_limit */
	ldr	x2, [x1, #_thread_offset_to_stack_limit]
	str	x2, [x0, #_cpu_offset_to_current_stack_limit]
#endif

	/*
	 * Get next thread to schedule with z_get_next_switch_handle().
	 * We pass it a NULL as we didn't save the whole thread context yet.
	 * If no scheduling is necessary then NULL will be returned.
	 * The current thread pointer (x1) is spilled around the call since
	 * x1 is caller-saved under AAPCS64.
	 */
	str	x1, [sp, #-16]!
	mov	x0, xzr
	bl	z_get_next_switch_handle
	ldr	x1, [sp], #16
	cbz	x0, exit

	/*
	 * Switch thread
	 * x0: new thread
	 * x1: old thread
	 */
	bl	z_arm64_context_switch

exit:
#ifdef CONFIG_STACK_SENTINEL
	bl	z_check_stack_sentinel
#endif
	/* Tail-branch into the common exception-exit path (no return here) */
	b	z_arm64_exit_exc