/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>

/* exports */
GTEXT(_exception)

/* import */
GTEXT(_Fault)
GTEXT(arch_swap)
#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(z_irq_do_offload)
GTEXT(_offload_routine)
#endif

/* Allows use of r1/at register, otherwise reserved for assembler use */
.set noat

/* Placed into the special 'exception' section so that the linker can put this
 * code at ALT_CPU_EXCEPTION_ADDR defined in system.h
 *
 * This is the common entry point for processor exceptions and interrupts from
 * the Internal Interrupt Controller (IIC).
 *
 * If the External Interrupt Controller (EIC) is in use, we will never get
 * here on behalf of an interrupt; instead the EIC driver will have set up a
 * vector table and the processor will jump directly into the appropriate
 * table entry.
 */
SECTION_FUNC(exception.entry, _exception)
	/* Reserve thread stack space for saving context */
	subi sp, sp, __z_arch_esf_t_SIZEOF

	/* Preserve all caller-saved registers onto the thread's stack */
	stw ra, __z_arch_esf_t_ra_OFFSET(sp)
	stw r1, __z_arch_esf_t_r1_OFFSET(sp)
	stw r2, __z_arch_esf_t_r2_OFFSET(sp)
	stw r3, __z_arch_esf_t_r3_OFFSET(sp)
	stw r4, __z_arch_esf_t_r4_OFFSET(sp)
	stw r5, __z_arch_esf_t_r5_OFFSET(sp)
	stw r6, __z_arch_esf_t_r6_OFFSET(sp)
	stw r7, __z_arch_esf_t_r7_OFFSET(sp)
	stw r8, __z_arch_esf_t_r8_OFFSET(sp)
	stw r9, __z_arch_esf_t_r9_OFFSET(sp)
	stw r10, __z_arch_esf_t_r10_OFFSET(sp)
	stw r11, __z_arch_esf_t_r11_OFFSET(sp)
	stw r12, __z_arch_esf_t_r12_OFFSET(sp)
	stw r13, __z_arch_esf_t_r13_OFFSET(sp)
	stw r14, __z_arch_esf_t_r14_OFFSET(sp)
	stw r15, __z_arch_esf_t_r15_OFFSET(sp)

	/* Store value of estatus control register */
	rdctl et, estatus
	stw et, __z_arch_esf_t_estatus_OFFSET(sp)

	/* ea - 4 is the address of the instruction when the exception
	 * happened, put this in the stack frame as well
	 */
	addi r15, ea, -4
	stw r15, __z_arch_esf_t_instr_OFFSET(sp)

	/* Figure out whether we are here because of an interrupt or an
	 * exception. If an interrupt, switch stacks and enter IRQ handling
	 * code. If an exception, remain on the current stack and enter
	 * exception handling code. From the CPU manual, ipending must be
	 * nonzero and estatus.PIE must be set for this to be considered an
	 * interrupt.
	 *
	 * Stick ipending in r4 since it will be an arg for _enter_irq
	 */
	rdctl r4, ipending
	beq r4, zero, not_interrupt
	/* We stashed estatus in et earlier */
	andi r15, et, 1
	beq r15, zero, not_interrupt

is_interrupt:
	/* If we get here, this is an interrupt */

	/* Grab a reference to _kernel in r10 so we can determine the
	 * current irq stack pointer
	 */
	movhi r10, %hi(_kernel)
	ori r10, r10, %lo(_kernel)

	/* Stash a copy of thread's sp in r12 so that we can put it on the IRQ
	 * stack
	 */
	mov r12, sp

	/* Switch to interrupt stack */
	ldw sp, _kernel_offset_to_irq_stack(r10)

	/* Store thread stack pointer onto IRQ stack */
	addi sp, sp, -4
	stw r12, 0(sp)

on_irq_stack:

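	/* Note: from here until the saved thread sp is reloaded from 0(sp)
	 * on the exit paths below, sp refers to the kernel's IRQ stack
	 * rather than the interrupted thread's stack.
	 */
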
	/* Enter C interrupt handling code. Value of ipending will be the
	 * function parameter since we put it in r4
	 */
	call _enter_irq

	/* Interrupt handler finished and the interrupt should be serviced
	 * now; the appropriate bits in ipending should be cleared
	 */

	/* Get a reference to _kernel again in r10 */
	movhi r10, %hi(_kernel)
	ori r10, r10, %lo(_kernel)

#ifdef CONFIG_PREEMPT_ENABLED
	ldw r11, _kernel_offset_to_current(r10)
	/* Determine whether the execution of the ISR requires a context
	 * switch
	 */

	/* Call into the kernel to see if a scheduling decision is necessary */
	ldw r2, _kernel_offset_to_ready_q_cache(r10)
	beq r2, r11, no_reschedule

	/*
	 * A context reschedule is required: the volatile registers of the
	 * interrupted thread are already saved on its stack. Utilize the
	 * existing arch_swap() primitive to save the remaining thread
	 * registers (including floating point) and perform a switch to the
	 * new thread.
	 */

	/* We put the thread stack pointer on top of the IRQ stack before
	 * we switched stacks. Restore it to go back to the thread stack
	 */
	ldw sp, 0(sp)

	/* Argument to arch_swap() is estatus since that's the state of the
	 * status register before the exception happened. When coming
	 * out of the context switch we need this info to restore
	 * IRQ lock state. We put this value in et earlier.
	 */
	mov r4, et

	call arch_swap
	jmpi _exception_exit
#else
	jmpi no_reschedule
#endif /* CONFIG_PREEMPT_ENABLED */

not_interrupt:

	/* Since this wasn't an interrupt we're not going to restart the
	 * faulting instruction.
	 *
	 * We earlier put ea - 4 in the stack frame, replace it with just ea
	 */
	stw ea, __z_arch_esf_t_instr_OFFSET(sp)

#ifdef CONFIG_IRQ_OFFLOAD
	/* Check the contents of _offload_routine. If non-NULL, jump into
	 * the interrupt code anyway.
	 */
	movhi r10, %hi(_offload_routine)
	ori r10, r10, %lo(_offload_routine)
	ldw r11, (r10)
	bne r11, zero, is_interrupt
#endif

_exception_enter_fault:
	/* If we get here, the exception wasn't an interrupt or an
	 * invocation of irq_offload(). Let _Fault() handle it in
	 * the C domain
	 */

	mov r4, sp
	call _Fault
	jmpi _exception_exit

no_reschedule:

	/* We put the thread stack pointer on top of the IRQ stack before
	 * we switched stacks. Restore it to go back to the thread stack
	 */
	ldw sp, 0(sp)

	/* Fall through */

_exception_exit:
	/* We are on the thread stack. Restore all saved registers
	 * and return to the interrupted context
	 */

	/* Return address from the exception */
	ldw ea, __z_arch_esf_t_instr_OFFSET(sp)

	/* Restore estatus
	 * XXX is this right???
	 */
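	/* Note: eret copies estatus back into status (see the comment before
	 * eret below), so restoring the saved estatus here re-establishes
	 * the pre-exception interrupt enable state.
	 */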
	ldw r5, __z_arch_esf_t_estatus_OFFSET(sp)
	wrctl estatus, r5

	/* Restore caller-saved registers */
	ldw ra, __z_arch_esf_t_ra_OFFSET(sp)
	ldw r1, __z_arch_esf_t_r1_OFFSET(sp)
	ldw r2, __z_arch_esf_t_r2_OFFSET(sp)
	ldw r3, __z_arch_esf_t_r3_OFFSET(sp)
	ldw r4, __z_arch_esf_t_r4_OFFSET(sp)
	ldw r5, __z_arch_esf_t_r5_OFFSET(sp)
	ldw r6, __z_arch_esf_t_r6_OFFSET(sp)
	ldw r7, __z_arch_esf_t_r7_OFFSET(sp)
	ldw r8, __z_arch_esf_t_r8_OFFSET(sp)
	ldw r9, __z_arch_esf_t_r9_OFFSET(sp)
	ldw r10, __z_arch_esf_t_r10_OFFSET(sp)
	ldw r11, __z_arch_esf_t_r11_OFFSET(sp)
	ldw r12, __z_arch_esf_t_r12_OFFSET(sp)
	ldw r13, __z_arch_esf_t_r13_OFFSET(sp)
	ldw r14, __z_arch_esf_t_r14_OFFSET(sp)
	ldw r15, __z_arch_esf_t_r15_OFFSET(sp)

	/* Put the stack pointer back where it was when we entered
	 * exception state
	 */
	addi sp, sp, __z_arch_esf_t_SIZEOF

	/* All done, copy estatus into status and transfer to ea */
	eret
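
/*
 * For reference: the __z_arch_esf_t_*_OFFSET and __z_arch_esf_t_SIZEOF
 * symbols used above come from offsets_short.h and are generated from the
 * architecture's exception stack frame (ESF) structure. A rough C sketch of
 * the frame implied by the stores and loads in this file; illustrative only,
 * the name below is hypothetical, member order is not implied, and the
 * authoritative definition lives in the arch headers:
 *
 *	struct esf_sketch {
 *		uint32_t ra;
 *		uint32_t r1;
 *		...
 *		uint32_t r15;
 *		uint32_t estatus;
 *		uint32_t instr;
 *	};
 */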