/*
 * Copyright (c) 2019-2020 Cobham Gaisler AB
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/sparc/sparc.h>
#include "stack_offsets.h"

GTEXT(__sparc_trap_interrupt)
GTEXT(__sparc_trap_irq_offload)

/*
 * Interrupt trap handler
 *
 * - IU state is saved and restored
 *
 * On entry:
 * %l0: psr (set by trap code)
 * %l1: pc
 * %l2: npc
 * %l3: SPARC interrupt request level (bp_IRL)
 * %fp: %sp of current register window at trap time
 *
 * This module also implements the IRQ offload support. The handling is the
 * same as for asynchronous maskable interrupts, with the following exceptions:
 * - Do not re-execute the causing (ta) instruction at trap exit.
 * - A dedicated interrupt request level (0x8d) is used.
 * - z_sparc_enter_irq() knows how to interpret this interrupt request level.
 */
SECTION_SUBSEC_FUNC(TEXT, __sparc_trap_interrupt, __sparc_trap_irq_offload)
	/* Preparation in the case of synchronous IRQ offload. */
	mov	%l2, %l1
	add	%l2, 4, %l2
	set	0x8d, %l3

__sparc_trap_interrupt:
	/* %g2, %g3 are used at manual window overflow, so save them temporarily. */
	mov	%g2, %l4
	mov	%g3, %l5

	/*
	 * We may have trapped into the invalid window. If so, make it valid.
	 * (The srl shift count is the low 5 bits of %l0, i.e. PSR.CWP.)
	 */
	rd	%wim, %g2
	srl	%g2, %l0, %g3
	cmp	%g3, 1
	bne	.Lwodone
	 nop

	/* Do the window overflow: rotate WIM right by one. */
	sll	%g2, (CONFIG_SPARC_NWIN-1), %g3
	srl	%g2, 1, %g2
	or	%g2, %g3, %g2

	/* Enter window to save. */
	save
	/* Install the new wim calculated above. */
	mov	%g2, %wim
	nop
	nop
	nop
	/* Put registers on the dedicated save area of the ABI stack frame. */
	std	%l0, [%sp + 0x00]
	std	%l2, [%sp + 0x08]
	std	%l4, [%sp + 0x10]
	std	%l6, [%sp + 0x18]
	std	%i0, [%sp + 0x20]
	std	%i2, [%sp + 0x28]
	std	%i4, [%sp + 0x30]
	std	%i6, [%sp + 0x38]
	/* Leave saved window. */
	restore

.Lwodone:
	/*
	 * %l4: %g2 at trap time
	 * %l5: %g3 at trap time
	 *
	 * Save the state of the interrupted task, including the global
	 * registers, on the task stack.
	 *
	 * IMPORTANT: The globals are saved on the task stack as well, since a
	 * context switch might happen before the context of this interrupted
	 * task is restored.
	 */

	/* Allocate stack for the ISR context. */
	sub	%fp, ISF_SIZE, %sp
	/*
	 * %fp: %sp of interrupted task
	 * %sp: %sp of interrupted task - ISF_SIZE
	 * (fits what we store here)
	 *
	 * Save the interrupted context.
	 */
	std	%l0, [%sp + ISF_PSR_OFFSET]	/* psr pc */
	st	%l2, [%sp + ISF_NPC_OFFSET]	/* npc */
	st	%g1, [%sp + ISF_G1_OFFSET]	/* g1 */
	std	%l4, [%sp + ISF_G2_OFFSET]	/* g2 g3 */
	st	%g4, [%sp + ISF_G4_OFFSET]	/* g4 */
	rd	%y, %g1
	st	%g1, [%sp + ISF_Y_OFFSET]	/* y */

	/* %l5: reference to _kernel */
	set	_kernel, %l5
	/* Switch to the interrupt stack. */
	mov	%sp, %fp
	ld	[%l5 + _kernel_offset_to_irq_stack], %sp

	/* Allocate a full C stack frame. */
	sub	%sp, STACK_FRAME_SIZE, %sp
	/*
	 * %fp: %sp of interrupted task - ISF_SIZE
	 * %sp: irq stack - 96 (an ABI stack frame)
	 */

	/* Enable traps, raise PIL to mask all maskable interrupts. */
	or	%l0, PSR_PIL, %l6
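	/*
	 * NOTE: PSR_PIL is the processor interrupt level field (PSR bits
	 * 11:8). Setting all of its bits gives PIL = 15, which masks
	 * interrupt levels 1-14; level 15 is non-maskable on SPARC V8.
	 */
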
#if defined(CONFIG_FPU)
	/*
	 * We now check if the interrupted context was using the FPU. The
	 * result is stored in register %l5, which will hold either the value
	 * 0 (FPU not used) or PSR_EF (FPU used).
	 *
	 * If the FPU was used by the interrupted context, then we do two
	 * things:
	 * 1. Store FSR to memory. This has the side-effect of completing all
	 *    pending FPU operations.
	 * 2. Disable the FPU. Floating point instructions in the ISR will
	 *    trap.
	 *
	 * The FPU is enabled again, if needed, after the ISR has returned.
	 */
	set	PSR_EF, %l5
	andcc	%l0, %l5, %l5
	bne,a	1f
	 st	%fsr, [%sp]
1:
	andn	%l6, %l5, %l6
#endif
	wr	%l6, PSR_ET, %psr
	nop
	nop
	nop

#ifdef CONFIG_SCHED_THREAD_USAGE
	call	z_sched_usage_stop
	 nop
#endif

#ifdef CONFIG_TRACING_ISR
	call	sys_trace_isr_enter
	 nop
#endif

	/* The SPARC interrupt request level is the first argument. */
	call	z_sparc_enter_irq
	 mov	%l3, %o0

#ifdef CONFIG_TRACING_ISR
	call	sys_trace_isr_exit
	 nop
#endif

	/*
	 * %fp: %sp of interrupted task - ISF_SIZE
	 * %sp: irq stack - 96 (an ABI stack frame)
	 */

#ifdef CONFIG_PREEMPT_ENABLED
	/* Allocate stack for calling the C function and for its output value. */
	sub	%fp, (96+8), %sp
	/*
	 * %fp: %sp of interrupted task - ISF_SIZE
	 * %sp: %sp of interrupted task - ISF_SIZE - STACK_FRAME_SIZE - 8
	 */
	call	z_arch_get_next_switch_handle
	 add	%sp, 96, %o0
	/* We get the old thread as the "return value" on the stack. */
	ld	[%sp + 96], %o1
	/*
	 * %o0: new thread
	 * %o1: old thread
	 */
	cmp	%o0, %o1
	beq	.Lno_reschedule
	/* z_sparc_context_switch() is a leaf function not using the stack. */
	 add	%sp, (96+8-64), %sp

#if defined(CONFIG_FPU_SHARING)
	/* If PSR_EF was set at trap time, then store the FP context. */
	cmp	%l5, 0
	be	.Lno_fp_context
	 nop

	/*
	 * PSR_EF was 1 at trap time, so save the FP registers on the stack.
	 * - Set PSR_EF so we can access the FP registers.
	 * - Allocate space for the FP registers above the save area used for
	 *   the z_sparc_context_switch() call.
	 */
	wr	%l6, %l5, %psr
	nop
	nop
	nop

	sub	%sp, 34 * 4, %sp
	std	%f0, [%sp + 64 + 0x00]
	std	%f2, [%sp + 64 + 0x08]
	std	%f4, [%sp + 64 + 0x10]
	std	%f6, [%sp + 64 + 0x18]
	std	%f8, [%sp + 64 + 0x20]
	std	%f10, [%sp + 64 + 0x28]
	std	%f12, [%sp + 64 + 0x30]
	std	%f14, [%sp + 64 + 0x38]
	std	%f16, [%sp + 64 + 0x40]
	std	%f18, [%sp + 64 + 0x48]
	std	%f20, [%sp + 64 + 0x50]
	std	%f22, [%sp + 64 + 0x58]
	std	%f24, [%sp + 64 + 0x60]
	std	%f26, [%sp + 64 + 0x68]
	std	%f28, [%sp + 64 + 0x70]
	std	%f30, [%sp + 64 + 0x78]

	call	z_sparc_context_switch
	 st	%fsr, [%sp + 64 + 0x80]

	ldd	[%sp + 64 + 0x00], %f0
	ldd	[%sp + 64 + 0x08], %f2
	ldd	[%sp + 64 + 0x10], %f4
	ldd	[%sp + 64 + 0x18], %f6
	ldd	[%sp + 64 + 0x20], %f8
	ldd	[%sp + 64 + 0x28], %f10
	ldd	[%sp + 64 + 0x30], %f12
	ldd	[%sp + 64 + 0x38], %f14
	ldd	[%sp + 64 + 0x40], %f16
	ldd	[%sp + 64 + 0x48], %f18
	ldd	[%sp + 64 + 0x50], %f20
	ldd	[%sp + 64 + 0x58], %f22
	ldd	[%sp + 64 + 0x60], %f24
	ldd	[%sp + 64 + 0x68], %f26
	ldd	[%sp + 64 + 0x70], %f28
	ldd	[%sp + 64 + 0x78], %f30
	ld	[%sp + 64 + 0x80], %fsr
	ba	.Lno_reschedule
	 add	%sp, 34 * 4, %sp
.Lno_fp_context:
#endif /* CONFIG_FPU_SHARING */

	call	z_sparc_context_switch
	 nop
.Lno_reschedule:
#endif /* CONFIG_PREEMPT_ENABLED */
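	/*
	 * NOTE: Once the saved PSR is installed below, traps are disabled
	 * (PSR.ET = 0). A trap taken while PSR.ET = 0 puts a SPARC V8
	 * processor in error mode, so a window underflow at "restore" or
	 * "rett" can not be serviced by the normal trap handler. The return
	 * window is therefore prepared by hand if it is invalid.
	 */
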
	/* Restore the interrupted context. */
	ld	[%fp + ISF_Y_OFFSET], %g1
	wr	%g1, 0, %y

	ldd	[%fp + ISF_PSR_OFFSET], %l0	/* psr, pc */
	ld	[%fp + ISF_NPC_OFFSET], %l2	/* npc */
	/* NOTE: %g1 will be restored later. */

	/* %g1 is used to access the stack frame later. */
	mov	%fp, %g1
	ldd	[%fp + ISF_G2_OFFSET], %g2
	ld	[%fp + ISF_G4_OFFSET], %g4
	add	%fp, ISF_SIZE, %fp

	/*
	 * Install the PSR we got from the interrupted context. The current
	 * PSR.CWP is preserved. Keep PSR.ET=0 until we do "rett".
	 */
	rd	%psr, %l3
	and	%l3, PSR_CWP, %l3
	andn	%l0, (PSR_CWP | PSR_ET), %l0
	or	%l3, %l0, %l0
	mov	%l0, %psr
	nop
	nop
	nop

	/* Calculate %l6 := (cwp+1) % NWIN */
	rd	%wim, %l3
	set	(CONFIG_SPARC_NWIN), %l7
	add	%l0, 1, %l6
	and	%l6, PSR_CWP, %l6
	cmp	%l6, %l7
	bge,a	.Lwrapok
	 mov	0, %l6

.Lwrapok:
	/* Determine if we must prepare the return window. */
	/* %l5 := %wim >> (cwp+1) */
	srl	%l3, %l6, %l5
	/* %l5 is 1 if (cwp+1) is an invalid window. */
	cmp	%l5, 1
	bne	.Lwudone
	 sub	%l7, 1, %l7	/* %l7 := NWIN - 1 */

	/* Do the window underflow: rotate WIM left by one. */
	sll	%l3, 1, %l4
	srl	%l3, %l7, %l5
	wr	%l4, %l5, %wim
	nop
	nop
	nop

	restore
	ldd	[%g1 + 0x00], %l0
	ldd	[%g1 + 0x08], %l2
	ldd	[%g1 + 0x10], %l4
	ldd	[%g1 + 0x18], %l6
	ldd	[%g1 + 0x20], %i0
	ldd	[%g1 + 0x28], %i2
	ldd	[%g1 + 0x30], %i4
	ldd	[%g1 + 0x38], %i6
	save

.Lwudone:
	/*
	 * Restore %psr since we may have trashed the condition codes.
	 * PSR.ET is still 0.
	 */
	wr	%l0, %psr
	nop
	nop
	nop

	/* Restore %g1 of the interrupted context. */
	ld	[%g1 + ISF_G1_OFFSET], %g1

	jmp	%l1
	 rett	%l2
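
/*
 * NOTE: "rett %l2" in the delay slot of "jmp %l1" resumes the interrupted
 * context at pc/npc: rett increments CWP, restores PSR.S from PSR.PS and
 * sets PSR.ET=1. For the IRQ offload entry, pc/npc were adjusted at the
 * top of this handler so that the trapping "ta" instruction is not
 * re-executed.
 */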