/*
 * Copyright (C) 2024 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef SOC_RISCV_NORDIC_NRF_COMMON_VPR_SOC_ISR_STACKING_H_
#define SOC_RISCV_NORDIC_NRF_COMMON_VPR_SOC_ISR_STACKING_H_

#include <zephyr/arch/riscv/irq.h>

#if !defined(_ASMLANGUAGE)

#include <zephyr/devicetree.h>

#define VPR_CPU DT_INST(0, nordic_vpr)

#if DT_PROP(VPR_CPU, nordic_bus_width) == 64

#define SOC_ISR_STACKING_ESF_DECLARE \
	struct arch_esf { \
		unsigned long s0; \
		unsigned long mstatus; \
		struct soc_esf soc_context; \
		\
		unsigned long t2; \
		unsigned long ra; \
		unsigned long t0; \
		unsigned long t1; \
		unsigned long a4; \
		unsigned long a5; \
		unsigned long a2; \
		unsigned long a3; \
		unsigned long a0; \
		unsigned long a1; \
		unsigned long mepc; \
		unsigned long _mcause; \
	} __aligned(16);

#else /* DT_PROP(VPR_CPU, nordic_bus_width) == 32 */

#define SOC_ISR_STACKING_ESF_DECLARE \
	struct arch_esf { \
		unsigned long s0; \
		unsigned long mstatus; \
		struct soc_esf soc_context; \
		\
		unsigned long ra; \
		unsigned long t2; \
		unsigned long t1; \
		unsigned long t0; \
		unsigned long a5; \
		unsigned long a4; \
		unsigned long a3; \
		unsigned long a2; \
		unsigned long a1; \
		unsigned long a0; \
		unsigned long _mcause; \
		unsigned long mepc; \
	} __aligned(16);

#endif /* DT_PROP(VPR_CPU, nordic_bus_width) == 64 */

/*
 * The VPR stacked mcause needs to hold a proper value on the initial stack;
 * the initial mret will restore this value.
 */
#define SOC_ISR_STACKING_ESR_INIT \
	stack_init->_mcause = 0;
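
/*
 * Usage sketch (illustrative assumption, not part of this header): the
 * generic architecture layer is expected to expand
 * SOC_ISR_STACKING_ESF_DECLARE to define struct arch_esf, and to expand
 * SOC_ISR_STACKING_ESR_INIT where a `stack_init` pointer to the initial
 * frame of a new thread is in scope (the macro body references it
 * directly), roughly:
 *
 *	SOC_ISR_STACKING_ESF_DECLARE;
 *
 *	struct arch_esf *stack_init = <initial stack frame>;
 *	SOC_ISR_STACKING_ESR_INIT
 */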

#else /* _ASMLANGUAGE */

/*
 * Size of the HW-managed part of the ESF:
 * sizeof(_mcause) + sizeof(mepc)
 */
#define ESF_HW_SIZEOF (0x8)

/*
 * Size of the SW-managed part of the ESF in case of exception
 */
#define ESF_SW_EXC_SIZEOF (__struct_arch_esf_SIZEOF - ESF_HW_SIZEOF)

/*
 * Size of the SW-managed part of the ESF in case of interrupt:
 * sizeof(__padding) + ... + sizeof(soc_context)
 */
#define ESF_SW_IRQ_SIZEOF (0x10)

/*
 * VPR needs an 8-byte-aligned SP when doing HW stacking. If this condition is
 * not met, it moves the SP by an additional 4 bytes once HW stacking is done
 * and indicates this in the LSB of the stacked MEPC. This bit has to be saved
 * and later restored, because Zephyr manages MEPC and knows nothing about the
 * additional offset.
 */
#define MEPC_SP_ALIGN_BIT_MASK (0x1UL)

#define STORE_SP_ALIGN_BIT_FROM_MEPC \
	addi t1, sp, __struct_arch_esf_soc_context_OFFSET; \
	lr t0, __struct_arch_esf_mepc_OFFSET(sp); \
	andi t0, t0, MEPC_SP_ALIGN_BIT_MASK; \
	sr t0, __soc_esf_t_sp_align_OFFSET(t1)

#define RESTORE_SP_ALIGN_BIT_TO_MEPC \
	addi t1, sp, __struct_arch_esf_soc_context_OFFSET; \
	lr t0, __soc_esf_t_sp_align_OFFSET(t1); \
	lr t1, __struct_arch_esf_mepc_OFFSET(sp); \
	or t2, t1, t0; \
	sr t2, __struct_arch_esf_mepc_OFFSET(sp)

/*
 * SW stacking: t0 is parked in mscratch while mcause is probed. For
 * exceptions, the HW has stacked only mcause and mepc, so the full SW-managed
 * area is reserved and the caller-saved registers are stored via
 * DO_CALLER_SAVED(sr). For interrupts, the HW has already stacked the
 * caller-saved registers, so only the SW-managed IRQ area is reserved. In
 * both cases the MEPC SP-alignment bit is saved afterwards.
 */
#define SOC_ISR_SW_STACKING \
	csrw mscratch, t0; \
	\
	csrr t0, mcause; \
	srli t0, t0, RISCV_MCAUSE_IRQ_POS; \
	bnez t0, stacking_is_interrupt; \
	\
	csrrw t0, mscratch, zero; \
	\
	addi sp, sp, -ESF_SW_EXC_SIZEOF; \
	DO_CALLER_SAVED(sr); \
	j stacking_keep_going; \
	\
stacking_is_interrupt: \
	addi sp, sp, -ESF_SW_IRQ_SIZEOF; \
	\
stacking_keep_going: \
	STORE_SP_ALIGN_BIT_FROM_MEPC

/*
 * SW unstacking mirrors the stacking sequence: the SP-alignment bit is merged
 * back into the stacked MEPC first, then the SW-managed area is released,
 * restoring the caller-saved registers only on the exception path, where they
 * were stored by software.
 */
#define SOC_ISR_SW_UNSTACKING \
	RESTORE_SP_ALIGN_BIT_TO_MEPC; \
	csrr t0, mcause; \
	srli t0, t0, RISCV_MCAUSE_IRQ_POS; \
	bnez t0, unstacking_is_interrupt; \
	\
	DO_CALLER_SAVED(lr); \
	addi sp, sp, ESF_SW_EXC_SIZEOF; \
	j unstacking_keep_going; \
	\
unstacking_is_interrupt: \
	addi sp, sp, ESF_SW_IRQ_SIZEOF; \
	\
unstacking_keep_going:

#endif /* _ASMLANGUAGE */

#endif /* SOC_RISCV_NORDIC_NRF_COMMON_VPR_SOC_ISR_STACKING_H_ */
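
/*
 * Usage sketch (assumption, for illustration only): when
 * CONFIG_RISCV_SOC_HAS_ISR_STACKING is enabled, the generic RISC-V ISR
 * wrapper is expected to expand these hooks around its handler logic,
 * roughly:
 *
 *	SOC_ISR_SW_STACKING
 *	<dispatch the IRQ or exception>
 *	SOC_ISR_SW_UNSTACKING
 *	mret
 */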