/***************************************************************************
 * Copyright (c) 2024 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 **************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#include "tx_port.h"

    .section .text
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_context_restore                        RISC-V64/GNU      */
/*                                                           6.2.1        */
/*  AUTHOR                                                                */
/*                                                                        */
/*    Scott Larson, Microsoft Corporation                                 */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function restores the interrupt context if it is processing a  */
/*    nested interrupt.  If not, it returns to the interrupt thread if no */
/*    preemption is necessary.  Otherwise, if preemption is necessary or  */
/*    if no thread was running, the function returns to the scheduler.    */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_schedule                   Thread scheduling routine     */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    ISRs                                  Interrupt Service Routines    */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  03-08-2023      Scott Larson            Initial Version 6.2.1         */
/*                                                                        */
/**************************************************************************/
/* VOID   _tx_thread_context_restore(VOID)
{  */
    .global _tx_thread_context_restore
_tx_thread_context_restore:

    /* Lockout interrupts.  */

    csrci   mstatus, 0x08                           // Disable interrupts (clear mstatus.MIE)

#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
    call    _tx_execution_isr_exit                  // Call the ISR execution exit function
#endif

    /* Determine if interrupts are nested.  */
    /* if (--_tx_thread_system_state)
    {  */

    la      t0, _tx_thread_system_state             // Pickup addr of nested interrupt count
    LOAD    t1, 0(t0)                               // Pickup nested interrupt count
    addi    t1, t1, -1                              // Decrement the nested interrupt counter
    STORE   t1, 0(t0)                               // Store new nested count
    beqz    t1, _tx_thread_not_nested_restore       // If 0, not nested restore

    /* Interrupts are nested.  */

    /* Just recover the saved registers and return to the point of
       interrupt.  */

    /* Recover floating point registers.  */
#if defined(__riscv_float_abi_single)
    flw     f0, 31*REGBYTES(sp)                     // Recover ft0
    flw     f1, 32*REGBYTES(sp)                     // Recover ft1
    flw     f2, 33*REGBYTES(sp)                     // Recover ft2
    flw     f3, 34*REGBYTES(sp)                     // Recover ft3
    flw     f4, 35*REGBYTES(sp)                     // Recover ft4
    flw     f5, 36*REGBYTES(sp)                     // Recover ft5
    flw     f6, 37*REGBYTES(sp)                     // Recover ft6
    flw     f7, 38*REGBYTES(sp)                     // Recover ft7
    flw     f10,41*REGBYTES(sp)                     // Recover fa0
    flw     f11,42*REGBYTES(sp)                     // Recover fa1
    flw     f12,43*REGBYTES(sp)                     // Recover fa2
    flw     f13,44*REGBYTES(sp)                     // Recover fa3
    flw     f14,45*REGBYTES(sp)                     // Recover fa4
    flw     f15,46*REGBYTES(sp)                     // Recover fa5
    flw     f16,47*REGBYTES(sp)                     // Recover fa6
    flw     f17,48*REGBYTES(sp)                     // Recover fa7
    flw     f28,59*REGBYTES(sp)                     // Recover ft8
    flw     f29,60*REGBYTES(sp)                     // Recover ft9
    flw     f30,61*REGBYTES(sp)                     // Recover ft10
    flw     f31,62*REGBYTES(sp)                     // Recover ft11
    lw      t0, 63*REGBYTES(sp)                     // Recover fcsr
    csrw    fcsr, t0                                //
#elif defined(__riscv_float_abi_double)
    fld     f0, 31*REGBYTES(sp)                     // Recover ft0
    fld     f1, 32*REGBYTES(sp)                     // Recover ft1
    fld     f2, 33*REGBYTES(sp)                     // Recover ft2
    fld     f3, 34*REGBYTES(sp)                     // Recover ft3
    fld     f4, 35*REGBYTES(sp)                     // Recover ft4
    fld     f5, 36*REGBYTES(sp)                     // Recover ft5
    fld     f6, 37*REGBYTES(sp)                     // Recover ft6
    fld     f7, 38*REGBYTES(sp)                     // Recover ft7
    fld     f10,41*REGBYTES(sp)                     // Recover fa0
    fld     f11,42*REGBYTES(sp)                     // Recover fa1
    fld     f12,43*REGBYTES(sp)                     // Recover fa2
    fld     f13,44*REGBYTES(sp)                     // Recover fa3
    fld     f14,45*REGBYTES(sp)                     // Recover fa4
    fld     f15,46*REGBYTES(sp)                     // Recover fa5
    fld     f16,47*REGBYTES(sp)                     // Recover fa6
    fld     f17,48*REGBYTES(sp)                     // Recover fa7
    fld     f28,59*REGBYTES(sp)                     // Recover ft8
    fld     f29,60*REGBYTES(sp)                     // Recover ft9
    fld     f30,61*REGBYTES(sp)                     // Recover ft10
    fld     f31,62*REGBYTES(sp)                     // Recover ft11
    LOAD    t0, 63*REGBYTES(sp)                     // Recover fcsr
    csrw    fcsr, t0                                //
#endif

    /* Recover standard registers.  */

    /* Restore registers,
       Skip global pointer because that does not change.
       Also skip the saved registers since they have been restored by any function we called,
       except s0 since we use it ourselves.  */

    LOAD    t0, 30*REGBYTES(sp)                     // Recover mepc
    csrw    mepc, t0                                // Setup mepc
    li      t0, 0x1880                              // Prepare mstatus: MPP=3 (Machine), MPIE set
    csrw    mstatus, t0                             // mret will restore MIE from MPIE

    LOAD    x1, 28*REGBYTES(sp)                     // Recover RA
    LOAD    x5, 19*REGBYTES(sp)                     // Recover t0
    LOAD    x6, 18*REGBYTES(sp)                     // Recover t1
    LOAD    x7, 17*REGBYTES(sp)                     // Recover t2
    LOAD    x8, 12*REGBYTES(sp)                     // Recover s0
    LOAD    x10, 27*REGBYTES(sp)                    // Recover a0
    LOAD    x11, 26*REGBYTES(sp)                    // Recover a1
    LOAD    x12, 25*REGBYTES(sp)                    // Recover a2
    LOAD    x13, 24*REGBYTES(sp)                    // Recover a3
    LOAD    x14, 23*REGBYTES(sp)                    // Recover a4
    LOAD    x15, 22*REGBYTES(sp)                    // Recover a5
    LOAD    x16, 21*REGBYTES(sp)                    // Recover a6
    LOAD    x17, 20*REGBYTES(sp)                    // Recover a7
    LOAD    x28, 16*REGBYTES(sp)                    // Recover t3
    LOAD    x29, 15*REGBYTES(sp)                    // Recover t4
    LOAD    x30, 14*REGBYTES(sp)                    // Recover t5
    LOAD    x31, 13*REGBYTES(sp)                    // Recover t6

#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double)
    addi    sp, sp, 65*REGBYTES                     // Recover stack frame - with floating point enabled
#else
    addi    sp, sp, 32*REGBYTES                     // Recover stack frame - without floating point enabled
#endif
    mret                                            // Return to point of interrupt

    /* }  */
_tx_thread_not_nested_restore:
    /* Determine if a thread was interrupted and no preemption is required.  */
    /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
               || (_tx_thread_preempt_disable))
    {  */

    LOAD    t1, _tx_thread_current_ptr              // Pickup current thread pointer
    beqz    t1, _tx_thread_idle_system_restore      // If NULL, idle system restore

    LOAD    t2, _tx_thread_preempt_disable          // Pickup preempt disable flag
    bgtz    t2, _tx_thread_no_preempt_restore       // If set, restore interrupted thread

    LOAD    t2, _tx_thread_execute_ptr              // Pickup thread execute pointer
    bne     t1, t2, _tx_thread_preempt_restore      // If higher-priority thread is ready, preempt


_tx_thread_no_preempt_restore:
    /* Restore interrupted thread or ISR.  */

    /* Pickup the saved stack pointer.  */
    /* SP = _tx_thread_current_ptr -> tx_thread_stack_ptr;  */

    LOAD    sp, 2*REGBYTES(t1)                      // Switch back to thread's stack

    /* Recover floating point registers.  */
#if defined(__riscv_float_abi_single)
    flw     f0, 31*REGBYTES(sp)                     // Recover ft0
    flw     f1, 32*REGBYTES(sp)                     // Recover ft1
    flw     f2, 33*REGBYTES(sp)                     // Recover ft2
    flw     f3, 34*REGBYTES(sp)                     // Recover ft3
    flw     f4, 35*REGBYTES(sp)                     // Recover ft4
    flw     f5, 36*REGBYTES(sp)                     // Recover ft5
    flw     f6, 37*REGBYTES(sp)                     // Recover ft6
    flw     f7, 38*REGBYTES(sp)                     // Recover ft7
    flw     f10,41*REGBYTES(sp)                     // Recover fa0
    flw     f11,42*REGBYTES(sp)                     // Recover fa1
    flw     f12,43*REGBYTES(sp)                     // Recover fa2
    flw     f13,44*REGBYTES(sp)                     // Recover fa3
    flw     f14,45*REGBYTES(sp)                     // Recover fa4
    flw     f15,46*REGBYTES(sp)                     // Recover fa5
    flw     f16,47*REGBYTES(sp)                     // Recover fa6
    flw     f17,48*REGBYTES(sp)                     // Recover fa7
    flw     f28,59*REGBYTES(sp)                     // Recover ft8
    flw     f29,60*REGBYTES(sp)                     // Recover ft9
    flw     f30,61*REGBYTES(sp)                     // Recover ft10
    flw     f31,62*REGBYTES(sp)                     // Recover ft11
    lw      t0, 63*REGBYTES(sp)                     // Recover fcsr
    csrw    fcsr, t0                                //
#elif defined(__riscv_float_abi_double)
    fld     f0, 31*REGBYTES(sp)                     // Recover ft0
    fld     f1, 32*REGBYTES(sp)                     // Recover ft1
    fld     f2, 33*REGBYTES(sp)                     // Recover ft2
    fld     f3, 34*REGBYTES(sp)                     // Recover ft3
    fld     f4, 35*REGBYTES(sp)                     // Recover ft4
    fld     f5, 36*REGBYTES(sp)                     // Recover ft5
    fld     f6, 37*REGBYTES(sp)                     // Recover ft6
    fld     f7, 38*REGBYTES(sp)                     // Recover ft7
    fld     f10,41*REGBYTES(sp)                     // Recover fa0
    fld     f11,42*REGBYTES(sp)                     // Recover fa1
    fld     f12,43*REGBYTES(sp)                     // Recover fa2
    fld     f13,44*REGBYTES(sp)                     // Recover fa3
    fld     f14,45*REGBYTES(sp)                     // Recover fa4
    fld     f15,46*REGBYTES(sp)                     // Recover fa5
    fld     f16,47*REGBYTES(sp)                     // Recover fa6
    fld     f17,48*REGBYTES(sp)                     // Recover fa7
    fld     f28,59*REGBYTES(sp)                     // Recover ft8
    fld     f29,60*REGBYTES(sp)                     // Recover ft9
    fld     f30,61*REGBYTES(sp)                     // Recover ft10
    fld     f31,62*REGBYTES(sp)                     // Recover ft11
    LOAD    t0, 63*REGBYTES(sp)                     // Recover fcsr
    csrw    fcsr, t0                                //
#endif

    /* Recover the saved context and return to the point of interrupt.  */

    /* Recover standard registers.  */
    /* Restore registers,
       Skip global pointer because that does not change  */

    LOAD    t0, 30*REGBYTES(sp)                     // Recover mepc (was hard-coded 240 = 30*8; use REGBYTES for consistency)
    csrw    mepc, t0                                // Setup mepc
    li      t0, 0x1880                              // Prepare mstatus: MPP=3 (Machine), MPIE set
    csrw    mstatus, t0                             // mret will restore MIE from MPIE

    LOAD    x1, 28*REGBYTES(sp)                     // Recover RA
    LOAD    x5, 19*REGBYTES(sp)                     // Recover t0
    LOAD    x6, 18*REGBYTES(sp)                     // Recover t1
    LOAD    x7, 17*REGBYTES(sp)                     // Recover t2
    LOAD    x8, 12*REGBYTES(sp)                     // Recover s0
    LOAD    x10, 27*REGBYTES(sp)                    // Recover a0
    LOAD    x11, 26*REGBYTES(sp)                    // Recover a1
    LOAD    x12, 25*REGBYTES(sp)                    // Recover a2
    LOAD    x13, 24*REGBYTES(sp)                    // Recover a3
    LOAD    x14, 23*REGBYTES(sp)                    // Recover a4
    LOAD    x15, 22*REGBYTES(sp)                    // Recover a5
    LOAD    x16, 21*REGBYTES(sp)                    // Recover a6
    LOAD    x17, 20*REGBYTES(sp)                    // Recover a7
    LOAD    x28, 16*REGBYTES(sp)                    // Recover t3
    LOAD    x29, 15*REGBYTES(sp)                    // Recover t4
    LOAD    x30, 14*REGBYTES(sp)                    // Recover t5
    LOAD    x31, 13*REGBYTES(sp)                    // Recover t6

#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double)
    addi    sp, sp, 65*REGBYTES                     // Recover stack frame - with floating point enabled
#else
    addi    sp, sp, 32*REGBYTES                     // Recover stack frame - without floating point enabled
#endif
    mret                                            // Return to point of interrupt

    /* }
    else
    {  */
_tx_thread_preempt_restore:
    /* Instead of directly activating the thread again, ensure we save the
       entire stack frame by saving the remaining registers.  */

    LOAD    t0, 2*REGBYTES(t1)                      // Pickup thread's stack pointer
    ori     t3, x0, 1                               // Build interrupt stack type
    STORE   t3, 0(t0)                               // Store stack type

    /* Store floating point preserved registers.  */
#ifdef __riscv_float_abi_single
    fsw     f8, 39*REGBYTES(t0)                     // Store fs0
    fsw     f9, 40*REGBYTES(t0)                     // Store fs1
    fsw     f18, 49*REGBYTES(t0)                    // Store fs2
    fsw     f19, 50*REGBYTES(t0)                    // Store fs3
    fsw     f20, 51*REGBYTES(t0)                    // Store fs4
    fsw     f21, 52*REGBYTES(t0)                    // Store fs5
    fsw     f22, 53*REGBYTES(t0)                    // Store fs6
    fsw     f23, 54*REGBYTES(t0)                    // Store fs7
    fsw     f24, 55*REGBYTES(t0)                    // Store fs8
    fsw     f25, 56*REGBYTES(t0)                    // Store fs9
    fsw     f26, 57*REGBYTES(t0)                    // Store fs10
    fsw     f27, 58*REGBYTES(t0)                    // Store fs11
#elif defined(__riscv_float_abi_double)
    fsd     f8, 39*REGBYTES(t0)                     // Store fs0
    fsd     f9, 40*REGBYTES(t0)                     // Store fs1
    fsd     f18, 49*REGBYTES(t0)                    // Store fs2
    fsd     f19, 50*REGBYTES(t0)                    // Store fs3
    fsd     f20, 51*REGBYTES(t0)                    // Store fs4
    fsd     f21, 52*REGBYTES(t0)                    // Store fs5
    fsd     f22, 53*REGBYTES(t0)                    // Store fs6
    fsd     f23, 54*REGBYTES(t0)                    // Store fs7
    fsd     f24, 55*REGBYTES(t0)                    // Store fs8
    fsd     f25, 56*REGBYTES(t0)                    // Store fs9
    fsd     f26, 57*REGBYTES(t0)                    // Store fs10
    fsd     f27, 58*REGBYTES(t0)                    // Store fs11
#endif

    /* Store standard preserved registers.  */

    STORE   x9, 11*REGBYTES(t0)                     // Store s1
    STORE   x18, 10*REGBYTES(t0)                    // Store s2
    STORE   x19, 9*REGBYTES(t0)                     // Store s3
    STORE   x20, 8*REGBYTES(t0)                     // Store s4
    STORE   x21, 7*REGBYTES(t0)                     // Store s5
    STORE   x22, 6*REGBYTES(t0)                     // Store s6
    STORE   x23, 5*REGBYTES(t0)                     // Store s7
    STORE   x24, 4*REGBYTES(t0)                     // Store s8
    STORE   x25, 3*REGBYTES(t0)                     // Store s9
    STORE   x26, 2*REGBYTES(t0)                     // Store s10
    STORE   x27, 1*REGBYTES(t0)                     // Store s11
                                                    // Note: s0 is already stored!

    /* Save the remaining time-slice and disable it.  */
    /* if (_tx_timer_time_slice)
    {  */

    la      t0, _tx_timer_time_slice                // Pickup time slice variable address
    LOAD    t2, 0(t0)                               // Pickup time slice
    beqz    t2, _tx_thread_dont_save_ts             // If 0, skip time slice processing

    /* _tx_thread_current_ptr -> tx_thread_time_slice =  _tx_timer_time_slice
       _tx_timer_time_slice =  0;  */

    STORE   t2, 6*REGBYTES(t1)                      // Save current time slice
    STORE   x0, 0(t0)                               // Clear global time slice


    /* }  */
_tx_thread_dont_save_ts:
    /* Clear the current task pointer.  */
    /* _tx_thread_current_ptr =  TX_NULL;  */

    /* Return to the scheduler.  */
    /* _tx_thread_schedule();  */

    STORE   x0, _tx_thread_current_ptr, t0          // Clear current thread pointer
    /* }  */

_tx_thread_idle_system_restore:
    /* Just return back to the scheduler!  */
    j       _tx_thread_schedule                     // Return to scheduler

/* }  */