1/* 2 * SPDX-FileCopyrightText: 2015-2019 Cadence Design Systems, Inc. 3 * 4 * SPDX-License-Identifier: MIT 5 * 6 * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD 7 */ 8/* 9 * Copyright (c) 2015-2019 Cadence Design Systems, Inc. 10 * 11 * Permission is hereby granted, free of charge, to any person obtaining 12 * a copy of this software and associated documentation files (the 13 * "Software"), to deal in the Software without restriction, including 14 * without limitation the rights to use, copy, modify, merge, publish, 15 * distribute, sublicense, and/or sell copies of the Software, and to 16 * permit persons to whom the Software is furnished to do so, subject to 17 * the following conditions: 18 * 19 * The above copyright notice and this permission notice shall be included 20 * in all copies or substantial portions of the Software. 21 * 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 25 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 26 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 27 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 28 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 29 */ 30 31/******************************************************************************* 32 33 XTENSA CONTEXT SAVE AND RESTORE ROUTINES 34 35Low-level Call0 functions for handling generic context save and restore of 36registers not specifically addressed by the interrupt vectors and handlers. 37Those registers (not handled by these functions) are PC, PS, A0, A1 (SP). 38Except for the calls to RTOS functions, this code is generic to Xtensa. 39 40Note that in Call0 ABI, interrupt handlers are expected to preserve the callee- 41save regs (A12-A15), which is always the case if the handlers are coded in C. 
42However A12, A13 are made available as scratch registers for interrupt dispatch 43code, so are presumed saved anyway, and are always restored even in Call0 ABI. 44Only A14, A15 are truly handled as callee-save regs. 45 46Because Xtensa is a configurable architecture, this port supports all user 47generated configurations (except restrictions stated in the release notes). 48This is accomplished by conditional compilation using macros and functions 49defined in the Xtensa HAL (hardware adaptation layer) for your configuration. 50Only the processor state included in your configuration is saved and restored, 51including any processor state added by user configuration options or TIE. 52 53*******************************************************************************/ 54 55/* Warn nicely if this file gets named with a lowercase .s instead of .S: */ 56#define NOERROR # 57NOERROR: .error "C preprocessor needed for this file: make sure its filename\ 58 ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option." 59 60 61#include "xtensa_rtos.h" 62#include "xtensa_context.h" 63#include "esp_idf_version.h" 64#if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0)) 65#include "xt_asm_utils.h" 66#endif 67 68#ifdef XT_USE_OVLY 69#include <xtensa/overlay_os_asm.h> 70#endif 71 72 .text 73 74/******************************************************************************* 75 76_xt_context_save 77 78 !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !! 79 80Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the 81interrupt stack frame defined in xtensa_rtos.h. 82Its counterpart is _xt_context_restore (which also restores A12, A13). 83 84Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame. 85This function preserves A12 & A13 in order to provide the caller with 2 scratch 86regs that need not be saved over the call to this function. 
2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
to avoid moving data more than necessary. Caller can assign regs accordingly.

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Original A12, A13 have already been saved in the interrupt stack frame.
    Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
    point of interruption.
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    A12, A13 as at entry (preserved).
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

*******************************************************************************/

    .global _xt_context_save
    .type   _xt_context_save,@function
    .align  4
    .literal_position
    .align  4

_xt_context_save:

    /* Store a2-a11 into the interrupt stack frame (the caller has already
       saved PC, PS, A0, A1, A12, A13 per the entry conditions above). */
    s32i    a2,  sp, XT_STK_A2
    s32i    a3,  sp, XT_STK_A3
    s32i    a4,  sp, XT_STK_A4
    s32i    a5,  sp, XT_STK_A5
    s32i    a6,  sp, XT_STK_A6
    s32i    a7,  sp, XT_STK_A7
    s32i    a8,  sp, XT_STK_A8
    s32i    a9,  sp, XT_STK_A9
    s32i    a10, sp, XT_STK_A10
    s32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be saved here.
    a12-13 are the caller's responsibility so it can use them as scratch.
    So only need to save a14-a15 here for Windowed ABI (not Call0).
    */
    #ifndef __XTENSA_CALL0_ABI__
    s32i    a14, sp, XT_STK_A14
    s32i    a15, sp, XT_STK_A15
    #endif

    /* The Shift Amount Register is live interrupted-thread state. */
    rsr     a3,  SAR
    s32i    a3,  sp, XT_STK_SAR

    #if XCHAL_HAVE_LOOPS
    /* Zero-overhead loop registers (only present on cores configured with
       the loops option). */
    rsr     a3,  LBEG
    s32i    a3,  sp, XT_STK_LBEG
    rsr     a3,  LEND
    s32i    a3,  sp, XT_STK_LEND
    rsr     a3,  LCOUNT
    s32i    a3,  sp, XT_STK_LCOUNT
    #endif

    #ifdef XT_USE_SWPRI
    /* Save virtual priority mask */
    movi    a3,  _xt_vpri_mask
    l32i    a3,  a3, 0
    s32i    a3,  sp, XT_STK_VPRI
    #endif

    /* The HAL helpers called below destroy a0, so keep the return address
       in a9 across them (a9's original value is already in the frame). */
    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a9,  a0                     /* preserve ret addr */
    #endif

    #if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifndef __XTENSA_CALL0_ABI__
    /*
    To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
    Need to save a9,12,13 temporarily (in frame temps) and recover originals.
    Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
    and underflow exceptions disabled (assured by PS.EXCM == 1).
    */
    s32i    a12, sp, XT_STK_TMP0        /* temp. save stuff in stack frame */
    s32i    a13, sp, XT_STK_TMP1
    s32i    a9,  sp, XT_STK_TMP2

    /*
    Save the overlay state if we are supporting overlays. Since we just saved
    three registers, we can conveniently use them here. Note that as of now,
    overlays only work for windowed calling ABI.
    */
    #ifdef XT_USE_OVLY
    l32i    a9,  sp, XT_STK_PC          /* recover saved PC */
    _xt_overlay_get_state    a9, a12, a13
    s32i    a9,  sp, XT_STK_OVLY        /* save overlay state */
    #endif

    l32i    a12, sp, XT_STK_A12         /* recover original a9,12,13 */
    l32i    a13, sp, XT_STK_A13
    l32i    a9,  sp, XT_STK_A9
    addi    sp,  sp, XT_STK_FRMSZ       /* restore the interruptee's SP */
    call0   xthal_window_spill_nw       /* preserves only a4,5,8,9,12,13 */
    addi    sp,  sp, -XT_STK_FRMSZ
    l32i    a12, sp, XT_STK_TMP0        /* recover stuff from stack frame */
    l32i    a13, sp, XT_STK_TMP1
    l32i    a9,  sp, XT_STK_TMP2
    #endif
    #endif /* (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0)) */

    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    /* Same temp-save dance as above, but the window spill itself is deferred
       until after xthal_save_extra_nw (see SPILL_ALL_WINDOWS below). */
    s32i    a12, sp, XT_STK_TMP0        /* temp. save stuff in stack frame */
    s32i    a13, sp, XT_STK_TMP1
    s32i    a9,  sp, XT_STK_TMP2

    l32i    a12, sp, XT_STK_A12         /* recover original a9,12,13 */
    l32i    a13, sp, XT_STK_A13
    l32i    a9,  sp, XT_STK_A9
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0
    /* Save any TIE/extra processor state via the HAL helper. */
    addi    a2,  sp, XT_STK_EXTRA       /* where to save it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3, -XCHAL_EXTRA_SA_ALIGN
    and     a2, a2, a3                  /* align dynamically >16 bytes */
    # endif
    call0   xthal_save_extra_nw         /* destroys a0,2,3 */
    #endif

    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifndef __XTENSA_CALL0_ABI__
    #ifdef XT_USE_OVLY
    l32i    a9,  sp, XT_STK_PC          /* recover saved PC */
    _xt_overlay_get_state    a9, a12, a13
    s32i    a9,  sp, XT_STK_OVLY        /* save overlay state */
    #endif

    /* SPILL_ALL_WINDOWS macro requires window overflow exceptions to be enabled,
     * i.e. PS.EXCM cleared and PS.WOE set.
     * Since we are going to clear PS.EXCM, we also need to increase INTLEVEL
     * at least to XCHAL_EXCM_LEVEL. This matches the value of effective INTLEVEL
     * at entry (CINTLEVEL=max(PS.INTLEVEL, XCHAL_EXCM_LEVEL)) when PS.EXCM is set.
     * Since WindowOverflow exceptions will trigger inside SPILL_ALL_WINDOWS,
     * need to save/restore EPC1 as well.
     * Note: even though a4-a15 are saved into the exception frame, we should not
     * clobber them until after SPILL_ALL_WINDOWS. This is because these registers
     * may contain live windows belonging to previous frames in the call stack.
     * These frames will be spilled by SPILL_ALL_WINDOWS, and if the register was
     * used as a temporary by this code, the temporary value would get stored
     * onto the stack, instead of the real value.
     */
    rsr     a2, PS                      /* to be restored after SPILL_ALL_WINDOWS */
    movi    a0, PS_INTLEVEL_MASK
    and     a3, a2, a0                  /* get the current INTLEVEL */
    bgeui   a3, XCHAL_EXCM_LEVEL, 1f    /* calculate max(INTLEVEL, XCHAL_EXCM_LEVEL) */
    movi    a3, XCHAL_EXCM_LEVEL
1:
    movi    a0, PS_UM | PS_WOE          /* clear EXCM, enable window overflow, set new INTLEVEL */
    or      a3, a3, a0
    wsr     a3, ps
    rsr     a0, EPC1                    /* to be restored after SPILL_ALL_WINDOWS */

    addi    sp, sp, XT_STK_FRMSZ        /* restore the interruptee's SP */
    SPILL_ALL_WINDOWS
    addi    sp, sp, -XT_STK_FRMSZ       /* return the current stack pointer and proceed with context save */

    wsr     a2, PS                      /* restore to the value at entry */
    rsync
    wsr     a0, EPC1                    /* likewise */

    #endif /* __XTENSA_CALL0_ABI__ */

    l32i    a12, sp, XT_STK_TMP0        /* restore the temp saved registers */
    l32i    a13, sp, XT_STK_TMP1        /* our return address is there */
    l32i    a9,  sp, XT_STK_TMP2
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a0, a9                      /* retrieve ret addr */
    #endif

    ret

/*******************************************************************************

_xt_context_restore

  !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h .
Its counterpart is _xt_context_save (whose caller saved A12, A13).

Caller is responsible to restore PC, PS, A0, A1 (SP).

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Other processor state except PC, PS, A0, A1 (SP), is as at the point
    of interruption.

*******************************************************************************/

    .global _xt_context_restore
    .type   _xt_context_restore,@function
    .align  4
    .literal_position
    .align  4
_xt_context_restore:

    #if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_restore_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we only assume a13 is preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    mov     a13, a0                     /* preserve ret addr */
    addi    a2,  sp, XT_STK_EXTRA       /* where to find it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3, -XCHAL_EXTRA_SA_ALIGN
    and     a2, a2, a3                  /* align dynamically >16 bytes */
    # endif
    call0   xthal_restore_extra_nw      /* destroys a0,2,3,4,5 */
    mov     a0,  a13                    /* retrieve ret addr */
    #endif

    #if XCHAL_HAVE_LOOPS
    /* Restore zero-overhead loop state; loads are interleaved with the
       writes so each value is ready when its wsr executes. */
    l32i    a2,  sp, XT_STK_LBEG
    l32i    a3,  sp, XT_STK_LEND
    wsr     a2,  LBEG
    l32i    a2,  sp, XT_STK_LCOUNT
    wsr     a3,  LEND
    wsr     a2,  LCOUNT
    #endif

    #ifdef XT_USE_OVLY
    /*
    If we are using overlays, this is a good spot to check if we need
    to restore an overlay for the incoming task. Here we have a bunch
    of registers to spare. Note that this step is going to use a few
    bytes of storage below SP (SP-20 to SP-32) if an overlay is going
    to be restored.
    */
    l32i    a2,  sp, XT_STK_PC          /* retrieve PC */
    l32i    a3,  sp, XT_STK_PS          /* retrieve PS */
    l32i    a4,  sp, XT_STK_OVLY        /* retrieve overlay state */
    l32i    a5,  sp, XT_STK_A1          /* retrieve stack ptr */
    _xt_overlay_check_map    a2, a3, a4, a5, a6
    s32i    a2,  sp, XT_STK_PC          /* save updated PC */
    s32i    a3,  sp, XT_STK_PS          /* save updated PS */
    #endif

    #ifdef XT_USE_SWPRI
    /* Restore virtual interrupt priority and interrupt enable */
    movi    a3,  _xt_intdata
    l32i    a4,  a3, 0                  /* a4 = _xt_intenable */
    l32i    a5,  sp, XT_STK_VPRI        /* a5 = saved _xt_vpri_mask */
    and     a4,  a4, a5
    wsr     a4,  INTENABLE              /* update INTENABLE */
    s32i    a5,  a3, 4                  /* restore _xt_vpri_mask */
    #endif

    /* Restore SAR and the general-purpose registers a2-a11. */
    l32i    a3,  sp, XT_STK_SAR
    l32i    a2,  sp, XT_STK_A2
    wsr     a3,  SAR
    l32i    a3,  sp, XT_STK_A3
    l32i    a4,  sp, XT_STK_A4
    l32i    a5,  sp, XT_STK_A5
    l32i    a6,  sp, XT_STK_A6
    l32i    a7,  sp, XT_STK_A7
    l32i    a8,  sp, XT_STK_A8
    l32i    a9,  sp, XT_STK_A9
    l32i    a10, sp, XT_STK_A10
    l32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be restored here.
    However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
    so need to be restored anyway, despite being callee-saved in Call0.
    */
    l32i    a12, sp, XT_STK_A12
    l32i    a13, sp, XT_STK_A13
    #ifndef __XTENSA_CALL0_ABI__
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    ret


/*******************************************************************************

_xt_coproc_init

Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).

Called during initialization of the RTOS, before any threads run.

This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.

Entry Conditions:
    Xtensa single-threaded run-time environment is in effect.
    No thread is yet running.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_init(void)

*******************************************************************************/

#if XCHAL_CP_NUM > 0

    .global _xt_coproc_init
    .type   _xt_coproc_init,@function
    .align  4
    .literal_position
    .align  4
_xt_coproc_init:
    ENTRY0

    /* Initialize thread co-processor ownerships to 0 (unowned).
       The owner array holds one 4-byte entry per co-processor per core. */
    movi    a2, _xt_coproc_owner_sa     /* a2 = base of owner array */
    addi    a3, a2, (XCHAL_CP_MAX*portNUM_PROCESSORS) << 2  /* a3 = top+1 of owner array */
    movi    a4, 0                       /* a4 = 0 (unowned) */
1:  s32i    a4, a2, 0                   /* clear current entry */
    addi    a2, a2, 4                   /* advance to next entry */
    bltu    a2, a3, 1b                  /* loop until whole array cleared */

    RET0

#endif


/*******************************************************************************

_xt_coproc_release

Releases any and all co-processors owned by a given thread. The thread is
identified by its co-processor state save area defined in xtensa_context.h .

Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).

Needs to be called on the processor the thread was running on. Unpinned threads
won't have an entry here because they get pinned as soon as they use a
coprocessor.

Entry Conditions:
    A2  = Pointer to base of co-processor state save area.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_release(void * coproc_sa_base)

*******************************************************************************/

#if XCHAL_CP_NUM > 0

    .global _xt_coproc_release
    .type   _xt_coproc_release,@function
    .align  4
    .literal_position
    .align  4
_xt_coproc_release:
    ENTRY0                              /* a2 = base of save area */

    /* Select this core's slice of the owner array:
       slice offset = coreid * (XCHAL_CP_MAX * 4) bytes. */
    getcoreid a5
    movi    a3, XCHAL_CP_MAX << 2
    mull    a5, a5, a3
    movi    a3, _xt_coproc_owner_sa     /* a3 = base of owner array */
    add     a3, a3, a5

    addi    a4, a3, XCHAL_CP_MAX << 2   /* a4 = top+1 of owner array */
    movi    a5, 0                       /* a5 = 0 (unowned) */

    rsil    a6, XCHAL_EXCM_LEVEL        /* lock interrupts */

    /* Walk every entry in this core's slice; any co-processor owned by the
       given thread (matching save-area base) is marked unowned. */
1:  l32i    a7, a3, 0                   /* a7 = owner at a3 */
    bne     a2, a7, 2f                  /* if (coproc_sa_base == owner) */
    s32i    a5, a3, 0                   /*   owner = unowned */
2:  addi    a3, a3, 1<<2                /* a3 = next entry in owner array */
    bltu    a3, a4, 1b                  /* repeat until end of array */

3:  wsr     a6, PS                      /* restore interrupts */

    RET0

#endif


/*******************************************************************************
_xt_coproc_savecs

If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.

Entry conditions:
    - The thread being switched out is still the current thread.
    - CPENABLE state reflects which coprocessors are active.
    - Registers have been saved/spilled already.

Exit conditions:
    - All necessary CP callee-saved state has been saved.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
*******************************************************************************/
#if XCHAL_CP_NUM > 0

    .extern _xt_coproc_sa_offset        /* external reference */

    .global _xt_coproc_savecs
    .type   _xt_coproc_savecs,@function
    .align  4
    .literal_position
    .align  4
_xt_coproc_savecs:

    /* At entry, CPENABLE should be showing which CPs are enabled. */

    rsr     a2,  CPENABLE               /* a2 = which CPs are enabled */
    beqz    a2,  .Ldone                 /* quick exit if none */
    mov     a14, a0                     /* save return address */
    call0   XT_RTOS_CP_STATE            /* get address of CP save area */
    mov     a0,  a14                    /* restore return address */
    beqz    a15, .Ldone                 /* if none then nothing to do */
    s16i    a2,  a15, XT_CP_CS_ST       /* save mask of CPs being stored (16-bit field) */
    movi    a13, _xt_coproc_sa_offset   /* array of CP save offsets */
    l32i    a15, a15, XT_CP_ASA         /* a15 = base of aligned save area */

    /* One guarded section per configured co-processor: skip if its bit in
       the CPENABLE mask is clear, else store its state via the HAL macro at
       (aligned save-area base + this CP's offset). */

#if XCHAL_CP0_SA_SIZE
    bbci.l  a2, 0, 2f                   /* CP 0 not enabled */
    l32i    a14, a13, 0                 /* a14 = _xt_coproc_sa_offset[0] */
    add     a3,  a14, a15               /* a3 = save area for CP 0 */
    xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2, 1, 2f                   /* CP 1 not enabled */
    l32i    a14, a13, 4                 /* a14 = _xt_coproc_sa_offset[1] */
    add     a3,  a14, a15               /* a3 = save area for CP 1 */
    xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2, 2, 2f
    l32i    a14, a13, 8
    add     a3,  a14, a15
    xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2, 3, 2f
    l32i    a14, a13, 12
    add     a3,  a14, a15
    xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2, 4, 2f
    l32i    a14, a13, 16
    add     a3,  a14, a15
    xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2, 5, 2f
    l32i    a14, a13, 20
    add     a3,  a14, a15
    xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2, 6, 2f
    l32i    a14, a13, 24
    add     a3,  a14, a15
    xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2, 7, 2f
    l32i    a14, a13, 28
    add     a3,  a14, a15
    xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone:
    ret
#endif


/*******************************************************************************
_xt_coproc_restorecs

Restore any callee-saved coprocessor state for the incoming thread.
This function is called from coprocessor exception handling, when giving
ownership to a thread that solicited a context switch earlier. It calls a
system-specific function to get the coprocessor save area base address.

Entry conditions:
    - The incoming thread is set as the current thread.
    - CPENABLE is set up correctly for all required coprocessors.
    - a2 = mask of coprocessors to be restored.

Exit conditions:
    - All necessary CP callee-saved state has been restored.
    - CPENABLE - unchanged.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
*******************************************************************************/
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_restorecs
    .type   _xt_coproc_restorecs,@function
    .align  4
    .literal_position
    .align  4
_xt_coproc_restorecs:

    mov     a14, a0                     /* save return address */
    call0   XT_RTOS_CP_STATE            /* get address of CP save area */
    mov     a0,  a14                    /* restore return address */
    beqz    a15, .Ldone2                /* if none then nothing to do */
    l16ui   a3,  a15, XT_CP_CS_ST       /* a3 = which CPs have been saved */
    xor     a3,  a3, a2                 /* clear the ones being restored */
    /* BUGFIX: XT_CP_CS_ST is a 16-bit field (written with s16i in
       _xt_coproc_savecs and read with l16ui above); the previous s32i here
       wrote 32 bits, spilling past the halfword field into the bytes that
       follow it (where the XT_CP_ASA pointer read below lives). Use s16i
       to match the field width. */
    s16i    a3,  a15, XT_CP_CS_ST       /* update saved CP mask */
    movi    a13, _xt_coproc_sa_offset   /* array of CP save offsets */
    l32i    a15, a15, XT_CP_ASA         /* a15 = base of aligned save area */

    /* One guarded section per configured co-processor: skip if its bit in
       the restore mask (a2) is clear, else reload its state via the HAL
       macro from (aligned save-area base + this CP's offset). */

#if XCHAL_CP0_SA_SIZE
    bbci.l  a2, 0, 2f                   /* CP 0 not enabled */
    l32i    a14, a13, 0                 /* a14 = _xt_coproc_sa_offset[0] */
    add     a3,  a14, a15               /* a3 = save area for CP 0 */
    xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2, 1, 2f                   /* CP 1 not enabled */
    l32i    a14, a13, 4                 /* a14 = _xt_coproc_sa_offset[1] */
    add     a3,  a14, a15               /* a3 = save area for CP 1 */
    xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2, 2, 2f
    l32i    a14, a13, 8
    add     a3,  a14, a15
    xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2, 3, 2f
    l32i    a14, a13, 12
    add     a3,  a14, a15
    xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2, 4, 2f
    l32i    a14, a13, 16
    add     a3,  a14, a15
    xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2, 5, 2f
    l32i    a14, a13, 20
    add     a3,  a14, a15
    xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2, 6, 2f
    l32i    a14, a13, 24
    add     a3,  a14, a15
    xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2, 7, 2f
    l32i    a14, a13, 28
    add     a3,  a14, a15
    xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone2:
    ret

#endif