1/******************************************************************************* 2Copyright (c) 2006-2015 Cadence Design Systems Inc. 3 4Permission is hereby granted, free of charge, to any person obtaining 5a copy of this software and associated documentation files (the 6"Software"), to deal in the Software without restriction, including 7without limitation the rights to use, copy, modify, merge, publish, 8distribute, sublicense, and/or sell copies of the Software, and to 9permit persons to whom the Software is furnished to do so, subject to 10the following conditions: 11 12The above copyright notice and this permission notice shall be included 13in all copies or substantial portions of the Software. 14 15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 17MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 18IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 19CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 20TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 21SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 22-------------------------------------------------------------------------------- 23 24 XTENSA CONTEXT SAVE AND RESTORE ROUTINES 25 26Low-level Call0 functions for handling generic context save and restore of 27registers not specifically addressed by the interrupt vectors and handlers. 28Those registers (not handled by these functions) are PC, PS, A0, A1 (SP). 29Except for the calls to RTOS functions, this code is generic to Xtensa. 30 31Note that in Call0 ABI, interrupt handlers are expected to preserve the callee- 32save regs (A12-A15), which is always the case if the handlers are coded in C. 33However A12, A13 are made available as scratch registers for interrupt dispatch 34code, so are presumed saved anyway, and are always restored even in Call0 ABI. 
35Only A14, A15 are truly handled as callee-save regs. 36 37Because Xtensa is a configurable architecture, this port supports all user 38generated configurations (except restrictions stated in the release notes). 39This is accomplished by conditional compilation using macros and functions 40defined in the Xtensa HAL (hardware adaptation layer) for your configuration. 41Only the processor state included in your configuration is saved and restored, 42including any processor state added by user configuration options or TIE. 43 44*******************************************************************************/ 45 46/* Warn nicely if this file gets named with a lowercase .s instead of .S: */ 47#define NOERROR # 48NOERROR: .error "C preprocessor needed for this file: make sure its filename\ 49 ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option." 50 51 52#include "xtensa_rtos.h" 53#include "xtensa_context.h" 54#include "xt_asm_utils.h" 55 56#ifdef XT_USE_OVLY 57#include <xtensa/overlay_os_asm.h> 58#endif 59 60 .text 61 62/******************************************************************************* 63 64_xt_context_save 65 66 !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !! 67 68Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the 69interrupt stack frame defined in xtensa_rtos.h. 70Its counterpart is _xt_context_restore (which also restores A12, A13). 71 72Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame. 73This function preserves A12 & A13 in order to provide the caller with 2 scratch 74regs that need not be saved over the call to this function. The choice of which 752 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw, 76to avoid moving data more than necessary. Caller can assign regs accordingly. 77 78Entry Conditions: 79 A0 = Return address in caller. 80 A1 = Stack pointer of interrupted thread or handler ("interruptee"). 
    Original A12, A13 have already been saved in the interrupt stack frame.
    Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
    point of interruption.
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    A12, A13 as at entry (preserved).
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

*******************************************************************************/

    .global _xt_context_save
    .type   _xt_context_save,@function
    .align  4
    .literal_position
    .align  4

_xt_context_save:

    /* Save the interruptee's a2-a11 into the interrupt stack frame.
       (PC, PS, a0, a1, a12, a13 were already saved by the caller.) */
    s32i    a2,  sp, XT_STK_A2
    s32i    a3,  sp, XT_STK_A3
    s32i    a4,  sp, XT_STK_A4
    s32i    a5,  sp, XT_STK_A5
    s32i    a6,  sp, XT_STK_A6
    s32i    a7,  sp, XT_STK_A7
    s32i    a8,  sp, XT_STK_A8
    s32i    a9,  sp, XT_STK_A9
    s32i    a10, sp, XT_STK_A10
    s32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be saved here.
    a12-13 are the caller's responsibility so it can use them as scratch.
    So only need to save a14-a15 here for Windowed ABI (not Call0).
    */
    #ifndef __XTENSA_CALL0_ABI__
    s32i    a14, sp, XT_STK_A14
    s32i    a15, sp, XT_STK_A15
    #endif

    /* Save the shift-amount register. */
    rsr     a3,  SAR
    s32i    a3,  sp, XT_STK_SAR

    #if XCHAL_HAVE_LOOPS
    /* Save zero-overhead loop state (only present if configured). */
    rsr     a3,  LBEG
    s32i    a3,  sp, XT_STK_LBEG
    rsr     a3,  LEND
    s32i    a3,  sp, XT_STK_LEND
    rsr     a3,  LCOUNT
    s32i    a3,  sp, XT_STK_LCOUNT
    #endif

    #ifdef XT_USE_SWPRI
    /* Save virtual priority mask */
    movi    a3,  _xt_vpri_mask
    l32i    a3,  a3, 0
    s32i    a3,  sp, XT_STK_VPRI
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    /* a0 will be clobbered by call0 / the spill sequence below; park it in a9
       (a9's original value was saved to XT_STK_A9 above). */
    mov     a9,  a0                     /* preserve ret addr */
    #endif

    /* Park a9, a12, a13 in the frame's temp slots, then reload their ORIGINAL
       values from the frame. The window spill / xthal_save_extra_nw below must
       see the interruptee's real register contents, not our scratch values. */
    s32i    a12, sp, XT_STK_TMP0        /* temp. save stuff in stack frame */
    s32i    a13, sp, XT_STK_TMP1
    s32i    a9,  sp, XT_STK_TMP2        /* ret addr lives here over the calls */

    l32i    a12, sp, XT_STK_A12         /* recover original a9,12,13 */
    l32i    a13, sp, XT_STK_A13
    l32i    a9,  sp, XT_STK_A9

    #if XCHAL_EXTRA_SA_SIZE > 0
    /* Save any extra (TIE/user-configured) processor state via the HAL. */
    addi    a2,  sp, XT_STK_EXTRA       /* where to save it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3, -XCHAL_EXTRA_SA_ALIGN
    and     a2, a2, a3                  /* align dynamically >16 bytes */
    # endif
    call0   xthal_save_extra_nw         /* destroys a0,2,3 */
    #endif

    #ifndef __XTENSA_CALL0_ABI__
    #ifdef XT_USE_OVLY
    l32i    a9,  sp, XT_STK_PC          /* recover saved PC */
    _xt_overlay_get_state    a9, a12, a13
    s32i    a9,  sp, XT_STK_OVLY        /* save overlay state */
    #endif

    /* SPILL_ALL_WINDOWS macro requires window overflow exceptions to be enabled,
     * i.e. PS.EXCM cleared and PS.WOE set.
     * Since we are going to clear PS.EXCM, we also need to increase INTLEVEL
     * at least to XCHAL_EXCM_LEVEL. This matches the value of effective INTLEVEL
     * at entry (CINTLEVEL=max(PS.INTLEVEL, XCHAL_EXCM_LEVEL) when PS.EXCM is set).
     * Since WindowOverflow exceptions will trigger inside SPILL_ALL_WINDOWS,
     * need to save/restore EPC1 as well.
     * Note: even though a4-a15 are saved into the exception frame, we should not
     * clobber them until after SPILL_ALL_WINDOWS. This is because these registers
     * may contain live windows belonging to previous frames in the call stack.
     * These frames will be spilled by SPILL_ALL_WINDOWS, and if the register was
     * used as a temporary by this code, the temporary value would get stored
     * onto the stack, instead of the real value.
     */
    rsr     a2, PS                      /* to be restored after SPILL_ALL_WINDOWS */
    movi    a0, PS_INTLEVEL_MASK
    and     a3, a2, a0                  /* get the current INTLEVEL */
    bgeui   a3, XCHAL_EXCM_LEVEL, 1f    /* calculate max(INTLEVEL, XCHAL_EXCM_LEVEL) */
    movi    a3, XCHAL_EXCM_LEVEL
1:
    movi    a0, PS_UM | PS_WOE          /* clear EXCM, enable window overflow, set new INTLEVEL */
    or      a3, a3, a0
    wsr     a3, ps
    rsr     a0, EPC1                    /* to be restored after SPILL_ALL_WINDOWS */

    addi    sp,  sp, XT_STK_FRMSZ       /* go back to spill register region */
    SPILL_ALL_WINDOWS                   /* place the live register windows there */
    addi    sp,  sp, -XT_STK_FRMSZ      /* return the current stack pointer and proceed with context save*/

    wsr     a2, PS                      /* restore to the value at entry */
    rsync
    wsr     a0, EPC1                    /* likewise */

    #endif /* !__XTENSA_CALL0_ABI__ */

    /* Undo the temp-slot parking done above; a9 holds our return address. */
    l32i    a12, sp, XT_STK_TMP0        /* restore the temp saved registers */
    l32i    a13, sp, XT_STK_TMP1
    l32i    a9,  sp, XT_STK_TMP2        /* our return address is there */

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a0, a9                      /* retrieve ret addr */
    #endif

    ret

/*******************************************************************************

_xt_context_restore

  !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h .
Its counterpart is _xt_context_save (whose caller saved A12, A13).

Caller is responsible to restore PC, PS, A0, A1 (SP).

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Other processor state except PC, PS, A0, A1 (SP), is as at the point
    of interruption.
236 237*******************************************************************************/ 238 239 .global _xt_context_restore 240 .type _xt_context_restore,@function 241 .align 4 242 .literal_position 243 .align 4 244_xt_context_restore: 245 246 #if XCHAL_EXTRA_SA_SIZE > 0 247 /* 248 NOTE: Normally the xthal_restore_extra_nw macro only affects address 249 registers a2-a5. It is theoretically possible for Xtensa processor 250 designers to write TIE that causes more address registers to be 251 affected, but it is generally unlikely. If that ever happens, 252 more registers need to be saved/restored around this macro invocation. 253 Here we only assume a13 is preserved. 254 Future Xtensa tools releases might limit the regs that can be affected. 255 */ 256 mov a13, a0 /* preserve ret addr */ 257 addi a2, sp, XT_STK_EXTRA /* where to find it */ 258 # if XCHAL_EXTRA_SA_ALIGN > 16 259 movi a3, -XCHAL_EXTRA_SA_ALIGN 260 and a2, a2, a3 /* align dynamically >16 bytes */ 261 # endif 262 call0 xthal_restore_extra_nw /* destroys a0,2,3,4,5 */ 263 mov a0, a13 /* retrieve ret addr */ 264 #endif 265 266 #if XCHAL_HAVE_LOOPS 267 l32i a2, sp, XT_STK_LBEG 268 l32i a3, sp, XT_STK_LEND 269 wsr a2, LBEG 270 l32i a2, sp, XT_STK_LCOUNT 271 wsr a3, LEND 272 wsr a2, LCOUNT 273 #endif 274 275 #ifdef XT_USE_OVLY 276 /* 277 If we are using overlays, this is a good spot to check if we need 278 to restore an overlay for the incoming task. Here we have a bunch 279 of registers to spare. Note that this step is going to use a few 280 bytes of storage below SP (SP-20 to SP-32) if an overlay is going 281 to be restored. 
282 */ 283 l32i a2, sp, XT_STK_PC /* retrieve PC */ 284 l32i a3, sp, XT_STK_PS /* retrieve PS */ 285 l32i a4, sp, XT_STK_OVLY /* retrieve overlay state */ 286 l32i a5, sp, XT_STK_A1 /* retrieve stack ptr */ 287 _xt_overlay_check_map a2, a3, a4, a5, a6 288 s32i a2, sp, XT_STK_PC /* save updated PC */ 289 s32i a3, sp, XT_STK_PS /* save updated PS */ 290 #endif 291 292 #ifdef XT_USE_SWPRI 293 /* Restore virtual interrupt priority and interrupt enable */ 294 movi a3, _xt_intdata 295 l32i a4, a3, 0 /* a4 = _xt_intenable */ 296 l32i a5, sp, XT_STK_VPRI /* a5 = saved _xt_vpri_mask */ 297 and a4, a4, a5 298 wsr a4, INTENABLE /* update INTENABLE */ 299 s32i a5, a3, 4 /* restore _xt_vpri_mask */ 300 #endif 301 302 l32i a3, sp, XT_STK_SAR 303 l32i a2, sp, XT_STK_A2 304 wsr a3, SAR 305 l32i a3, sp, XT_STK_A3 306 l32i a4, sp, XT_STK_A4 307 l32i a5, sp, XT_STK_A5 308 l32i a6, sp, XT_STK_A6 309 l32i a7, sp, XT_STK_A7 310 l32i a8, sp, XT_STK_A8 311 l32i a9, sp, XT_STK_A9 312 l32i a10, sp, XT_STK_A10 313 l32i a11, sp, XT_STK_A11 314 315 /* 316 Call0 ABI callee-saved regs a12-15 do not need to be restored here. 317 However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(), 318 so need to be restored anyway, despite being callee-saved in Call0. 319 */ 320 l32i a12, sp, XT_STK_A12 321 l32i a13, sp, XT_STK_A13 322 #ifndef __XTENSA_CALL0_ABI__ 323 l32i a14, sp, XT_STK_A14 324 l32i a15, sp, XT_STK_A15 325 #endif 326 327 ret 328 329 330/******************************************************************************* 331 332_xt_coproc_init 333 334Initializes global co-processor management data, setting all co-processors 335to "unowned". Leaves CPENABLE as it found it (does NOT clear it). 336 337Called during initialization of the RTOS, before any threads run. 338 339This may be called from normal Xtensa single-threaded application code which 340might use co-processors. The Xtensa run-time initialization enables all 341co-processors. 
They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.

Entry Conditions:
    Xtensa single-threaded run-time environment is in effect.
    No thread is yet running.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_init(void)

*******************************************************************************/

#if XCHAL_CP_NUM > 0

    .global _xt_coproc_init
    .type   _xt_coproc_init,@function
    .align  4
    .literal_position
    .align  4
_xt_coproc_init:
    ENTRY0

    /* Initialize thread co-processor ownerships to 0 (unowned).
       The owner array holds one 32-bit entry per coprocessor per core
       (XCHAL_CP_MAX * portNUM_PROCESSORS entries total). */
    movi    a2, _xt_coproc_owner_sa     /* a2 = base of owner array */
    addi    a3, a2, (XCHAL_CP_MAX*portNUM_PROCESSORS) << 2  /* a3 = top+1 of owner array */
    movi    a4, 0                       /* a4 = 0 (unowned) */
1:  s32i    a4, a2, 0                   /* zero each entry in turn */
    addi    a2, a2, 4
    bltu    a2, a3, 1b

    RET0

#endif


/*******************************************************************************

_xt_coproc_release

Releases any and all co-processors owned by a given thread. The thread is
identified by its co-processor state save area defined in xtensa_context.h .

Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).

Needs to be called on the processor the thread was running on. Unpinned threads
won't have an entry here because they get pinned as soon they use a coprocessor.

Entry Conditions:
    A2  = Pointer to base of co-processor state save area.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_release(void * coproc_sa_base)

*******************************************************************************/

#if XCHAL_CP_NUM > 0

    .global _xt_coproc_release
    .type   _xt_coproc_release,@function
    .align  4
    .literal_position
    .align  4
_xt_coproc_release:
    ENTRY0                              /* a2 = base of save area */

    /* Select this core's slice of the owner array:
       slice offset = coreid * (XCHAL_CP_MAX * 4 bytes). */
    getcoreid a5
    movi    a3, XCHAL_CP_MAX << 2
    mull    a5, a5, a3
    movi    a3, _xt_coproc_owner_sa     /* a3 = base of owner array */
    add     a3, a3, a5                  /* a3 = this core's slice */

    addi    a4, a3, XCHAL_CP_MAX << 2   /* a4 = top+1 of owner array */
    movi    a5, 0                       /* a5 = 0 (unowned) */

    rsil    a6, XCHAL_EXCM_LEVEL        /* lock interrupts while scanning */

    /* Scan every CP entry; any entry owned by this thread's save area
       becomes unowned. */
1:  l32i    a7, a3, 0                   /* a7 = owner at a3 */
    bne     a2, a7, 2f                  /* if (coproc_sa_base == owner) */
    s32i    a5, a3, 0                   /*     owner = unowned */
2:  addi    a3, a3, 1<<2                /* a3 = next entry in owner array */
    bltu    a3, a4, 1b                  /* repeat until end of array */

3:  wsr     a6, PS                      /* restore interrupts (label 3 unused) */

    RET0

#endif


/*******************************************************************************
_xt_coproc_savecs

If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.

Entry conditions:
    - The thread being switched out is still the current thread.
    - CPENABLE state reflects which coprocessors are active.
    - Registers have been saved/spilled already.

Exit conditions:
    - All necessary CP callee-saved state has been saved.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
458*******************************************************************************/ 459#if XCHAL_CP_NUM > 0 460 461 .extern _xt_coproc_sa_offset /* external reference */ 462 463 .global _xt_coproc_savecs 464 .type _xt_coproc_savecs,@function 465 .align 4 466 .literal_position 467 .align 4 468_xt_coproc_savecs: 469 470 /* At entry, CPENABLE should be showing which CPs are enabled. */ 471 472 rsr a2, CPENABLE /* a2 = which CPs are enabled */ 473 beqz a2, .Ldone /* quick exit if none */ 474 mov a14, a0 /* save return address */ 475 call0 XT_RTOS_CP_STATE /* get address of CP save area */ 476 mov a0, a14 /* restore return address */ 477 beqz a15, .Ldone /* if none then nothing to do */ 478 s16i a2, a15, XT_CP_CS_ST /* save mask of CPs being stored */ 479 movi a13, _xt_coproc_sa_offset /* array of CP save offsets */ 480 l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */ 481 482#if XCHAL_CP0_SA_SIZE 483 bbci.l a2, 0, 2f /* CP 0 not enabled */ 484 l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */ 485 add a3, a14, a15 /* a3 = save area for CP 0 */ 486 xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 4872: 488#endif 489 490#if XCHAL_CP1_SA_SIZE 491 bbci.l a2, 1, 2f /* CP 1 not enabled */ 492 l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */ 493 add a3, a14, a15 /* a3 = save area for CP 1 */ 494 xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 4952: 496#endif 497 498#if XCHAL_CP2_SA_SIZE 499 bbci.l a2, 2, 2f 500 l32i a14, a13, 8 501 add a3, a14, a15 502 xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 5032: 504#endif 505 506#if XCHAL_CP3_SA_SIZE 507 bbci.l a2, 3, 2f 508 l32i a14, a13, 12 509 add a3, a14, a15 510 xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 
5112: 512#endif 513 514#if XCHAL_CP4_SA_SIZE 515 bbci.l a2, 4, 2f 516 l32i a14, a13, 16 517 add a3, a14, a15 518 xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 5192: 520#endif 521 522#if XCHAL_CP5_SA_SIZE 523 bbci.l a2, 5, 2f 524 l32i a14, a13, 20 525 add a3, a14, a15 526 xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 5272: 528#endif 529 530#if XCHAL_CP6_SA_SIZE 531 bbci.l a2, 6, 2f 532 l32i a14, a13, 24 533 add a3, a14, a15 534 xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 5352: 536#endif 537 538#if XCHAL_CP7_SA_SIZE 539 bbci.l a2, 7, 2f 540 l32i a14, a13, 28 541 add a3, a14, a15 542 xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 5432: 544#endif 545 546.Ldone: 547 ret 548#endif 549 550 551/******************************************************************************* 552_xt_coproc_restorecs 553 554Restore any callee-saved coprocessor state for the incoming thread. 555This function is called from coprocessor exception handling, when giving 556ownership to a thread that solicited a context switch earlier. It calls a 557system-specific function to get the coprocessor save area base address. 558 559Entry conditions: 560 - The incoming thread is set as the current thread. 561 - CPENABLE is set up correctly for all required coprocessors. 562 - a2 = mask of coprocessors to be restored. 563 564Exit conditions: 565 - All necessary CP callee-saved state has been restored. 566 - CPENABLE - unchanged. 567 - Registers a2-a7, a13-a15 have been trashed. 568 569Must be called from assembly code only, using CALL0. 
570*******************************************************************************/ 571#if XCHAL_CP_NUM > 0 572 573 .global _xt_coproc_restorecs 574 .type _xt_coproc_restorecs,@function 575 .align 4 576 .literal_position 577 .align 4 578_xt_coproc_restorecs: 579 580 mov a14, a0 /* save return address */ 581 call0 XT_RTOS_CP_STATE /* get address of CP save area */ 582 mov a0, a14 /* restore return address */ 583 beqz a15, .Ldone2 /* if none then nothing to do */ 584 l16ui a3, a15, XT_CP_CS_ST /* a3 = which CPs have been saved */ 585 xor a3, a3, a2 /* clear the ones being restored */ 586 s32i a3, a15, XT_CP_CS_ST /* update saved CP mask */ 587 movi a13, _xt_coproc_sa_offset /* array of CP save offsets */ 588 l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */ 589 590#if XCHAL_CP0_SA_SIZE 591 bbci.l a2, 0, 2f /* CP 0 not enabled */ 592 l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */ 593 add a3, a14, a15 /* a3 = save area for CP 0 */ 594 xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 5952: 596#endif 597 598#if XCHAL_CP1_SA_SIZE 599 bbci.l a2, 1, 2f /* CP 1 not enabled */ 600 l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */ 601 add a3, a14, a15 /* a3 = save area for CP 1 */ 602 xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 6032: 604#endif 605 606#if XCHAL_CP2_SA_SIZE 607 bbci.l a2, 2, 2f 608 l32i a14, a13, 8 609 add a3, a14, a15 610 xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 6112: 612#endif 613 614#if XCHAL_CP3_SA_SIZE 615 bbci.l a2, 3, 2f 616 l32i a14, a13, 12 617 add a3, a14, a15 618 xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 6192: 620#endif 621 622#if XCHAL_CP4_SA_SIZE 623 bbci.l a2, 4, 2f 624 l32i a14, a13, 16 625 add a3, a14, a15 626 
xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 6272: 628#endif 629 630#if XCHAL_CP5_SA_SIZE 631 bbci.l a2, 5, 2f 632 l32i a14, a13, 20 633 add a3, a14, a15 634 xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 6352: 636#endif 637 638#if XCHAL_CP6_SA_SIZE 639 bbci.l a2, 6, 2f 640 l32i a14, a13, 24 641 add a3, a14, a15 642 xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 6432: 644#endif 645 646#if XCHAL_CP7_SA_SIZE 647 bbci.l a2, 7, 2f 648 l32i a14, a13, 28 649 add a3, a14, a15 650 xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL 6512: 652#endif 653 654.Ldone2: 655 ret 656 657#endif 658