/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc.                             */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

/**************************************************************************/
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    Xtensa coprocessor handling routines. This code is only active if   */
/*    one or more coprocessors are present.                               */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE        NAME                      DESCRIPTION                   */
/*                                                                        */
/*    12-31-2020  Cadence Design Systems    Initial Version 6.1.3         */
/*                                                                        */
/**************************************************************************/


#include <xtensa/config/specreg.h>
#include <xtensa/coreasm.h>

#include "xtensa_context.h"
#include "xtensa_rtos.h"


#if XCHAL_CP_NUM > 0

//-----------------------------------------------------------------------------
// Coprocessor related state and precomputed values.
//-----------------------------------------------------------------------------

// Table of coprocessor owners, identified by thread's CP save area pointer.
// Zero means coprocessor is not owned.

    .data
    .global _xt_coproc_owner_sa
    .align  16,,XCHAL_CP_MAX << 2       // minimize crossing cache boundaries
_xt_coproc_owner_sa:
    .rept   XCHAL_CP_MAX
    .word   0
    .endr

// Bitmask table for CP n's enable bit, indexed by coprocessor number.

    .section .rodata, "a"
    .global _xt_coproc_mask
    .align  16,,8                       // try to keep it all in one cache line
    .set    i, 0
_xt_coproc_mask:
    .rept   XCHAL_CP_MAX
    .long   (i<<16) | (1<<i)            // upper 16-bits = i, lower = bitmask
    .set    i, i+1
    .endr

// Offset to CP n save area in thread's CP save area.

    .global _xt_coproc_sa_offset
    .align  16                          // minimize crossing cache boundaries
_xt_coproc_sa_offset:
    .word   XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
    .word   XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA

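
// Informational notes (derived from the tables above and the handlers below;
// they add no code or data):
//
// Each _xt_coproc_mask entry packs the CP index into its upper half and the
// CPENABLE bit into its lower half. For example, the entry for CP 2 is
// (2 << 16) | (1 << 2) = 0x00020004, so extracting bits [15:0] yields the
// CPENABLE mask 0x0004 and extracting bits [20:16] yields the index 2.
//
// Per-thread bookkeeping used by the routines below (field offsets come from
// xtensa_context.h):
//   _xt_coproc_owner_sa[n] - CP save area pointer of the thread that currently
//                            owns CP n (zero if unowned).
//   XT_CPENABLE            - CPs this thread is actively using (state live in
//                            the coprocessor registers).
//   XT_CPSTORED            - CPs whose full state is held in the thread's save
//                            area and must be reloaded on next use.
//   XT_CP_CS_ST            - CPs whose callee-saved state was stored by a
//                            solicited switch (_xt_coproc_savecs).
//   XT_CP_ASA              - pointer to the actual (aligned) save area.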

//-----------------------------------------------------------------------------
// _xt_coproc_handler
//
// Handles coprocessor exceptions and manages lazy context switching between
// multiple threads sharing the coprocessor(s).
//
// Register use:
//   a0     - on entry, return address (must have been called via call0).
//   a1     - pointing to a valid exception stack frame.
//   a2     - on entry, must hold the coprocessor index. On exit, 0 if OK.
//   a3-a15 - may all be used and trashed by this routine.
//-----------------------------------------------------------------------------

    .text
    .align  4
    .global _xt_coproc_handler

_xt_coproc_handler:

    mov     a7, a0                          // a7 = return address
    mov     a5, a2                          // a5 = CP index n

    // Get coprocessor state save area of new owner thread.
    call0   XT_RTOS_CP_STATE                // a15 = new owner's save area
    beqz    a15, .L_xt_coproc_invalid       // not in a thread (invalid)
    l32i    a4, a15, XT_CP_ASA              // actual save area address
    beqz    a4, .L_xt_coproc_invalid        // thread has no save area

    // Enable the coprocessor's bit in CPENABLE.
    movi    a0, _xt_coproc_mask
    rsr     a4, CPENABLE                    // a4 = CPENABLE
    addx4   a0, a5, a0                      // a0 = &_xt_coproc_mask[n]
    l32i    a0, a0, 0                       // a0 = (n << 16) | (1 << n)
    movi    a3, _xt_coproc_owner_sa
    extui   a2, a0, 0, 16                   // coprocessor bitmask portion
    or      a4, a4, a2                      // a4 = CPENABLE | (1 << n)
    wsr     a4, CPENABLE

    // Get old coprocessor owner thread (save area ptr) and assign new one.
    addx4   a3, a5, a3                      // a3 = &_xt_coproc_owner_sa[n]
    l32i    a2, a3, 0                       // a2 = old owner's save area
    s32i    a15, a3, 0                      // _xt_coproc_owner_sa[n] = new
    rsync                                   // ensure wsr.CPENABLE is complete

    // Do we need to context-switch this coprocessor?
    beq     a15, a2, .L_xt_coproc_done      // new owner == old, we're done

    // If there is no old owner then there is nothing to save.
    beqz    a2, .L_check_new

    // If old owner not actively using CP then nothing to save.
    l16ui   a4, a2, XT_CPENABLE             // a4 = old owner's CPENABLE
    bnone   a4, a0, .L_check_new            // old owner not using CP

.L_save_old:
    // We need to save old owner's coprocessor state.
    movi    a5, _xt_coproc_sa_offset

    // Mark old owner state as no longer active (CPENABLE bit n clear).
    xor     a4, a4, a0                      // clear CP in old owner's CPENABLE
    s16i    a4, a2, XT_CPENABLE             // update old owner's CPENABLE

    extui   a4, a0, 16, 5                   // a4 = CP index = n
    addx4   a5, a4, a5                      // a5 = &_xt_coproc_sa_offset[n]

    // Mark old owner state as saved (CPSTORED bit n set).
    l16ui   a4, a2, XT_CPSTORED             // a4 = old owner's CPSTORED
    l32i    a5, a5, 0                       // a5 = XT_CP[n]_SA offset
    or      a4, a4, a0                      // set CP in old owner's CPSTORED
    s16i    a4, a2, XT_CPSTORED             // update old owner's CPSTORED
    l32i    a2, a2, XT_CP_ASA               // ptr to actual (aligned) save area
    extui   a3, a0, 16, 5                   // a3 = CP index = n
    add     a2, a2, a5                      // a2 = old owner's area for CP n

    // The config-specific HAL macro invoked below destroys a2-a6.
    // It is theoretically possible for Xtensa processor designers to write TIE
    // that causes more address registers to be affected, but it is generally
    // unlikely. If that ever happens, more registers need to be saved/restored
    // around this macro invocation, and the value in a15 needs to be recomputed.

    xchal_cpi_store_funcbody

.L_check_new:
    // Check if any state has to be restored for the new owner.
    // NOTE: a15 = new owner's save area, cannot be zero when we get here.

    l16ui   a3, a15, XT_CPSTORED            // a3 = new owner's CPSTORED
    movi    a4, _xt_coproc_sa_offset
    bnone   a3, a0, .L_check_cs             // full CP not saved, check callee-saved
    xor     a3, a3, a0                      // CPSTORED bit is set, clear it
    s16i    a3, a15, XT_CPSTORED            // update new owner's CPSTORED

    // Adjust new owner's save area pointers to area for CP n.
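    // (Informational note: the CP index is re-derived below from the upper
    //  half of the mask word still held in a0, because a5, which held the
    //  index at entry, may have been clobbered by the save path above.)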
    extui   a3, a0, 16, 5                   // a3 = CP index = n
    addx4   a4, a3, a4                      // a4 = &_xt_coproc_sa_offset[n]
    l32i    a4, a4, 0                       // a4 = XT_CP[n]_SA
    l32i    a5, a15, XT_CP_ASA              // ptr to actual (aligned) save area
    add     a2, a4, a5                      // a2 = new owner's area for CP n

    // The config-specific HAL macro invoked below destroys a2-a6.
    // It is theoretically possible for Xtensa processor designers to write TIE
    // that causes more address registers to be affected, but it is generally
    // unlikely. If that ever happens, more registers need to be saved/restored
    // around this macro invocation.

    xchal_cpi_load_funcbody

.L_xt_coproc_done:
    movi    a2, 0                           // a2 <- 0 == OK
.L_xt_coproc_err:
    mov     a0, a7                          // return address
    ret

.L_check_cs:
    // a0 = CP mask in low bits, a15 = new owner's save area.
    l16ui   a2, a15, XT_CP_CS_ST            // a2 = mask of CPs saved
    bnone   a2, a0, .L_xt_coproc_done       // if no match then done
    and     a2, a2, a0                      // a2 = which CPs to restore
    extui   a2, a2, 0, 8                    // extract low 8 bits
    call0   _xt_coproc_restorecs            // restore CP registers
    j       .L_xt_coproc_done

.L_xt_coproc_invalid:
    // Coprocessor exception occurred outside a thread, or the thread did not
    // allocate space to save coprocessor state. Return an error.
    movi    a2, 1
    j       .L_xt_coproc_err


//-----------------------------------------------------------------------------
// _tx_thread_coproc_state
//
// Helper function to return the coprocessor save area for the current thread.
// Returns, in a15, the pointer to the save area if there is one, else zero.
// If in interrupt context, returns zero. Only uses a15.
// Must be called only via call0.
//-----------------------------------------------------------------------------

    .global _tx_thread_coproc_state
    .type   _tx_thread_coproc_state,@function
    .align  4

_tx_thread_coproc_state:

    // return ( _tx_thread_system_state == 0 && _tx_thread_current_ptr != 0
    //          ? (&_tx_thread_current_ptr->tx_thread_cp_state) : 0 )

    movi    a15, _tx_thread_system_state    // check if interrupt state
    l32i    a15, a15, 0
    bnez    a15, 1f
    movi    a15, _tx_thread_current_ptr     // check if thread running
    l32i    a15, a15, 0
    beqz    a15, 2f

    // Return base address of current thread's coprocessor save area.
    addi    a15, a15, tx_thread_cp_state
    ret
1:
    movi    a15, 0                          // in interrupt context, return 0
2:
    ret


//-----------------------------------------------------------------------------
// _xt_coproc_savecs
//
// If there is a current thread and it has a coprocessor state save area, then
// save all callee-saved state into this area. This function is called from the
// solicited context switch handler. It calls a system-specific function to get
// the coprocessor save area base address.
//
// Entry conditions:
//   - The thread being switched out is still the current thread.
//   - CPENABLE state reflects which coprocessors are active.
//   - Registers have been saved/spilled already.
//
// Exit conditions:
//   - All necessary CP callee-saved state has been saved.
//   - Registers a7-a15 have been trashed.
//
// Must be called from assembly code only, using CALL0.
//-----------------------------------------------------------------------------

    .global _xt_coproc_savecs
    .type   _xt_coproc_savecs,@function
    .align  4
_xt_coproc_savecs:

    // At entry, CPENABLE should reflect which CPs are enabled.
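    // (Informational note: this routine only reads CPENABLE and never modifies
    //  it; it writes only the thread's save area, and exits immediately below
    //  if no coprocessor is currently enabled.)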

    rsr     a11, CPENABLE                   // a11 = which CPs are enabled
    beqz    a11, .Ldone                     // quick exit if none
    mov     a14, a0                         // save return address
    call0   XT_RTOS_CP_STATE                // get address of CP save area
    mov     a0, a14                         // restore return address
    beqz    a15, .Ldone                     // if none then nothing to do
    l32i    a14, a15, XT_CP_ASA             // a14 = base of aligned save area
    beqz    a14, .Ldone                     // no save area, nothing to do
    s16i    a11, a15, XT_CP_CS_ST           // save mask of CPs being stored
    movi    a13, _xt_coproc_sa_offset       // array of CP save offsets
    l32i    a15, a15, XT_CP_ASA             // a15 = base of aligned save area

#if XCHAL_CP0_SA_SIZE
    bbci.l  a11, 0, 2f                      // CP 0 not enabled
    l32i    a14, a13, 0                     // a14 = _xt_coproc_sa_offset[0]
    add     a12, a14, a15                   // a12 = save area for CP 0
    xchal_cp0_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a11, 1, 2f                      // CP 1 not enabled
    l32i    a14, a13, 4                     // a14 = _xt_coproc_sa_offset[1]
    add     a12, a14, a15                   // a12 = save area for CP 1
    xchal_cp1_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a11, 2, 2f
    l32i    a14, a13, 8
    add     a12, a14, a15
    xchal_cp2_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a11, 3, 2f
    l32i    a14, a13, 12
    add     a12, a14, a15
    xchal_cp3_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a11, 4, 2f
    l32i    a14, a13, 16
    add     a12, a14, a15
    xchal_cp4_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a11, 5, 2f
    l32i    a14, a13, 20
    add     a12, a14, a15
    xchal_cp5_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a11, 6, 2f
    l32i    a14, a13, 24
    add     a12, a14, a15
    xchal_cp6_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a11, 7, 2f
    l32i    a14, a13, 28
    add     a12, a14, a15
    xchal_cp7_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone:
    ret


//-----------------------------------------------------------------------------
// _xt_coproc_restorecs
//
// Restore any callee-saved coprocessor state for the incoming thread.
// This function is called from coprocessor exception handling, when giving
// ownership to a thread that solicited a context switch earlier. It calls a
// system-specific function to get the coprocessor save area base address.
//
// Entry conditions:
//   - The incoming thread is set as the current thread.
//   - CPENABLE is set up correctly for all required coprocessors.
//   - a2 = mask of coprocessors to be restored.
//
// Exit conditions:
//   - All necessary CP callee-saved state has been restored.
//   - CPENABLE - unchanged.
//   - Registers a2, a8-a15 have been trashed.
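//   - The XT_CP_CS_ST bits for the restored coprocessors have been cleared in
//     the thread's save area (the call site in .L_check_cs ensures a2 is a
//     subset of the saved mask).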
//
// Must be called from assembly code only, using CALL0.
//-----------------------------------------------------------------------------

    .global _xt_coproc_restorecs
    .type   _xt_coproc_restorecs,@function
    .align  4
_xt_coproc_restorecs:

    mov     a14, a0                         // save return address
    call0   XT_RTOS_CP_STATE                // get address of CP save area
    mov     a0, a14                         // restore return address
    beqz    a15, .Ldone2                    // if none then nothing to do
    l32i    a14, a15, XT_CP_ASA             // a14 = base of aligned save area
    beqz    a14, .Ldone2                    // no save area, nothing to do
    l16ui   a13, a15, XT_CP_CS_ST           // a13 = which CPs have been saved
    xor     a13, a13, a2                    // clear the ones being restored
    s16i    a13, a15, XT_CP_CS_ST           // update saved CP mask
    movi    a13, _xt_coproc_sa_offset       // array of CP save offsets
    l32i    a15, a15, XT_CP_ASA             // a15 = base of aligned save area

#if XCHAL_CP0_SA_SIZE
    bbci.l  a2, 0, 2f                       // CP 0 not being restored
    l32i    a14, a13, 0                     // a14 = _xt_coproc_sa_offset[0]
    add     a12, a14, a15                   // a12 = save area for CP 0
    xchal_cp0_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2, 1, 2f                       // CP 1 not being restored
    l32i    a14, a13, 4                     // a14 = _xt_coproc_sa_offset[1]
    add     a12, a14, a15                   // a12 = save area for CP 1
    xchal_cp1_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2, 2, 2f
    l32i    a14, a13, 8
    add     a12, a14, a15
    xchal_cp2_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2, 3, 2f
    l32i    a14, a13, 12
    add     a12, a14, a15
    xchal_cp3_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2, 4, 2f
    l32i    a14, a13, 16
    add     a12, a14, a15
    xchal_cp4_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2, 5, 2f
    l32i    a14, a13, 20
    add     a12, a14, a15
    xchal_cp5_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2, 6, 2f
    l32i    a14, a13, 24
    add     a12, a14, a15
    xchal_cp6_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2, 7, 2f
    l32i    a14, a13, 28
    add     a12, a14, a15
    xchal_cp7_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone2:
    ret


#if XCHAL_HAVE_XEA3

//-----------------------------------------------------------------------------
// For XEA3, coprocessor exceptions come here. This is a wrapper function that
// calls _xt_coproc_handler() to do the actual work. We don't want the handler
// to be interrupted, because that might cause a round-robin switch and leave
// coprocessor context in an inconsistent state, so interrupts are disabled
// before calling the handler. They will be re-enabled on return from exception.
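// The coprocessor index is recovered from bits [11:8] of the exccause word
// saved in the exception frame (see the extui below).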
//-----------------------------------------------------------------------------

    .text
    .global _xt_coproc_exc
    .type   _xt_coproc_exc,@function
    .align  4

_xt_coproc_exc:
#ifdef __XTENSA_CALL0_ABI__
    addi    a1, a1, -16                     // reserve 16 bytes on stack
    s32i    a0, a1, 0                       // save return address
    s32i    a15, a1, 8                      // must save a15 (see dispatch)
    movi    a3, PS_DI_MASK
    xps     a3, a3                          // Set PS.DI, disable interrupts
    l32i    a3, a2, XT_STK_EXCCAUSE         // a3 <- exccause
    extui   a2, a3, 8, 4                    // a2 <- CP index
    call0   _xt_coproc_handler
    l32i    a0, a1, 0                       // restore return address
    l32i    a15, a1, 8                      // restore a15
    addi    a1, a1, 16
    ret
#else
    entry   a1, 48                          // allocate 48-byte stack frame
    s32i    a0, a1, 0                       // save return address
    movi    a3, PS_DI_MASK
    xps     a3, a3                          // Set PS.DI, disable interrupts
    l32i    a3, a2, XT_STK_EXCCAUSE         // a3 <- exccause
    extui   a2, a3, 8, 4                    // a2 <- CP index
    call0   _xt_coproc_handler
    l32i    a0, a1, 0                       // restore return address
    retw
#endif

#endif // XCHAL_HAVE_XEA3


#if XCHAL_HAVE_XEA2

//-----------------------------------------------------------------------------
// XEA2 coprocessor exception dispatcher. Save enough state to be able to call
// the coprocessor handler, then restore and return.
//-----------------------------------------------------------------------------

    .text
    .global _xt_coproc_exc
    .type   _xt_coproc_exc,@function
    .align  4

_xt_coproc_exc:

    mov     a0, sp                          // Allocate stack frame
    addi    sp, sp, -XT_STK_FRMSZ
    s32i    a0, sp, XT_STK_A1               // save SP
#if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -12                     // for debug backtrace
#endif
    rsr     a0, PS
    s32i    a0, sp, XT_STK_PS               // save PS
    rsr     a0, EPC_1
    s32i    a0, sp, XT_STK_PC               // save PC
    rsr     a0, EXCSAVE_1
    s32i    a0, sp, XT_STK_A0               // retrieve and save a0
#if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -16                     // for debug backtrace
#endif
    s32i    a2, sp, XT_STK_A2
    s32i    a3, sp, XT_STK_A3
    s32i    a4, sp, XT_STK_A4
    s32i    a5, sp, XT_STK_A5
    s32i    a6, sp, XT_STK_A6
    s32i    a7, sp, XT_STK_A7
    s32i    a8, sp, XT_STK_A8
    s32i    a9, sp, XT_STK_A9
    s32i    a10, sp, XT_STK_A10
    s32i    a11, sp, XT_STK_A11
    s32i    a12, sp, XT_STK_A12
    s32i    a13, sp, XT_STK_A13
    s32i    a14, sp, XT_STK_A14
    s32i    a15, sp, XT_STK_A15

    rsr     a3, EXCCAUSE                    // a3 <- exccause
    addi    a2, a3, -EXCCAUSE_CP0_DISABLED  // a2 <- CP index
    call0   _xt_coproc_handler

    mov     a0, a2                          // save return value
    l32i    a2, sp, XT_STK_A2
    l32i    a3, sp, XT_STK_A3
    l32i    a4, sp, XT_STK_A4
    l32i    a5, sp, XT_STK_A5
    l32i    a6, sp, XT_STK_A6
    l32i    a7, sp, XT_STK_A7
    l32i    a8, sp, XT_STK_A8
    l32i    a9, sp, XT_STK_A9
    l32i    a10, sp, XT_STK_A10
    l32i    a11, sp, XT_STK_A11
    l32i    a12, sp, XT_STK_A12
    l32i    a13, sp, XT_STK_A13
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    bnez    a0, .Lfail                      // abort if failure
    l32i    a0, sp, XT_STK_PC
    wsr     a0, EPC_1                       // restore PC
    l32i    a0, sp, XT_STK_PS
    wsr     a0, PS                          // restore PS
    l32i    a0, sp, XT_STK_A0
    addi    a1, a1, XT_STK_FRMSZ            // deallocate stack frame
    rfe

.Lfail:
    call0   _xt_panic

#endif // XCHAL_HAVE_XEA2

#endif // XCHAL_CP_NUM > 0

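
// (Informational) In the XEA2 dispatcher above, the coprocessor index is
// obtained by subtracting EXCCAUSE_CP0_DISABLED from the exception cause;
// the "Coprocessor n Disabled" cause codes are consecutive, so for example a
// CP2-disabled exception yields index 2, which _xt_coproc_handler then
// enables and, if necessary, lazily switches.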