/*
//-----------------------------------------------------------------------------
// Copyright (c) 2003-2015 Cadence Design Systems, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//-----------------------------------------------------------------------------
*/

#include "xtensa_rtos.h"
#include "sdkconfig.h"

#define TOPOFSTACK_OFFS     0x00    /* StackType_t *pxTopOfStack */
#define CP_TOPOFSTACK_OFFS  0x04    /* xMPU_SETTINGS.coproc_area */

.extern pxCurrentTCB

/*
*******************************************************************************
* Interrupt stack. The size of the interrupt stack is determined by the config
* parameter "configISR_STACK_SIZE" in FreeRTOSConfig.h
*******************************************************************************
*/

    .data
    .align      16
    .global     port_IntStack
    .global     port_IntStackTop
    .global     port_switch_flag
port_IntStack:
    .space      configISR_STACK_SIZE*portNUM_PROCESSORS    /* This allocates stacks for each individual CPU. */
port_IntStackTop:
    .word       0
port_switch_flag:
    .space      portNUM_PROCESSORS*4                       /* One flag for each individual CPU. */

    .text

/*
*******************************************************************************
* _frxt_setup_switch
* void _frxt_setup_switch(void);
*
* Sets an internal flag indicating that a task switch is required on return
* from interrupt handling.
*
*******************************************************************************
*/
    .global     _frxt_setup_switch
    .type       _frxt_setup_switch,@function
    .align      4
_frxt_setup_switch:

    ENTRY(16)

    getcoreid   a3
    movi        a2, port_switch_flag
    addx4       a2, a3, a2

    movi        a3, 1
    s32i        a3, a2, 0

    RET(16)

/*
*******************************************************************************
* _frxt_int_enter
* void _frxt_int_enter(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_ENTER function for
* FreeRTOS. Saves the rest of the interrupt context (not already saved).
* May only be called from assembly code by the 'call0' instruction, with
* interrupts disabled.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*******************************************************************************
*/
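/*
    As a rough guide (not part of the original port), the enter path below is
    approximately the following C, where "cpu" is the getcoreid result and the
    array indexing is illustrative only:

    if (port_xSchedulerRunning[cpu]) {
        if (++port_interruptNesting[cpu] == 1 && pxCurrentTCB[cpu] != NULL) {
            pxCurrentTCB[cpu]->pxTopOfStack = SP;                       // save interruptee's SP
            SP = &port_IntStack[configISR_STACK_SIZE * (cpu + 1)];      // top of this CPU's ISR stack
        }
    }
    // With CONFIG_FREERTOS_FPU_IN_ISR, the task's CPENABLE is pushed onto the
    // ISR stack here and CPENABLE is then cleared, so any coprocessor use in
    // the ISR traps and can be managed explicitly.
*/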
    .globl  _frxt_int_enter
    .type   _frxt_int_enter,@function
    .align  4
_frxt_int_enter:

    /* Save a12-13 in the stack frame as required by _xt_context_save. */
    s32i    a12, a1, XT_STK_A12
    s32i    a13, a1, XT_STK_A13

    /* Save return address in a safe place (free a0). */
    mov     a12, a0

    /* Save the rest of the interrupted context (preserves A12-13). */
    call0   _xt_context_save

    /*
    Save interrupted task's SP in TCB only if not nesting.
    Manage nesting directly rather than call the generic IntEnter()
    (in windowed ABI we can't call a C function here anyway because PS.EXCM is still set).
    */
    getcoreid a4
    movi    a2, port_xSchedulerRunning
    addx4   a2, a4, a2
    movi    a3, port_interruptNesting
    addx4   a3, a4, a3
    l32i    a2, a2, 0                   /* a2 = port_xSchedulerRunning     */
    beqz    a2, 1f                      /* scheduler not running, no tasks */
    l32i    a2, a3, 0                   /* a2 = port_interruptNesting      */
    addi    a2, a2, 1                   /* increment nesting count         */
    s32i    a2, a3, 0                   /* save nesting count              */
    bnei    a2, 1, .Lnested             /* !=0 before incr, so nested      */

    movi    a2, pxCurrentTCB
    addx4   a2, a4, a2
    l32i    a2, a2, 0                   /* a2 = current TCB                */
    beqz    a2, 1f
    s32i    a1, a2, TOPOFSTACK_OFFS     /* pxCurrentTCB->pxTopOfStack = SP */
    movi    a1, port_IntStack+configISR_STACK_SIZE  /* a1 = top of intr stack for CPU 0 */
    movi    a2, configISR_STACK_SIZE    /* add configISR_STACK_SIZE * cpu_num to arrive at top of stack for cpu_num */
    mull    a2, a4, a2
    add     a1, a1, a2                  /* for current proc */

    #ifdef CONFIG_FREERTOS_FPU_IN_ISR
    #if XCHAL_CP_NUM > 0
    rsr     a3, CPENABLE                /* Restore thread scope CPENABLE */
    addi    sp, sp, -4                  /* ISR will manage FPU coprocessor by forcing */
    s32i    a3, a1, 0                   /* its trigger */
    #endif
    #endif

.Lnested:
1:
    #ifdef CONFIG_FREERTOS_FPU_IN_ISR
    #if XCHAL_CP_NUM > 0
    movi    a3, 0                       /* whilst ISRs pending keep CPENABLE exception active */
    wsr     a3, CPENABLE
    rsync
    #endif
    #endif

    mov     a0, a12                     /* restore return addr and return */
    ret

/*
*******************************************************************************
* _frxt_int_exit
* void _frxt_int_exit(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_EXIT function for
* FreeRTOS. If required, calls vPortYieldFromInt() to perform task context
* switching, restore the (possibly) new task's context, and return to the
* exit dispatcher saved in the task's stack frame at XT_STK_EXIT.
* May only be called from assembly code by the 'call0' instruction. Does not
* return to caller.
* See the description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*******************************************************************************
*/
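/*
    As an informal sketch (not part of the original port), the exit path below
    behaves roughly like this C, with "cpu" from getcoreid and illustrative
    array indexing:

    rsil(XCHAL_EXCM_LEVEL);                                  // lock out interrupts
    if (port_xSchedulerRunning[cpu] && --port_interruptNesting[cpu] == 0) {
        SP = pxCurrentTCB[cpu]->pxTopOfStack;                // leave the ISR stack
        if (port_switch_flag[cpu]) {                         // set by _frxt_setup_switch()
            port_switch_flag[cpu] = 0;
            vPortYieldFromInt();                             // switch to the next ready task
        }
    }
    restore_context();                                       // _xt_context_restore
    jump_to(*(void **)(SP + XT_STK_EXIT));                   // return via the exit dispatcher
*/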
    .globl  _frxt_int_exit
    .type   _frxt_int_exit,@function
    .align  4
_frxt_int_exit:

    getcoreid a4
    movi    a2, port_xSchedulerRunning
    addx4   a2, a4, a2
    movi    a3, port_interruptNesting
    addx4   a3, a4, a3
    rsil    a0, XCHAL_EXCM_LEVEL        /* lock out interrupts             */
    l32i    a2, a2, 0                   /* a2 = port_xSchedulerRunning     */
    beqz    a2, .Lnoswitch              /* scheduler not running, no tasks */
    l32i    a2, a3, 0                   /* a2 = port_interruptNesting      */
    addi    a2, a2, -1                  /* decrement nesting count         */
    s32i    a2, a3, 0                   /* save nesting count              */
    bnez    a2, .Lnesting               /* !=0 after decr so still nested  */

    #ifdef CONFIG_FREERTOS_FPU_IN_ISR
    #if XCHAL_CP_NUM > 0
    l32i    a3, sp, 0                   /* Grab last CPENABLE before leave ISR */
    addi    sp, sp, 4
    wsr     a3, CPENABLE
    rsync                               /* ensure CPENABLE was modified    */
    #endif
    #endif

    movi    a2, pxCurrentTCB
    addx4   a2, a4, a2
    l32i    a2, a2, 0                   /* a2 = current TCB                */
    beqz    a2, 1f                      /* no task ? go to dispatcher      */
    l32i    a1, a2, TOPOFSTACK_OFFS     /* SP = pxCurrentTCB->pxTopOfStack */

    movi    a2, port_switch_flag        /* address of switch flag          */
    addx4   a2, a4, a2                  /* point to flag for this cpu      */
    l32i    a3, a2, 0                   /* a3 = port_switch_flag           */
    beqz    a3, .Lnoswitch              /* flag = 0 means no switch reqd   */
    movi    a3, 0
    s32i    a3, a2, 0                   /* zero out the flag for next time */

1:
    /*
    Call0 ABI callee-saved regs a12-15 need to be saved before possible preemption.
    However a12-13 were already saved by _frxt_int_enter().
    */
    #ifdef __XTENSA_CALL0_ABI__
    s32i    a14, a1, XT_STK_A14
    s32i    a15, a1, XT_STK_A15
    #endif

    #ifdef __XTENSA_CALL0_ABI__
    call0   vPortYieldFromInt           /* call dispatch inside the function; never returns */
    #else
    call4   vPortYieldFromInt           /* this one returns */
    call0   _frxt_dispatch              /* tail-call dispatcher */
    /* Never returns here. */
    #endif

.Lnoswitch:
    /*
    If we come here, we are about to resume the interrupted task.
    */

.Lnesting:
    /*
    We come here only if there was no context switch, that is if this
    is a nested interrupt, or the interrupted task was not preempted.
    In either case there's no need to load the SP.
    */

    /* Restore full context from interrupt stack frame */
    call0   _xt_context_restore

    /*
    Must return via the exit dispatcher corresponding to the entrypoint from which
    this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt
    stack frame is deallocated in the exit dispatcher.
    */
    l32i    a0, a1, XT_STK_EXIT
    ret


/*
**********************************************************************************************************
* _frxt_timer_int
* void _frxt_timer_int(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_TIMER_INT function for FreeRTOS.
* Called every timer interrupt.
* Manages the tick timer and calls xPortSysTickHandler() every tick.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
* Callable from C (obeys ABI conventions). Implemented in assembly code for performance.
*
**********************************************************************************************************
*/
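/*
    XT_TICK_DIVISOR is the number of CCOUNT cycles per RTOS tick. In the Xtensa
    port headers it is typically derived from the CPU clock and tick rate,
    roughly:

        divisor = XT_CLOCK_FREQ / XT_TICK_RATE_HZ;
        // e.g. a 160 MHz CCOUNT and a 100 Hz tick give 1,600,000 cycles per tick

    When XT_CLOCK_FREQ is not a compile-time constant, the run-time value
    _xt_tick_divisor (filled in by _xt_tick_divisor_init()) is used instead,
    exactly as the #ifdef below selects.
*/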
#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
    .globl  _frxt_timer_int
    .type   _frxt_timer_int,@function
    .align  4
_frxt_timer_int:

    /*
    Xtensa timers work by comparing a cycle counter with a preset value. Once the match occurs
    an interrupt is generated, and the handler has to set a new cycle count into the comparator.
    To avoid clock drift due to interrupt latency, the new cycle count is computed from the old,
    not the time the interrupt was serviced. However if a timer interrupt is ever serviced more
    than one tick late, it is necessary to process multiple ticks until the new cycle count is
    in the future, otherwise the next timer interrupt would not occur until after the cycle
    counter had wrapped (2^32 cycles later).

    do {
        ticks++;
        old_ccompare = read_ccompare_i();
        write_ccompare_i( old_ccompare + divisor );
        service one tick;
        diff = read_ccount() - old_ccompare;
    } while ( diff > divisor );
    */

    ENTRY(16)

    #ifdef CONFIG_PM_TRACE
    movi    a6, 1                       /* = ESP_PM_TRACE_TICK */
    getcoreid a7
    call4   esp_pm_trace_enter
    #endif // CONFIG_PM_TRACE

.L_xt_timer_int_catchup:

    /* Update the timer comparator for the next tick. */
    #ifdef XT_CLOCK_FREQ
    movi    a2, XT_TICK_DIVISOR         /* a2 = comparator increment        */
    #else
    movi    a3, _xt_tick_divisor
    l32i    a2, a3, 0                   /* a2 = comparator increment        */
    #endif
    rsr     a3, XT_CCOMPARE             /* a3 = old comparator value        */
    add     a4, a3, a2                  /* a4 = new comparator value        */
    wsr     a4, XT_CCOMPARE             /* update comp. and clear interrupt */
    esync

    #ifdef __XTENSA_CALL0_ABI__
    /* Preserve a2 and a3 across C calls. */
    s32i    a2, sp, 4
    s32i    a3, sp, 8
    #endif

    /* Call the FreeRTOS tick handler (see port_systick.c). */
    #ifdef __XTENSA_CALL0_ABI__
    call0   xPortSysTickHandler
    #else
    call4   xPortSysTickHandler
    #endif

    #ifdef __XTENSA_CALL0_ABI__
    /* Restore a2 and a3. */
    l32i    a2, sp, 4
    l32i    a3, sp, 8
    #endif

    /* Check if we need to process more ticks to catch up. */
    esync                               /* ensure comparator update complete */
    rsr     a4, CCOUNT                  /* a4 = cycle count                  */
    sub     a4, a4, a3                  /* diff = ccount - old comparator    */
    blt     a2, a4, .L_xt_timer_int_catchup  /* repeat while diff > divisor  */

#ifdef CONFIG_PM_TRACE
    movi    a6, 1                       /* = ESP_PM_TRACE_TICK */
    getcoreid a7
    call4   esp_pm_trace_exit
#endif // CONFIG_PM_TRACE

    RET(16)
#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT

/*
**********************************************************************************************************
* _frxt_tick_timer_init
* void _frxt_tick_timer_init(void)
*
* Initialize timer and timer interrupt handler (_xt_tick_divisor_init() has already been called).
* Callable from C (obeys ABI conventions on entry).
*
**********************************************************************************************************
*/
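/*
    A loose C equivalent of the initialization below (HAL-style calls are shown
    only for illustration; the assembly uses rsr/wsr and xt_ints_on() directly):

    unsigned divisor = XT_TICK_DIVISOR;                                // or _xt_tick_divisor at run time
    xthal_set_ccompare(XT_TIMER_INDEX, xthal_get_ccount() + divisor);  // first tick one period from now
    xt_ints_on(XT_TIMER_INTEN);                                        // INTENABLE may be virtualized, so use the helper
*/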
#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
    .globl  _frxt_tick_timer_init
    .type   _frxt_tick_timer_init,@function
    .align  4
_frxt_tick_timer_init:

    ENTRY(16)

    /* Set up the periodic tick timer (assume enough time to complete init). */
    #ifdef XT_CLOCK_FREQ
    movi    a3, XT_TICK_DIVISOR
    #else
    movi    a2, _xt_tick_divisor
    l32i    a3, a2, 0
    #endif
    rsr     a2, CCOUNT                  /* current cycle count           */
    add     a2, a2, a3                  /* time of first timer interrupt */
    wsr     a2, XT_CCOMPARE             /* set the comparator            */

    /*
    Enable the timer interrupt at the device level. Don't write directly
    to the INTENABLE register because it may be virtualized.
    */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a2, XT_TIMER_INTEN
    call0   xt_ints_on
    #else
    movi    a6, XT_TIMER_INTEN
    movi    a3, xt_ints_on
    callx4  a3
    #endif

    RET(16)
#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT

/*
**********************************************************************************************************
* DISPATCH THE HIGHEST PRIORITY READY TASK
* void _frxt_dispatch(void)
*
* Switch context to the highest priority ready task, restore its state and dispatch control to it.
*
* This is a common dispatcher that acts as a shared exit path for all the context switch functions
* including vPortYield() and vPortYieldFromInt(), all of which tail-call this dispatcher
* (for windowed ABI vPortYieldFromInt() calls it indirectly via _frxt_int_exit()).
*
* The Xtensa port uses different stack frames for solicited and unsolicited task suspension (see
* comments on stack frames in xtensa_context.h). This function restores the state accordingly.
* If restoring a task that solicited entry, restores the minimal state and leaves CPENABLE clear.
* If restoring a task that was preempted, restores all state including the task's CPENABLE.
*
* Entry:
*   pxCurrentTCB points to the TCB of the task to suspend.
*   Because it is tail-called without a true function entrypoint, it needs no 'entry' instruction.
*
* Exit:
*   If the incoming task called vPortYield() (solicited), this function returns as if from vPortYield().
*   If the incoming task was preempted by an interrupt, this function jumps to the exit dispatcher.
*
**********************************************************************************************************
*/
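/*
    The frame-type test in _frxt_dispatch relies on the first word of both frame
    layouts sitting at the same offset (XT_STK_EXIT in an interrupt frame,
    XT_SOL_EXIT in a solicited frame; see xtensa_context.h): an interrupt frame
    holds the exit dispatcher address there (non-zero), while vPortYield() stores
    0. An informal sketch of the flow (illustrative C, not the port's API):

    vTaskSwitchContext();                           // selects the new pxCurrentTCB
    SP = pxCurrentTCB[cpu]->pxTopOfStack;
    if (*(uint32_t *)(SP + XT_STK_EXIT) == 0) {
        restore_solicited_frame();                  // minimal restore, return as from vPortYield()
    } else {
        restore_cpenable_and_full_frame();          // then jump to the saved exit dispatcher
    }
*/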
    .globl  _frxt_dispatch
    .type   _frxt_dispatch,@function
    .align  4
_frxt_dispatch:

    #ifdef __XTENSA_CALL0_ABI__
    call0   vTaskSwitchContext          // Get next TCB to resume
    movi    a2, pxCurrentTCB
    getcoreid a3
    addx4   a2, a3, a2
    #else
    call4   vTaskSwitchContext          // Get next TCB to resume
    movi    a2, pxCurrentTCB
    getcoreid a3
    addx4   a2, a3, a2
    #endif
    l32i    a3, a2, 0
    l32i    sp, a3, TOPOFSTACK_OFFS     /* SP = next_TCB->pxTopOfStack;     */
    s32i    a3, a2, 0

    /* Determine the type of stack frame. */
    l32i    a2, sp, XT_STK_EXIT         /* exit dispatcher or solicited flag */
    bnez    a2, .L_frxt_dispatch_stk

.L_frxt_dispatch_sol:

    /* Solicited stack frame. Restore minimal context and return from vPortYield(). */
    l32i    a3, sp, XT_SOL_PS
    #ifdef __XTENSA_CALL0_ABI__
    l32i    a12, sp, XT_SOL_A12
    l32i    a13, sp, XT_SOL_A13
    l32i    a14, sp, XT_SOL_A14
    l32i    a15, sp, XT_SOL_A15
    #endif
    l32i    a0, sp, XT_SOL_PC
    #if XCHAL_CP_NUM > 0
    /* Ensure wsr.CPENABLE is complete (should be, it was cleared on entry). */
    rsync
    #endif
    /* As soon as PS is restored, interrupts can happen. No need to sync PS. */
    wsr     a3, PS
    #ifdef __XTENSA_CALL0_ABI__
    addi    sp, sp, XT_SOL_FRMSZ
    ret
    #else
    retw
    #endif

.L_frxt_dispatch_stk:

    #if XCHAL_CP_NUM > 0
    /* Restore CPENABLE from task's co-processor save area. */
    movi    a3, pxCurrentTCB            /* cp_state =                       */
    getcoreid a2
    addx4   a3, a2, a3
    l32i    a3, a3, 0
    l32i    a2, a3, CP_TOPOFSTACK_OFFS  /* StackType_t *pxStack;            */
    l16ui   a3, a2, XT_CPENABLE         /* CPENABLE = cp_state->cpenable;   */
    wsr     a3, CPENABLE
    #endif

    /* Interrupt stack frame. Restore full context and return to exit dispatcher. */
    call0   _xt_context_restore

    /* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */
    #ifdef __XTENSA_CALL0_ABI__
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    #if XCHAL_CP_NUM > 0
    /* Ensure wsr.CPENABLE has completed. */
    rsync
    #endif

    /*
    Must return via the exit dispatcher corresponding to the entrypoint from which
    this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt
    stack frame is deallocated in the exit dispatcher.
    */
    l32i    a0, sp, XT_STK_EXIT
    ret


/*
**********************************************************************************************************
* PERFORM A SOLICITED CONTEXT SWITCH (from a task)
* void vPortYield(void)
*
* This function saves the minimal state needed for a solicited task suspension, clears CPENABLE,
* then tail-calls the dispatcher _frxt_dispatch() to perform the actual context switch.
*
* At Entry:
*   pxCurrentTCB points to the TCB of the task to suspend
*   Callable from C (obeys ABI conventions on entry).
*
* Does not return to caller.
*
**********************************************************************************************************
*/
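/*
    A rough picture of the solicited frame that vPortYield() builds below (field
    names track the XT_SOL_* offsets in xtensa_context.h; the C is illustrative
    only):

    sol_frame.exit = 0;                      // XT_SOL_EXIT: 0 marks a solicited frame
    sol_frame.pc   = return_address;         // XT_SOL_PC
    sol_frame.ps   = PS;                     // XT_SOL_PS
    // Call0 ABI: a12-a15 are saved; windowed ABI: register windows are spilled instead.
    pxCurrentTCB[cpu]->pxTopOfStack = &sol_frame;
    CPENABLE = 0;                            // also cleared in the task's co-processor save area
    _frxt_dispatch();                        // never returns here
*/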
    .globl  vPortYield
    .type   vPortYield,@function
    .align  4
vPortYield:

    #ifdef __XTENSA_CALL0_ABI__
    addi    sp, sp, -XT_SOL_FRMSZ
    #else
    entry   sp, XT_SOL_FRMSZ
    #endif

    rsr     a2, PS
    s32i    a0, sp, XT_SOL_PC
    s32i    a2, sp, XT_SOL_PS
    #ifdef __XTENSA_CALL0_ABI__
    s32i    a12, sp, XT_SOL_A12         /* save callee-saved registers      */
    s32i    a13, sp, XT_SOL_A13
    s32i    a14, sp, XT_SOL_A14
    s32i    a15, sp, XT_SOL_A15
    #else
    /* Spill register windows. Calling xthal_window_spill() causes extra    */
    /* spills and reloads, so we will set things up to call the _nw version */
    /* instead to save cycles.                                              */
    movi    a6, ~(PS_WOE_MASK|PS_INTLEVEL_MASK)  /* spills a4-a7 if needed  */
    and     a2, a2, a6                           /* clear WOE, INTLEVEL     */
    addi    a2, a2, XCHAL_EXCM_LEVEL             /* set INTLEVEL            */
    wsr     a2, PS
    rsync
    call0   xthal_window_spill_nw
    l32i    a2, sp, XT_SOL_PS                    /* restore PS              */
    wsr     a2, PS
    #endif

    rsil    a2, XCHAL_EXCM_LEVEL        /* disable low/med interrupts       */

    #if XCHAL_CP_NUM > 0
    /* Save coprocessor callee-saved state (if any). At this point CPENABLE */
    /* should still reflect which CPs were in use (enabled).                */
    call0   _xt_coproc_savecs
    #endif

    movi    a2, pxCurrentTCB
    getcoreid a3
    addx4   a2, a3, a2
    l32i    a2, a2, 0                   /* a2 = pxCurrentTCB                */
    movi    a3, 0
    s32i    a3, sp, XT_SOL_EXIT         /* 0 to flag as solicited frame     */
    s32i    sp, a2, TOPOFSTACK_OFFS     /* pxCurrentTCB->pxTopOfStack = SP  */

    #if XCHAL_CP_NUM > 0
    /* Clear CPENABLE, also in task's co-processor state save area. */
    l32i    a2, a2, CP_TOPOFSTACK_OFFS  /* a2 = pxCurrentTCB->cp_state      */
    movi    a3, 0
    wsr     a3, CPENABLE
    beqz    a2, 1f
    s16i    a3, a2, XT_CPENABLE         /* clear saved cpenable             */
1:
    #endif

    /* Tail-call dispatcher. */
    call0   _frxt_dispatch
    /* Never reaches here. */


/*
**********************************************************************************************************
* PERFORM AN UNSOLICITED CONTEXT SWITCH (from an interrupt)
* void vPortYieldFromInt(void)
*
* This calls the context switch hook (removed), saves and clears CPENABLE, then tail-calls the dispatcher
* _frxt_dispatch() to perform the actual context switch.
*
* At Entry:
*   Interrupted task context has been saved in an interrupt stack frame at pxCurrentTCB->pxTopOfStack.
*   pxCurrentTCB points to the TCB of the task to suspend,
*   Callable from C (obeys ABI conventions on entry).
*
* At Exit:
*   Windowed ABI defers the actual context switch until the stack is unwound to interrupt entry.
*   Call0 ABI tail-calls the dispatcher directly (no need to unwind) so does not return to caller.
*
**********************************************************************************************************
*/
    .globl  vPortYieldFromInt
    .type   vPortYieldFromInt,@function
    .align  4
vPortYieldFromInt:

    ENTRY(16)

    #if XCHAL_CP_NUM > 0
    /* Save CPENABLE in task's co-processor save area, and clear CPENABLE. */
    movi    a3, pxCurrentTCB            /* cp_state =                       */
    getcoreid a2
    addx4   a3, a2, a3
    l32i    a3, a3, 0

    l32i    a2, a3, CP_TOPOFSTACK_OFFS

    rsr     a3, CPENABLE
    s16i    a3, a2, XT_CPENABLE         /* cp_state->cpenable = CPENABLE;   */
    movi    a3, 0
    wsr     a3, CPENABLE                /* disable all co-processors        */
    #endif

    #ifdef __XTENSA_CALL0_ABI__
    /* Tail-call dispatcher. */
    call0   _frxt_dispatch
    /* Never reaches here. */
    #else
    RET(16)
    #endif

/*
**********************************************************************************************************
* _frxt_task_coproc_state
* void _frxt_task_coproc_state(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_CP_STATE function for FreeRTOS.
*
* May only be called when a task is running, not within an interrupt handler (returns 0 in that case).
* May only be called from assembly code by the 'call0' instruction. Does NOT obey ABI conventions.
* Returns in A15 a pointer to the base of the co-processor state save area for the current task.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
**********************************************************************************************************
*/
#if XCHAL_CP_NUM > 0

    .globl  _frxt_task_coproc_state
    .type   _frxt_task_coproc_state,@function
    .align  4
_frxt_task_coproc_state:

    /* We can use a3 as a scratchpad, the instances of code calling XT_RTOS_CP_STATE don't seem to need it saved. */
    getcoreid a3
    movi    a15, port_xSchedulerRunning /* if (port_xSchedulerRunning       */
    addx4   a15, a3, a15
    l32i    a15, a15, 0
    beqz    a15, 1f
    movi    a15, port_interruptNesting  /* && port_interruptNesting == 0    */
    addx4   a15, a3, a15
    l32i    a15, a15, 0
    bnez    a15, 1f

    movi    a15, pxCurrentTCB
    addx4   a15, a3, a15
    l32i    a15, a15, 0                 /* && pxCurrentTCB != 0) {          */

    beqz    a15, 2f
    l32i    a15, a15, CP_TOPOFSTACK_OFFS
    ret

1:  movi    a15, 0
2:  ret

#endif /* XCHAL_CP_NUM > 0 */
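/*
    For reference, the lookup in _frxt_task_coproc_state corresponds roughly to
    the following C (illustrative only; the real routine is call0-only and
    returns the pointer in A15):

    if (port_xSchedulerRunning[cpu] && port_interruptNesting[cpu] == 0 && pxCurrentTCB[cpu] != NULL) {
        return pxCurrentTCB[cpu]->coproc_area;   // the word at CP_TOPOFSTACK_OFFS
    }
    return NULL;
*/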