/*
 * SPDX-FileCopyrightText: 2015-2019 Cadence Design Systems, Inc.
 *
 * SPDX-License-Identifier: MIT
 *
 * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
 */
/*
 * Copyright (c) 2015-2019 Cadence Design Systems, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "xtensa_rtos.h"
#include "sdkconfig.h"
#include "esp_idf_version.h"

#define TOPOFSTACK_OFFS                 0x00    /* StackType_t *pxTopOfStack */
#define CP_TOPOFSTACK_OFFS              0x04    /* xMPU_SETTINGS.coproc_area */

.extern pxCurrentTCB

/*
*******************************************************************************
* Interrupt stack. The size of the interrupt stack is determined by the config
* parameter "configISR_STACK_SIZE" in FreeRTOSConfig.h
*******************************************************************************
*/

    .data
    .align      16
    .global     port_IntStack
    .global     port_IntStackTop
    .global     port_switch_flag
port_IntStack:
    .space      configISR_STACK_SIZE*portNUM_PROCESSORS     /* This allocates stacks for each individual CPU. */
port_IntStackTop:
    .word       0
port_switch_flag:
    .space      portNUM_PROCESSORS*4 /* One flag for each individual CPU. */

    .text

/*
*******************************************************************************
* _frxt_setup_switch
* void _frxt_setup_switch(void);
*
* Sets an internal flag indicating that a task switch is required on return
* from interrupt handling.
*
*******************************************************************************
*/
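/*
Roughly equivalent C, as an illustrative sketch only (port_switch_flag is the
per-CPU word array declared above; xPortGetCoreID() is the usual ESP-IDF
core-ID helper and is assumed here):

    extern volatile uint32_t port_switch_flag[portNUM_PROCESSORS];

    void _frxt_setup_switch(void)
    {
        port_switch_flag[xPortGetCoreID()] = 1;   // request a switch on interrupt exit
    }
*/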
    .global     _frxt_setup_switch
    .type       _frxt_setup_switch,@function
    .align      4
_frxt_setup_switch:

    ENTRY(16)

    getcoreid a3
    movi    a2, port_switch_flag
    addx4   a2,  a3, a2

    movi    a3, 1
    s32i    a3, a2, 0

    RET(16)





/*
*******************************************************************************
* _frxt_int_enter
* void _frxt_int_enter(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_ENTER function for
* FreeRTOS. Saves the rest of the interrupt context (not already saved).
* May only be called from assembly code by the 'call0' instruction, with
* interrupts disabled.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*******************************************************************************
*/
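/*
The nesting/stack-switch logic below corresponds roughly to the following C
(illustrative sketch only; the per-CPU arrays match the symbols used in this
port, xPortGetCoreID(), the byte view of port_IntStack, and "task_sp" standing
in for the a1/SP register are assumptions):

    int core = xPortGetCoreID();
    if (port_xSchedulerRunning[core]) {
        if (++port_interruptNesting[core] == 1 && pxCurrentTCB[core] != NULL) {
            // Outermost interrupt: remember the task SP, then run on this CPU's ISR stack.
            pxCurrentTCB[core]->pxTopOfStack = (StackType_t *) task_sp;
            task_sp = (void *) &port_IntStack[(core + 1) * configISR_STACK_SIZE];
        }
    }
*/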
    .globl  _frxt_int_enter
    .type   _frxt_int_enter,@function
    .align  4
_frxt_int_enter:

    /* Save a12-13 in the stack frame as required by _xt_context_save. */
    s32i    a12, a1, XT_STK_A12
    s32i    a13, a1, XT_STK_A13

    /* Save return address in a safe place (free a0). */
    mov     a12, a0

    /* Save the rest of the interrupted context (preserves A12-13). */
    call0   _xt_context_save

    /*
    Save interrupted task's SP in TCB only if not nesting.
    Manage nesting directly rather than call the generic IntEnter()
    (in windowed ABI we can't call a C function here anyway because PS.EXCM is still set).
    */
    getcoreid a4
    movi    a2,  port_xSchedulerRunning
    addx4   a2,  a4, a2
    movi    a3,  port_interruptNesting
    addx4   a3,  a4, a3
    l32i    a2,  a2, 0                  /* a2 = port_xSchedulerRunning     */
    beqz    a2,  1f                     /* scheduler not running, no tasks */
    l32i    a2,  a3, 0                  /* a2 = port_interruptNesting      */
    addi    a2,  a2, 1                  /* increment nesting count         */
    s32i    a2,  a3, 0                  /* save nesting count              */
    bnei    a2,  1, .Lnested            /* !=0 before incr, so nested      */

    movi    a2,  pxCurrentTCB
    addx4   a2,  a4, a2
    l32i    a2,  a2, 0                  /* a2 = current TCB                */
    beqz    a2,  1f
    s32i    a1,  a2, TOPOFSTACK_OFFS    /* pxCurrentTCB->pxTopOfStack = SP */
    movi    a1,  port_IntStack+configISR_STACK_SIZE   /* a1 = top of intr stack for CPU 0  */
    movi    a2,  configISR_STACK_SIZE   /* add configISR_STACK_SIZE * cpu_num to arrive at top of stack for cpu_num */
    mull    a2,  a4, a2
    add     a1,  a1, a2                 /* for current proc */

    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifdef CONFIG_FREERTOS_FPU_IN_ISR
    #if XCHAL_CP_NUM > 0
    rsr     a3, CPENABLE                /* Save the task's CPENABLE on the */
    addi    sp, sp,-4                   /* ISR stack; the ISR re-enables   */
    s32i    a3, a1, 0                   /* the FPU on demand               */
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */

.Lnested:
1:
    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifdef CONFIG_FREERTOS_FPU_IN_ISR
    #if XCHAL_CP_NUM > 0
    movi    a3,  0              /* keep CPENABLE cleared while in the ISR so CP use traps */
    wsr     a3,  CPENABLE
    rsync
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */

    mov     a0,  a12                    /* restore return addr and return  */
    ret

/*
*******************************************************************************
* _frxt_int_exit
* void _frxt_int_exit(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_EXIT function for
* FreeRTOS. If required, calls vPortYieldFromInt() to perform task context
* switching, restore the (possibly) new task's context, and return to the
* exit dispatcher saved in the task's stack frame at XT_STK_EXIT.
* May only be called from assembly code by the 'call0' instruction. Does not
* return to caller.
* See the description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*******************************************************************************
*/
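/*
The control flow below is roughly this C (illustrative, simplified sketch only;
the same per-CPU symbols as in _frxt_int_enter(), with "task_sp" standing in
for the a1/SP register):

    int core = xPortGetCoreID();
    if (port_xSchedulerRunning[core] && --port_interruptNesting[core] == 0
        && pxCurrentTCB[core] != NULL) {
        task_sp = pxCurrentTCB[core]->pxTopOfStack;   // back onto the task stack
        if (port_switch_flag[core]) {
            port_switch_flag[core] = 0;
            vPortYieldFromInt();      // switch: ends in _frxt_dispatch(), never returns
        }
    }
    _xt_context_restore();            // no switch: restore interruptee, return via XT_STK_EXIT
*/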
    .globl  _frxt_int_exit
    .type   _frxt_int_exit,@function
    .align  4
_frxt_int_exit:

    getcoreid a4
    movi    a2,  port_xSchedulerRunning
    addx4   a2,  a4, a2
    movi    a3,  port_interruptNesting
    addx4   a3,  a4, a3
    rsil    a0,  XCHAL_EXCM_LEVEL       /* lock out interrupts             */
    l32i    a2,  a2, 0                  /* a2 = port_xSchedulerRunning     */
    beqz    a2,  .Lnoswitch             /* scheduler not running, no tasks */
    l32i    a2,  a3, 0                  /* a2 = port_interruptNesting      */
    addi    a2,  a2, -1                 /* decrement nesting count         */
    s32i    a2,  a3, 0                  /* save nesting count              */
    bnez    a2,  .Lnesting              /* !=0 after decr so still nested  */

    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifdef CONFIG_FREERTOS_FPU_IN_ISR
    #if XCHAL_CP_NUM > 0
    l32i    a3,  sp, 0                  /* Grab last CPENABLE before leaving the ISR */
    addi    sp,  sp, 4
    wsr     a3, CPENABLE
    rsync                               /* ensure CPENABLE was modified */
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */

    movi    a2,  pxCurrentTCB
    addx4   a2,  a4, a2
    l32i    a2,  a2, 0                  /* a2 = current TCB                */
    beqz    a2,  1f                     /* no task ? go to dispatcher      */
    l32i    a1,  a2, TOPOFSTACK_OFFS    /* SP = pxCurrentTCB->pxTopOfStack */

    movi    a2,  port_switch_flag       /* address of switch flag          */
    addx4   a2,  a4, a2                 /* point to flag for this cpu      */
    l32i    a3,  a2, 0                  /* a3 = port_switch_flag           */
    beqz    a3,  .Lnoswitch             /* flag = 0 means no switch reqd   */
    movi    a3,  0
    s32i    a3,  a2, 0                  /* zero out the flag for next time */

1:
    /*
    Call0 ABI callee-saved regs a12-15 need to be saved before possible preemption.
    However a12-13 were already saved by _frxt_int_enter().
    */
    #ifdef __XTENSA_CALL0_ABI__
    s32i    a14, a1, XT_STK_A14
    s32i    a15, a1, XT_STK_A15
    #endif

    #ifdef __XTENSA_CALL0_ABI__
    call0   vPortYieldFromInt       /* call dispatch inside the function; never returns */
    #else
    call4   vPortYieldFromInt       /* this one returns */
    call0   _frxt_dispatch          /* tail-call dispatcher */
    /* Never returns here. */
    #endif

.Lnoswitch:
    /*
    If we come here, we are about to resume the interrupted task.
    */

.Lnesting:
    /*
    We come here only if there was no context switch, that is if this
    is a nested interrupt, or the interrupted task was not preempted.
    In either case there's no need to load the SP.
    */

    /* Restore full context from interrupt stack frame */
    call0   _xt_context_restore

    /*
    Must return via the exit dispatcher corresponding to the entrypoint from which
    this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt
    stack frame is deallocated in the exit dispatcher.
    */
    l32i    a0,  a1, XT_STK_EXIT
    ret


/*
**********************************************************************************************************
*                                           _frxt_timer_int
*                                      void _frxt_timer_int(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_TIMER_INT function for FreeRTOS.
* Called every timer interrupt.
* Manages the tick timer and calls xPortSysTickHandler() every tick.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
* Callable from C (obeys ABI conventions). Implemented in assembly code for performance.
*
**********************************************************************************************************
*/
#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
    .globl  _frxt_timer_int
    .type   _frxt_timer_int,@function
    .align  4
_frxt_timer_int:

    /*
    Xtensa timers work by comparing a cycle counter with a preset value.  Once the match occurs
    an interrupt is generated, and the handler has to set a new cycle count into the comparator.
    To avoid clock drift due to interrupt latency, the new cycle count is computed from the old,
    not the time the interrupt was serviced. However if a timer interrupt is ever serviced more
    than one tick late, it is necessary to process multiple ticks until the new cycle count is
    in the future, otherwise the next timer interrupt would not occur until after the cycle
    counter had wrapped (2^32 cycles later).

    do {
        ticks++;
        old_ccompare = read_ccompare_i();
        write_ccompare_i( old_ccompare + divisor );
        service one tick;
        diff = read_ccount() - old_ccompare;
    } while ( diff > divisor );
    */

    ENTRY(16)

    #ifdef CONFIG_PM_TRACE
    movi a6, 1 /* = ESP_PM_TRACE_TICK */
    getcoreid a7
    call4 esp_pm_trace_enter
    #endif // CONFIG_PM_TRACE

.L_xt_timer_int_catchup:

    /* Update the timer comparator for the next tick. */
    #ifdef XT_CLOCK_FREQ
    movi    a2, XT_TICK_DIVISOR         /* a2 = comparator increment          */
    #else
    movi    a3, _xt_tick_divisor
    l32i    a2, a3, 0                   /* a2 = comparator increment          */
    #endif
    rsr     a3, XT_CCOMPARE             /* a3 = old comparator value          */
    add     a4, a3, a2                  /* a4 = new comparator value          */
    wsr     a4, XT_CCOMPARE             /* update comp. and clear interrupt   */
    esync

    #ifdef __XTENSA_CALL0_ABI__
    /* Preserve a2 and a3 across C calls. */
    s32i    a2, sp, 4
    s32i    a3, sp, 8
    #endif

    /* Call the FreeRTOS tick handler (see port_systick.c). */
    #ifdef __XTENSA_CALL0_ABI__
    call0   xPortSysTickHandler
    #else
    call4   xPortSysTickHandler
    #endif

    #ifdef __XTENSA_CALL0_ABI__
    /* Restore a2 and a3. */
    l32i    a2, sp, 4
    l32i    a3, sp, 8
    #endif

    /* Check if we need to process more ticks to catch up. */
    esync                               /* ensure comparator update complete  */
    rsr     a4, CCOUNT                  /* a4 = cycle count                   */
    sub     a4, a4, a3                  /* diff = ccount - old comparator     */
    blt     a2, a4, .L_xt_timer_int_catchup  /* repeat while diff > divisor */

#ifdef CONFIG_PM_TRACE
    movi a6, 1 /* = ESP_PM_TRACE_TICK */
    getcoreid a7
    call4 esp_pm_trace_exit
#endif // CONFIG_PM_TRACE

    RET(16)
#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT

/*
**********************************************************************************************************
*                                           _frxt_tick_timer_init
*                                      void _frxt_tick_timer_init(void)
*
* Initialize timer and timer interrupt handler (_xt_tick_divisor_init() has already been called).
* Callable from C (obeys ABI conventions on entry).
*
**********************************************************************************************************
*/
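/*
In C terms the init below is approximately (illustrative sketch only;
xthal_get_ccount()/xthal_set_ccompare() are the Xtensa HAL equivalents of the
rsr/wsr pair used here, and XT_TIMER_INDEX / XT_TIMER_INTEN come from the port
headers):

    unsigned divisor = _xt_tick_divisor;   // or XT_TICK_DIVISOR when XT_CLOCK_FREQ is fixed
    xthal_set_ccompare(XT_TIMER_INDEX, xthal_get_ccount() + divisor);
    xt_ints_on(XT_TIMER_INTEN);            // enable via the port API; INTENABLE may be virtualized
*/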
#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
    .globl  _frxt_tick_timer_init
    .type   _frxt_tick_timer_init,@function
    .align  4
_frxt_tick_timer_init:

    ENTRY(16)


    /* Set up the periodic tick timer (assume enough time to complete init). */
    #ifdef XT_CLOCK_FREQ
    movi    a3, XT_TICK_DIVISOR
    #else
    movi    a2, _xt_tick_divisor
    l32i    a3, a2, 0
    #endif
    rsr     a2, CCOUNT              /* current cycle count */
    add     a2, a2, a3              /* time of first timer interrupt */
    wsr     a2, XT_CCOMPARE         /* set the comparator */

    /*
    Enable the timer interrupt at the device level. Don't write directly
    to the INTENABLE register because it may be virtualized.
    */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a2, XT_TIMER_INTEN
    call0   xt_ints_on
    #else
    movi    a6, XT_TIMER_INTEN
    call4   xt_ints_on
    #endif

    RET(16)
#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT

/*
**********************************************************************************************************
*                                DISPATCH THE HIGHEST PRIORITY READY TASK
*                                     void _frxt_dispatch(void)
*
* Switch context to the highest priority ready task, restore its state and dispatch control to it.
*
* This is a common dispatcher that acts as a shared exit path for all the context switch functions
* including vPortYield() and vPortYieldFromInt(), all of which tail-call this dispatcher
* (for windowed ABI vPortYieldFromInt() calls it indirectly via _frxt_int_exit() ).
*
* The Xtensa port uses different stack frames for solicited and unsolicited task suspension (see
* comments on stack frames in xtensa_context.h). This function restores the state accordingly.
* If restoring a task that solicited entry, restores the minimal state and leaves CPENABLE clear.
* If restoring a task that was preempted, restores all state including the task's CPENABLE.
*
* Entry:
*   pxCurrentTCB  points to the TCB of the task to suspend.
*   Because it is tail-called without a true function entrypoint, it needs no 'entry' instruction.
*
* Exit:
*   If the incoming task called vPortYield() (solicited), this function returns as if from vPortYield().
*   If the incoming task was preempted by an interrupt, this function jumps to the exit dispatcher.
*
**********************************************************************************************************
*/
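/*
The heart of the dispatcher, sketched in C (illustrative only; XtExcFrame and
the XT_STK_/XT_SOL_ frame layouts are assumed to be those described in
xtensa_context.h):

    vTaskSwitchContext();
    StackType_t *sp = pxCurrentTCB[xPortGetCoreID()]->pxTopOfStack;
    if (((XtExcFrame *) sp)->exit != 0) {
        // Interrupt (unsolicited) frame: restore everything, including the
        // task's CPENABLE, and leave through the saved exit dispatcher.
    } else {
        // Solicited frame (vPortYield): restore only the callee-saved state
        // and return as if vPortYield() had returned; CPENABLE stays clear.
    }
*/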
    .globl  _frxt_dispatch
    .type   _frxt_dispatch,@function
    .align  4
_frxt_dispatch:

    #ifdef __XTENSA_CALL0_ABI__
    call0   vTaskSwitchContext  // Get next TCB to resume
    movi    a2, pxCurrentTCB
    getcoreid a3
    addx4   a2,  a3, a2
    #else
    call4   vTaskSwitchContext  // Get next TCB to resume
    movi    a2, pxCurrentTCB
    getcoreid a3
    addx4   a2,  a3, a2
    #endif
    l32i    a3,  a2, 0
    l32i    sp,  a3, TOPOFSTACK_OFFS     /* SP = next_TCB->pxTopOfStack;  */
    s32i    a3,  a2, 0

    /* Determine the type of stack frame. */
    l32i    a2,  sp, XT_STK_EXIT        /* exit dispatcher or solicited flag */
    bnez    a2,  .L_frxt_dispatch_stk

.L_frxt_dispatch_sol:

    /* Solicited stack frame. Restore minimal context and return from vPortYield(). */
    #if XCHAL_HAVE_THREADPTR
    l32i    a2,  sp, XT_SOL_THREADPTR
    wur.threadptr a2
    #endif
    l32i    a3,  sp, XT_SOL_PS
    #ifdef __XTENSA_CALL0_ABI__
    l32i    a12, sp, XT_SOL_A12
    l32i    a13, sp, XT_SOL_A13
    l32i    a14, sp, XT_SOL_A14
    l32i    a15, sp, XT_SOL_A15
    #endif
    l32i    a0,  sp, XT_SOL_PC
    #if XCHAL_CP_NUM > 0
    /* Ensure wsr.CPENABLE is complete (should be, it was cleared on entry). */
    rsync
    #endif
    /* As soon as PS is restored, interrupts can happen. No need to sync PS. */
    wsr     a3,  PS
    #ifdef __XTENSA_CALL0_ABI__
    addi    sp,  sp, XT_SOL_FRMSZ
    ret
    #else
    retw
    #endif

.L_frxt_dispatch_stk:

    #if XCHAL_CP_NUM > 0
    /* Restore CPENABLE from task's co-processor save area. */
    movi    a3, pxCurrentTCB            /* cp_state =                       */
    getcoreid a2
    addx4   a3,  a2, a3
    l32i    a3, a3, 0
    l32i    a2, a3, CP_TOPOFSTACK_OFFS  /* StackType_t *pxStack;            */
    l16ui   a3, a2, XT_CPENABLE         /* CPENABLE = cp_state->cpenable;   */
    wsr     a3, CPENABLE
    #endif

    /* Interrupt stack frame. Restore full context and return to exit dispatcher. */
    call0   _xt_context_restore

    /* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */
    #ifdef __XTENSA_CALL0_ABI__
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    #if XCHAL_CP_NUM > 0
    /* Ensure wsr.CPENABLE has completed. */
    rsync
    #endif

    /*
    Must return via the exit dispatcher corresponding to the entrypoint from which
    this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt
    stack frame is deallocated in the exit dispatcher.
    */
    l32i    a0, sp, XT_STK_EXIT
    ret


/*
**********************************************************************************************************
*                            PERFORM A SOLICITED CONTEXT SWITCH (from a task)
*                                        void vPortYield(void)
*
* This function saves the minimal state needed for a solicited task suspension, clears CPENABLE,
* then tail-calls the dispatcher _frxt_dispatch() to perform the actual context switch.
*
* At Entry:
*   pxCurrentTCB  points to the TCB of the task to suspend
*   Callable from C (obeys ABI conventions on entry).
*
* Does not return to caller.
*
**********************************************************************************************************
*/
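/*
What the solicited frame amounts to, in C-ish form (illustrative sketch only;
XtSolFrame and the XT_SOL_* offsets are assumed to match xtensa_context.h,
"task_sp" stands in for the SP register and "core" for the current core ID):

    XtSolFrame *frame = (XtSolFrame *) (task_sp - XT_SOL_FRMSZ);  // carve the frame on the task stack
    frame->pc   = return_address;          // XT_SOL_PC
    frame->ps   = PS;                      // XT_SOL_PS
    frame->exit = 0;                       // XT_SOL_EXIT == 0 marks the frame as "solicited"
    pxCurrentTCB[core]->pxTopOfStack = (StackType_t *) frame;
    CPENABLE = 0;                          // also cleared in the saved CP state
    _frxt_dispatch();                      // tail-call; never returns here
*/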
    .globl  vPortYield
    .type   vPortYield,@function
    .align  4
vPortYield:

    #ifdef __XTENSA_CALL0_ABI__
    addi    sp,  sp, -XT_SOL_FRMSZ
    #else
    entry   sp,  XT_SOL_FRMSZ
    #endif

    rsr     a2,  PS
    s32i    a0,  sp, XT_SOL_PC
    s32i    a2,  sp, XT_SOL_PS
    #if XCHAL_HAVE_THREADPTR
    rur.threadptr a2
    s32i    a2,  sp, XT_SOL_THREADPTR
    #endif
    #ifdef __XTENSA_CALL0_ABI__
    s32i    a12, sp, XT_SOL_A12         /* save callee-saved registers      */
    s32i    a13, sp, XT_SOL_A13
    s32i    a14, sp, XT_SOL_A14
    s32i    a15, sp, XT_SOL_A15
    #else
    /* Spill register windows. Calling xthal_window_spill() causes extra    */
    /* spills and reloads, so we will set things up to call the _nw version */
    /* instead to save cycles.                                              */
    movi    a6,  ~(PS_WOE_MASK|PS_INTLEVEL_MASK)  /* spills a4-a7 if needed */
    and     a2,  a2, a6                           /* clear WOE, INTLEVEL    */
    addi    a2,  a2, XCHAL_EXCM_LEVEL             /* set INTLEVEL           */
    wsr     a2,  PS
    rsync
    call0   xthal_window_spill_nw
    l32i    a2,  sp, XT_SOL_PS                    /* restore PS             */
    wsr     a2,  PS
    #endif

    rsil    a2,  XCHAL_EXCM_LEVEL       /* disable low/med interrupts       */

    #if XCHAL_CP_NUM > 0
    /* Save coprocessor callee-saved state (if any). At this point CPENABLE */
    /* should still reflect which CPs were in use (enabled).                */
    call0   _xt_coproc_savecs
    #endif

    movi    a2,  pxCurrentTCB
    getcoreid a3
    addx4   a2,  a3, a2
    l32i    a2,  a2, 0                  /* a2 = pxCurrentTCB                */
    movi    a3,  0
    s32i    a3,  sp, XT_SOL_EXIT        /* 0 to flag as solicited frame     */
    s32i    sp,  a2, TOPOFSTACK_OFFS    /* pxCurrentTCB->pxTopOfStack = SP  */

    #if XCHAL_CP_NUM > 0
    /* Clear CPENABLE, also in task's co-processor state save area. */
    l32i    a2,  a2, CP_TOPOFSTACK_OFFS /* a2 = pxCurrentTCB->cp_state      */
    movi    a3,  0
    wsr     a3,  CPENABLE
    beqz    a2,  1f
    s16i    a3,  a2, XT_CPENABLE        /* clear saved cpenable             */
1:
    #endif

    /* Tail-call dispatcher. */
    call0   _frxt_dispatch
    /* Never reaches here. */


/*
**********************************************************************************************************
*                         PERFORM AN UNSOLICITED CONTEXT SWITCH (from an interrupt)
*                                        void vPortYieldFromInt(void)
*
* This function saves and clears CPENABLE, then tail-calls the dispatcher _frxt_dispatch() to perform
* the actual context switch (the context switch hook it once called has been removed).
*
* At Entry:
*   Interrupted task context has been saved in an interrupt stack frame at pxCurrentTCB->pxTopOfStack.
*   pxCurrentTCB  points to the TCB of the task to suspend.
*   Callable from C (obeys ABI conventions on entry).
*
* At Exit:
*   Windowed ABI defers the actual context switch until the stack is unwound to interrupt entry.
*   Call0 ABI tail-calls the dispatcher directly (no need to unwind) so does not return to caller.
*
**********************************************************************************************************
*/
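/*
Roughly, in C (illustrative sketch only; "coproc_area" is the TCB field named
in the CP_TOPOFSTACK_OFFS comment at the top of this file, and CPENABLE stands
for the special register of the same name):

    uint8_t *cp_area = pxCurrentTCB[xPortGetCoreID()]->coproc_area;
    *(uint16_t *) (cp_area + XT_CPENABLE) = CPENABLE;  // remember which CPs the task had enabled
    CPENABLE = 0;                                      // disable them all across the switch
    // Call0 ABI: fall straight into _frxt_dispatch() (never returns).
    // Windowed ABI: return; _frxt_int_exit() dispatches once the windows unwind.
*/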
    .globl  vPortYieldFromInt
    .type   vPortYieldFromInt,@function
    .align  4
vPortYieldFromInt:

    ENTRY(16)

    #if XCHAL_CP_NUM > 0
    /* Save CPENABLE in task's co-processor save area, and clear CPENABLE.  */
    movi    a3, pxCurrentTCB            /* cp_state =                       */
    getcoreid a2
    addx4   a3,  a2, a3
    l32i    a3, a3, 0

    l32i    a2, a3, CP_TOPOFSTACK_OFFS

    rsr     a3, CPENABLE
    s16i    a3, a2, XT_CPENABLE         /* cp_state->cpenable = CPENABLE;   */
    movi    a3, 0
    wsr     a3, CPENABLE                /* disable all co-processors        */
    #endif

    #ifdef __XTENSA_CALL0_ABI__
    /* Tail-call dispatcher. */
    call0   _frxt_dispatch
    /* Never reaches here. */
    #else
    RET(16)
    #endif

/*
**********************************************************************************************************
*                                        _frxt_task_coproc_state
*                                   void _frxt_task_coproc_state(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_CP_STATE function for FreeRTOS.
*
* May only be called when a task is running, not within an interrupt handler (returns 0 in that case).
* May only be called from assembly code by the 'call0' instruction. Does NOT obey ABI conventions.
* Returns in A15 a pointer to the base of the co-processor state save area for the current task.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
**********************************************************************************************************
*/
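/*
Equivalent C for the condition implemented below (illustrative only; the result
is actually delivered in A15 rather than via a normal ABI return, and the
"coproc_area" field name is taken from the CP_TOPOFSTACK_OFFS comment above):

    int core = xPortGetCoreID();
    if (port_xSchedulerRunning[core] && port_interruptNesting[core] == 0
        && pxCurrentTCB[core] != NULL) {
        return pxCurrentTCB[core]->coproc_area;   // base of the CP save area
    }
    return NULL;
*/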
#if XCHAL_CP_NUM > 0

    .globl  _frxt_task_coproc_state
    .type   _frxt_task_coproc_state,@function
    .align  4
_frxt_task_coproc_state:


    /* We can use a3 as a scratchpad; the callers of XT_RTOS_CP_STATE don't appear to need it preserved. */
    getcoreid a3
    movi    a15, port_xSchedulerRunning /* if (port_xSchedulerRunning              */
    addx4   a15, a3, a15
    l32i    a15, a15, 0
    beqz    a15, 1f
    movi    a15, port_interruptNesting  /* && port_interruptNesting == 0           */
    addx4   a15, a3, a15
    l32i    a15, a15, 0
    bnez    a15, 1f

    movi    a15, pxCurrentTCB
    addx4   a15, a3, a15
    l32i    a15, a15, 0                 /* && pxCurrentTCB != 0) {                 */

    beqz    a15, 2f
    l32i    a15, a15, CP_TOPOFSTACK_OFFS
    ret

1:  movi    a15, 0
2:  ret

#endif /* XCHAL_CP_NUM > 0 */