1/*
2 * SPDX-FileCopyrightText: 2015-2019 Cadence Design Systems, Inc.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
7 */
8/*
9 * Copyright (c) 2015-2019 Cadence Design Systems, Inc.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining
12 * a copy of this software and associated documentation files (the
13 * "Software"), to deal in the Software without restriction, including
14 * without limitation the rights to use, copy, modify, merge, publish,
15 * distribute, sublicense, and/or sell copies of the Software, and to
16 * permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included
20 * in all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
26 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31/*******************************************************************************
32--------------------------------------------------------------------------------
33
34        XTENSA VECTORS AND LOW LEVEL HANDLERS FOR AN RTOS
35
36  Xtensa low level exception and interrupt vectors and handlers for an RTOS.
37
38  Interrupt handlers and user exception handlers support interaction with
39  the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT before and
40  after user's specific interrupt handlers. These macros are defined in
41  xtensa_<rtos>.h to call suitable functions in a specific RTOS.
42
43  Users can install application-specific interrupt handlers for low and
44  medium level interrupts, by calling xt_set_interrupt_handler(). These
45  handlers can be written in C, and must obey C calling convention. The
46  handler table is indexed by the interrupt number. Each handler may be
47  provided with an argument.
48
49  Note that the system timer interrupt is handled specially, and is
50  dispatched to the RTOS-specific handler. This timer cannot be hooked
51  by application code.
52
53  Optional hooks are also provided to install a handler per level at
54  run-time, made available by compiling this source file with
55  '-DXT_INTEXC_HOOKS' (useful for automated testing).
56
57!!  This file is a template that usually needs to be modified to handle       !!
58!!  application specific interrupts. Search USER_EDIT for helpful comments    !!
59!!  on where to insert handlers and how to write them.                        !!
60
61  Users can also install application-specific exception handlers in the
62  same way, by calling xt_set_exception_handler(). One handler slot is
63  provided for each exception type. Note that some exceptions are handled
64  by the porting layer itself, and cannot be taken over by application
65  code in this manner. These are the alloca, syscall, and coprocessor
66  exceptions.
67
68  The exception handlers can be written in C, and must follow C calling
69  convention. Each handler is passed a pointer to an exception frame as
70  its single argument. The exception frame is created on the stack, and
71  holds the saved context of the thread that took the exception. If the
72  handler returns, the context will be restored and the instruction that
73  caused the exception will be retried. If the handler makes any changes
74  to the saved state in the exception frame, the changes will be applied
75  when restoring the context.
76
77  Because Xtensa is a configurable architecture, this port supports all user
78  generated configurations (except restrictions stated in the release notes).
79  This is accomplished by conditional compilation using macros and functions
80  defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
81  Only the relevant parts of this file will be included in your RTOS build.
82  For example, this file provides interrupt vector templates for all types and
83  all priority levels, but only the ones in your configuration are built.
84
85  NOTES on the use of 'call0' for long jumps instead of 'j':
86   1. This file should be assembled with the -mlongcalls option to xt-xcc.
87   2. The -mlongcalls compiler option causes 'call0 dest' to be expanded to
88      a sequence 'l32r a0, dest' 'callx0 a0' which works regardless of the
89      distance from the call to the destination. The linker then relaxes
90      it back to 'call0 dest' if it determines that dest is within range.
91      This allows more flexibility in locating code without the performance
92      overhead of the 'l32r' literal data load in cases where the destination
93      is in range of 'call0'. There is an additional benefit in that 'call0'
94      has a longer range than 'j' due to the target being word-aligned, so
95      the 'l32r' sequence is less likely needed.
96   3. The use of 'call0' with -mlongcalls requires that register a0 not be
97      live at the time of the call, which is always the case for a function
98      call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'.
99   4. This use of 'call0' is independent of the C function call ABI.
100
101*******************************************************************************/
102
103#include "xtensa_rtos.h"
104#include "esp_idf_version.h"
105#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
106#include "esp_panic.h"
107#else
108#include "esp_private/panic_reason.h"
109#endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
110#include "sdkconfig.h"
111#include "soc/soc.h"
112
113/*
114  Define for workaround: pin no-cpu-affinity tasks to a cpu when fpu is used.
115  Please change this when the tcb structure is changed
116*/
/* Fully parenthesized so the macro expands safely inside larger expressions:
   plain `&` binds looser than `+`/`-`, so without the outer parentheses an
   expansion like `TASKTCB_XCOREID_OFFSET + 4` would regroup as
   `(0x38+...+3) & (~3 + 4)`. Value is unchanged when the macro is used
   stand-alone as an offset/operand. */
#define TASKTCB_XCOREID_OFFSET (((0x38+configMAX_TASK_NAME_LEN+3)&~3))
.extern pxCurrentTCB
119
120/*
121--------------------------------------------------------------------------------
122    In order for backtracing to be able to trace from the pre-exception stack
123    across to the exception stack (including nested interrupts), we need to create
124    a pseudo base-save area to make it appear like the exception dispatcher was
125    triggered by a CALL4 from the pre-exception code. In reality, the exception
126    dispatcher uses the same window as pre-exception code, and only CALL0s are
127    used within the exception dispatcher.
128
129    To create the pseudo base-save area, we need to store a copy of the pre-exception's
130    base save area (a0 to a4) below the exception dispatcher's SP. EXCSAVE_x will
131    be used to store a copy of the SP that points to the interrupted code's exception
132    frame just in case the exception dispatcher's SP does not point to the exception
133    frame (which is the case when switching from task to interrupt stack).
134
    Clearing the pseudo base-save area is unnecessary as the interrupt dispatcher
136    will restore the current SP to that of the pre-exception SP.
137--------------------------------------------------------------------------------
138*/
139#ifdef CONFIG_FREERTOS_INTERRUPT_BACKTRACE
140#define XT_DEBUG_BACKTRACE    1
141#endif
142
143
144/*
145--------------------------------------------------------------------------------
146  Defines used to access _xtos_interrupt_table.
147--------------------------------------------------------------------------------
148*/
149#define XIE_HANDLER     0
150#define XIE_ARG         4
151#define XIE_SIZE        8
152
153
154/*
155  Macro get_percpu_entry_for - convert a per-core ID into a multicore entry.
156  Basically does reg=reg*portNUM_PROCESSORS+current_core_id
157  Multiple versions here to optimize for specific portNUM_PROCESSORS values.
158*/
    /*
    In : "reg"     - table index / interrupt number (clobbered, receives result)
         "scratch" - scratch AR register (clobbered only in multicore builds)
    Out: reg = reg * portNUM_PROCESSORS + current_core_id
    */
    .macro get_percpu_entry_for reg scratch
#if (portNUM_PROCESSORS == 1)
    /* No need to do anything */
#elif  (portNUM_PROCESSORS == 2)
    /* Optimized 2-core code. */
    getcoreid \scratch
    addx2 \reg,\reg,\scratch                /* reg = reg*2 + core_id */
#else
    /* Generalized n-core code. Untested! */
    movi \scratch,portNUM_PROCESSORS
    mull \scratch,\reg,\scratch             /* scratch = reg * portNUM_PROCESSORS */
    getcoreid \reg
    add \reg,\scratch,\reg                  /* reg = scratch + core_id */
#endif
   .endm
174/*
175--------------------------------------------------------------------------------
176  Macro extract_msb - return the input with only the highest bit set.
177
178  Input  : "ain"  - Input value, clobbered.
179  Output : "aout" - Output value, has only one bit set, MSB of "ain".
180  The two arguments must be different AR registers.
181--------------------------------------------------------------------------------
182*/
183
    .macro  extract_msb     aout ain
1:
    addi    \aout, \ain, -1         /* aout = ain - 1        */
    and     \ain, \ain, \aout       /* ain &= aout: clears the lowest set bit */
    bnez    \ain, 1b                /* repeat until only MSB was left (ain == 0) */
    addi    \aout, \aout, 1         /* return aout + 1 (restores the surviving bit) */
    .endm
191
192/*
193--------------------------------------------------------------------------------
194  Macro dispatch_c_isr - dispatch interrupts to user ISRs.
195  This will dispatch to user handlers (if any) that are registered in the
196  XTOS dispatch table (_xtos_interrupt_table). These handlers would have
197  been registered by calling _xtos_set_interrupt_handler(). There is one
198  exception - the timer interrupt used by the OS will not be dispatched
199  to a user handler - this must be handled by the caller of this macro.
200
201  Level triggered and software interrupts are automatically deasserted by
202  this code.
203
204  ASSUMPTIONS:
205    -- PS.INTLEVEL is set to "level" at entry
206    -- PS.EXCM = 0, C calling enabled
207
208  NOTE: For CALL0 ABI, a12-a15 have not yet been saved.
209
210  NOTE: This macro will use registers a0 and a2-a7. The arguments are:
211    level -- interrupt level
212    mask  -- interrupt bitmask for this level
213--------------------------------------------------------------------------------
214*/
215
    .macro  dispatch_c_isr    level  mask

    #ifdef CONFIG_PM_TRACE
    movi a6, 0 /* = ESP_PM_TRACE_IDLE */
    getcoreid a7
    call4 esp_pm_trace_exit
    #endif // CONFIG_PM_TRACE

    /* Get mask of pending, enabled interrupts at this level into a2. */

.L_xt_user_int_&level&:
    rsr     a2, INTENABLE                   /* a2 = currently enabled interrupts */
    rsr     a3, INTERRUPT                   /* a3 = currently pending interrupts */
    movi    a4, \mask
    and     a2, a2, a3
    and     a2, a2, a4                      /* a2 = pending & enabled & this level's mask */
    beqz    a2, 9f                          /* nothing to do */

    /* This bit of code provides a nice debug backtrace in the debugger.
       It does take a few more instructions, so undef XT_DEBUG_BACKTRACE
       if you want to save the cycles.
       At this point, the exception frame should have been allocated and filled,
       and current sp points to the interrupt stack (for non-nested interrupt)
       or below the allocated exception frame (for nested interrupts). Copy the
       pre-exception's base save area below the current SP.
    */
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    rsr     a0, EXCSAVE_1 + \level - 1      /* Get exception frame pointer stored in EXCSAVE_x */
    l32i    a3, a0, XT_STK_A0               /* Copy pre-exception a0 (return address) */
    s32e    a3, a1, -16
    l32i    a3, a0, XT_STK_A1               /* Copy pre-exception a1 (stack pointer) */
    s32e    a3, a1, -12
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
    /* Backtracing only needs a0 and a1, no need to create full base save area.
       Also need to change current frame's return address to point to pre-exception's
       last run instruction.
     */
    rsr     a0, EPC_1 + \level - 1          /* return address */
    movi    a4, 0xC0000000                  /* constant with top 2 bits set (call size) */
    or      a0, a0, a4                      /* set top 2 bits */
    addx2   a0, a4, a0                      /* clear top bit -- simulating call4 size   */
    #endif
    #endif

    #ifdef CONFIG_PM_ENABLE
    call4 esp_pm_impl_isr_hook
    #endif

    #ifdef XT_INTEXC_HOOKS
    /* Call interrupt hook if present to (pre)handle interrupts. */
    movi    a4, _xt_intexc_hooks
    l32i    a4, a4, \level << 2             /* a4 = hook for this level (0 if none installed) */
    beqz    a4, 2f
    #ifdef __XTENSA_CALL0_ABI__
    callx0  a4
    beqz    a2, 9f                          /* hook cleared all bits: nothing left to do */
    #else
    mov     a6, a2
    callx4  a4
    beqz    a6, 9f                          /* hook cleared all bits: nothing left to do */
    mov     a2, a6
    #endif
2:
    #endif

    /* Now look up in the dispatch table and call user ISR if any. */
    /* If multiple bits are set then MSB has highest priority.     */

    extract_msb  a4, a2                     /* a4 = MSB of a2, a2 trashed */

    #ifdef XT_USE_SWPRI
    /* Enable all interrupts at this level that are numerically higher
       than the one we just selected, since they are treated as higher
       priority.
    */
    movi    a3, \mask                       /* a3 = all interrupts at this level */
    add     a2, a4, a4                      /* a2 = a4 << 1 */
    addi    a2, a2, -1                      /* a2 = mask of 1's <= a4 bit */
    and     a2, a2, a3                      /* a2 = mask of all bits <= a4 at this level */
    movi    a3, _xt_intdata
    l32i    a6, a3, 4                       /* a6 = _xt_vpri_mask */
    neg     a2, a2
    addi    a2, a2, -1                      /* a2 = mask to apply */
    and     a5, a6, a2                      /* mask off all bits <= a4 bit */
    s32i    a5, a3, 4                       /* update _xt_vpri_mask */
    rsr     a3, INTENABLE
    and     a3, a3, a2                      /* mask off all bits <= a4 bit */
    wsr     a3, INTENABLE
    rsil    a3, \level - 1                  /* lower interrupt level by 1 */
    #endif

    #ifdef XT_RTOS_TIMER_INT
    movi    a3, XT_TIMER_INTEN              /* a3 = timer interrupt bit */
    wsr     a4, INTCLEAR                    /* clear sw or edge-triggered interrupt */
    beq     a3, a4, 7f                      /* if timer interrupt then skip table */
    #else
    wsr     a4, INTCLEAR                    /* clear sw or edge-triggered interrupt */
    #endif // XT_RTOS_TIMER_INT

    find_ms_setbit a3, a4, a3, 0            /* a3 = interrupt number */

    get_percpu_entry_for a3, a12            /* a3 = index for this core's table slice */
    movi    a4, _xt_interrupt_table
    addx8   a3, a3, a4                      /* a3 = address of interrupt table entry */
    l32i    a4, a3, XIE_HANDLER             /* a4 = handler address */
    #ifdef __XTENSA_CALL0_ABI__
    mov     a12, a6                         /* save in callee-saved reg */
    l32i    a2, a3, XIE_ARG                 /* a2 = handler arg */
    callx0  a4                              /* call handler */
    mov     a2, a12
    #else
    mov     a2, a6                          /* save in windowed reg */
    l32i    a6, a3, XIE_ARG                 /* a6 = handler arg */
    callx4  a4                              /* call handler */
    #endif

    #ifdef XT_USE_SWPRI
    j       8f                              /* restore INTENABLE/vpri mask and exit */
    #else
    j       .L_xt_user_int_&level&          /* check for more interrupts */
    #endif
    #ifdef XT_RTOS_TIMER_INT
7:

    .ifeq XT_TIMER_INTPRI - \level
.L_xt_user_int_timer_&level&:
    /*
    Interrupt handler for the RTOS tick timer if at this level.
    We'll be reading the interrupt state again after this call
    so no need to preserve any registers except a6 (vpri_mask).
    */

    #ifdef __XTENSA_CALL0_ABI__
    mov     a12, a6
    call0   XT_RTOS_TIMER_INT
    mov     a2, a12
    #else
    mov     a2, a6
    call4   XT_RTOS_TIMER_INT
    #endif
    .endif
    #endif // XT_RTOS_TIMER_INT

    #ifdef XT_USE_SWPRI
    j       8f                              /* restore INTENABLE/vpri mask and exit */
    #else
    j       .L_xt_user_int_&level&          /* check for more interrupts */
    #endif

    #ifdef XT_USE_SWPRI
8:
    /* Restore old value of _xt_vpri_mask from a2. Also update INTENABLE from
       virtual _xt_intenable which _could_ have changed during interrupt
       processing. */

    movi    a3, _xt_intdata
    l32i    a4, a3, 0                       /* a4 = _xt_intenable    */
    s32i    a2, a3, 4                       /* update _xt_vpri_mask  */
    and     a4, a4, a2                      /* a4 = masked intenable */
    wsr     a4, INTENABLE                   /* update INTENABLE      */
    #endif

9:
    /* done */

    .endm
384
385#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
386/*
387--------------------------------------------------------------------------------
388  Panic handler.
389  Should be reached by call0 (preferable) or jump only. If call0, a0 says where
390  from. If on simulator, display panic message and abort, else loop indefinitely.
391--------------------------------------------------------------------------------
392*/
393
394    .section .iram1,"ax"
395    .global panicHandler
396
397    .global     _xt_panic
398    .type       _xt_panic,@function
399    .align      4
400    .literal_position
401    .align      4
402
_xt_panic:
    /* Allocate exception frame and save minimal context. */
    mov     a0, sp
    addi    sp, sp, -XT_STK_FRMSZ
    s32i    a0, sp, XT_STK_A1               /* save pre-exception SP */
    #if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -12                     /* for debug backtrace */
    #endif
    rsr     a0, PS                          /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_1                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    #if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -16                     /* for debug backtrace */
    #endif
    s32i    a12, sp, XT_STK_A12             /* _xt_context_save requires A12- */
    s32i    a13, sp, XT_STK_A13             /* A13 to have already been saved */
    call0   _xt_context_save

    /* Save exc cause and vaddr into exception frame */
    rsr     a0, EXCCAUSE
    s32i    a0, sp, XT_STK_EXCCAUSE
    rsr     a0, EXCVADDR
    s32i    a0, sp, XT_STK_EXCVADDR

    /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
    rsr     a0, EXCSAVE_1                   /* save interruptee's a0 */

    s32i    a0, sp, XT_STK_A0

    /* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */
    movi    a0, PS_INTLEVEL(5) | PS_UM | PS_WOE
    wsr     a0, PS

    //Call panic handler
    mov     a6,sp                           /* a6 = exc frame ptr: becomes callee's a2 (arg0) after call4 window rotation */
    call4 panicHandler
440
441
    .align 4
//Call using call0. Prints the hex char in a2. Kills a3, a4, a5
panic_print_hex:
    movi a3,0x60000000                      /* a3 = peripheral base, presumably UART0 (ESP32) -- TODO confirm for target */
    movi a4,8                               /* 8 nibbles in a 32-bit value */
panic_print_hex_loop:
    l32i a5, a3, 0x1c                       /* read register at base+0x1c (UART status, presumably) */
    extui a5, a5, 16, 8                     /* extract bits 16..23 (TX FIFO count) */
    bgei a5,64,panic_print_hex_loop         /* spin while FIFO count >= 64 (FIFO full) */

    srli a5,a2,28                           /* a5 = top nibble of a2 */
    bgei a5,10,panic_print_hex_a
    addi a5,a5,'0'                          /* 0..9  -> '0'..'9' */
    j panic_print_hex_ok
panic_print_hex_a:
    addi a5,a5,'A'-10                       /* 10..15 -> 'A'..'F' */
panic_print_hex_ok:
    s32i a5,a3,0                            /* write char to TX register at base+0 */
    slli a2,a2,4                            /* bring next nibble into top position */

    addi a4,a4,-1
    bnei a4,0,panic_print_hex_loop          /* loop over all 8 nibbles */
    movi a5,' '                             /* trailing space separator */
    s32i a5,a3,0

    ret
#endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
469
470
471    .section    .rodata, "a"
472    .align      4
473
474/*
475--------------------------------------------------------------------------------
476    Hooks to dynamically install handlers for exceptions and interrupts.
477    Allows automated regression frameworks to install handlers per test.
478    Consists of an array of function pointers indexed by interrupt level,
479    with index 0 containing the entry for user exceptions.
480    Initialized with all 0s, meaning no handler is installed at each level.
481    See comment in xtensa_rtos.h for more details.
482
483    *WARNING*  This array is for all CPUs, that is, installing a hook for
484    one CPU will install it for all others as well!
485--------------------------------------------------------------------------------
486*/
487
    #ifdef XT_INTEXC_HOOKS
    .data
    .global     _xt_intexc_hooks
    .type       _xt_intexc_hooks,@object
    .align      4

_xt_intexc_hooks:
    .fill       XT_INTEXC_HOOK_NUM, 4, 0    /* one 32-bit fn pointer per slot; 0 = no hook installed */
    #endif
497
498
499/*
500--------------------------------------------------------------------------------
501  EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS
502  (except window exception vectors).
503
504  Each vector goes at a predetermined location according to the Xtensa
505  hardware configuration, which is ensured by its placement in a special
506  section known to the Xtensa linker support package (LSP). It performs
507  the minimum necessary before jumping to the handler in the .text section.
508
509  The corresponding handler goes in the normal .text section. It sets up
510  the appropriate stack frame, saves a few vector-specific registers and
511  calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
512  and enter the RTOS, then sets up a C environment. It then calls the
513  user's interrupt handler code (which may be coded in C) and finally
514  calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
515
516  While XT_RTOS_INT_EXIT does not return directly to the interruptee,
517  eventually the RTOS scheduler will want to dispatch the interrupted
518  task or handler. The scheduler will return to the exit point that was
519  saved in the interrupt stack frame at XT_STK_EXIT.
520--------------------------------------------------------------------------------
521*/
522
523
524/*
525--------------------------------------------------------------------------------
526Debug Exception.
527--------------------------------------------------------------------------------
528*/
529
530#if XCHAL_HAVE_DEBUG
531
    .begin      literal_prefix .DebugExceptionVector
    .section    .DebugExceptionVector.text, "ax"
    .global     _DebugExceptionVector
    .align      4
    .global     xt_debugexception
_DebugExceptionVector:
    wsr     a0, EXCSAVE+XCHAL_DEBUGLEVEL    /* preserve a0 in the debug-level EXCSAVE */
    call0   xt_debugexception            /* load exception handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix
542
543#endif
544
545/*
546--------------------------------------------------------------------------------
547Double Exception.
548Double exceptions are not a normal occurrence. They indicate a bug of some kind.
549--------------------------------------------------------------------------------
550*/
551
552#ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR
553
    .begin      literal_prefix .DoubleExceptionVector
    .section    .DoubleExceptionVector.text, "ax"
    .global     _DoubleExceptionVector
    .align      4

_DoubleExceptionVector:

    #if XCHAL_HAVE_DEBUG
    break   1, 4                            /* unhandled double exception */
    #endif
    movi    a0,PANIC_RSN_DOUBLEEXCEPTION    /* report as a panic with a dedicated reason code */
    wsr     a0,EXCCAUSE                     /* overwrite EXCCAUSE with the panic reason */
    call0   _xt_panic                       /* does not return */
    rfde                                    /* make a0 point here not later */

    .end        literal_prefix
570
571#endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */
572
573/*
574--------------------------------------------------------------------------------
575Kernel Exception (including Level 1 Interrupt from kernel mode).
576--------------------------------------------------------------------------------
577*/
578
    .begin      literal_prefix .KernelExceptionVector
    .section    .KernelExceptionVector.text, "ax"
    .global     _KernelExceptionVector
    .align      4

_KernelExceptionVector:

    wsr     a0, EXCSAVE_1                   /* preserve a0 */
    call0   _xt_kernel_exc                  /* kernel exception handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

    .section .iram1,"ax"
    .align      4

_xt_kernel_exc:
    /* Kernel-mode exceptions are not expected in this port: report a panic. */
    #if XCHAL_HAVE_DEBUG
    break   1, 0                            /* unhandled kernel exception */
    #endif
    movi    a0,PANIC_RSN_KERNELEXCEPTION    /* report as a panic with a dedicated reason code */
    wsr     a0,EXCCAUSE                     /* overwrite EXCCAUSE with the panic reason */
    call0   _xt_panic                       /* does not return */
    rfe                                     /* make a0 point here not there */
603
604
605/*
606--------------------------------------------------------------------------------
607User Exception (including Level 1 Interrupt from user mode).
608--------------------------------------------------------------------------------
609*/
610
    .begin      literal_prefix .UserExceptionVector
    .section    .UserExceptionVector.text, "ax"
    .global     _UserExceptionVector
    .type       _UserExceptionVector,@function
    .align      4

_UserExceptionVector:

    wsr     a0, EXCSAVE_1                   /* preserve a0 */
    call0   _xt_user_exc                    /* user exception handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix
624
625/*
626--------------------------------------------------------------------------------
627  Insert some waypoints for jumping beyond the signed 8-bit range of
  conditional branch instructions, so the conditional branches to specific
629  exception handlers are not taken in the mainline. Saves some cycles in the
630  mainline.
631--------------------------------------------------------------------------------
632*/
633
634#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
635    .global   LoadStoreErrorHandler
636    .global   AlignmentErrorHandler
637#endif
638
    .section .iram1,"ax"

    #if XCHAL_HAVE_WINDOWED
    .align      4
_xt_to_alloca_exc:
    call0   _xt_alloca_exc                  /* in window vectors section */
    /* never returns here - call0 is used as a jump (see note at top) */
    #endif

    .align      4
_xt_to_syscall_exc:
    call0   _xt_syscall_exc
    /* never returns here - call0 is used as a jump (see note at top) */

    #if XCHAL_CP_NUM > 0
    .align      4
_xt_to_coproc_exc:
    call0   _xt_coproc_exc
    /* never returns here - call0 is used as a jump (see note at top) */
    #endif

#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
    .align      4
_call_loadstore_handler:
    call0   LoadStoreErrorHandler
    /* This will return only if wrong opcode or address out of range*/
    j       .LS_exit

    .align      4
_call_alignment_handler:
    call0   AlignmentErrorHandler
    /* This will return only if wrong opcode or address out of range*/
    addi    a0, a0, 1                       /* undo the -1 applied to a0 before the branch in _xt_user_exc */
    j       .LS_exit
#endif
674
675/*
676--------------------------------------------------------------------------------
677  User exception handler.
678--------------------------------------------------------------------------------
679*/
680
    .type       _xt_user_exc,@function
    .align      4

_xt_user_exc:

    /* If level 1 interrupt then jump to the dispatcher */
    rsr     a0, EXCCAUSE
    beqi    a0, EXCCAUSE_LEVEL1INTERRUPT, _xt_lowint1

    /* Handle any coprocessor exceptions. Rely on the fact that exception
       numbers above EXCCAUSE_CP0_DISABLED all relate to the coprocessors.
    */
    #if XCHAL_CP_NUM > 0
    bgeui   a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc
    #endif

    /* Handle alloca and syscall exceptions */
    #if XCHAL_HAVE_WINDOWED
    beqi    a0, EXCCAUSE_ALLOCA,  _xt_to_alloca_exc
    #endif
    beqi    a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc

#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
    beqi    a0, EXCCAUSE_LOAD_STORE_ERROR, _call_loadstore_handler

    /* Test for EXCCAUSE == 9 (alignment error) via a0-1 == 8, since 9 is
       not an encodable beqi immediate; a0 is restored on the fall-through
       path here and in _call_alignment_handler on the taken path. */
    addi    a0, a0, -1
    beqi    a0, 8, _call_alignment_handler
    addi    a0, a0, 1
.LS_exit:
#endif

    /* Handle all other exceptions. All can have user-defined handlers. */
    /* NOTE: we'll stay on the user stack for exception handling.       */

    /* Allocate exception frame and save minimal context. */
    mov     a0, sp
    addi    sp, sp, -XT_STK_FRMSZ
    s32i    a0, sp, XT_STK_A1               /* save pre-exception SP */
    #if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -12                     /* for debug backtrace */
    #endif
    rsr     a0, PS                          /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_1                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    #if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -16                     /* for debug backtrace */
    #endif
    s32i    a12, sp, XT_STK_A12             /* _xt_context_save requires A12- */
    s32i    a13, sp, XT_STK_A13             /* A13 to have already been saved */
    call0   _xt_context_save

    /* Save exc cause and vaddr into exception frame */
    rsr     a0, EXCCAUSE
    s32i    a0, sp, XT_STK_EXCCAUSE
    rsr     a0, EXCVADDR
    s32i    a0, sp, XT_STK_EXCVADDR

    /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
    rsr     a0, EXCSAVE_1                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0

    /* Set up PS for C, reenable debug and NMI interrupts, and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS

    /*
        Create pseudo base save area. At this point, sp is still pointing to the
        allocated and filled exception stack frame.
    */
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    l32i    a3, sp, XT_STK_A0               /* Copy pre-exception a0 (return address) */
    s32e    a3, sp, -16
    l32i    a3, sp, XT_STK_A1               /* Copy pre-exception a1 (stack pointer) */
    s32e    a3, sp, -12
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
    rsr     a0, EPC_1                       /* return address for debug backtrace */
    movi    a5, 0xC0000000                  /* constant with top 2 bits set (call size) */
    rsync                                   /* wait for WSR.PS to complete */
    or      a0, a0, a5                      /* set top 2 bits */
    addx2   a0, a5, a0                      /* clear top bit -- thus simulating call4 size */
    #else
    rsync                                   /* wait for WSR.PS to complete */
    #endif
    #endif

    rsr     a2, EXCCAUSE                    /* recover exc cause */

    #ifdef XT_INTEXC_HOOKS
    /*
    Call exception hook to pre-handle exceptions (if installed).
    Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling).
    */
    movi    a4, _xt_intexc_hooks
    l32i    a4, a4, 0                       /* user exception hook index 0 */
    beqz    a4, 1f
.Ln_xt_user_exc_call_hook:
    #ifdef __XTENSA_CALL0_ABI__
    callx0  a4
    beqi    a2, -1, .L_xt_user_done         /* hook fully handled the exception */
    #else
    mov     a6, a2
    callx4  a4
    beqi    a6, -1, .L_xt_user_done         /* hook fully handled the exception */
    mov     a2, a6
    #endif
1:
    #endif

    rsr     a2, EXCCAUSE                    /* recover exc cause */
    movi    a3, _xt_exception_table
    get_percpu_entry_for a2, a4             /* a2 = index for this core's table slice */
    addx4   a4, a2, a3                      /* a4 = address of exception table entry */
    l32i    a4, a4, 0                       /* a4 = handler address */
    #ifdef __XTENSA_CALL0_ABI__
    mov     a2, sp                          /* a2 = pointer to exc frame */
    callx0  a4                              /* call handler */
    #else
    mov     a6, sp                          /* a6 = pointer to exc frame */
    callx4  a4                              /* call handler */
    #endif

.L_xt_user_done:

    /* Restore context and return */
    call0   _xt_context_restore
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, PS
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_1
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove exception frame */
    rsync                                   /* ensure PS and EPC written */
    rfe                                     /* PS.EXCM is cleared */
821
822
823/*
824--------------------------------------------------------------------------------
825  Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
826  on entry and used to return to a thread or interrupted interrupt handler.
827--------------------------------------------------------------------------------
828*/
829
830    .global     _xt_user_exit
831    .type       _xt_user_exit,@function
832    .align      4
833_xt_user_exit:
834    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
835    wsr     a0, PS
836    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
837    wsr     a0, EPC_1
838    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
839    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
840    rsync                                   /* ensure PS and EPC written */
841    rfe                                     /* PS.EXCM is cleared */
842
843
844/*
845
846--------------------------------------------------------------------------------
847Syscall Exception Handler (jumped to from User Exception Handler).
848Syscall 0 is required to spill the register windows (no-op in Call 0 ABI).
849Only syscall 0 is handled here. Other syscalls return -1 to caller in a2.
850--------------------------------------------------------------------------------
851*/
852
    .section .iram1,"ax"
    .type       _xt_syscall_exc,@function
    .align      4
_xt_syscall_exc:

    #ifdef __XTENSA_CALL0_ABI__
    /*
    Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI.
    Use a minimal stack frame (16B) to save A2 & A3 for scratch.
    PS.EXCM could be cleared here, but unlikely to improve worst-case latency.
    rsr     a0, PS
    addi    a0, a0, -PS_EXCM_MASK
    wsr     a0, PS
    */
    addi    sp, sp, -16                     /* minimal 16-byte scratch frame */
    s32i    a2, sp, 8
    s32i    a3, sp, 12
    #else   /* Windowed ABI */
    /*
    Save necessary context and spill the register windows.
    PS.EXCM is still set and must remain set until after the spill.
    Reuse context save function though it saves more than necessary.
    For this reason, a full interrupt stack frame is allocated.
    */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a12, sp, XT_STK_A12             /* _xt_context_save requires A12- */
    s32i    a13, sp, XT_STK_A13             /* A13 to have already been saved */
    call0   _xt_context_save
    #endif

    /*
    Grab the interruptee's PC and skip over the 'syscall' instruction.
    If it's at the end of a zero-overhead loop and it's not on the last
    iteration, decrement loop counter and skip to beginning of loop.
    */
    rsr     a2, EPC_1                       /* a2 = PC of 'syscall' */
    addi    a3, a2, 3                       /* ++PC (syscall is 3 bytes) */
    #if XCHAL_HAVE_LOOPS
    rsr     a0, LEND                        /* if (PC == LEND       */
    bne     a3, a0, 1f
    rsr     a0, LCOUNT                      /*     && LCOUNT != 0)  */
    beqz    a0, 1f                          /* {                    */
    addi    a0, a0, -1                      /*   --LCOUNT           */
    rsr     a3, LBEG                        /*   PC = LBEG          */
    wsr     a0, LCOUNT                      /* }                    */
    #endif
1:  wsr     a3, EPC_1                       /* update PC            */

    /* Restore interruptee's context and return from exception. */
    #ifdef __XTENSA_CALL0_ABI__
    l32i    a2, sp, 8
    l32i    a3, sp, 12
    addi    sp, sp, 16
    #else
    call0   _xt_context_restore
    addi    sp, sp, XT_STK_FRMSZ
    #endif
    movi    a0, -1
    movnez  a2, a0, a2                      /* return -1 if not syscall 0 */
    rsr     a0, EXCSAVE_1                   /* restore interruptee's a0 */
    rfe
914
915/*
916--------------------------------------------------------------------------------
917Co-Processor Exception Handler (jumped to from User Exception Handler).
918These exceptions are generated by co-processor instructions, which are only
919allowed in thread code (not in interrupts or kernel code). This restriction is
920deliberately imposed to reduce the burden of state-save/restore in interrupts.
921--------------------------------------------------------------------------------
922*/
923#if XCHAL_CP_NUM > 0
924
    .section .rodata, "a"

/* Offset to CP n save area in thread's CP save area. */
    .global _xt_coproc_sa_offset
    .type   _xt_coproc_sa_offset,@object
    .align  16                      /* minimize crossing cache boundaries */
_xt_coproc_sa_offset:
    .word   XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
    .word   XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA

/* Bitmask for CP n's CPENABLE bit. */
    .type   _xt_coproc_mask,@object
    .align  16,,8                   /* try to keep it all in one cache line */
    .set    i, 0
_xt_coproc_mask:
    .rept   XCHAL_CP_MAX
    .long   (i<<16) | (1<<i)    // upper 16-bits = i, lower = bitmask
    .set    i, i+1
    .endr

    .data

/* Owner thread of CP n, identified by thread's CP save area (0 = unowned).
   One array of XCHAL_CP_MAX entries per processor; _xt_coproc_exc indexes
   this by (core id * XCHAL_CP_MAX * 4) + (n * 4). */
    .global _xt_coproc_owner_sa
    .type   _xt_coproc_owner_sa,@object
    .align  16,,XCHAL_CP_MAX<<2     /* minimize crossing cache boundaries */
_xt_coproc_owner_sa:
    .space  (XCHAL_CP_MAX * portNUM_PROCESSORS) << 2

    .section .iram1,"ax"


/* Trampolines: conditional branches have limited reach, so branch here
   and take a full-range jump to the real target. */
    .align  4
.L_goto_invalid:
    j   .L_xt_coproc_invalid    /* not in a thread (invalid) */
    .align  4
.L_goto_done:
    j   .L_xt_coproc_done
963
964
965/*
966--------------------------------------------------------------------------------
967  Coprocessor exception handler.
968  At entry, only a0 has been saved (in EXCSAVE_1).
969--------------------------------------------------------------------------------
970*/
971
972    .type   _xt_coproc_exc,@function
973    .align  4
974
975_xt_coproc_exc:
976
977    /* Allocate interrupt stack frame and save minimal context. */
978    mov     a0, sp                          /* sp == a1 */
979    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
980    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
981    #if XCHAL_HAVE_WINDOWED
982    s32e    a0, sp, -12                     /* for debug backtrace */
983    #endif
984    rsr     a0, PS                          /* save interruptee's PS */
985    s32i    a0, sp, XT_STK_PS
986    rsr     a0, EPC_1                       /* save interruptee's PC */
987    s32i    a0, sp, XT_STK_PC
988    rsr     a0, EXCSAVE_1                   /* save interruptee's a0 */
989    s32i    a0, sp, XT_STK_A0
990    #if XCHAL_HAVE_WINDOWED
991    s32e    a0, sp, -16                     /* for debug backtrace */
992    #endif
993    movi    a0, _xt_user_exit               /* save exit point for dispatch */
994    s32i    a0, sp, XT_STK_EXIT
995
996    rsr     a0, EXCCAUSE
997    s32i    a5, sp, XT_STK_A5               /* save a5 */
998    addi    a5, a0, -EXCCAUSE_CP0_DISABLED  /* a5 = CP index */
999
1000    /* Save a few more of interruptee's registers (a5 was already saved). */
1001    s32i    a2,  sp, XT_STK_A2
1002    s32i    a3,  sp, XT_STK_A3
1003    s32i    a4,  sp, XT_STK_A4
1004    s32i    a15, sp, XT_STK_A15
1005
1006    /* Get co-processor state save area of new owner thread. */
1007    call0   XT_RTOS_CP_STATE                /* a15 = new owner's save area */
1008    #if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
1009    beqz    a15, .L_goto_invalid            /* not in a thread (invalid) */
1010    #else
1011    #ifndef CONFIG_FREERTOS_FPU_IN_ISR
1012    beqz    a15, .L_goto_invalid
1013    #endif
1014    #endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
1015
1016    /*When FPU in ISR is enabled we could deal with zeroed a15 */
1017
1018    /* Enable the co-processor's bit in CPENABLE. */
1019    movi    a0, _xt_coproc_mask
1020    rsr     a4, CPENABLE                    /* a4 = CPENABLE */
1021    addx4   a0, a5, a0                      /* a0 = &_xt_coproc_mask[n] */
1022    l32i    a0, a0, 0                       /* a0 = (n << 16) | (1 << n) */
1023
1024    /* FPU operations are incompatible with non-pinned tasks. If we have a FPU operation
1025       here, to keep the entire thing from crashing, it's better to pin the task to whatever
1026       core we're running on now. */
1027    movi    a2, pxCurrentTCB
1028    getcoreid a3
1029    addx4     a2,  a3, a2
1030    l32i    a2, a2, 0                       /* a2 = start of pxCurrentTCB[cpuid] */
1031    addi    a2, a2, TASKTCB_XCOREID_OFFSET  /* offset to xCoreID in tcb struct */
1032    s32i    a3, a2, 0                       /* store current cpuid */
1033
1034    /* Grab correct xt_coproc_owner_sa for this core */
1035    movi    a2, XCHAL_CP_MAX << 2
1036    mull    a2, a2, a3                      /* multiply by current processor id */
1037    movi    a3, _xt_coproc_owner_sa         /* a3 = base of owner array */
1038    add     a3, a3, a2                      /* a3 = owner area needed for this processor */
1039
1040    extui   a2, a0, 0, 16                   /* coprocessor bitmask portion */
1041    or      a4, a4, a2                      /* a4 = CPENABLE | (1 << n) */
1042    wsr     a4, CPENABLE
1043
1044/*
1045Keep loading _xt_coproc_owner_sa[n] atomic (=load once, then use that value
1046everywhere): _xt_coproc_release assumes it works like this in order not to need
1047locking.
1048*/
1049
1050
1051    /* Get old coprocessor owner thread (save area ptr) and assign new one.  */
1052    addx4   a3,  a5, a3                      /* a3 = &_xt_coproc_owner_sa[n] */
1053    l32i    a2,  a3, 0                       /* a2 = old owner's save area */
1054    s32i    a15, a3, 0                       /* _xt_coproc_owner_sa[n] = new */
1055    rsync                                    /* ensure wsr.CPENABLE is complete */
1056
1057    /* Only need to context switch if new owner != old owner. */
1058    /* If float is necessary on ISR, we need to remove this check */
1059    /* below, because on restoring from ISR we may have new == old condition used
1060     * to force cp restore to next thread
1061     */
1062    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1063    #ifndef CONFIG_FREERTOS_FPU_IN_ISR
1064    #endif
1065    beq     a15, a2, .L_goto_done           /* new owner == old, we're done */
1066    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1067    #endif
1068    #endif
1069
1070    /* If no old owner then nothing to save. */
1071    beqz    a2, .L_check_new
1072
1073    /* If old owner not actively using CP then nothing to save. */
1074    l16ui   a4,  a2,  XT_CPENABLE           /* a4 = old owner's CPENABLE */
1075    bnone   a4,  a0,  .L_check_new          /* old owner not using CP    */
1076
1077.L_save_old:
1078    /* Save old owner's coprocessor state. */
1079
1080    movi    a5, _xt_coproc_sa_offset
1081
1082    /* Mark old owner state as no longer active (CPENABLE bit n clear). */
1083    xor     a4,  a4,  a0                    /* clear CP bit in CPENABLE    */
1084    s16i    a4,  a2,  XT_CPENABLE           /* update old owner's CPENABLE */
1085
1086    extui   a4,  a0,  16,  5                /* a4 = CP index = n */
1087    addx4   a5,  a4,  a5                    /* a5 = &_xt_coproc_sa_offset[n] */
1088
1089    /* Mark old owner state as saved (CPSTORED bit n set). */
1090    l16ui   a4,  a2,  XT_CPSTORED           /* a4 = old owner's CPSTORED */
1091    l32i    a5,  a5,  0                     /* a5 = XT_CP[n]_SA offset */
1092    or      a4,  a4,  a0                    /* set CP in old owner's CPSTORED */
1093    s16i    a4,  a2,  XT_CPSTORED           /* update old owner's CPSTORED */
1094    l32i    a2, a2, XT_CP_ASA               /* ptr to actual (aligned) save area */
1095    extui   a3, a0, 16, 5                   /* a3 = CP index = n */
1096    add     a2, a2, a5                      /* a2 = old owner's area for CP n */
1097
1098    /*
1099    The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
1100    It is theoretically possible for Xtensa processor designers to write TIE
1101    that causes more address registers to be affected, but it is generally
1102    unlikely. If that ever happens, more registers needs to be saved/restored
1103    around this macro invocation, and the value in a15 needs to be recomputed.
1104    */
1105    xchal_cpi_store_funcbody
1106
1107.L_check_new:
1108    /* Check if any state has to be restored for new owner. */
1109    /* NOTE: a15 = new owner's save area, cannot be zero when we get here. */
1110    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
1111    beqz    a15, .L_xt_coproc_done
1112    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
1113
1114    l16ui   a3,  a15, XT_CPSTORED           /* a3 = new owner's CPSTORED */
1115    movi    a4, _xt_coproc_sa_offset
1116    bnone   a3,  a0,  .L_check_cs           /* full CP not saved, check callee-saved */
1117    xor     a3,  a3,  a0                    /* CPSTORED bit is set, clear it */
1118    s16i    a3,  a15, XT_CPSTORED           /* update new owner's CPSTORED */
1119
1120    /* Adjust new owner's save area pointers to area for CP n. */
1121    extui   a3,  a0, 16, 5                  /* a3 = CP index = n */
1122    addx4   a4,  a3, a4                     /* a4 = &_xt_coproc_sa_offset[n] */
1123    l32i    a4,  a4, 0                      /* a4 = XT_CP[n]_SA */
1124    l32i    a5, a15, XT_CP_ASA              /* ptr to actual (aligned) save area */
1125    add     a2,  a4, a5                     /* a2 = new owner's area for CP */
1126
1127    /*
1128    The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
1129    It is theoretically possible for Xtensa processor designers to write TIE
1130    that causes more address registers to be affected, but it is generally
1131    unlikely. If that ever happens, more registers needs to be saved/restored
1132    around this macro invocation.
1133    */
1134    xchal_cpi_load_funcbody
1135
1136    /* Restore interruptee's saved registers. */
1137    /* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does it. */
1138.L_xt_coproc_done:
1139    l32i    a15, sp, XT_STK_A15
1140    l32i    a5,  sp, XT_STK_A5
1141    l32i    a4,  sp, XT_STK_A4
1142    l32i    a3,  sp, XT_STK_A3
1143    l32i    a2,  sp, XT_STK_A2
1144    call0   _xt_user_exit                   /* return via exit dispatcher */
1145    /* Never returns here - call0 is used as a jump (see note at top) */
1146
1147.L_check_cs:
1148    /* a0 = CP mask in low bits, a15 = new owner's save area */
1149    l16ui   a2, a15, XT_CP_CS_ST            /* a2 = mask of CPs saved    */
1150    bnone   a2,  a0, .L_xt_coproc_done      /* if no match then done     */
1151    and     a2,  a2, a0                     /* a2 = which CPs to restore */
1152    extui   a2,  a2, 0, 8                   /* extract low 8 bits        */
1153    s32i    a6,  sp, XT_STK_A6              /* save extra needed regs    */
1154    s32i    a7,  sp, XT_STK_A7
1155    s32i    a13, sp, XT_STK_A13
1156    s32i    a14, sp, XT_STK_A14
1157    call0   _xt_coproc_restorecs            /* restore CP registers      */
1158    l32i    a6,  sp, XT_STK_A6              /* restore saved registers   */
1159    l32i    a7,  sp, XT_STK_A7
1160    l32i    a13, sp, XT_STK_A13
1161    l32i    a14, sp, XT_STK_A14
1162    j       .L_xt_coproc_done
1163
1164    /* Co-processor exception occurred outside a thread (not supported). */
1165.L_xt_coproc_invalid:
1166    movi    a0,PANIC_RSN_COPROCEXCEPTION
1167    wsr     a0,EXCCAUSE
1168    call0   _xt_panic                       /* not in a thread (invalid) */
1169    /* never returns */
1170
1171
1172#endif /* XCHAL_CP_NUM */
1173
1174
1175/*
1176-------------------------------------------------------------------------------
1177  Level 1 interrupt dispatch. Assumes stack frame has not been allocated yet.
1178-------------------------------------------------------------------------------
1179*/
1180
    .section .iram1,"ax"
    .type       _xt_lowint1,@function
    .align      4

/* Level-1 interrupt handler: builds the interrupt stack frame from
   PS/EPC_1/EXCSAVE_1, enters the RTOS, then dispatches C ISRs. */
_xt_lowint1:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, PS                          /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_1                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_1                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_user_exit               /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    /* EXCSAVE_1 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_1
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */


    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(1) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(1) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 1 XCHAL_INTLEVEL1_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */
1230
1231
1232/*
1233-------------------------------------------------------------------------------
1234  MEDIUM PRIORITY (LEVEL 2+) INTERRUPT VECTORS AND LOW LEVEL HANDLERS.
1235
1236  Medium priority interrupts are by definition those with priority greater
1237  than 1 and not greater than XCHAL_EXCM_LEVEL. These are disabled by
1238  setting PS.EXCM and therefore can easily support a C environment for
1239  handlers in C, and interact safely with an RTOS.
1240
1241  Each vector goes at a predetermined location according to the Xtensa
1242  hardware configuration, which is ensured by its placement in a special
1243  section known to the Xtensa linker support package (LSP). It performs
1244  the minimum necessary before jumping to the handler in the .text section.
1245
1246  The corresponding handler goes in the normal .text section. It sets up
1247  the appropriate stack frame, saves a few vector-specific registers and
1248  calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
1249  and enter the RTOS, then sets up a C environment. It then calls the
1250  user's interrupt handler code (which may be coded in C) and finally
1251  calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
1252
1253  While XT_RTOS_INT_EXIT does not return directly to the interruptee,
1254  eventually the RTOS scheduler will want to dispatch the interrupted
1255  task or handler. The scheduler will return to the exit point that was
1256  saved in the interrupt stack frame at XT_STK_EXIT.
1257-------------------------------------------------------------------------------
1258*/
1259
1260#if XCHAL_EXCM_LEVEL >= 2
1261
    .begin      literal_prefix .Level2InterruptVector
    .section    .Level2InterruptVector.text, "ax"
    .global     _Level2Vector
    .type       _Level2Vector,@function
    .align      4
_Level2Vector:
    wsr     a0, EXCSAVE_2                   /* preserve a0 */
    call0   _xt_medint2                     /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

    .section .iram1,"ax"
    .type       _xt_medint2,@function
    .align      4

/* Level-2 handler: builds the interrupt stack frame from EPS_2/EPC_2/
   EXCSAVE_2, enters the RTOS, then dispatches C ISRs at level 2. */
_xt_medint2:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_2                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_2                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_2                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint2_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_2 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_2
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */


    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(2) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(2) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 2 XCHAL_INTLEVEL2_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint2_exit
    .type       _xt_medint2_exit,@function
    .align      4
_xt_medint2_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, EPS_2
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_2
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
    rsync                                   /* ensure EPS and EPC written */
    rfi     2
1340
1341#endif  /* Level 2 */
1342
1343#if XCHAL_EXCM_LEVEL >= 3
1344
    .begin      literal_prefix .Level3InterruptVector
    .section    .Level3InterruptVector.text, "ax"
    .global     _Level3Vector
    .type       _Level3Vector,@function
    .align      4
_Level3Vector:
    wsr     a0, EXCSAVE_3                   /* preserve a0 */
    call0   _xt_medint3                     /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

    .section .iram1,"ax"
    .type       _xt_medint3,@function
    .align      4

/* Level-3 handler: builds the interrupt stack frame from EPS_3/EPC_3/
   EXCSAVE_3, enters the RTOS, then dispatches C ISRs at level 3. */
_xt_medint3:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_3                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_3                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_3                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint3_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_3 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_3
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */


    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(3) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(3) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 3 XCHAL_INTLEVEL3_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint3_exit
    .type       _xt_medint3_exit,@function
    .align      4
_xt_medint3_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, EPS_3
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_3
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
    rsync                                   /* ensure EPS and EPC written */
    rfi     3
1423
1424#endif  /* Level 3 */
1425
1426#if XCHAL_EXCM_LEVEL >= 4
1427
    .begin      literal_prefix .Level4InterruptVector
    .section    .Level4InterruptVector.text, "ax"
    .global     _Level4Vector
    .type       _Level4Vector,@function
    .align      4
_Level4Vector:
    wsr     a0, EXCSAVE_4                   /* preserve a0 */
    call0   _xt_medint4                     /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

    .section .iram1,"ax"
    .type       _xt_medint4,@function
    .align      4

/* Level-4 handler: builds the interrupt stack frame from EPS_4/EPC_4/
   EXCSAVE_4, enters the RTOS, then dispatches C ISRs at level 4. */
_xt_medint4:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_4                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_4                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_4                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint4_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_4 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_4
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */


    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(4) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(4) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 4 XCHAL_INTLEVEL4_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint4_exit
    .type       _xt_medint4_exit,@function
    .align      4
_xt_medint4_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, EPS_4
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_4
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
    rsync                                   /* ensure EPS and EPC written */
    rfi     4
1505
1506#endif  /* Level 4 */
1507
1508#if XCHAL_EXCM_LEVEL >= 5
1509
    /*
    Medium-priority (level 5) interrupt vector. Lives at the hardware-fixed
    level-5 vector location; saves the interruptee's a0 in EXCSAVE_5 and
    transfers control to _xt_medint5 in IRAM. call0 is used purely as a
    jump and never returns here.
    */
    .begin      literal_prefix .Level5InterruptVector
    .section    .Level5InterruptVector.text, "ax"
    .global     _Level5Vector
    .type       _Level5Vector,@function
    .align      4
_Level5Vector:
    wsr     a0, EXCSAVE_5                   /* preserve a0 */
    call0   _xt_medint5                     /* load interrupt handler */

    .end        literal_prefix
1520
    /*
    Level-5 medium-priority interrupt dispatcher. Entered from _Level5Vector
    with the interruptee's a0 saved in EXCSAVE_5. Allocates an interrupt
    stack frame on the interrupted stack, saves the level-specific state
    (EPS_5, EPC_5, a0) plus the exit pointer, enters the RTOS via
    XT_RTOS_INT_ENTER, then dispatches C handlers for level-5 interrupts
    with interrupts above this level enabled.
    */
    .section .iram1,"ax"
    .type       _xt_medint5,@function
    .align      4
_xt_medint5:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_5                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_5                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_5                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint5_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_5 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).
    Only done for the windowed ABI when XT_DEBUG_BACKTRACE is enabled. */
    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_5
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(5) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(5) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 5 XCHAL_INTLEVEL5_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */
1568
1569    /*
1570    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1571    on entry and used to return to a thread or interrupted interrupt handler.
1572    */
1573    .global     _xt_medint5_exit
1574    .type       _xt_medint5_exit,@function
1575    .align      4
1576_xt_medint5_exit:
1577    /* Restore only level-specific regs (the rest were already restored) */
1578    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
1579    wsr     a0, EPS_5
1580    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
1581    wsr     a0, EPC_5
1582    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
1583    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
1584    rsync                                   /* ensure EPS and EPC written */
1585    rfi     5
1586
1587#endif  /* Level 5 */
1588
1589#if XCHAL_EXCM_LEVEL >= 6
1590
    /*
    Medium-priority (level 6) interrupt vector. Lives at the hardware-fixed
    level-6 vector location; saves the interruptee's a0 in EXCSAVE_6 and
    transfers control to _xt_medint6 in IRAM. call0 is used purely as a
    jump and never returns here.
    */
    .begin      literal_prefix .Level6InterruptVector
    .section    .Level6InterruptVector.text, "ax"
    .global     _Level6Vector
    .type       _Level6Vector,@function
    .align      4
_Level6Vector:
    wsr     a0, EXCSAVE_6                   /* preserve a0 */
    call0   _xt_medint6                     /* load interrupt handler */

    .end        literal_prefix
1601
    /*
    Level-6 medium-priority interrupt dispatcher. Entered from _Level6Vector
    with the interruptee's a0 saved in EXCSAVE_6. Allocates an interrupt
    stack frame on the interrupted stack, saves the level-specific state
    (EPS_6, EPC_6, a0) plus the exit pointer, enters the RTOS via
    XT_RTOS_INT_ENTER, then dispatches C handlers for level-6 interrupts
    with interrupts above this level enabled.
    */
    .section .iram1,"ax"
    .type       _xt_medint6,@function
    .align      4
_xt_medint6:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_6                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_6                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_6                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint6_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_6 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).
    Only done for the windowed ABI when XT_DEBUG_BACKTRACE is enabled. */
    #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_6
    #endif
    #endif
    #endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(6) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(6) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 6 XCHAL_INTLEVEL6_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */
1649
1650    /*
1651    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1652    on entry and used to return to a thread or interrupted interrupt handler.
1653    */
1654    .global     _xt_medint6_exit
1655    .type       _xt_medint6_exit,@function
1656    .align      4
1657_xt_medint6_exit:
1658    /* Restore only level-specific regs (the rest were already restored) */
1659    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
1660    wsr     a0, EPS_6
1661    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
1662    wsr     a0, EPC_6
1663    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
1664    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
1665    rsync                                   /* ensure EPS and EPC written */
1666    rfi     6
1667
1668#endif  /* Level 6 */
1669
1670
1671/*******************************************************************************
1672
1673HIGH PRIORITY (LEVEL > XCHAL_EXCM_LEVEL) INTERRUPT VECTORS AND HANDLERS
1674
1675High priority interrupts are by definition those with priorities greater
1676than XCHAL_EXCM_LEVEL. This includes non-maskable (NMI). High priority
1677interrupts cannot interact with the RTOS, that is they must save all regs
1678they use and not call any RTOS function.
1679
1680A further restriction imposed by the Xtensa windowed architecture is that
1681high priority interrupts must not modify the stack area even logically
1682"above" the top of the interrupted stack (they need to provide their
1683own stack or static save area).
1684
1685Cadence Design Systems recommends high priority interrupt handlers be coded in assembly
1686and used for purposes requiring very short service times.
1687
1688Here are templates for high priority (level 2+) interrupt vectors.
1689They assume only one interrupt per level to avoid the burden of identifying
1690which interrupts at this level are pending and enabled. This allows for
1691minimum latency and avoids having to save/restore a2 in addition to a0.
1692If more than one interrupt per high priority level is configured, this burden
1693is on the handler which in any case must provide a way to save and restore
1694registers it uses without touching the interrupted stack.
1695
1696Each vector goes at a predetermined location according to the Xtensa
1697hardware configuration, which is ensured by its placement in a special
1698section known to the Xtensa linker support package (LSP). It performs
1699the minimum necessary before jumping to the handler in the .text section.
1700
1701*******************************************************************************/
1702
1703/*
1704These stubs just call xt_highintX/xt_nmi to handle the real interrupt. Please define
1705these in an external assembly source file. If these symbols are not defined anywhere
1706else, the defaults in xtensa_vector_defaults.S are used.
1707*/
1708
1709#if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2
1710
    /*
    Level-2 high-priority interrupt vector stub. Saves the interruptee's a0
    in EXCSAVE_2 and jumps to the user-supplied handler xt_highint2 (the
    default implementation comes from xtensa_vector_defaults.S).
    */
    .begin      literal_prefix .Level2InterruptVector
    .section    .Level2InterruptVector.text, "ax"
    .global     _Level2Vector
    .type       _Level2Vector,@function
    .global     xt_highint2
    .align      4
_Level2Vector:
    wsr     a0, EXCSAVE_2                   /* preserve a0 */
    call0   xt_highint2                    /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix
1722
1723#endif  /* Level 2 */
1724
1725#if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3
1726
    /*
    Level-3 high-priority interrupt vector stub. Saves the interruptee's a0
    in EXCSAVE_3 and jumps to the user-supplied handler xt_highint3 (the
    default implementation comes from xtensa_vector_defaults.S).
    */
    .begin      literal_prefix .Level3InterruptVector
    .section    .Level3InterruptVector.text, "ax"
    .global     _Level3Vector
    .type       _Level3Vector,@function
    .global     xt_highint3
    .align      4
_Level3Vector:
    wsr     a0, EXCSAVE_3                   /* preserve a0 */
    call0   xt_highint3                    /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix
1739
1740#endif  /* Level 3 */
1741
1742#if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4
1743
    /*
    Level-4 high-priority interrupt vector stub. Saves the interruptee's a0
    in EXCSAVE_4 and jumps to the user-supplied handler xt_highint4 (the
    default implementation comes from xtensa_vector_defaults.S).
    */
    .begin      literal_prefix .Level4InterruptVector
    .section    .Level4InterruptVector.text, "ax"
    .global     _Level4Vector
    .type       _Level4Vector,@function
    .global     xt_highint4
    .align      4
_Level4Vector:
    wsr     a0, EXCSAVE_4                   /* preserve a0 */
    call0   xt_highint4                    /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix
1756
1757#endif  /* Level 4 */
1758
1759#if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5
1760
    /*
    Level-5 high-priority interrupt vector stub (used only when level 5 is
    above XCHAL_EXCM_LEVEL). Saves the interruptee's a0 in EXCSAVE_5 and
    jumps to the user-supplied handler xt_highint5 (the default
    implementation comes from xtensa_vector_defaults.S).
    */
    .begin      literal_prefix .Level5InterruptVector
    .section    .Level5InterruptVector.text, "ax"
    .global     _Level5Vector
    .type       _Level5Vector,@function
    .global     xt_highint5
    .align      4
_Level5Vector:
    wsr     a0, EXCSAVE_5                   /* preserve a0 */
    call0   xt_highint5                    /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix
1773
1774#endif  /* Level 5 */
1775
1776#if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6
1777
    /*
    Level-6 high-priority interrupt vector stub (used only when level 6 is
    above XCHAL_EXCM_LEVEL). Saves the interruptee's a0 in EXCSAVE_6 and
    jumps to the user-supplied handler xt_highint6 (the default
    implementation comes from xtensa_vector_defaults.S).
    */
    .begin      literal_prefix .Level6InterruptVector
    .section    .Level6InterruptVector.text, "ax"
    .global     _Level6Vector
    .type       _Level6Vector,@function
    .global     xt_highint6
    .align      4
_Level6Vector:
    wsr     a0, EXCSAVE_6                   /* preserve a0 */
    call0   xt_highint6                    /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix
1790
1791#endif  /* Level 6 */
1792
1793#if XCHAL_HAVE_NMI
1794
    /*
    Non-maskable interrupt vector stub. Saves the interruptee's a0 in the
    EXCSAVE register corresponding to XCHAL_NMILEVEL and jumps to the
    user-supplied handler xt_nmi (the default implementation comes from
    xtensa_vector_defaults.S).
    */
    .begin      literal_prefix .NMIExceptionVector
    .section    .NMIExceptionVector.text, "ax"
    .global     _NMIExceptionVector
    .type       _NMIExceptionVector,@function
    .global     xt_nmi
    .align      4
_NMIExceptionVector:
    /* NOTE(review): the trailing "_" token below is carried over verbatim
       from the upstream source - confirm it assembles as intended. */
    wsr     a0, EXCSAVE + XCHAL_NMILEVEL  _ /* preserve a0 */
    call0   xt_nmi                         /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix
1807
1808#endif  /* NMI */
1809
1810
1811/*******************************************************************************
1812
1813WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER
1814
1815Here is the code for each window overflow/underflow exception vector and
1816(interspersed) efficient code for handling the alloca exception cause.
1817Window exceptions are handled entirely in the vector area and are very
1818tight for performance. The alloca exception is also handled entirely in
1819the window vector area so comes at essentially no cost in code size.
1820Users should never need to modify them and Cadence Design Systems recommends
1821they do not.
1822
1823Window handlers go at predetermined vector locations according to the
1824Xtensa hardware configuration, which is ensured by their placement in a
1825special section known to the Xtensa linker support package (LSP). Since
1826their offsets in that section are always the same, the LSPs do not define
1827a section per vector.
1828
1829These things are coded for XEA2 only (XEA1 is not supported).
1830
1831Note on Underflow Handlers:
1832The underflow handler for returning from call[i+1] to call[i]
1833must preserve all the registers from call[i+1]'s window.
1834In particular, a0 and a1 must be preserved because the RETW instruction
1835will be reexecuted (and may even underflow if an intervening exception
1836has flushed call[i]'s registers).
1837Registers a2 and up may contain return values.
1838
1839*******************************************************************************/
1840
1841#if XCHAL_HAVE_WINDOWED
1842
1843    .section .WindowVectors.text, "ax"
1844
1845/*
1846--------------------------------------------------------------------------------
1847Window Overflow Exception for Call4.
1848
1849Invoked if a call[i] referenced a register (a4-a15)
1850that contains data from ancestor call[j];
1851call[j] had done a call4 to call[j+1].
1852On entry here:
1853    window rotated to call[j] start point;
1854        a0-a3 are registers to be saved;
1855        a4-a15 must be preserved;
1856        a5 is call[j+1]'s stack pointer.
1857--------------------------------------------------------------------------------
1858*/
1859
    .org    0x0             /* offset within .WindowVectors.text fixed by hardware */
    .global _WindowOverflow4
_WindowOverflow4:

    /* a5 is call[j+1]'s SP; spill call[j]'s a0-a3 into its base-save area */
    s32e    a0, a5, -16     /* save a0 to call[j+1]'s stack frame */
    s32e    a1, a5, -12     /* save a1 to call[j+1]'s stack frame */
    s32e    a2, a5,  -8     /* save a2 to call[j+1]'s stack frame */
    s32e    a3, a5,  -4     /* save a3 to call[j+1]'s stack frame */
    rfwo                    /* rotates back to call[i] position */
1869
1870/*
1871--------------------------------------------------------------------------------
1872Window Underflow Exception for Call4
1873
1874Invoked by RETW returning from call[i+1] to call[i]
1875where call[i]'s registers must be reloaded (not live in ARs);
1876where call[i] had done a call4 to call[i+1].
1877On entry here:
1878        window rotated to call[i] start point;
1879        a0-a3 are undefined, must be reloaded with call[i].reg[0..3];
1880        a4-a15 must be preserved (they are call[i+1].reg[0..11]);
1881        a5 is call[i+1]'s stack pointer.
1882--------------------------------------------------------------------------------
1883*/
1884
    .org    0x40            /* offset within .WindowVectors.text fixed by hardware */
    .global _WindowUnderflow4
_WindowUnderflow4:

    /* a5 is call[i+1]'s SP; reload call[i]'s a0-a3 from its base-save area */
    l32e    a0, a5, -16     /* restore a0 from call[i+1]'s stack frame */
    l32e    a1, a5, -12     /* restore a1 from call[i+1]'s stack frame */
    l32e    a2, a5,  -8     /* restore a2 from call[i+1]'s stack frame */
    l32e    a3, a5,  -4     /* restore a3 from call[i+1]'s stack frame */
    rfwu                    /* return from window underflow */
1894
1895/*
1896--------------------------------------------------------------------------------
1897Handle alloca exception generated by interruptee executing 'movsp'.
1898This uses space between the window vectors, so is essentially "free".
1899All interruptee's regs are intact except a0 which is saved in EXCSAVE_1,
1900and PS.EXCM has been set by the exception hardware (can't be interrupted).
1901The fact the alloca exception was taken means the registers associated with
1902the base-save area have been spilled and will be restored by the underflow
1903handler, so those 4 registers are available for scratch.
1904The code is optimized to avoid unaligned branches and minimize cache misses.
1905--------------------------------------------------------------------------------
1906*/
1907
    .align  4
    .global _xt_alloca_exc
_xt_alloca_exc:

    rsr     a0, WINDOWBASE  /* grab WINDOWBASE before rotw changes it */
    rotw    -1              /* WINDOWBASE goes to a4, new a0-a3 are scratch */
    rsr     a2, PS          /* a2 = PS; PS.OWB still holds the pre-rotw base */
    extui   a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS   /* a3 = PS.OWB field */
    xor     a3, a3, a4      /* bits changed from old to current windowbase */
    rsr     a4, EXCSAVE_1   /* restore original a0 (now in a4) */
    slli    a3, a3, XCHAL_PS_OWB_SHIFT
    xor     a2, a2, a3      /* flip changed bits in old window base */
    wsr     a2, PS          /* update PS.OWB to new window base */
    rsync                   /* ensure PS write completes before dispatch */

    /* Dispatch on the window-call size encoded in the top two bits of the
       interruptee's return address (now in a4): call4, call8 or call12. */
    _bbci.l a4, 31, _WindowUnderflow4
    rotw    -1              /* original a0 goes to a8 */
    _bbci.l a8, 30, _WindowUnderflow8
    rotw    -1
    j               _WindowUnderflow12
1928
1929/*
1930--------------------------------------------------------------------------------
1931Window Overflow Exception for Call8
1932
1933Invoked if a call[i] referenced a register (a4-a15)
1934that contains data from ancestor call[j];
1935call[j] had done a call8 to call[j+1].
1936On entry here:
1937    window rotated to call[j] start point;
1938        a0-a7 are registers to be saved;
1939        a8-a15 must be preserved;
1940        a9 is call[j+1]'s stack pointer.
1941--------------------------------------------------------------------------------
1942*/
1943
    .org    0x80            /* offset within .WindowVectors.text fixed by hardware */
    .global _WindowOverflow8
_WindowOverflow8:

    /* a9 is call[j+1]'s SP; a0-a3 spill there, a4-a7 spill to the end of
       call[j]'s own frame, found via call[j-1]'s saved SP. */
    s32e    a0, a9, -16     /* save a0 to call[j+1]'s stack frame */
    l32e    a0, a1, -12     /* a0 <- call[j-1]'s sp
                               (used to find end of call[j]'s frame) */
    s32e    a1, a9, -12     /* save a1 to call[j+1]'s stack frame */
    s32e    a2, a9,  -8     /* save a2 to call[j+1]'s stack frame */
    s32e    a3, a9,  -4     /* save a3 to call[j+1]'s stack frame */
    s32e    a4, a0, -32     /* save a4 to call[j]'s stack frame */
    s32e    a5, a0, -28     /* save a5 to call[j]'s stack frame */
    s32e    a6, a0, -24     /* save a6 to call[j]'s stack frame */
    s32e    a7, a0, -20     /* save a7 to call[j]'s stack frame */
    rfwo                    /* rotates back to call[i] position */
1959
1960/*
1961--------------------------------------------------------------------------------
1962Window Underflow Exception for Call8
1963
1964Invoked by RETW returning from call[i+1] to call[i]
1965where call[i]'s registers must be reloaded (not live in ARs);
1966where call[i] had done a call8 to call[i+1].
1967On entry here:
1968        window rotated to call[i] start point;
1969        a0-a7 are undefined, must be reloaded with call[i].reg[0..7];
1970        a8-a15 must be preserved (they are call[i+1].reg[0..7]);
1971        a9 is call[i+1]'s stack pointer.
1972--------------------------------------------------------------------------------
1973*/
1974
    .org    0xC0            /* offset within .WindowVectors.text fixed by hardware */
    .global _WindowUnderflow8
_WindowUnderflow8:

    /* a9 is call[i+1]'s SP; a0-a3 reload from there, a4-a7 reload from the
       end of call[i]'s own frame, found via call[i-1]'s saved SP (a7 is
       used as scratch for that pointer and restored last). */
    l32e    a0, a9, -16     /* restore a0 from call[i+1]'s stack frame */
    l32e    a1, a9, -12     /* restore a1 from call[i+1]'s stack frame */
    l32e    a2, a9,  -8     /* restore a2 from call[i+1]'s stack frame */
    l32e    a7, a1, -12     /* a7 <- call[i-1]'s sp
                               (used to find end of call[i]'s frame) */
    l32e    a3, a9,  -4     /* restore a3 from call[i+1]'s stack frame */
    l32e    a4, a7, -32     /* restore a4 from call[i]'s stack frame */
    l32e    a5, a7, -28     /* restore a5 from call[i]'s stack frame */
    l32e    a6, a7, -24     /* restore a6 from call[i]'s stack frame */
    l32e    a7, a7, -20     /* restore a7 from call[i]'s stack frame */
    rfwu                    /* return from window underflow */
1990
1991/*
1992--------------------------------------------------------------------------------
1993Window Overflow Exception for Call12
1994
1995Invoked if a call[i] referenced a register (a4-a15)
1996that contains data from ancestor call[j];
1997call[j] had done a call12 to call[j+1].
1998On entry here:
1999    window rotated to call[j] start point;
2000        a0-a11 are registers to be saved;
2001        a12-a15 must be preserved;
2002        a13 is call[j+1]'s stack pointer.
2003--------------------------------------------------------------------------------
2004*/
2005
    .org    0x100           /* offset within .WindowVectors.text fixed by hardware */
    .global _WindowOverflow12
_WindowOverflow12:

    /* a13 is call[j+1]'s SP; a0-a3 spill there, a4-a11 spill to the end of
       call[j]'s own frame, found via call[j-1]'s saved SP. */
    s32e    a0,  a13, -16   /* save a0 to call[j+1]'s stack frame */
    l32e    a0,  a1,  -12   /* a0 <- call[j-1]'s sp
                               (used to find end of call[j]'s frame) */
    s32e    a1,  a13, -12   /* save a1 to call[j+1]'s stack frame */
    s32e    a2,  a13,  -8   /* save a2 to call[j+1]'s stack frame */
    s32e    a3,  a13,  -4   /* save a3 to call[j+1]'s stack frame */
    s32e    a4,  a0,  -48   /* save a4 to end of call[j]'s stack frame */
    s32e    a5,  a0,  -44   /* save a5 to end of call[j]'s stack frame */
    s32e    a6,  a0,  -40   /* save a6 to end of call[j]'s stack frame */
    s32e    a7,  a0,  -36   /* save a7 to end of call[j]'s stack frame */
    s32e    a8,  a0,  -32   /* save a8 to end of call[j]'s stack frame */
    s32e    a9,  a0,  -28   /* save a9 to end of call[j]'s stack frame */
    s32e    a10, a0,  -24   /* save a10 to end of call[j]'s stack frame */
    s32e    a11, a0,  -20   /* save a11 to end of call[j]'s stack frame */
    rfwo                    /* rotates back to call[i] position */
2025
2026/*
2027--------------------------------------------------------------------------------
2028Window Underflow Exception for Call12
2029
2030Invoked by RETW returning from call[i+1] to call[i]
2031where call[i]'s registers must be reloaded (not live in ARs);
2032where call[i] had done a call12 to call[i+1].
2033On entry here:
2034        window rotated to call[i] start point;
2035        a0-a11 are undefined, must be reloaded with call[i].reg[0..11];
2036        a12-a15 must be preserved (they are call[i+1].reg[0..3]);
2037        a13 is call[i+1]'s stack pointer.
2038--------------------------------------------------------------------------------
2039*/
2040
    .org 0x140              /* offset within .WindowVectors.text fixed by hardware */
    .global _WindowUnderflow12
_WindowUnderflow12:

    /* a13 is call[i+1]'s SP; a0-a3 reload from there, a4-a11 reload from the
       end of call[i]'s own frame, found via call[i-1]'s saved SP (a11 is
       used as scratch for that pointer and restored last). */
    l32e    a0,  a13, -16   /* restore a0 from call[i+1]'s stack frame */
    l32e    a1,  a13, -12   /* restore a1 from call[i+1]'s stack frame */
    l32e    a2,  a13,  -8   /* restore a2 from call[i+1]'s stack frame */
    l32e    a11, a1,  -12   /* a11 <- call[i-1]'s sp
                               (used to find end of call[i]'s frame) */
    l32e    a3,  a13,  -4   /* restore a3 from call[i+1]'s stack frame */
    l32e    a4,  a11, -48   /* restore a4 from end of call[i]'s stack frame */
    l32e    a5,  a11, -44   /* restore a5 from end of call[i]'s stack frame */
    l32e    a6,  a11, -40   /* restore a6 from end of call[i]'s stack frame */
    l32e    a7,  a11, -36   /* restore a7 from end of call[i]'s stack frame */
    l32e    a8,  a11, -32   /* restore a8 from end of call[i]'s stack frame */
    l32e    a9,  a11, -28   /* restore a9 from end of call[i]'s stack frame */
    l32e    a10, a11, -24   /* restore a10 from end of call[i]'s stack frame */
    l32e    a11, a11, -20   /* restore a11 from end of call[i]'s stack frame */
    rfwu                    /* return from window underflow */
2060
2061#endif /* XCHAL_HAVE_WINDOWED */
2062
2063    .section    .UserEnter.text, "ax"
2064    .global     call_user_start
2065    .type       call_user_start,@function
2066    .align      4
2067    .literal_position
2068