/*
 * FreeRTOS Kernel V10.6.2
 * Copyright (C) 2015-2019 Cadence Design Systems, Inc.
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

/*
 * XTENSA CONTEXT SAVE AND RESTORE ROUTINES
 *
 * Low-level Call0 functions for handling generic context save and restore of
 * registers not specifically addressed by the interrupt vectors and handlers.
 * Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
 * Except for the calls to RTOS functions, this code is generic to Xtensa.
 *
 * Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
 * save regs (A12-A15), which is always the case if the handlers are coded in C.
 * However A12, A13 are made available as scratch registers for interrupt dispatch
 * code, so are presumed saved anyway, and are always restored even in Call0 ABI.
 * Only A14, A15 are truly handled as callee-save regs.
 *
 * Because Xtensa is a configurable architecture, this port supports all user
 * generated configurations (except restrictions stated in the release notes).
 * This is accomplished by conditional compilation using macros and functions
 * defined in the Xtensa HAL (hardware abstraction layer) for your configuration.
 * Only the processor state included in your configuration is saved and restored,
 * including any processor state added by user configuration options or TIE.
 */

/*  Warn nicely if this file gets named with a lowercase .s instead of .S:  */
#define NOERROR #
NOERROR: .error "C preprocessor needed for this file: make sure its filename\
 ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."


#include "xtensa_rtos.h"

#ifdef XT_USE_OVLY
#include <xtensa/overlay_os_asm.h>
#endif

    .text
    .literal_position

/*******************************************************************************

_xt_context_save

    !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_restore (which also restores A12, A13).

Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
This function preserves A12 & A13 in order to provide the caller with 2 scratch
regs that need not be saved over the call to this function. The choice of which
2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
to avoid moving data more than necessary. Caller can assign regs accordingly.

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Original A12, A13 have already been saved in the interrupt stack frame.
    Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
    point of interruption.
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    A12, A13 as at entry (preserved).
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

*******************************************************************************/
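
/*
    For orientation, the state stored by this routine can be pictured as the
    following C-style view of the frame slots it fills in. This is an
    illustrative, hypothetical view only -- the real layout and the XT_STK_*
    offsets are defined in xtensa_rtos.h / xtensa_context.h, and PC, PS, A0,
    A1 (SP), A12, A13 are stored by the caller, not here:

        struct xt_stk_saved_by_context_save {      // hypothetical view, not the real frame
            unsigned a2_to_a11[10];                // general regs A2..A11
            unsigned a14, a15;                     // windowed ABI only
            unsigned sar;                          // SAR
            unsigned lbeg, lend, lcount;           // only if XCHAL_HAVE_LOOPS
            unsigned vpri;                         // only if XT_USE_SWPRI
            unsigned ovly;                         // only if XT_USE_OVLY
            // plus XCHAL_EXTRA_SA_SIZE bytes of TIE/coprocessor "extra" state
        };
*/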

    .global _xt_context_save
    .type   _xt_context_save,@function
    .align  4
_xt_context_save:

    s32i    a2,  sp, XT_STK_A2
    s32i    a3,  sp, XT_STK_A3
    s32i    a4,  sp, XT_STK_A4
    s32i    a5,  sp, XT_STK_A5
    s32i    a6,  sp, XT_STK_A6
    s32i    a7,  sp, XT_STK_A7
    s32i    a8,  sp, XT_STK_A8
    s32i    a9,  sp, XT_STK_A9
    s32i    a10, sp, XT_STK_A10
    s32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be saved here.
    a12-13 are the caller's responsibility so it can use them as scratch.
    So only need to save a14-a15 here for Windowed ABI (not Call0).
    */
    #ifndef __XTENSA_CALL0_ABI__
    s32i    a14, sp, XT_STK_A14
    s32i    a15, sp, XT_STK_A15
    #endif

    rsr     a3,  SAR
    s32i    a3,  sp, XT_STK_SAR

    #if XCHAL_HAVE_LOOPS
    rsr     a3,  LBEG
    s32i    a3,  sp, XT_STK_LBEG
    rsr     a3,  LEND
    s32i    a3,  sp, XT_STK_LEND
    rsr     a3,  LCOUNT
    s32i    a3,  sp, XT_STK_LCOUNT
    #endif

    #if XT_USE_SWPRI
    /* Save virtual priority mask */
    movi    a3,  _xt_vpri_mask
    l32i    a3,  a3, 0
    s32i    a3,  sp, XT_STK_VPRI
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a9,  a0                     /* preserve ret addr */
    #endif

    #ifndef __XTENSA_CALL0_ABI__
    /*
    To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
    Need to save a9,12,13 temporarily (in frame temps) and recover originals.
    Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
    and underflow exceptions disabled (assured by PS.EXCM == 1).
    */
    s32i    a12, sp, XT_STK_TMP0        /* temp. save stuff in stack frame */
    s32i    a13, sp, XT_STK_TMP1
    s32i    a9,  sp, XT_STK_TMP2

    /*
    Save the overlay state if we are supporting overlays. Since we just saved
    three registers, we can conveniently use them here. Note that as of now,
    overlays only work for windowed calling ABI.
    */
    #ifdef XT_USE_OVLY
    l32i    a9,  sp, XT_STK_PC          /* recover saved PC */
    _xt_overlay_get_state    a9, a12, a13
    s32i    a9,  sp, XT_STK_OVLY        /* save overlay state */
    #endif

    l32i    a12, sp, XT_STK_A12         /* recover original a9,12,13 */
    l32i    a13, sp, XT_STK_A13
    l32i    a9,  sp, XT_STK_A9
    addi    sp,  sp, XT_STK_FRMSZ       /* restore the interruptee's SP */
    call0   xthal_window_spill_nw       /* preserves only a4,5,8,9,12,13 */
    addi    sp,  sp, -XT_STK_FRMSZ
    l32i    a12, sp, XT_STK_TMP0        /* recover stuff from stack frame */
    l32i    a13, sp, XT_STK_TMP1
    l32i    a9,  sp, XT_STK_TMP2
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_save_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we assume a9,12,13 are preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    addi    a2,  sp, XT_STK_EXTRA       /* where to save it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3, -XCHAL_EXTRA_SA_ALIGN
    and     a2, a2, a3                  /* align dynamically >16 bytes */
    # endif
    call0   xthal_save_extra_nw         /* destroys a0,2,3,4,5 */
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a0, a9                      /* retrieve ret addr */
    #endif

    ret

/*******************************************************************************

_xt_context_restore

    !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_save (whose caller saved A12, A13).

Caller is responsible for restoring PC, PS, A0, A1 (SP).

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Other processor state except PC, PS, A0, A1 (SP), is as at the point
    of interruption.

*******************************************************************************/

    .global _xt_context_restore
    .type   _xt_context_restore,@function
    .align  4
_xt_context_restore:

    #if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_restore_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we only assume a13 is preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    mov     a13, a0                     /* preserve ret addr */
    addi    a2,  sp, XT_STK_EXTRA       /* where to find it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3, -XCHAL_EXTRA_SA_ALIGN
    and     a2, a2, a3                  /* align dynamically >16 bytes */
    # endif
    call0   xthal_restore_extra_nw      /* destroys a0,2,3,4,5 */
    mov     a0,  a13                    /* retrieve ret addr */
    #endif

    #if XCHAL_HAVE_LOOPS
    l32i    a2,  sp, XT_STK_LBEG
    l32i    a3,  sp, XT_STK_LEND
    wsr     a2,  LBEG
    l32i    a2,  sp, XT_STK_LCOUNT
    wsr     a3,  LEND
    wsr     a2,  LCOUNT
    #endif

    #ifdef XT_USE_OVLY
    /*
    If we are using overlays, this is a good spot to check if we need
    to restore an overlay for the incoming task. Here we have a bunch
    of registers to spare. Note that this step is going to use a few
    bytes of storage below SP (SP-20 to SP-32) if an overlay is going
    to be restored.
    */
    l32i    a2,  sp, XT_STK_PC          /* retrieve PC */
    l32i    a3,  sp, XT_STK_PS          /* retrieve PS */
    l32i    a4,  sp, XT_STK_OVLY        /* retrieve overlay state */
    l32i    a5,  sp, XT_STK_A1          /* retrieve stack ptr */
    _xt_overlay_check_map    a2, a3, a4, a5, a6
    s32i    a2,  sp, XT_STK_PC          /* save updated PC */
    s32i    a3,  sp, XT_STK_PS          /* save updated PS */
    #endif

    #if XT_USE_SWPRI
    /* Restore virtual interrupt priority and interrupt enable */
    movi    a3,  _xt_intdata
    l32i    a4,  a3, 0                  /* a4 = _xt_intenable */
    l32i    a5,  sp, XT_STK_VPRI        /* a5 = saved _xt_vpri_mask */
    and     a4,  a4, a5
    wsr     a4,  INTENABLE              /* update INTENABLE */
    s32i    a5,  a3, 4                  /* restore _xt_vpri_mask */
    #endif

    l32i    a3,  sp, XT_STK_SAR
    l32i    a2,  sp, XT_STK_A2
    wsr     a3,  SAR
    l32i    a3,  sp, XT_STK_A3
    l32i    a4,  sp, XT_STK_A4
    l32i    a5,  sp, XT_STK_A5
    l32i    a6,  sp, XT_STK_A6
    l32i    a7,  sp, XT_STK_A7
    l32i    a8,  sp, XT_STK_A8
    l32i    a9,  sp, XT_STK_A9
    l32i    a10, sp, XT_STK_A10
    l32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be restored here.
    However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
    so need to be restored anyway, despite being callee-saved in Call0.
    */
    l32i    a12, sp, XT_STK_A12
    l32i    a13, sp, XT_STK_A13
    #ifndef __XTENSA_CALL0_ABI__
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    ret


/*******************************************************************************

_xt_coproc_init

Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).

Called during initialization of the RTOS, before any threads run.

This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.

Entry Conditions:
    Xtensa single-threaded run-time environment is in effect.
    No thread is yet running.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_init(void)

*******************************************************************************/
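
/*
    For reference, a rough C equivalent of the routine below. This is an
    illustrative assumption of this note, not part of the port: it treats
    _xt_coproc_owner_sa as an array of XCHAL_CP_MAX 32-bit owner words,
    which is how the assembly below addresses it:

        // Hypothetical sketch -- the real implementation is the assembly below.
        extern void * _xt_coproc_owner_sa[XCHAL_CP_MAX];

        void _xt_coproc_init(void)
        {
            int i;
            for (i = 0; i < XCHAL_CP_MAX; i++) {
                _xt_coproc_owner_sa[i] = 0;     // 0 == unowned; CPENABLE is not touched
            }
        }
*/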

#if XCHAL_CP_NUM > 0

    .global _xt_coproc_init
    .type   _xt_coproc_init,@function
    .align  4
_xt_coproc_init:
    ENTRY0

    /* Initialize thread co-processor ownerships to 0 (unowned). */
    movi    a2, _xt_coproc_owner_sa         /* a2 = base of owner array */
    addi    a3, a2, XCHAL_CP_MAX << 2       /* a3 = top+1 of owner array */
    movi    a4, 0                           /* a4 = 0 (unowned) */
1:  s32i    a4, a2, 0
    addi    a2, a2, 4
    bltu    a2, a3, 1b

    RET0

#endif


/*******************************************************************************

_xt_coproc_release

Releases any and all co-processors owned by a given thread. The thread is
identified by its co-processor state save area defined in xtensa_context.h.

Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).

Entry Conditions:
    A2  = Pointer to base of co-processor state save area.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_release(void * coproc_sa_base)

*******************************************************************************/
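
/*
    A rough C sketch of the logic below, for illustration only (not part of
    the port). The interrupt masking done by the assembly (RSIL to
    XCHAL_EXCM_LEVEL, then WSR.PS to restore) is only noted as a comment,
    since it has no portable C equivalent here:

        void _xt_coproc_release(void * coproc_sa_base)
        {
            int i;
            // The assembly below masks interrupts around this loop (rsil/wsr PS).
            for (i = 0; i < XCHAL_CP_MAX; i++) {
                if (_xt_coproc_owner_sa[i] == coproc_sa_base) {
                    _xt_coproc_owner_sa[i] = 0;     // mark entry as unowned
                }
            }
        }
*/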

#if XCHAL_CP_NUM > 0

    .global _xt_coproc_release
    .type   _xt_coproc_release,@function
    .align  4
_xt_coproc_release:
    ENTRY0                                  /* a2 = base of save area */

    movi    a3, _xt_coproc_owner_sa         /* a3 = base of owner array */
    addi    a4, a3, XCHAL_CP_MAX << 2       /* a4 = top+1 of owner array */
    movi    a5, 0                           /* a5 = 0 (unowned) */

    rsil    a6, XCHAL_EXCM_LEVEL            /* lock interrupts */

1:  l32i    a7, a3, 0                       /* a7 = owner at a3 */
    bne     a2, a7, 2f                      /* if (coproc_sa_base == owner) */
    s32i    a5, a3, 0                       /*   owner = unowned */
2:  addi    a3, a3, 1<<2                    /* a3 = next entry in owner array */
    bltu    a3, a4, 1b                      /* repeat until end of array */

3:  wsr     a6, PS                          /* restore interrupts */

    RET0

#endif


/*******************************************************************************
_xt_coproc_savecs

If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.

Entry conditions:
    - The thread being switched out is still the current thread.
    - CPENABLE state reflects which coprocessors are active.
    - Registers have been saved/spilled already.

Exit conditions:
    - All necessary CP callee-saved state has been saved.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
*******************************************************************************/
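
/*
    A rough C-level sketch of the flow below, for illustration only. The
    helpers read_cpenable(), set_cs_st() and get_asa() are hypothetical
    stand-ins for the RSR.CPENABLE read and the XT_CP_CS_ST / XT_CP_ASA
    field accesses; XT_RTOS_CP_STATE is written as if it were a C function,
    although in the port it is an assembly entry reached with CALL0 that
    returns the pointer in a15. The per-coprocessor stores are done by the
    HAL's xchal_cpN_store macros, which have no C equivalent:

        void _xt_coproc_savecs(void)
        {
            unsigned mask = read_cpenable();        // which CPs are enabled
            if (mask == 0)
                return;                             // nothing to save
            void *sa = XT_RTOS_CP_STATE();          // CP save area of current thread, or NULL
            if (sa == 0)
                return;
            set_cs_st(sa, mask);                    // record which CPs are being stored
            for (int cp = 0; cp < XCHAL_CP_NUM; cp++) {
                if (mask & (1u << cp)) {
                    // xchal_cpN_store saves CP 'cp' at get_asa(sa) + _xt_coproc_sa_offset[cp]
                }
            }
        }
*/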
#if XCHAL_CP_NUM > 0

    .extern     _xt_coproc_sa_offset   /* external reference */

    .global     _xt_coproc_savecs
    .type       _xt_coproc_savecs,@function
    .align      4
_xt_coproc_savecs:

    /* At entry, CPENABLE should be showing which CPs are enabled. */

    rsr     a2, CPENABLE                /* a2 = which CPs are enabled      */
    beqz    a2, .Ldone                  /* quick exit if none              */
    mov     a14, a0                     /* save return address             */
    call0   XT_RTOS_CP_STATE            /* get address of CP save area     */
    mov     a0, a14                     /* restore return address          */
    beqz    a15, .Ldone                 /* if none then nothing to do      */
    s16i    a2, a15, XT_CP_CS_ST        /* save mask of CPs being stored   */
    movi    a13, _xt_coproc_sa_offset   /* array of CP save offsets        */
    l32i    a15, a15, XT_CP_ASA         /* a15 = base of aligned save area */

#if XCHAL_CP0_SA_SIZE
    bbci.l  a2, 0, 2f                   /* CP 0 not enabled                */
    l32i    a14, a13, 0                 /* a14 = _xt_coproc_sa_offset[0]   */
    add     a3, a14, a15                /* a3 = save area for CP 0         */
    xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2, 1, 2f                   /* CP 1 not enabled                */
    l32i    a14, a13, 4                 /* a14 = _xt_coproc_sa_offset[1]   */
    add     a3, a14, a15                /* a3 = save area for CP 1         */
    xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2, 2, 2f
    l32i    a14, a13, 8
    add     a3, a14, a15
    xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2, 3, 2f
    l32i    a14, a13, 12
    add     a3, a14, a15
    xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2, 4, 2f
    l32i    a14, a13, 16
    add     a3, a14, a15
    xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2, 5, 2f
    l32i    a14, a13, 20
    add     a3, a14, a15
    xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2, 6, 2f
    l32i    a14, a13, 24
    add     a3, a14, a15
    xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2, 7, 2f
    l32i    a14, a13, 28
    add     a3, a14, a15
    xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone:
    ret
#endif


/*******************************************************************************
_xt_coproc_restorecs

Restore any callee-saved coprocessor state for the incoming thread.
This function is called from coprocessor exception handling, when giving
ownership to a thread that solicited a context switch earlier. It calls a
system-specific function to get the coprocessor save area base address.

Entry conditions:
    - The incoming thread is set as the current thread.
    - CPENABLE is set up correctly for all required coprocessors.
    - a2 = mask of coprocessors to be restored.

Exit conditions:
    - All necessary CP callee-saved state has been restored.
    - CPENABLE is unchanged.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
*******************************************************************************/
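
/*
    A rough C-level sketch mirroring the one given for _xt_coproc_savecs
    above, for illustration only (same hypothetical helpers, plus get_cs_st()
    for reading XT_CP_CS_ST; the reloads are done by the HAL's xchal_cpN_load
    macros):

        void _xt_coproc_restorecs(unsigned mask)    // mask arrives in a2
        {
            void *sa = XT_RTOS_CP_STATE();          // CP save area of current thread, or NULL
            if (sa == 0)
                return;
            unsigned saved = get_cs_st(sa);         // which CPs have saved state
            set_cs_st(sa, saved ^ mask);            // clear the bits being restored
            for (int cp = 0; cp < XCHAL_CP_NUM; cp++) {
                if (mask & (1u << cp)) {
                    // xchal_cpN_load reloads CP 'cp' from get_asa(sa) + _xt_coproc_sa_offset[cp]
                }
            }
        }
*/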
#if XCHAL_CP_NUM > 0

    .global     _xt_coproc_restorecs
    .type       _xt_coproc_restorecs,@function
    .align      4
_xt_coproc_restorecs:

    mov     a14, a0                     /* save return address             */
    call0   XT_RTOS_CP_STATE            /* get address of CP save area     */
    mov     a0, a14                     /* restore return address          */
    beqz    a15, .Ldone2                /* if none then nothing to do      */
    l16ui   a3, a15, XT_CP_CS_ST        /* a3 = which CPs have been saved  */
    xor     a3, a3, a2                  /* clear the ones being restored   */
    s32i    a3, a15, XT_CP_CS_ST        /* update saved CP mask            */
    movi    a13, _xt_coproc_sa_offset   /* array of CP save offsets        */
    l32i    a15, a15, XT_CP_ASA         /* a15 = base of aligned save area */

#if XCHAL_CP0_SA_SIZE
    bbci.l  a2, 0, 2f                   /* CP 0 not enabled                */
    l32i    a14, a13, 0                 /* a14 = _xt_coproc_sa_offset[0]   */
    add     a3, a14, a15                /* a3 = save area for CP 0         */
    xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2, 1, 2f                   /* CP 1 not enabled                */
    l32i    a14, a13, 4                 /* a14 = _xt_coproc_sa_offset[1]   */
    add     a3, a14, a15                /* a3 = save area for CP 1         */
    xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2, 2, 2f
    l32i    a14, a13, 8
    add     a3, a14, a15
    xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2, 3, 2f
    l32i    a14, a13, 12
    add     a3, a14, a15
    xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2, 4, 2f
    l32i    a14, a13, 16
    add     a3, a14, a15
    xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2, 5, 2f
    l32i    a14, a13, 20
    add     a3, a14, a15
    xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2, 6, 2f
    l32i    a14, a13, 24
    add     a3, a14, a15
    xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2, 7, 2f
    l32i    a14, a13, 28
    add     a3, a14, a15
    xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone2:
    ret

#endif
