/***************************************************************************
 * Copyright (c) 2024 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 **************************************************************************/

/**************************************************************************/
/*   Copyright (c) Cadence Design Systems, Inc.                           */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/


#include "tx_port.h"
#include "xtensa_rtos.h"
#include "tx_api_asm.h"

    .text

/**************************************************************************/
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function waits for a thread control block pointer to appear in */
/*    the _tx_thread_execute_ptr variable.  Once a thread pointer appears */
/*    in the variable, the corresponding thread is resumed.               */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  12-31-2020      Cadence Design Systems  Initial Version 6.1.3         */
/*  10-31-2022      Scott Larson            Updated EPK definitions,      */
/*                                            resulting in version 6.2.0  */
/*                                                                        */
/**************************************************************************/

//  VOID   _tx_thread_schedule(VOID)
//  {
    .globl  _tx_thread_schedule
    .type   _tx_thread_schedule,@function
    .align  4
_tx_thread_schedule:

#if XCHAL_HAVE_XEA3

    /* Skip "entry" - nothing to save, never returns. */

    movi    a2, PS_STACK_KERNEL | PS_DI     /* Set PS.STACK = Kernel and    */
    movi    a3, PS_STACK_MASK | PS_DI_MASK  /* disable interrupts.          */
    xps     a2, a3

#ifdef __XTENSA_CALL0_ABI__
    mov     a15, a1                         /* Dispatch code expects a15 = old SP */
#endif

    movi    a0, _xt_dispatch + 3            /* Jump to dispatch code. It will */
    ret                                     /* check for ready thread or idle */
                                            /* and handle accordingly.        */

    ill                                     /* Should never get back here. */

#else

    /*
    Note on Windowed ABI:
    Callers of this don't expect it to return to them. Most use 'call0'.
    The only windowed (C) caller is _tx_initialize_kernel_enter().
    There are no args or results to pass. So we don't really care if the
    window gets rotated. We can omit the 'entry' altogether and avoid the
    need for a special "no entry" entrypoint to this function.
    */

    #ifdef XT_ENABLE_TIMING_TEST_HACK
    /* For timing_test "TS" numbers. INTERNAL USE ONLY. */
    /* Always use CALL0. We may be here with windowing disabled. */
    .extern scheduler_return
    call0   scheduler_return
    #endif

    /*
    Wait for a thread to execute (Idle Loop).
    First ensure interrupts (except hi-pri) are disabled so result
    of reading _tx_thread_execute_ptr can't change before testing.
    While there's no thread ready, enable interrupts and wait in a
    low power state, then disable interrupts and repeat the test.
    */
    //  do
    //  {
    movi    a3, _tx_thread_execute_ptr
.L_tx_thread_schedule_loop:             /* Idle Loop. */
    XT_INTS_DISABLE(a2)                 /* disable interrupts if not already */
    l32i    a2, a3, 0                   /* a2 = _tx_thread_execute_ptr */
    bnez    a2, .L_tx_thread_schedule_ready
    waiti   0                           /* enable interrupts and wait for */
                                        /*   interrupt in low power state */
    j       .L_tx_thread_schedule_loop

    //  }
    //  while(_tx_thread_execute_ptr == TX_NULL);

.L_tx_thread_schedule_ready:

    /* Yes! We have a thread to execute.  Lock out interrupts and
       transfer control to it. Interrupts are already disabled. */

    /* Setup the current thread pointer.  */
    //  _tx_thread_current_ptr =  _tx_thread_execute_ptr;
    movi    a3, _tx_thread_current_ptr
    l32i    a0, a2, tx_thread_run_count
    s32i    a2, a3, 0                   /* a2 = _tx_thread_current_ptr (TCB) */

    /* Increment the run count for this thread.  */
    //  _tx_thread_current_ptr -> tx_thread_run_count++;
    addi    a3, a0, 1
    movi    a0, _tx_timer_time_slice
    s32i    a3, a2, tx_thread_run_count

    /* Setup time-slice, if present.  */
    //  _tx_timer_time_slice =  _tx_thread_current_ptr -> tx_thread_time_slice;
    l32i    a3, a2, tx_thread_time_slice
    s32i    a3, a0, 0

    #ifdef TX_THREAD_SAFE_CLIB
    /* Load the library-specific global context ptr address.  */
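    //  _impure_ptr / _reent_ptr =  _tx_thread_current_ptr -> tx_thread_clib_ptr;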

    #if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
    movi    a0, _impure_ptr
    #elif XSHAL_CLIB == XTHAL_CLIB_XCLIB
    movi    a0, _reent_ptr
    #else
    #error TX_THREAD_SAFE_CLIB defined with unsupported C library.
    #endif

    l32i    a3, a2, tx_thread_clib_ptr
    s32i    a3, a0, 0                   /* point to thread's reent struct */
    #endif

    /* Switch to the thread's stack.  */
    //  SP =  _tx_thread_execute_ptr -> tx_thread_stack_ptr;
    l32i    sp, a2, tx_thread_stack_ptr

    #if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
    /* Call the execution-change notify function to indicate the thread is executing. */
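    //  _tx_execution_thread_enter();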
    #ifdef __XTENSA_CALL0_ABI__
    call0   _tx_execution_thread_enter
    #else
    call8   _tx_execution_thread_enter
    #endif
    #endif

    /* Determine if an interrupt frame or a synchronous task suspension frame
       is present.  */
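    //  if (_tx_thread_current_ptr -> tx_thread_solicited)
    //      goto synch_return;          /* solicited (synchronous) frame  */
    //                                  /* else: interrupt (asynch) frame */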
    l32i    a3, a2, tx_thread_solicited
    bnez    a3, .L_tx_thread_synch_return

.Ln_tx_thread_asynch_return:

    #if XCHAL_CP_NUM > 0
    /* Restore thread's CPENABLE (enable co-processors this thread owns). */
    l16ui   a3, a2, tx_thread_cp_state + XT_CPENABLE
    wsr     a3, CPENABLE
    #endif

    /* Here we return from unsolicited entry with an interrupt stack frame. */
    call0   _xt_context_restore

    /* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */
    #ifdef __XTENSA_CALL0_ABI__
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    #if XCHAL_CP_NUM > 0
    rsync                               /* ensure wsr.CPENABLE has completed */
    #endif

    /*
    This does not return to its caller, but to the selected thread.
    Must return via the exit dispatcher corresponding to the entrypoint
    from which this was called. Interruptee's A0, A1, PS, PC are restored
    and the interrupt stack frame is deallocated in the exit dispatcher.
    */
    l32i    a0,  sp, XT_STK_EXIT
    ret

.L_tx_thread_synch_return:

    /* Here we return from a solicited entry with a solicited stack frame. */
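    //  _tx_thread_current_ptr -> tx_thread_solicited =  TX_FALSE;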
    movi    a0,  TX_FALSE
    l32i    a3,  sp, XT_STK_PS
    s32i    a0,  a2, tx_thread_solicited

    #ifdef __XTENSA_CALL0_ABI__
    l32i    a12, sp, XT_STK_A12
    l32i    a13, sp, XT_STK_A13
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    l32i    a0,  sp, XT_STK_PC          /* return address */

    #if XCHAL_CP_NUM > 0
    /* CPENABLE should already be clear (it was cleared on entry to kernel). */
    rsync                               /* ensure wsr.CPENABLE has completed */
    #endif

    wsr     a3,  PS                     /* no need to sync PS, delay is OK */

    /* This does not return to its caller, but to the selected thread. */
    #ifdef __XTENSA_CALL0_ABI__
    /* 'addi sp, sp, imm' could turn into 'addmi, addi' sequence and make */
    /* the sp briefly point to an illegal stack location. Avoid that.     */
    addi    a2,  sp, XT_STK_FRMSZ
    mov     sp,  a2
    ret
    #else
    retw
    #endif

#endif /* XCHAL_HAVE_XEA3 */
//  }
