/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif

    EXTERN  _tx_thread_current_ptr
    EXTERN  _tx_thread_execute_ptr
    EXTERN  _tx_timer_time_slice
    EXTERN  _tx_thread_system_stack_ptr
    EXTERN  _tx_thread_preempt_disable
    EXTERN  _tx_execution_thread_enter
    EXTERN  _tx_execution_thread_exit
    EXTERN  _tx_thread_secure_stack_context_restore
    EXTERN  _tx_thread_secure_stack_context_save
    EXTERN  _tx_thread_secure_mode_stack_allocate
    EXTERN  _tx_thread_secure_mode_stack_free
    EXTERN  _tx_thread_secure_mode_stack_initialize
#ifdef TX_LOW_POWER
    EXTERN  tx_low_power_enter
    EXTERN  tx_low_power_exit
#endif
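
    /* When TX_PORT_USE_BASEPRI is defined (typically in tx_user.h), the handlers below mask
       interrupts by writing TX_PORT_BASEPRI to BASEPRI instead of setting PRIMASK, so exceptions
       with priority values numerically lower than TX_PORT_BASEPRI (i.e. more urgent) stay enabled.
       TX_PORT_BASEPRI must then also be supplied by the port/user configuration; it is not defined
       here. A hedged configuration sketch (values are illustrative only):

           #define TX_PORT_USE_BASEPRI
           #define TX_PORT_BASEPRI    0x40    // mask priority values 0x40 and above

       Without TX_PORT_USE_BASEPRI, interrupts are masked with CPSID/CPSIE as in earlier ports.  */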
    SECTION `.text`:CODE:NOROOT(2)
    THUMB
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_schedule                               Cortex-Mxx/IAR    */
/*                                                           6.2.1        */
/*  AUTHOR                                                                */
/*                                                                        */
/*    Scott Larson, Microsoft Corporation                                 */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function waits for a thread control block pointer to appear in */
/*    the _tx_thread_execute_ptr variable.  Once a thread pointer appears */
/*    in the variable, the corresponding thread is resumed.               */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_initialize_kernel_enter          ThreadX entry function         */
/*    _tx_thread_system_return             Return to system from thread   */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020      Scott Larson            Initial Version 6.1           */
/*  04-02-2021      Scott Larson            Modified comment(s), added    */
/*                                            low power code,             */
/*                                            resulting in version 6.1.6  */
/*  06-02-2021      Scott Larson            Added secure stack initialize */
/*                                            in SVC handler,             */
/*                                            resulting in version 6.1.7  */
/*  04-25-2022      Scott Larson            Added BASEPRI support,        */
/*                                            resulting in version 6.1.11 */
/*  03-08-2023      Scott Larson            Added preproc FPU option,     */
/*                                            resulting in version 6.2.1  */
/*                                                                        */
/**************************************************************************/
// VOID   _tx_thread_schedule(VOID)
// {
    PUBLIC  _tx_thread_schedule
_tx_thread_schedule:
    /* On Cortex-M this function is called only once, for the first schedule request.
       All subsequent scheduling is performed by the PendSV handler below. */
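
    /* In rough C-like pseudocode, the first-time entry below amounts to the following
       (a sketch for orientation only, assuming CMSIS-style register and intrinsic names):

           _tx_thread_preempt_disable = 0;         // allow preemption again
           __set_CONTROL(__get_CONTROL() & ~4u);   // clear FPCA when built with VFP support
           __enable_irq();
           SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;     // pend PendSV; the handler performs the switch
           for (;;) { }                            // execution continues in PendSV_Handler
    */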

    /* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets.  */
    MOV     r0, #0                                  // Build value for TX_FALSE
    LDR     r2, =_tx_thread_preempt_disable         // Build address of preempt disable flag
    STR     r0, [r2, #0]                            // Clear preempt disable flag

#ifdef __ARMVFP__
    /* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked.  */
    MRS     r0, CONTROL                             // Pickup current CONTROL register
    BIC     r0, r0, #4                              // Clear the FPCA bit
    MSR     CONTROL, r0                             // Setup new CONTROL register
#endif

    /* Enable interrupts.  */
    CPSIE   i

    /* Enter the scheduler for the first time.  */
    MOV     r0, #0x10000000                         // Load PENDSVSET bit
    MOV     r1, #0xE000E000                         // Load NVIC base
    STR     r0, [r1, #0xD04]                        // Set PENDSVBIT in ICSR
    DSB                                             // Complete all memory accesses
    ISB                                             // Flush pipeline

    /* Wait here for the PendSV to take place.  */

__tx_wait_here:
    B       __tx_wait_here                          // Wait for the PendSV to happen
// }

    /* Generic context switching PendSV handler.  */
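
    /* The handler below accesses TX_THREAD members by fixed offsets from the thread pointer.
       For reference, the offsets used here are (a summary, not a definition -- they must match
       the TX_THREAD layout from tx_api.h/tx_port.h for this port):

           [thread, #4]     tx_thread_run_count
           [thread, #8]     tx_thread_stack_ptr
           [thread, #12]    tx_thread_stack_start       (loaded into PSPLIM)
           [thread, #24]    tx_thread_time_slice
           [thread, #0x90]  secure stack context index  (TrustZone builds only)
    */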

    PUBLIC  PendSV_Handler
PendSV_Handler:
__tx_ts_handler:

#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
    /* Call the thread exit function to indicate the thread is no longer executing.  */
#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                    // Mask interrupts at or below TX_PORT_BASEPRI priority
    MSR     BASEPRI, r1
#else
    CPSID   i                                       // Disable interrupts
#endif  /* TX_PORT_USE_BASEPRI */
    PUSH    {r0, lr}                                // Save LR (and r0 just for alignment)
    BL      _tx_execution_thread_exit               // Call the thread exit function
    POP     {r0, lr}                                // Recover LR
#ifdef TX_PORT_USE_BASEPRI
    MOV     r0, #0                                  // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r0
#else
    CPSIE   i                                       // Enable interrupts
#endif  /* TX_PORT_USE_BASEPRI */
#endif  /* EXECUTION PROFILE */

    LDR     r0, =_tx_thread_current_ptr             // Build current thread pointer address
    LDR     r2, =_tx_thread_execute_ptr             // Build execute thread pointer address
    MOV     r3, #0                                  // Build NULL value
    LDR     r1, [r0]                                // Pickup current thread pointer

    /* Determine if there is a current thread to finish preserving.  */

    CBZ     r1, __tx_ts_new                         // If NULL, skip preservation

    /* Recover PSP and preserve current thread context.  */

    STR     r3, [r0]                                // Set _tx_thread_current_ptr to NULL
    MRS     r12, PSP                                // Pickup PSP pointer (thread's stack pointer)
    STMDB   r12!, {r4-r11}                          // Save its remaining registers
#ifdef __ARMVFP__
    TST     LR, #0x10                               // Determine if the VFP extended frame is present
    BNE     _skip_vfp_save                          // No, skip saving the VFP registers
    VSTMDB  r12!,{s16-s31}                          // Yes, save additional VFP registers
_skip_vfp_save:
#endif
    LDR     r4, =_tx_timer_time_slice               // Build address of time-slice variable
    STMDB   r12!, {LR}                              // Save LR on the stack
    STR     r12, [r1, #8]                           // Save the thread stack pointer

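    /* At this point the thread's stack frame, from the saved stack pointer toward higher
       addresses, is (a summary of what the code above and the exception entry produced;
       the s-register blocks are present only when the extended/FPU frame is in use):

           [LR (EXC_RETURN)] [s16-s31] [r4-r11] [r0-r3, r12, LR, PC, xPSR] [s0-s15, FPSCR]
            saved above       above     above    hardware-stacked frame     hardware-stacked
    */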
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // Save secure context
    LDR     r5, [r1,#0x90]                          // Load secure stack index
    CBZ     r5, _skip_secure_save                   // Skip save if there is no secure context
    PUSH    {r0,r1,r2,r3}                           // Save scratch registers
    MOV     r0, r1                                  // Move thread ptr to r0
    BL      _tx_thread_secure_stack_context_save    // Save secure stack
    POP     {r0,r1,r2,r3}                           // Restore scratch registers
_skip_secure_save:
#endif

    /* Determine if time-slicing is active. If it isn't, skip time-slice processing.  */

    LDR     r5, [r4]                                // Pickup current time-slice
    CBZ     r5, __tx_ts_new                         // If not active, skip processing

    /* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable.  */

    STR     r5, [r1, #24]                           // Save current time-slice

    /* Clear the global time-slice.  */

    STR     r3, [r4]                                // Clear time-slice

    /* Executing thread is now completely preserved!!!  */

__tx_ts_new:

    /* Now we are looking for a new thread to execute!  */

#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                    // Mask interrupts at or below TX_PORT_BASEPRI priority
    MSR     BASEPRI, r1
#else
    CPSID   i                                       // Disable interrupts
#endif
    LDR     r1, [r2]                                // Is there another thread ready to execute?
    CBZ     r1, __tx_ts_wait                        // No, skip to the wait processing

    /* Yes, another thread is ready to execute; make it the current thread.  */

    STR     r1, [r0]                                // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                                  // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                       // Enable interrupts
#endif

    /* Increment the thread run count.  */

__tx_ts_restore:
    LDR     r7, [r1, #4]                            // Pickup the current thread run count
    LDR     r4, =_tx_timer_time_slice               // Build address of time-slice variable
    LDR     r5, [r1, #24]                           // Pickup thread's current time-slice
    ADD     r7, r7, #1                              // Increment the thread run count
    STR     r7, [r1, #4]                            // Store the new run count

    /* Setup global time-slice with thread's current time-slice.  */

    STR     r5, [r4]                                // Setup global time-slice

#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
    /* Call the thread entry function to indicate the thread is executing.  */
    PUSH    {r0, r1}                                // Save r0 and r1
    BL      _tx_execution_thread_enter              // Call the thread execution enter function
    POP     {r0, r1}                                // Recover r0 and r1
#endif

#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // Restore secure context
    LDR     r0, [r1,#0x90]                          // Load secure stack index
    CBZ     r0, _skip_secure_restore                // Skip restore if there is no secure context
    PUSH    {r0,r1}                                 // Save r1 (and dummy r0)
    MOV     r0, r1                                  // Move thread ptr to r0
    BL      _tx_thread_secure_stack_context_restore // Restore secure stack
    POP     {r0,r1}                                 // Restore r1 (and dummy r0)
_skip_secure_restore:
#endif

    /* Restore the thread context and PSP.  */
    LDR     r12, [r1, #12]                          // Get stack start
    MSR     PSPLIM, r12                             // Set stack limit
    LDR     r12, [r1, #8]                           // Pickup thread's stack pointer
    LDMIA   r12!, {LR}                              // Pickup LR
#ifdef __ARMVFP__
    TST     LR, #0x10                               // Determine if the VFP extended frame is present
    BNE     _skip_vfp_restore                       // If not, skip VFP restore
    VLDMIA  r12!, {s16-s31}                         // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
    LDMIA   r12!, {r4-r11}                          // Recover thread's registers
    MSR     PSP, r12                                // Setup the thread's stack pointer

    BX      lr                                      // Return to thread!
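
    /* The BX above returns through the EXC_RETURN value reloaded from the thread's stack frame:
       bit 2 selects the process stack (PSP) and bit 4 indicates whether the hardware restores
       the extended (FPU) frame on exception return.  */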

    /* The following is the idle wait processing... in this case, no threads are ready for execution and the
       system will simply be idle until an interrupt occurs that makes a thread ready. Note that interrupts
       are disabled to allow use of WFI for waiting for a thread to arrive.  */
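
    /* TX_ENABLE_WFI and TX_LOW_POWER are optional build options, typically defined in tx_user.h.
       A hedged configuration sketch (tx_low_power_enter/tx_low_power_exit come from the ThreadX
       low-power utility or are supplied by the application):

           #define TX_ENABLE_WFI       // execute WFI while idling between checks
           #define TX_LOW_POWER        // bracket the idle wait with tx_low_power_enter/exit
    */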

__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                    // Mask interrupts at or below TX_PORT_BASEPRI priority
    MSR     BASEPRI, r1
#else
    CPSID   i                                       // Disable interrupts
#endif
    LDR     r1, [r2]                                // Pickup the next thread to execute pointer
    STR     r1, [r0]                                // Store it in the current pointer
    CBNZ    r1, __tx_ts_ready                       // If non-NULL, a new thread is ready!

#ifdef TX_LOW_POWER
    PUSH    {r0-r3}                                 // Save scratch registers
    BL      tx_low_power_enter                      // Possibly enter low power mode
    POP     {r0-r3}                                 // Restore scratch registers
#endif

#ifdef TX_ENABLE_WFI
    DSB                                             // Ensure no outstanding memory transactions
    WFI                                             // Wait for interrupt
    ISB                                             // Ensure pipeline is flushed
#endif

#ifdef TX_LOW_POWER
    PUSH    {r0-r3}                                 // Save scratch registers
    BL      tx_low_power_exit                       // Exit low power mode
    POP     {r0-r3}                                 // Restore scratch registers
#endif

#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                                  // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                       // Enable interrupts
#endif
    B       __tx_ts_wait                            // Loop to continue waiting

    /* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
       already in the handler!  */
__tx_ts_ready:
    MOV     r7, #0x08000000                         // Build clear PendSV value
    MOV     r8, #0xE000E000                         // Build base NVIC address
    STR     r7, [r8, #0xD04]                        // Clear any PendSV

    /* Re-enable interrupts and restore new thread.  */
#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                                  // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                       // Enable interrupts
#endif
    B       __tx_ts_restore                         // Restore the thread
// }


#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // SVC_Handler is not needed when ThreadX is running in single mode.
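    /* This handler dispatches the secure-stack service calls: SVC #1 allocates a secure stack,
       SVC #2 frees it, and SVC #3 performs secure stack initialization. The stacked r0-r3 carry
       the C arguments, and for allocate/free the result is written back into the stacked r0 so it
       becomes the caller's return value on exception return. A hedged caller-side sketch
       (illustrative only -- the real requests are issued from the ThreadX secure stack support
       code, e.g. tx_thread_secure_stack_allocate):

           __asm volatile ("SVC #1");   // request secure stack allocate
           __asm volatile ("SVC #2");   // request secure stack free
           __asm volatile ("SVC #3");   // request secure stack (re)initialization
    */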
    PUBLIC  SVC_Handler
SVC_Handler:
    TST     lr, #0x04                               // Determine return stack from EXC_RETURN bit 2
    ITE     EQ
    MRSEQ   r0, MSP                                 // Get MSP if return stack is MSP
    MRSNE   r0, PSP                                 // Get PSP if return stack is PSP

    LDR     r1, [r0,#24]                            // Load saved PC from stack
    LDRB    r1, [r1,#-2]                            // Load SVC number

    CMP     r1, #1                                  // Is it a secure stack allocate request?
    BEQ     _tx_svc_secure_alloc                    // Yes, go there

    CMP     r1, #2                                  // Is it a secure stack free request?
    BEQ     _tx_svc_secure_free                     // Yes, go there

    CMP     r1, #3                                  // Is it a secure stack init request?
    BEQ     _tx_svc_secure_init                     // Yes, go there

    // Unknown SVC argument - just return
    BX      lr

_tx_svc_secure_alloc:
    PUSH    {r0,lr}                                 // Save SP and EXC_RETURN
    LDM     r0, {r0-r3}                             // Load function parameters from stack
    BL      _tx_thread_secure_mode_stack_allocate
    POP     {r12,lr}                                // Restore SP and EXC_RETURN
    STR     r0,[r12]                                // Store function return value
    BX      lr
_tx_svc_secure_free:
    PUSH    {r0,lr}                                 // Save SP and EXC_RETURN
    LDM     r0, {r0-r3}                             // Load function parameters from stack
    BL      _tx_thread_secure_mode_stack_free
    POP     {r12,lr}                                // Restore SP and EXC_RETURN
    STR     r0,[r12]                                // Store function return value
    BX      lr
_tx_svc_secure_init:
    PUSH    {r0,lr}                                 // Save SP and EXC_RETURN
    BL      _tx_thread_secure_mode_stack_initialize
    POP     {r12,lr}                                // Restore SP and EXC_RETURN
    BX      lr
#endif  // End of ifndef TX_SINGLE_MODE_SECURE, TX_SINGLE_MODE_NON_SECURE


    PUBLIC  _tx_vfp_access
_tx_vfp_access:
#ifdef __ARMVFP__
    VMOV.F32 s0, s0                                 // Simply access the VFP
#endif
    BX       lr                                     // Return to caller
    END