/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif

/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_schedule                               Cortex-Mxx/GNU    */
/*                                                           6.2.1        */
/*  AUTHOR                                                                */
/*                                                                        */
/*    Scott Larson, Microsoft Corporation                                 */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function waits for a thread control block pointer to appear in */
/*    the _tx_thread_execute_ptr variable.  Once a thread pointer appears */
/*    in the variable, the corresponding thread is resumed.               */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_initialize_kernel_enter          ThreadX entry function         */
/*    _tx_thread_system_return             Return to system from thread   */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020      Scott Larson            Initial Version 6.1           */
/*  04-02-2021      Scott Larson            Modified comment(s), added    */
/*                                            low power code,             */
/*                                            resulting in version 6.1.6  */
/*  06-02-2021      Scott Larson            Added secure stack initialize */
/*                                            in SVC handler,             */
/*                                            resulting in version 6.1.7  */
/*  01-31-2022      Scott Larson            Fixed predefined macro name,  */
/*                                            resulting in version 6.1.10 */
/*  04-25-2022      Scott Larson            Added BASEPRI support,        */
/*                                            resulting in version 6.1.11 */
/*  03-08-2023      Scott Larson            Include tx_user.h,            */
/*                                            resulting in version 6.2.1  */
/*                                                                        */
/**************************************************************************/
// VOID   _tx_thread_schedule(VOID)
// {
    .section .text
    .balign 4
    .syntax unified
    .eabi_attribute Tag_ABI_align_preserved, 1
    .global  _tx_thread_schedule
    .thumb_func
.type _tx_thread_schedule, function
_tx_thread_schedule:
    /* This function should only ever be called on Cortex-M
       from the first schedule request. Subsequent scheduling occurs
       from the PendSV handling routine below. */

    /* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets. */
    MOV     r0, #0                                  // Build value for TX_FALSE
    LDR     r2, =_tx_thread_preempt_disable         // Build address of preempt disable flag
    STR     r0, [r2, #0]                            // Clear preempt disable flag

#ifdef __ARM_FP
    /* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked.
       (FPCA set would make every exception entry reserve/stack an extended FP frame.) */
    MRS     r0, CONTROL                             // Pickup current CONTROL register
    BIC     r0, r0, #4                              // Clear the FPCA bit (bit 2)
    MSR     CONTROL, r0                             // Setup new CONTROL register
#endif

    /* Enable interrupts */
    CPSIE   i

    /* Enter the scheduler for the first time by pending PendSV; the actual
       context switch happens in PendSV_Handler below. */
    MOV     r0, #0x10000000                         // Load PENDSVSET bit
    MOV     r1, #0xE000E000                         // Load NVIC base (SCS base; ICSR is at offset 0xD04)
    STR     r0, [r1, #0xD04]                        // Set PENDSVSET bit in ICSR
    DSB                                             // Complete all memory accesses
    ISB                                             // Flush pipeline

    /* Wait here for the PendSV to take place.  Execution never returns here
       once the first thread is scheduled. */

__tx_wait_here:
    B       __tx_wait_here                          // Wait for the PendSV to happen
// }

    /* Generic context switching PendSV handler.  PendSV runs at the lowest
       exception priority, so it only preempts thread-mode code: it saves the
       outgoing thread's software-stacked context (r4-r11, optionally s16-s31,
       and EXC_RETURN in LR), picks the thread in _tx_thread_execute_ptr, and
       restores that thread's context. */

    .section .text
    .balign 4
    .syntax unified
    .eabi_attribute Tag_ABI_align_preserved, 1
    .global  PendSV_Handler
    .thumb_func
.type PendSV_Handler, function
    /* Get current thread value and new thread pointer. */
PendSV_Handler:
__tx_ts_handler:

#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
    /* Call the thread exit function to indicate the thread is no longer executing.
       Interrupts (or priorities <= TX_PORT_BASEPRI) are masked around the call. */
#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                    // Mask interrupt priorities =< TX_PORT_BASEPRI
    MSR     BASEPRI, r1
#else
    CPSID   i                                       // Disable interrupts
#endif  /* TX_PORT_USE_BASEPRI */
    PUSH    {r0, lr}                                // Save LR (and r0 just for 8-byte stack alignment)
    BL      _tx_execution_thread_exit               // Call the thread exit function
    POP     {r0, lr}                                // Recover LR
#ifdef TX_PORT_USE_BASEPRI
    MOV     r0, 0                                   // Disable BASEPRI masking (enable interrupts)
                                                    // NOTE(review): bare immediate without '#' is legal in
                                                    // GAS unified syntax but inconsistent with rest of file.
    MSR     BASEPRI, r0
#else
    CPSIE   i                                       // Enable interrupts
#endif  /* TX_PORT_USE_BASEPRI */
#endif  /* EXECUTION PROFILE */

    LDR     r0, =_tx_thread_current_ptr             // Build current thread pointer address
    LDR     r2, =_tx_thread_execute_ptr             // Build execute thread pointer address
    MOV     r3, #0                                  // Build NULL value
    LDR     r1, [r0]                                // Pickup current thread pointer

    /* Determine if there is a current thread to finish preserving. */

    CBZ     r1, __tx_ts_new                         // If NULL, skip preservation

    /* Recover PSP and preserve current thread context.
       Hardware already stacked r0-r3, r12, LR, PC, xPSR on the thread's PSP;
       software adds r4-r11, optional s16-s31, and the EXC_RETURN value. */

    STR     r3, [r0]                                // Set _tx_thread_current_ptr to NULL
    MRS     r12, PSP                                // Pickup PSP pointer (thread's stack pointer)
    STMDB   r12!, {r4-r11}                          // Save its remaining registers
#ifdef __ARM_FP
    TST     LR, #0x10                               // EXC_RETURN bit 4 clear => extended (FP) frame present
    BNE     _skip_vfp_save                          // Bit set => no FP context; skip VFP save
    VSTMDB  r12!,{s16-s31}                          // Yes, save additional VFP registers
_skip_vfp_save:
#endif
    LDR     r4, =_tx_timer_time_slice               // Build address of time-slice variable
    STMDB   r12!, {LR}                              // Save EXC_RETURN (LR) on the stack
    STR     r12, [r1, #8]                           // Save the thread stack pointer
                                                    // NOTE(review): offset 8 is presumably
                                                    // tx_thread_stack_ptr — confirm against TX_THREAD layout.

#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // Save secure context (TrustZone: only if the thread has a secure stack)
    LDR     r5, [r1,#0x90]                          // Load secure stack index
                                                    // NOTE(review): 0x90 assumed to be the secure stack
                                                    // index field in TX_THREAD — verify against tx_port.h.
    CBZ     r5, _skip_secure_save                   // Skip save if there is no secure context
    PUSH    {r0,r1,r2,r3}                           // Save scratch registers
    MOV     r0, r1                                  // Move thread ptr to r0 (argument)
    BL      _tx_thread_secure_stack_context_save    // Save secure stack
    POP     {r0,r1,r2,r3}                           // Restore scratch registers
_skip_secure_save:
#endif

    /* Determine if time-slice is active. If it isn't, skip time handling processing. */

    LDR     r5, [r4]                                // Pickup current time-slice
    CBZ     r5, __tx_ts_new                         // If not active, skip processing

    /* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable. */

    STR     r5, [r1, #24]                           // Save current time-slice
                                                    // NOTE(review): offset 24 presumably
                                                    // tx_thread_time_slice — confirm.

    /* Clear the global time-slice. */

    STR     r3, [r4]                                // Clear time-slice

    /* Executing thread is now completely preserved!!! */

__tx_ts_new:

    /* Now we are looking for a new thread to execute!
       Interrupts are masked while the execute pointer is sampled so it cannot
       change between the load and the current-pointer update. */

#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                    // Mask interrupt priorities =< TX_PORT_BASEPRI
    MSR     BASEPRI, r1
#else
    CPSID   i                                       // Disable interrupts
#endif
    LDR     r1, [r2]                                // Is there another thread ready to execute?
    CBZ     r1, __tx_ts_wait                        // No, skip to the wait processing

    /* Yes, another thread is ready for execution, make the current thread the new thread. */

    STR     r1, [r0]                                // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                                  // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                       // Enable interrupts
#endif

    /* Increment the thread run count. */

__tx_ts_restore:
    LDR     r7, [r1, #4]                            // Pickup the current thread run count
    LDR     r4, =_tx_timer_time_slice               // Build address of time-slice variable
    LDR     r5, [r1, #24]                           // Pickup thread's current time-slice
    ADD     r7, r7, #1                              // Increment the thread run count
    STR     r7, [r1, #4]                            // Store the new run count

    /* Setup global time-slice with thread's current time-slice. */

    STR     r5, [r4]                                // Setup global time-slice

#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
    /* Call the thread entry function to indicate the thread is executing. */
    PUSH    {r0, r1}                                // Save r0 and r1
    BL      _tx_execution_thread_enter              // Call the thread execution enter function
    POP     {r0, r1}                                // Recover r0 and r1
#endif

#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // Restore secure context (TrustZone: only if the thread has a secure stack)
    LDR     r0, [r1,#0x90]                          // Load secure stack index
    CBZ     r0, _skip_secure_restore                // Skip restore if there is no secure context
    PUSH    {r0,r1}                                 // Save r1 (and dummy r0 for alignment)
    MOV     r0, r1                                  // Move thread ptr to r0 (argument)
    BL      _tx_thread_secure_stack_context_restore // Restore secure stack
    POP     {r0,r1}                                 // Restore r1 (and dummy r0)
_skip_secure_restore:
#endif

    /* Restore the thread context and PSP.  PSPLIM is set first so the stack
       limit is in place before the thread's PSP is adopted (ARMv8-M). */
    LDR     r12, [r1, #12]                          // Get stack start
                                                    // NOTE(review): offset 12 presumably
                                                    // tx_thread_stack_start — confirm.
    MSR     PSPLIM, r12                             // Set stack limit
    LDR     r12, [r1, #8]                           // Pickup thread's stack pointer
    LDMIA   r12!, {LR}                              // Pickup saved EXC_RETURN into LR
#ifdef __ARM_FP
    TST     LR, #0x10                               // EXC_RETURN bit 4 clear => extended (FP) frame present
    BNE     _skip_vfp_restore                       // If not, skip VFP restore
    VLDMIA  r12!, {s16-s31}                         // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
    LDMIA   r12!, {r4-r11}                          // Recover thread's registers
    MSR     PSP, r12                                // Setup the thread's stack pointer

    BX      lr                                      // Exception return to thread!

    /* The following is the idle wait processing... in this case, no threads are ready for execution and the
       system will simply be idle until an interrupt occurs that makes a thread ready.  Note that interrupts
       are disabled to allow use of WFI for waiting for a thread to arrive. */

__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                    // Mask interrupt priorities =< TX_PORT_BASEPRI
    MSR     BASEPRI, r1
#else
    CPSID   i                                       // Disable interrupts
#endif
    LDR     r1, [r2]                                // Pickup the next thread to execute pointer
    STR     r1, [r0]                                // Store it in the current pointer
    CBNZ    r1, __tx_ts_ready                       // If non-NULL, a new thread is ready!

#ifdef TX_LOW_POWER
    PUSH    {r0-r3}                                 // Preserve scratch regs across C call
    BL      tx_low_power_enter                      // Possibly enter low power mode
    POP     {r0-r3}
#endif

#ifdef TX_ENABLE_WFI
    DSB                                             // Ensure no outstanding memory transactions
    WFI                                             // Wait for interrupt
    ISB                                             // Ensure pipeline is flushed
#endif

#ifdef TX_LOW_POWER
    PUSH    {r0-r3}                                 // Preserve scratch regs across C call
    BL      tx_low_power_exit                       // Exit low power mode
    POP     {r0-r3}
#endif

#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                                  // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                       // Enable interrupts
#endif
    B       __tx_ts_wait                            // Loop to continue waiting

    /* At this point, we have a new thread ready to go.  Clear any newly pended PendSV - since we are
       already in the handler! */
__tx_ts_ready:
    MOV     r7, #0x08000000                         // Build clear PendSV value (PENDSVCLR bit)
    MOV     r8, #0xE000E000                         // Build base NVIC address
    STR     r7, [r8, #0xD04]                        // Clear any PendSV via ICSR

    /* Re-enable interrupts and restore new thread. */
#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                                  // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                       // Enable interrupts
#endif
    B       __tx_ts_restore                         // Restore the thread
// }


#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // SVC_Handler dispatches ThreadX secure-stack service calls (TrustZone).
    // It is not needed when ThreadX is running in single mode.
    .section .text
    .balign 4
    .syntax unified
    .eabi_attribute Tag_ABI_align_preserved, 1
    .global  SVC_Handler
    .thumb_func
.type SVC_Handler, function
SVC_Handler:
    TST     lr, #0x04                               // Determine return stack from EXC_RETURN bit 2
    ITE     EQ
    MRSEQ   r0, MSP                                 // Get MSP if return stack is MSP
    MRSNE   r0, PSP                                 // Get PSP if return stack is PSP

    LDR     r1, [r0,#24]                            // Load saved PC from stack (hardware frame offset 24)
    LDRB    r1, [r1,#-2]                            // Load SVC number (immediate byte of the SVC opcode)

    CMP     r1, #1                                  // Is it a secure stack allocate request?
    BEQ     _tx_svc_secure_alloc                    // Yes, go there

    CMP     r1, #2                                  // Is it a secure stack free request?
    BEQ     _tx_svc_secure_free                     // Yes, go there

    CMP     r1, #3                                  // Is it a secure stack init request?
    BEQ     _tx_svc_secure_init                     // Yes, go there

    // Unknown SVC argument - just return
    BX      lr

_tx_svc_secure_alloc:
    PUSH    {r0,lr}                                 // Save SP and EXC_RETURN
    LDM     r0, {r0-r3}                             // Load function parameters from stacked r0-r3
    BL      _tx_thread_secure_mode_stack_allocate
    POP     {r12,lr}                                // Restore SP (into r12) and EXC_RETURN
    STR     r0,[r12]                                // Store function return value into stacked r0
    BX      lr
_tx_svc_secure_free:
    PUSH    {r0,lr}                                 // Save SP and EXC_RETURN
    LDM     r0, {r0-r3}                             // Load function parameters from stacked r0-r3
    BL      _tx_thread_secure_mode_stack_free
    POP     {r12,lr}                                // Restore SP (into r12) and EXC_RETURN
    STR     r0,[r12]                                // Store function return value into stacked r0
    BX      lr
_tx_svc_secure_init:
    PUSH    {r0,lr}                                 // Save SP and EXC_RETURN
    BL      _tx_thread_secure_mode_stack_initialize
    POP     {r12,lr}                                // Restore SP and EXC_RETURN
    BX      lr
#endif  // End of ifndef TX_SINGLE_MODE_SECURE, TX_SINGLE_MODE_NON_SECURE


    // Touch the VFP so lazy FP state activation occurs under OS control.
    .section .text
    .balign 4
    .syntax unified
    .eabi_attribute Tag_ABI_align_preserved, 1
    .global  _tx_vfp_access
    .thumb_func
.type _tx_vfp_access, function
_tx_vfp_access:
#if TX_ENABLE_FPU_SUPPORT
                                                    // NOTE(review): #if on a possibly-undefined macro
                                                    // evaluates to 0 — rest of file guards FP code with
                                                    // #ifdef __ARM_FP; confirm this asymmetry is intended.
    VMOV.F32 s0, s0                                 // Simply access the VFP
#endif
    BX      lr                                      // Return to caller
.end