1/**************************************************************************/ 2/* */ 3/* Copyright (c) Microsoft Corporation. All rights reserved. */ 4/* */ 5/* This software is licensed under the Microsoft Software License */ 6/* Terms for Microsoft Azure RTOS. Full text of the license can be */ 7/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */ 8/* and in the root directory of this software. */ 9/* */ 10/**************************************************************************/ 11 12 13/**************************************************************************/ 14/**************************************************************************/ 15/** */ 16/** ThreadX Component */ 17/** */ 18/** Thread */ 19/** */ 20/**************************************************************************/ 21/**************************************************************************/ 22 23#ifdef TX_INCLUDE_USER_DEFINE_FILE 24#include "tx_user.h" 25#endif 26 27#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE)) 28 .global _tx_execution_thread_enter 29 .global _tx_execution_thread_exit 30#endif 31/**************************************************************************/ 32/* */ 33/* FUNCTION RELEASE */ 34/* */ 35/* _tx_thread_schedule Cortex-Mxx/AC6 */ 36/* 6.2.1 */ 37/* AUTHOR */ 38/* */ 39/* Scott Larson, Microsoft Corporation */ 40/* */ 41/* DESCRIPTION */ 42/* */ 43/* This function waits for a thread control block pointer to appear in */ 44/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */ 45/* in the variable, the corresponding thread is resumed. 
*/
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_initialize_kernel_enter          ThreadX entry function         */
/*    _tx_thread_system_return             Return to system from thread   */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020      Scott Larson            Initial Version 6.1           */
/*  04-02-2021      Scott Larson            Modified comment(s), added    */
/*                                            low power code,             */
/*                                            resulting in version 6.1.6  */
/*  06-02-2021      Scott Larson            Added secure stack initialize */
/*                                            in SVC handler,             */
/*                                            resulting in version 6.1.7  */
/*  04-25-2022      Scott Larson            Added BASEPRI support,        */
/*                                            resulting in version 6.1.11 */
/*  03-08-2023      Scott Larson            Added preproc FPU option,     */
/*                                            included tx_user.h,         */
/*                                            resulting in version 6.2.1  */
/*                                                                        */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
    .section .text
    .balign 4
    .syntax unified
    .eabi_attribute Tag_ABI_align_preserved, 1
    .global  _tx_thread_schedule
    .thumb_func
.type _tx_thread_schedule, function
_tx_thread_schedule:
    /* This function should only ever be called on Cortex-M
       from the first schedule request. Subsequent scheduling occurs
       from the PendSV handling routine below. */

    /* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets. */
    MOV     r0, #0                              // Build value for TX_FALSE
    LDR     r2, =_tx_thread_preempt_disable     // Build address of preempt disable flag
    STR     r0, [r2, #0]                        // Clear preempt disable flag

#ifdef __ARM_PCS_VFP
    /* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked. */
    MRS     r0, CONTROL                         // Pickup current CONTROL register
    BIC     r0, r0, #4                          // Clear the FPCA bit
    MSR     CONTROL, r0                         // Setup new CONTROL register
#endif

    /* Enable interrupts */
    CPSIE   i

    /* Enter the scheduler for the first time.  The actual first context
       switch is performed by the PendSV handler below; here we simply pend
       it by writing PENDSVSET into the ICSR (SCB offset 0xD04). */
    MOV     r0, #0x10000000                     // Load PENDSVSET bit
    MOV     r1, #0xE000E000                     // Load NVIC base
    STR     r0, [r1, #0xD04]                    // Set PENDSVBIT in ICSR
    DSB                                         // Complete all memory accesses
    ISB                                         // Flush pipeline

    /* Wait here for the PendSV to take place. */

__tx_wait_here:
    B       __tx_wait_here                      // Wait for the PendSV to happen
// }

    /* Generic context switching PendSV handler. */

    .section .text
    .balign 4
    .syntax unified
    .eabi_attribute Tag_ABI_align_preserved, 1
    .global  PendSV_Handler
    .thumb_func
.type PendSV_Handler, function
    /* Get current thread value and new thread pointer. */
PendSV_Handler:
__tx_ts_handler:

#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
    /* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                // Mask interrupt priorities =< TX_PORT_BASEPRI
    MSR     BASEPRI, r1
#else
    CPSID   i                                   // Disable interrupts
#endif  /* TX_PORT_USE_BASEPRI */
    PUSH    {r0, lr}                            // Save LR (and r0 just for alignment)
    BL      _tx_execution_thread_exit           // Call the thread exit function
    POP     {r0, lr}                            // Recover LR
#ifdef TX_PORT_USE_BASEPRI
    MOV     r0, #0                              // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r0
#else
    CPSIE   i                                   // Enable interrupts
#endif  /* TX_PORT_USE_BASEPRI */
#endif  /* EXECUTION PROFILE */

    LDR     r0, =_tx_thread_current_ptr         // Build current thread pointer address
    LDR     r2, =_tx_thread_execute_ptr         // Build execute thread pointer address
    MOV     r3, #0                              // Build NULL value
    LDR     r1, [r0]                            // Pickup current thread pointer

    /* Determine if there is a current thread to finish preserving. */

    CBZ     r1, __tx_ts_new                     // If NULL, skip preservation

    /* Recover PSP and preserve current thread context. */

    STR     r3, [r0]                            // Set _tx_thread_current_ptr to NULL
    MRS     r12, PSP                            // Pickup PSP pointer (thread's stack pointer)
    STMDB   r12!, {r4-r11}                      // Save its remaining registers
#ifdef __ARM_PCS_VFP
    TST     LR, #0x10                           // Determine if the VFP extended frame is present
                                                //   (EXC_RETURN bit 4 clear => FP context active)
    BNE     _skip_vfp_save
    VSTMDB  r12!, {s16-s31}                     // Yes, save additional VFP registers
_skip_vfp_save:
#endif
    LDR     r4, =_tx_timer_time_slice           // Build address of time-slice variable
    STMDB   r12!, {LR}                          // Save LR on the stack
    STR     r12, [r1, #8]                       // Save the thread stack pointer

#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // Save secure context
    LDR     r5, [r1, #0x90]                     // Load secure stack index
    CBZ     r5, _skip_secure_save               // Skip save if there is no secure context
    PUSH    {r0, r1, r2, r3}                    // Save scratch registers
    MOV     r0, r1                              // Move thread ptr to r0
    BL      _tx_thread_secure_stack_context_save    // Save secure stack
    POP     {r0, r1, r2, r3}                    // Restore secure registers
_skip_secure_save:
#endif

    /* Determine if time-slice is active. If it isn't, skip time handling processing. */

    LDR     r5, [r4]                            // Pickup current time-slice
    CBZ     r5, __tx_ts_new                     // If not active, skip processing

    /* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable. */

    STR     r5, [r1, #24]                       // Save current time-slice

    /* Clear the global time-slice. */

    STR     r3, [r4]                            // Clear time-slice

    /* Executing thread is now completely preserved!!! */

__tx_ts_new:

    /* Now we are looking for a new thread to execute! */

#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                // Mask interrupt priorities =< TX_PORT_BASEPRI
    MSR     BASEPRI, r1
#else
    CPSID   i                                   // Disable interrupts
#endif
    LDR     r1, [r2]                            // Is there another thread ready to execute?
    CBZ     r1, __tx_ts_wait                    // No, skip to the wait processing

    /* Yes, another thread is ready for else, make the current thread the new thread. */

    STR     r1, [r0]                            // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                              // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                   // Enable interrupts
#endif

    /* Increment the thread run count. */

__tx_ts_restore:
    LDR     r7, [r1, #4]                        // Pickup the current thread run count
    LDR     r4, =_tx_timer_time_slice           // Build address of time-slice variable
    LDR     r5, [r1, #24]                       // Pickup thread's current time-slice
    ADD     r7, r7, #1                          // Increment the thread run count
    STR     r7, [r1, #4]                        // Store the new run count

    /* Setup global time-slice with thread's current time-slice. */

    STR     r5, [r4]                            // Setup global time-slice

#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
    /* Call the thread entry function to indicate the thread is executing. */
    PUSH    {r0, r1}                            // Save r0 and r1
    BL      _tx_execution_thread_enter          // Call the thread execution enter function
    POP     {r0, r1}                            // Recover r0 and r1
#endif

#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // Restore secure context
    LDR     r0, [r1, #0x90]                     // Load secure stack index
    CBZ     r0, _skip_secure_restore            // Skip restore if there is no secure context
    PUSH    {r0, r1}                            // Save r1 (and dummy r0)
    MOV     r0, r1                              // Move thread ptr to r0
    BL      _tx_thread_secure_stack_context_restore // Restore secure stack
    POP     {r0, r1}                            // Restore r1 (and dummy r0)
_skip_secure_restore:
#endif

    /* Restore the thread context and PSP. */
    LDR     r12, [r1, #12]                      // Get stack start
    MSR     PSPLIM, r12                         // Set stack limit
    LDR     r12, [r1, #8]                       // Pickup thread's stack pointer
    LDMIA   r12!, {LR}                          // Pickup LR
#ifdef __ARM_PCS_VFP
    TST     LR, #0x10                           // Determine if the VFP extended frame is present
    BNE     _skip_vfp_restore                   // If not, skip VFP restore
    VLDMIA  r12!, {s16-s31}                     // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
    LDMIA   r12!, {r4-r11}                      // Recover thread's registers
    MSR     PSP, r12                            // Setup the thread's stack pointer

    BX      lr                                  // Return to thread!

    /* The following is the idle wait processing... in this case, no threads are ready for execution and the
       system will simply be idle until an interrupt occurs that makes a thread ready.  Note that interrupts
       are disabled to allow use of WFI for waiting for a thread to arrive. */

__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
    LDR     r1, =TX_PORT_BASEPRI                // Mask interrupt priorities =< TX_PORT_BASEPRI
    MSR     BASEPRI, r1
#else
    CPSID   i                                   // Disable interrupts
#endif
    LDR     r1, [r2]                            // Pickup the next thread to execute pointer
    STR     r1, [r0]                            // Store it in the current pointer
    CBNZ    r1, __tx_ts_ready                   // If non-NULL, a new thread is ready!

#ifdef TX_LOW_POWER
    PUSH    {r0-r3}
    BL      tx_low_power_enter                  // Possibly enter low power mode
    POP     {r0-r3}
#endif

#ifdef TX_ENABLE_WFI
    DSB                                         // Ensure no outstanding memory transactions
    WFI                                         // Wait for interrupt
    ISB                                         // Ensure pipeline is flushed
#endif

#ifdef TX_LOW_POWER
    PUSH    {r0-r3}
    BL      tx_low_power_exit                   // Exit low power mode
    POP     {r0-r3}
#endif

#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                              // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                   // Enable interrupts
#endif
    B       __tx_ts_wait                        // Loop to continue waiting

    /* At this point, we have a new thread ready to go.  Clear any newly pended PendSV - since we are
       already in the handler! */
__tx_ts_ready:
    MOV     r7, #0x08000000                     // Build clear PendSV value (PENDSVCLR)
    MOV     r8, #0xE000E000                     // Build base NVIC address
    STR     r7, [r8, #0xD04]                    // Clear any PendSV

    /* Re-enable interrupts and restore new thread. */
#ifdef TX_PORT_USE_BASEPRI
    MOV     r4, #0                              // Disable BASEPRI masking (enable interrupts)
    MSR     BASEPRI, r4
#else
    CPSIE   i                                   // Enable interrupts
#endif
    B       __tx_ts_restore                     // Restore the thread
// }


#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
    // SVC_Handler is not needed when ThreadX is running in single mode.
    .section .text
    .balign 4
    .syntax unified
    .eabi_attribute Tag_ABI_align_preserved, 1
    .global  SVC_Handler
    .thumb_func
.type SVC_Handler, function
SVC_Handler:
    TST     lr, #0x04                           // Determine return stack from EXC_RETURN bit 2
    ITE     EQ
    MRSEQ   r0, MSP                             // Get MSP if return stack is MSP
    MRSNE   r0, PSP                             // Get PSP if return stack is PSP

    LDR     r1, [r0, #24]                       // Load saved PC from stack
    LDRB    r1, [r1, #-2]                       // Load SVC number (immediate byte of the SVC instruction)

    CMP     r1, #1                              // Is it a secure stack allocate request?
    BEQ     _tx_svc_secure_alloc                // Yes, go there

    CMP     r1, #2                              // Is it a secure stack free request?
    BEQ     _tx_svc_secure_free                 // Yes, go there

    CMP     r1, #3                              // Is it a secure stack init request?
    BEQ     _tx_svc_secure_init                 // Yes, go there

    // Unknown SVC argument - just return
    BX      lr

_tx_svc_secure_alloc:
    PUSH    {r0, lr}                            // Save SP and EXC_RETURN
    LDM     r0, {r0-r3}                         // Load function parameters from stack
    BL      _tx_thread_secure_mode_stack_allocate
    POP     {r12, lr}                           // Restore SP and EXC_RETURN
    STR     r0, [r12]                           // Store function return value
    BX      lr
_tx_svc_secure_free:
    PUSH    {r0, lr}                            // Save SP and EXC_RETURN
    LDM     r0, {r0-r3}                         // Load function parameters from stack
    BL      _tx_thread_secure_mode_stack_free
    POP     {r12, lr}                           // Restore SP and EXC_RETURN
    STR     r0, [r12]                           // Store function return value
    BX      lr
_tx_svc_secure_init:
    PUSH    {r0, lr}                            // Save SP and EXC_RETURN
    BL      _tx_thread_secure_mode_stack_initialize
    POP     {r12, lr}                           // Restore SP and EXC_RETURN
    BX      lr
#endif  // End of ifndef TX_SINGLE_MODE_SECURE, TX_SINGLE_MODE_NON_SECURE


    .section .text
    .balign 4
    .syntax unified
    .eabi_attribute Tag_ABI_align_preserved, 1
    .global  _tx_vfp_access
    .thumb_func
.type _tx_vfp_access, function
_tx_vfp_access:
#ifdef __ARM_PCS_VFP
    VMOV.F32 s0, s0                             // Simply access the VFP
#endif
    BX      lr                                  // Return to caller
.end