/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif

    .text
    .align 3
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_schedule                              ARMv8-A-SMP        */
/*                                                           6.3.0        */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function waits for a thread control block pointer to appear in */
/*    the _tx_thread_execute_ptr variable.  Once a thread pointer appears */
/*    in the variable, the corresponding thread is resumed.               */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_initialize_kernel_enter          ThreadX entry function         */
/*    _tx_thread_system_return             Return to system from thread   */
/*    _tx_thread_context_restore           Restore thread's context       */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020      William E. Lamie        Initial Version 6.1           */
/*  01-31-2022      Andres Mlinar           Updated comments,             */
/*                                            added ARMv8.2-A support,    */
/*                                            resulting in version 6.1.10 */
/*  10-31-2023      Tiejun Zhou             Modified comment(s), added    */
/*                                            #include tx_user.h,         */
/*                                            added memory barrier,       */
/*                                            resulting in version 6.3.0  */
/*                                                                        */
/**************************************************************************/
// VOID   _tx_thread_schedule(VOID)
// {
    .global _tx_thread_schedule
    .type   _tx_thread_schedule, @function
_tx_thread_schedule:

    /* Enable interrupts.  */

    MSR     DAIFClr, 0x3                        // Enable interrupts

    /* Pickup the CPU ID.  */

    MRS     x20, MPIDR_EL1                      // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
    UBFX    x1, x20, #16, #8                    // Isolate cluster ID
#endif
    UBFX    x20, x20, #8, #8                    // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
    UBFX    x1, x20, #8, #8                     // Isolate cluster ID
#endif
    UBFX    x20, x20, #0, #8                    // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
    ADDS    x20, x20, x1, LSL #2                // Calculate CPU ID
#endif

    /* Wait for a thread to execute.  */
    // do
    // {

    LDR     x1, =_tx_thread_execute_ptr         // Address of thread execute ptr
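
    /* Illustrative note (an annotation, not original source): each core
       polls its own slot of the execute-pointer array, and the LSL #3 in
       the loads below scales the CPU ID in x20 by the 8-byte pointer
       size.  In the C-style pseudocode this file already uses:  */
    // thread =  _tx_thread_execute_ptr[cpu_id];  /* cpu_id is x20 from above */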
#ifdef TX_ENABLE_WFI
__tx_thread_schedule_loop:
    MSR     DAIFSet, 0x3                        // Lockout interrupts
    LDR     x0, [x1, x20, LSL #3]               // Pickup next thread to execute
    CMP     x0, #0                              // Is it NULL?
    BNE     _tx_thread_schedule_thread          // If not, schedule the thread
    MSR     DAIFClr, 0x3                        // Enable interrupts
    WFI                                         // Wait for interrupt
    B       __tx_thread_schedule_loop           // Keep looking for a thread
_tx_thread_schedule_thread:
#else
    MSR     DAIFSet, 0x3                        // Lockout interrupts
    LDR     x0, [x1, x20, LSL #3]               // Pickup next thread to execute
    CMP     x0, #0                              // Is it NULL?
    BEQ     _tx_thread_schedule                 // Keep looking for a thread
#endif

    // }
    // while(_tx_thread_execute_ptr == TX_NULL);

    /* Get the lock for accessing the thread's ready bit.  */

    MOV     w2, #280                            // Build offset to the lock
    ADD     x2, x0, x2                          // Get the address of the lock
    LDAXR   w3, [x2]                            // Pickup the lock value
    CMP     w3, #0                              // Check if it's available
    BNE     _tx_thread_schedule                 // No, lock not available
    MOV     w3, #1                              // Build the lock set value
    STXR    w4, w3, [x2]                        // Try to get the lock
    CMP     w4, #0                              // Check if we got the lock
    BNE     _tx_thread_schedule                 // No, another core got it first
    DMB     ISH                                 // Ensure write to lock completes

    /* Now make sure the thread's ready bit is set.  */

    LDR     w3, [x0, #260]                      // Pickup the thread ready bit
    CMP     w3, #0                              // Is it set?
    BNE     _tx_thread_ready_for_execution      // Yes, schedule the thread

    /* The ready bit isn't set.  Release the lock and jump back to the scheduler.  */

    MOV     w3, #0                              // Build clear value
    STR     w3, [x2]                            // Release the lock
    DMB     ISH                                 // Ensure write to lock completes
    B       _tx_thread_schedule                 // Jump back to the scheduler

_tx_thread_ready_for_execution:

    /* We have a thread to execute.  Clear the ready bit and release the lock.  */

    MOV     w3, #0                              // Build clear value
    STR     w3, [x0, #260]                      // Store it back in the thread control block
    DMB     ISH                                 // Ensure write completes
    MOV     w3, #0                              // Build clear value for the lock
    STR     w3, [x2]                            // Release the lock
    DMB     ISH                                 // Ensure write to lock completes

    /* Setup the current thread pointer.  */
    // _tx_thread_current_ptr =  _tx_thread_execute_ptr;

    LDR     x2, =_tx_thread_current_ptr         // Pickup address of current thread
    STR     x0, [x2, x20, LSL #3]               // Setup current thread pointer
    DMB     ISH                                 // Ensure write completes
    LDR     x1, [x1, x20, LSL #3]               // Reload the execute pointer
    CMP     x0, x1                              // Did it change? (full 64-bit pointer compare)
    BEQ     _execute_pointer_did_not_change     // If not, skip handling

    /* In the time between reading the execute pointer and assigning
       it to the current pointer, the execute pointer was changed by
       some external code.  If the current pointer was still NULL when
       the external code checked whether a core preempt was necessary,
       it wouldn't have performed one and a preemption would be missed.
       To handle this, undo the updates above and jump back to the
       scheduler so it can schedule the new thread.  */

    MOV     w1, #0                              // Build clear value
    STR     x1, [x2, x20, LSL #3]               // Clear current thread pointer

    MOV     w1, #1                              // Build set value
    STR     w1, [x0, #260]                      // Re-set the ready bit
    DMB     ISH                                 // Ensure write completes

    B       _tx_thread_schedule                 // Jump back to the scheduler to schedule the new thread

_execute_pointer_did_not_change:

    /* Increment the run count for this thread.  */
    // _tx_thread_current_ptr -> tx_thread_run_count++;

    LDR     w2, [x0, #4]                        // Pickup run counter
    LDR     w3, [x0, #36]                       // Pickup time-slice for this thread
    ADD     w2, w2, #1                          // Increment thread run-counter
    STR     w2, [x0, #4]                        // Store the new run counter
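
    /* Illustrative C-style sketch (an annotation, not original source) of
       the ready-bit handshake performed above.  "ready_lock" and "ready"
       are assumed names for the TCB fields at offsets 280 and 260 that
       the LDAXR/STXR sequence manipulates:  */
    // if (!try_lock(&thread -> ready_lock))       /* LDAXR/STXR pair         */
    //     goto _tx_thread_schedule;               /* lost the race, retry    */
    // if (thread -> ready == 0)                   /* thread not ready yet    */
    // {
    //     unlock(&thread -> ready_lock);          /* release and retry       */
    //     goto _tx_thread_schedule;
    // }
    // thread -> ready =  0;                       /* claim the thread        */
    // unlock(&thread -> ready_lock);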
    /* Setup time-slice, if present.  */
    // _tx_timer_time_slice =  _tx_thread_current_ptr -> tx_thread_time_slice;

    LDR     x2, =_tx_timer_time_slice           // Pickup address of time-slice variable
    LDR     x4, [x0, #8]                        // Switch stack pointers
    MOV     sp, x4                              //
    STR     w3, [x2, x20, LSL #2]               // Setup time-slice

#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))

    /* Call the thread entry function to indicate the thread is executing.  */

    MOV     x19, x0                             // Save x0
    BL      _tx_execution_thread_enter          // Call the thread execution enter function
    MOV     x0, x19                             // Restore x0
#endif

    /* Switch to the thread's stack (performed above when sp was loaded).  */
    // sp =  _tx_thread_execute_ptr -> tx_thread_stack_ptr;

    /* Determine if an interrupt frame or a synchronous task suspension frame
       is present.  */

    LDP     x4, x5, [sp], #16                   // Pickup saved SPSR/DAIF and ELR_EL1
    CMP     x5, #0                              // Check for synchronous context switch (saved ELR = NULL)
    BEQ     _tx_solicited_return                // If so, restore the solicited frame
#ifdef EL1
    MSR     SPSR_EL1, x4                        // Setup SPSR for return
    MSR     ELR_EL1, x5                         // Setup point of interrupt
#else
#ifdef EL2
    MSR     SPSR_EL2, x4                        // Setup SPSR for return
    MSR     ELR_EL2, x5                         // Setup point of interrupt
#else
    MSR     SPSR_EL3, x4                        // Setup SPSR for return
    MSR     ELR_EL3, x5                         // Setup point of interrupt
#endif
#endif
#ifdef ENABLE_ARM_FP
    LDR     w1, [x0, #268]                      // Pickup FP enable flag
    CMP     w1, #0                              // Is FP enabled?
    BEQ     _skip_interrupt_fp_restore          // No, skip FP restore
    LDP     x0, x1, [sp], #16                   // Pickup FPSR, FPCR
    MSR     FPSR, x0                            // Recover FPSR
    MSR     FPCR, x1                            // Recover FPCR
    LDP     q30, q31, [sp], #32                 // Recover q30, q31
    LDP     q28, q29, [sp], #32                 // Recover q28, q29
    LDP     q26, q27, [sp], #32                 // Recover q26, q27
    LDP     q24, q25, [sp], #32                 // Recover q24, q25
    LDP     q22, q23, [sp], #32                 // Recover q22, q23
    LDP     q20, q21, [sp], #32                 // Recover q20, q21
    LDP     q18, q19, [sp], #32                 // Recover q18, q19
    LDP     q16, q17, [sp], #32                 // Recover q16, q17
    LDP     q14, q15, [sp], #32                 // Recover q14, q15
    LDP     q12, q13, [sp], #32                 // Recover q12, q13
    LDP     q10, q11, [sp], #32                 // Recover q10, q11
    LDP     q8, q9, [sp], #32                   // Recover q8, q9
    LDP     q6, q7, [sp], #32                   // Recover q6, q7
    LDP     q4, q5, [sp], #32                   // Recover q4, q5
    LDP     q2, q3, [sp], #32                   // Recover q2, q3
    LDP     q0, q1, [sp], #32                   // Recover q0, q1
_skip_interrupt_fp_restore:
#endif
    LDP     x28, x29, [sp], #16                 // Recover x28 (x29 is restored again below)
    LDP     x26, x27, [sp], #16                 // Recover x26, x27
    LDP     x24, x25, [sp], #16                 // Recover x24, x25
    LDP     x22, x23, [sp], #16                 // Recover x22, x23
    LDP     x20, x21, [sp], #16                 // Recover x20, x21
    LDP     x18, x19, [sp], #16                 // Recover x18, x19
    LDP     x16, x17, [sp], #16                 // Recover x16, x17
    LDP     x14, x15, [sp], #16                 // Recover x14, x15
    LDP     x12, x13, [sp], #16                 // Recover x12, x13
    LDP     x10, x11, [sp], #16                 // Recover x10, x11
    LDP     x8, x9, [sp], #16                   // Recover x8, x9
    LDP     x6, x7, [sp], #16                   // Recover x6, x7
    LDP     x4, x5, [sp], #16                   // Recover x4, x5
    LDP     x2, x3, [sp], #16                   // Recover x2, x3
    LDP     x0, x1, [sp], #16                   // Recover x0, x1
    LDP     x29, x30, [sp], #16                 // Recover x29, x30
    ERET                                        // Return to point of interrupt
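
    /* A solicited (synchronous) switch saves only callee-saved state, so
       the restore below is shorter than the interrupt-frame restore above.
       The layout sketched here is inferred from the pop order that follows
       (an illustration, not an authoritative frame map; offsets assume FP
       state was not saved):  */
    //   [sp +  0]  saved DAIF, saved ELR (ELR == 0 marks the frame solicited)
    //   [sp + 16]  x27, x28
    //   [sp + 32]  x25, x26
    //   [sp + 48]  x23, x24
    //   [sp + 64]  x21, x22
    //   [sp + 80]  x19, x20
    //   [sp + 96]  x29, x30 (frame pointer, link register)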
_tx_solicited_return:

#ifdef ENABLE_ARM_FP
    LDR     w1, [x0, #268]                      // Pickup FP enable flag
    CMP     w1, #0                              // Is FP enabled?
    BEQ     _skip_solicited_fp_restore          // No, skip FP restore
    LDP     x0, x1, [sp], #16                   // Pickup FPSR, FPCR
    MSR     FPSR, x0                            // Recover FPSR
    MSR     FPCR, x1                            // Recover FPCR
    LDP     q14, q15, [sp], #32                 // Recover q14, q15
    LDP     q12, q13, [sp], #32                 // Recover q12, q13
    LDP     q10, q11, [sp], #32                 // Recover q10, q11
    LDP     q8, q9, [sp], #32                   // Recover q8, q9
_skip_solicited_fp_restore:
#endif
    LDP     x27, x28, [sp], #16                 // Recover x27, x28
    LDP     x25, x26, [sp], #16                 // Recover x25, x26
    LDP     x23, x24, [sp], #16                 // Recover x23, x24
    LDP     x21, x22, [sp], #16                 // Recover x21, x22
    LDP     x19, x20, [sp], #16                 // Recover x19, x20
    LDP     x29, x30, [sp], #16                 // Recover x29, x30
    MSR     DAIF, x4                            // Recover DAIF
    RET                                         // Return to caller
// }