/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/


#define UserLocal    $4,2
#define C0_TCBind    $2,2
#define C0_TCContext $2,5
#define C0_TCHalt    $2,4


#ifdef TX_THREAD_SMP_WAKEUP_LOGIC
    .globl  TX_MIPS32_1004K_VPE_YIELD
#endif

    .text
    .set    noreorder
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_schedule                         MIPS32_interAptiv/GNU   */
/*                                                           6.2.1        */
/*  AUTHOR                                                                */
/*                                                                        */
/*    Scott Larson, Microsoft Corporation                                 */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function waits for a thread control block pointer to appear in */
/*    the _tx_thread_execute_ptr variable.  Once a thread pointer appears */
/*    in the variable, the corresponding thread is resumed.               */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_initialize_kernel_enter          ThreadX entry function         */
/*    _tx_thread_system_return             Return to system from thread   */
/*    _tx_thread_context_restore           Restore thread's context       */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  03-08-2023      Scott Larson            Initial Version 6.2.1         */
/*                                                                        */
/**************************************************************************/
/* VOID   _tx_thread_schedule(VOID)
{  */
    .globl  _tx_thread_schedule_idle_system
_tx_thread_schedule_idle_system:

#ifndef TX_THREAD_SMP_WAKEUP_LOGIC

    ei                                          # Enable interrupts
    ehb                                         #
    li      $11, -1                             #
    yield   $11, $11                            # Yield so this VPE does not consume all the cycles
    lw      $8, ($9)                            # Pickup next thread to execute
    beq     $8, $0, _tx_thread_schedule_idle_system # If NULL, keep yielding in the idle loop
    nop                                         # Delay slot

#else

    la      $8, TX_MIPS32_1004K_VPE_YIELD       # Get call-out address
    jalr    $8                                  # Make the call
    or      $4, $0, $9                          # Pass the properly indexed _tx_thread_execute_ptr[x] - delay slot

#endif

    .globl  _tx_thread_schedule
_tx_thread_schedule:


    /* Enable interrupts briefly so any pending interrupts can be taken.  */

    ei                                          # Enable interrupts
    ehb                                         #

    /* Disable interrupts.  */

    di                                          # Disable interrupts
    ehb                                         #

    /* Pickup the executing VPE number.  */

    mfc0    $25, UserLocal                      # Pickup VPE ID
    sll     $25, $25, 2                         # Build index based on VPE number

    /* Calculate the execute pointer for this VPE.  */

    la      $9, _tx_thread_execute_ptr          # Pickup starting address of execute list
    addu    $9, $9, $25                         # Build address of execute pointer for this TC

    /* Wait for a thread to execute.  */
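
    /* A rough C sketch of the wait/schedule logic below, for orientation
       only.  ready_bit_test_and_clear() and restore_context() are
       hypothetical helpers standing in for the bit-15 handshake on the
       word at offset 152 of the TX_THREAD and for the register restore
       paths later in this file:

        do
        {
            thread = _tx_thread_execute_ptr[vpe];       // NULL -> idle/yield
        } while ((thread == TX_NULL) || (!ready_bit_test_and_clear(thread)));

        _tx_thread_current_ptr[vpe] = thread;           // Make thread current
        thread -> tx_thread_run_count++;                // Count this scheduling
        _tx_timer_time_slice[vpe] = thread -> tx_thread_time_slice;
        restore_context(thread -> tx_thread_stack_ptr); // Interrupt or solicited frame
    */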
    /* do
    {  */

_tx_thread_schedule_loop:

    lw      $8, ($9)                            # Pickup next thread to execute
    beq     $8, $0, _tx_thread_schedule_idle_system # If NULL, idle until a thread appears
    nop                                         # Delay slot

_tx_thread_check_ready_bit:
    lw      $9, 152($8)                         # Pickup the thread's VPE control register
    andi    $10, $9, 0x8000                     # Pickup ready bit (bit 15)
    bne     $10, $0, _tx_thread_is_ready        # If ready bit is set, actually schedule the thread
    andi    $9, $9, 0x7FFF                      # Clear the ready bit (bit 15) - delay slot
    b       _tx_thread_schedule                 # Not ready - resume at the top of the scheduling loop
    nop                                         # Delay slot

_tx_thread_is_ready:

    sw      $9, 152($8)                         # Store the cleared ready bit to prevent any other VPE from scheduling this thread
    sync                                        # Ensure the store is visible to the other VPEs

    /* }
    while(_tx_thread_execute_ptr[VPE] == TX_NULL);  */


_tx_thread_schedule_thread:

    /* Yes! We have a thread to execute.  Interrupts and multithreading are locked out.
       Pickup the thread's register context, enable multithreading, and transfer control to
       the thread.  */

    /* Save this thread in the context register of the TC.  */

    mtc0    $8, C0_TCContext                    # Set TCContext to current thread
    ehb                                         #


#ifdef TX_ENABLE_EVENT_LOGGING
    or      $16, $8, $0                         # Save thread pointer into non-volatile
    or      $4, $8, $0                          # Move thread pointer into input register
    la      $9, _tx_el_thread_running           # Build address of thread running event routine
    jal     $9                                  # Call event logging routine
    nop                                         # Delay slot
    or      $8, $16, $0                         # Recover thread pointer
#endif

    /* Setup the current thread pointer.  */
    /* _tx_thread_current_ptr[VPE] = _tx_thread_execute_ptr[VPE];  */

    la      $9, _tx_thread_current_ptr          # Pickup current thread pointer address
    addu    $9, $9, $25                         # Offset to VPE specific entry
    sw      $8, ($9)                            # Set current thread pointer

    /* Increment the run count for this thread.  */
    /* _tx_thread_current_ptr[VPE] -> tx_thread_run_count++;  */

    lw      $10, 4($8)                          # Pickup run count
    lw      $11, 24($8)                         # Pickup time slice value
    addu    $10, $10, 1                         # Increment run count
    sw      $10, 4($8)                          # Store new run count

    /* Setup time-slice, if present.  */
    /* _tx_timer_time_slice[VPE] = _tx_thread_current_ptr[VPE] -> tx_thread_time_slice;  */

    la      $10, _tx_timer_time_slice           # Pickup time-slice variable address
    addu    $10, $10, $25                       # Offset to VPE specific time-slice

    /* Switch to the thread's stack.  */
    /* SP = _tx_thread_current_ptr[VPE] -> tx_thread_stack_ptr;  */

    lw      $29, 8($8)                          # Switch to thread's stack
    lw      $15, 176($8)                        # Pickup FPU enable flag in TX_THREAD structure
    sw      $11, ($10)                          # Store new time-slice

    /* Determine if an interrupt frame or a synchronous task suspension frame
       is present.  */

    lw      $10, ($29)                          # Pickup stack type
    beqz    $10, _tx_thread_synch_return        # If 0, solicited thread return
    nop                                         # Delay slot

#ifdef TX_ENABLE_64BIT_FPU_SUPPORT

    /* Check if FPU is enabled for this thread.  Note that threads with FPU enabled will only be
       scheduled on VPE 0.  */
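
    /* The status register (SR) fixup below manages the CU1 "coprocessor 1
       usable" bit.  A rough C view of the branch structure that follows,
       with mask values taken from the instructions themselves (vpe and
       fpu_flag are illustrative names for the values in $8 and $15, and
       reload_fpu_context() stands in for the ctc1/ldc1 sequence):

        sr &= 0xDFFFFFFF;                   // Clear CU1 (bit 29)
        if (vpe == 0)
        {
            sr |= 0x20000000;               // FPU usable on VPE 0
            if (fpu_flag != 0)
                reload_fpu_context();       // fcr31 and f0-f31 from the frame
        }
        frame -> sr = sr;                   // Stored at _tx_skip_fpu_int_restore
    */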

    lw      $9, 120($29)                        # Recover SR
    li      $10, 0xDFFFFFFF                     # Mask for FPU enable bit
    mfc0    $8, UserLocal                       # Pickup VPE ID
    and     $9, $9, $10                         # Build SR with FPU enable bit masked
    bne     $8, $0, _tx_skip_fpu_int_restore    # If not VPE 0, skip FPU restore
    li      $10, 0x20000000                     # Build FPU enable bit - delay slot
    or      $9, $9, $10                         # Build SR with FPU enable
    beq     $15, $0, _tx_skip_fpu_int_restore   # If FPU not enabled, skip FPU restore
    nop                                         # Delay slot
    lw      $8, 384($29)                        # Recover fcr31
    ctc1    $8, $31                             # Setup fcr31
    ldc1    $f31, 128($29)                      # Recover f31
    ldc1    $f30, 136($29)                      # Recover f30
    ldc1    $f29, 144($29)                      # Recover f29
    ldc1    $f28, 152($29)                      # Recover f28
    ldc1    $f27, 160($29)                      # Recover f27
    ldc1    $f26, 168($29)                      # Recover f26
    ldc1    $f25, 176($29)                      # Recover f25
    ldc1    $f24, 184($29)                      # Recover f24
    ldc1    $f23, 192($29)                      # Recover f23
    ldc1    $f22, 200($29)                      # Recover f22
    ldc1    $f21, 208($29)                      # Recover f21
    ldc1    $f20, 216($29)                      # Recover f20
    ldc1    $f19, 224($29)                      # Recover f19
    ldc1    $f18, 232($29)                      # Recover f18
    ldc1    $f17, 240($29)                      # Recover f17
    ldc1    $f16, 248($29)                      # Recover f16
    ldc1    $f15, 256($29)                      # Recover f15
    ldc1    $f14, 264($29)                      # Recover f14
    ldc1    $f13, 272($29)                      # Recover f13
    ldc1    $f12, 280($29)                      # Recover f12
    ldc1    $f11, 288($29)                      # Recover f11
    ldc1    $f10, 296($29)                      # Recover f10
    ldc1    $f9, 304($29)                       # Recover f9
    ldc1    $f8, 312($29)                       # Recover f8
    ldc1    $f7, 320($29)                       # Recover f7
    ldc1    $f6, 328($29)                       # Recover f6
    ldc1    $f5, 336($29)                       # Recover f5
    ldc1    $f4, 344($29)                       # Recover f4
    ldc1    $f3, 352($29)                       # Recover f3
    ldc1    $f2, 360($29)                       # Recover f2
    ldc1    $f1, 368($29)                       # Recover f1
    ldc1    $f0, 376($29)                       # Recover f0

_tx_skip_fpu_int_restore:
    sw      $9, 120($29)                        # Store new SR

#endif

    /* Recover standard registers.  */

    lw      $8, 124($29)                        # Recover EPC
    lw      $9, 120($29)                        # Recover SR
    mtc0    $8, $14                             # Setup EPC
    ehb                                         #
    lw      $30, 4($29)                         # Recover s8

    mtc0    $9, $12                             # Restore SR
    ehb                                         # Clear hazards
    lw      $23, 8($29)                         # Recover s7
    lw      $22, 12($29)                        # Recover s6
    lw      $21, 16($29)                        # Recover s5
    lw      $20, 20($29)                        # Recover s4
    lw      $19, 24($29)                        # Recover s3
    lw      $18, 28($29)                        # Recover s2
    lw      $17, 32($29)                        # Recover s1
    lw      $16, 36($29)                        # Recover s0
    lw      $8, 40($29)                         # Recover hi
    lw      $9, 44($29)                         # Recover lo
    mthi    $8                                  # Setup hi
    mtlo    $9                                  # Setup lo
    lw      $25, 48($29)                        # Recover t9
    lw      $24, 52($29)                        # Recover t8
    lw      $15, 56($29)                        # Recover t7
    lw      $14, 60($29)                        # Recover t6
    lw      $13, 64($29)                        # Recover t5
    lw      $12, 68($29)                        # Recover t4
    lw      $11, 72($29)                        # Recover t3
    lw      $10, 76($29)                        # Recover t2
    lw      $9, 80($29)                         # Recover t1
    lw      $8, 84($29)                         # Recover t0
    lw      $7, 88($29)                         # Recover a3
    lw      $6, 92($29)                         # Recover a2
    lw      $5, 96($29)                         # Recover a1
    lw      $4, 100($29)                        # Recover a0
    lw      $3, 104($29)                        # Recover v1
    lw      $2, 108($29)                        # Recover v0
    .set    noat
    lw      $1, 112($29)                        # Recover at
    .set    at
    lw      $31, 116($29)                       # Recover ra
    addu    $29, $29, 392                       # Recover stack frame
    emt                                         # Enable multithreading again
    eret                                        # Return to point of interrupt

_tx_thread_synch_return:

#ifdef TX_ENABLE_64BIT_FPU_SUPPORT

    /* Check if FPU is enabled for this thread.  Note that threads with FPU enabled will only be
       scheduled on VPE 0.  */
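
    /* A solicited frame saves only the registers the ABI requires a callee
       to preserve, so just fcr31 and f20-f31 are reloaded here (the
       interrupt path above restores all of f0-f31).  An illustrative frame
       layout inferred from the offsets used below - the struct and field
       names are hypothetical, not from tx_port.h:

        typedef struct
        {
            ULONG   frame_type;             //   0: 0 indicates solicited frame
            ULONG   s8_s0[9];               //   4: s8, s7..s0
            ULONG   hi, lo;                 //  40, 44
            ULONG   ra;                     //  48
            ULONG   sr;                     //  52
            double  f31_f20[12];            //  56: f31 down to f20
            ULONG   fcr31;                  // 152 (frame is 160 bytes total)
        } TX_SOLICITED_FRAME_SKETCH;
    */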

    lw      $9, 52($29)                         # Recover SR
    li      $10, 0xDFFFFFFF                     # Mask for FPU enable bit
    mfc0    $8, UserLocal                       # Pickup VPE ID
    and     $9, $9, $10                         # Build SR with FPU enable bit masked
    bne     $8, $0, _tx_skip_fpu_sync_restore   # If not VPE 0, skip FPU restore
    li      $10, 0x20000000                     # Build FPU enable bit - delay slot
    or      $9, $9, $10                         # Build SR with FPU enable
    beq     $15, $0, _tx_skip_fpu_sync_restore  # If FPU not enabled, skip FPU restore
    nop                                         # Delay slot
    lw      $8, 152($29)                        # Recover fcr31
    ctc1    $8, $31                             # Setup fcr31
    ldc1    $f31, 56($29)                       # Recover f31
    ldc1    $f30, 64($29)                       # Recover f30
    ldc1    $f29, 72($29)                       # Recover f29
    ldc1    $f28, 80($29)                       # Recover f28
    ldc1    $f27, 88($29)                       # Recover f27
    ldc1    $f26, 96($29)                       # Recover f26
    ldc1    $f25, 104($29)                      # Recover f25
    ldc1    $f24, 112($29)                      # Recover f24
    ldc1    $f23, 120($29)                      # Recover f23
    ldc1    $f22, 128($29)                      # Recover f22
    ldc1    $f21, 136($29)                      # Recover f21
    ldc1    $f20, 144($29)                      # Recover f20
_tx_skip_fpu_sync_restore:
    sw      $9, 52($29)                         # Store new SR
#endif

    /* Recover standard preserved registers.  */

    lw      $30, 4($29)                         # Recover s8
    lw      $23, 8($29)                         # Recover s7
    lw      $22, 12($29)                        # Recover s6
    lw      $21, 16($29)                        # Recover s5
    lw      $20, 20($29)                        # Recover s4
    lw      $19, 24($29)                        # Recover s3
    lw      $18, 28($29)                        # Recover s2
    lw      $17, 32($29)                        # Recover s1
    lw      $16, 36($29)                        # Recover s0
    lw      $8, 40($29)                         # Recover hi
    lw      $9, 44($29)                         # Recover lo
    mthi    $8                                  # Setup hi
    mtlo    $9                                  # Setup lo
    lw      $8, 52($29)                         # Recover SR
    lw      $31, 48($29)                        # Recover ra
    addu    $29, $29, 160                       # Recover stack space
    mtc0    $8, $12                             # Restore SR
    ehb                                         # Clear hazards
    emt                                         # Enable multithreading
    jr.hb   $31                                 # Return to thread
    nop                                         # Delay slot

/* }  */
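
/* When TX_THREAD_SMP_WAKEUP_LOGIC is defined, the idle path above calls out
   instead of spinning on yield.  Judging from the argument set up in the jalr
   delay slot ($4 = &_tx_thread_execute_ptr[vpe]), the call-out presumably has
   a C-callable shape along the lines of:

    VOID TX_MIPS32_1004K_VPE_YIELD(TX_THREAD **execute_ptr);

   returning once a thread may be available so the scheduler can re-check the
   slot.  This prototype is inferred from the calling code, not taken from a
   header.  */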