/***************************************************************************
 * Copyright (c) 2024 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 **************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE
#define TX_THREAD_SMP_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_thread.h"
#include "tx_timer.h"
#include "tx_trace.h"


/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_relinquish                             PORTABLE SMP      */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function determines if there is another higher or equal       */
/*    priority, non-executing thread that can execute on this processor. */
/*    If such a thread is found, the calling thread relinquishes control.*/
/*    Otherwise, this function simply returns.                           */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_smp_rebalance_execute_list Rebalance the execution list  */
/*    _tx_thread_system_return              Return to the system          */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    Application Code                                                    */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020     William E. Lamie         Initial Version 6.1           */
/*                                                                        */
/**************************************************************************/
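
/* Example usage (an illustrative sketch, not part of the kernel source):
   an application thread voluntarily yields the processor to other ready
   threads of equal or higher priority by calling the tx_thread_relinquish
   service, which tx_api.h maps onto this function (directly, or through
   its error-checking wrapper).  The worker loop and do_unit_of_work()
   below are hypothetical.

        VOID worker_thread_entry(ULONG input)
        {
            while (1)
            {
                do_unit_of_work();
                tx_thread_relinquish();
            }
        }
*/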

VOID  _tx_thread_relinquish(VOID)
{

TX_INTERRUPT_SAVE_AREA

UINT            priority;
TX_THREAD       *thread_ptr;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
TX_THREAD       *next_thread;
TX_THREAD       *previous_thread;
UINT            core_index;
UINT            rebalance;
UINT            mapped_core;
ULONG           excluded;

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD
UINT            base_priority;
UINT            priority_bit_set;
UINT            next_preempted;
ULONG           priority_bit;
ULONG           priority_map;
TX_THREAD       *preempted_thread;
#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif
#endif
UINT            finished;


    /* Default finished to false.  */
    finished =  TX_FALSE;

    /* Initialize the rebalance flag to false.  */
    rebalance =  TX_FALSE;
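
    /* Note: the rebalance flag is set below whenever moving the relinquishing
       thread could change the thread-to-core mapping; in that case
       _tx_thread_smp_rebalance_execute_list() is called to recompute the
       execute list before control is given up.  */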

    /* Lockout interrupts while thread attempts to relinquish control.  */
    TX_DISABLE

    /* Pickup the index.  */
    core_index =  TX_SMP_CORE_ID;

    /* Pickup the current thread pointer.  */
    thread_ptr =  _tx_thread_current_ptr[core_index];

#ifndef TX_NO_TIMER

    /* Reset time slice for current thread.  */
    _tx_timer_time_slice[core_index] =  thread_ptr -> tx_thread_new_time_slice;
#endif

#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RELINQUISH, &thread_ptr, TX_POINTER_TO_ULONG_CONVERT(thread_ptr -> tx_thread_ready_next), 0, 0, TX_TRACE_THREAD_EVENTS)

    /* Log this kernel call.  */
    TX_EL_THREAD_RELINQUISH_INSERT

    /* Pickup the thread's priority.  */
    priority =  thread_ptr -> tx_thread_priority;

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

    /* Debug entry.  */
    _tx_thread_smp_debug_entry_insert(0, 0, thread_ptr);
#endif

    /* Pickup the next thread.  */
    next_thread =  thread_ptr -> tx_thread_ready_next;

    /* Pickup the head of the list.  */
    head_ptr =  _tx_thread_priority_list[priority];

    /* Pickup the list tail.  */
    tail_ptr =  head_ptr -> tx_thread_ready_previous;
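
    /* Each priority level keeps its ready threads on a circular, doubly-linked
       list; _tx_thread_priority_list[priority] points at the head, so the
       head's previous pointer is the tail.  Relinquishing moves the calling
       thread to the tail of this list.  */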

    /* Determine if this thread is not the tail pointer.  */
    if (thread_ptr != tail_ptr)
    {

        /* Not the tail pointer, this thread must be moved to the end of the ready list.  */

        /* Determine if this thread is at the head of the list.  */
        if (head_ptr == thread_ptr)
        {

            /* Simply move the head pointer to put this thread at the end of the ready list at this priority.  */
            _tx_thread_priority_list[priority] =  next_thread;
        }
        else
        {

            /* Now we need to remove this thread from its current position and place it at the end of the list.  */

            /* Pickup the previous thread pointer.  */
            previous_thread =  thread_ptr -> tx_thread_ready_previous;

            /* Remove the thread from the ready list.  */
            next_thread -> tx_thread_ready_previous =  previous_thread;
            previous_thread -> tx_thread_ready_next =  next_thread;

            /* Insert the thread at the end of the list.  */
            tail_ptr -> tx_thread_ready_next =        thread_ptr;
            head_ptr -> tx_thread_ready_previous =    thread_ptr;
            thread_ptr -> tx_thread_ready_previous =  tail_ptr;
            thread_ptr -> tx_thread_ready_next =      head_ptr;
        }

        /* Pickup the mapped core of the relinquishing thread - this can be different from the current core.  */
        mapped_core =  thread_ptr -> tx_thread_smp_core_mapped;

        /* Determine if the relinquishing thread is no longer present in the execute list.  */
        if (thread_ptr != _tx_thread_execute_ptr[mapped_core])
        {

            /* Yes, the thread is no longer mapped.  Set the rebalance flag to determine if there is a new mapping due to moving
               this thread to the end of the priority list.  */

            /* Set the rebalance flag to true.  */
            rebalance =  TX_TRUE;
        }

        /* Determine if preemption-threshold is in force.  */
        else if (thread_ptr -> tx_thread_preempt_threshold == priority)
        {

            /* No preemption-threshold is in force.  */

            /* Determine if there is a thread at the same priority that isn't currently executing.  */
            do
            {
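
                /* In the tx_thread_smp_cores_excluded bit map, bit n is set when
                   the thread is not allowed to run on core n, so shifting by the
                   mapped core isolates this core's exclusion bit.  */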

                /* Isolate the exclusion for this core.  */
                excluded =  (next_thread -> tx_thread_smp_cores_excluded >> mapped_core) & ((ULONG) 1);

                /* Determine if the next thread has preemption-threshold set or is excluded from running on the
                   mapped core.  */
                if ((next_thread -> tx_thread_preempt_threshold < next_thread -> tx_thread_priority) ||
                    (excluded == ((ULONG) 1)))
                {

                    /* Set the rebalance flag.  */
                    rebalance =  TX_TRUE;

                    /* Get out of the loop.  We need to rebalance the list when we detect preemption-threshold.  */
                    break;
                }
                else
                {

                    /* Is the next thread already in the execute list?  */
                    if (next_thread != _tx_thread_execute_ptr[next_thread -> tx_thread_smp_core_mapped])
                    {

                        /* No, we can place this thread in the position the relinquishing thread
                           was in.  */

                        /* Remember this index in the thread control block.  */
                        next_thread -> tx_thread_smp_core_mapped =  mapped_core;

                        /* Setup the entry in the execution list.  */
                        _tx_thread_execute_ptr[mapped_core] =  next_thread;

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

                        /* Debug entry.  */
                        _tx_thread_smp_debug_entry_insert(1, 0, next_thread);
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                        /* Increment the number of thread relinquishes.  */
                        thread_ptr -> tx_thread_performance_relinquish_count++;

                        /* Increment the total number of thread relinquish operations.  */
                        _tx_thread_performance_relinquish_count++;

                        /* There is another thread ready to run that will be scheduled upon return.  */
                        _tx_thread_performance_non_idle_return_count++;
#endif

#ifdef TX_ENABLE_STACK_CHECKING

                        /* Check this thread's stack.  */
                        TX_THREAD_STACK_CHECK(next_thread)
#endif

#ifndef TX_NOT_INTERRUPTABLE

                        /* Increment the preempt disable flag in order to keep the protection.  */
                        _tx_thread_preempt_disable++;

                        /* Restore interrupts.  */
                        TX_RESTORE
#endif

                        /* Transfer control to the system so the scheduler can execute
                           the next thread.  */
                        _tx_thread_system_return();

#ifdef TX_NOT_INTERRUPTABLE

                        /* Restore interrupts.  */
                        TX_RESTORE
#endif

                        /* Set the finished flag.  */
                        finished =  TX_TRUE;
                    }

                    /* Move to the next thread at this priority.  */
                    next_thread =  next_thread -> tx_thread_ready_next;
                }
            } while ((next_thread != thread_ptr) && (finished == TX_FALSE));

            /* Determine if we are finished.  */
            if (finished == TX_FALSE)
            {

                /* No other thread is ready at this priority... simply return.  */

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

                /* Debug entry.  */
                _tx_thread_smp_debug_entry_insert(1, 0, thread_ptr);
#endif

                /* Restore interrupts.  */
                TX_RESTORE

                /* Set the finished flag.  */
                finished =  TX_TRUE;
            }
        }
        else
        {

            /* Preemption-threshold is in force.  */

            /* Set the rebalance flag.  */
            rebalance =  TX_TRUE;
        }
    }

    /* Determine if preemption-threshold is in force.  */
    if (thread_ptr -> tx_thread_preempt_threshold < priority)
    {

        /* Set the rebalance flag.  */
        rebalance =  TX_TRUE;

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array.  */
        map_index =  priority/((UINT) 32);
#endif
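
        /* The preempted-thread bookkeeping below uses one 32-bit map per group
           of 32 priorities: MAP_INDEX selects the group (and resolves to 0 when
           TX_MAX_PRIORITIES <= 32), and a set bit means a thread at that
           priority was preempted while holding preemption-threshold.  This
           thread's bit is cleared here because it is relinquishing.  */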

        /* Ensure that this thread's priority is clear in the preempt map.  */
        TX_MOD32_BIT_SET(priority, priority_bit)
        _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

        /* Determine if there are any other bits set in this preempt map.  */
        if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
        {

            /* No, clear the active bit to signify this preempted map has nothing set.  */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active & (~(priority_bit));
        }
#endif

        /* Clear the entry in the preempted list.  */
        _tx_thread_preemption_threshold_list[priority] =  TX_NULL;

        /* Is this thread the one currently scheduled over preemption-threshold?  */
        if (_tx_thread_preemption__threshold_scheduled == thread_ptr)
        {

            /* Yes, set the scheduled preemption-threshold thread to NULL.  */
            _tx_thread_preemption__threshold_scheduled =  TX_NULL;
        }

        /* Calculate the first thread with preemption-threshold active.  */
#if TX_MAX_PRIORITIES > 32
        if (_tx_thread_preempted_map_active != ((ULONG) 0))
#else
        if (_tx_thread_preempted_maps[0] != ((ULONG) 0))
#endif
        {
#if TX_MAX_PRIORITIES > 32

            /* Calculate the index to find the next highest priority thread ready for execution.  */
            priority_map =  _tx_thread_preempted_map_active;

            /* Calculate the lowest bit set in the priority map.  */
            TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)

            /* Calculate the base priority as well.  */
            base_priority =  map_index * ((UINT) 32);
#else

            /* Setup the base priority to zero.  */
            base_priority =  ((UINT) 0);
#endif

            /* Setup temporary preempted map.  */
            priority_map =  _tx_thread_preempted_maps[MAP_INDEX];

            /* Calculate the lowest bit set in the priority map.  */
            TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit_set)

            /* Move priority bit set into priority bit.  */
            priority_bit =  (ULONG) priority_bit_set;

            /* Setup the highest priority preempted thread.  */
            next_preempted =  base_priority + priority_bit;

            /* Pickup the previously preempted thread.  */
            preempted_thread =  _tx_thread_preemption_threshold_list[next_preempted];

            /* Setup the preempted thread.  */
            _tx_thread_preemption__threshold_scheduled =  preempted_thread;
        }
#else

        /* Is this thread the one scheduled over preemption-threshold?  */
        if (thread_ptr == _tx_thread_preemption__threshold_scheduled)
        {

            /* Yes, clear the scheduled preemption-threshold thread.  */
            _tx_thread_preemption__threshold_scheduled =  TX_NULL;
        }
#endif
    }

    /* Check to see if there is still work to do.  */
    if (finished == TX_FALSE)
    {

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

        /* Debug entry.  */
        _tx_thread_smp_debug_entry_insert(1, 0, thread_ptr);
#endif

        /* Determine if we need to rebalance the execute list.  */
        if (rebalance == TX_TRUE)
        {

            /* Rebalance the execute list.  */
            _tx_thread_smp_rebalance_execute_list(core_index);
        }

        /* Determine if this thread needs to return to the system.  */
        if (_tx_thread_execute_ptr[core_index] != thread_ptr)
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* Increment the number of thread relinquishes.  */
            thread_ptr -> tx_thread_performance_relinquish_count++;

            /* Increment the total number of thread relinquish operations.  */
            _tx_thread_performance_relinquish_count++;

            /* Determine if an idle system return is present.  */
            if (_tx_thread_execute_ptr[core_index] == TX_NULL)
            {

                /* Yes, increment the return to idle return count.  */
                _tx_thread_performance_idle_return_count++;
            }
            else
            {

                /* No, there is another thread ready to run and will be scheduled upon return.  */
                _tx_thread_performance_non_idle_return_count++;
            }
#endif

#ifdef TX_ENABLE_STACK_CHECKING

            /* Pickup new thread pointer.  */
            thread_ptr =  _tx_thread_execute_ptr[core_index];

            /* Check this thread's stack.  */
            TX_THREAD_STACK_CHECK(thread_ptr)
#endif

#ifndef TX_NOT_INTERRUPTABLE

            /* Increment the preempt disable flag in order to keep the protection.  */
            _tx_thread_preempt_disable++;

            /* Restore interrupts.  */
            TX_RESTORE
#endif

            /* Transfer control to the system so the scheduler can execute
               the next thread.  */
            _tx_thread_system_return();

#ifdef TX_NOT_INTERRUPTABLE

            /* Restore interrupts.  */
            TX_RESTORE
#endif
        }
        else
        {

            /* Restore interrupts.  */
            TX_RESTORE
        }
    }
}