1 /**************************************************************************/
2 /* */
3 /* Copyright (c) Microsoft Corporation. All rights reserved. */
4 /* */
5 /* This software is licensed under the Microsoft Software License */
6 /* Terms for Microsoft Azure RTOS. Full text of the license can be */
7 /* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
8 /* and in the root directory of this software. */
9 /* */
10 /**************************************************************************/
11
12
13 /**************************************************************************/
14 /**************************************************************************/
15 /** */
16 /** ThreadX Component */
17 /** */
18 /** Thread - High Level SMP Support */
19 /** */
20 /**************************************************************************/
21 /**************************************************************************/
22
23 #define TX_SOURCE_CODE
24 #define TX_THREAD_SMP_SOURCE_CODE
25
26
27 /* Include necessary system files. */
28
29 #include "tx_api.h"
30 #include "tx_initialize.h"
31 #include "tx_timer.h"
32 #include "tx_thread.h"
33
34
35 /**************************************************************************/
36 /* */
37 /* FUNCTION RELEASE */
38 /* */
39 /* _tx_thread_smp_rebalance_execute_list PORTABLE SMP */
40 /* 6.1 */
41 /* AUTHOR */
42 /* */
43 /* William E. Lamie, Microsoft Corporation */
44 /* */
45 /* DESCRIPTION */
46 /* */
/*    This function is responsible for mapping ready ThreadX threads to  */
/*    cores in the SMP environment.  The basic idea is that the standard */
/*    ThreadX ready list is traversed to build the                       */
/*    _tx_thread_execute_ptr list.  Each index represents a core and the */
/*    corresponding entry in this array contains the thread that should  */
/*    be executed by that core.  If the core was previously running a    */
/*    different thread, it will be preempted and restarted so it can run */
/*    the new thread.                                                    */
54 /* */
55 /* INPUT */
56 /* */
57 /* None */
58 /* */
59 /* OUTPUT */
60 /* */
61 /* None */
62 /* */
63 /* CALLS */
64 /* */
65 /* _tx_thread_smp_execute_list_clear Clear the thread execute list */
66 /* _tx_thread_smp_execute_list_setup Setup the thread execute list */
67 /* _tx_thread_smp_next_priority_find Find next priority with one */
68 /* or more ready threads */
69 /* _tx_thread_smp_remap_solution_find Attempt to remap threads to */
70 /* schedule another thread */
71 /* _tx_thread_smp_schedule_list_clear Clear the thread schedule list*/
72 /* */
73 /* CALLED BY */
74 /* */
75 /* _tx_mutex_priority_change Mutex priority change */
76 /* _tx_thread_create Thread create */
77 /* _tx_thread_preemption_change Thread preemption change */
78 /* _tx_thread_priority_change Thread priority change */
79 /* _tx_thread_relinquish Thread relinquish */
80 /* _tx_thread_resume Thread resume */
81 /* _tx_thread_smp_core_exclude Thread SMP core exclude */
82 /* _tx_thread_system_resume Thread system resume */
83 /* _tx_thread_system_suspend Thread suspend */
84 /* _tx_thread_time_slice Thread time-slice */
85 /* */
86 /* RELEASE HISTORY */
87 /* */
88 /* DATE NAME DESCRIPTION */
89 /* */
90 /* 09-30-2020 William E. Lamie Initial Version 6.1 */
91 /* */
92 /**************************************************************************/
void  _tx_thread_smp_rebalance_execute_list(UINT core_index)
{

UINT            i, j, core;
UINT            next_priority;
UINT            last_priority;
TX_THREAD       *schedule_thread;
#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
TX_THREAD       *mapped_thread;
#endif
TX_THREAD       *preempted_thread;
ULONG           possible_cores;
ULONG           thread_possible_cores;
ULONG           available_cores;
ULONG           test_possible_cores;
ULONG           test_cores;
UINT            this_pass_complete;
UINT            loop_finished;

#ifdef TX_THREAD_SMP_EQUAL_PRIORITY
TX_THREAD       *highest_priority_thread;
#endif
#ifndef TX_DISABLE_PREEMPTION_THRESHOLD
ULONG           priority_bit;
#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif
#endif


    /* It is assumed that the preempt disable flag is still set at this point.  */

    /* Pickup the last scheduled thread with preemption-threshold enabled.  */
    preempted_thread =  _tx_thread_preemption__threshold_scheduled;

    /* Clear the schedule list.  */
    _tx_thread_smp_schedule_list_clear();

    /* Initialize the next priority to 0, the highest priority.  */
    next_priority =  ((UINT) 0);

    /* Initialize the last priority.  */
    last_priority =  ((UINT) 0);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    /* Set the possible cores bit map to all cores.  */
    possible_cores =  ((ULONG) TX_THREAD_SMP_CORE_MASK);
#else

    /* Set the possible cores bit map to all cores.  */
    possible_cores =  (((ULONG) 1) << _tx_thread_smp_max_cores) - 1;
#endif

    /* Setup the available cores bit map.  */
    available_cores =  possible_cores;

    /* Clear the schedule thread pointer.  */
    schedule_thread =  TX_NULL;

#ifdef TX_THREAD_SMP_EQUAL_PRIORITY

    /* Set the highest priority thread to NULL.  */
    highest_priority_thread =  TX_NULL;
#endif

    /* Loop to rebuild the schedule list.  */
    i =  ((UINT) 0);
    loop_finished =  TX_FALSE;
    do
    {

        /* Clear the pass complete flag, which is used to skip the remaining processing
           of this loop on certain conditions.  */
        this_pass_complete =  TX_FALSE;

        /* Determine if there is a thread to schedule.  */
        if (schedule_thread == TX_NULL)
        {

            /* Calculate the next ready priority.  */
            next_priority =  _tx_thread_smp_next_priority_find(next_priority);

            /* Determine if there are no more threads to execute.  */
            if (next_priority == ((UINT) TX_MAX_PRIORITIES))
            {

                /* Break out of loop.  */
                loop_finished =  TX_TRUE;
                this_pass_complete =  TX_TRUE;
            }
            else
            {

                /* Determine if a thread was executed with preemption-threshold set.  */
                if (preempted_thread != TX_NULL)
                {

                    /* Yes, a thread was previously preempted.  Let's first see if we reached the
                       interrupted preemption-threshold level.  */
                    if (next_priority >= preempted_thread -> tx_thread_preempt_threshold)
                    {

                        /* Yes, now let's see if we are within the preemption-threshold level.  */
                        if (next_priority <= preempted_thread -> tx_thread_priority)
                        {

                            /* Yes, move the next priority to the preempted priority.  */
                            next_priority =  preempted_thread -> tx_thread_priority;

                            /* Setup the schedule thread to the preempted thread.  */
                            schedule_thread =  preempted_thread;

                            /* Start at the top of the loop.  */
                            this_pass_complete =  TX_TRUE;
                        }
                        else
                        {

                            /* Nothing else is allowed to execute after the preemption-threshold thread.  */
                            next_priority =  ((UINT) TX_MAX_PRIORITIES);

                            /* Break out of loop.  */
                            loop_finished =  TX_TRUE;
                            this_pass_complete =  TX_TRUE;
                        }
                    }
                }
            }

            /* Determine if this pass through the loop is already complete.  */
            if (this_pass_complete == TX_FALSE)
            {

                /* Pickup the next thread to schedule.  */
                schedule_thread =  _tx_thread_priority_list[next_priority];
            }
        }

        /* Determine if this pass through the loop is already complete.  */
        if (this_pass_complete == TX_FALSE)
        {

            /* Determine what the possible cores are for this thread.  */
            thread_possible_cores =  schedule_thread -> tx_thread_smp_cores_allowed;

            /* Apply the current possible cores.  */
            thread_possible_cores =  thread_possible_cores & (available_cores | possible_cores);

            /* Determine if it is possible to schedule this thread.  */
            if (thread_possible_cores == ((ULONG) 0))
            {

                /* No, this thread can't be scheduled.  */

                /* Look at the next thread at the same priority level.  */
                schedule_thread =  schedule_thread -> tx_thread_ready_next;

                /* Determine if this is the head of the list.  */
                if (schedule_thread == _tx_thread_priority_list[next_priority])
                {

                    /* Set the schedule thread to NULL to force examination of the next priority level.  */
                    schedule_thread =  TX_NULL;

                    /* Move to the next priority level.  */
                    next_priority++;

                    /* Determine if there are no more threads to execute.  */
                    if (next_priority == ((UINT) TX_MAX_PRIORITIES))
                    {

                        /* Break out of loop.  */
                        loop_finished =  TX_TRUE;
                    }
                }
            }
            else
            {

                /* It is possible to schedule this thread.  */

                /* Determine if this thread has preemption-threshold set.  */
                if (schedule_thread -> tx_thread_preempt_threshold < schedule_thread -> tx_thread_priority)
                {

                    /* Yes, preemption-threshold is set.  */

                    /* Determine if the last priority is above the preemption-threshold.  If not, we can't
                       schedule this thread with preemption-threshold set.  */
                    if ((last_priority >= schedule_thread -> tx_thread_preempt_threshold) && (i != ((UINT) 0)))
                    {

                        /* A thread was found that violates the next thread to be scheduled's preemption-threshold.  We will simply
                           skip this thread and see if there is anything else we can schedule.  */

                        /* Look at the next thread at the same priority level.  */
                        schedule_thread =  schedule_thread -> tx_thread_ready_next;

                        /* Determine if this is the head of the list.  */
                        if (schedule_thread == _tx_thread_priority_list[next_priority])
                        {

                            /* Set the schedule thread to NULL to force examination of the next priority level.  */
                            schedule_thread =  TX_NULL;

                            /* Move to the next priority level.  */
                            next_priority++;

                            /* Determine if there are no more threads to execute.  */
                            if (next_priority == ((UINT) TX_MAX_PRIORITIES))
                            {

                                /* Break out of loop.  */
                                loop_finished =  TX_TRUE;
                            }
                        }

                        /* Restart the loop.  */
                        this_pass_complete =  TX_TRUE;
                    }
                }

                /* Determine if this pass through the loop is already complete.  */
                if (this_pass_complete == TX_FALSE)
                {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                    /* Initialize index to an invalid value.  */
                    j =  ((UINT) TX_THREAD_SMP_MAX_CORES);
#endif

                    /* Determine if there is an available core for this thread to execute on.  */
                    if ((thread_possible_cores & available_cores) != ((ULONG) 0))
                    {

                        /* Pickup the last executed core for this thread.  */
                        j =  schedule_thread -> tx_thread_smp_core_mapped;

                        /* Is this core valid and available?  */
                        if ((thread_possible_cores & available_cores & (((ULONG) 1) << j)) == ((ULONG) 0))
                        {

                            /* No, we must find the next core for this thread.  */
                            test_cores =  (thread_possible_cores & available_cores);
                            TX_LOWEST_SET_BIT_CALCULATE(test_cores, j)

                            /* Setup the last executed core for this thread.  */
                            schedule_thread -> tx_thread_smp_core_mapped =  j;
                        }

                        /* Place this thread on this core.  */
                        _tx_thread_smp_schedule_list[j] =  schedule_thread;

                        /* Clear the associated available cores bit.  */
                        available_cores =  available_cores & ~(((ULONG) 1) << j);
                    }
                    else
                    {

                        /* Note that we know that the thread must have at least one core excluded at this point,
                           since we didn't find a match and we have available cores.  */

                        /* Now we need to see if one of the other threads in the non-excluded cores can be moved to make room
                           for this thread.  */

                        /* Determine the possible core remapping attempt.  */
                        test_possible_cores =  possible_cores & ~(thread_possible_cores);

                        /* Attempt to remap the cores in order to schedule this thread.  */
                        core =  _tx_thread_smp_remap_solution_find(schedule_thread, available_cores, thread_possible_cores, test_possible_cores);

                        /* Determine if remapping was successful.  */
                        if (core != ((UINT) TX_THREAD_SMP_MAX_CORES))
                        {

                            /* Yes, remapping was successful.  Update the available cores accordingly.  */
                            available_cores =  available_cores & ~(((ULONG) 1) << core);
                        }
                        else
                        {

                            /* We couldn't assign the thread to any of the cores possible for the thread.  */

                            /* Check to see if the thread is the last thread preempted.  */
                            if (schedule_thread == preempted_thread)
                            {

                                /* To honor the preemption-threshold, we cannot schedule any more threads.  */
                                loop_finished =  TX_TRUE;
                            }
                            else
                            {

                                /* Update the available cores for the next pass so we don't waste time looking at them again!  */
                                possible_cores =  possible_cores & (~thread_possible_cores);

                                /* No, we couldn't load the thread because none of the required cores were available.  Look at the next thread at the same priority level.  */
                                schedule_thread =  schedule_thread -> tx_thread_ready_next;

                                /* Determine if this is the head of the list.  */
                                if (schedule_thread == _tx_thread_priority_list[next_priority])
                                {

                                    /* Set the schedule thread to NULL to force examination of the next priority level.  */
                                    schedule_thread =  TX_NULL;

                                    /* Move to the next priority level.  */
                                    next_priority++;

                                    /* Determine if there are no more threads to execute.  */
                                    if (next_priority == ((UINT) TX_MAX_PRIORITIES))
                                    {

                                        /* Break out of loop.  */
                                        loop_finished =  TX_TRUE;
                                    }
                                }
                            }

                            /* Restart the loop.  */
                            this_pass_complete =  TX_TRUE;
                        }
                    }

                    /* Determine if this pass through the loop is already complete.  */
                    if (this_pass_complete == TX_FALSE)
                    {

#ifdef TX_THREAD_SMP_EQUAL_PRIORITY

                        /* Determine if this is the highest priority thread.  */
                        if (highest_priority_thread == TX_NULL)
                        {

                            /* No highest priority yet, remember this thread.  */
                            highest_priority_thread =  schedule_thread;
                        }
#endif

                        /* Increment the number of threads loaded.  */
                        i++;

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                        /* Determine if the thread was mapped.  */
                        if (j != ((UINT) TX_THREAD_SMP_MAX_CORES))
                        {

                            /* Pickup the currently mapped thread.  */
                            mapped_thread =  _tx_thread_execute_ptr[j];

                            /* Determine if preemption is present.  */
                            if ((mapped_thread != TX_NULL) && (schedule_thread != mapped_thread))
                            {

                                /* Determine if the previously mapped thread is still ready.  */
                                if (mapped_thread -> tx_thread_state == TX_READY)
                                {

                                    /* Determine if the caller is an interrupt or from a thread.  */
                                    if (_tx_thread_system_state[core_index] == ((ULONG) 0))
                                    {

                                        /* Caller is a thread, so this is a solicited preemption.  */
                                        _tx_thread_performance_solicited_preemption_count++;

                                        /* Increment the thread's solicited preemption counter.  */
                                        mapped_thread -> tx_thread_performance_solicited_preemption_count++;
                                    }
                                    else
                                    {

                                        /* Is this an interrupt?  */
                                        if (_tx_thread_system_state[core_index] < TX_INITIALIZE_IN_PROGRESS)
                                        {

                                            /* Caller is an interrupt, so this is an interrupt preemption.  */
                                            _tx_thread_performance_interrupt_preemption_count++;

                                            /* Increment the thread's interrupt preemption counter.  */
                                            mapped_thread -> tx_thread_performance_interrupt_preemption_count++;
                                        }
                                    }
                                }
                            }
                        }
#endif

                        /* Determine if this thread has preemption-threshold set.  */
                        if (schedule_thread -> tx_thread_preempt_threshold < schedule_thread -> tx_thread_priority)
                        {

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                            /* Mark the bit map to show that a thread with preemption-threshold has been executed.  */
#if TX_MAX_PRIORITIES > 32

                            /* Calculate the index into the bit map array.  */
                            map_index =  (schedule_thread -> tx_thread_priority)/((UINT) 32);

                            /* Set the active bit to remember that the preempt map has something set.  */
                            TX_DIV32_BIT_SET(schedule_thread -> tx_thread_priority, priority_bit)
                            _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active | priority_bit;
#endif

                            /* Remember that this thread was executed with preemption-threshold set.  */
                            TX_MOD32_BIT_SET(schedule_thread -> tx_thread_priority, priority_bit)
                            _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] | priority_bit;

                            /* Place the thread in the preempted list indicating preemption-threshold is in force.  */
                            _tx_thread_preemption_threshold_list[schedule_thread -> tx_thread_priority] =  schedule_thread;
#endif

                            /* Set the last thread with preemption-threshold enabled.  */
                            _tx_thread_preemption__threshold_scheduled =  schedule_thread;

                            /* Now break out of the scheduling loop.  */
                            loop_finished =  TX_TRUE;
                        }
                        else
                        {

                            /* Remember the last priority.  */
                            last_priority =  next_priority;

                            /* Pickup the next ready thread at the current priority level.  */
                            schedule_thread =  schedule_thread -> tx_thread_ready_next;

                            /* Determine if this is the head of the list, which implies that we have exhausted this priority level.  */
                            if (schedule_thread == _tx_thread_priority_list[next_priority])
                            {

                                /* Set the schedule thread to NULL to force examination of the next priority level.  */
                                schedule_thread =  TX_NULL;

                                /* Move to the next priority level.  */
                                next_priority++;

#ifdef TX_THREAD_SMP_EQUAL_PRIORITY

                                /* Determine if there is a highest priority thread.  */
                                if (highest_priority_thread)
                                {

                                    /* Yes, break out of the loop, since only same priority threads can be
                                       scheduled in this mode.  */
                                    loop_finished =  TX_TRUE;
                                }
#endif

                                /* Determine if there are no more threads to execute.  */
                                if (next_priority == ((UINT) TX_MAX_PRIORITIES))
                                {

                                    /* Break out of loop.  */
                                    loop_finished =  TX_TRUE;
                                }
                            }
                        }
                    }
                }
            }
        }

        /* Determine if the loop is finished.  */
        if (loop_finished == TX_TRUE)
        {

            /* Finished, break the loop.  */
            break;
        }

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    } while (i < ((UINT) TX_THREAD_SMP_MAX_CORES));
#else

    } while (i < _tx_thread_smp_max_cores);
#endif

    /* Clear the execute list.  */
    _tx_thread_smp_execute_list_clear();

    /* Setup the execute list based on the updated schedule list.  */
    _tx_thread_smp_execute_list_setup(core_index);
}
581
582