1 /***************************************************************************
2  * Copyright (c) 2024 Microsoft Corporation
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the MIT License which is available at
6  * https://opensource.org/licenses/MIT.
7  *
8  * SPDX-License-Identifier: MIT
9  **************************************************************************/
10 
11 
12 /**************************************************************************/
13 /**************************************************************************/
14 /**                                                                       */
15 /** ThreadX Component                                                     */
16 /**                                                                       */
17 /**   Thread                                                              */
18 /**                                                                       */
19 /**************************************************************************/
20 /**************************************************************************/
21 
22 #define TX_SOURCE_CODE
23 #define TX_THREAD_SMP_SOURCE_CODE
24 
25 
26 /* Include necessary system files.  */
27 
28 #include "tx_api.h"
29 #include "tx_thread.h"
30 
31 
32 #ifdef TX_DISABLE_INLINE
33 
34 /* Define the routine to calculate the lowest set bit.  */
35 
_tx_thread_lowest_set_bit_calculate(ULONG map)36 UINT  _tx_thread_lowest_set_bit_calculate(ULONG map)
37 {
38 UINT    bit_set;
39 
40     if ((map & ((ULONG) 0x1)) != ((ULONG) 0))
41     {
42         bit_set = ((UINT) 0);
43     }
44     else
45     {
46         map =  map & (ULONG) ((~map) + ((ULONG) 1));
47         if (map < ((ULONG) 0x100))
48         {
49             bit_set = ((UINT) 1);
50         }
51         else if (map < ((ULONG) 0x10000))
52         {
53             bit_set =  ((UINT) 9);
54             map =  map >> ((UINT) 8);
55         }
56         else if (map < ((ULONG) 0x01000000))
57         {
58             bit_set = ((UINT) 17);
59             map = map >> ((UINT) 16);
60         }
61         else
62         {
63             bit_set = ((UINT) 25);
64             map = map >> ((UINT) 24);
65         }
66         if (map >= ((ULONG) 0x10))
67         {
68             map = map >> ((UINT) 4);
69             bit_set = bit_set + ((UINT) 4);
70         }
71         if (map >= ((ULONG) 0x4))
72         {
73             map = map >> ((UINT) 2);
74             bit_set = bit_set + ((UINT) 2);
75         }
76         bit_set = bit_set - (UINT) (map & (ULONG) 0x1);
77     }
78 
79     return(bit_set);
80 }
81 
82 
/* Define the next priority macro. Note, that this may be overridden
   by a port specific definition.  */

#if TX_MAX_PRIORITIES > 32

/* Find the next ready priority at or after (numerically >=) the supplied
   priority, scanning across the multiple 32-bit priority bit maps.
   Returns TX_MAX_PRIORITIES when no ready priority is found.  */
UINT _tx_thread_smp_next_priority_find(UINT priority)
{
ULONG           map_index;
ULONG           local_priority_map_active;
ULONG           local_priority_map;
ULONG           priority_bit;
ULONG           first_bit_set;
ULONG           found_priority;

    /* Default to "no ready priority found".  */
    found_priority =  ((UINT) TX_MAX_PRIORITIES);
    if (priority < ((UINT) TX_MAX_PRIORITIES))
    {

        /* Select the 32-bit map containing the starting priority and mask
           off all bits below the starting priority within that map.  */
        map_index =  priority/((UINT) 32);
        local_priority_map =  _tx_thread_priority_maps[map_index];
        priority_bit =        (((ULONG) 1) << (priority % ((UINT) 32)));
        local_priority_map =  local_priority_map & ~(priority_bit - ((UINT)1));
        if (local_priority_map != ((ULONG) 0))
        {

            /* A ready priority exists in this map - locate its first set bit.  */
            TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
            found_priority =  (map_index * ((UINT) 32)) + first_bit_set;
        }
        else
        {
            /* Move to next map index.  */
            map_index++;
            if (map_index < (((UINT) TX_MAX_PRIORITIES)/((UINT) 32)))
            {

                /* Mask off the map-active bits below the next index, then
                   look for the first remaining map with any ready priority.  */
                priority_bit =               (((ULONG) 1) << (map_index));
                local_priority_map_active =  _tx_thread_priority_map_active & ~(priority_bit - ((UINT) 1));
                if (local_priority_map_active != ((ULONG) 0))
                {

                    /* Locate the first active map, then the first ready
                       priority within that map.  */
                    TX_LOWEST_SET_BIT_CALCULATE(local_priority_map_active, map_index)
                    local_priority_map =  _tx_thread_priority_maps[map_index];
                    TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
                    found_priority =  (map_index * ((UINT) 32)) + first_bit_set;
                }
            }
        }
    }
    return(found_priority);
}
#else

/* Find the next ready priority at or after (numerically >=) the supplied
   priority using the single 32-bit priority map.  Returns
   TX_MAX_PRIORITIES when no ready priority is found.  */
UINT _tx_thread_smp_next_priority_find(UINT priority)
{
UINT            first_bit_set;
ULONG           local_priority_map;
UINT            next_priority;

    /* Shift the map so that bit 0 corresponds to the starting priority.  */
    local_priority_map =  _tx_thread_priority_maps[0];
    local_priority_map =  local_priority_map >> priority;
    next_priority =  priority;
    if (local_priority_map == ((ULONG) 0))
    {

        /* No ready priority at or after the requested priority.  */
        next_priority =  ((UINT) TX_MAX_PRIORITIES);
    }
    else
    {
        if (next_priority >= ((UINT) TX_MAX_PRIORITIES))
        {

            /* Requested priority is out of range.  */
            next_priority =  ((UINT) TX_MAX_PRIORITIES);
        }
        else
        {

            /* The offset of the first set bit is the distance from the
               requested priority to the next ready priority.  */
            TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
            next_priority =  priority + first_bit_set;
        }
    }

    return(next_priority);
}
#endif
160 
161 
_tx_thread_smp_schedule_list_clear(void)162 void  _tx_thread_smp_schedule_list_clear(void)
163 {
164 #if TX_THREAD_SMP_MAX_CORES > 6
165 UINT    i;
166 #endif
167 
168 
169     /* Clear the schedule list.  */
170     _tx_thread_smp_schedule_list[0] =  TX_NULL;
171 #if TX_THREAD_SMP_MAX_CORES > 1
172     _tx_thread_smp_schedule_list[1] =  TX_NULL;
173 #if TX_THREAD_SMP_MAX_CORES > 2
174     _tx_thread_smp_schedule_list[2] =  TX_NULL;
175 #if TX_THREAD_SMP_MAX_CORES > 3
176     _tx_thread_smp_schedule_list[3] =  TX_NULL;
177 #if TX_THREAD_SMP_MAX_CORES > 4
178     _tx_thread_smp_schedule_list[4] =  TX_NULL;
179 #if TX_THREAD_SMP_MAX_CORES > 5
180     _tx_thread_smp_schedule_list[5] =  TX_NULL;
181 #if TX_THREAD_SMP_MAX_CORES > 6
182 
183     /* Loop to clear the remainder of the schedule list.  */
184     i =  ((UINT) 6);
185 
186 #ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
187 
188     while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
189 #else
190 
191     while (i < _tx_thread_smp_max_cores)
192 #endif
193     {
194         /* Clear entry in schedule list.  */
195         _tx_thread_smp_schedule_list[i] =  TX_NULL;
196 
197         /* Move to next index.  */
198         i++;
199     }
200 #endif
201 #endif
202 #endif
203 #endif
204 #endif
205 #endif
206 }
207 
/* Clear the execute list, i.e. the per-core list of threads the scheduler
   will actually run.  The first six entries are cleared with unrolled
   stores for speed; any remaining cores are cleared in a loop.  */
VOID  _tx_thread_smp_execute_list_clear(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif

    /* Clear the execute list.  */
    _tx_thread_execute_ptr[0] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_execute_ptr[1] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_execute_ptr[2] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_execute_ptr[3] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_execute_ptr[4] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_execute_ptr[5] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the execute list.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    /* Static core count: clear up to the compile-time maximum.  */
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    /* Dynamic core count: clear up to the run-time maximum.  */
    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Clear entry in execute list.  */
        _tx_thread_execute_ptr[j] =  TX_NULL;

        /* Move to next index.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}
253 
254 
/* Seed the schedule list from the current execute list so the mapping
   logic can start from what is presently running on each core.  The first
   six entries are copied with unrolled stores; any remaining cores are
   copied in a loop.  */
VOID  _tx_thread_smp_schedule_list_setup(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif

    /* Copy the execute list into the schedule list.  */
    _tx_thread_smp_schedule_list[0] =  _tx_thread_execute_ptr[0];
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_smp_schedule_list[1] =  _tx_thread_execute_ptr[1];
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_smp_schedule_list[2] =  _tx_thread_execute_ptr[2];
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_smp_schedule_list[3] =  _tx_thread_execute_ptr[3];
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_smp_schedule_list[4] =  _tx_thread_execute_ptr[4];
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_smp_schedule_list[5] =  _tx_thread_execute_ptr[5];
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to setup the remainder of the schedule list.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    /* Static core count: copy up to the compile-time maximum.  */
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    /* Dynamic core count: copy up to the run-time maximum.  */
    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Setup entry in schedule list.  */
        _tx_thread_smp_schedule_list[j] =  _tx_thread_execute_ptr[j];

        /* Move to next index.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}
298 
299 
#ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT

/* Interrupt the target core when a scheduling change requires it.  No
   interrupt is issued when the target is the calling core, when the target
   core is idle (no current thread), when it is already running the
   specified thread, or when it is not executing at thread level.  */
VOID  _tx_thread_smp_core_interrupt(TX_THREAD *thread_ptr, UINT current_core, UINT target_core)
{

TX_THREAD   *current_thread;


    /* Make sure this is a different core, since there is no need to interrupt the current core for
       a scheduling change.  */
    if (current_core != target_core)
    {

        /* Yes, a different core is present.  */

        /* Pickup the currently executing thread.  */
        current_thread =  _tx_thread_current_ptr[target_core];

        /* Determine if they are the same.  */
        if ((current_thread != TX_NULL) && (thread_ptr != current_thread))
        {

            /* Not the same and not NULL... determine if the core is running at thread level.  */
            if (_tx_thread_system_state[target_core] < TX_INITIALIZE_IN_PROGRESS)
            {

                /* Preempt the mapped thread.  */
                _tx_thread_smp_core_preempt(target_core);
            }
        }
    }
}
#endif
332 
333 
#ifdef TX_THREAD_SMP_WAKEUP_LOGIC

/* Wake up the target core via the application-supplied wakeup macro.  */
VOID  _tx_thread_smp_core_wakeup(UINT current_core, UINT target_core)
{

    /* Only a different core needs an explicit wakeup - the current core is
       already running this code.  */
    if (current_core != target_core)
    {

        /* Invoke the application's wakeup macro for the target core.  */
        TX_THREAD_SMP_WAKEUP(target_core);
    }
}
#endif
348 
349 
/* Copy the schedule list into the execute list for every core, issuing an
   inter-core interrupt and/or wakeup for any core other than the caller's
   whose thread assignment requires it.  core_index is the index of the
   calling core.  */
VOID  _tx_thread_smp_execute_list_setup(UINT core_index)
{

TX_THREAD   *schedule_thread;
UINT        i;


    /* Loop to copy the schedule list into the execution list.  */
    i =  ((UINT) 0);
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    /* Static core count: process up to the compile-time maximum.  */
    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    /* Dynamic core count: process up to the run-time maximum.  */
    while (i < _tx_thread_smp_max_cores)
#endif
    {

        /* Pickup the thread to schedule.  */
        schedule_thread =  _tx_thread_smp_schedule_list[i];

        /* Copy the schedule list into the execution list.  */
        _tx_thread_execute_ptr[i] =  schedule_thread;

        /* If necessary, interrupt the core with the new thread to schedule.  */
        _tx_thread_smp_core_interrupt(schedule_thread, core_index, i);

#ifdef TX_THREAD_SMP_WAKEUP_LOGIC

        /* Does this need to be waked up?  */
        if ((i != core_index) && (schedule_thread != TX_NULL))
        {

            /* Wakeup based on application's macro.  */
            TX_THREAD_SMP_WAKEUP(i);
        }
#endif
        /* Move to next index.  */
        i++;
    }
}
391 
392 
/* Build and return a bit map of the cores that currently have no thread in
   the execute list, where bit n set means core n is available.  The first
   six cores are checked with unrolled code; the rest in a loop.  */
ULONG  _tx_thread_smp_available_cores_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
ULONG   available_cores;

    /* A NULL execute-list entry means the corresponding core is available.  */
    available_cores =  ((ULONG) 0);
    if (_tx_thread_execute_ptr[0] == TX_NULL)
    {
        available_cores =  ((ULONG) 1);
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    if (_tx_thread_execute_ptr[1] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 2);
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    if (_tx_thread_execute_ptr[2] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 4);
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    if (_tx_thread_execute_ptr[3] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 8);
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    if (_tx_thread_execute_ptr[4] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 0x10);
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    if (_tx_thread_execute_ptr[5] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 0x20);
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to check the remainder of the cores.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if this core is available.  */
        if (_tx_thread_execute_ptr[j] == TX_NULL)
        {
            available_cores =  available_cores | (((ULONG) 1) << j);
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(available_cores);
}
461 
462 
/* Return the OR of the allowed-cores bit maps of every thread currently in
   the execute list, i.e. the set of cores any scheduled thread could run
   on.  The first six cores are checked with unrolled code; the rest in a
   loop.  */
ULONG  _tx_thread_smp_possible_cores_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
ULONG       possible_cores;
TX_THREAD   *thread_ptr;

    /* Accumulate each scheduled thread's allowed-cores mask.  */
    possible_cores =  ((ULONG) 0);
    thread_ptr =  _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr =  _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr =  _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr =  _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr =  _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr =  _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to check the remainder of the cores.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if this core has a thread scheduled.  */
        thread_ptr =  _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {
            possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(possible_cores);
}
539 
540 
/* Return the numerically largest (i.e. lowest preference) priority among
   all threads currently in the execute list.  Returns 0 when no thread is
   scheduled, since lowest_priority starts at 0.  The first six cores are
   checked with unrolled code; the rest in a loop.  */
UINT  _tx_thread_smp_lowest_priority_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
TX_THREAD   *thread_ptr;
UINT        lowest_priority;

    /* Track the largest priority value seen so far.  */
    lowest_priority =  ((UINT) 0);
    thread_ptr =  _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr =  _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr =  _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr =  _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr =  _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr =  _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to check the remainder of the cores.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if this core has a thread scheduled.  */
        thread_ptr =  _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {

            /* Is this the new lowest priority?  */
            if (thread_ptr -> tx_thread_priority > lowest_priority)
            {
                lowest_priority =  thread_ptr -> tx_thread_priority;
            }
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(lowest_priority);
}
640 
641 
/* Attempt to remap the currently scheduled threads onto different cores so
   that schedule_thread can also be placed.  The search is a breadth-first
   traversal: starting from the cores schedule_thread may use, each visited
   core's currently mapped thread is examined in turn for an alternative
   core, until some thread in the displacement chain can move to an
   available core.  On success the schedule list and the moved threads'
   core mappings are updated and the core chosen for schedule_thread is
   returned; otherwise TX_THREAD_SMP_MAX_CORES is returned.
   Parameters:
     schedule_thread       - thread needing a core
     available_cores       - bit map of cores with no thread scheduled
     thread_possible_cores - cores schedule_thread is allowed to run on
     test_possible_cores   - cores still eligible for examination.  */
UINT  _tx_thread_smp_remap_solution_find(TX_THREAD *schedule_thread, ULONG available_cores, ULONG thread_possible_cores, ULONG test_possible_cores)
{

UINT            core;
UINT            previous_core;
ULONG           test_cores;
ULONG           last_thread_cores;
UINT            queue_first, queue_last;
UINT            core_queue[TX_THREAD_SMP_MAX_CORES-1];
TX_THREAD       *thread_ptr;
TX_THREAD       *last_thread;
TX_THREAD       *thread_remap_list[TX_THREAD_SMP_MAX_CORES];


    /* Clear the last thread cores in the search.  */
    last_thread_cores =  ((ULONG) 0);

    /* Set the last thread pointer to NULL.  */
    last_thread =  TX_NULL;

    /* Setup the core queue indices.  */
    queue_first =  ((UINT) 0);
    queue_last =   ((UINT) 0);

    /* Build a list of possible cores for this thread to execute on, starting
       with the previously mapped core.  */
    core =  schedule_thread -> tx_thread_smp_core_mapped;
    if ((thread_possible_cores & (((ULONG) 1) << core)) != ((ULONG) 0))
    {

        /* Remember this potential mapping.  */
        thread_remap_list[core] =   schedule_thread;
        core_queue[queue_last] =    core;

        /* Move to next slot.  */
        queue_last++;

        /* Clear this core.  */
        thread_possible_cores =  thread_possible_cores & ~(((ULONG) 1) << core);
    }

    /* Loop to add additional possible cores.  */
    while (thread_possible_cores != ((ULONG) 0))
    {

        /* Determine the first possible core.  */
        test_cores =  thread_possible_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Clear this core.  */
        thread_possible_cores =  thread_possible_cores & ~(((ULONG) 1) << core);

        /* Remember this potential mapping.  */
        thread_remap_list[core] =  schedule_thread;
        core_queue[queue_last] =   core;

        /* Move to next slot.  */
        queue_last++;
    }

    /* Loop to evaluate the potential thread mappings, against what is already mapped.
       This is the breadth-first search over the displacement chains.  */
    do
    {

        /* Pickup the next entry.  */
        core = core_queue[queue_first];

        /* Move to next slot.  */
        queue_first++;

        /* Retrieve the thread from the current mapping.  */
        thread_ptr =  _tx_thread_smp_schedule_list[core];

        /* Determine if there is a thread currently mapped to this core.  */
        if (thread_ptr != TX_NULL)
        {

            /* Determine the cores available for this thread.  */
            thread_possible_cores =  thread_ptr -> tx_thread_smp_cores_allowed;
            thread_possible_cores =  test_possible_cores & thread_possible_cores;

            /* Are there any possible cores for this thread?  */
            if (thread_possible_cores != ((ULONG) 0))
            {

                /* Determine if there are cores available for this thread.  */
                if ((thread_possible_cores & available_cores) != ((ULONG) 0))
                {

                    /* Yes, remember the final thread and cores that are valid for this thread.  */
                    last_thread_cores =  thread_possible_cores & available_cores;
                    last_thread =        thread_ptr;

                    /* We are done - get out of the loop!  */
                    break;
                }
                else
                {

                    /* Remove cores that will be added to the list.  */
                    test_possible_cores =  test_possible_cores & ~(thread_possible_cores);

                    /* Loop to add this thread to the potential mapping list.  */
                    do
                    {

                        /* Calculate the core.  */
                        test_cores =  thread_possible_cores;
                        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

                        /* Clear this core.  */
                        thread_possible_cores =  thread_possible_cores & ~(((ULONG) 1) << core);

                        /* Remember this thread for remapping.  */
                        thread_remap_list[core] =  thread_ptr;

                        /* Remember this core.  */
                        core_queue[queue_last] =  core;

                        /* Move to next slot.  */
                        queue_last++;

                    } while (thread_possible_cores != ((ULONG) 0));
                }
            }
        }
    } while (queue_first != queue_last);

    /* Was a remapping solution found?  */
    if (last_thread != TX_NULL)
    {

        /* Walk the displacement chain backwards, moving each thread to the
           core vacated by its predecessor.  */

        /* Pickup the core of the last thread to remap.  */
        core =  last_thread -> tx_thread_smp_core_mapped;

        /* Pickup the thread from the remapping list.  */
        thread_ptr =  thread_remap_list[core];

        /* Loop until we arrive at the thread we have been trying to map.  */
        while (thread_ptr != schedule_thread)
        {

            /* Move this thread in the schedule list.  */
            _tx_thread_smp_schedule_list[core] =  thread_ptr;

            /* Remember the previous core.  */
            previous_core =  core;

            /* Pickup the core of thread to remap.  */
            core =  thread_ptr -> tx_thread_smp_core_mapped;

            /* Save the new core mapping for this thread.  */
            thread_ptr -> tx_thread_smp_core_mapped =  previous_core;

            /* Move the next thread.  */
            thread_ptr =  thread_remap_list[core];
        }

        /* Save the remaining thread in the updated schedule list.  */
        _tx_thread_smp_schedule_list[core] =  thread_ptr;

        /* Update this thread's core mapping.  */
        thread_ptr -> tx_thread_smp_core_mapped =  core;

        /* Finally, setup the last thread in the remapping solution.  */
        test_cores =  last_thread_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Setup the last thread.  */
        _tx_thread_smp_schedule_list[core] =     last_thread;

        /* Remember the core mapping for this thread.  */
        last_thread -> tx_thread_smp_core_mapped =  core;
    }
    else
    {

        /* Set core to the maximum value in order to signal a remapping solution was not found.  */
        core =  ((UINT) TX_THREAD_SMP_MAX_CORES);
    }

    /* Return core to the caller.  */
    return(core);
}
826 
827 
/* Build a list of currently scheduled threads whose priority is numerically
   larger (i.e. lower preference) than the supplied priority and can
   therefore be preempted.  The list is sorted with the lowest-priority
   thread first; equal-priority threads keep their ready-list order.
   Returns the OR of the allowed-cores masks of all scheduled threads.  */
ULONG  _tx_thread_smp_preemptable_threads_get(UINT priority, TX_THREAD *possible_preemption_list[TX_THREAD_SMP_MAX_CORES])
{

UINT        i, j, k;
TX_THREAD   *thread_ptr;
TX_THREAD   *next_thread;
TX_THREAD   *search_thread;
TX_THREAD   *list_head;
ULONG       possible_cores =  ((ULONG) 0);


    /* Clear the possible preemption list.  */
    possible_preemption_list[0] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    possible_preemption_list[1] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    possible_preemption_list[2] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    possible_preemption_list[3] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    possible_preemption_list[4] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    possible_preemption_list[5] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the possible preemption list.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Clear entry in possible preemption list.  */
        possible_preemption_list[j] =  TX_NULL;

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif

    /* Loop to build a list of threads of less priority.  */
    i =  ((UINT) 0);
    j =  ((UINT) 0);
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (i < _tx_thread_smp_max_cores)
#endif
    {

        /* Pickup the currently mapped thread.  */
        thread_ptr =  _tx_thread_execute_ptr[i];

        /* Is there a thread scheduled for this core?  */
        if (thread_ptr != TX_NULL)
        {

            /* Update the possible cores bit map.  */
            possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;

            /* Can this thread be preempted?  */
            if (priority < thread_ptr -> tx_thread_priority)
            {

                /* Thread that can be added to the preemption possible list.  */

                /* Yes, this scheduled thread is lower priority, so add it to the preemption possible list.  */
                possible_preemption_list[j] =  thread_ptr;

                /* Move to next entry in preemption possible list.  */
                j++;
            }
        }

        /* Move to next core.  */
        i++;
    }

    /* Check to see if there are at least two threads that can be preempted.  */
    if (j > ((UINT) 1))
    {

        /* Yes, loop through the preemption possible list and sort by priority
           (selection-style sort, lowest-priority thread first).  */
        i =  ((UINT) 0);
        do
        {

            /* Pickup preemptable thread.  */
            thread_ptr =  possible_preemption_list[i];

            /* Initialize the search index.  */
            k =  i + ((UINT) 1);

            /* Loop to get the lowest priority thread at the front of the list.  */
            while (k < j)
            {

                /* Pickup the next thread to evaluate.  */
                next_thread =  possible_preemption_list[k];

                /* Is this thread lower priority?  */
                if (next_thread -> tx_thread_priority > thread_ptr -> tx_thread_priority)
                {

                    /* Yes, swap the threads.  Note k is deliberately not
                       advanced here - the new entry at k is re-examined on
                       the next pass and falls through to the else path.  */
                    possible_preemption_list[i] =  next_thread;
                    possible_preemption_list[k] =  thread_ptr;
                    thread_ptr =  next_thread;
                }
                else
                {

                    /* Compare the thread priorities.  */
                    if (next_thread -> tx_thread_priority == thread_ptr -> tx_thread_priority)
                    {

                        /* Equal priority threads...  see which is in the ready list first.  */
                        search_thread =   thread_ptr -> tx_thread_ready_next;

                        /* Pickup the list head.  */
                        list_head =  _tx_thread_priority_list[thread_ptr -> tx_thread_priority];

                        /* Now loop to see if the next thread is after the current thread preemption.
                           The ready list is circular, so the walk stops at the list head.  */
                        while (search_thread != list_head)
                        {

                            /* Have we found the next thread?  */
                            if (search_thread == next_thread)
                            {

                                /* Yes, swap the threads.  */
                                possible_preemption_list[i] =  next_thread;
                                possible_preemption_list[k] =  thread_ptr;
                                thread_ptr =  next_thread;
                                break;
                            }

                            /* Move to the next thread.  */
                            search_thread =  search_thread -> tx_thread_ready_next;
                        }
                    }

                    /* Move to examine the next possible preemptable thread.  */
                    k++;
                }
            }

            /* We have found the lowest priority thread to preempt, now find the next lowest.  */
            i++;
        }
        while (i < (j-((UINT) 1)));
    }

    /* Return the possible cores.  */
    return(possible_cores);
}
995 
/* Move a ready thread to a new priority level for the simple case (no
   preemption-threshold interaction): unlink the thread from its current
   circular, doubly-linked ready list, update its priority members, and
   relink it at the tail of the list for the new priority, keeping the
   ready-priority bit map(s) consistent throughout.

   thread_ptr    - pointer to the ready thread whose priority is changing
   new_priority  - requested new priority (lower number = higher priority)

   NOTE(review): this routine modifies the global ready lists and priority
   maps with no locking of its own — presumably the caller holds the
   scheduler protection; confirm against callers.  */
VOID  _tx_thread_smp_simple_priority_change(TX_THREAD *thread_ptr, UINT new_priority)
{

UINT            priority;       /* Thread's current (old) priority.  */
ULONG           priority_bit;   /* Scratch bit built by the map macros.  */
TX_THREAD       *head_ptr;      /* Head of the destination priority list.  */
TX_THREAD       *tail_ptr;      /* Tail of the destination priority list.  */
#if TX_MAX_PRIORITIES > 32
UINT            map_index;      /* Index into _tx_thread_priority_maps[]; presumably read
                                   via the MAP_INDEX macro below — do not rename.  */
#endif

    /* Pickup the priority.  */
    priority =  thread_ptr -> tx_thread_priority;

    /* Determine if there are other threads at this priority that are
       ready.  The ready list is circular, so a thread whose next pointer
       is itself is the sole member of the list.  */
    if (thread_ptr -> tx_thread_ready_next != thread_ptr)
    {

        /* Yes, there are other threads at this priority ready.  */

        /* Just remove this thread from the priority list.  */
        (thread_ptr -> tx_thread_ready_next) -> tx_thread_ready_previous =    thread_ptr -> tx_thread_ready_previous;
        (thread_ptr -> tx_thread_ready_previous) -> tx_thread_ready_next =    thread_ptr -> tx_thread_ready_next;

        /* Determine if this is the head of the priority list.  */
        if (_tx_thread_priority_list[priority] == thread_ptr)
        {

            /* Update the head pointer of this priority list.  */
            _tx_thread_priority_list[priority] =  thread_ptr -> tx_thread_ready_next;
        }
    }
    else
    {

        /* This is the only thread at this priority ready to run.  Set the head
           pointer to NULL.  */
        _tx_thread_priority_list[priority] =    TX_NULL;

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array.  */
        map_index =  priority/((UINT) 32);
#endif

        /* Clear this priority bit in the ready priority bit map.  */
        /* NOTE(review): MAP_INDEX presumably expands to map_index when
           TX_MAX_PRIORITIES > 32 and to 0 otherwise — see tx_thread.h.  */
        TX_MOD32_BIT_SET(priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

        /* Determine if there are any other bits set in this priority map.  */
        if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
        {

            /* No, clear the active bit to signify this priority map has nothing set.  */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_map_active =  _tx_thread_priority_map_active & (~(priority_bit));
        }
#endif
    }

    /* Determine if the actual thread priority should be setup, which is the
       case if the new priority is higher than the priority inheritance.
       (Higher priority means a numerically smaller value.)  */
    if (new_priority < thread_ptr -> tx_thread_inherit_priority)
    {

        /* Change thread priority to the new user's priority.  */
        thread_ptr -> tx_thread_priority =           new_priority;
        thread_ptr -> tx_thread_preempt_threshold =  new_priority;
    }
    else
    {

        /* Change thread priority to the priority inheritance.  */
        thread_ptr -> tx_thread_priority =           thread_ptr -> tx_thread_inherit_priority;
        thread_ptr -> tx_thread_preempt_threshold =  thread_ptr -> tx_thread_inherit_priority;
    }

    /* Now, place the thread at the new priority level.  */
    /* NOTE(review): the thread is linked into the list for new_priority even
       when tx_thread_priority was just set to the inherit priority above —
       presumably callers only invoke this routine when new_priority is the
       effective priority; verify against the callers of this function.  */

    /* Determine if there are other threads at this priority that are
       ready.  */
    head_ptr =  _tx_thread_priority_list[new_priority];
    if (head_ptr != TX_NULL)
    {

        /* Yes, there are other threads at this priority already ready.  */

        /* Just add this thread to the end of the circular priority list.  */
        tail_ptr =                                 head_ptr -> tx_thread_ready_previous;
        tail_ptr -> tx_thread_ready_next =         thread_ptr;
        head_ptr -> tx_thread_ready_previous =     thread_ptr;
        thread_ptr -> tx_thread_ready_previous =   tail_ptr;
        thread_ptr -> tx_thread_ready_next =       head_ptr;
    }
    else
    {

        /* First thread at this priority ready.  Add to the front of the list,
           linked to itself since the list is circular.  */
        _tx_thread_priority_list[new_priority] =   thread_ptr;
        thread_ptr -> tx_thread_ready_next =       thread_ptr;
        thread_ptr -> tx_thread_ready_previous =   thread_ptr;

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array.  */
        map_index =  new_priority/((UINT) 32);

        /* Set the active bit to remember that the priority map has something set.  */
        TX_DIV32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_map_active =  _tx_thread_priority_map_active | priority_bit;
#endif

        /* Or in the thread's priority bit.  */
        TX_MOD32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
    }
}
1116 
1117 #endif
1118 
1119 
1120