/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE
#define TX_THREAD_SMP_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_thread.h"


#ifdef TX_DISABLE_INLINE

/* Define the routine to calculate the lowest set bit.  */

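/* The routine below isolates the lowest set bit via map & (~map + 1) and then
   narrows down its position byte-by-byte, then by nibble and bit-pair.
   Example (illustrative): for map = 0x28 (binary 101000), the isolated bit is
   0x08 and the routine returns 3.  */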
UINT  _tx_thread_lowest_set_bit_calculate(ULONG map)
{
UINT    bit_set;

    if ((map & ((ULONG) 0x1)) != ((ULONG) 0))
    {
        bit_set = ((UINT) 0);
    }
    else
    {
        map =  map & (ULONG) ((~map) + ((ULONG) 1));
        if (map < ((ULONG) 0x100))
        {
            bit_set = ((UINT) 1);
        }
        else if (map < ((ULONG) 0x10000))
        {
            bit_set =  ((UINT) 9);
            map =  map >> ((UINT) 8);
        }
        else if (map < ((ULONG) 0x01000000))
        {
            bit_set = ((UINT) 17);
            map = map >> ((UINT) 16);
        }
        else
        {
            bit_set = ((UINT) 25);
            map = map >> ((UINT) 24);
        }
        if (map >= ((ULONG) 0x10))
        {
            map = map >> ((UINT) 4);
            bit_set = bit_set + ((UINT) 4);
        }
        if (map >= ((ULONG) 0x4))
        {
            map = map >> ((UINT) 2);
            bit_set = bit_set + ((UINT) 2);
        }
        bit_set = bit_set - (UINT) (map & (ULONG) 0x1);
    }

    return(bit_set);
}


/* Define the next priority find routine. Note that this may be overridden
   by a port-specific definition.  */

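/* The next priority find routine returns the highest-priority ready level whose
   priority value is greater than or equal to the supplied priority, based on the
   ready priority bit map(s). If no such level is ready, TX_MAX_PRIORITIES is
   returned. Example (illustrative): with priorities 5 and 9 ready, a call with
   priority 6 returns 9.  */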
#if TX_MAX_PRIORITIES > 32

UINT _tx_thread_smp_next_priority_find(UINT priority)
{
ULONG           map_index;
ULONG           local_priority_map_active;
ULONG           local_priority_map;
ULONG           priority_bit;
ULONG           first_bit_set;
ULONG           found_priority;

    found_priority =  ((UINT) TX_MAX_PRIORITIES);
    if (priority < ((UINT) TX_MAX_PRIORITIES))
    {
        map_index =  priority/((UINT) 32);
        local_priority_map =  _tx_thread_priority_maps[map_index];
        priority_bit =        (((ULONG) 1) << (priority % ((UINT) 32)));
        local_priority_map =  local_priority_map & ~(priority_bit - ((UINT)1));
        if (local_priority_map != ((ULONG) 0))
        {
            TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
            found_priority =  (map_index * ((UINT) 32)) + first_bit_set;
        }
        else
        {
            /* Move to next map index.  */
            map_index++;
            if (map_index < (((UINT) TX_MAX_PRIORITIES)/((UINT) 32)))
            {
                priority_bit =               (((ULONG) 1) << (map_index));
                local_priority_map_active =  _tx_thread_priority_map_active & ~(priority_bit - ((UINT) 1));
                if (local_priority_map_active != ((ULONG) 0))
                {
                    TX_LOWEST_SET_BIT_CALCULATE(local_priority_map_active, map_index)
                    local_priority_map =  _tx_thread_priority_maps[map_index];
                    TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
                    found_priority =  (map_index * ((UINT) 32)) + first_bit_set;
                }
            }
        }
    }
    return(found_priority);
}
#else

UINT _tx_thread_smp_next_priority_find(UINT priority)
{
UINT            first_bit_set;
ULONG           local_priority_map;
UINT            next_priority;

    local_priority_map =  _tx_thread_priority_maps[0];
    local_priority_map =  local_priority_map >> priority;
    next_priority =  priority;
    if (local_priority_map == ((ULONG) 0))
    {
        next_priority =  ((UINT) TX_MAX_PRIORITIES);
    }
    else
    {
        if (next_priority >= ((UINT) TX_MAX_PRIORITIES))
        {
            next_priority =  ((UINT) TX_MAX_PRIORITIES);
        }
        else
        {
            TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
            next_priority =  priority + first_bit_set;
        }
    }

    return(next_priority);
}
#endif


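/* Clear every entry of the schedule list (set each entry to TX_NULL). The first
   six entries are cleared with straight-line code; any remaining entries are
   cleared in a loop bounded by TX_THREAD_SMP_MAX_CORES, or by the dynamic core
   count when TX_THREAD_SMP_DYNAMIC_CORE_MAX is defined.  */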
void  _tx_thread_smp_schedule_list_clear(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    i;
#endif


    /* Clear the schedule list.  */
    _tx_thread_smp_schedule_list[0] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_smp_schedule_list[1] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_smp_schedule_list[2] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_smp_schedule_list[3] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_smp_schedule_list[4] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_smp_schedule_list[5] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the schedule list.  */
    i =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (i < _tx_thread_smp_max_cores)
#endif
    {
        /* Clear entry in schedule list.  */
        _tx_thread_smp_schedule_list[i] =  TX_NULL;

        /* Move to next index.  */
        i++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}

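/* Clear every entry of the execute list (set each entry to TX_NULL), using the
   same unrolled-then-loop pattern as the schedule list clear above.  */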
VOID  _tx_thread_smp_execute_list_clear(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif

    /* Clear the execute list.  */
    _tx_thread_execute_ptr[0] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_execute_ptr[1] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_execute_ptr[2] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_execute_ptr[3] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_execute_ptr[4] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_execute_ptr[5] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the execute list.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Clear entry in execute list.  */
        _tx_thread_execute_ptr[j] =  TX_NULL;

        /* Move to next index.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}


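/* Copy the current execute list into the schedule list, which serves as the
   working copy while the scheduler rebuilds the thread-to-core assignments.  */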
VOID  _tx_thread_smp_schedule_list_setup(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif

    _tx_thread_smp_schedule_list[0] =  _tx_thread_execute_ptr[0];
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_smp_schedule_list[1] =  _tx_thread_execute_ptr[1];
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_smp_schedule_list[2] =  _tx_thread_execute_ptr[2];
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_smp_schedule_list[3] =  _tx_thread_execute_ptr[3];
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_smp_schedule_list[4] =  _tx_thread_execute_ptr[4];
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_smp_schedule_list[5] =  _tx_thread_execute_ptr[5];
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to setup the remainder of the schedule list.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Setup entry in schedule list.  */
        _tx_thread_smp_schedule_list[j] =  _tx_thread_execute_ptr[j];

        /* Move to next index.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}


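/* Built only when TX_THREAD_SMP_INTER_CORE_INTERRUPT is defined: if the target
   core is a different core that is executing a different thread at thread level,
   preempt it via _tx_thread_smp_core_preempt so it picks up its new execute list
   entry.  */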
#ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
VOID  _tx_thread_smp_core_interrupt(TX_THREAD *thread_ptr, UINT current_core, UINT target_core)
{

TX_THREAD   *current_thread;


    /* Make sure this is a different core, since there is no need to interrupt the current core for
       a scheduling change.  */
    if (current_core != target_core)
    {

        /* Yes, a different core is present.  */

        /* Pickup the currently executing thread.  */
        current_thread =  _tx_thread_current_ptr[target_core];

        /* Determine if they are the same.  */
        if ((current_thread != TX_NULL) && (thread_ptr != current_thread))
        {

            /* Not the same and not NULL... determine if the core is running at thread level.  */
            if (_tx_thread_system_state[target_core] < TX_INITIALIZE_IN_PROGRESS)
            {

                /* Preempt the mapped thread.  */
                _tx_thread_smp_core_preempt(target_core);
            }
        }
    }
}
#endif


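/* Built only when TX_THREAD_SMP_WAKEUP_LOGIC is defined: wake up the target core
   via the application-supplied TX_THREAD_SMP_WAKEUP macro, unless the target core
   is the calling core.  */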
#ifdef TX_THREAD_SMP_WAKEUP_LOGIC
VOID  _tx_thread_smp_core_wakeup(UINT current_core, UINT target_core)
{

    /* Determine if the core specified is not the current core - no need to wakeup the
       current core.  */
    if (target_core != current_core)
    {

        /* Wakeup based on application's macro.  */
        TX_THREAD_SMP_WAKEUP(target_core);
    }
}
#endif


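/* Copy the schedule list into the execute list and, for each core other than the
   calling core, interrupt and/or wake that core as needed so it picks up its new
   execute list entry.  */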
VOID  _tx_thread_smp_execute_list_setup(UINT core_index)
{

TX_THREAD   *schedule_thread;
UINT        i;


    /* Loop to copy the schedule list into the execution list.  */
    i =  ((UINT) 0);
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (i < _tx_thread_smp_max_cores)
#endif
    {

        /* Pickup the thread to schedule.  */
        schedule_thread =  _tx_thread_smp_schedule_list[i];

        /* Copy the schedule list into the execution list.  */
        _tx_thread_execute_ptr[i] =  schedule_thread;

        /* If necessary, interrupt the core with the new thread to schedule.  */
        _tx_thread_smp_core_interrupt(schedule_thread, core_index, i);

#ifdef TX_THREAD_SMP_WAKEUP_LOGIC

        /* Does this core need to be woken up?  */
        if ((i != core_index) && (schedule_thread != TX_NULL))
        {

            /* Wakeup based on application's macro.  */
            TX_THREAD_SMP_WAKEUP(i);
        }
#endif
        /* Move to next index.  */
        i++;
    }
}


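/* Return a bit map of available cores, where bit n is set if core n currently has
   no thread in the execute list. Example (illustrative): if only cores 0 and 2
   are idle, the return value is 0x5.  */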
ULONG  _tx_thread_smp_available_cores_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
ULONG   available_cores;

    available_cores =  ((ULONG) 0);
    if (_tx_thread_execute_ptr[0] == TX_NULL)
    {
        available_cores =  ((ULONG) 1);
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    if (_tx_thread_execute_ptr[1] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 2);
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    if (_tx_thread_execute_ptr[2] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 4);
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    if (_tx_thread_execute_ptr[3] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 8);
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    if (_tx_thread_execute_ptr[4] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 0x10);
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    if (_tx_thread_execute_ptr[5] == TX_NULL)
    {
        available_cores =  available_cores | ((ULONG) 0x20);
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to check the remaining cores.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if this core is available.  */
        if (_tx_thread_execute_ptr[j] == TX_NULL)
        {
            available_cores =  available_cores | (((ULONG) 1) << j);
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(available_cores);
}


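/* Return the OR of the allowed-core maps (tx_thread_smp_cores_allowed) of all
   threads currently in the execute list, i.e. the set of cores on which the
   currently scheduled threads could potentially run.  */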
ULONG  _tx_thread_smp_possible_cores_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
ULONG       possible_cores;
TX_THREAD   *thread_ptr;

    possible_cores =  ((ULONG) 0);
    thread_ptr =  _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr =  _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr =  _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr =  _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr =  _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr =  _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to check the remaining cores.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if a thread is scheduled on this core.  */
        thread_ptr =  _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {
            possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(possible_cores);
}


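/* Return the lowest priority (largest numerical priority value) found among the
   threads currently in the execute list. If no threads are scheduled, 0 is
   returned.  */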
UINT  _tx_thread_smp_lowest_priority_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
TX_THREAD   *thread_ptr;
UINT        lowest_priority;

    lowest_priority =  ((UINT) 0);
    thread_ptr =  _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr =  _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr =  _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr =  _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr =  _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr =  _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority =  thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to check the remaining cores.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if this core has a thread scheduled.  */
        thread_ptr =  _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {

            /* Is this the new lowest priority?  */
            if (thread_ptr -> tx_thread_priority > lowest_priority)
            {
                lowest_priority =  thread_ptr -> tx_thread_priority;
            }
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(lowest_priority);
}


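/* Attempt to find a remapping of already-scheduled threads that frees a core on
   which schedule_thread can run. The routine performs a breadth-first search over
   the cores the thread may use; when a chain of moves ending at an available core
   is found, the affected threads are rotated within the schedule list and the
   core given to the final displaced thread is returned. If no remapping solution
   exists, TX_THREAD_SMP_MAX_CORES is returned.  */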
UINT  _tx_thread_smp_remap_solution_find(TX_THREAD *schedule_thread, ULONG available_cores, ULONG thread_possible_cores, ULONG test_possible_cores)
{

UINT            core;
UINT            previous_core;
ULONG           test_cores;
ULONG           last_thread_cores;
UINT            queue_first, queue_last;
UINT            core_queue[TX_THREAD_SMP_MAX_CORES-1];
TX_THREAD       *thread_ptr;
TX_THREAD       *last_thread;
TX_THREAD       *thread_remap_list[TX_THREAD_SMP_MAX_CORES];


    /* Clear the last thread cores in the search.  */
    last_thread_cores =  ((ULONG) 0);

    /* Set the last thread pointer to NULL.  */
    last_thread =  TX_NULL;

    /* Setup the core queue indices.  */
    queue_first =  ((UINT) 0);
    queue_last =   ((UINT) 0);

    /* Build a list of possible cores for this thread to execute on, starting
       with the previously mapped core.  */
    core =  schedule_thread -> tx_thread_smp_core_mapped;
    if ((thread_possible_cores & (((ULONG) 1) << core)) != ((ULONG) 0))
    {

        /* Remember this potential mapping.  */
        thread_remap_list[core] =   schedule_thread;
        core_queue[queue_last] =    core;

        /* Move to next slot.  */
        queue_last++;

        /* Clear this core.  */
        thread_possible_cores =  thread_possible_cores & ~(((ULONG) 1) << core);
    }

    /* Loop to add additional possible cores.  */
    while (thread_possible_cores != ((ULONG) 0))
    {

        /* Determine the first possible core.  */
        test_cores =  thread_possible_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Clear this core.  */
        thread_possible_cores =  thread_possible_cores & ~(((ULONG) 1) << core);

        /* Remember this potential mapping.  */
        thread_remap_list[core] =  schedule_thread;
        core_queue[queue_last] =   core;

        /* Move to next slot.  */
        queue_last++;
    }

    /* Loop to evaluate the potential thread mappings against what is already mapped.  */
    do
    {

        /* Pickup the next entry.  */
        core = core_queue[queue_first];

        /* Move to next slot.  */
        queue_first++;

        /* Retrieve the thread from the current mapping.  */
        thread_ptr =  _tx_thread_smp_schedule_list[core];

        /* Determine if there is a thread currently mapped to this core.  */
        if (thread_ptr != TX_NULL)
        {

            /* Determine the cores available for this thread.  */
            thread_possible_cores =  thread_ptr -> tx_thread_smp_cores_allowed;
            thread_possible_cores =  test_possible_cores & thread_possible_cores;

            /* Are there any possible cores for this thread?  */
            if (thread_possible_cores != ((ULONG) 0))
            {

                /* Determine if there are cores available for this thread.  */
                if ((thread_possible_cores & available_cores) != ((ULONG) 0))
                {

                    /* Yes, remember the final thread and cores that are valid for this thread.  */
                    last_thread_cores =  thread_possible_cores & available_cores;
                    last_thread =        thread_ptr;

                    /* We are done - get out of the loop!  */
                    break;
                }
                else
                {

                    /* Remove cores that will be added to the list.  */
                    test_possible_cores =  test_possible_cores & ~(thread_possible_cores);

                    /* Loop to add this thread to the potential mapping list.  */
                    do
                    {

                        /* Calculate the core.  */
                        test_cores =  thread_possible_cores;
                        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

                        /* Clear this core.  */
                        thread_possible_cores =  thread_possible_cores & ~(((ULONG) 1) << core);

                        /* Remember this thread for remapping.  */
                        thread_remap_list[core] =  thread_ptr;

                        /* Remember this core.  */
                        core_queue[queue_last] =  core;

                        /* Move to next slot.  */
                        queue_last++;

                    } while (thread_possible_cores != ((ULONG) 0));
                }
            }
        }
    } while (queue_first != queue_last);

    /* Was a remapping solution found?  */
    if (last_thread != TX_NULL)
    {

        /* Pickup the core of the last thread to remap.  */
        core =  last_thread -> tx_thread_smp_core_mapped;

        /* Pickup the thread from the remapping list.  */
        thread_ptr =  thread_remap_list[core];

        /* Loop until we arrive at the thread we have been trying to map.  */
        while (thread_ptr != schedule_thread)
        {

            /* Move this thread in the schedule list.  */
            _tx_thread_smp_schedule_list[core] =  thread_ptr;

            /* Remember the previous core.  */
            previous_core =  core;

            /* Pickup the core of thread to remap.  */
            core =  thread_ptr -> tx_thread_smp_core_mapped;

            /* Save the new core mapping for this thread.  */
            thread_ptr -> tx_thread_smp_core_mapped =  previous_core;

            /* Move to the next thread.  */
            thread_ptr =  thread_remap_list[core];
        }

        /* Save the remaining thread in the updated schedule list.  */
        _tx_thread_smp_schedule_list[core] =  thread_ptr;

        /* Update this thread's core mapping.  */
        thread_ptr -> tx_thread_smp_core_mapped =  core;

        /* Finally, setup the last thread in the remapping solution.  */
        test_cores =  last_thread_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Setup the last thread.  */
        _tx_thread_smp_schedule_list[core] =     last_thread;

        /* Remember the core mapping for this thread.  */
        last_thread -> tx_thread_smp_core_mapped =  core;
    }
    else
    {

        /* Set core to the maximum value in order to signal a remapping solution was not found.  */
        core =  ((UINT) TX_THREAD_SMP_MAX_CORES);
    }

    /* Return core to the caller.  */
    return(core);
}


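/* Build possible_preemption_list[] with the threads currently in the execute list
   whose priority is lower (numerically greater) than the supplied priority,
   sorted so the lowest-priority thread appears first. The return value is the OR
   of the allowed-core maps of all threads in the execute list.  */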
ULONG  _tx_thread_smp_preemptable_threads_get(UINT priority, TX_THREAD *possible_preemption_list[])
{

UINT        i, j, k;
TX_THREAD   *thread_ptr;
TX_THREAD   *next_thread;
TX_THREAD   *search_thread;
TX_THREAD   *list_head;
ULONG       possible_cores =  ((ULONG) 0);


    /* Clear the possible preemption list.  */
    possible_preemption_list[0] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    possible_preemption_list[1] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    possible_preemption_list[2] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    possible_preemption_list[3] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    possible_preemption_list[4] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    possible_preemption_list[5] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the possible preemption list.  */
    j =  ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Clear entry in possible preemption list.  */
        possible_preemption_list[j] =  TX_NULL;

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif

    /* Loop to build a list of threads of lower priority.  */
    i =  ((UINT) 0);
    j =  ((UINT) 0);
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (i < _tx_thread_smp_max_cores)
#endif
    {

        /* Pickup the currently mapped thread.  */
        thread_ptr =  _tx_thread_execute_ptr[i];

        /* Is there a thread scheduled for this core?  */
        if (thread_ptr != TX_NULL)
        {

            /* Update the possible cores bit map.  */
            possible_cores =  possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;

            /* Can this thread be preempted?  */
            if (priority < thread_ptr -> tx_thread_priority)
            {

                /* Yes, this scheduled thread is lower priority, so add it to the preemption possible list.  */
                possible_preemption_list[j] =  thread_ptr;

                /* Move to next entry in preemption possible list.  */
                j++;
            }
        }

        /* Move to next core.  */
        i++;
    }

    /* Check to see if there is more than one thread that can be preempted.  */
    if (j > ((UINT) 1))
    {

        /* Yes, loop through the preemption possible list and sort by priority.  */
        i =  ((UINT) 0);
        do
        {

            /* Pickup preemptable thread.  */
            thread_ptr =  possible_preemption_list[i];

            /* Initialize the search index.  */
            k =  i + ((UINT) 1);

            /* Loop to get the lowest priority thread at the front of the list.  */
            while (k < j)
            {

                /* Pickup the next thread to evaluate.  */
                next_thread =  possible_preemption_list[k];

                /* Is this thread lower priority?  */
                if (next_thread -> tx_thread_priority > thread_ptr -> tx_thread_priority)
                {

                    /* Yes, swap the threads.  */
                    possible_preemption_list[i] =  next_thread;
                    possible_preemption_list[k] =  thread_ptr;
                    thread_ptr =  next_thread;
                }
                else
                {

                    /* Compare the thread priorities.  */
                    if (next_thread -> tx_thread_priority == thread_ptr -> tx_thread_priority)
                    {

                        /* Equal priority threads...  see which is in the ready list first.  */
                        search_thread =   thread_ptr -> tx_thread_ready_next;

                        /* Pickup the list head.  */
                        list_head =  _tx_thread_priority_list[thread_ptr -> tx_thread_priority];

                        /* Now loop to see if the next thread comes after the current thread in the ready list.  */
                        while (search_thread != list_head)
                        {

                            /* Have we found the next thread?  */
                            if (search_thread == next_thread)
                            {

                                /* Yes, swap the threads.  */
                                possible_preemption_list[i] =  next_thread;
                                possible_preemption_list[k] =  thread_ptr;
                                thread_ptr =  next_thread;
                                break;
                            }

                            /* Move to the next thread.  */
                            search_thread =  search_thread -> tx_thread_ready_next;
                        }
                    }

                    /* Move to examine the next possible preemptable thread.  */
                    k++;
                }
            }

            /* We have found the lowest priority thread to preempt, now find the next lowest.  */
            i++;
        }
        while (i < (j-((UINT) 1)));
    }

    /* Return the possible cores.  */
    return(possible_cores);
}

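/* Change a ready thread's priority without any preemption analysis: remove the
   thread from its current priority list (clearing the priority bit map entry if
   it was the last thread at that level), apply either new_priority or the
   inherited priority (whichever is the higher priority), and then place the
   thread on the ready list for new_priority, updating the bit maps.  */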
VOID  _tx_thread_smp_simple_priority_change(TX_THREAD *thread_ptr, UINT new_priority)
{

UINT            priority;
ULONG           priority_bit;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif

    /* Pickup the priority.  */
    priority =  thread_ptr -> tx_thread_priority;

    /* Determine if there are other threads at this priority that are
       ready.  */
    if (thread_ptr -> tx_thread_ready_next != thread_ptr)
    {

        /* Yes, there are other threads at this priority ready.  */

        /* Just remove this thread from the priority list.  */
        (thread_ptr -> tx_thread_ready_next) -> tx_thread_ready_previous =    thread_ptr -> tx_thread_ready_previous;
        (thread_ptr -> tx_thread_ready_previous) -> tx_thread_ready_next =    thread_ptr -> tx_thread_ready_next;

        /* Determine if this is the head of the priority list.  */
        if (_tx_thread_priority_list[priority] == thread_ptr)
        {

            /* Update the head pointer of this priority list.  */
            _tx_thread_priority_list[priority] =  thread_ptr -> tx_thread_ready_next;
        }
    }
    else
    {

        /* This is the only thread at this priority ready to run.  Set the head
           pointer to NULL.  */
        _tx_thread_priority_list[priority] =    TX_NULL;

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array.  */
        map_index =  priority/((UINT) 32);
#endif

        /* Clear this priority bit in the ready priority bit map.  */
        TX_MOD32_BIT_SET(priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

        /* Determine if there are any other bits set in this priority map.  */
        if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
        {

            /* No, clear the active bit to signify this priority map has nothing set.  */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_map_active =  _tx_thread_priority_map_active & (~(priority_bit));
        }
#endif
    }

    /* Determine if the actual thread priority should be setup, which is the
       case if the new priority is higher than the priority inheritance.  */
    if (new_priority < thread_ptr -> tx_thread_inherit_priority)
    {

        /* Change thread priority to the new user's priority.  */
        thread_ptr -> tx_thread_priority =           new_priority;
        thread_ptr -> tx_thread_preempt_threshold =  new_priority;
    }
    else
    {

        /* Change thread priority to the priority inheritance.  */
        thread_ptr -> tx_thread_priority =           thread_ptr -> tx_thread_inherit_priority;
        thread_ptr -> tx_thread_preempt_threshold =  thread_ptr -> tx_thread_inherit_priority;
    }

    /* Now, place the thread at the new priority level.  */

    /* Determine if there are other threads at this priority that are
       ready.  */
    head_ptr =  _tx_thread_priority_list[new_priority];
    if (head_ptr != TX_NULL)
    {

        /* Yes, there are other threads at this priority already ready.  */

        /* Just add this thread to the priority list.  */
        tail_ptr =                                 head_ptr -> tx_thread_ready_previous;
        tail_ptr -> tx_thread_ready_next =         thread_ptr;
        head_ptr -> tx_thread_ready_previous =     thread_ptr;
        thread_ptr -> tx_thread_ready_previous =   tail_ptr;
        thread_ptr -> tx_thread_ready_next =       head_ptr;
    }
    else
    {

        /* First thread at this priority ready.  Add to the front of the list.  */
        _tx_thread_priority_list[new_priority] =   thread_ptr;
        thread_ptr -> tx_thread_ready_next =       thread_ptr;
        thread_ptr -> tx_thread_ready_previous =   thread_ptr;

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array.  */
        map_index =  new_priority/((UINT) 32);

        /* Set the active bit to remember that the priority map has something set.  */
        TX_DIV32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_map_active =  _tx_thread_priority_map_active | priority_bit;
#endif

        /* Or in the thread's priority bit.  */
        TX_MOD32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
    }
}

#endif
