1 /**************************************************************************/
2 /*                                                                        */
3 /*       Copyright (c) Microsoft Corporation. All rights reserved.        */
4 /*                                                                        */
5 /*       This software is licensed under the Microsoft Software License   */
6 /*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
7 /*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
8 /*       and in the root directory of this software.                      */
9 /*                                                                        */
10 /**************************************************************************/
11 
12 
13 /**************************************************************************/
14 /**************************************************************************/
15 /**                                                                       */
16 /** ThreadX Component                                                     */
17 /**                                                                       */
18 /**   Thread                                                              */
19 /**                                                                       */
20 /**************************************************************************/
21 /**************************************************************************/
22 
23 #define TX_SOURCE_CODE
24 #define TX_THREAD_SMP_SOURCE_CODE
25 
26 
27 /* Include necessary system files.  */
28 
29 #include "tx_api.h"
30 #include "tx_initialize.h"
31 #include "tx_timer.h"
32 #include "tx_thread.h"
33 #include "tx_trace.h"
34 
35 
36 /**************************************************************************/
37 /*                                                                        */
38 /*  FUNCTION                                               RELEASE        */
39 /*                                                                        */
40 /*    _tx_thread_system_suspend                          PORTABLE SMP     */
41 /*                                                           6.1.11       */
42 /*  AUTHOR                                                                */
43 /*                                                                        */
44 /*    William E. Lamie, Microsoft Corporation                             */
45 /*                                                                        */
46 /*  DESCRIPTION                                                           */
47 /*                                                                        */
48 /*    This function suspends the specified thread and changes the thread  */
49 /*    state to the value specified.  Note: delayed suspension processing  */
50 /*    is handled outside of this routine.                                 */
51 /*                                                                        */
52 /*  INPUT                                                                 */
53 /*                                                                        */
54 /*    thread_ptr                            Pointer to thread to suspend  */
55 /*                                                                        */
56 /*  OUTPUT                                                                */
57 /*                                                                        */
58 /*    None                                                                */
59 /*                                                                        */
60 /*  CALLS                                                                 */
61 /*                                                                        */
62 /*    _tx_thread_smp_available_cores_get    Get available cores bitmap    */
63 /*    _tx_thread_smp_core_preempt           Preempt core for new thread   */
64 /*    _tx_thread_smp_execute_list_clear     Clear the thread execute list */
65 /*    _tx_thread_smp_execute_list_setup     Setup the thread execute list */
66 /*    _tx_thread_smp_next_priority_find     Find next priority with one   */
67 /*                                            or more ready threads       */
68 /*    _tx_thread_smp_possible_cores_get     Get possible cores bitmap     */
69 /*    [_tx_thread_smp_protect]              Get protection                */
70 /*    _tx_thread_smp_rebalance_execute_list Rebalance the execution list  */
71 /*    _tx_thread_smp_remap_solution_find    Attempt to remap threads to   */
72 /*                                            schedule another thread     */
73 /*    _tx_thread_smp_schedule_list_setup    Inherit schedule list from    */
74 /*                                            execute list                */
75 /*    _tx_thread_system_return              Return to system              */
76 /*                                                                        */
77 /*  CALLED BY                                                             */
78 /*                                                                        */
79 /*    _tx_thread_priority_change            Thread priority change        */
80 /*    _tx_thread_shell_entry                Thread shell function         */
81 /*    _tx_thread_sleep                      Thread sleep                  */
82 /*    _tx_thread_suspend                    Application thread suspend    */
83 /*    _tx_thread_terminate                  Thread terminate              */
84 /*    Other ThreadX Components                                            */
85 /*                                                                        */
86 /*  RELEASE HISTORY                                                       */
87 /*                                                                        */
88 /*    DATE              NAME                      DESCRIPTION             */
89 /*                                                                        */
90 /*  09-30-2020      William E. Lamie        Initial Version 6.1           */
91 /*  04-25-2022      Scott Larson            Modified comments and fixed   */
92 /*                                            loop to find next thread,   */
93 /*                                            resulting in version 6.1.11 */
94 /*                                                                        */
95 /**************************************************************************/
96 VOID  _tx_thread_system_suspend(TX_THREAD *thread_ptr)
97 {
98 
99 #ifndef TX_NOT_INTERRUPTABLE
100 
101 TX_INTERRUPT_SAVE_AREA
102 #endif
103 
104 UINT                        priority;
105 UINT                        i;
106 ULONG                       priority_bit;
107 ULONG                       combined_flags;
108 ULONG                       priority_map;
109 UINT                        core_index;
110 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
111 ULONG                       complex_path_possible;
112 UINT                        core;
113 ULONG                       possible_cores;
114 ULONG                       thread_possible_cores;
115 ULONG                       available_cores;
116 ULONG                       test_possible_cores;
117 UINT                        next_priority;
118 TX_THREAD                   *next_thread;
119 UINT                        loop_finished;
120 #endif
121 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
122 UINT                        next_preempted;
123 UINT                        base_priority;
124 UINT                        priority_bit_set;
125 TX_THREAD                   *preempted_thread;
126 #endif
127 #if TX_MAX_PRIORITIES > 32
128 UINT                        map_index;
129 #endif
130 
131 #ifndef TX_NO_TIMER
132 TX_TIMER_INTERNAL           **timer_list;
133 TX_TIMER_INTERNAL           *next_timer;
134 TX_TIMER_INTERNAL           *timer_ptr;
135 TX_TIMER_INTERNAL           *previous_timer;
136 ULONG                       expiration_time;
137 ULONG                       delta;
138 ULONG                       timeout;
139 #endif
140 
141 #ifdef TX_ENABLE_EVENT_TRACE
142 TX_TRACE_BUFFER_ENTRY       *entry_ptr =  TX_NULL;
143 ULONG                       time_stamp =  ((ULONG) 0);
144 #endif
145 UINT                        processing_complete;
146 
147 
148     /* Set the processing complete flag to false.  */
149     processing_complete =  TX_FALSE;
150 
151 #ifndef TX_NOT_INTERRUPTABLE
152 
153     /* Disable interrupts.  */
154     TX_DISABLE
155 #endif
156 
157     /* Pickup the index.  */
158     core_index =  TX_SMP_CORE_ID;
159 
160 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
161 
162     /* Debug entry.  */
163     _tx_thread_smp_debug_entry_insert(6, 1, thread_ptr);
164 #endif
165 
166 #ifndef TX_NO_TIMER
167 
168     /* Determine if a timeout needs to be activated.  */
169     if (thread_ptr == _tx_thread_current_ptr[core_index])
170     {
171 
172         /* Reset time slice for current thread.  */
173         _tx_timer_time_slice[core_index] =  thread_ptr -> tx_thread_new_time_slice;
174 
175         /* Pickup the wait option.  */
176         timeout =  thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks;
177 
178         /* Determine if an activation is needed.  */
179         if (timeout != TX_NO_WAIT)
180         {
181 
182             /* Make sure the suspension is not a wait-forever.  */
183             if (timeout != TX_WAIT_FOREVER)
184             {
185 
186                 /* Activate the thread timer with the timeout value setup in the caller. This is now done in-line
187                    for ThreadX SMP so the additional protection logic can be avoided.  */
188 
189                 /* Activate the thread's timeout timer.  */
190 
191                 /* Setup pointer to internal timer.  */
192                 timer_ptr =  &(thread_ptr -> tx_thread_timer);
193 
194                 /* Calculate the amount of time remaining for the timer.  */
195                 if (timeout > TX_TIMER_ENTRIES)
196                 {
197 
198                     /* Set expiration time to the maximum number of entries.  */
199                     expiration_time =  TX_TIMER_ENTRIES - ((ULONG) 1);
200                 }
201                 else
202                 {
203 
204                     /* Timer value fits in the timer entries.  */
205 
206                     /* Set the expiration time.  */
207                     expiration_time =  (UINT) (timeout - ((ULONG) 1));
208                 }
209 
210                 /* At this point, we are ready to put the timer on one of
211                    the timer lists.  */
212 
213                 /* Calculate the proper place for the timer.  */
214                 timer_list =  TX_TIMER_POINTER_ADD(_tx_timer_current_ptr, expiration_time);
215                 if (TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(timer_list) >= TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(_tx_timer_list_end))
216                 {
217 
218                     /* Wrap from the beginning of the list.  */
219                     delta =  TX_TIMER_POINTER_DIF(timer_list, _tx_timer_list_end);
220                     timer_list =  TX_TIMER_POINTER_ADD(_tx_timer_list_start, delta);
221                 }
222 
223                 /* Now put the timer on this list.  */
224                 if ((*timer_list) == TX_NULL)
225                 {
226 
227                     /* This list is NULL, just put the new timer on it.  */
228 
229                     /* Setup the links in this timer.  */
230                     timer_ptr -> tx_timer_internal_active_next =      timer_ptr;
231                     timer_ptr -> tx_timer_internal_active_previous =  timer_ptr;
232 
233                     /* Setup the list head pointer.  */
234                     *timer_list =  timer_ptr;
235                 }
236                 else
237                 {
238 
239                     /* This list is not NULL, add current timer to the end. */
240                     next_timer =                                        *timer_list;
241                     previous_timer =                                    next_timer -> tx_timer_internal_active_previous;
242                     previous_timer -> tx_timer_internal_active_next =   timer_ptr;
243                     next_timer -> tx_timer_internal_active_previous =   timer_ptr;
244                     timer_ptr -> tx_timer_internal_active_next =        next_timer;
245                     timer_ptr -> tx_timer_internal_active_previous =    previous_timer;
246                 }
247 
248                 /* Setup list head pointer.  */
249                 timer_ptr -> tx_timer_internal_list_head =  timer_list;
250             }
251         }
252     }
253 #endif
254 
255 
256 #ifdef TX_ENABLE_STACK_CHECKING
257 
258     /* Check this thread's stack.  */
259     TX_THREAD_STACK_CHECK(thread_ptr)
260 #endif
261 
262 
263 #ifndef TX_NOT_INTERRUPTABLE
264 
265     /* Decrement the preempt disable flag.  */
266     _tx_thread_preempt_disable--;
267 #endif
268 
269 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
270 
271     /* Increment the thread's suspend count.  */
272     thread_ptr -> tx_thread_performance_suspend_count++;
273 
274     /* Increment the total number of thread suspensions.  */
275     _tx_thread_performance_suspend_count++;
276 #endif
277 
278 
279 #ifndef TX_NOT_INTERRUPTABLE
280 
281     /* Check to make sure the thread suspending flag is still set.  If not, it
282        has already been resumed.  */
283     if ((thread_ptr -> tx_thread_suspending) == TX_TRUE)
284     {
285 #endif
286 
287         /* Thread state change.  */
288         TX_THREAD_STATE_CHANGE(thread_ptr, thread_ptr -> tx_thread_state)
289 
290         /* Log the thread status change.  */
291         TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, thread_ptr -> tx_thread_state)
292 
293 #ifdef TX_ENABLE_EVENT_TRACE
294 
295         /* If trace is enabled, save the current event pointer.  */
296         entry_ptr =  _tx_trace_buffer_current_ptr;
297 
298         /* Log the thread status change.  */
299         TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SUSPEND, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&priority), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]), TX_TRACE_INTERNAL_EVENTS)
300 
301         /* Save the time stamp for later comparison to verify that
302            the event hasn't been overwritten by the time we have
303            computed the next thread to execute.  */
304         if (entry_ptr != TX_NULL)
305         {
306 
307             /* Save time stamp.  */
308             time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
309         }
310 #endif
311 
312         /* Actually suspend this thread.  But first, clear the suspending flag.  */
313         thread_ptr -> tx_thread_suspending =  TX_FALSE;
314 
315         /* Pickup priority of thread.  */
316         priority =  thread_ptr -> tx_thread_priority;
317 
318 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
319 
320 #if TX_MAX_PRIORITIES > 32
321 
322         /* Calculate the index into the bit map array.  */
323         map_index =  priority/((UINT) 32);
324 #endif
325 
326         /* Determine if this thread has preemption-threshold set.  */
327         if (thread_ptr -> tx_thread_preempt_threshold < priority)
328         {
329 
330             /* Was this thread with preemption-threshold set actually preempted with preemption-threshold set?  */
331             if (_tx_thread_preemption_threshold_list[priority] == thread_ptr)
332             {
333 
334                 /* Clear the preempted list entry.  */
335                 _tx_thread_preemption_threshold_list[priority] =  TX_NULL;
336 
337                 /* Ensure that this thread's priority is clear in the preempt map.  */
338                 TX_MOD32_BIT_SET(priority, priority_bit)
339                 _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));
340 
341 #if TX_MAX_PRIORITIES > 32
342 
343                 /* Determine if there are any other bits set in this preempt map.  */
344                 if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
345                 {
346 
347                     /* No, clear the active bit to signify this preempted map has nothing set.  */
348                     TX_DIV32_BIT_SET(priority, priority_bit)
349                     _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active & (~(priority_bit));
350                 }
351 #endif
352             }
353         }
354 #endif
355 
356         /* Determine if this thread has global preemption disabled.  */
357         if (thread_ptr == _tx_thread_preemption__threshold_scheduled)
358         {
359 
360             /* Clear the global preemption disable flag.  */
361             _tx_thread_preemption__threshold_scheduled =  TX_NULL;
362 
363 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
364 
365             /* Clear the entry in the preempted list.  */
366             _tx_thread_preemption_threshold_list[thread_ptr -> tx_thread_priority] =  TX_NULL;
367 
368             /* Calculate the first thread with preemption-threshold active.  */
369 #if TX_MAX_PRIORITIES > 32
370             if (_tx_thread_preempted_map_active != ((ULONG) 0))
371 #else
372             if (_tx_thread_preempted_maps[0] != ((ULONG) 0))
373 #endif
374             {
375 #if TX_MAX_PRIORITIES > 32
376 
377                 /* Calculate the index to find the next highest priority thread ready for execution.  */
378                 priority_map =    _tx_thread_preempted_map_active;
379 
380                 /* Calculate the lowest bit set in the priority map. */
381                 TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)
382 
383                 /* Calculate the base priority as well.  */
384                 base_priority =  map_index * ((UINT) 32);
385 #else
386 
387                 /* Setup the base priority to zero.  */
388                 base_priority =   ((UINT) 0);
389 #endif
390 
391                 /* Setup temporary preempted map.  */
392                 priority_map =  _tx_thread_preempted_maps[MAP_INDEX];
393 
394                 /* Calculate the lowest bit set in the priority map. */
395                 TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit_set)
396 
397                 /* Move priority bit set into priority bit.  */
398                 priority_bit =  (ULONG) priority_bit_set;
399 
400                 /* Setup the highest priority preempted thread.  */
401                 next_preempted =  base_priority + priority_bit;
402 
403                 /* Pickup the next preempted thread.  */
404                 preempted_thread =  _tx_thread_preemption_threshold_list[next_preempted];
405 
406                 /* Setup the preempted thread.  */
407                 _tx_thread_preemption__threshold_scheduled =  preempted_thread;
408             }
409 #endif
410         }
411 
412         /* Determine if there are other threads at this priority that are
413            ready.  */
414         if (thread_ptr -> tx_thread_ready_next != thread_ptr)
415         {
416 
417             /* Yes, there are other threads at this priority ready.  */
418 
419 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
420 
421             /* Remember the head of the priority list.  */
422             next_thread =  _tx_thread_priority_list[priority];
423 #endif
424 
425             /* Just remove this thread from the priority list.  */
426             (thread_ptr -> tx_thread_ready_next) -> tx_thread_ready_previous =    thread_ptr -> tx_thread_ready_previous;
427             (thread_ptr -> tx_thread_ready_previous) -> tx_thread_ready_next =    thread_ptr -> tx_thread_ready_next;
428 
429             /* Determine if this is the head of the priority list.  */
430             if (_tx_thread_priority_list[priority] == thread_ptr)
431             {
432 
433                 /* Update the head pointer of this priority list.  */
434                 _tx_thread_priority_list[priority] =  thread_ptr -> tx_thread_ready_next;
435 
436 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
437 
438                 /* Update the next pointer as well.  */
439                 next_thread =  thread_ptr -> tx_thread_ready_next;
440 #endif
441             }
442         }
443         else
444         {
445 
446 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
447 
448             /* Remember the head of the priority list.  */
449             next_thread =  thread_ptr;
450 #endif
451 
452             /* This is the only thread at this priority ready to run.  Set the head
453                pointer to NULL.  */
454             _tx_thread_priority_list[priority] =    TX_NULL;
455 
456 #if TX_MAX_PRIORITIES > 32
457 
458             /* Calculate the index into the bit map array.  */
459             map_index =  priority/((UINT) 32);
460 #endif
461 
462             /* Clear this priority bit in the ready priority bit map.  */
463             TX_MOD32_BIT_SET(priority, priority_bit)
464             _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));
465 
466 #if TX_MAX_PRIORITIES > 32
467 
468             /* Determine if there are any other bits set in this priority map.  */
469             if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
470             {
471 
472                 /* No, clear the active bit to signify this priority map has nothing set.  */
473                 TX_DIV32_BIT_SET(priority, priority_bit)
474                 _tx_thread_priority_map_active =  _tx_thread_priority_map_active & (~(priority_bit));
475             }
476 #endif
477         }
478 
479 #if TX_MAX_PRIORITIES > 32
480 
481         /* Calculate the index to find the next highest priority thread ready for execution.  */
482         priority_map =    _tx_thread_priority_map_active;
483 
484         /* Determine if there is anything.   */
485         if (priority_map != ((ULONG) 0))
486         {
487 
488             /* Calculate the lowest bit set in the priority map. */
489             TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)
490         }
491 #endif
492 
493         /* Setup working variable for the priority map.  */
494         priority_map =    _tx_thread_priority_maps[MAP_INDEX];
495 
496         /* Make a quick check for no other threads ready for execution.  */
497         if (priority_map == ((ULONG) 0))
498         {
499 
500 #ifdef TX_ENABLE_EVENT_TRACE
501 
502             /* Check that the event time stamp is unchanged.  A different
503                timestamp means that a later event wrote over the thread
504                suspend event. In that case, do nothing here.  */
505             if ((entry_ptr != TX_NULL) && (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp))
506             {
507 
508                 /* Timestamp is the same, set the "next thread pointer" to NULL. This can
509                    be used by the trace analysis tool to show idle system conditions.  */
510 #ifdef TX_MISRA_ENABLE
511                 entry_ptr -> tx_trace_buffer_entry_info_4 =  ((ULONG) 0);
512 #else
513                 entry_ptr -> tx_trace_buffer_entry_information_field_4 =  ((ULONG) 0);
514 #endif
515             }
516 #endif
517 
518             /* Check to see if the thread is in the execute list.  */
519             i =  thread_ptr -> tx_thread_smp_core_mapped;
520 
521             /* Clear the entry in the thread execution list.  */
522             _tx_thread_execute_ptr[i] =  TX_NULL;
523 
524 #ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
525 
526             /* Determine if we need to preempt the core.  */
527             if (i != core_index)
528             {
529 
530                 if (_tx_thread_system_state[i] < TX_INITIALIZE_IN_PROGRESS)
531                 {
532 
533                     /* Preempt the mapped thread.  */
534                     _tx_thread_smp_core_preempt(i);
535                 }
536             }
537 #endif
538 
539 #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
540 
541             /* Does this need to be waked up?  */
542             if ((i != core_index) && (_tx_thread_execute_ptr[i] != TX_NULL))
543             {
544 
545                 /* Wakeup based on application's macro.  */
546                 TX_THREAD_SMP_WAKEUP(i);
547             }
548 #endif
549 
550 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
551 
552             /* Debug entry.  */
553             _tx_thread_smp_debug_entry_insert(7, 1, thread_ptr);
554 #endif
555 
556 #ifdef TX_ENABLE_EVENT_TRACE
557 
558             /* Check that the event time stamp is unchanged.  A different
559                timestamp means that a later event wrote over the system suspend
560                event.  In that case, do nothing here.  */
561             if ((entry_ptr != TX_NULL) && (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp))
562             {
563 
564                 /* Timestamp is the same, set the "next thread pointer" to the next thread scheduled
565                    for this core.  */
566 #ifdef TX_MISRA_ENABLE
567                 entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
568 #else
569                 entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
570 #endif
571             }
572 #endif
573 
574             /* Check to see if the caller is a thread and the preempt disable flag is clear.  */
575             combined_flags =  ((ULONG) _tx_thread_system_state[core_index]);
576             combined_flags =  combined_flags | ((ULONG) _tx_thread_preempt_disable);
577             if (combined_flags == ((ULONG) 0))
578             {
579 
580 
581 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
582 
583                 /* Yes, increment the return to idle return count.  */
584                 _tx_thread_performance_idle_return_count++;
585 #endif
586 
587 
588 #ifndef TX_NOT_INTERRUPTABLE
589 
590                 /* Increment the preempt disable flag in order to keep the protection.  */
591                 _tx_thread_preempt_disable++;
592 
593                 /* Restore interrupts.  */
594                 TX_RESTORE
595 #endif
596 
597                 /* If so, return control to the system.  */
598                 _tx_thread_system_return();
599 
600 #ifdef TX_NOT_INTERRUPTABLE
601 
602                 /* Setup protection again since caller is expecting that it is still in force.  */
603                 _tx_thread_smp_protect();
604 #endif
605 
606                 /* Processing complete, set the flag.  */
607                 processing_complete =  TX_TRUE;
608             }
609         }
610         else
611         {
612 
613             /* There are more threads ready to execute.  */
614 
615             /* Check to see if the thread is in the execute list. If not, there is nothing else to do.  */
616             i =  thread_ptr -> tx_thread_smp_core_mapped;
617             if (_tx_thread_execute_ptr[i] == thread_ptr)
618             {
619 
620                 /* Clear the entry in the thread execution list.  */
621                 _tx_thread_execute_ptr[i] =  TX_NULL;
622 
623                 /* Determine if preemption-threshold is present in the suspending thread or present in another executing or previously executing
624                    thread.  */
625                 if ((_tx_thread_preemption__threshold_scheduled != TX_NULL) || (thread_ptr -> tx_thread_preempt_threshold < thread_ptr -> tx_thread_priority))
626                 {
627 
628                     /* Call the rebalance routine. This routine maps cores and ready threads.  */
629                     _tx_thread_smp_rebalance_execute_list(core_index);
630                 }
631 #ifdef TX_THREAD_SMP_EQUAL_PRIORITY
632                 else
633                 {
634 
635                     /* For equal priority SMP, we simply use the rebalance list function.  */
636 
637                     /* Call the rebalance routine. This routine maps cores and ready threads.  */
638                     _tx_thread_smp_rebalance_execute_list(core_index);
639                 }
640 #else
641                 else
642                 {
643 
644                     /* Now we need to find the next, highest-priority thread ready for execution.  */
645 
646                     /* Start at the priority of the thread suspending, since we know that higher priority threads
647                        have already been evaluated when preemption-threshold is not in effect.  */
648                     next_priority =  thread_ptr -> tx_thread_priority;
649 
650                     /* Determine if there are other threads at the same priority level as the suspending thread.  */
651                     if (next_thread == thread_ptr)
652                     {
653 
654                         /* No more threads at this priority level.  */
655 
656                         /* Start at the priority after that of the thread suspending, since we know there are no
657                            other threads at the suspending thread's priority ready to execute.  */
658                         next_priority++;
659 
660                         /* Set next thread to NULL..  */
661                         next_thread =  TX_NULL;
662                     }
663 
664                     /* Get the possible cores bit map, based on what has already been scheduled.  */
665                     possible_cores =  _tx_thread_smp_possible_cores_get();
666 
667                     /* Setup the available cores bit map. In the suspend case, this is simply the core that is now available. */
668                     available_cores =  (((ULONG) 1) << i);
669 
670                     /* Calculate the possible complex path.  */
671                     complex_path_possible =  possible_cores & available_cores;
672 
673                     /* Check if we need to loop to find the next highest priority thread.  */
674                     if (next_priority == TX_MAX_PRIORITIES)
675                     {
676                         loop_finished = TX_TRUE;
677                     }
678                     else
679                     {
680                         loop_finished = TX_FALSE;
681                     }
682 
683                     /* Loop to find the next highest priority ready thread that is allowed to run on this core.  */
684                     while (loop_finished == TX_FALSE)
685                     {
686 
687                         /* Determine if there is a thread to examine.  */
688                         if (next_thread == TX_NULL)
689                         {
690 
691                             /* Calculate the next ready priority.  */
692                             next_priority =  _tx_thread_smp_next_priority_find(next_priority);
693 
694                             /* Determine if there are no more threads to execute.  */
695                             if (next_priority == ((UINT) TX_MAX_PRIORITIES))
696                             {
697 
698                                 /* Break out of loop.  */
699                                 loop_finished =  TX_TRUE;
700                             }
701                             else
702                             {
703 
704                                 /* Pickup the next thread to schedule.  */
705                                 next_thread =  _tx_thread_priority_list[next_priority];
706                             }
707                         }
708 
709                         /* Determine if the processing is not complete.  */
710                         if (loop_finished == TX_FALSE)
711                         {
712 
713                             /* Is the this thread already in the execute list?  */
714                             if (next_thread != _tx_thread_execute_ptr[next_thread -> tx_thread_smp_core_mapped])
715                             {
716 
717                                 /* No, not already on the execute list.   */
718 
719                                 /* Check to see if the thread has preemption-threshold set.  */
720                                 if (next_thread -> tx_thread_preempt_threshold != next_thread -> tx_thread_priority)
721                                 {
722 
723                                     /* Call the rebalance routine. This routine maps cores and ready threads.  */
724                                     _tx_thread_smp_rebalance_execute_list(core_index);
725 
726                                     /* Get out of the loop.  */
727                                     loop_finished =  TX_TRUE;
728                                 }
729                                 else
730                                 {
731 
732                                     /* Now determine if this thread is allowed to run on this core.  */
733                                     if ((((next_thread -> tx_thread_smp_cores_allowed >> i) & ((ULONG) 1))) != ((ULONG) 0))
734                                     {
735 
736                                         /* Remember this index in the thread control block.  */
737                                         next_thread -> tx_thread_smp_core_mapped =  i;
738 
739                                         /* Setup the entry in the execution list.  */
740                                         _tx_thread_execute_ptr[i] =  next_thread;
741 
742                                         /* Found the thread to execute.  */
743                                         loop_finished =  TX_TRUE;
744                                     }
745                                     else
746                                     {
747 
748                                         /* Determine if nontrivial scheduling is possible.  */
749                                         if (complex_path_possible != ((ULONG) 0))
750                                         {
751 
752                                             /* Check for nontrivial scheduling, i.e., can other threads be remapped to allow this thread to be
753                                                scheduled.  */
754 
755                                             /* Determine what the possible cores are for this thread.  */
756                                             thread_possible_cores =  next_thread -> tx_thread_smp_cores_allowed;
757 
758                                             /* Apply the current possible cores.  */
759                                             thread_possible_cores =  thread_possible_cores & possible_cores;
760                                             if (thread_possible_cores != ((ULONG) 0))
761                                             {
762 
763                                                 /* Note that we know that the thread must have the target core excluded at this point,
764                                                    since we failed the test above.  */
765 
766                                                 /* Now we need to see if one of the other threads in the non-excluded cores can be moved to make room
767                                                    for this thread.  */
768 
769                                                 /* Default the schedule list to the current execution list.  */
770                                                 _tx_thread_smp_schedule_list_setup();
771 
772                                                 /* Determine the possible core mapping.  */
773                                                 test_possible_cores =  possible_cores & ~(thread_possible_cores);
774 
775                                                 /* Attempt to remap the cores in order to schedule this thread.  */
776                                                 core =  _tx_thread_smp_remap_solution_find(next_thread, available_cores, thread_possible_cores, test_possible_cores);
777 
778                                                 /* Determine if remapping was successful.  */
779                                                 if (core != ((UINT) TX_THREAD_SMP_MAX_CORES))
780                                                 {
781 
782                                                     /* Clear the execute list.  */
783                                                     _tx_thread_smp_execute_list_clear();
784 
785                                                     /* Setup the execute list based on the updated schedule list.  */
786                                                     _tx_thread_smp_execute_list_setup(core_index);
787 
788                                                     /* At this point, we are done since we have found a solution for one core.  */
789                                                     loop_finished =  TX_TRUE;
790                                                 }
791                                                 else
792                                                 {
793 
794                                                     /* We couldn't assign the thread to any of the cores possible for the thread so update the possible cores for the
795                                                        next pass so we don't waste time looking at them again!  */
796                                                     possible_cores =  possible_cores & (~thread_possible_cores);
797                                                 }
798                                             }
799                                         }
800                                     }
801                                 }
802                             }
803                         }
804 
805                         /* Determine if the loop is finished.  */
806                         if (loop_finished == TX_FALSE)
807                         {
808 
809                             /* Move to the next thread.  */
810                             next_thread =  next_thread -> tx_thread_ready_next;
811 
812                             /* Determine if we are at the head of the list.  */
813                             if (next_thread == _tx_thread_priority_list[next_priority])
814                             {
815 
816                                 /* Yes, set the next thread pointer to NULL, increment the priority, and continue.  */
817                                 next_thread =  TX_NULL;
818                                 next_priority++;
819 
820                                 /* Determine if there are no more threads to execute.  */
821                                 if (next_priority == ((UINT) TX_MAX_PRIORITIES))
822                                 {
823 
824                                     /* Break out of loop.  */
825                                     loop_finished =  TX_TRUE;
826                                 }
827                             }
828                         }
829                     }
830 
831 #ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
832 
833                     /* Determine if we need to preempt the core.  */
834                     if (i != core_index)
835                     {
836 
837                         /* Make sure thread execution has started.  */
838                         if (_tx_thread_system_state[i] < ((ULONG) TX_INITIALIZE_IN_PROGRESS))
839                         {
840 
841                             /* Preempt the mapped thread.  */
842                             _tx_thread_smp_core_preempt(i);
843                         }
844                     }
845 #endif
846 
847 #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
848 
                    /* Does this core need to be woken up?  */
850                     if (i != core_index)
851                     {
852 
                        /* Check to make sure there is a thread to execute for this core.  */
854                         if (_tx_thread_execute_ptr[i] != TX_NULL)
855                         {
856 
857                             /* Wakeup based on application's macro.  */
858                             TX_THREAD_SMP_WAKEUP(i);
859                         }
860                     }
861 #endif
862                 }
863 #endif
864             }
865         }
866 
867 #ifndef TX_NOT_INTERRUPTABLE
868 
869     }
870 #endif
871 
872     /* Check to see if the processing is complete.  */
873     if (processing_complete == TX_FALSE)
874     {
875 
876 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
877 
878         /* Debug entry.  */
879         _tx_thread_smp_debug_entry_insert(7, 1, thread_ptr);
880 #endif
881 
882 #ifdef TX_ENABLE_EVENT_TRACE
883 
884         /* Check that the event time stamp is unchanged.  A different
885            timestamp means that a later event wrote over the thread
886            suspend event. In that case, do nothing here.  */
887         if ((entry_ptr != TX_NULL) && (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp))
888         {
889 
890             /* Timestamp is the same, set the "next thread pointer" to the next thread scheduled
891                for this core.  */
892 #ifdef TX_MISRA_ENABLE
893             entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
894 #else
895             entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
896 #endif
897         }
898 #endif
899 
900         /* Determine if a preemption condition is present.  */
901         if (_tx_thread_current_ptr[core_index] != _tx_thread_execute_ptr[core_index])
902         {
903 
904 #ifdef TX_ENABLE_STACK_CHECKING
905 
906             /* Pickup the next execute pointer.  */
907             thread_ptr =  _tx_thread_execute_ptr[core_index];
908 
909             /* Determine if there is a thread pointer.  */
910             if (thread_ptr != TX_NULL)
911             {
912 
913                 /* Check this thread's stack.  */
914                 TX_THREAD_STACK_CHECK(thread_ptr)
915             }
916 #endif
917 
918             /* Determine if preemption should take place. This is only possible if the current thread pointer is
919                not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
920             if (_tx_thread_system_state[core_index] == ((ULONG) 0))
921             {
922 
923                 /* Check the preempt disable flag.  */
924                 if (_tx_thread_preempt_disable == ((UINT) 0))
925                 {
926 
927 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
928 
929                     /* Determine if an idle system return is present.  */
930                     if (_tx_thread_execute_ptr[core_index] == TX_NULL)
931                     {
932 
933                         /* Yes, increment the return to idle return count.  */
934                         _tx_thread_performance_idle_return_count++;
935                     }
936                     else
937                     {
938 
939                         /* No, there is another thread ready to run and will be scheduled upon return.  */
940                         _tx_thread_performance_non_idle_return_count++;
941                     }
942 #endif
943 
944 
945 #ifndef TX_NOT_INTERRUPTABLE
946 
947                     /* Increment the preempt disable flag in order to keep the protection.  */
948                     _tx_thread_preempt_disable++;
949 
950                     /* Restore interrupts.  */
951                     TX_RESTORE
952 #endif
953 
954                     /* Preemption is needed - return to the system!  */
955                     _tx_thread_system_return();
956 
957 #ifdef TX_NOT_INTERRUPTABLE
958 
959                     /* Setup protection again since caller is expecting that it is still in force.  */
960                     _tx_thread_smp_protect();
961 #endif
962 
963 #ifndef TX_NOT_INTERRUPTABLE
964 
965                     /* Set the processing complete flag.  */
966                     processing_complete =  TX_TRUE;
967 #endif
968                 }
969             }
970         }
971 
972 #ifndef TX_NOT_INTERRUPTABLE
973 
974         /* Determine if processing is complete.  If so, no need to restore interrupts.  */
975         if (processing_complete == TX_FALSE)
976         {
977 
978             /* Restore interrupts.  */
979             TX_RESTORE
980         }
981 #endif
982     }
983 }
984 
#ifdef TX_NOT_INTERRUPTABLE
VOID  _tx_thread_system_ni_suspend(TX_THREAD *thread_ptr, ULONG timeout)
{

    /* Non-interruptable wrapper for thread suspension: record the requested
       timeout in the thread's internal timer before delegating to the common
       suspension processing.  The timeout must be stored first so the
       suspend logic can arm the expiration correctly.  */
    thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks =  timeout;

    /* Perform the actual system suspend processing for this thread.  */
    _tx_thread_system_suspend(thread_ptr);
}
#endif
996 
997