1 /**************************************************************************/
2 /*                                                                        */
3 /*       Copyright (c) Microsoft Corporation. All rights reserved.        */
4 /*                                                                        */
5 /*       This software is licensed under the Microsoft Software License   */
6 /*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
7 /*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
8 /*       and in the root directory of this software.                      */
9 /*                                                                        */
10 /**************************************************************************/
11 
12 
13 /**************************************************************************/
14 /**************************************************************************/
15 /**                                                                       */
16 /** ThreadX Component                                                     */
17 /**                                                                       */
18 /**   Thread                                                              */
19 /**                                                                       */
20 /**************************************************************************/
21 /**************************************************************************/
22 
23 #define TX_SOURCE_CODE
24 #define TX_THREAD_SMP_SOURCE_CODE
25 
26 
27 /* Include necessary system files.  */
28 
29 #include "tx_api.h"
30 #include "tx_initialize.h"
31 #include "tx_timer.h"
32 #include "tx_thread.h"
33 #include "tx_trace.h"
34 
35 
36 /**************************************************************************/
37 /*                                                                        */
38 /*  FUNCTION                                               RELEASE        */
39 /*                                                                        */
40 /*    _tx_thread_system_suspend                          PORTABLE SMP     */
41 /*                                                           6.3.0        */
42 /*  AUTHOR                                                                */
43 /*                                                                        */
44 /*    William E. Lamie, Microsoft Corporation                             */
45 /*                                                                        */
46 /*  DESCRIPTION                                                           */
47 /*                                                                        */
48 /*    This function suspends the specified thread and changes the thread  */
49 /*    state to the value specified.  Note: delayed suspension processing  */
50 /*    is handled outside of this routine.                                 */
51 /*                                                                        */
52 /*  INPUT                                                                 */
53 /*                                                                        */
54 /*    thread_ptr                            Pointer to thread to suspend  */
55 /*                                                                        */
56 /*  OUTPUT                                                                */
57 /*                                                                        */
58 /*    None                                                                */
59 /*                                                                        */
60 /*  CALLS                                                                 */
61 /*                                                                        */
62 /*    _tx_thread_smp_available_cores_get    Get available cores bitmap    */
63 /*    _tx_thread_smp_core_preempt           Preempt core for new thread   */
64 /*    _tx_thread_smp_execute_list_clear     Clear the thread execute list */
65 /*    _tx_thread_smp_execute_list_setup     Setup the thread execute list */
66 /*    _tx_thread_smp_next_priority_find     Find next priority with one   */
67 /*                                            or more ready threads       */
68 /*    _tx_thread_smp_possible_cores_get     Get possible cores bitmap     */
69 /*    [_tx_thread_smp_protect]              Get protection                */
70 /*    _tx_thread_smp_rebalance_execute_list Rebalance the execution list  */
71 /*    _tx_thread_smp_remap_solution_find    Attempt to remap threads to   */
72 /*                                            schedule another thread     */
73 /*    _tx_thread_smp_schedule_list_setup    Inherit schedule list from    */
74 /*                                            execute list                */
75 /*    _tx_thread_system_return              Return to system              */
76 /*                                                                        */
77 /*  CALLED BY                                                             */
78 /*                                                                        */
79 /*    _tx_thread_priority_change            Thread priority change        */
80 /*    _tx_thread_shell_entry                Thread shell function         */
81 /*    _tx_thread_sleep                      Thread sleep                  */
82 /*    _tx_thread_suspend                    Application thread suspend    */
83 /*    _tx_thread_terminate                  Thread terminate              */
84 /*    Other ThreadX Components                                            */
85 /*                                                                        */
86 /*  RELEASE HISTORY                                                       */
87 /*                                                                        */
88 /*    DATE              NAME                      DESCRIPTION             */
89 /*                                                                        */
90 /*  09-30-2020      William E. Lamie        Initial Version 6.1           */
91 /*  04-25-2022      Scott Larson            Modified comments and fixed   */
92 /*                                            loop to find next thread,   */
93 /*                                            resulting in version 6.1.11 */
94 /*  10-31-2023      Tiejun Zhou             Fixed MISRA2012 rule 10.4_a,  */
95 /*                                            resulting in version 6.3.0  */
96 /*                                                                        */
97 /**************************************************************************/
98 VOID  _tx_thread_system_suspend(TX_THREAD *thread_ptr)
99 {
100 
101 #ifndef TX_NOT_INTERRUPTABLE
102 
103 TX_INTERRUPT_SAVE_AREA
104 #endif
105 
106 UINT                        priority;
107 UINT                        i;
108 ULONG                       priority_bit;
109 ULONG                       combined_flags;
110 ULONG                       priority_map;
111 UINT                        core_index;
112 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
113 ULONG                       complex_path_possible;
114 UINT                        core;
115 ULONG                       possible_cores;
116 ULONG                       thread_possible_cores;
117 ULONG                       available_cores;
118 ULONG                       test_possible_cores;
119 UINT                        next_priority;
120 TX_THREAD                   *next_thread;
121 UINT                        loop_finished;
122 #endif
123 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
124 UINT                        next_preempted;
125 UINT                        base_priority;
126 UINT                        priority_bit_set;
127 TX_THREAD                   *preempted_thread;
128 #endif
129 #if TX_MAX_PRIORITIES > 32
130 UINT                        map_index;
131 #endif
132 
133 #ifndef TX_NO_TIMER
134 TX_TIMER_INTERNAL           **timer_list;
135 TX_TIMER_INTERNAL           *next_timer;
136 TX_TIMER_INTERNAL           *timer_ptr;
137 TX_TIMER_INTERNAL           *previous_timer;
138 ULONG                       expiration_time;
139 ULONG                       delta;
140 ULONG                       timeout;
141 #endif
142 
143 #ifdef TX_ENABLE_EVENT_TRACE
144 TX_TRACE_BUFFER_ENTRY       *entry_ptr =  TX_NULL;
145 ULONG                       time_stamp =  ((ULONG) 0);
146 #endif
147 UINT                        processing_complete;
148 
149 
150     /* Set the processing complete flag to false.  */
151     processing_complete =  TX_FALSE;
152 
153 #ifndef TX_NOT_INTERRUPTABLE
154 
155     /* Disable interrupts.  */
156     TX_DISABLE
157 #endif
158 
159     /* Pickup the index.  */
160     core_index =  TX_SMP_CORE_ID;
161 
162 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
163 
164     /* Debug entry.  */
165     _tx_thread_smp_debug_entry_insert(6, 1, thread_ptr);
166 #endif
167 
168 #ifndef TX_NO_TIMER
169 
170     /* Determine if a timeout needs to be activated.  */
171     if (thread_ptr == _tx_thread_current_ptr[core_index])
172     {
173 
174         /* Reset time slice for current thread.  */
175         _tx_timer_time_slice[core_index] =  thread_ptr -> tx_thread_new_time_slice;
176 
177         /* Pickup the wait option.  */
178         timeout =  thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks;
179 
180         /* Determine if an activation is needed.  */
181         if (timeout != TX_NO_WAIT)
182         {
183 
184             /* Make sure the suspension is not a wait-forever.  */
185             if (timeout != TX_WAIT_FOREVER)
186             {
187 
188                 /* Activate the thread timer with the timeout value setup in the caller. This is now done in-line
189                    for ThreadX SMP so the additional protection logic can be avoided.  */
190 
191                 /* Activate the thread's timeout timer.  */
192 
193                 /* Setup pointer to internal timer.  */
194                 timer_ptr =  &(thread_ptr -> tx_thread_timer);
195 
196                 /* Calculate the amount of time remaining for the timer.  */
197                 if (timeout > TX_TIMER_ENTRIES)
198                 {
199 
200                     /* Set expiration time to the maximum number of entries.  */
201                     expiration_time =  TX_TIMER_ENTRIES - ((ULONG) 1);
202                 }
203                 else
204                 {
205 
206                     /* Timer value fits in the timer entries.  */
207 
208                     /* Set the expiration time.  */
209                     expiration_time =  (UINT) (timeout - ((ULONG) 1));
210                 }
211 
212                 /* At this point, we are ready to put the timer on one of
213                    the timer lists.  */
214 
215                 /* Calculate the proper place for the timer.  */
216                 timer_list =  TX_TIMER_POINTER_ADD(_tx_timer_current_ptr, expiration_time);
217                 if (TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(timer_list) >= TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(_tx_timer_list_end))
218                 {
219 
220                     /* Wrap from the beginning of the list.  */
221                     delta =  TX_TIMER_POINTER_DIF(timer_list, _tx_timer_list_end);
222                     timer_list =  TX_TIMER_POINTER_ADD(_tx_timer_list_start, delta);
223                 }
224 
225                 /* Now put the timer on this list.  */
226                 if ((*timer_list) == TX_NULL)
227                 {
228 
229                     /* This list is NULL, just put the new timer on it.  */
230 
231                     /* Setup the links in this timer.  */
232                     timer_ptr -> tx_timer_internal_active_next =      timer_ptr;
233                     timer_ptr -> tx_timer_internal_active_previous =  timer_ptr;
234 
235                     /* Setup the list head pointer.  */
236                     *timer_list =  timer_ptr;
237                 }
238                 else
239                 {
240 
241                     /* This list is not NULL, add current timer to the end. */
242                     next_timer =                                        *timer_list;
243                     previous_timer =                                    next_timer -> tx_timer_internal_active_previous;
244                     previous_timer -> tx_timer_internal_active_next =   timer_ptr;
245                     next_timer -> tx_timer_internal_active_previous =   timer_ptr;
246                     timer_ptr -> tx_timer_internal_active_next =        next_timer;
247                     timer_ptr -> tx_timer_internal_active_previous =    previous_timer;
248                 }
249 
250                 /* Setup list head pointer.  */
251                 timer_ptr -> tx_timer_internal_list_head =  timer_list;
252             }
253         }
254     }
255 #endif
256 
257 
258 #ifdef TX_ENABLE_STACK_CHECKING
259 
260     /* Check this thread's stack.  */
261     TX_THREAD_STACK_CHECK(thread_ptr)
262 #endif
263 
264 
265 #ifndef TX_NOT_INTERRUPTABLE
266 
267     /* Decrement the preempt disable flag.  */
268     _tx_thread_preempt_disable--;
269 #endif
270 
271 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
272 
273     /* Increment the thread's suspend count.  */
274     thread_ptr -> tx_thread_performance_suspend_count++;
275 
276     /* Increment the total number of thread suspensions.  */
277     _tx_thread_performance_suspend_count++;
278 #endif
279 
280 
281 #ifndef TX_NOT_INTERRUPTABLE
282 
283     /* Check to make sure the thread suspending flag is still set.  If not, it
284        has already been resumed.  */
285     if ((thread_ptr -> tx_thread_suspending) == TX_TRUE)
286     {
287 #endif
288 
289         /* Thread state change.  */
290         TX_THREAD_STATE_CHANGE(thread_ptr, thread_ptr -> tx_thread_state)
291 
292         /* Log the thread status change.  */
293         TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, thread_ptr -> tx_thread_state)
294 
295 #ifdef TX_ENABLE_EVENT_TRACE
296 
297         /* If trace is enabled, save the current event pointer.  */
298         entry_ptr =  _tx_trace_buffer_current_ptr;
299 
300         /* Log the thread status change.  */
301         TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SUSPEND, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&priority), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]), TX_TRACE_INTERNAL_EVENTS)
302 
303         /* Save the time stamp for later comparison to verify that
304            the event hasn't been overwritten by the time we have
305            computed the next thread to execute.  */
306         if (entry_ptr != TX_NULL)
307         {
308 
309             /* Save time stamp.  */
310             time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
311         }
312 #endif
313 
314         /* Actually suspend this thread.  But first, clear the suspending flag.  */
315         thread_ptr -> tx_thread_suspending =  TX_FALSE;
316 
317         /* Pickup priority of thread.  */
318         priority =  thread_ptr -> tx_thread_priority;
319 
320 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
321 
322 #if TX_MAX_PRIORITIES > 32
323 
324         /* Calculate the index into the bit map array.  */
325         map_index =  priority/((UINT) 32);
326 #endif
327 
328         /* Determine if this thread has preemption-threshold set.  */
329         if (thread_ptr -> tx_thread_preempt_threshold < priority)
330         {
331 
332             /* Was this thread with preemption-threshold set actually preempted with preemption-threshold set?  */
333             if (_tx_thread_preemption_threshold_list[priority] == thread_ptr)
334             {
335 
336                 /* Clear the preempted list entry.  */
337                 _tx_thread_preemption_threshold_list[priority] =  TX_NULL;
338 
339                 /* Ensure that this thread's priority is clear in the preempt map.  */
340                 TX_MOD32_BIT_SET(priority, priority_bit)
341                 _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));
342 
343 #if TX_MAX_PRIORITIES > 32
344 
345                 /* Determine if there are any other bits set in this preempt map.  */
346                 if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
347                 {
348 
349                     /* No, clear the active bit to signify this preempted map has nothing set.  */
350                     TX_DIV32_BIT_SET(priority, priority_bit)
351                     _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active & (~(priority_bit));
352                 }
353 #endif
354             }
355         }
356 #endif
357 
358         /* Determine if this thread has global preemption disabled.  */
359         if (thread_ptr == _tx_thread_preemption__threshold_scheduled)
360         {
361 
362             /* Clear the global preemption disable flag.  */
363             _tx_thread_preemption__threshold_scheduled =  TX_NULL;
364 
365 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
366 
367             /* Clear the entry in the preempted list.  */
368             _tx_thread_preemption_threshold_list[thread_ptr -> tx_thread_priority] =  TX_NULL;
369 
370             /* Calculate the first thread with preemption-threshold active.  */
371 #if TX_MAX_PRIORITIES > 32
372             if (_tx_thread_preempted_map_active != ((ULONG) 0))
373 #else
374             if (_tx_thread_preempted_maps[0] != ((ULONG) 0))
375 #endif
376             {
377 #if TX_MAX_PRIORITIES > 32
378 
379                 /* Calculate the index to find the next highest priority thread ready for execution.  */
380                 priority_map =    _tx_thread_preempted_map_active;
381 
382                 /* Calculate the lowest bit set in the priority map. */
383                 TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)
384 
385                 /* Calculate the base priority as well.  */
386                 base_priority =  map_index * ((UINT) 32);
387 #else
388 
389                 /* Setup the base priority to zero.  */
390                 base_priority =   ((UINT) 0);
391 #endif
392 
393                 /* Setup temporary preempted map.  */
394                 priority_map =  _tx_thread_preempted_maps[MAP_INDEX];
395 
396                 /* Calculate the lowest bit set in the priority map. */
397                 TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit_set)
398 
399                 /* Move priority bit set into priority bit.  */
400                 priority_bit =  (ULONG) priority_bit_set;
401 
402                 /* Setup the highest priority preempted thread.  */
403                 next_preempted =  base_priority + priority_bit;
404 
405                 /* Pickup the next preempted thread.  */
406                 preempted_thread =  _tx_thread_preemption_threshold_list[next_preempted];
407 
408                 /* Setup the preempted thread.  */
409                 _tx_thread_preemption__threshold_scheduled =  preempted_thread;
410             }
411 #endif
412         }
413 
414         /* Determine if there are other threads at this priority that are
415            ready.  */
416         if (thread_ptr -> tx_thread_ready_next != thread_ptr)
417         {
418 
419             /* Yes, there are other threads at this priority ready.  */
420 
421 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
422 
423             /* Remember the head of the priority list.  */
424             next_thread =  _tx_thread_priority_list[priority];
425 #endif
426 
427             /* Just remove this thread from the priority list.  */
428             (thread_ptr -> tx_thread_ready_next) -> tx_thread_ready_previous =    thread_ptr -> tx_thread_ready_previous;
429             (thread_ptr -> tx_thread_ready_previous) -> tx_thread_ready_next =    thread_ptr -> tx_thread_ready_next;
430 
431             /* Determine if this is the head of the priority list.  */
432             if (_tx_thread_priority_list[priority] == thread_ptr)
433             {
434 
435                 /* Update the head pointer of this priority list.  */
436                 _tx_thread_priority_list[priority] =  thread_ptr -> tx_thread_ready_next;
437 
438 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
439 
440                 /* Update the next pointer as well.  */
441                 next_thread =  thread_ptr -> tx_thread_ready_next;
442 #endif
443             }
444         }
445         else
446         {
447 
448 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
449 
450             /* Remember the head of the priority list.  */
451             next_thread =  thread_ptr;
452 #endif
453 
454             /* This is the only thread at this priority ready to run.  Set the head
455                pointer to NULL.  */
456             _tx_thread_priority_list[priority] =    TX_NULL;
457 
458 #if TX_MAX_PRIORITIES > 32
459 
460             /* Calculate the index into the bit map array.  */
461             map_index =  priority/((UINT) 32);
462 #endif
463 
464             /* Clear this priority bit in the ready priority bit map.  */
465             TX_MOD32_BIT_SET(priority, priority_bit)
466             _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));
467 
468 #if TX_MAX_PRIORITIES > 32
469 
470             /* Determine if there are any other bits set in this priority map.  */
471             if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
472             {
473 
474                 /* No, clear the active bit to signify this priority map has nothing set.  */
475                 TX_DIV32_BIT_SET(priority, priority_bit)
476                 _tx_thread_priority_map_active =  _tx_thread_priority_map_active & (~(priority_bit));
477             }
478 #endif
479         }
480 
481 #if TX_MAX_PRIORITIES > 32
482 
483         /* Calculate the index to find the next highest priority thread ready for execution.  */
484         priority_map =    _tx_thread_priority_map_active;
485 
486         /* Determine if there is anything.   */
487         if (priority_map != ((ULONG) 0))
488         {
489 
490             /* Calculate the lowest bit set in the priority map. */
491             TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)
492         }
493 #endif
494 
495         /* Setup working variable for the priority map.  */
496         priority_map =    _tx_thread_priority_maps[MAP_INDEX];
497 
498         /* Make a quick check for no other threads ready for execution.  */
499         if (priority_map == ((ULONG) 0))
500         {
501 
502 #ifdef TX_ENABLE_EVENT_TRACE
503 
504             /* Check that the event time stamp is unchanged.  A different
505                timestamp means that a later event wrote over the thread
506                suspend event. In that case, do nothing here.  */
507             if ((entry_ptr != TX_NULL) && (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp))
508             {
509 
510                 /* Timestamp is the same, set the "next thread pointer" to NULL. This can
511                    be used by the trace analysis tool to show idle system conditions.  */
512 #ifdef TX_MISRA_ENABLE
513                 entry_ptr -> tx_trace_buffer_entry_info_4 =  ((ULONG) 0);
514 #else
515                 entry_ptr -> tx_trace_buffer_entry_information_field_4 =  ((ULONG) 0);
516 #endif
517             }
518 #endif
519 
520             /* Check to see if the thread is in the execute list.  */
521             i =  thread_ptr -> tx_thread_smp_core_mapped;
522 
523             /* Clear the entry in the thread execution list.  */
524             _tx_thread_execute_ptr[i] =  TX_NULL;
525 
526 #ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
527 
528             /* Determine if we need to preempt the core.  */
529             if (i != core_index)
530             {
531 
532                 if (_tx_thread_system_state[i] < TX_INITIALIZE_IN_PROGRESS)
533                 {
534 
535                     /* Preempt the mapped thread.  */
536                     _tx_thread_smp_core_preempt(i);
537                 }
538             }
539 #endif
540 
541 #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
542 
543             /* Does this need to be waked up?  */
544             if ((i != core_index) && (_tx_thread_execute_ptr[i] != TX_NULL))
545             {
546 
547                 /* Wakeup based on application's macro.  */
548                 TX_THREAD_SMP_WAKEUP(i);
549             }
550 #endif
551 
552 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
553 
554             /* Debug entry.  */
555             _tx_thread_smp_debug_entry_insert(7, 1, thread_ptr);
556 #endif
557 
558 #ifdef TX_ENABLE_EVENT_TRACE
559 
560             /* Check that the event time stamp is unchanged.  A different
561                timestamp means that a later event wrote over the system suspend
562                event.  In that case, do nothing here.  */
563             if ((entry_ptr != TX_NULL) && (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp))
564             {
565 
566                 /* Timestamp is the same, set the "next thread pointer" to the next thread scheduled
567                    for this core.  */
568 #ifdef TX_MISRA_ENABLE
569                 entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
570 #else
571                 entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
572 #endif
573             }
574 #endif
575 
576             /* Check to see if the caller is a thread and the preempt disable flag is clear.  */
577             combined_flags =  ((ULONG) _tx_thread_system_state[core_index]);
578             combined_flags =  combined_flags | ((ULONG) _tx_thread_preempt_disable);
579             if (combined_flags == ((ULONG) 0))
580             {
581 
582 
583 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
584 
585                 /* Yes, increment the return to idle return count.  */
586                 _tx_thread_performance_idle_return_count++;
587 #endif
588 
589 
590 #ifndef TX_NOT_INTERRUPTABLE
591 
592                 /* Increment the preempt disable flag in order to keep the protection.  */
593                 _tx_thread_preempt_disable++;
594 
595                 /* Restore interrupts.  */
596                 TX_RESTORE
597 #endif
598 
599                 /* If so, return control to the system.  */
600                 _tx_thread_system_return();
601 
602 #ifdef TX_NOT_INTERRUPTABLE
603 
604                 /* Setup protection again since caller is expecting that it is still in force.  */
605                 _tx_thread_smp_protect();
606 #endif
607 
608                 /* Processing complete, set the flag.  */
609                 processing_complete =  TX_TRUE;
610             }
611         }
612         else
613         {
614 
615             /* There are more threads ready to execute.  */
616 
617             /* Check to see if the thread is in the execute list. If not, there is nothing else to do.  */
618             i =  thread_ptr -> tx_thread_smp_core_mapped;
619             if (_tx_thread_execute_ptr[i] == thread_ptr)
620             {
621 
622                 /* Clear the entry in the thread execution list.  */
623                 _tx_thread_execute_ptr[i] =  TX_NULL;
624 
625                 /* Determine if preemption-threshold is present in the suspending thread or present in another executing or previously executing
626                    thread.  */
627                 if ((_tx_thread_preemption__threshold_scheduled != TX_NULL) || (thread_ptr -> tx_thread_preempt_threshold < thread_ptr -> tx_thread_priority))
628                 {
629 
630                     /* Call the rebalance routine. This routine maps cores and ready threads.  */
631                     _tx_thread_smp_rebalance_execute_list(core_index);
632                 }
633 #ifdef TX_THREAD_SMP_EQUAL_PRIORITY
634                 else
635                 {
636 
637                     /* For equal priority SMP, we simply use the rebalance list function.  */
638 
639                     /* Call the rebalance routine. This routine maps cores and ready threads.  */
640                     _tx_thread_smp_rebalance_execute_list(core_index);
641                 }
642 #else
643                 else
644                 {
645 
646                     /* Now we need to find the next, highest-priority thread ready for execution.  */
647 
648                     /* Start at the priority of the thread suspending, since we know that higher priority threads
649                        have already been evaluated when preemption-threshold is not in effect.  */
650                     next_priority =  thread_ptr -> tx_thread_priority;
651 
652                     /* Determine if there are other threads at the same priority level as the suspending thread.  */
653                     if (next_thread == thread_ptr)
654                     {
655 
656                         /* No more threads at this priority level.  */
657 
658                         /* Start at the priority after that of the thread suspending, since we know there are no
659                            other threads at the suspending thread's priority ready to execute.  */
660                         next_priority++;
661 
662                         /* Set next thread to NULL..  */
663                         next_thread =  TX_NULL;
664                     }
665 
666                     /* Get the possible cores bit map, based on what has already been scheduled.  */
667                     possible_cores =  _tx_thread_smp_possible_cores_get();
668 
669                     /* Setup the available cores bit map. In the suspend case, this is simply the core that is now available. */
670                     available_cores =  (((ULONG) 1) << i);
671 
672                     /* Calculate the possible complex path.  */
673                     complex_path_possible =  possible_cores & available_cores;
674 
675                     /* Check if we need to loop to find the next highest priority thread.  */
676                     if (next_priority == (ULONG)TX_MAX_PRIORITIES)
677                     {
678                         loop_finished = TX_TRUE;
679                     }
680                     else
681                     {
682                         loop_finished = TX_FALSE;
683                     }
684 
685                     /* Loop to find the next highest priority ready thread that is allowed to run on this core.  */
686                     while (loop_finished == TX_FALSE)
687                     {
688 
689                         /* Determine if there is a thread to examine.  */
690                         if (next_thread == TX_NULL)
691                         {
692 
693                             /* Calculate the next ready priority.  */
694                             next_priority =  _tx_thread_smp_next_priority_find(next_priority);
695 
696                             /* Determine if there are no more threads to execute.  */
697                             if (next_priority == ((UINT) TX_MAX_PRIORITIES))
698                             {
699 
700                                 /* Break out of loop.  */
701                                 loop_finished =  TX_TRUE;
702                             }
703                             else
704                             {
705 
706                                 /* Pickup the next thread to schedule.  */
707                                 next_thread =  _tx_thread_priority_list[next_priority];
708                             }
709                         }
710 
711                         /* Determine if the processing is not complete.  */
712                         if (loop_finished == TX_FALSE)
713                         {
714 
715                         /* Is this thread already in the execute list?  */
716                             if (next_thread != _tx_thread_execute_ptr[next_thread -> tx_thread_smp_core_mapped])
717                             {
718 
719                                 /* No, not already on the execute list.   */
720 
721                                 /* Check to see if the thread has preemption-threshold set.  */
722                                 if (next_thread -> tx_thread_preempt_threshold != next_thread -> tx_thread_priority)
723                                 {
724 
725                                     /* Call the rebalance routine. This routine maps cores and ready threads.  */
726                                     _tx_thread_smp_rebalance_execute_list(core_index);
727 
728                                     /* Get out of the loop.  */
729                                     loop_finished =  TX_TRUE;
730                                 }
731                                 else
732                                 {
733 
734                                     /* Now determine if this thread is allowed to run on this core.  */
735                                     if ((((next_thread -> tx_thread_smp_cores_allowed >> i) & ((ULONG) 1))) != ((ULONG) 0))
736                                     {
737 
738                                         /* Remember this index in the thread control block.  */
739                                         next_thread -> tx_thread_smp_core_mapped =  i;
740 
741                                         /* Setup the entry in the execution list.  */
742                                         _tx_thread_execute_ptr[i] =  next_thread;
743 
744                                         /* Found the thread to execute.  */
745                                         loop_finished =  TX_TRUE;
746                                     }
747                                     else
748                                     {
749 
750                                         /* Determine if nontrivial scheduling is possible.  */
751                                         if (complex_path_possible != ((ULONG) 0))
752                                         {
753 
754                                             /* Check for nontrivial scheduling, i.e., can other threads be remapped to allow this thread to be
755                                                scheduled.  */
756 
757                                             /* Determine what the possible cores are for this thread.  */
758                                             thread_possible_cores =  next_thread -> tx_thread_smp_cores_allowed;
759 
760                                             /* Apply the current possible cores.  */
761                                             thread_possible_cores =  thread_possible_cores & possible_cores;
762                                             if (thread_possible_cores != ((ULONG) 0))
763                                             {
764 
765                                                 /* Note that we know that the thread must have the target core excluded at this point,
766                                                    since we failed the test above.  */
767 
768                                                 /* Now we need to see if one of the other threads in the non-excluded cores can be moved to make room
769                                                    for this thread.  */
770 
771                                                 /* Default the schedule list to the current execution list.  */
772                                                 _tx_thread_smp_schedule_list_setup();
773 
774                                                 /* Determine the possible core mapping.  */
775                                                 test_possible_cores =  possible_cores & ~(thread_possible_cores);
776 
777                                                 /* Attempt to remap the cores in order to schedule this thread.  */
778                                                 core =  _tx_thread_smp_remap_solution_find(next_thread, available_cores, thread_possible_cores, test_possible_cores);
779 
780                                                 /* Determine if remapping was successful.  */
781                                                 if (core != ((UINT) TX_THREAD_SMP_MAX_CORES))
782                                                 {
783 
784                                                     /* Clear the execute list.  */
785                                                     _tx_thread_smp_execute_list_clear();
786 
787                                                     /* Setup the execute list based on the updated schedule list.  */
788                                                     _tx_thread_smp_execute_list_setup(core_index);
789 
790                                                     /* At this point, we are done since we have found a solution for one core.  */
791                                                     loop_finished =  TX_TRUE;
792                                                 }
793                                                 else
794                                                 {
795 
796                                                     /* We couldn't assign the thread to any of the cores possible for the thread so update the possible cores for the
797                                                        next pass so we don't waste time looking at them again!  */
798                                                     possible_cores =  possible_cores & (~thread_possible_cores);
799                                                 }
800                                             }
801                                         }
802                                     }
803                                 }
804                             }
805                         }
806 
807                         /* Determine if the loop is finished.  */
808                         if (loop_finished == TX_FALSE)
809                         {
810 
811                             /* Move to the next thread.  */
812                             next_thread =  next_thread -> tx_thread_ready_next;
813 
814                             /* Determine if we are at the head of the list.  */
815                             if (next_thread == _tx_thread_priority_list[next_priority])
816                             {
817 
818                                 /* Yes, set the next thread pointer to NULL, increment the priority, and continue.  */
819                                 next_thread =  TX_NULL;
820                                 next_priority++;
821 
822                                 /* Determine if there are no more threads to execute.  */
823                                 if (next_priority == ((UINT) TX_MAX_PRIORITIES))
824                                 {
825 
826                                     /* Break out of loop.  */
827                                     loop_finished =  TX_TRUE;
828                                 }
829                             }
830                         }
831                     }
832 
833 #ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
834 
835                     /* Determine if we need to preempt the core.  */
836                     if (i != core_index)
837                     {
838 
839                         /* Make sure thread execution has started.  */
840                         if (_tx_thread_system_state[i] < ((ULONG) TX_INITIALIZE_IN_PROGRESS))
841                         {
842 
843                             /* Preempt the mapped thread.  */
844                             _tx_thread_smp_core_preempt(i);
845                         }
846                     }
847 #endif
848 
849 #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
850 
851                     /* Does this core need to be woken up?  */
852                     if (i != core_index)
853                     {
854 
855                         /* Check to make sure there is a thread to execute for this core.  */
856                         if (_tx_thread_execute_ptr[i] != TX_NULL)
857                         {
858 
859                             /* Wakeup based on application's macro.  */
860                             TX_THREAD_SMP_WAKEUP(i);
861                         }
862                     }
863 #endif
864                 }
865 #endif
866             }
867         }
868 
869 #ifndef TX_NOT_INTERRUPTABLE
870 
871     }
872 #endif
873 
874     /* Check to see if the processing is complete.  */
875     if (processing_complete == TX_FALSE)
876     {
877 
878 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
879 
880         /* Debug entry.  */
881         _tx_thread_smp_debug_entry_insert(7, 1, thread_ptr);
882 #endif
883 
884 #ifdef TX_ENABLE_EVENT_TRACE
885 
886         /* Check that the event time stamp is unchanged.  A different
887            timestamp means that a later event wrote over the thread
888            suspend event. In that case, do nothing here.  */
889         if ((entry_ptr != TX_NULL) && (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp))
890         {
891 
892             /* Timestamp is the same, set the "next thread pointer" to the next thread scheduled
893                for this core.  */
894 #ifdef TX_MISRA_ENABLE
895             entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
896 #else
897             entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
898 #endif
899         }
900 #endif
901 
902         /* Determine if a preemption condition is present.  */
903         if (_tx_thread_current_ptr[core_index] != _tx_thread_execute_ptr[core_index])
904         {
905 
906 #ifdef TX_ENABLE_STACK_CHECKING
907 
908             /* Pickup the next execute pointer.  */
909             thread_ptr =  _tx_thread_execute_ptr[core_index];
910 
911             /* Determine if there is a thread pointer.  */
912             if (thread_ptr != TX_NULL)
913             {
914 
915                 /* Check this thread's stack.  */
916                 TX_THREAD_STACK_CHECK(thread_ptr)
917             }
918 #endif
919 
920             /* Determine if preemption should take place. This is only possible if the current thread pointer is
921                not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
922             if (_tx_thread_system_state[core_index] == ((ULONG) 0))
923             {
924 
925                 /* Check the preempt disable flag.  */
926                 if (_tx_thread_preempt_disable == ((UINT) 0))
927                 {
928 
929 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
930 
931                     /* Determine if an idle system return is present.  */
932                     if (_tx_thread_execute_ptr[core_index] == TX_NULL)
933                     {
934 
935                         /* Yes, increment the return to idle return count.  */
936                         _tx_thread_performance_idle_return_count++;
937                     }
938                     else
939                     {
940 
941                         /* No, there is another thread ready to run and will be scheduled upon return.  */
942                         _tx_thread_performance_non_idle_return_count++;
943                     }
944 #endif
945 
946 
947 #ifndef TX_NOT_INTERRUPTABLE
948 
949                     /* Increment the preempt disable flag in order to keep the protection.  */
950                     _tx_thread_preempt_disable++;
951 
952                     /* Restore interrupts.  */
953                     TX_RESTORE
954 #endif
955 
956                     /* Preemption is needed - return to the system!  */
957                     _tx_thread_system_return();
958 
959 #ifdef TX_NOT_INTERRUPTABLE
960 
961                     /* Setup protection again since caller is expecting that it is still in force.  */
962                     _tx_thread_smp_protect();
963 #endif
964 
965 #ifndef TX_NOT_INTERRUPTABLE
966 
967                     /* Set the processing complete flag.  */
968                     processing_complete =  TX_TRUE;
969 #endif
970                 }
971             }
972         }
973 
974 #ifndef TX_NOT_INTERRUPTABLE
975 
976         /* Determine if processing is complete.  If so, no need to restore interrupts.  */
977         if (processing_complete == TX_FALSE)
978         {
979 
980             /* Restore interrupts.  */
981             TX_RESTORE
982         }
983 #endif
984     }
985 }
986 
987 #ifdef TX_NOT_INTERRUPTABLE
_tx_thread_system_ni_suspend(TX_THREAD * thread_ptr,ULONG timeout)988 VOID  _tx_thread_system_ni_suspend(TX_THREAD *thread_ptr, ULONG timeout)
989 {
990 
991     /* Setup timeout.   */
992     thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks =  timeout;
993 
994     /* Call system suspend function.  */
995     _tx_thread_system_suspend(thread_ptr);
996 }
997 #endif
998 
999