1 /***************************************************************************
2  * Copyright (c) 2024 Microsoft Corporation
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the MIT License which is available at
6  * https://opensource.org/licenses/MIT.
7  *
8  * SPDX-License-Identifier: MIT
9  **************************************************************************/
10 
11 
12 /**************************************************************************/
13 /**************************************************************************/
14 /**                                                                       */
15 /** ThreadX Component                                                     */
16 /**                                                                       */
17 /**   Thread                                                              */
18 /**                                                                       */
19 /**************************************************************************/
20 /**************************************************************************/
21 
22 #define TX_SOURCE_CODE
23 #define TX_THREAD_SMP_SOURCE_CODE
24 
25 
26 /* Include necessary system files.  */
27 
28 #include "tx_api.h"
29 #include "tx_initialize.h"
30 #include "tx_timer.h"
31 #include "tx_thread.h"
32 #include "tx_trace.h"
33 
34 
35 /**************************************************************************/
36 /*                                                                        */
37 /*  FUNCTION                                               RELEASE        */
38 /*                                                                        */
39 /*    _tx_thread_system_resume                           PORTABLE SMP     */
40 /*                                                           6.1          */
41 /*  AUTHOR                                                                */
42 /*                                                                        */
43 /*    William E. Lamie, Microsoft Corporation                             */
44 /*                                                                        */
45 /*  DESCRIPTION                                                           */
46 /*                                                                        */
47 /*    This function places the specified thread on the list of ready      */
48 /*    threads at the thread's specific priority.  If a thread preemption  */
49 /*    is detected, the thread execute list is updated accordingly.        */
50 /*                                                                        */
51 /*  INPUT                                                                 */
52 /*                                                                        */
53 /*    thread_ptr                            Pointer to thread to resume   */
54 /*                                                                        */
55 /*  OUTPUT                                                                */
56 /*                                                                        */
57 /*    None                                                                */
58 /*                                                                        */
59 /*  CALLS                                                                 */
60 /*                                                                        */
61 /*    _tx_thread_smp_available_cores_get    Get available cores bitmap    */
62 /*    _tx_thread_smp_core_preempt           Preempt core for new thread   */
63 /*    _tx_thread_smp_core_wakeup            Wakeup other core             */
64 /*    _tx_thread_smp_execute_list_clear     Clear the thread execute list */
65 /*    _tx_thread_smp_execute_list_setup     Setup the thread execute list */
66 /*    _tx_thread_smp_core_interrupt         Interrupt other core          */
67 /*    _tx_thread_smp_lowest_priority_get    Get lowest priority scheduled */
68 /*                                            thread                      */
69 /*    _tx_thread_smp_next_priority_find     Find next priority with one   */
70 /*                                            or more ready threads       */
71 /*    _tx_thread_smp_possible_cores_get     Get possible cores bitmap     */
72 /*    _tx_thread_smp_preemptable_threads_get                              */
73 /*                                          Get list of thread preemption */
74 /*                                            possibilities               */
75 /*    [_tx_thread_smp_protect]              Get protection                */
76 /*    _tx_thread_smp_rebalance_execute_list Rebalance the execution list  */
77 /*    _tx_thread_smp_remap_solution_find    Attempt to remap threads to   */
78 /*                                            schedule another thread     */
79 /*    _tx_thread_smp_schedule_list_clear    Clear the thread schedule list*/
80 /*    _tx_thread_smp_schedule_list_setup    Inherit schedule list from    */
81 /*                                            execute list                */
82 /*    _tx_thread_system_return              Return to the system          */
83 /*                                                                        */
84 /*  CALLED BY                                                             */
85 /*                                                                        */
86 /*    _tx_thread_create                     Thread create function        */
87 /*    _tx_thread_priority_change            Thread priority change        */
88 /*    _tx_thread_resume                     Application resume service    */
89 /*    _tx_thread_timeout                    Thread timeout                */
90 /*    _tx_thread_wait_abort                 Thread wait abort             */
91 /*    Other ThreadX Components                                            */
92 /*                                                                        */
93 /*  RELEASE HISTORY                                                       */
94 /*                                                                        */
95 /*    DATE              NAME                      DESCRIPTION             */
96 /*                                                                        */
97 /*  09-30-2020     William E. Lamie         Initial Version 6.1           */
98 /*                                                                        */
99 /**************************************************************************/
_tx_thread_system_resume(TX_THREAD * thread_ptr)100 VOID  _tx_thread_system_resume(TX_THREAD *thread_ptr)
101 {
102 
103 #ifndef TX_NOT_INTERRUPTABLE
104 
105 TX_INTERRUPT_SAVE_AREA
106 
107 #endif
108 
109 UINT                        priority;
110 ULONG                       priority_bit;
111 TX_THREAD                   *head_ptr;
112 TX_THREAD                   *tail_ptr;
113 UINT                        core_index;
114 #ifndef TX_THREAD_SMP_EQUAL_PRIORITY
115 UINT                        j;
116 UINT                        lowest_priority;
117 TX_THREAD                   *next_thread;
118 ULONG                       test_cores;
119 UINT                        core;
120 UINT                        thread_mapped;
121 TX_THREAD                   *preempt_thread;
122 ULONG                       possible_cores;
123 ULONG                       thread_possible_cores;
124 ULONG                       available_cores;
125 ULONG                       test_possible_cores;
126 TX_THREAD                   *possible_preemption_list[TX_THREAD_SMP_MAX_CORES];
127 #endif
128 TX_THREAD                   *execute_thread;
129 UINT                        i;
130 UINT                        loop_finished;
131 UINT                        processing_complete;
132 
133 #ifdef TX_ENABLE_EVENT_TRACE
134 TX_TRACE_BUFFER_ENTRY       *entry_ptr;
135 ULONG                       time_stamp =  ((ULONG) 0);
136 #endif
137 
138 #if TX_MAX_PRIORITIES > 32
139 UINT                        map_index;
140 #endif
141 
142 #ifndef TX_NO_TIMER
143 TX_TIMER_INTERNAL           *timer_ptr;
144 TX_TIMER_INTERNAL           **list_head;
145 TX_TIMER_INTERNAL           *next_timer;
146 TX_TIMER_INTERNAL           *previous_timer;
147 #endif
148 
149 
150     /* Set the processing complete flag to false.  */
151     processing_complete =  TX_FALSE;
152 
153 #ifndef TX_NOT_INTERRUPTABLE
154 
155     /* Lockout interrupts while the thread is being resumed.  */
156     TX_DISABLE
157 #endif
158 
159 
160 #ifndef TX_NO_TIMER
161 
162     /* Deactivate the timeout timer if necessary.  */
163     if ((thread_ptr -> tx_thread_timer.tx_timer_internal_list_head) != TX_NULL)
164     {
165 
166         /* Deactivate the thread's timeout timer.  This is now done in-line
167            for ThreadX SMP so the additional protection logic can be avoided.  */
168 
169         /* Deactivate the timer.  */
170 
171         /* Pickup internal timer pointer.  */
172         timer_ptr =  &(thread_ptr -> tx_thread_timer);
173 
174         /* Pickup the list head pointer.  */
175         list_head =  timer_ptr -> tx_timer_internal_list_head;
176 
177         /* Pickup the next active timer.  */
178         next_timer =  timer_ptr -> tx_timer_internal_active_next;
179 
180         /* See if this is the only timer in the list.  */
181         if (timer_ptr == next_timer)
182         {
183 
184             /* Yes, the only timer on the list.  */
185 
186             /* Determine if the head pointer needs to be updated.  */
187             if (*(list_head) == timer_ptr)
188             {
189 
190                 /* Update the head pointer.  */
191                 *(list_head) =  TX_NULL;
192             }
193         }
194         else
195         {
196 
197             /* At least one more timer is on the same expiration list.  */
198 
199             /* Update the links of the adjacent timers.  */
200             previous_timer =                                   timer_ptr -> tx_timer_internal_active_previous;
201             next_timer -> tx_timer_internal_active_previous =  previous_timer;
202             previous_timer -> tx_timer_internal_active_next =  next_timer;
203 
204             /* Determine if the head pointer needs to be updated.  */
205             if (*(list_head) == timer_ptr)
206             {
207 
208                 /* Update the next timer in the list with the list head pointer.  */
209                 next_timer -> tx_timer_internal_list_head =  list_head;
210 
211                 /* Update the head pointer.  */
212                 *(list_head) =  next_timer;
213             }
214         }
215 
216         /* Clear the timer's list head pointer.  */
217         timer_ptr -> tx_timer_internal_list_head =  TX_NULL;
218     }
219     else
220     {
221 
222         /* Clear the remaining time to ensure timer doesn't get activated.  */
223         thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks =  ((ULONG) 0);
224     }
225 #endif
226 
227 #ifdef TX_ENABLE_STACK_CHECKING
228 
229     /* Check this thread's stack.  */
230     TX_THREAD_STACK_CHECK(thread_ptr)
231 #endif
232 
233 
234     /* Pickup index.  */
235     core_index =  TX_SMP_CORE_ID;
236 
237 #ifdef TX_ENABLE_EVENT_TRACE
238 
239     /* If trace is enabled, save the current event pointer.  */
240     entry_ptr =  _tx_trace_buffer_current_ptr;
241 #endif
242 
243     /* Log the thread status change.  */
244     TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&time_stamp), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]), TX_TRACE_INTERNAL_EVENTS)
245 
246 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
247 
248     /* Debug entry.  */
249     _tx_thread_smp_debug_entry_insert(4, 0, thread_ptr);
250 #endif
251 
252 #ifdef TX_ENABLE_EVENT_TRACE
253 
254     /* Save the time stamp for later comparison to verify that
255        the event hasn't been overwritten by the time we have
256        computed the next thread to execute.  */
257     if (entry_ptr != TX_NULL)
258     {
259 
260         /* Save time stamp.  */
261         time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
262     }
263 #endif
264 
265 
266     /* Determine if the thread is in the process of suspending.  If so, the thread
267        control block is already on the linked list so nothing needs to be done.  */
268     if (thread_ptr -> tx_thread_suspending == TX_TRUE)
269     {
270 
271         /* Make sure the type of suspension under way is not a terminate or
272            thread completion.  In either of these cases, do not void the
273            interrupted suspension processing.  */
274         if (thread_ptr -> tx_thread_state != TX_COMPLETED)
275         {
276 
277             /* Make sure the thread isn't terminated.  */
278             if (thread_ptr -> tx_thread_state != TX_TERMINATED)
279             {
280 
281                 /* No, now check to see if the delayed suspension flag is set.  */
282                 if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
283                 {
284 
285                     /* Clear the suspending flag.  */
286                     thread_ptr -> tx_thread_suspending =   TX_FALSE;
287 
288                     /* Restore the state to ready.  */
289                     thread_ptr -> tx_thread_state =        TX_READY;
290 
291                     /* Thread state change.  */
292                     TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)
293 
294                     /* Log the thread status change.  */
295                     TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)
296                 }
297                 else
298                 {
299 
300                     /* Clear the delayed suspend flag and change the state.  */
301                     thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;
302                     thread_ptr -> tx_thread_state =            TX_SUSPENDED;
303                 }
304 
305 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
306 
307                 /* Increment the total number of thread resumptions.  */
308                 _tx_thread_performance_resume_count++;
309 
310                 /* Increment this thread's resume count.  */
311                 thread_ptr -> tx_thread_performance_resume_count++;
312 #endif
313             }
314         }
315     }
316     else
317     {
318 
319         /* Check to make sure the thread has not already been resumed.  */
320         if (thread_ptr -> tx_thread_state != TX_READY)
321         {
322 
323             /* Check for a delayed suspend flag.  */
324             if (thread_ptr -> tx_thread_delayed_suspend == TX_TRUE)
325             {
326 
327                 /* Clear the delayed suspend flag and change the state.  */
328                 thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;
329                 thread_ptr -> tx_thread_state =            TX_SUSPENDED;
330             }
331             else
332             {
333 
334                 /* Thread state change.  */
335                 TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)
336 
337                 /* Log the thread status change.  */
338                 TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)
339 
340                 /* Make this thread ready.  */
341 
342                 /* Change the state to ready.  */
343                 thread_ptr -> tx_thread_state =  TX_READY;
344 
345                 /* Pickup priority of thread.  */
346                 priority =  thread_ptr -> tx_thread_priority;
347 
348 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
349 
350                 /* Increment the total number of thread resumptions.  */
351                 _tx_thread_performance_resume_count++;
352 
353                 /* Increment this thread's resume count.  */
354                 thread_ptr -> tx_thread_performance_resume_count++;
355 #endif
356 
357               /* Determine if there are other threads at this priority that are
358                    ready.  */
359                 head_ptr =  _tx_thread_priority_list[priority];
360                 if (head_ptr != TX_NULL)
361                 {
362 
363                     /* Yes, there are other threads at this priority already ready.  */
364 
365                     /* Just add this thread to the priority list.  */
366                     tail_ptr =                                 head_ptr -> tx_thread_ready_previous;
367                     tail_ptr -> tx_thread_ready_next =         thread_ptr;
368                     head_ptr -> tx_thread_ready_previous =     thread_ptr;
369                     thread_ptr -> tx_thread_ready_previous =   tail_ptr;
370                     thread_ptr -> tx_thread_ready_next =       head_ptr;
371                 }
372                 else
373                 {
374 
375                     /* First thread at this priority ready.  Add to the front of the list.  */
376                     _tx_thread_priority_list[priority] =       thread_ptr;
377                     thread_ptr -> tx_thread_ready_next =       thread_ptr;
378                     thread_ptr -> tx_thread_ready_previous =   thread_ptr;
379 
380 #if TX_MAX_PRIORITIES > 32
381 
382                     /* Calculate the index into the bit map array.  */
383                     map_index =  priority/((UINT) 32);
384 
385                     /* Set the active bit to remember that the priority map has something set.  */
386                     TX_DIV32_BIT_SET(priority, priority_bit)
387                     _tx_thread_priority_map_active =  _tx_thread_priority_map_active | priority_bit;
388 #endif
389 
390                     /* Or in the thread's priority bit.  */
391                     TX_MOD32_BIT_SET(priority, priority_bit)
392                     _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
393                 }
394 
395                 /* Determine if a thread with preemption-threshold is currently scheduled.  */
396                 if (_tx_thread_preemption__threshold_scheduled != TX_NULL)
397                 {
398 
399                     /* Yes, there has been a thread with preemption-threshold scheduled.  */
400 
401                     /* Determine if this thread can run with the current preemption-threshold.   */
402                     if (priority >= _tx_thread_preemption__threshold_scheduled -> tx_thread_preempt_threshold)
403                     {
404 
405                         /* The thread cannot run because of the current preemption-threshold. Simply
406                            return at this point.  */
407 
408 #ifndef TX_NOT_INTERRUPTABLE
409 
410                         /* Decrement the preemption disable flag.  */
411                         _tx_thread_preempt_disable--;
412 #endif
413 
414 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
415 
416                         /* Debug entry.  */
417                         _tx_thread_smp_debug_entry_insert(5, 0, thread_ptr);
418 #endif
419 
420 #ifndef TX_NOT_INTERRUPTABLE
421 
422                         /* Restore interrupts.  */
423                         TX_RESTORE
424 #endif
425 
426                         /* Processing is complete, set the complete flag.  */
427                         processing_complete =  TX_TRUE;
428                     }
429                 }
430 
431                 /* Is the processing complete at this point?  */
432                 if (processing_complete == TX_FALSE)
433                 {
434 
435                     /* Determine if this newly ready thread has preemption-threshold set. If so, determine
436                        if any other threads would need to be unscheduled for this thread to execute.  */
437                     if (thread_ptr -> tx_thread_preempt_threshold < priority)
438                     {
439 
440                         /* Is there a place in the execution list for the newly ready thread?  */
441                         i =  ((UINT) 0);
442                         loop_finished =  TX_FALSE;
443 #ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
444                         while(i < ((UINT) TX_THREAD_SMP_MAX_CORES))
445 #else
446                         while(i < _tx_thread_smp_max_cores)
447 #endif
448                         {
449 
450                             /* Pickup the current execute thread for this core.  */
451                             execute_thread =  _tx_thread_execute_ptr[i];
452 
453                             /* Is there a thread mapped to this core?  */
454                             if (execute_thread == TX_NULL)
455                             {
456 
457                                 /* Get out of the loop.  */
458                                 loop_finished =  TX_TRUE;
459                             }
460                             else
461                             {
462 
463                                 /* Determine if this thread should preempt the thread in the execution list.  */
464                                 if (priority < execute_thread -> tx_thread_preempt_threshold)
465                                 {
466 
467                                     /* Get out of the loop.  */
468                                     loop_finished =  TX_TRUE;
469                                 }
470                             }
471 
472                             /* Determine if we need to get out of the loop.  */
473                             if (loop_finished == TX_TRUE)
474                             {
475 
476                                 /* Get out of the loop.  */
477                                 break;
478                             }
479 
480                             /* Move to next index.  */
481                             i++;
482                         }
483 
484                         /* Determine if there is a reason to rebalance the list.  */
485 #ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
486                         if (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
487 #else
488                         if (i < _tx_thread_smp_max_cores)
489 #endif
490                         {
491 
492                             /* Yes, the new thread has preemption-threshold set and there is a slot in the
493                                execution list for it.  */
494 
495                             /* Call the rebalance routine. This routine maps cores and ready threads.  */
496                             _tx_thread_smp_rebalance_execute_list(core_index);
497                         }
498                     }
499 #ifdef TX_THREAD_SMP_EQUAL_PRIORITY
500                     else
501                     {
502 
503                         /* For equal priority SMP, we simply use the rebalance list function.  */
504 
505                         /* Call the rebalance routine. This routine maps cores and ready threads.  */
506                         _tx_thread_smp_rebalance_execute_list(core_index);
507                     }
508 #else
509                     else
510                     {
511 
512                         /* Determine if this thread has any available cores to execute on.  */
513                         if (thread_ptr -> tx_thread_smp_cores_allowed != ((ULONG) 0))
514                         {
515 
516                             /* At this point we know that the newly ready thread does not have preemption-threshold set and that
517                                any existing preemption-threshold is not blocking this thread from executing.  */
518 
519                             /* Pickup the core this thread was previously executing on.  */
520                             i =  thread_ptr -> tx_thread_smp_core_mapped;
521 
522                             /* Pickup the currently executing thread for the previously mapped core.  */
523                             execute_thread =  _tx_thread_execute_ptr[i];
524 
525                             /* First, let's see if the last core this thread executed on is available.  */
526                             if (execute_thread == TX_NULL)
527                             {
528 
529                                 /* Yes, simply place this thread into the execute list at the same location.  */
530                                 _tx_thread_execute_ptr[i] =  thread_ptr;
531 
532                                 /* If necessary, interrupt the core with the new thread to schedule.  */
533                                 _tx_thread_smp_core_interrupt(thread_ptr, core_index, i);
534 
535                                 /* If necessary, wakeup the target core.  */
536                                 _tx_thread_smp_core_wakeup(core_index, i);
537                             }
538                             else
539                             {
540 
541                                 /* This core is not able to execute on the core it last executed on
542                                    because another thread is already scheduled on that core.  */
543 
544                                 /* Pickup the available cores for the newly ready thread.  */
545                                 available_cores =  thread_ptr -> tx_thread_smp_cores_allowed;
546 
547                                 /* Isolate the lowest set bit so we can determine if more than one core is
548                                    available.  */
549                                 available_cores =  available_cores & ((~available_cores) + ((ULONG) 1));
550 
551                                 /* Determine if either this thread or the currently schedule thread can
552                                    run on more than one core or on a different core and preemption is not
553                                    possible.  */
554                                 if ((available_cores == thread_ptr -> tx_thread_smp_cores_allowed) &&
555                                     (available_cores == execute_thread -> tx_thread_smp_cores_allowed))
556                                 {
557 
558                                     /* Both this thread and the execute thread can only execute on the same core,
559                                        so this thread can only be scheduled if its priority is less. Otherwise,
560                                        there is nothing else to examine.  */
561                                     if (thread_ptr -> tx_thread_priority < execute_thread -> tx_thread_priority)
562                                     {
563 
564                                         /* We know that we have to preempt the executing thread.  */
565 
566                                         /* Preempt the executing thread.  */
567                                         _tx_thread_execute_ptr[i] =  thread_ptr;
568 
569                                         /* If necessary, interrupt the core with the new thread to schedule.  */
570                                         _tx_thread_smp_core_interrupt(thread_ptr, core_index, i);
571 
572                                         /* If necessary, wakeup the core.  */
573                                         _tx_thread_smp_core_wakeup(core_index, i);
574                                     }
575                                 }
576                                 else
577                                 {
578 
579                                     /* Determine if there are any available cores to execute on.  */
580                                     available_cores =  _tx_thread_smp_available_cores_get();
581 
582                                     /* Determine what the possible cores are for this thread.  */
583                                     thread_possible_cores =  thread_ptr -> tx_thread_smp_cores_allowed;
584 
585                                     /* Set the thread mapped flag to false.  */
586                                     thread_mapped =  TX_FALSE;
587 
588                                     /* Determine if there are available cores.  */
589                                     if (available_cores != ((ULONG) 0))
590                                     {
591 
592                                         /* Determine if one of the available cores is allowed for this thread.  */
593                                         if ((available_cores & thread_possible_cores) != ((ULONG) 0))
594                                         {
595 
596                                             /* Calculate the lowest set bit of allowed cores.  */
597                                             test_cores =  (thread_possible_cores & available_cores);
598                                             TX_LOWEST_SET_BIT_CALCULATE(test_cores, i)
599 
600                                             /* Remember this index in the thread control block.  */
601                                             thread_ptr -> tx_thread_smp_core_mapped =  i;
602 
603                                             /* Map this thread to the free slot.  */
604                                             _tx_thread_execute_ptr[i] =  thread_ptr;
605 
606                                             /* Indicate this thread was mapped.  */
607                                             thread_mapped =  TX_TRUE;
608 
609                                             /* If necessary, wakeup the target core.  */
610                                             _tx_thread_smp_core_wakeup(core_index, i);
611                                         }
612                                         else
613                                         {
614 
615                                             /* There are available cores, however, they are all excluded.  */
616 
617                                             /* Calculate the possible cores from the cores currently scheduled.  */
618                                             possible_cores =  _tx_thread_smp_possible_cores_get();
619 
620                                             /* Determine if it is worthwhile to try to remap the execution list.  */
621                                             if ((available_cores & possible_cores) != ((ULONG) 0))
622                                             {
623 
624                                                 /* Yes, some of the currently scheduled threads can be moved.  */
625 
626                                                 /* Now determine if there could be a remap solution that will allow us to schedule this thread.  */
627 
628                                                 /* Narrow to the current possible cores.  */
629                                                 thread_possible_cores =  thread_possible_cores & possible_cores;
630 
631                                                 /* Now we need to see if one of the other threads in the non-excluded cores can be moved to make room
632                                                    for this thread.  */
633 
634                                                 /* Default the schedule list to the current execution list.  */
635                                                 _tx_thread_smp_schedule_list_setup();
636 
637                                                 /* Determine the possible core mapping.  */
638                                                 test_possible_cores =  possible_cores & ~(thread_possible_cores);
639 
640                                                 /* Attempt to remap the cores in order to schedule this thread.  */
641                                                 core =  _tx_thread_smp_remap_solution_find(thread_ptr, available_cores, thread_possible_cores, test_possible_cores);
642 
643                                                 /* Determine if remapping was successful.  */
644                                                 if (core != ((UINT) TX_THREAD_SMP_MAX_CORES))
645                                                 {
646 
647                                                     /* Clear the execute list.  */
648                                                     _tx_thread_smp_execute_list_clear();
649 
650                                                     /* Setup the execute list based on the updated schedule list.  */
651                                                     _tx_thread_smp_execute_list_setup(core_index);
652 
653                                                     /* Indicate this thread was mapped.  */
654                                                     thread_mapped =  TX_TRUE;
655                                                 }
656                                             }
657                                         }
658                                     }
659 
660                                     /* Determine if we need to investigate thread preemption.  */
661                                     if (thread_mapped == TX_FALSE)
662                                     {
663 
664                                         /* At this point, we need to first check for thread preemption possibilities.  */
665                                         lowest_priority =  _tx_thread_smp_lowest_priority_get();
666 
667                                         /* Does this thread have a higher priority?  */
668                                         if (thread_ptr -> tx_thread_priority < lowest_priority)
669                                         {
670 
671                                             /* Yes, preemption is possible.  */
672 
673                                             /* Pickup the thread to preempt.  */
674                                             preempt_thread =  _tx_thread_priority_list[lowest_priority];
675 
676                                             /* Determine if there are more than one thread ready at this priority level.  */
677                                             if (preempt_thread -> tx_thread_ready_next != preempt_thread)
678                                             {
679 
680                                                 /* Remember the list head.  */
681                                                 head_ptr =  preempt_thread;
682 
683                                                 /* Setup thread search pointer to the start of the list.  */
684                                                 next_thread =  preempt_thread -> tx_thread_ready_next;
685 
686                                                 /* Loop to find the last thread scheduled at this priority.  */
687                                                 i =  ((UINT) 0);
688 #ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
689                                                 while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
690 #else
691 
692                                                 while (i < _tx_thread_smp_max_cores)
693 #endif
694                                                 {
695 
696                                                     /* Is this thread currently scheduled?  */
697                                                     if (next_thread == _tx_thread_execute_ptr[next_thread -> tx_thread_smp_core_mapped])
698                                                     {
699 
700                                                         /* Yes, this is the new preempt thread.  */
701                                                         preempt_thread =  next_thread;
702 
703                                                         /* Increment core count. */
704                                                         i++;
705                                                     }
706 
707                                                     /* Move to the next thread.  */
708                                                     next_thread =  next_thread -> tx_thread_ready_next;
709 
710                                                     /* Are we at the head of the list?  */
711                                                     if (next_thread == head_ptr)
712                                                     {
713 
714                                                         /* End the loop.  */
715                                                         i =  ((UINT) TX_THREAD_SMP_MAX_CORES);
716                                                     }
717                                                 }
718                                             }
719 
720                                             /* Calculate the core that this thread is scheduled on.  */
721                                             possible_cores =  (((ULONG) 1) << preempt_thread -> tx_thread_smp_core_mapped);
722 
723                                             /* Determine if preemption is possible.  */
724                                             if ((thread_possible_cores & possible_cores) != ((ULONG) 0))
725                                             {
726 
727                                                 /* Pickup the newly available core.  */
728                                                 i =  preempt_thread -> tx_thread_smp_core_mapped;
729 
730                                                 /* Remember this index in the thread control block.  */
731                                                 thread_ptr -> tx_thread_smp_core_mapped =  i;
732 
733                                                 /* Map this thread to the free slot.  */
734                                                 _tx_thread_execute_ptr[i] =  thread_ptr;
735 
736                                                 /* If necessary, interrupt the core with the new thread to schedule.  */
737                                                 _tx_thread_smp_core_interrupt(thread_ptr, core_index, i);
738 
739                                                 /* If necessary, wakeup the target core.  */
740                                                 _tx_thread_smp_core_wakeup(core_index, i);
741                                             }
742                                             else
743                                             {
744 
745                                                 /* Build the list of possible thread preemptions, ordered lowest priority first.  */
746                                                 possible_cores =  _tx_thread_smp_preemptable_threads_get(thread_ptr -> tx_thread_priority, possible_preemption_list);
747 
748                                                 /* Determine if preemption is possible.  */
749 
750                                                 /* Loop through the potential threads that can be preempted.  */
751                                                 i =  ((UINT) 0);
752                                                 loop_finished =  TX_FALSE;
753                                                 while (possible_preemption_list[i] != TX_NULL)
754                                                 {
755 
756                                                     /* Pickup the thread to preempt.  */
757                                                     preempt_thread =  possible_preemption_list[i];
758 
759                                                     /* Pickup the core this thread is mapped to.  */
760                                                     j =  preempt_thread -> tx_thread_smp_core_mapped;
761 
762                                                     /* Calculate the core that this thread is scheduled on.  */
763                                                     available_cores =  (((ULONG) 1) << j);
764 
765                                                     /* Can this thread execute on this core?  */
766                                                     if ((thread_possible_cores & available_cores) != ((ULONG) 0))
767                                                     {
768 
769                                                         /* Remember this index in the thread control block.  */
770                                                         thread_ptr -> tx_thread_smp_core_mapped =  j;
771 
772                                                         /* Map this thread to the free slot.  */
773                                                         _tx_thread_execute_ptr[j] =  thread_ptr;
774 
775                                                         /* If necessary, interrupt the core with the new thread to schedule.  */
776                                                         _tx_thread_smp_core_interrupt(thread_ptr, core_index, j);
777 
778                                                         /* If necessary, wakeup the target core.  */
779                                                         _tx_thread_smp_core_wakeup(core_index, j);
780 
781                                                         /* Finished with the preemption condition.  */
782                                                         loop_finished =  TX_TRUE;
783                                                     }
784                                                     else
785                                                     {
786 
787                                                         /* No, the thread to preempt is not running on a core available to the new thread.
788                                                            Attempt to find a remapping solution.  */
789 
790                                                         /* Narrow to the current possible cores.  */
791                                                         thread_possible_cores =  thread_possible_cores & possible_cores;
792 
793                                                         /* Now we need to see if one of the other threads in the non-excluded cores can be moved to make room
794                                                            for this thread.  */
795 
796                                                         /* Temporarily set the execute thread to NULL.  */
797                                                         _tx_thread_execute_ptr[j] =  TX_NULL;
798 
799                                                         /* Default the schedule list to the current execution list.  */
800                                                         _tx_thread_smp_schedule_list_setup();
801 
802                                                         /* Determine the possible core mapping.  */
803                                                         test_possible_cores =  possible_cores & ~(thread_possible_cores);
804 
805                                                         /* Attempt to remap the cores in order to schedule this thread.  */
806                                                         core =  _tx_thread_smp_remap_solution_find(thread_ptr, available_cores, thread_possible_cores, test_possible_cores);
807 
808                                                         /* Determine if remapping was successful.  */
809                                                         if (core != ((UINT) TX_THREAD_SMP_MAX_CORES))
810                                                         {
811 
812                                                             /* Clear the execute list.  */
813                                                             _tx_thread_smp_execute_list_clear();
814 
815                                                             /* Setup the execute list based on the updated schedule list.  */
816                                                             _tx_thread_smp_execute_list_setup(core_index);
817 
818                                                             /* Finished with the preemption condition.  */
819                                                             loop_finished =  TX_TRUE;
820                                                         }
821                                                         else
822                                                         {
823 
824                                                             /* Restore the preempted thread and examine the next thread.  */
825                                                             _tx_thread_execute_ptr[j] =  preempt_thread;
826                                                         }
827                                                     }
828 
829                                                     /* Determine if we should get out of the loop.  */
830                                                     if (loop_finished == TX_TRUE)
831                                                     {
832 
833                                                         /* Yes, get out of the loop.  */
834                                                         break;
835                                                     }
836 
837                                                     /* Move to the next possible thread preemption.  */
838                                                     i++;
839                                                 }
840                                             }
841                                         }
842                                     }
843                                 }
844                             }
845                         }
846                     }
847 #endif
848                 }
849             }
850         }
851     }
852 
853     /* Determine if there is more processing.  */
854     if (processing_complete == TX_FALSE)
855     {
856 
857 #ifdef TX_THREAD_SMP_DEBUG_ENABLE
858 
859         /* Debug entry.  */
860         _tx_thread_smp_debug_entry_insert(5, 0, thread_ptr);
861 #endif
862 
863 #ifdef TX_ENABLE_EVENT_TRACE
864 
865         /* Check that the event time stamp is unchanged.  A different
866            timestamp means that a later event wrote over the thread
867            resume event. In that case, do nothing here.  */
868         if ((entry_ptr != TX_NULL) && (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp))
869         {
870 
871             /* Timestamp is the same, set the "next thread pointer" to NULL. This can
872                be used by the trace analysis tool to show idle system conditions.  */
873 #ifdef TX_MISRA_ENABLE
874             entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
875 #else
876             entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
877 #endif
878         }
879 #endif
880 
881 #ifndef TX_NOT_INTERRUPTABLE
882 
883         /* Decrement the preemption disable flag.  */
884         _tx_thread_preempt_disable--;
885 #endif
886 
887         if (_tx_thread_current_ptr[core_index] != _tx_thread_execute_ptr[core_index])
888         {
889 
890 #ifdef TX_ENABLE_STACK_CHECKING
891 
892             /* Pickup the next thread to execute.  */
893             thread_ptr =  _tx_thread_execute_ptr[core_index];
894 
895             /* Determine if there is a thread pointer.  */
896             if (thread_ptr != TX_NULL)
897             {
898 
899                 /* Check this thread's stack.  */
900                 TX_THREAD_STACK_CHECK(thread_ptr)
901             }
902 #endif
903 
904             /* Now determine if preemption should take place. This is only possible if the current thread pointer is
905                not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
906             if (_tx_thread_system_state[core_index] == ((ULONG) 0))
907             {
908 
909                 /* Is the preempt disable flag set?  */
910                 if (_tx_thread_preempt_disable == ((UINT) 0))
911                 {
912 
913 
914 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
915 
916                     /* No, there is another thread ready to run and will be scheduled upon return.  */
917                     _tx_thread_performance_non_idle_return_count++;
918 #endif
919 
920 #ifndef TX_NOT_INTERRUPTABLE
921 
922                     /* Increment the preempt disable flag in order to keep the protection.  */
923                     _tx_thread_preempt_disable++;
924 
925                     /* Restore interrupts.  */
926                     TX_RESTORE
927 #endif
928 
929                     /* Preemption is needed - return to the system!  */
930                     _tx_thread_system_return();
931 
932 #ifdef TX_NOT_INTERRUPTABLE
933 
934                     /* Setup protection again since caller is expecting that it is still in force.  */
935                     _tx_thread_smp_protect();
936 #endif
937 
938 #ifndef TX_NOT_INTERRUPTABLE
939 
940                     /* Set the processing complete flag.  */
941                     processing_complete =  TX_TRUE;
942 #endif
943                 }
944             }
945         }
946 
947 #ifndef TX_NOT_INTERRUPTABLE
948 
949         /* Determine if processing is complete.  If so, no need to restore interrupts.  */
950         if (processing_complete == TX_FALSE)
951         {
952 
953             /* Restore interrupts.  */
954             TX_RESTORE
955         }
956 #endif
957     }
958 }
959 
960 #ifdef TX_NOT_INTERRUPTABLE
/* Non-interruptable variant of thread resume.  Compiled only when
   TX_NOT_INTERRUPTABLE is defined, i.e. when the port guarantees the
   kernel is entered with interrupts already disabled.  In that build the
   normal resume path skips its own interrupt lockout/restore, so this
   entry point is just a thin forwarder to the common resume processing.

   Input:  thread_ptr - pointer to the thread control block to resume
                        (assumed valid and in a suspended state -- the
                        callers are internal kernel services; no checks
                        are performed here).
   Output: none.  */
VOID  _tx_thread_system_ni_resume(TX_THREAD *thread_ptr)
{

    /* Call system resume.  */
    _tx_thread_system_resume(thread_ptr);
}
967 #endif
968 
969