/***************************************************************************
 * Copyright (c) 2024 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 **************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE

/* Include necessary system files.  */
#include "tx_api.h"
#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
#include "tx_initialize.h"
#endif
#include "tx_trace.h"
#include "tx_timer.h"
#include "tx_thread.h"

/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_system_resume                            PORTABLE C      */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function places the specified thread on the list of ready      */
/*    threads at the thread's specific priority.                          */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    thread_ptr                            Pointer to thread to resume   */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_system_return              Return to the system          */
/*    _tx_thread_system_ni_resume           Noninterruptable thread resume*/
/*    _tx_timer_system_deactivate           Timer deactivate              */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_thread_create                     Thread create function        */
/*    _tx_thread_priority_change            Thread priority change        */
/*    _tx_thread_resume                     Application resume service    */
/*    _tx_thread_timeout                    Thread timeout                */
/*    _tx_thread_wait_abort                 Thread wait abort             */
/*    Other ThreadX Components                                            */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  05-19-2020     William E. Lamie         Initial Version 6.0           */
/*  09-30-2020     Yuxin Zhou               Modified comment(s),          */
/*                                            resulting in version 6.1    */
/*                                                                        */
/**************************************************************************/
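
/* Usage sketch (illustrative only, not compiled): internal callers are expected
   to have incremented _tx_thread_preempt_disable with interrupts disabled before
   invoking this function, since the count is decremented again inside. Under that
   assumption, a typical caller sequence looks roughly like:

       TX_DISABLE
       ... caller-specific checks and list maintenance ...
       _tx_thread_preempt_disable++;
       TX_RESTORE
       _tx_thread_system_resume(thread_ptr);

   The decrement of _tx_thread_preempt_disable and the TX_DISABLE/TX_RESTORE
   pairing in the function body mirror this protocol.  */
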
VOID  _tx_thread_system_resume(TX_THREAD *thread_ptr)
#ifndef TX_NOT_INTERRUPTABLE
{

TX_INTERRUPT_SAVE_AREA

UINT            priority;
ULONG           priority_bit;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
TX_THREAD       *execute_ptr;
TX_THREAD       *current_thread;
ULONG           combined_flags;

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY       *entry_ptr;
ULONG                       time_stamp =  ((ULONG) 0);
#endif

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif


#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif

    /* Lockout interrupts while the thread is being resumed.  */
    TX_DISABLE

#ifndef TX_NO_TIMER

    /* Deactivate the timeout timer if necessary.  */
    if (thread_ptr -> tx_thread_timer.tx_timer_internal_list_head != TX_NULL)
    {

        /* Deactivate the thread's timeout timer.  */
        _tx_timer_system_deactivate(&(thread_ptr -> tx_thread_timer));
    }
    else
    {

        /* Clear the remaining time to ensure timer doesn't get activated.  */
        thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks =  ((ULONG) 0);
    }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

    /* If trace is enabled, save the current event pointer.  */
    entry_ptr =  _tx_trace_buffer_current_ptr;
#endif

    /* Log the thread status change.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&execute_ptr), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_ENABLE_EVENT_TRACE

    /* Save the time stamp for later comparison to verify that
       the event hasn't been overwritten by the time we have
       computed the next thread to execute.  */
    if (entry_ptr != TX_NULL)
    {

        /* Save time stamp.  */
        time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
    }
#endif

    /* Decrease the preempt disabled count.  */
    _tx_thread_preempt_disable--;

    /* Determine if the thread is in the process of suspending.  If so, the thread
       control block is already on the linked list so nothing needs to be done.  */
    if (thread_ptr -> tx_thread_suspending == TX_FALSE)
    {

        /* Thread is not in the process of suspending. Now check to make sure the thread
           has not already been resumed.  */
        if (thread_ptr -> tx_thread_state != TX_READY)
        {

            /* No, now check to see if the delayed suspension flag is set.  */
            if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
            {

                /* Resume the thread!  */

                /* Make this thread ready.  */

                /* Change the state to ready.  */
                thread_ptr -> tx_thread_state =  TX_READY;

                /* Pickup priority of thread.  */
                priority =  thread_ptr -> tx_thread_priority;

                /* Thread state change.  */
                TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

                /* Log the thread status change.  */
                TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the total number of thread resumptions.  */
                _tx_thread_performance_resume_count++;

                /* Increment this thread's resume count.  */
                thread_ptr -> tx_thread_performance_resume_count++;
#endif

                /* Determine if there are other threads at this priority that are
                   ready.  */
                head_ptr =  _tx_thread_priority_list[priority];
                if (head_ptr == TX_NULL)
                {

                    /* First thread at this priority ready.  Add to the front of the list.  */
                    _tx_thread_priority_list[priority] =       thread_ptr;
                    thread_ptr -> tx_thread_ready_next =       thread_ptr;
                    thread_ptr -> tx_thread_ready_previous =   thread_ptr;

#if TX_MAX_PRIORITIES > 32

                    /* Calculate the index into the bit map array.  */
                    map_index =  priority/((UINT) 32);

                    /* Set the active bit to remember that the priority map has something set.  */
                    TX_DIV32_BIT_SET(priority, priority_bit)
                    _tx_thread_priority_map_active =  _tx_thread_priority_map_active | priority_bit;
#endif

                    /* Or in the thread's priority bit.  */
                    TX_MOD32_BIT_SET(priority, priority_bit)
                    _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
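
                    /* Worked example (assuming the default TX_DIV32_BIT_SET and
                       TX_MOD32_BIT_SET definitions of 1 << (priority/32) and
                       1 << (priority%32)): with TX_MAX_PRIORITIES of 64 and a thread
                       of priority 40, map_index is 1, bit 8 is set in
                       _tx_thread_priority_maps[1], and bit 1 is set in
                       _tx_thread_priority_map_active.  MAP_INDEX typically expands to
                       map_index when TX_MAX_PRIORITIES > 32 and to 0 otherwise.  */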

                    /* Determine if this newly ready thread is the highest priority.  */
                    if (priority < _tx_thread_highest_priority)
                    {

                        /* A new highest priority thread is present. */

                        /* Update the highest priority variable.  */
                        _tx_thread_highest_priority =  priority;

                        /* Pickup the execute pointer. Since it is going to be referenced multiple
                           times, it is placed in a local variable.  */
                        execute_ptr =  _tx_thread_execute_ptr;

                        /* Determine if no thread is currently executing.  */
                        if (execute_ptr == TX_NULL)
                        {

                            /* Simply setup the execute pointer.  */
                            _tx_thread_execute_ptr =  thread_ptr;
                        }
                        else
                        {

                            /* Another thread has been scheduled for execution.  */

                            /* Check to see if this is a higher priority thread and determine if preemption is allowed.  */
                            if (priority < execute_ptr -> tx_thread_preempt_threshold)
                            {
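                                /* Illustrative note on the preemption-threshold check above:
                                   lower numeric values are higher priorities, so a running
                                   thread with priority 20 and preempt-threshold 15 is displaced
                                   here only by newly ready threads of priority 0 through 14; a
                                   newly ready priority-18 thread would not preempt it.  */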

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                                /* Determine if the preempted thread had preemption-threshold set.  */
                                if (execute_ptr -> tx_thread_preempt_threshold != execute_ptr -> tx_thread_priority)
                                {

#if TX_MAX_PRIORITIES > 32

                                    /* Calculate the index into the bit map array.  */
                                    map_index =  (execute_ptr -> tx_thread_priority)/((UINT) 32);

                                    /* Set the active bit to remember that the preempt map has something set.  */
                                    TX_DIV32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                                    _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active | priority_bit;
#endif

                                    /* Remember that this thread was preempted by a thread above the thread's threshold.  */
                                    TX_MOD32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                                    _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] | priority_bit;
                                }
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                                /* Determine if the caller is an interrupt or from a thread.  */
                                if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
                                {

                                    /* Caller is a thread, so this is a solicited preemption.  */
                                    _tx_thread_performance_solicited_preemption_count++;

                                    /* Increment the thread's solicited preemption counter.  */
                                    execute_ptr -> tx_thread_performance_solicited_preemption_count++;
                                }
                                else
                                {

                                    if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
                                    {

                                        /* Caller is an interrupt, so this is an interrupt preemption.  */
                                        _tx_thread_performance_interrupt_preemption_count++;

                                        /* Increment the thread's interrupt preemption counter.  */
                                        execute_ptr -> tx_thread_performance_interrupt_preemption_count++;
                                    }
                                }

                                /* Remember the thread that preempted this thread.  */
                                execute_ptr -> tx_thread_performance_last_preempting_thread =  thread_ptr;

#endif

                                /* Yes, modify the execute thread pointer.  */
                                _tx_thread_execute_ptr =  thread_ptr;

#ifndef TX_MISRA_ENABLE

                                /* If MISRA is not-enabled, insert a preemption and return in-line for performance.  */

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                                /* Is the execute pointer different?  */
                                if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
                                {

                                    /* Move to next entry.  */
                                    _tx_thread_performance__execute_log_index++;

                                    /* Check for wrap condition.  */
                                    if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                                    {

                                        /* Set the index to the beginning.  */
                                        _tx_thread_performance__execute_log_index =  ((UINT) 0);
                                    }

                                    /* Log the new execute pointer.  */
                                    _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
                                }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

                                /* Check that the event time stamp is unchanged.  A different
                                   timestamp means that a later event wrote over the thread
                                   resume event. In that case, do nothing here.  */
                                if (entry_ptr != TX_NULL)
                                {

                                    /* Is the timestamp the same?  */
                                    if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                                    {

                                        /* Timestamp is the same, set the "next thread pointer" to NULL. This can
                                           be used by the trace analysis tool to show idle system conditions.  */
                                        entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
                                    }
                                }
#endif

                                /* Restore interrupts.  */
                                TX_RESTORE

#ifdef TX_ENABLE_STACK_CHECKING

                                /* Pickup the next execute pointer.  */
                                thread_ptr =  _tx_thread_execute_ptr;

                                /* Check this thread's stack.  */
                                TX_THREAD_STACK_CHECK(thread_ptr)
#endif

                                /* Now determine if preemption should take place. This is only possible if the current thread pointer is
                                   not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
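                                /* In the default definition, TX_THREAD_SYSTEM_RETURN_CHECK combines
                                   the preempt-disable count with the system (interrupt nesting)
                                   state into combined_flags, so a value of zero means it is safe to
                                   enter the scheduler from here.  */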
                                TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                                if (combined_flags == ((ULONG) 0))
                                {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                                    /* There is another thread ready to run and will be scheduled upon return.  */
                                    _tx_thread_performance_non_idle_return_count++;
#endif

                                    /* Preemption is needed - return to the system!  */
                                    _tx_thread_system_return();
                                }

                                /* Return in-line when MISRA is not enabled.  */
                                return;
#endif
                            }
                        }
                    }
                }
                else
                {

                    /* No, there are other threads at this priority already ready.  */

                    /* Just add this thread to the priority list.  */
                    tail_ptr =                                 head_ptr -> tx_thread_ready_previous;
                    tail_ptr -> tx_thread_ready_next =         thread_ptr;
                    head_ptr -> tx_thread_ready_previous =     thread_ptr;
                    thread_ptr -> tx_thread_ready_previous =   tail_ptr;
                    thread_ptr -> tx_thread_ready_next =       head_ptr;
                }
            }

            /* Else, delayed suspend flag was set.  */
            else
            {

                /* Clear the delayed suspend flag and change the state.  */
                thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;
                thread_ptr -> tx_thread_state =            TX_SUSPENDED;
            }
        }
    }
    else
    {

        /* A resumption occurred in the middle of a previous thread suspension.  */

        /* Make sure the type of suspension under way is not a terminate or
           thread completion.  In either of these cases, do not void the
           interrupted suspension processing.  */
        if (thread_ptr -> tx_thread_state != TX_COMPLETED)
        {

            /* Make sure the thread isn't terminated.  */
            if (thread_ptr -> tx_thread_state != TX_TERMINATED)
            {

                /* No, now check to see if the delayed suspension flag is set.  */
                if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
                {

                    /* Clear the suspending flag.  */
                    thread_ptr -> tx_thread_suspending =   TX_FALSE;

                    /* Restore the state to ready.  */
                    thread_ptr -> tx_thread_state =        TX_READY;

                    /* Thread state change.  */
                    TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

                    /* Log the thread status change.  */
                    TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)
                }
                else
                {

                    /* Clear the delayed suspend flag and change the state.  */
                    thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;
                    thread_ptr -> tx_thread_state =            TX_SUSPENDED;
                }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the total number of thread resumptions.  */
                _tx_thread_performance_resume_count++;

                /* Increment this thread's resume count.  */
                thread_ptr -> tx_thread_performance_resume_count++;
#endif
            }
        }
    }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

    /* Is the execute pointer different?  */
    if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
    {

        /* Move to next entry.  */
        _tx_thread_performance__execute_log_index++;

        /* Check for wrap condition.  */
        if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
        {

            /* Set the index to the beginning.  */
            _tx_thread_performance__execute_log_index =  ((UINT) 0);
        }

        /* Log the new execute pointer.  */
        _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
    }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

    /* Check that the event time stamp is unchanged.  A different
       timestamp means that a later event wrote over the thread
       resume event. In that case, do nothing here.  */
    if (entry_ptr != TX_NULL)
    {

        /* Is the timestamp the same?  */
        if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
        {

            /* Timestamp is the same, set the "next thread pointer" to NULL. This can
               be used by the trace analysis tool to show idle system conditions.  */
#ifdef TX_MISRA_ENABLE
            entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
            entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
        }
    }
#endif

    /* Pickup thread pointer.  */
    TX_THREAD_GET_CURRENT(current_thread)

    /* Restore interrupts.  */
    TX_RESTORE

    /* Determine if a preemption condition is present.  */
    if (current_thread != _tx_thread_execute_ptr)
    {

#ifdef TX_ENABLE_STACK_CHECKING

        /* Pickup the next execute pointer.  */
        thread_ptr =  _tx_thread_execute_ptr;

        /* Check this thread's stack.  */
        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

        /* Now determine if preemption should take place. This is only possible if the current thread pointer is
           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
        if (combined_flags == ((ULONG) 0))
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* There is another thread ready to run and will be scheduled upon return.  */
            _tx_thread_performance_non_idle_return_count++;
#endif

            /* Preemption is needed - return to the system!  */
            _tx_thread_system_return();
        }
    }
}
#else
{

TX_INTERRUPT_SAVE_AREA
#ifdef TX_ENABLE_EVENT_TRACE
UINT            temp_state;
#endif
UINT            state;


    /* Lockout interrupts while the thread is being resumed.  */
    TX_DISABLE

    /* Decrease the preempt disabled count.  */
    _tx_thread_preempt_disable--;

    /* Determine if the thread is in the process of suspending.  If so, the thread
       control block is already on the linked list so nothing needs to be done.  */
    if (thread_ptr -> tx_thread_suspending == TX_FALSE)
    {

        /* Call the non-interruptable thread system resume function.  */
        _tx_thread_system_ni_resume(thread_ptr);
    }
    else
    {

        /* A resumption occurred in the middle of a previous thread suspension.  */

        /* Pickup the current thread state.  */
        state =  thread_ptr -> tx_thread_state;

#ifdef TX_ENABLE_EVENT_TRACE

        /* Move the state into a different variable for MISRA compliance.  */
        temp_state =  state;
#endif

        /* Log the thread status change.  */
        TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, ((ULONG) state), TX_POINTER_TO_ULONG_CONVERT(&temp_state), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

        /* Make sure the type of suspension under way is not a terminate or
           thread completion.  In either of these cases, do not void the
           interrupted suspension processing.  */
        if (state != TX_COMPLETED)
        {

            /* Check for terminated thread.  */
            if (state != TX_TERMINATED)
            {

                /* Clear the suspending flag.  */
                thread_ptr -> tx_thread_suspending =   TX_FALSE;

                /* Restore the state to ready.  */
                thread_ptr -> tx_thread_state =        TX_READY;

                /* Thread state change.  */
                TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

                /* Log the thread status change.  */
                TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the total number of thread resumptions.  */
                _tx_thread_performance_resume_count++;

                /* Increment this thread's resume count.  */
                thread_ptr -> tx_thread_performance_resume_count++;
#endif
            }
        }
    }

    /* Restore interrupts.  */
    TX_RESTORE
}

/* Define the non-interruptable version of thread resume. It is assumed at this point that
   all interrupts are disabled and will remain so during this function.  */
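
/* Note (informational): because this routine assumes interrupts are already locked out,
   it performs no TX_DISABLE/TX_RESTORE of its own; callers built with
   TX_NOT_INTERRUPTABLE, such as the wrapper above, are responsible for that bracketing.  */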

VOID  _tx_thread_system_ni_resume(TX_THREAD *thread_ptr)
{

UINT            priority;
ULONG           priority_bit;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
TX_THREAD       *execute_ptr;
TX_THREAD       *current_thread;
ULONG           combined_flags;

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY       *entry_ptr;
ULONG                       time_stamp =  ((ULONG) 0);
#endif

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif


#ifdef TX_ENABLE_EVENT_TRACE

    /* If trace is enabled, save the current event pointer.  */
    entry_ptr =  _tx_trace_buffer_current_ptr;
#endif

    /* Log the thread status change.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, ((ULONG) thread_ptr -> tx_thread_state), TX_POINTER_TO_ULONG_CONVERT(&execute_ptr), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_ENABLE_EVENT_TRACE

    /* Save the time stamp for later comparison to verify that
       the event hasn't been overwritten by the time we have
       computed the next thread to execute.  */
    if (entry_ptr != TX_NULL)
    {

        /* Save time stamp.  */
        time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
    }
#endif


#ifndef TX_NO_TIMER

    /* Deactivate the timeout timer if necessary.  */
    if (thread_ptr -> tx_thread_timer.tx_timer_internal_list_head != TX_NULL)
    {

        /* Deactivate the thread's timeout timer.  */
        _tx_timer_system_deactivate(&(thread_ptr -> tx_thread_timer));
    }
#endif

#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif

    /* Thread is not in the process of suspending. Now check to make sure the thread
       has not already been resumed.  */
    if (thread_ptr -> tx_thread_state != TX_READY)
    {

        /* No, now check to see if the delayed suspension flag is set.  */
        if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
        {

            /* Resume the thread!  */

            /* Make this thread ready.  */

            /* Change the state to ready.  */
            thread_ptr -> tx_thread_state =  TX_READY;

            /* Thread state change.  */
            TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

            /* Log the thread status change.  */
            TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

            /* Pickup priority of thread.  */
            priority =  thread_ptr -> tx_thread_priority;

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* Increment the total number of thread resumptions.  */
            _tx_thread_performance_resume_count++;

            /* Increment this thread's resume count.  */
            thread_ptr -> tx_thread_performance_resume_count++;
#endif

            /* Determine if there are other threads at this priority that are
               ready.  */
            head_ptr =  _tx_thread_priority_list[priority];
            if (head_ptr == TX_NULL)
            {

                /* First thread at this priority ready.  Add to the front of the list.  */
                _tx_thread_priority_list[priority] =       thread_ptr;
                thread_ptr -> tx_thread_ready_next =       thread_ptr;
                thread_ptr -> tx_thread_ready_previous =   thread_ptr;

#if TX_MAX_PRIORITIES > 32

                /* Calculate the index into the bit map array.  */
                map_index =  priority/((UINT) 32);

                /* Set the active bit to remember that the priority map has something set.  */
                TX_DIV32_BIT_SET(priority, priority_bit)
                _tx_thread_priority_map_active =  _tx_thread_priority_map_active | priority_bit;
#endif

                /* Or in the thread's priority bit.  */
                TX_MOD32_BIT_SET(priority, priority_bit)
                _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] | priority_bit;

                /* Determine if this newly ready thread is the highest priority.  */
                if (priority < _tx_thread_highest_priority)
                {

                    /* A new highest priority thread is present. */

                    /* Update the highest priority variable.  */
                    _tx_thread_highest_priority =  priority;

                    /* Pickup the execute pointer. Since it is going to be referenced multiple
                       times, it is placed in a local variable.  */
                    execute_ptr =  _tx_thread_execute_ptr;

                    /* Determine if no thread is currently executing.  */
                    if (execute_ptr == TX_NULL)
                    {

                        /* Simply setup the execute pointer.  */
                        _tx_thread_execute_ptr =  thread_ptr;
                    }
                    else
                    {

                        /* Check to see if this is a higher priority thread and determine if preemption is allowed.  */
                        if (priority < execute_ptr -> tx_thread_preempt_threshold)
                        {

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                            /* Determine if the preempted thread had preemption-threshold set.  */
                            if (execute_ptr -> tx_thread_preempt_threshold != execute_ptr -> tx_thread_priority)
                            {

#if TX_MAX_PRIORITIES > 32

                                /* Calculate the index into the bit map array.  */
                                map_index =  (execute_ptr -> tx_thread_priority)/((UINT) 32);

                                /* Set the active bit to remember that the preempt map has something set.  */
                                TX_DIV32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                                _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active | priority_bit;
#endif

                                /* Remember that this thread was preempted by a thread above the thread's threshold.  */
                                TX_MOD32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                                _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] | priority_bit;
                            }
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                            /* Determine if the caller is an interrupt or from a thread.  */
                            if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
                            {

                                /* Caller is a thread, so this is a solicited preemption.  */
                                _tx_thread_performance_solicited_preemption_count++;

                                /* Increment the thread's solicited preemption counter.  */
                                execute_ptr -> tx_thread_performance_solicited_preemption_count++;
                            }
                            else
                            {

                                if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
                                {

                                    /* Caller is an interrupt, so this is an interrupt preemption.  */
                                    _tx_thread_performance_interrupt_preemption_count++;

                                    /* Increment the thread's interrupt preemption counter.  */
                                    execute_ptr -> tx_thread_performance_interrupt_preemption_count++;
                                }
                            }

                            /* Remember the thread that preempted this thread.  */
                            execute_ptr -> tx_thread_performance_last_preempting_thread =  thread_ptr;
#endif

                            /* Yes, modify the execute thread pointer.  */
                            _tx_thread_execute_ptr =  thread_ptr;

#ifndef TX_MISRA_ENABLE

                            /* If MISRA is not-enabled, insert a preemption and return in-line for performance.  */

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                            /* Is the execute pointer different?  */
                            if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
                            {

                                /* Move to next entry.  */
                                _tx_thread_performance__execute_log_index++;

                                /* Check for wrap condition.  */
                                if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                                {

                                    /* Set the index to the beginning.  */
                                    _tx_thread_performance__execute_log_index =  ((UINT) 0);
                                }

                                /* Log the new execute pointer.  */
                                _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
                            }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

                            /* Check that the event time stamp is unchanged.  A different
                               timestamp means that a later event wrote over the thread
                               resume event. In that case, do nothing here.  */
                            if (entry_ptr != TX_NULL)
                            {

                                /* Is the timestamp the same?  */
                                if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                                {

                                    /* Timestamp is the same, set the "next thread pointer" to NULL. This can
                                       be used by the trace analysis tool to show idle system conditions.  */
                                    entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
                                }
                            }
#endif

#ifdef TX_ENABLE_STACK_CHECKING

                            /* Pickup the next execute pointer.  */
                            thread_ptr =  _tx_thread_execute_ptr;

                            /* Check this thread's stack.  */
                            TX_THREAD_STACK_CHECK(thread_ptr)
#endif

                            /* Now determine if preemption should take place. This is only possible if the current thread pointer is
                               not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
                            TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                            if (combined_flags == ((ULONG) 0))
                            {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                                /* There is another thread ready to run and will be scheduled upon return.  */
                                _tx_thread_performance_non_idle_return_count++;
#endif

                                /* Preemption is needed - return to the system!  */
                                _tx_thread_system_return();
                            }

                            /* Return in-line when MISRA is not enabled.  */
                            return;
#endif
                        }
                    }
                }
            }
            else
            {

                /* No, there are other threads at this priority already ready.  */

                /* Just add this thread to the priority list.  */
                tail_ptr =                                 head_ptr -> tx_thread_ready_previous;
                tail_ptr -> tx_thread_ready_next =         thread_ptr;
                head_ptr -> tx_thread_ready_previous =     thread_ptr;
                thread_ptr -> tx_thread_ready_previous =   tail_ptr;
                thread_ptr -> tx_thread_ready_next =       head_ptr;
            }
        }

        /* Else, delayed suspend flag was set.  */
        else
        {

            /* Clear the delayed suspend flag and change the state.  */
            thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;
            thread_ptr -> tx_thread_state =            TX_SUSPENDED;
        }
    }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

    /* Is the execute pointer different?  */
    if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
    {

        /* Move to next entry.  */
        _tx_thread_performance__execute_log_index++;

        /* Check for wrap condition.  */
        if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
        {

            /* Set the index to the beginning.  */
            _tx_thread_performance__execute_log_index =  ((UINT) 0);
        }

        /* Log the new execute pointer.  */
        _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
    }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

    /* Check that the event time stamp is unchanged.  A different
       timestamp means that a later event wrote over the thread
       resume event. In that case, do nothing here.  */
    if (entry_ptr != TX_NULL)
    {

        /* Does the timestamp match?  */
        if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
        {

            /* Timestamp is the same, set the "next thread pointer" to NULL. This can
               be used by the trace analysis tool to show idle system conditions.  */
#ifdef TX_MISRA_ENABLE
            entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
            entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
        }
    }
#endif

    /* Pickup thread pointer.  */
    TX_THREAD_GET_CURRENT(current_thread)

    /* Determine if a preemption condition is present.  */
    if (current_thread != _tx_thread_execute_ptr)
    {

#ifdef TX_ENABLE_STACK_CHECKING

        /* Pickup the next execute pointer.  */
        thread_ptr =  _tx_thread_execute_ptr;

        /* Check this thread's stack.  */
        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

        /* Now determine if preemption should take place. This is only possible if the current thread pointer is
           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
        if (combined_flags == ((ULONG) 0))
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* There is another thread ready to run and will be scheduled upon return.  */
            _tx_thread_performance_non_idle_return_count++;
#endif

            /* Preemption is needed - return to the system!  */
            _tx_thread_system_return();
        }
    }
}
#endif
