/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE

/* Include necessary system files.  */
#include "tx_api.h"
#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
#include "tx_initialize.h"
#endif
#include "tx_trace.h"
#include "tx_timer.h"
#include "tx_thread.h"

/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_system_resume                            PORTABLE C      */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function places the specified thread on the list of ready      */
/*    threads at the thread's specific priority.                          */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    thread_ptr                            Pointer to thread to resume   */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_system_return              Return to the system          */
/*    _tx_thread_system_ni_resume           Non-interruptable thread resume*/
/*    _tx_timer_system_deactivate           Timer deactivate              */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_thread_create                     Thread create function        */
/*    _tx_thread_priority_change            Thread priority change        */
/*    _tx_thread_resume                     Application resume service    */
/*    _tx_thread_timeout                    Thread timeout                */
/*    _tx_thread_wait_abort                 Thread wait abort             */
/*    Other ThreadX Components                                            */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  05-19-2020     William E. Lamie         Initial Version 6.0           */
/*  09-30-2020     Yuxin Zhou               Modified comment(s),          */
/*                                            resulting in version 6.1    */
/*                                                                        */
/**************************************************************************/
VOID  _tx_thread_system_resume(TX_THREAD *thread_ptr)
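/* Note: two alternative bodies follow. When TX_NOT_INTERRUPTABLE is not defined, the
   interruptable implementation below is built; otherwise the body after the #else is
   built, which delegates the ready-list processing to _tx_thread_system_ni_resume.  */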
#ifndef TX_NOT_INTERRUPTABLE
{

TX_INTERRUPT_SAVE_AREA

UINT            priority;
ULONG           priority_bit;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
TX_THREAD       *execute_ptr;
TX_THREAD       *current_thread;
ULONG           combined_flags;

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY       *entry_ptr;
ULONG                       time_stamp =  ((ULONG) 0);
#endif

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif


#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif

    /* Lockout interrupts while the thread is being resumed.  */
    TX_DISABLE

#ifndef TX_NO_TIMER

    /* Deactivate the timeout timer if necessary.  */
    if (thread_ptr -> tx_thread_timer.tx_timer_internal_list_head != TX_NULL)
    {

        /* Deactivate the thread's timeout timer.  */
        _tx_timer_system_deactivate(&(thread_ptr -> tx_thread_timer));
    }
    else
    {

        /* Clear the remaining time to ensure timer doesn't get activated.  */
        thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks =  ((ULONG) 0);
    }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

    /* If trace is enabled, save the current event pointer.  */
    entry_ptr =  _tx_trace_buffer_current_ptr;
#endif

    /* Log the thread status change.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&execute_ptr), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_ENABLE_EVENT_TRACE

    /* Save the time stamp for later comparison to verify that
       the event hasn't been overwritten by the time we have
       computed the next thread to execute.  */
    if (entry_ptr != TX_NULL)
    {

        /* Save time stamp.  */
        time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
    }
#endif

    /* Decrease the preempt disabled count.  */
    _tx_thread_preempt_disable--;

    /* Determine if the thread is in the process of suspending.  If so, the thread
       control block is already on the linked list so nothing needs to be done.  */
    if (thread_ptr -> tx_thread_suspending == TX_FALSE)
    {

        /* Thread is not in the process of suspending. Now check to make sure the thread
           has not already been resumed.  */
        if (thread_ptr -> tx_thread_state != TX_READY)
        {

            /* No, now check to see if the delayed suspension flag is set.  */
            if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
            {

                /* Resume the thread!  */

                /* Make this thread ready.  */

                /* Change the state to ready.  */
                thread_ptr -> tx_thread_state =  TX_READY;

                /* Pickup priority of thread.  */
                priority =  thread_ptr -> tx_thread_priority;

                /* Thread state change.  */
                TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

                /* Log the thread status change.  */
                TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the total number of thread resumptions.  */
                _tx_thread_performance_resume_count++;

                /* Increment this thread's resume count.  */
                thread_ptr -> tx_thread_performance_resume_count++;
#endif

                /* Determine if there are other threads at this priority that are
                   ready.  */
                head_ptr =  _tx_thread_priority_list[priority];
                if (head_ptr == TX_NULL)
                {

                    /* First thread at this priority ready.  Add to the front of the list.  */
                    _tx_thread_priority_list[priority] =       thread_ptr;
                    thread_ptr -> tx_thread_ready_next =       thread_ptr;
                    thread_ptr -> tx_thread_ready_previous =   thread_ptr;

#if TX_MAX_PRIORITIES > 32

                    /* Calculate the index into the bit map array.  */
                    map_index =  priority/((UINT) 32);

                    /* Set the active bit to remember that the priority map has something set.  */
                    TX_DIV32_BIT_SET(priority, priority_bit)
                    _tx_thread_priority_map_active =  _tx_thread_priority_map_active | priority_bit;
#endif

                    /* Or in the thread's priority bit.  */
                    TX_MOD32_BIT_SET(priority, priority_bit)
                    _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
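
                    /* Illustrative example (assuming the standard 32-bit TX_DIV32/TX_MOD32 map
                       macros): a thread at priority 40 with TX_MAX_PRIORITIES > 32 yields
                       map_index = 40/32 = 1 and a bit value of 1 << (40 % 32), i.e. bit 8
                       of _tx_thread_priority_maps[1].  */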

                    /* Determine if this newly ready thread is the highest priority.  */
                    if (priority < _tx_thread_highest_priority)
                    {

                        /* A new highest priority thread is present. */

                        /* Update the highest priority variable.  */
                        _tx_thread_highest_priority =  priority;

                        /* Pickup the execute pointer. Since it is going to be referenced multiple
                           times, it is placed in a local variable.  */
                        execute_ptr =  _tx_thread_execute_ptr;

                        /* Determine if no thread is currently executing.  */
                        if (execute_ptr == TX_NULL)
                        {

                            /* Simply setup the execute pointer.  */
                            _tx_thread_execute_ptr =  thread_ptr;
                        }
                        else
                        {

                            /* Another thread has been scheduled for execution.  */

                            /* Check to see if this is a higher priority thread and determine if preemption is allowed.  */
                            if (priority < execute_ptr -> tx_thread_preempt_threshold)
                            {
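
                                /* Note: in ThreadX a lower number means a higher priority. For
                                   example, if the executing thread has priority 8 and a
                                   preemption-threshold of 5, only a resumed thread with priority
                                   0 through 4 gets this far.  */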

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                                /* Determine if the preempted thread had preemption-threshold set.  */
                                if (execute_ptr -> tx_thread_preempt_threshold != execute_ptr -> tx_thread_priority)
                                {

#if TX_MAX_PRIORITIES > 32

                                    /* Calculate the index into the bit map array.  */
                                    map_index =  (execute_ptr -> tx_thread_priority)/((UINT) 32);

                                    /* Set the active bit to remember that the preempt map has something set.  */
                                    TX_DIV32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                                    _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active | priority_bit;
#endif

                                    /* Remember that this thread was preempted by a thread above the thread's threshold.  */
                                    TX_MOD32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                                    _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] | priority_bit;
                                }
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                                /* Determine if the caller is an interrupt or from a thread.  */
                                if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
                                {

                                    /* Caller is a thread, so this is a solicited preemption.  */
                                    _tx_thread_performance_solicited_preemption_count++;

                                    /* Increment the thread's solicited preemption counter.  */
                                    execute_ptr -> tx_thread_performance_solicited_preemption_count++;
                                }
                                else
                                {

                                    if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
                                    {

                                        /* Caller is an interrupt, so this is an interrupt preemption.  */
                                        _tx_thread_performance_interrupt_preemption_count++;

                                        /* Increment the thread's interrupt preemption counter.  */
                                        execute_ptr -> tx_thread_performance_interrupt_preemption_count++;
                                    }
                                }

                                /* Remember the thread that preempted this thread.  */
                                execute_ptr -> tx_thread_performance_last_preempting_thread =  thread_ptr;

#endif

                                /* Yes, modify the execute thread pointer.  */
                                _tx_thread_execute_ptr =  thread_ptr;

#ifndef TX_MISRA_ENABLE

                                /* If MISRA is not-enabled, insert a preemption and return in-line for performance.  */

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                                /* Is the execute pointer different?  */
                                if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
                                {

                                    /* Move to next entry.  */
                                    _tx_thread_performance__execute_log_index++;

                                    /* Check for wrap condition.  */
                                    if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                                    {

                                        /* Set the index to the beginning.  */
                                        _tx_thread_performance__execute_log_index =  ((UINT) 0);
                                    }

                                    /* Log the new execute pointer.  */
                                    _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
                                }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

                                /* Check that the event time stamp is unchanged.  A different
                                   timestamp means that a later event wrote over the thread
                                   resume event. In that case, do nothing here.  */
                                if (entry_ptr != TX_NULL)
                                {

                                    /* Is the timestamp the same?  */
                                    if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                                    {

                                        /* Timestamp is the same, set the "next thread pointer" to NULL. This can
                                           be used by the trace analysis tool to show idle system conditions.  */
                                        entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
                                    }
                                }
#endif

                                /* Restore interrupts.  */
                                TX_RESTORE

#ifdef TX_ENABLE_STACK_CHECKING

                                /* Pickup the next execute pointer.  */
                                thread_ptr =  _tx_thread_execute_ptr;

                                /* Check this thread's stack.  */
                                TX_THREAD_STACK_CHECK(thread_ptr)
#endif

                                /* Now determine if preemption should take place. This is only possible if the current thread pointer is
                                   not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
                                TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                                if (combined_flags == ((ULONG) 0))
                                {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                                    /* There is another thread ready to run and will be scheduled upon return.  */
                                    _tx_thread_performance_non_idle_return_count++;
#endif

                                    /* Preemption is needed - return to the system!  */
                                    _tx_thread_system_return();
                                }

                                /* Return in-line when MISRA is not enabled.  */
                                return;
#endif
                            }
                        }
                    }
                }
                else
                {

                    /* No, there are other threads at this priority already ready.  */

                    /* Just add this thread to the priority list.  */
                    tail_ptr =                                 head_ptr -> tx_thread_ready_previous;
                    tail_ptr -> tx_thread_ready_next =         thread_ptr;
                    head_ptr -> tx_thread_ready_previous =     thread_ptr;
                    thread_ptr -> tx_thread_ready_previous =   tail_ptr;
                    thread_ptr -> tx_thread_ready_next =       head_ptr;
                }
            }

            /* Else, delayed suspend flag was set.  */
            else
            {

                /* Clear the delayed suspend flag and change the state.  */
                thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;
                thread_ptr -> tx_thread_state =            TX_SUSPENDED;
            }
        }
    }
    else
    {

        /* A resumption occurred in the middle of a previous thread suspension.  */

        /* Make sure the type of suspension under way is not a terminate or
           thread completion.  In either of these cases, do not void the
           interrupted suspension processing.  */
        if (thread_ptr -> tx_thread_state != TX_COMPLETED)
        {

            /* Make sure the thread isn't terminated.  */
            if (thread_ptr -> tx_thread_state != TX_TERMINATED)
            {

                /* No, now check to see if the delayed suspension flag is set.  */
                if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
                {

                    /* Clear the suspending flag.  */
                    thread_ptr -> tx_thread_suspending =   TX_FALSE;

                    /* Restore the state to ready.  */
                    thread_ptr -> tx_thread_state =        TX_READY;

                    /* Thread state change.  */
                    TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

                    /* Log the thread status change.  */
                    TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)
                }
                else
                {

                    /* Clear the delayed suspend flag and change the state.  */
                    thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;
                    thread_ptr -> tx_thread_state =            TX_SUSPENDED;
                }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the total number of thread resumptions.  */
                _tx_thread_performance_resume_count++;

                /* Increment this thread's resume count.  */
                thread_ptr -> tx_thread_performance_resume_count++;
#endif
            }
        }
    }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

    /* Is the execute pointer different?  */
    if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
    {

        /* Move to next entry.  */
        _tx_thread_performance__execute_log_index++;

        /* Check for wrap condition.  */
        if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
        {

            /* Set the index to the beginning.  */
            _tx_thread_performance__execute_log_index =  ((UINT) 0);
        }

        /* Log the new execute pointer.  */
        _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
    }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

    /* Check that the event time stamp is unchanged.  A different
       timestamp means that a later event wrote over the thread
       resume event. In that case, do nothing here.  */
    if (entry_ptr != TX_NULL)
    {

        /* Is the timestamp the same?  */
        if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
        {

            /* Timestamp is the same, set the "next thread pointer" to NULL. This can
               be used by the trace analysis tool to show idle system conditions.  */
#ifdef TX_MISRA_ENABLE
            entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
            entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
        }
    }
#endif

    /* Pickup thread pointer.  */
    TX_THREAD_GET_CURRENT(current_thread)

    /* Restore interrupts.  */
    TX_RESTORE

    /* Determine if a preemption condition is present.  */
    if (current_thread != _tx_thread_execute_ptr)
    {

#ifdef TX_ENABLE_STACK_CHECKING

        /* Pickup the next execute pointer.  */
        thread_ptr =  _tx_thread_execute_ptr;

        /* Check this thread's stack.  */
        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

        /* Now determine if preemption should take place. This is only possible if the current thread pointer is
           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
        if (combined_flags == ((ULONG) 0))
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* There is another thread ready to run and will be scheduled upon return.  */
            _tx_thread_performance_non_idle_return_count++;
#endif

            /* Preemption is needed - return to the system!  */
            _tx_thread_system_return();
        }
    }
}
#else
{

TX_INTERRUPT_SAVE_AREA
#ifdef TX_ENABLE_EVENT_TRACE
UINT            temp_state;
#endif
UINT            state;


    /* Lockout interrupts while the thread is being resumed.  */
    TX_DISABLE

    /* Decrease the preempt disabled count.  */
    _tx_thread_preempt_disable--;

    /* Determine if the thread is in the process of suspending.  If so, the thread
       control block is already on the linked list so nothing needs to be done.  */
    if (thread_ptr -> tx_thread_suspending == TX_FALSE)
    {

        /* Call the non-interruptable thread system resume function.  */
        _tx_thread_system_ni_resume(thread_ptr);
    }
    else
    {

        /* A resumption occurred in the middle of a previous thread suspension.  */

        /* Pickup the current thread state.  */
        state =  thread_ptr -> tx_thread_state;

#ifdef TX_ENABLE_EVENT_TRACE

        /* Move the state into a different variable for MISRA compliance.  */
        temp_state =  state;
#endif

        /* Log the thread status change.  */
        TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, ((ULONG) state), TX_POINTER_TO_ULONG_CONVERT(&temp_state), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

        /* Make sure the type of suspension under way is not a terminate or
           thread completion.  In either of these cases, do not void the
           interrupted suspension processing.  */
        if (state != TX_COMPLETED)
        {

            /* Check for terminated thread.  */
            if (state != TX_TERMINATED)
            {

                /* Clear the suspending flag.  */
                thread_ptr -> tx_thread_suspending =   TX_FALSE;

                /* Restore the state to ready.  */
                thread_ptr -> tx_thread_state =        TX_READY;

                /* Thread state change.  */
                TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

                /* Log the thread status change.  */
                TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the total number of thread resumptions.  */
                _tx_thread_performance_resume_count++;

                /* Increment this thread's resume count.  */
                thread_ptr -> tx_thread_performance_resume_count++;
#endif
            }
        }
    }

    /* Restore interrupts.  */
    TX_RESTORE
}

/* Define the non-interruptable version of thread resume. It is assumed at this point that
   all interrupts are disabled and will remain so during this function.  */
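
/* Note: this non-interruptable resume path is compiled only when TX_NOT_INTERRUPTABLE is
   defined; in that configuration the _tx_thread_system_resume body above calls it with
   interrupts already disabled.  */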

VOID  _tx_thread_system_ni_resume(TX_THREAD *thread_ptr)
{

UINT            priority;
ULONG           priority_bit;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
TX_THREAD       *execute_ptr;
TX_THREAD       *current_thread;
ULONG           combined_flags;

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY       *entry_ptr;
ULONG                       time_stamp =  ((ULONG) 0);
#endif

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif


#ifdef TX_ENABLE_EVENT_TRACE

    /* If trace is enabled, save the current event pointer.  */
    entry_ptr =  _tx_trace_buffer_current_ptr;
#endif

    /* Log the thread status change.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, ((ULONG) thread_ptr -> tx_thread_state), TX_POINTER_TO_ULONG_CONVERT(&execute_ptr), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_ENABLE_EVENT_TRACE

    /* Save the time stamp for later comparison to verify that
       the event hasn't been overwritten by the time we have
       computed the next thread to execute.  */
    if (entry_ptr != TX_NULL)
    {

        /* Save time stamp.  */
        time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
    }
#endif


#ifndef TX_NO_TIMER

    /* Deactivate the timeout timer if necessary.  */
    if (thread_ptr -> tx_thread_timer.tx_timer_internal_list_head != TX_NULL)
    {

        /* Deactivate the thread's timeout timer.  */
        _tx_timer_system_deactivate(&(thread_ptr -> tx_thread_timer));
    }
#endif

#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif

    /* Thread is not in the process of suspending. Now check to make sure the thread
       has not already been resumed.  */
    if (thread_ptr -> tx_thread_state != TX_READY)
    {

        /* No, now check to see if the delayed suspension flag is set.  */
        if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
        {

            /* Resume the thread!  */

            /* Make this thread ready.  */

            /* Change the state to ready.  */
            thread_ptr -> tx_thread_state =  TX_READY;

            /* Thread state change.  */
            TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

            /* Log the thread status change.  */
            TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

            /* Pickup priority of thread.  */
            priority =  thread_ptr -> tx_thread_priority;

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* Increment the total number of thread resumptions.  */
            _tx_thread_performance_resume_count++;

            /* Increment this thread's resume count.  */
            thread_ptr -> tx_thread_performance_resume_count++;
#endif

            /* Determine if there are other threads at this priority that are
               ready.  */
            head_ptr =  _tx_thread_priority_list[priority];
            if (head_ptr == TX_NULL)
            {

                /* First thread at this priority ready.  Add to the front of the list.  */
                _tx_thread_priority_list[priority] =       thread_ptr;
                thread_ptr -> tx_thread_ready_next =       thread_ptr;
                thread_ptr -> tx_thread_ready_previous =   thread_ptr;

#if TX_MAX_PRIORITIES > 32

                /* Calculate the index into the bit map array.  */
                map_index =  priority/((UINT) 32);

                /* Set the active bit to remember that the priority map has something set.  */
                TX_DIV32_BIT_SET(priority, priority_bit)
                _tx_thread_priority_map_active =  _tx_thread_priority_map_active | priority_bit;
#endif

                /* Or in the thread's priority bit.  */
                TX_MOD32_BIT_SET(priority, priority_bit)
                _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] | priority_bit;

                /* Determine if this newly ready thread is the highest priority.  */
                if (priority < _tx_thread_highest_priority)
                {

                    /* A new highest priority thread is present. */

                    /* Update the highest priority variable.  */
                    _tx_thread_highest_priority =  priority;

                    /* Pickup the execute pointer. Since it is going to be referenced multiple
                       times, it is placed in a local variable.  */
                    execute_ptr =  _tx_thread_execute_ptr;

                    /* Determine if no thread is currently executing.  */
                    if (execute_ptr == TX_NULL)
                    {

                        /* Simply setup the execute pointer.  */
                        _tx_thread_execute_ptr =  thread_ptr;
                    }
                    else
                    {

                        /* Check to see if this is a higher priority thread and determine if preemption is allowed.  */
                        if (priority < execute_ptr -> tx_thread_preempt_threshold)
                        {

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                            /* Determine if the preempted thread had preemption-threshold set.  */
                            if (execute_ptr -> tx_thread_preempt_threshold != execute_ptr -> tx_thread_priority)
                            {

#if TX_MAX_PRIORITIES > 32

                                /* Calculate the index into the bit map array.  */
                                map_index =  (execute_ptr -> tx_thread_priority)/((UINT) 32);

                                /* Set the active bit to remember that the preempt map has something set.  */
                                TX_DIV32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                                _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active | priority_bit;
#endif

                                /* Remember that this thread was preempted by a thread above the thread's threshold.  */
                                TX_MOD32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                                _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] | priority_bit;
                            }
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                            /* Determine if the caller is an interrupt or from a thread.  */
                            if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
                            {

                                /* Caller is a thread, so this is a solicited preemption.  */
                                _tx_thread_performance_solicited_preemption_count++;

                                /* Increment the thread's solicited preemption counter.  */
                                execute_ptr -> tx_thread_performance_solicited_preemption_count++;
                            }
                            else
                            {

                                if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
                                {

                                    /* Caller is an interrupt, so this is an interrupt preemption.  */
                                    _tx_thread_performance_interrupt_preemption_count++;

                                    /* Increment the thread's interrupt preemption counter.  */
                                    execute_ptr -> tx_thread_performance_interrupt_preemption_count++;
                                }
                            }

                            /* Remember the thread that preempted this thread.  */
                            execute_ptr -> tx_thread_performance_last_preempting_thread =  thread_ptr;
#endif

                            /* Yes, modify the execute thread pointer.  */
                            _tx_thread_execute_ptr =  thread_ptr;

#ifndef TX_MISRA_ENABLE

                            /* If MISRA is not-enabled, insert a preemption and return in-line for performance.  */

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                            /* Is the execute pointer different?  */
                            if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
                            {

                                /* Move to next entry.  */
                                _tx_thread_performance__execute_log_index++;

                                /* Check for wrap condition.  */
                                if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                                {

                                    /* Set the index to the beginning.  */
                                    _tx_thread_performance__execute_log_index =  ((UINT) 0);
                                }

                                /* Log the new execute pointer.  */
                                _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
                            }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

                            /* Check that the event time stamp is unchanged.  A different
                               timestamp means that a later event wrote over the thread
                               resume event. In that case, do nothing here.  */
                            if (entry_ptr != TX_NULL)
                            {

                                /* Is the timestamp the same?  */
                                if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                                {

                                    /* Timestamp is the same, set the "next thread pointer" to NULL. This can
                                       be used by the trace analysis tool to show idle system conditions.  */
                                    entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
                                }
                            }
#endif

#ifdef TX_ENABLE_STACK_CHECKING

                            /* Pickup the next execute pointer.  */
                            thread_ptr =  _tx_thread_execute_ptr;

                            /* Check this thread's stack.  */
                            TX_THREAD_STACK_CHECK(thread_ptr)
#endif

                            /* Now determine if preemption should take place. This is only possible if the current thread pointer is
                               not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
                            TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                            if (combined_flags == ((ULONG) 0))
                            {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                                /* There is another thread ready to run and will be scheduled upon return.  */
                                _tx_thread_performance_non_idle_return_count++;
#endif

                                /* Preemption is needed - return to the system!  */
                                _tx_thread_system_return();
                            }

                            /* Return in-line when MISRA is not enabled.  */
                            return;
#endif
                        }
                    }
                }
            }
            else
            {

                /* No, there are other threads at this priority already ready.  */

                /* Just add this thread to the priority list.  */
                tail_ptr =                                 head_ptr -> tx_thread_ready_previous;
                tail_ptr -> tx_thread_ready_next =         thread_ptr;
                head_ptr -> tx_thread_ready_previous =     thread_ptr;
                thread_ptr -> tx_thread_ready_previous =   tail_ptr;
                thread_ptr -> tx_thread_ready_next =       head_ptr;
            }
        }

        /* Else, delayed suspend flag was set.  */
        else
        {

            /* Clear the delayed suspend flag and change the state.  */
            thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;
            thread_ptr -> tx_thread_state =            TX_SUSPENDED;
        }
    }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

    /* Is the execute pointer different?  */
    if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
    {

        /* Move to next entry.  */
        _tx_thread_performance__execute_log_index++;

        /* Check for wrap condition.  */
        if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
        {

            /* Set the index to the beginning.  */
            _tx_thread_performance__execute_log_index =  ((UINT) 0);
        }

        /* Log the new execute pointer.  */
        _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
    }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

    /* Check that the event time stamp is unchanged.  A different
       timestamp means that a later event wrote over the thread
       resume event. In that case, do nothing here.  */
    if (entry_ptr != TX_NULL)
    {

        /* Does the timestamp match?  */
        if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
        {

            /* Timestamp is the same, set the "next thread pointer" to NULL. This can
               be used by the trace analysis tool to show idle system conditions.  */
#ifdef TX_MISRA_ENABLE
            entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
            entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
        }
    }
#endif

    /* Pickup thread pointer.  */
    TX_THREAD_GET_CURRENT(current_thread)

    /* Determine if a preemption condition is present.  */
    if (current_thread != _tx_thread_execute_ptr)
    {

#ifdef TX_ENABLE_STACK_CHECKING

        /* Pickup the next execute pointer.  */
        thread_ptr =  _tx_thread_execute_ptr;

        /* Check this thread's stack.  */
        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

        /* Now determine if preemption should take place. This is only possible if the current thread pointer is
           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
        if (combined_flags == ((ULONG) 0))
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* There is another thread ready to run and will be scheduled upon return.  */
            _tx_thread_performance_non_idle_return_count++;
#endif

            /* Preemption is needed - return to the system!  */
            _tx_thread_system_return();
        }
    }
}
#endif