1 /**************************************************************************/
2 /* */
3 /* Copyright (c) Microsoft Corporation. All rights reserved. */
4 /* */
5 /* This software is licensed under the Microsoft Software License */
6 /* Terms for Microsoft Azure RTOS. Full text of the license can be */
7 /* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
8 /* and in the root directory of this software. */
9 /* */
10 /**************************************************************************/
11
12
13 /**************************************************************************/
14 /**************************************************************************/
15 /** */
16 /** ThreadX Component */
17 /** */
18 /** Thread */
19 /** */
20 /**************************************************************************/
21 /**************************************************************************/
22
23 #define TX_SOURCE_CODE
24
25
26 /* Include necessary system files. */
27
28 #include "tx_api.h"
29 #include "tx_trace.h"
30 #include "tx_timer.h"
31 #include "tx_thread.h"
32
33
34 /**************************************************************************/
35 /* */
36 /* FUNCTION RELEASE */
37 /* */
38 /* _tx_thread_system_suspend PORTABLE C */
39 /* 6.1 */
40 /* */
41 /* AUTHOR */
42 /* */
43 /* William E. Lamie, Microsoft Corporation */
44 /* */
45 /* DESCRIPTION */
46 /* */
47 /* This function suspends the specified thread and changes the thread */
48 /* state to the value specified. Note: delayed suspension processing */
49 /* is handled outside of this routine. */
50 /* */
51 /* INPUT */
52 /* */
53 /* thread_ptr Pointer to thread to suspend */
54 /* */
55 /* OUTPUT */
56 /* */
57 /* None */
58 /* */
59 /* CALLS */
60 /* */
61 /* _tx_thread_system_return Return to system */
62 /* _tx_thread_system_preempt_check System preemption check */
63 /* _tx_timer_system_activate Activate timer for timeout */
64 /* */
65 /* CALLED BY */
66 /* */
67 /* _tx_thread_priority_change Thread priority change */
68 /* _tx_thread_shell_entry Thread shell function */
69 /* _tx_thread_sleep Thread sleep */
70 /* _tx_thread_suspend Application thread suspend */
71 /* _tx_thread_terminate Thread terminate */
72 /* Other ThreadX Components */
73 /* */
74 /* RELEASE HISTORY */
75 /* */
76 /* DATE NAME DESCRIPTION */
77 /* */
78 /* 05-19-2020 William E. Lamie Initial Version 6.0 */
79 /* 09-30-2020 Yuxin Zhou Modified comment(s), */
80 /* resulting in version 6.1 */
81 /* */
82 /**************************************************************************/
/* NOTE(review): Two alternate bodies follow, selected by TX_NOT_INTERRUPTABLE.
   Callers are expected to have set thread_ptr -> tx_thread_suspending to TX_TRUE,
   stored the new state in tx_thread_state, and incremented _tx_thread_preempt_disable
   before calling (this routine decrements it) -- presumably enforced by the callers
   listed in the header above; confirm against _tx_thread_suspend et al.
   MAP_INDEX is presumably a macro from tx_thread.h that selects map_index when
   TX_MAX_PRIORITIES > 32 and 0 otherwise -- TODO confirm.  */
VOID  _tx_thread_system_suspend(TX_THREAD *thread_ptr)
#ifndef TX_NOT_INTERRUPTABLE
{

TX_INTERRUPT_SAVE_AREA

UINT            priority;
UINT            base_priority;
ULONG           priority_map;
ULONG           priority_bit;
ULONG           combined_flags;
TX_THREAD       *ready_next;
TX_THREAD       *ready_previous;
TX_THREAD       *current_thread;

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif

#ifndef TX_NO_TIMER
ULONG           timeout;
#endif

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY   *entry_ptr;
ULONG                   time_stamp =  ((ULONG) 0);
#endif

    /* Pickup thread pointer.  */
    TX_THREAD_GET_CURRENT(current_thread)

#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif

    /* Lockout interrupts while the thread is being suspended.  */
    TX_DISABLE

#ifndef TX_NO_TIMER

    /* Is the current thread suspending?  */
    if (thread_ptr == current_thread)
    {

        /* Pickup the wait option.  The caller stored the timeout in the thread's
           internal timer before invoking this routine.  */
        timeout =  thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks;

        /* Determine if an activation is needed.  */
        if (timeout != TX_NO_WAIT)
        {

            /* Make sure the suspension is not a wait-forever.  */
            if (timeout != TX_WAIT_FOREVER)
            {

                /* Activate the thread timer with the timeout value setup in the caller.  */
                _tx_timer_system_activate(&(thread_ptr -> tx_thread_timer));
            }
        }

        /* Yes, reset time slice for current thread.  */
        _tx_timer_time_slice =  thread_ptr -> tx_thread_new_time_slice;
    }
#endif

    /* Decrease the preempt disabled count (balances the increment done by the caller).  */
    _tx_thread_preempt_disable--;

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

    /* Increment the thread's suspend count.  */
    thread_ptr -> tx_thread_performance_suspend_count++;

    /* Increment the total number of thread suspensions.  */
    _tx_thread_performance_suspend_count++;
#endif

    /* Check to make sure the thread suspending flag is still set.  If not, it
       has already been resumed (e.g. by an ISR) before we got here, and there
       is nothing to suspend.  */
    if (thread_ptr -> tx_thread_suspending == TX_TRUE)
    {

        /* Thread state change.  */
        TX_THREAD_STATE_CHANGE(thread_ptr, thread_ptr -> tx_thread_state)

        /* Log the thread status change.  */
        TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, thread_ptr -> tx_thread_state)

#ifdef TX_ENABLE_EVENT_TRACE

        /* If trace is enabled, save the current event pointer.  */
        entry_ptr =  _tx_trace_buffer_current_ptr;
#endif

        /* Log the thread status change.  */
        TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SUSPEND, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&priority), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_ENABLE_EVENT_TRACE

        /* Save the time stamp for later comparison to verify that
           the event hasn't been overwritten by the time we have
           computed the next thread to execute.  */
        if (entry_ptr != TX_NULL)
        {

            /* Save time stamp.  */
            time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
        }
#endif

        /* Actually suspend this thread.  But first, clear the suspending flag.  */
        thread_ptr -> tx_thread_suspending =  TX_FALSE;

        /* Pickup priority of thread.  */
        priority =  thread_ptr -> tx_thread_priority;

        /* Pickup the next ready thread pointer.  */
        ready_next =  thread_ptr -> tx_thread_ready_next;

        /* Determine if there are other threads at this priority that are
           ready.  */
        if (ready_next != thread_ptr)
        {

            /* Yes, there are other threads at this priority ready.  */

            /* Pickup the previous ready thread pointer.  */
            ready_previous =  thread_ptr -> tx_thread_ready_previous;

            /* Just remove this thread from the circular doubly-linked priority list.  */
            ready_next -> tx_thread_ready_previous =    ready_previous;
            ready_previous -> tx_thread_ready_next =    ready_next;

            /* Determine if this is the head of the priority list.  */
            if (_tx_thread_priority_list[priority] == thread_ptr)
            {

                /* Update the head pointer of this priority list.  */
                _tx_thread_priority_list[priority] =  ready_next;

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

#if TX_MAX_PRIORITIES > 32

                /* Calculate the index into the bit map array.  */
                map_index =  priority/((UINT) 32);
#endif

                /* Check for a thread preempted that had preemption threshold set.  */
                if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
                {

                    /* Ensure that this thread's priority is clear in the preempt map.  */
                    TX_MOD32_BIT_SET(priority, priority_bit)
                    _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

                    /* Determine if there are any other bits set in this preempt map.  */
                    if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
                    {

                        /* No, clear the active bit to signify this preempt map has nothing set.  */
                        TX_DIV32_BIT_SET(priority, priority_bit)
                        _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active & (~(priority_bit));
                    }
#endif
                }
#endif
            }
        }
        else
        {

            /* This is the only thread at this priority ready to run.  Set the head
               pointer to NULL.  */
            _tx_thread_priority_list[priority] =    TX_NULL;

#if TX_MAX_PRIORITIES > 32

            /* Calculate the index into the bit map array.  */
            map_index =  priority/((UINT) 32);
#endif

            /* Clear this priority bit in the ready priority bit map.  */
            TX_MOD32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

            /* Determine if there are any other bits set in this priority map.  */
            if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
            {

                /* No, clear the active bit to signify this priority map has nothing set.  */
                TX_DIV32_BIT_SET(priority, priority_bit)
                _tx_thread_priority_map_active =  _tx_thread_priority_map_active & (~(priority_bit));
            }
#endif

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

            /* Check for a thread preempted that had preemption-threshold set.  */
            if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
            {

                /* Ensure that this thread's priority is clear in the preempt map.  */
                TX_MOD32_BIT_SET(priority, priority_bit)
                _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

                /* Determine if there are any other bits set in this preempt map.  */
                if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
                {

                    /* No, clear the active bit to signify this preempted map has nothing set.  */
                    TX_DIV32_BIT_SET(priority, priority_bit)
                    _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active & (~(priority_bit));
                }
#endif
            }
#endif

#if TX_MAX_PRIORITIES > 32

            /* Calculate the index to find the next highest priority thread ready for execution.  */
            priority_map =    _tx_thread_priority_map_active;

            /* Determine if there is anything.  */
            if (priority_map != ((ULONG) 0))
            {

                /* Calculate the lowest bit set in the priority map.  */
                TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)
            }

            /* Calculate the base priority as well.  */
            base_priority =  map_index * ((UINT) 32);
#else

            /* Setup the base priority to zero.  */
            base_priority =   ((UINT) 0);
#endif

            /* Setup working variable for the priority map.  */
            priority_map =    _tx_thread_priority_maps[MAP_INDEX];

            /* Make a quick check for no other threads ready for execution.  */
            if (priority_map == ((ULONG) 0))
            {

                /* Nothing else is ready.  Set highest priority and execute thread
                   accordingly.  */
                _tx_thread_highest_priority =  ((UINT) TX_MAX_PRIORITIES);
                _tx_thread_execute_ptr =       TX_NULL;

#ifndef TX_MISRA_ENABLE

                /* Non-MISRA fast path: restore interrupts and return from right here.
                   Under TX_MISRA_ENABLE this early-return path is compiled out and
                   control falls through to the common exit below.  */

#ifdef TX_ENABLE_EVENT_TRACE

                /* Check that the event time stamp is unchanged.  A different
                   timestamp means that a later event wrote over the thread
                   suspend event.  In that case, do nothing here.  */
                if (entry_ptr != TX_NULL)
                {

                    /* Is the timestamp the same?  */
                    if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                    {

                        /* Timestamp is the same, set the "next thread pointer" to the new value of the
                           next thread to execute (NULL here, i.e. idle).  This can be used by the trace
                           analysis tool to keep track of next thread execution.  */
                        entry_ptr -> tx_trace_buffer_entry_information_field_4 =  0;
                    }
                }
#endif

                /* Restore interrupts.  */
                TX_RESTORE

                /* Determine if preemption should take place. This is only possible if the current thread pointer is
                   not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
                TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                if (combined_flags == ((ULONG) 0))
                {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                    /* Yes, increment the return to idle return count.  */
                    _tx_thread_performance_idle_return_count++;
#endif

                    /* Preemption is needed - return to the system!  */
                    _tx_thread_system_return();
                }

                /* Return to caller.  */
                return;
#endif
            }
            else
            {

                /* Other threads at different priority levels are ready to run.  */

                /* Calculate the lowest bit set in the priority map.  */
                TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)

                /* Setup the next highest priority variable.  */
                _tx_thread_highest_priority =  base_priority + ((UINT) priority_bit);
            }
        }

        /* Determine if the suspending thread is the thread designated to execute.  */
        if (thread_ptr == _tx_thread_execute_ptr)
        {

            /* Pickup the highest priority thread to execute.  */
            _tx_thread_execute_ptr =  _tx_thread_priority_list[_tx_thread_highest_priority];

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

            /* Determine if a previous thread with preemption-threshold was preempted.  */
#if TX_MAX_PRIORITIES > 32
            if (_tx_thread_preempted_map_active != ((ULONG) 0))
#else
            if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
#endif
            {

                /* Yes, there was a thread preempted when it was using preemption-threshold.  */

                /* Disable preemption (protects the scheduler state across the
                   interrupt-enable window below).  */
                _tx_thread_preempt_disable++;

                /* Restore interrupts.  */
                TX_RESTORE

                /* Interrupts are enabled briefly here to keep the interrupt
                   lockout time deterministic.  */

                /* Disable interrupts again.  */
                TX_DISABLE

                /* Decrement the preemption disable variable.  */
                _tx_thread_preempt_disable--;

                /* Calculate the thread with preemption threshold set that
                   was interrupted by a thread above the preemption level.  */

#if TX_MAX_PRIORITIES > 32

                /* Calculate the index to find the next highest priority thread ready for execution.  */
                priority_map =  _tx_thread_preempted_map_active;

                /* Calculate the lowest bit set in the priority map.  */
                TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)

                /* Calculate the base priority as well.  */
                base_priority =  map_index * ((UINT) 32);
#else

                /* Setup the base priority to zero.  */
                base_priority =   ((UINT) 0);
#endif

                /* Setup temporary preempted map.  */
                priority_map =  _tx_thread_preempted_maps[MAP_INDEX];

                /* Calculate the lowest bit set in the priority map.  */
                TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)

                /* Setup the highest priority preempted thread.  */
                priority =  base_priority + ((UINT) priority_bit);

                /* Determine if the next highest priority thread is above the highest priority threshold value.  */
                if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
                {

                    /* Thread not allowed to execute until earlier preempted thread finishes or lowers its
                       preemption-threshold.  */
                    _tx_thread_execute_ptr =  _tx_thread_priority_list[priority];

                    /* Clear the corresponding bit in the preempted map, since the preemption has been restored.  */
                    TX_MOD32_BIT_SET(priority, priority_bit)
                    _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

                    /* Determine if there are any other bits set in this preempt map.  */
                    if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
                    {

                        /* No, clear the active bit to signify this preempt map has nothing set.  */
                        TX_DIV32_BIT_SET(priority, priority_bit)
                        _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active & (~(priority_bit));
                    }
#endif
                }
            }
#endif

#ifndef TX_MISRA_ENABLE

            /* Non-MISRA fast path: log, restore interrupts, and return from right
               here.  Under TX_MISRA_ENABLE control falls through to the common
               exit below instead.  */

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* Is the execute pointer different?  */
            if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
            {

                /* Move to next entry.  */
                _tx_thread_performance__execute_log_index++;

                /* Check for wrap condition.  */
                if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                {

                    /* Set the index to the beginning.  */
                    _tx_thread_performance__execute_log_index =  ((UINT) 0);
                }

                /* Log the new execute pointer.  */
                _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
            }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

            /* Check that the event time stamp is unchanged.  A different
               timestamp means that a later event wrote over the thread
               suspend event.  In that case, do nothing here.  */
            if (entry_ptr != TX_NULL)
            {

                /* Is the timestamp the same?  */
                if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                {

                    /* Timestamp is the same, set the "next thread pointer" to the new value of the
                       next thread to execute. This can be used by the trace analysis tool to keep
                       track of next thread execution.  */
                    entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
                }
            }
#endif

            /* Restore interrupts.  */
            TX_RESTORE

            /* Determine if preemption should take place. This is only possible if the current thread pointer is
               not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
            TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
            if (combined_flags == ((ULONG) 0))
            {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* No, there is another thread ready to run and will be scheduled upon return.  */
                _tx_thread_performance_non_idle_return_count++;
#endif

                /* Preemption is needed - return to the system!  */
                _tx_thread_system_return();
            }

            /* Return to caller.  */
            return;
#endif
        }

        /* Common (MISRA / non-execute-ptr) path: log and trace before the shared exit.  */

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

        /* Is the execute pointer different?  */
        if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
        {

            /* Move to next entry.  */
            _tx_thread_performance__execute_log_index++;

            /* Check for wrap condition.  */
            if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
            {

                /* Set the index to the beginning.  */
                _tx_thread_performance__execute_log_index =  ((UINT) 0);
            }

            /* Log the new execute pointer.  */
            _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
        }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

        /* Check that the event time stamp is unchanged.  A different
           timestamp means that a later event wrote over the thread
           suspend event.  In that case, do nothing here.  */
        if (entry_ptr != TX_NULL)
        {

            /* Is the timestamp the same?  */
            if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
            {

                /* Timestamp is the same, set the "next thread pointer" to the new value of the
                   next thread to execute. This can be used by the trace analysis tool to keep
                   track of next thread execution.  */
#ifdef TX_MISRA_ENABLE
                entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
                entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
            }
        }
#endif
    }

    /* Restore interrupts.  */
    TX_RESTORE

    /* Determine if a preemption condition is present.  */
    if (current_thread != _tx_thread_execute_ptr)
    {

#ifdef TX_ENABLE_STACK_CHECKING

        /* Pickup the next execute pointer.  */
        thread_ptr =  _tx_thread_execute_ptr;

        /* Check this thread's stack.  */
        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

        /* Determine if preemption should take place. This is only possible if the current thread pointer is
           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
        if (combined_flags == ((ULONG) 0))
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* Determine if an idle system return is present.  */
            if (_tx_thread_execute_ptr == TX_NULL)
            {

                /* Yes, increment the return to idle return count.  */
                _tx_thread_performance_idle_return_count++;
            }
            else
            {

                /* No, there is another thread ready to run and will be scheduled upon return.  */
                _tx_thread_performance_non_idle_return_count++;
            }
#endif

            /* Preemption is needed - return to the system!  */
            _tx_thread_system_return();
        }
    }

    /* Return to caller.  */
    return;
}
#else
/* Define the entry function for modules assuming the interruptable version of system suspend.
   This thin wrapper re-validates the suspending flag under interrupt lockout and then
   delegates to the non-interruptable worker _tx_thread_system_ni_suspend.  */
{

TX_INTERRUPT_SAVE_AREA

ULONG       wait_option;

    /* Disable interrupts.  */
    TX_DISABLE

    /* Determine if the thread is still suspending (it may have been resumed
       between the caller's setup and this point).  */
    if (thread_ptr -> tx_thread_suspending == TX_TRUE)
    {

        /* Yes, prepare to call the non-interruptable system suspend function.  */

        /* Clear the thread suspending flag.  */
        thread_ptr -> tx_thread_suspending =  TX_FALSE;

        /* Pickup the wait option.  */
        wait_option =  thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks;

        /* Decrement the preempt disable count (balances the caller's increment).  */
        _tx_thread_preempt_disable--;

        /* Call actual non-interruptable thread suspension routine.  */
        _tx_thread_system_ni_suspend(thread_ptr, wait_option);
    }

    /* Restore interrupts.  */
    TX_RESTORE

    /* Check for preemption.  */
    _tx_thread_system_preempt_check();
}
687
688 /* Define the system suspend function that is not interruptable, i.e., it is assumed that
689 interrupts are disabled upon calling this function. */
690
691 VOID _tx_thread_system_ni_suspend(TX_THREAD *thread_ptr, ULONG wait_option)
692 {
693
694 UINT priority;
695 UINT base_priority;
696 ULONG priority_map;
697 ULONG priority_bit;
698 ULONG combined_flags;
699 TX_THREAD *ready_next;
700 TX_THREAD *ready_previous;
701 TX_THREAD *current_thread;
702
703 #if TX_MAX_PRIORITIES > 32
704 UINT map_index;
705 #endif
706
707 #ifdef TX_ENABLE_EVENT_TRACE
708 TX_TRACE_BUFFER_ENTRY *entry_ptr;
709 ULONG time_stamp = ((ULONG) 0);
710 #endif
711
712
713 /* Pickup thread pointer. */
714 TX_THREAD_GET_CURRENT(current_thread)
715
716 #ifndef TX_NO_TIMER
717
718
719 /* Determine if a timeout needs to be activated. */
720 if (thread_ptr == current_thread)
721 {
722
723 /* Is there a wait option? */
724 if (wait_option != TX_NO_WAIT)
725 {
726
727 /* Make sure it is not a wait-forever option. */
728 if (wait_option != TX_WAIT_FOREVER)
729 {
730
731 /* Setup the wait option. */
732 thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks = wait_option;
733
734 /* Activate the thread timer with the timeout value setup in the caller. */
735 _tx_timer_system_activate(&(thread_ptr -> tx_thread_timer));
736 }
737 }
738
739 /* Reset time slice for current thread. */
740 _tx_timer_time_slice = thread_ptr -> tx_thread_new_time_slice;
741 }
742 #endif
743
744 #ifdef TX_ENABLE_STACK_CHECKING
745
746 /* Check this thread's stack. */
747 TX_THREAD_STACK_CHECK(thread_ptr)
748 #endif
749
750 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
751
752 /* Increment the thread's suspend count. */
753 thread_ptr -> tx_thread_performance_suspend_count++;
754
755 /* Increment the total number of thread suspensions. */
756 _tx_thread_performance_suspend_count++;
757 #endif
758
759 /* Thread state change. */
760 TX_THREAD_STATE_CHANGE(thread_ptr, thread_ptr -> tx_thread_state)
761
762 /* Log the thread status change. */
763 TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, thread_ptr -> tx_thread_state)
764
765 #ifdef TX_ENABLE_EVENT_TRACE
766
767 /* If trace is enabled, save the current event pointer. */
768 entry_ptr = _tx_trace_buffer_current_ptr;
769 #endif
770
771 /* Log the thread status change. */
772 TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SUSPEND, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&priority), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)
773
774 #ifdef TX_ENABLE_EVENT_TRACE
775
776 /* Save the time stamp for later comparison to verify that
777 the event hasn't been overwritten by the time we have
778 computed the next thread to execute. */
779 if (entry_ptr != TX_NULL)
780 {
781
782 /* Save time stamp. */
783 time_stamp = entry_ptr -> tx_trace_buffer_entry_time_stamp;
784 }
785 #endif
786
787 /* Pickup priority of thread. */
788 priority = thread_ptr -> tx_thread_priority;
789
790 /* Pickup the next ready thread pointer. */
791 ready_next = thread_ptr -> tx_thread_ready_next;
792
793 /* Determine if there are other threads at this priority that are
794 ready. */
795 if (ready_next != thread_ptr)
796 {
797
798 /* Yes, there are other threads at this priority ready. */
799
800 /* Pickup the previous ready thread pointer. */
801 ready_previous = thread_ptr -> tx_thread_ready_previous;
802
803 /* Just remove this thread from the priority list. */
804 ready_next -> tx_thread_ready_previous = ready_previous;
805 ready_previous -> tx_thread_ready_next = ready_next;
806
807 /* Determine if this is the head of the priority list. */
808 if (_tx_thread_priority_list[priority] == thread_ptr)
809 {
810
811 /* Update the head pointer of this priority list. */
812 _tx_thread_priority_list[priority] = ready_next;
813
814 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
815
816 #if TX_MAX_PRIORITIES > 32
817
818 /* Calculate the index into the bit map array. */
819 map_index = priority/((UINT) 32);
820 #endif
821
822 /* Check for a thread preempted that had preemption threshold set. */
823 if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
824 {
825
826 /* Ensure that this thread's priority is clear in the preempt map. */
827 TX_MOD32_BIT_SET(priority, priority_bit)
828 _tx_thread_preempted_maps[MAP_INDEX] = _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));
829
830 #if TX_MAX_PRIORITIES > 32
831
832 /* Determine if there are any other bits set in this preempt map. */
833 if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
834 {
835
836 /* No, clear the active bit to signify this preempt map has nothing set. */
837 TX_DIV32_BIT_SET(priority, priority_bit)
838 _tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
839 }
840 #endif
841 }
842 #endif
843 }
844 }
845 else
846 {
847
848 /* This is the only thread at this priority ready to run. Set the head
849 pointer to NULL. */
850 _tx_thread_priority_list[priority] = TX_NULL;
851
852 #if TX_MAX_PRIORITIES > 32
853
854 /* Calculate the index into the bit map array. */
855 map_index = priority/((UINT) 32);
856 #endif
857
858 /* Clear this priority bit in the ready priority bit map. */
859 TX_MOD32_BIT_SET(priority, priority_bit)
860 _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));
861
862 #if TX_MAX_PRIORITIES > 32
863
864 /* Determine if there are any other bits set in this priority map. */
865 if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
866 {
867
868 /* No, clear the active bit to signify this priority map has nothing set. */
869 TX_DIV32_BIT_SET(priority, priority_bit)
870 _tx_thread_priority_map_active = _tx_thread_priority_map_active & (~(priority_bit));
871 }
872 #endif
873
874 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
875
876 /* Check for a thread preempted that had preemption-threshold set. */
877 if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
878 {
879
880 /* Ensure that this thread's priority is clear in the preempt map. */
881 TX_MOD32_BIT_SET(priority, priority_bit)
882 _tx_thread_preempted_maps[MAP_INDEX] = _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));
883
884 #if TX_MAX_PRIORITIES > 32
885
886 /* Determine if there are any other bits set in this preempt map. */
887 if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
888 {
889
890 /* No, clear the active bit to signify this preempted map has nothing set. */
891 TX_DIV32_BIT_SET(priority, priority_bit)
892 _tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
893 }
894 #endif
895 }
896 #endif
897
898 #if TX_MAX_PRIORITIES > 32
899
900 /* Calculate the index to find the next highest priority thread ready for execution. */
901 priority_map = _tx_thread_priority_map_active;
902
903 /* Determine if there is anything. */
904 if (priority_map != ((ULONG) 0))
905 {
906
907 /* Calculate the lowest bit set in the priority map. */
908 TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)
909 }
910
911 /* Calculate the base priority as well. */
912 base_priority = map_index * ((UINT) 32);
913 #else
914
915 /* Setup the base priority to zero. */
916 base_priority = ((UINT) 0);
917 #endif
918
919 /* Setup working variable for the priority map. */
920 priority_map = _tx_thread_priority_maps[MAP_INDEX];
921
922 /* Make a quick check for no other threads ready for execution. */
923 if (priority_map == ((ULONG) 0))
924 {
925
926 /* Nothing else is ready. Set highest priority and execute thread
927 accordingly. */
928 _tx_thread_highest_priority = ((UINT) TX_MAX_PRIORITIES);
929 _tx_thread_execute_ptr = TX_NULL;
930
931 #ifndef TX_MISRA_ENABLE
932
933 #ifdef TX_ENABLE_EVENT_TRACE
934
935 /* Check that the event time stamp is unchanged. A different
936 timestamp means that a later event wrote over the thread
937 suspend event. In that case, do nothing here. */
938 if (entry_ptr != TX_NULL)
939 {
940
941 /* Is the timestamp the same? */
942 if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
943 {
944
945 /* Timestamp is the same, set the "next thread pointer" to the new value of the
946 next thread to execute. This can be used by the trace analysis tool to keep
947 track of next thread execution. */
948 entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
949 }
950 }
951 #endif
952
953 /* Determine if preemption should take place. This is only possible if the current thread pointer is
954 not the same as the execute thread pointer AND the system state and preempt disable flags are clear. */
955 TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
956 if (combined_flags == ((ULONG) 0))
957 {
958
959 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
960
961 /* Yes, increment the return to idle return count. */
962 _tx_thread_performance_idle_return_count++;
963 #endif
964
965 /* Preemption is needed - return to the system! */
966 _tx_thread_system_return();
967 }
968
969 /* Return to caller. */
970 return;
971 #endif
972 }
973 else
974 {
975
976 /* Calculate the lowest bit set in the priority map. */
977 TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)
978
979 /* Setup the next highest priority variable. */
980 _tx_thread_highest_priority = base_priority + ((UINT) priority_bit);
981 }
982 }
983
984 /* Determine if the suspending thread is the thread designated to execute. */
985 if (thread_ptr == _tx_thread_execute_ptr)
986 {
987
988 /* Pickup the highest priority thread to execute. */
989 _tx_thread_execute_ptr = _tx_thread_priority_list[_tx_thread_highest_priority];
990
991 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
992
993 /* Determine if a previous thread with preemption-threshold was preempted. */
994 #if TX_MAX_PRIORITIES > 32
995 if (_tx_thread_preempted_map_active != ((ULONG) 0))
996 #else
997 if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
998 #endif
999 {
1000
1001 /* Yes, there was a thread preempted when it was using preemption-threshold. */
1002
1003 /* Disable preemption. */
1004 _tx_thread_preempt_disable++;
1005
1006 /* Decrement the preemption disable variable. */
1007 _tx_thread_preempt_disable--;
1008
1009 /* Calculate the thread with preemption threshold set that
1010 was interrupted by a thread above the preemption level. */
1011
1012 #if TX_MAX_PRIORITIES > 32
1013
1014 /* Calculate the index to find the next highest priority thread ready for execution. */
1015 priority_map = _tx_thread_preempted_map_active;
1016
1017 /* Calculate the lowest bit set in the priority map. */
1018 TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)
1019
1020 /* Calculate the base priority as well. */
1021 base_priority = map_index * ((UINT) 32);
1022 #else
1023
1024 /* Setup the base priority to zero. */
1025 base_priority = ((UINT) 0);
1026 #endif
1027
1028 /* Setup temporary preempted map. */
1029 priority_map = _tx_thread_preempted_maps[MAP_INDEX];
1030
1031 /* Calculate the lowest bit set in the priority map. */
1032 TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)
1033
1034 /* Setup the highest priority preempted thread. */
1035 priority = base_priority + ((UINT) priority_bit);
1036
1037 /* Determine if the next highest priority thread is above the highest priority threshold value. */
1038 if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
1039 {
1040
1041 /* Thread not allowed to execute until earlier preempted thread finishes or lowers its
1042 preemption-threshold. */
1043 _tx_thread_execute_ptr = _tx_thread_priority_list[priority];
1044
1045 /* Clear the corresponding bit in the preempted map, since the preemption has been restored. */
1046 TX_MOD32_BIT_SET(priority, priority_bit)
1047 _tx_thread_preempted_maps[MAP_INDEX] = _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));
1048
1049 #if TX_MAX_PRIORITIES > 32
1050
1051 /* Determine if there are any other bits set in this preempt map. */
1052 if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
1053 {
1054
1055 /* No, clear the active bit to signify this preempt map has nothing set. */
1056 TX_DIV32_BIT_SET(priority, priority_bit)
1057 _tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
1058 }
1059 #endif
1060 }
1061 }
1062 #endif
1063
1064 #ifndef TX_MISRA_ENABLE
1065
1066 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
1067
1068 /* Is the execute pointer different? */
1069 if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
1070 {
1071
1072 /* Move to next entry. */
1073 _tx_thread_performance__execute_log_index++;
1074
1075 /* Check for wrap condition. */
1076 if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
1077 {
1078
1079 /* Set the index to the beginning. */
1080 _tx_thread_performance__execute_log_index = ((UINT) 0);
1081 }
1082
1083 /* Log the new execute pointer. */
1084 _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
1085 }
1086 #endif
1087
1088 #ifdef TX_ENABLE_EVENT_TRACE
1089
1090 /* Check that the event time stamp is unchanged. A different
1091 timestamp means that a later event wrote over the thread
1092 suspend event. In that case, do nothing here. */
1093 if (entry_ptr != TX_NULL)
1094 {
1095
1096 /* Is the timestamp the same? */
1097 if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
1098 {
1099
1100 /* Timestamp is the same, set the "next thread pointer" to the new value of the
1101 next thread to execute. This can be used by the trace analysis tool to keep
1102 track of next thread execution. */
1103 entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
1104 }
1105 }
1106 #endif
1107
1108 /* Determine if preemption should take place. This is only possible if the current thread pointer is
1109 not the same as the execute thread pointer AND the system state and preempt disable flags are clear. */
1110 TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
1111 if (combined_flags == ((ULONG) 0))
1112 {
1113
1114 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
1115
1116 /* No, there is another thread ready to run and will be scheduled upon return. */
1117 _tx_thread_performance_non_idle_return_count++;
1118 #endif
1119
1120 /* Preemption is needed - return to the system! */
1121 _tx_thread_system_return();
1122 }
1123
1124 /* Return to caller. */
1125 return;
1126 #endif
1127 }
1128
1129 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
1130
1131 /* Is the execute pointer different? */
1132 if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
1133 {
1134
1135 /* Move to next entry. */
1136 _tx_thread_performance__execute_log_index++;
1137
1138 /* Check for wrap condition. */
1139 if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
1140 {
1141
1142 /* Set the index to the beginning. */
1143 _tx_thread_performance__execute_log_index = ((UINT) 0);
1144 }
1145
1146 /* Log the new execute pointer. */
1147 _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
1148 }
1149 #endif
1150
1151 #ifdef TX_ENABLE_EVENT_TRACE
1152
1153 /* Check that the event time stamp is unchanged. A different
1154 timestamp means that a later event wrote over the thread
1155 suspend event. In that case, do nothing here. */
1156 if (entry_ptr != TX_NULL)
1157 {
1158
1159 /* Is the timestamp the same? */
1160 if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
1161 {
1162
1163 /* Timestamp is the same, set the "next thread pointer" to the new value of the
1164 next thread to execute. This can be used by the trace analysis tool to keep
1165 track of next thread execution. */
1166 #ifdef TX_MISRA_ENABLE
1167 entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
1168 #else
1169 entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
1170 #endif
1171 }
1172 }
1173 #endif
1174
1175 /* Determine if a preemption condition is present. */
1176 if (current_thread != _tx_thread_execute_ptr)
1177 {
1178
1179 #ifdef TX_ENABLE_STACK_CHECKING
1180
1181 /* Pickup the next execute pointer. */
1182 thread_ptr = _tx_thread_execute_ptr;
1183
1184 /* Check this thread's stack. */
1185 TX_THREAD_STACK_CHECK(thread_ptr)
1186 #endif
1187
1188 /* Determine if preemption should take place. This is only possible if the current thread pointer is
1189 not the same as the execute thread pointer AND the system state and preempt disable flags are clear. */
1190 TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
1191 if (combined_flags == ((ULONG) 0))
1192 {
1193
1194 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
1195
1196 /* Determine if an idle system return is present. */
1197 if (_tx_thread_execute_ptr == TX_NULL)
1198 {
1199
1200 /* Yes, increment the return to idle return count. */
1201 _tx_thread_performance_idle_return_count++;
1202 }
1203 else
1204 {
1205
1206 /* No, there is another thread ready to run and will be scheduled upon return. */
1207 _tx_thread_performance_non_idle_return_count++;
1208 }
1209 #endif
1210
1211 /* Preemption is needed - return to the system! */
1212 _tx_thread_system_return();
1213 }
1214 }
1215
1216 /* Return to caller. */
1217 return;
1218 }
1219 #endif
1220
1221