/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE

/* Include necessary system files. */
#include "tx_api.h"
#include "tx_trace.h"
#include "tx_thread.h"
#ifdef TX_INLINE_THREAD_RESUME_SUSPEND
#ifndef TX_NO_TIMER
#include "tx_timer.h"
#endif
#endif
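
/* Note on file structure: this file contains two build-time variants of the same service. In the
   default build, the function below delegates the real work to _tx_thread_system_suspend (or the
   non-interruptable _tx_thread_system_ni_suspend). When TX_INLINE_THREAD_RESUME_SUSPEND is
   defined, that suspension logic is expanded in-line here instead, which is why tx_timer.h is
   pulled in above for the time-slice handling used by that path. */
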
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_suspend                                  PORTABLE C      */
/*                                                           6.1.1        */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function handles application suspend requests. If the suspend  */
/*    requires actual processing, this function calls the actual suspend  */
/*    thread routine.                                                     */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    thread_ptr                        Pointer to thread to suspend      */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    status                            Return completion status          */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_system_suspend         Actual thread suspension          */
/*    _tx_thread_system_ni_suspend      Non-interruptable suspend thread  */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    Application code                                                    */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE           NAME                DESCRIPTION                      */
/*                                                                        */
/*  05-19-2020     William E. Lamie      Initial Version 6.0              */
/*  09-30-2020     Yuxin Zhou            Modified comment(s),             */
/*                                         resulting in version 6.1       */
/*  10-16-2020     Yuxin Zhou            Modified comment(s), and         */
/*                                         added type cast to address     */
/*                                         a MISRA compliance issue,      */
/*                                         resulting in version 6.1.1     */
/*                                                                        */
/**************************************************************************/
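
/* Usage sketch (illustrative only): a minimal example of how application code typically reaches
   this service through the tx_thread_suspend mapping in tx_api.h. The guard macro
   TX_THREAD_SUSPEND_USAGE_EXAMPLE and the worker_thread object are hypothetical and exist only
   for illustration, so this block is never compiled into the kernel. */
#ifdef TX_THREAD_SUSPEND_USAGE_EXAMPLE

/* Assumed to be created elsewhere with tx_thread_create. */
extern TX_THREAD    worker_thread;

VOID  example_suspend_worker(VOID)
{

UINT    status;

    /* Request suspension of the worker thread. TX_SUCCESS means the thread is now suspended
       (or a delayed suspension was registered for a thread in another wait state);
       TX_SUSPEND_ERROR means the thread was completed or terminated, or a thread attempted to
       suspend itself while preemption was disabled. */
    status = tx_thread_suspend(&worker_thread);

    /* Handle any error in an application-specific way. */
    if (status != TX_SUCCESS)
    {
        /* ... */
    }
}
#endif
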
UINT _tx_thread_suspend(TX_THREAD *thread_ptr)
{

TX_INTERRUPT_SAVE_AREA

TX_THREAD       *current_thread;
UINT            status;


#ifndef TX_INLINE_THREAD_RESUME_SUSPEND

    /* Lockout interrupts while the thread is being suspended. */
    TX_DISABLE

    /* Pickup thread pointer. */
    TX_THREAD_GET_CURRENT(current_thread)

    /* If trace is enabled, insert this event into the trace buffer. */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SUSPEND_API, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&status), 0, TX_TRACE_THREAD_EVENTS)

    /* Log this kernel call. */
    TX_EL_THREAD_SUSPEND_INSERT

    /* Check the specified thread's current status. */
    if (thread_ptr -> tx_thread_state == TX_READY)
    {

        /* Initialize status to success. */
        status = TX_SUCCESS;

        /* Determine if we are in a thread context. */
        if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
        {

            /* Yes, we are in a thread context. */

            /* Determine if the current thread is also the suspending thread. */
            if (current_thread == thread_ptr)
            {

                /* Now determine if the preempt disable flag is non-zero. */
                if (_tx_thread_preempt_disable != ((UINT) 0))
                {

                    /* Current thread cannot suspend when the preempt disable flag is non-zero,
                       return an error. */
                    status = TX_SUSPEND_ERROR;
                }
            }
        }

        /* Determine if the status is still successful. */
        if (status == TX_SUCCESS)
        {

            /* Set the state to suspended. */
            thread_ptr -> tx_thread_state = TX_SUSPENDED;

#ifdef TX_NOT_INTERRUPTABLE

            /* Call actual non-interruptable thread suspension routine. */
            _tx_thread_system_ni_suspend(thread_ptr, ((ULONG) 0));

            /* Restore interrupts. */
            TX_RESTORE
#else

            /* Set the suspending flag. */
            thread_ptr -> tx_thread_suspending = TX_TRUE;

            /* Setup for no timeout period. */
            thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks = ((ULONG) 0);

            /* Temporarily disable preemption. */
            _tx_thread_preempt_disable++;

            /* Restore interrupts. */
            TX_RESTORE

            /* Call actual thread suspension routine. */
            _tx_thread_system_suspend(thread_ptr);
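
            /* The suspension routine above is entered with interrupts enabled and the temporary
               preempt disable count still raised; it is expected to release that count and perform
               the actual removal from the ready structures. If the calling thread suspended itself,
               execution resumes here only after the thread is later resumed. */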
#endif

#ifdef TX_MISRA_ENABLE

            /* Disable interrupts. */
            TX_DISABLE

            /* Return success. */
            status = TX_SUCCESS;
#else

            /* If MISRA is not enabled, return directly. */
            return(TX_SUCCESS);
#endif
        }
    }
    else if (thread_ptr -> tx_thread_state == TX_TERMINATED)
    {

        /* Thread is terminated. */
        status = TX_SUSPEND_ERROR;
    }
    else if (thread_ptr -> tx_thread_state == TX_COMPLETED)
    {

        /* Thread is completed. */
        status = TX_SUSPEND_ERROR;
    }
    else if (thread_ptr -> tx_thread_state == TX_SUSPENDED)
    {

        /* Already suspended, just set status to success. */
        status = TX_SUCCESS;
    }
    else
    {

        /* Just set the delayed suspension flag. */
        thread_ptr -> tx_thread_delayed_suspend = TX_TRUE;

        /* Set status to success. */
        status = TX_SUCCESS;
    }

    /* Restore interrupts. */
    TX_RESTORE

    /* Return completion status. Note that status may be TX_SUSPEND_ERROR if the thread was
       terminated or completed, or attempted to suspend itself with preemption disabled. */
    return(status);

#else

    /* In-line thread suspension processing follows, which is effectively just taking the
       logic in tx_thread_system_suspend.c and placing it here! */
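
    /* In outline, the in-lined logic below marks the thread TX_SUSPENDED, unlinks it from the
       ready list at its priority, clears the corresponding bits in the priority (and, when
       preemption-threshold is in use, the preempted) bit maps, recomputes
       _tx_thread_highest_priority and _tx_thread_execute_ptr, and finally returns to the system
       if preemption is now required. */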

UINT            priority;
UINT            base_priority;
ULONG           priority_map;
ULONG           priority_bit;
ULONG           combined_flags;
TX_THREAD       *ready_next;
TX_THREAD       *ready_previous;

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY   *entry_ptr;
ULONG                   time_stamp = ((ULONG) 0);
#endif


    /* Pickup thread pointer. */
    TX_THREAD_GET_CURRENT(current_thread)

#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack. */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif

    /* Lockout interrupts while the thread is being suspended. */
    TX_DISABLE

#ifndef TX_NO_TIMER

    /* Determine if this is the current thread. */
    if (thread_ptr == current_thread)
    {

        /* Yes, current thread is suspending - reset time slice for current thread. */
        _tx_timer_time_slice = thread_ptr -> tx_thread_new_time_slice;
    }
#endif
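
    /* The reload above means a thread that suspends itself gives up the remainder of its current
       time slice; tx_thread_new_time_slice holds the thread's configured slice value, so the
       thread is expected to start with a full slice when it is next scheduled. */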

    /* If trace is enabled, insert this event into the trace buffer. */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SUSPEND_API, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&status), 0, TX_TRACE_THREAD_EVENTS)

    /* Log this kernel call. */
    TX_EL_THREAD_SUSPEND_INSERT

    /* Check the specified thread's current status. */
    if (thread_ptr -> tx_thread_state == TX_READY)
    {

        /* Initialize status to success. */
        status = TX_SUCCESS;

        /* Determine if we are in a thread context. */
        if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
        {

            /* Yes, we are in a thread context. */

            /* Determine if the current thread is also the suspending thread. */
            if (current_thread == thread_ptr)
            {

                /* Now determine if the preempt disable flag is non-zero. */
                if (_tx_thread_preempt_disable != ((UINT) 0))
                {

                    /* Current thread cannot suspend when the preempt disable flag is non-zero,
                       return an error. */
                    status = TX_SUSPEND_ERROR;
                }
            }
        }

        /* Determine if the status is still successful. */
        if (status == TX_SUCCESS)
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* Increment the thread's suspend count. */
            thread_ptr -> tx_thread_performance_suspend_count++;

            /* Increment the total number of thread suspensions. */
            _tx_thread_performance_suspend_count++;
#endif

            /* Set the state to suspended. */
            thread_ptr -> tx_thread_state = TX_SUSPENDED;

            /* Thread state change. */
            TX_THREAD_STATE_CHANGE(thread_ptr, TX_SUSPENDED)

            /* Log the thread status change. */
            TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, thread_ptr -> tx_thread_state)

#ifdef TX_ENABLE_EVENT_TRACE

            /* If trace is enabled, save the current event pointer. */
            entry_ptr = _tx_trace_buffer_current_ptr;
#endif

            /* Log the thread status change. */
            TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SUSPEND, thread_ptr, ((ULONG) thread_ptr -> tx_thread_state), TX_POINTER_TO_ULONG_CONVERT(&priority), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_ENABLE_EVENT_TRACE

            /* Save the time stamp for later comparison to verify that
               the event hasn't been overwritten by the time we have
               computed the next thread to execute. */
            if (entry_ptr != TX_NULL)
            {

                /* Save time stamp. */
                time_stamp = entry_ptr -> tx_trace_buffer_entry_time_stamp;
            }
#endif

            /* Pickup priority of thread. */
            priority = thread_ptr -> tx_thread_priority;

            /* Pickup the previous and next ready thread pointers. */
            ready_next = thread_ptr -> tx_thread_ready_next;
            ready_previous = thread_ptr -> tx_thread_ready_previous;

            /* Determine if there are other threads at this priority that are
               ready. */
            if (ready_next != thread_ptr)
            {

                /* Yes, there are other threads at this priority ready. */

                /* Just remove this thread from the priority list. */
                ready_next -> tx_thread_ready_previous = ready_previous;
                ready_previous -> tx_thread_ready_next = ready_next;

                /* Determine if this is the head of the priority list. */
                if (_tx_thread_priority_list[priority] == thread_ptr)
                {

                    /* Update the head pointer of this priority list. */
                    _tx_thread_priority_list[priority] = ready_next;

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

#if TX_MAX_PRIORITIES > 32

                    /* Calculate the index into the bit map array. */
                    map_index = priority/((UINT) 32);
#endif
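
                    /* MAP_INDEX is assumed here to be the helper from tx_thread.h that selects
                       which 32-bit word of the map arrays to use: it expands to map_index when
                       TX_MAX_PRIORITIES is greater than 32 and to 0 otherwise, so one copy of the
                       bit manipulation below serves both configurations. */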

                    /* Check for a thread preempted that had preemption threshold set. */
                    if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
                    {

                        /* Ensure that this thread's priority is clear in the preempt map. */
                        TX_MOD32_BIT_SET(priority, priority_bit)
                        _tx_thread_preempted_maps[MAP_INDEX] = _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

                        /* Determine if there are any other bits set in this preempt map. */
                        if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
                        {

                            /* No, clear the active bit to signify this preempt map has nothing set. */
                            TX_DIV32_BIT_SET(priority, priority_bit)
                            _tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
                        }
#endif
                    }
#endif
                }
            }
            else
            {

                /* This is the only thread at this priority ready to run. Set the head
                   pointer to NULL. */
                _tx_thread_priority_list[priority] = TX_NULL;

#if TX_MAX_PRIORITIES > 32

                /* Calculate the index into the bit map array. */
                map_index = priority/((UINT) 32);
#endif

                /* Clear this priority bit in the ready priority bit map. */
                TX_MOD32_BIT_SET(priority, priority_bit)
                _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

                /* Determine if there are any other bits set in this priority map. */
                if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
                {

                    /* No, clear the active bit to signify this priority map has nothing set. */
                    TX_DIV32_BIT_SET(priority, priority_bit)
                    _tx_thread_priority_map_active = _tx_thread_priority_map_active & (~(priority_bit));
                }
#endif

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                /* Check for a thread preempted that had preemption-threshold set. */
                if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
                {

                    /* Ensure that this thread's priority is clear in the preempt map. */
                    TX_MOD32_BIT_SET(priority, priority_bit)
                    _tx_thread_preempted_maps[MAP_INDEX] = _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

                    /* Determine if there are any other bits set in this preempt map. */
                    if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
                    {

                        /* No, clear the active bit to signify this preempted map has nothing set. */
                        TX_DIV32_BIT_SET(priority, priority_bit)
                        _tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
                    }
#endif
                }
#endif

#if TX_MAX_PRIORITIES > 32

                /* Calculate the index to find the next highest priority thread ready for execution. */
                priority_map = _tx_thread_priority_map_active;

                /* Determine if there is anything. */
                if (priority_map != ((ULONG) 0))
                {

                    /* Calculate the lowest bit set in the priority map. */
                    TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)
                }

                /* Calculate the base priority as well. */
                base_priority = map_index * ((UINT) 32);
#else

                /* Setup the base priority to zero. */
                base_priority = ((UINT) 0);
#endif
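
                /* For example, in a hypothetical build with TX_MAX_PRIORITIES set to 64, a thread
                   at priority 40 lives in word 40/32 = 1 of the maps, so base_priority is
                   1 * 32 = 32 and the bit position found below is added to 32 to recover the
                   absolute priority. */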

                /* Setup working variable for the priority map. */
                priority_map = _tx_thread_priority_maps[MAP_INDEX];

                /* Make a quick check for no other threads ready for execution. */
                if (priority_map == ((ULONG) 0))
                {

                    /* Nothing else is ready. Set highest priority and execute thread
                       accordingly. */
                    _tx_thread_highest_priority = ((UINT) TX_MAX_PRIORITIES);
                    _tx_thread_execute_ptr = TX_NULL;

#ifndef TX_MISRA_ENABLE

#ifdef TX_ENABLE_EVENT_TRACE

                    /* Check that the event time stamp is unchanged. A different
                       timestamp means that a later event wrote over the thread
                       suspend event. In that case, do nothing here. */
                    if (entry_ptr != TX_NULL)
                    {

                        /* Is the timestamp the same? */
                        if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                        {

                            /* Timestamp is the same, set the "next thread pointer" to the new value of the
                               next thread to execute. This can be used by the trace analysis tool to keep
                               track of next thread execution. */
                            entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
                        }
                    }
#endif

                    /* Restore interrupts. */
                    TX_RESTORE

                    /* Determine if preemption should take place. This is only possible if the current thread pointer is
                       not the same as the execute thread pointer AND the system state and preempt disable flags are clear. */
                    TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                    if (combined_flags == ((ULONG) 0))
                    {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                        /* Yes, increment the return to idle return count. */
                        _tx_thread_performance_idle_return_count++;
#endif

                        /* Preemption is needed - return to the system! */
                        _tx_thread_system_return();
                    }

                    /* Return to caller. */
                    return(TX_SUCCESS);
#endif
                }
                else
                {

                    /* Calculate the lowest bit set in the priority map. */
                    TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)

                    /* Setup the next highest priority variable. */
                    _tx_thread_highest_priority = base_priority + priority_bit;
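
                    /* In this expression priority_bit holds the bit position (0 through 31)
                       produced by TX_LOWEST_SET_BIT_CALCULATE, not the one-bit mask the same
                       variable holds after TX_MOD32_BIT_SET above, which is why it can simply be
                       added to base_priority. */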
                }
            }

            /* Determine if this thread is the thread designated to execute. */
            if (thread_ptr == _tx_thread_execute_ptr)
            {

                /* Pickup the highest priority thread to execute. */
                _tx_thread_execute_ptr = _tx_thread_priority_list[_tx_thread_highest_priority];

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                /* Determine if a previous thread with preemption-threshold was preempted. */
#if TX_MAX_PRIORITIES > 32
                if (_tx_thread_preempted_map_active != ((ULONG) 0))
#else
                if (_tx_thread_preempted_maps[MAP_INDEX] != ((ULONG) 0))
#endif
                {

                    /* Yes, there was a thread preempted when it was using preemption-threshold. */

#ifndef TX_NOT_INTERRUPTABLE

                    /* Disable preemption. */
                    _tx_thread_preempt_disable++;

                    /* Restore interrupts. */
                    TX_RESTORE

                    /* Interrupts are enabled briefly here to keep the interrupt
                       lockout time deterministic. */

                    /* Disable interrupts again. */
                    TX_DISABLE

                    /* Decrement the preemption disable variable. */
                    _tx_thread_preempt_disable--;
#endif

                    /* Calculate the thread with preemption threshold set that
                       was interrupted by a thread above the preemption level. */

#if TX_MAX_PRIORITIES > 32

                    /* Calculate the index to find the next highest priority thread ready for execution. */
                    priority_map = _tx_thread_preempted_map_active;

                    /* Calculate the lowest bit set in the priority map. */
                    TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)

                    /* Calculate the base priority as well. */
                    base_priority = map_index * ((UINT) 32);
#else

                    /* Setup the base priority to zero. */
                    base_priority = ((UINT) 0);
#endif

                    /* Setup temporary preempted map. */
                    priority_map = _tx_thread_preempted_maps[MAP_INDEX];

                    /* Calculate the lowest bit set in the priority map. */
                    TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)

                    /* Setup the highest priority preempted thread. */
                    priority = base_priority + priority_bit;

                    /* Determine if the next highest priority thread is above the highest priority threshold value. */
                    if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
                    {

                        /* Thread not allowed to execute until earlier preempted thread finishes or lowers its
                           preemption-threshold. */
                        _tx_thread_execute_ptr = _tx_thread_priority_list[priority];

#ifdef TX_ENABLE_EVENT_TRACE

                        /* Check that the event time stamp is unchanged. A different
                           timestamp means that a later event wrote over the thread
                           suspend event. In that case, do nothing here. */
                        if (entry_ptr != TX_NULL)
                        {

                            /* Is the timestamp the same? */
                            if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                            {

                                /* Timestamp is the same, set the "next thread pointer" to the new value of the
                                   next thread to execute. This can be used by the trace analysis tool to keep
                                   track of next thread execution. */
#ifdef TX_MISRA_ENABLE
                                entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
                                entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
                            }
                        }
#endif

                        /* Clear the corresponding bit in the preempted map, since the preemption has been restored. */
                        TX_MOD32_BIT_SET(priority, priority_bit)
                        _tx_thread_preempted_maps[MAP_INDEX] = _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

                        /* Determine if there are any other bits set in this preempt map. */
                        if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
                        {

                            /* No, clear the active bit to signify this preempt map has nothing set. */
                            TX_DIV32_BIT_SET(priority, priority_bit)
                            _tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
                        }
#endif
                    }
                }
#endif

#ifndef TX_MISRA_ENABLE

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Is the execute pointer different? */
                if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
                {

                    /* Move to next entry. */
                    _tx_thread_performance__execute_log_index++;

                    /* Check for wrap condition. */
                    if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                    {

                        /* Set the index to the beginning. */
                        _tx_thread_performance__execute_log_index = ((UINT) 0);
                    }

                    /* Log the new execute pointer. */
                    _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
                }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

                /* Check that the event time stamp is unchanged. A different
                   timestamp means that a later event wrote over the thread
                   suspend event. In that case, do nothing here. */
                if (entry_ptr != TX_NULL)
                {

                    /* Is the timestamp the same? */
                    if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                    {

                        /* Timestamp is the same, set the "next thread pointer" to the new value of the
                           next thread to execute. This can be used by the trace analysis tool to keep
                           track of next thread execution. */
                        entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
                    }
                }
#endif

                /* Restore interrupts. */
                TX_RESTORE

                /* Determine if preemption should take place. This is only possible if the current thread pointer is
                   not the same as the execute thread pointer AND the system state and preempt disable flags are clear. */
                TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                if (combined_flags == ((ULONG) 0))
                {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                    /* No, there is another thread ready to run and will be scheduled upon return. */
                    _tx_thread_performance_non_idle_return_count++;
#endif

                    /* Preemption is needed - return to the system! */
                    _tx_thread_system_return();
                }

                /* Return to caller. */
                return(TX_SUCCESS);
#endif
            }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* Is the execute pointer different? */
            if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
            {

                /* Move to next entry. */
                _tx_thread_performance__execute_log_index++;

                /* Check for wrap condition. */
                if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                {

                    /* Set the index to the beginning. */
                    _tx_thread_performance__execute_log_index = ((UINT) 0);
                }

                /* Log the new execute pointer. */
                _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
            }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

            /* Check that the event time stamp is unchanged. A different
               timestamp means that a later event wrote over the thread
               suspend event. In that case, do nothing here. */
            if (entry_ptr != TX_NULL)
            {

                /* Is the timestamp the same? */
                if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                {

                    /* Timestamp is the same, set the "next thread pointer" to the new value of the
                       next thread to execute. This can be used by the trace analysis tool to keep
                       track of next thread execution. */
#ifdef TX_MISRA_ENABLE
                    entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
                    entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
                }
            }
#endif

            /* Restore interrupts. */
            TX_RESTORE

            /* Determine if a preemption condition is present. */
            if (current_thread != _tx_thread_execute_ptr)
            {

#ifdef TX_ENABLE_STACK_CHECKING

                /* Pickup the next execute pointer. */
                thread_ptr = _tx_thread_execute_ptr;

                /* Check this thread's stack. */
                TX_THREAD_STACK_CHECK(thread_ptr)
#endif

                /* Determine if preemption should take place. This is only possible if the current thread pointer is
                   not the same as the execute thread pointer AND the system state and preempt disable flags are clear. */
                TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                if (combined_flags == ((ULONG) 0))
                {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                    /* Determine if an idle system return is present. */
                    if (_tx_thread_execute_ptr == TX_NULL)
                    {

                        /* Yes, increment the return to idle return count. */
                        _tx_thread_performance_idle_return_count++;
                    }
                    else
                    {

                        /* No, there is another thread ready to run and will be scheduled upon return. */
                        _tx_thread_performance_non_idle_return_count++;
                    }
#endif

                    /* Preemption is needed - return to the system! */
                    _tx_thread_system_return();
                }
            }

            /* Disable interrupts. */
            TX_DISABLE

            /* Return success. */
            status = TX_SUCCESS;
        }
    }
    else if (thread_ptr -> tx_thread_state == TX_TERMINATED)
    {

        /* Thread is terminated. */
        status = TX_SUSPEND_ERROR;
    }
    else if (thread_ptr -> tx_thread_state == TX_COMPLETED)
    {

        /* Thread is completed. */
        status = TX_SUSPEND_ERROR;
    }
    else if (thread_ptr -> tx_thread_state == TX_SUSPENDED)
    {

        /* Already suspended, just set status to success. */
        status = TX_SUCCESS;
    }
    else
    {

        /* Just set the delayed suspension flag. */
        thread_ptr -> tx_thread_delayed_suspend = TX_TRUE;

        /* Set status to success. */
        status = TX_SUCCESS;
    }

    /* Restore interrupts. */
    TX_RESTORE

    /* Return completion status. */
    return(status);
#endif
}
