/***************************************************************************
 * Copyright (c) 2024 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 **************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_trace.h"
#include "tx_thread.h"
#include "tx_initialize.h"


/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_resume                                   PORTABLE C      */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function processes application resume thread services.  Actual*/
/*    thread resumption is performed in the core service.                 */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    thread_ptr                            Pointer to thread to resume   */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    status                                Service return status         */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_system_resume              Resume thread                 */
/*    _tx_thread_system_ni_resume           Non-interruptable resume thread */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    Application Code                                                    */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  05-19-2020     William E. Lamie         Initial Version 6.0           */
/*  09-30-2020     Yuxin Zhou               Modified comment(s),          */
/*                                            resulting in version 6.1    */
/*                                                                        */
/**************************************************************************/
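/* Usage sketch (illustrative only): application code normally reaches this
   service through the tx_thread_resume() mapping in tx_api.h, which selects
   the error-checking wrapper _txe_thread_resume() unless
   TX_DISABLE_ERROR_CHECKING is defined.  The thread object "my_thread" below
   is a hypothetical example of a thread created elsewhere and currently
   suspended:

       UINT  status;

       status =  tx_thread_resume(&my_thread);
       if (status == TX_SUCCESS)
       {
           ... the thread is now ready and may preempt the caller ...
       }
       else if (status == TX_SUSPEND_LIFTED)
       {
           ... a pending delayed suspension was lifted instead ...
       }
*/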
UINT  _tx_thread_resume(TX_THREAD *thread_ptr)
{

TX_INTERRUPT_SAVE_AREA

UINT            status;
TX_THREAD       *saved_thread_ptr;
UINT            saved_threshold =  ((UINT) 0);

#ifdef TX_INLINE_THREAD_RESUME_SUSPEND
UINT            priority;
ULONG           priority_bit;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
TX_THREAD       *execute_ptr;
TX_THREAD       *current_thread;
ULONG           combined_flags;

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY   *entry_ptr;
ULONG                   time_stamp =  ((ULONG) 0);
#endif

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif


#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif
#endif

    /* Lockout interrupts while the thread is being resumed.  */
    TX_DISABLE
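
    /* Note: TX_DISABLE/TX_RESTORE are the port-specific interrupt lockout
       macros from tx_port.h; they save and restore the interrupt posture in
       the TX_INTERRUPT_SAVE_AREA storage declared at the top of this
       function.  */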

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME_API, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&status), 0, TX_TRACE_THREAD_EVENTS)

    /* Log this kernel call.  */
    TX_EL_THREAD_RESUME_INSERT

    /* Determine if the thread is suspended or in the process of suspending.
       If so, call the thread resume processing.  */
    if (thread_ptr -> tx_thread_state == TX_SUSPENDED)
    {

        /* Determine if this resume call is being made from initialization.  */
        if (TX_THREAD_GET_SYSTEM_STATE() >= TX_INITIALIZE_IN_PROGRESS)
        {

            /* Yes, this resume call was made from initialization.  */

            /* Pickup the current thread execute pointer, which corresponds to the
               highest priority thread ready to execute.  Interrupt lockout is
               not required, since interrupts are assumed to be disabled during
               initialization.  */
            saved_thread_ptr =  _tx_thread_execute_ptr;

            /* Determine if there is a thread ready for execution.  */
            if (saved_thread_ptr != TX_NULL)
            {

                /* Yes, a thread is ready for execution when initialization completes.  */

                /* Save the current preemption-threshold.  */
                saved_threshold =  saved_thread_ptr -> tx_thread_preempt_threshold;

                /* For initialization, temporarily set the preemption-threshold to the
                   priority level to make sure the highest-priority thread runs once
                   initialization is complete.  */
                saved_thread_ptr -> tx_thread_preempt_threshold =  saved_thread_ptr -> tx_thread_priority;
            }
        }
        else
        {

            /* Simply set the saved thread pointer to NULL.  */
            saved_thread_ptr =  TX_NULL;
        }

#ifndef TX_INLINE_THREAD_RESUME_SUSPEND

#ifdef TX_NOT_INTERRUPTABLE

        /* Resume the thread!  */
        _tx_thread_system_ni_resume(thread_ptr);

        /* Restore interrupts.  */
        TX_RESTORE
#else

        /* Temporarily disable preemption.  */
        _tx_thread_preempt_disable++;

        /* Restore interrupts.  */
        TX_RESTORE

        /* Call the actual resume service to resume the thread.  */
        _tx_thread_system_resume(thread_ptr);
#endif

        /* Determine if the thread's preemption-threshold needs to be restored.  */
        if (saved_thread_ptr != TX_NULL)
        {

            /* Yes, restore the previous highest-priority thread's preemption-threshold.  This
               can only happen if this routine is called from initialization.  */
            saved_thread_ptr -> tx_thread_preempt_threshold =  saved_threshold;
        }

#ifdef TX_MISRA_ENABLE

        /* Disable interrupts.  */
        TX_DISABLE

        /* Setup successful return status.  */
        status =  TX_SUCCESS;
#else

        /* Return successful completion.  */
        return(TX_SUCCESS);
#endif
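
        /* Design note: when TX_MISRA_ENABLE is defined, the early return above is
           avoided so that the function keeps a single exit point, as MISRA C
           guidelines generally require; interrupts are re-disabled and the status
           variable carries the result to the common return at the bottom.  */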


#else

        /* In-line thread resumption processing follows, which is effectively just taking the
           logic in tx_thread_system_resume.c and placing it here!  */

        /* Resume the thread!  */

#ifdef TX_ENABLE_EVENT_TRACE

        /* If trace is enabled, save the current event pointer.  */
        entry_ptr =  _tx_trace_buffer_current_ptr;
#endif

        /* Log the thread status change.  */
        TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&execute_ptr), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_ENABLE_EVENT_TRACE

        /* Save the time stamp for later comparison to verify that
           the event hasn't been overwritten by the time we have
           computed the next thread to execute.  */
        if (entry_ptr != TX_NULL)
        {

            /* Save time stamp.  */
            time_stamp =  entry_ptr -> tx_trace_buffer_entry_time_stamp;
        }
#endif

        /* Make this thread ready.  */

        /* Change the state to ready.  */
        thread_ptr -> tx_thread_state =  TX_READY;

        /* Pickup priority of thread.  */
        priority =  thread_ptr -> tx_thread_priority;

        /* Thread state change.  */
        TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

        /* Log the thread status change.  */
        TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

        /* Increment the total number of thread resumptions.  */
        _tx_thread_performance_resume_count++;

        /* Increment this thread's resume count.  */
        thread_ptr -> tx_thread_performance_resume_count++;
#endif

        /* Determine if there are other threads at this priority that are
           ready.  */
        head_ptr =  _tx_thread_priority_list[priority];
        if (head_ptr == TX_NULL)
        {

            /* First thread at this priority ready.  Add to the front of the list.  */
            _tx_thread_priority_list[priority] =     thread_ptr;
            thread_ptr -> tx_thread_ready_next =     thread_ptr;
            thread_ptr -> tx_thread_ready_previous = thread_ptr;
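
            /* The ready list at each priority is a circular, doubly-linked list;
               since this thread is the only entry, both of its links point back
               to itself.  */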

#if TX_MAX_PRIORITIES > 32

            /* Calculate the index into the bit map array.  */
            map_index =  priority/((UINT) 32);

            /* Set the active bit to remember that the priority map has something set.  */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_map_active =  _tx_thread_priority_map_active | priority_bit;
#endif

            /* Or in the thread's priority bit.  */
            TX_MOD32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_maps[MAP_INDEX] =  _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
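
            /* Note: in the default port, TX_MOD32_BIT_SET(p, b) evaluates to
               b = ((ULONG) 1) << (p % 32) and TX_DIV32_BIT_SET(p, b) to
               b = ((ULONG) 1) << (p / 32), while MAP_INDEX expands to map_index
               when TX_MAX_PRIORITIES > 32 and to 0 otherwise.  See tx_port.h and
               tx_thread.h for the exact definitions used by a given port.  */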

            /* Determine if this newly ready thread is the highest priority.  */
            if (priority < _tx_thread_highest_priority)
            {

                /* A new highest priority thread is present.  */

                /* Update the highest priority variable.  */
                _tx_thread_highest_priority =  priority;

                /* Pickup the execute pointer.  Since it is going to be referenced multiple
                   times, it is placed in a local variable.  */
                execute_ptr =  _tx_thread_execute_ptr;

                /* Determine if no thread is currently executing.  */
                if (execute_ptr == TX_NULL)
                {

                    /* Simply setup the execute pointer.  */
                    _tx_thread_execute_ptr =  thread_ptr;
                }
                else
                {

                    /* Another thread has been scheduled for execution.  */

                    /* Check to see if this is a higher priority thread and determine if preemption is allowed.  */
                    if (priority < execute_ptr -> tx_thread_preempt_threshold)
                    {

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                        /* Determine if the preempted thread had preemption-threshold set.  */
                        if (execute_ptr -> tx_thread_preempt_threshold != execute_ptr -> tx_thread_priority)
                        {

#if TX_MAX_PRIORITIES > 32

                            /* Calculate the index into the bit map array.  */
                            map_index =  (execute_ptr -> tx_thread_priority)/((UINT) 32);

                            /* Set the active bit to remember that the preempt map has something set.  */
                            TX_DIV32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                            _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active | priority_bit;
#endif

                            /* Remember that this thread was preempted by a thread above the thread's threshold.  */
                            TX_MOD32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                            _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] | priority_bit;
                        }
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                        /* Determine whether the caller is an interrupt or a thread.  */
                        if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
                        {

                            /* Caller is a thread, so this is a solicited preemption.  */
                            _tx_thread_performance_solicited_preemption_count++;

                            /* Increment the thread's solicited preemption counter.  */
                            execute_ptr -> tx_thread_performance_solicited_preemption_count++;
                        }
                        else
                        {

                            if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
                            {

                                /* Caller is an interrupt, so this is an interrupt preemption.  */
                                _tx_thread_performance_interrupt_preemption_count++;

                                /* Increment the thread's interrupt preemption counter.  */
                                execute_ptr -> tx_thread_performance_interrupt_preemption_count++;
                            }
                        }

                        /* Remember the thread that preempted this thread.  */
                        execute_ptr -> tx_thread_performance_last_preempting_thread =  thread_ptr;
#endif

                        /* Yes, modify the execute thread pointer.  */
                        _tx_thread_execute_ptr =  thread_ptr;

#ifndef TX_MISRA_ENABLE

                        /* If MISRA is not enabled, insert a preemption and return in-line for performance.  */

                        /* Determine if the thread's preemption-threshold needs to be restored.  */
                        if (saved_thread_ptr != TX_NULL)
                        {

                            /* Yes, restore the previous highest-priority thread's preemption-threshold.  This
                               can only happen if this routine is called from initialization.  */
                            saved_thread_ptr -> tx_thread_preempt_threshold =  saved_threshold;
                        }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                        /* Is the execute pointer different?  */
                        if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
                        {

                            /* Move to next entry.  */
                            _tx_thread_performance__execute_log_index++;

                            /* Check for wrap condition.  */
                            if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                            {

                                /* Set the index to the beginning.  */
                                _tx_thread_performance__execute_log_index =  ((UINT) 0);
                            }

                            /* Log the new execute pointer.  */
                            _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
                        }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

                        /* Check that the event time stamp is unchanged.  A different
                           timestamp means that a later event wrote over the thread
                           resume event.  In that case, do nothing here.  */
                        if (entry_ptr != TX_NULL)
                        {

                            /* Is the timestamp the same?  */
                            if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                            {

                                /* Timestamp is the same, so update the event's "next thread pointer"
                                   with the current execute pointer; the trace analysis tool uses a
                                   NULL value here to show idle system conditions.  */
                                entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
                            }
                        }
#endif

                        /* Restore interrupts.  */
                        TX_RESTORE

#ifdef TX_ENABLE_STACK_CHECKING

                        /* Pickup the next execute pointer.  */
                        thread_ptr =  _tx_thread_execute_ptr;

                        /* Check this thread's stack.  */
                        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

                        /* Now determine if preemption should take place.  This is only possible if the current thread pointer is
                           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
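
                        /* Note: TX_THREAD_SYSTEM_RETURN_CHECK() is typically defined in tx_thread.h to
                           OR the preempt disable count and the system state into combined_flags, so a
                           single zero test below covers both conditions.  */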
                        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                        if (combined_flags == ((ULONG) 0))
                        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                            /* There is another thread ready to run that will be scheduled upon return.  */
                            _tx_thread_performance_non_idle_return_count++;
#endif

                            /* Preemption is needed - return to the system!  */
                            _tx_thread_system_return();
                        }

                        /* Return in-line when MISRA is not enabled.  */
                        return(TX_SUCCESS);
#endif
                    }
                }
            }
        }
        else
        {

            /* No, there are other threads at this priority already ready.  */

            /* Just add this thread to the priority list.  */
            tail_ptr =                                head_ptr -> tx_thread_ready_previous;
            tail_ptr -> tx_thread_ready_next =        thread_ptr;
            head_ptr -> tx_thread_ready_previous =    thread_ptr;
            thread_ptr -> tx_thread_ready_previous =  tail_ptr;
            thread_ptr -> tx_thread_ready_next =      head_ptr;
        }
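
        /* Inserting at the tail keeps threads of equal priority in the order they
           became ready, so the newly resumed thread runs after the threads that are
           already ready at this priority level.  */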

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

        /* Determine if we should log the execute pointer.  */

        /* Is the execute pointer different?  */
        if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
        {

            /* Move to next entry.  */
            _tx_thread_performance__execute_log_index++;

            /* Check for wrap condition.  */
            if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
            {

                /* Set the index to the beginning.  */
                _tx_thread_performance__execute_log_index =  ((UINT) 0);
            }

            /* Log the new execute pointer.  */
            _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
        }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

        /* Check that the event time stamp is unchanged.  A different
           timestamp means that a later event wrote over the thread
           resume event.  In that case, do nothing here.  */
        if (entry_ptr != TX_NULL)
        {

            /* Is the timestamp the same?  */
            if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
            {

                /* Timestamp is the same, so update the event's "next thread pointer"
                   with the current execute pointer; the trace analysis tool uses a
                   NULL value here to show idle system conditions.  */
#ifdef TX_MISRA_ENABLE
                entry_ptr -> tx_trace_buffer_entry_info_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
                entry_ptr -> tx_trace_buffer_entry_information_field_4 =  TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
            }
        }
#endif

        /* Determine if the thread's preemption-threshold needs to be restored.  */
        if (saved_thread_ptr != TX_NULL)
        {

            /* Yes, restore the previous highest-priority thread's preemption-threshold.  This
               can only happen if this routine is called from initialization.  */
            saved_thread_ptr -> tx_thread_preempt_threshold =  saved_threshold;
        }

        /* Setup successful return status.  */
        status =  TX_SUCCESS;
#endif
    }
    else if (thread_ptr -> tx_thread_delayed_suspend == TX_TRUE)
    {

        /* Clear the delayed suspension.  */
        thread_ptr -> tx_thread_delayed_suspend =  TX_FALSE;

        /* Setup delayed suspend lifted return status.  */
        status =  TX_SUSPEND_LIFTED;
    }
    else
    {

        /* Setup invalid resume return status.  */
        status =  TX_RESUME_ERROR;
    }

    /* Restore interrupts.  */
    TX_RESTORE

#ifdef TX_INLINE_THREAD_RESUME_SUSPEND

    /* Pickup thread pointer.  */
    TX_THREAD_GET_CURRENT(current_thread)
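
    /* Note: TX_THREAD_GET_CURRENT() typically reads _tx_thread_current_ptr into
       current_thread; some ports override it in tx_port.h.  */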

    /* Determine if a preemption condition is present.  */
    if (current_thread != _tx_thread_execute_ptr)
    {

#ifdef TX_ENABLE_STACK_CHECKING

        /* Pickup the next execute pointer.  */
        thread_ptr =  _tx_thread_execute_ptr;

        /* Check this thread's stack.  */
        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

        /* Now determine if preemption should take place.  This is only possible if the current thread pointer is
           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
        if (combined_flags == ((ULONG) 0))
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* There is another thread ready to run that will be scheduled upon return.  */
            _tx_thread_performance_non_idle_return_count++;
#endif

            /* Preemption is needed - return to the system!  */
            _tx_thread_system_return();
        }
    }
#endif

    /* Return completion status.  */
    return(status);
}
