1 /**************************************************************************/
2 /* */
3 /* Copyright (c) Microsoft Corporation. All rights reserved. */
4 /* */
5 /* This software is licensed under the Microsoft Software License */
6 /* Terms for Microsoft Azure RTOS. Full text of the license can be */
7 /* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
8 /* and in the root directory of this software. */
9 /* */
10 /**************************************************************************/
11
12
13 /**************************************************************************/
14 /**************************************************************************/
15 /** */
16 /** ThreadX Component */
17 /** */
18 /** Thread */
19 /** */
20 /**************************************************************************/
21 /**************************************************************************/
22
23 #define TX_SOURCE_CODE
24
25
26 /* Include necessary system files. */
27
28 #include "tx_api.h"
29 #include "tx_trace.h"
30 #include "tx_thread.h"
31 #include "tx_initialize.h"
32
33
34 /**************************************************************************/
35 /* */
36 /* FUNCTION RELEASE */
37 /* */
38 /* _tx_thread_resume PORTABLE C */
39 /* 6.1 */
40 /* AUTHOR */
41 /* */
42 /* William E. Lamie, Microsoft Corporation */
43 /* */
44 /* DESCRIPTION */
45 /* */
46 /* This function processes application resume thread services. Actual */
47 /* thread resumption is performed in the core service. */
48 /* */
49 /* INPUT */
50 /* */
51 /* thread_ptr Pointer to thread to resume */
52 /* */
53 /* OUTPUT */
54 /* */
55 /* status Service return status */
56 /* */
57 /* CALLS */
58 /* */
59 /* _tx_thread_system_resume Resume thread */
60 /* _tx_thread_system_ni_resume Non-interruptable resume thread */
61 /* */
62 /* CALLED BY */
63 /* */
64 /* Application Code */
65 /* */
66 /* RELEASE HISTORY */
67 /* */
68 /* DATE NAME DESCRIPTION */
69 /* */
70 /* 05-19-2020 William E. Lamie Initial Version 6.0 */
71 /* 09-30-2020 Yuxin Zhou Modified comment(s), */
72 /* resulting in version 6.1 */
73 /* */
74 /**************************************************************************/
UINT  _tx_thread_resume(TX_THREAD *thread_ptr)
{

TX_INTERRUPT_SAVE_AREA

/* Service return status: TX_SUCCESS, TX_SUSPEND_LIFTED, or TX_RESUME_ERROR.  */
UINT            status;

/* Thread whose preemption-threshold is temporarily raised when this resume
   is made during initialization; TX_NULL otherwise.  */
TX_THREAD       *saved_thread_ptr;

/* Original preemption-threshold of saved_thread_ptr, restored once the
   resume completes.  Only meaningful when saved_thread_ptr is non-NULL.  */
UINT            saved_threshold = ((UINT) 0);

#ifdef TX_INLINE_THREAD_RESUME_SUSPEND

/* Locals for the in-lined copy of the system resume logic (normally found
   in tx_thread_system_resume.c).  */
UINT            priority;
ULONG           priority_bit;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
TX_THREAD       *execute_ptr;
TX_THREAD       *current_thread;
ULONG           combined_flags;

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY   *entry_ptr;
ULONG                   time_stamp = ((ULONG) 0);
#endif

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif


#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif
#endif

    /* Lockout interrupts while the thread is being resumed.  */
    TX_DISABLE

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME_API, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&status), 0, TX_TRACE_THREAD_EVENTS)

    /* Log this kernel call.  */
    TX_EL_THREAD_RESUME_INSERT

    /* Determine if the thread is suspended or in the process of suspending.
       If so, call the thread resume processing.  */
    if (thread_ptr -> tx_thread_state == TX_SUSPENDED)
    {

        /* Determine if this resume call is being made from initialization.  */
        if (TX_THREAD_GET_SYSTEM_STATE() >= TX_INITIALIZE_IN_PROGRESS)
        {

            /* Yes, this resume call was made from initialization.  */

            /* Pickup the current thread execute pointer, which corresponds to the
               highest priority thread ready to execute.  Interrupt lockout is
               not required, since interrupts are assumed to be disabled during
               initialization.  */
            saved_thread_ptr = _tx_thread_execute_ptr;

            /* Determine if there is a thread ready for execution.  */
            if (saved_thread_ptr != TX_NULL)
            {

                /* Yes, a thread is ready for execution when initialization completes.  */

                /* Save the current preemption-threshold.  */
                saved_threshold = saved_thread_ptr -> tx_thread_preempt_threshold;

                /* For initialization, temporarily set the preemption-threshold to the
                   priority level to make sure the highest-priority thread runs once
                   initialization is complete.  */
                saved_thread_ptr -> tx_thread_preempt_threshold = saved_thread_ptr -> tx_thread_priority;
            }
        }
        else
        {

            /* Not called from initialization — no threshold juggling needed.
               Simply set the saved thread pointer to NULL.  */
            saved_thread_ptr = TX_NULL;
        }

#ifndef TX_INLINE_THREAD_RESUME_SUSPEND

#ifdef TX_NOT_INTERRUPTABLE

        /* Non-interruptable build: resume with interrupts still locked out.  */

        /* Resume the thread!  */
        _tx_thread_system_ni_resume(thread_ptr);

        /* Restore interrupts.  */
        TX_RESTORE
#else

        /* Temporarily disable preemption so the resume completes atomically
           with respect to the scheduler even after interrupts are re-enabled.  */
        _tx_thread_preempt_disable++;

        /* Restore interrupts.  */
        TX_RESTORE

        /* Call the actual resume service to resume the thread.  */
        _tx_thread_system_resume(thread_ptr);
#endif

        /* Determine if the thread's preemption-threshold needs to be restored.  */
        if (saved_thread_ptr != TX_NULL)
        {

            /* Yes, restore the previous highest-priority thread's preemption-threshold. This
               can only happen if this routine is called from initialization.  */
            saved_thread_ptr -> tx_thread_preempt_threshold = saved_threshold;
        }

#ifdef TX_MISRA_ENABLE

        /* MISRA builds use a single exit point: re-disable interrupts and fall
           through to the common TX_RESTORE/return at the bottom.  */

        /* Disable interrupts.  */
        TX_DISABLE

        /* Setup successful return status.  */
        status = TX_SUCCESS;
#else

        /* Return successful completion.  */
        return(TX_SUCCESS);
#endif


#else

        /* In-line thread resumption processing follows, which is effectively just taking the
           logic in tx_thread_system_resume.c and placing it here!  */

        /* Resume the thread!  */

#ifdef TX_ENABLE_EVENT_TRACE

        /* If trace is enabled, save the current event pointer.  */
        entry_ptr = _tx_trace_buffer_current_ptr;
#endif

        /* Log the thread status change.  */
        TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&execute_ptr), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_ENABLE_EVENT_TRACE

        /* Save the time stamp for later comparison to verify that
           the event hasn't been overwritten by the time we have
           computed the next thread to execute.  */
        if (entry_ptr != TX_NULL)
        {

            /* Save time stamp.  */
            time_stamp = entry_ptr -> tx_trace_buffer_entry_time_stamp;
        }
#endif

        /* Make this thread ready.  */

        /* Change the state to ready.  */
        thread_ptr -> tx_thread_state = TX_READY;

        /* Pickup priority of thread.  */
        priority = thread_ptr -> tx_thread_priority;

        /* Thread state change.  */
        TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

        /* Log the thread status change.  */
        TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

        /* Increment the total number of thread resumptions.  */
        _tx_thread_performance_resume_count++;

        /* Increment this thread's resume count.  */
        thread_ptr -> tx_thread_performance_resume_count++;
#endif

        /* Determine if there are other threads at this priority that are
           ready.  */
        head_ptr = _tx_thread_priority_list[priority];
        if (head_ptr == TX_NULL)
        {

            /* First thread at this priority ready.  Add to the front of the list
               as a circular doubly-linked list of one.  */
            _tx_thread_priority_list[priority] = thread_ptr;
            thread_ptr -> tx_thread_ready_next = thread_ptr;
            thread_ptr -> tx_thread_ready_previous = thread_ptr;

#if TX_MAX_PRIORITIES > 32

            /* Calculate the index into the bit map array.  */
            map_index = priority/((UINT) 32);

            /* Set the active bit to remember that the priority map has something set.  */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_map_active = _tx_thread_priority_map_active | priority_bit;
#endif

            /* Or in the thread's priority bit.  */
            /* NOTE(review): MAP_INDEX presumably expands to map_index when
               TX_MAX_PRIORITIES > 32 and to 0 otherwise — confirm against its
               definition in tx_thread.h.  */
            TX_MOD32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] | priority_bit;

            /* Determine if this newly ready thread is the highest priority.  */
            if (priority < _tx_thread_highest_priority)
            {

                /* A new highest priority thread is present.  */

                /* Update the highest priority variable.  */
                _tx_thread_highest_priority = priority;

                /* Pickup the execute pointer.  Since it is going to be referenced multiple
                   times, it is placed in a local variable.  */
                execute_ptr = _tx_thread_execute_ptr;

                /* Determine if no thread is currently executing.  */
                if (execute_ptr == TX_NULL)
                {

                    /* Simply setup the execute pointer.  */
                    _tx_thread_execute_ptr = thread_ptr;
                }
                else
                {

                    /* Another thread has been scheduled for execution.  */

                    /* Check to see if this is a higher priority thread and determine if preemption is allowed.  */
                    if (priority < execute_ptr -> tx_thread_preempt_threshold)
                    {

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                        /* Determine if the preempted thread had preemption-threshold set.  */
                        if (execute_ptr -> tx_thread_preempt_threshold != execute_ptr -> tx_thread_priority)
                        {

#if TX_MAX_PRIORITIES > 32

                            /* Calculate the index into the bit map array.  */
                            map_index = (execute_ptr -> tx_thread_priority)/((UINT) 32);

                            /* Set the active bit to remember that the preempt map has something set.  */
                            TX_DIV32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                            _tx_thread_preempted_map_active = _tx_thread_preempted_map_active | priority_bit;
#endif

                            /* Remember that this thread was preempted by a thread above the thread's threshold.  */
                            TX_MOD32_BIT_SET(execute_ptr -> tx_thread_priority, priority_bit)
                            _tx_thread_preempted_maps[MAP_INDEX] = _tx_thread_preempted_maps[MAP_INDEX] | priority_bit;
                        }
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                        /* Determine if the caller is an interrupt or from a thread.  */
                        if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
                        {

                            /* Caller is a thread, so this is a solicited preemption.  */
                            _tx_thread_performance_solicited_preemption_count++;

                            /* Increment the thread's solicited preemption counter.  */
                            execute_ptr -> tx_thread_performance_solicited_preemption_count++;
                        }
                        else
                        {

                            /* Non-zero but below TX_INITIALIZE_IN_PROGRESS means an ISR caller;
                               calls made during initialization are not counted.  */
                            if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
                            {

                                /* Caller is an interrupt, so this is an interrupt preemption.  */
                                _tx_thread_performance_interrupt_preemption_count++;

                                /* Increment the thread's interrupt preemption counter.  */
                                execute_ptr -> tx_thread_performance_interrupt_preemption_count++;
                            }
                        }

                        /* Remember the thread that preempted this thread.  */
                        execute_ptr -> tx_thread_performance_last_preempting_thread = thread_ptr;
#endif

                        /* Yes, modify the execute thread pointer.  */
                        _tx_thread_execute_ptr = thread_ptr;

#ifndef TX_MISRA_ENABLE

                        /* If MISRA is not-enabled, insert a preemption and return in-line for performance.  */

                        /* Determine if the thread's preemption-threshold needs to be restored.  */
                        if (saved_thread_ptr != TX_NULL)
                        {

                            /* Yes, restore the previous highest-priority thread's preemption-threshold. This
                               can only happen if this routine is called from initialization.  */
                            saved_thread_ptr -> tx_thread_preempt_threshold = saved_threshold;
                        }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                        /* Is the execute pointer different?  */
                        if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
                        {

                            /* Move to next entry.  */
                            _tx_thread_performance__execute_log_index++;

                            /* Check for wrap condition.  */
                            if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                            {

                                /* Set the index to the beginning.  */
                                _tx_thread_performance__execute_log_index = ((UINT) 0);
                            }

                            /* Log the new execute pointer.  */
                            _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
                        }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

                        /* Check that the event time stamp is unchanged.  A different
                           timestamp means that a later event wrote over the thread
                           resume event.  In that case, do nothing here.  */
                        if (entry_ptr != TX_NULL)
                        {

                            /* Is the timestamp the same?  */
                            if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
                            {

                                /* Timestamp is the same, so update the resume event with the
                                   new execute pointer, letting the trace analysis tool show
                                   which thread runs next.  */
                                entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
                            }
                        }
#endif

                        /* Restore interrupts.  */
                        TX_RESTORE

#ifdef TX_ENABLE_STACK_CHECKING

                        /* Pickup the next execute pointer.  */
                        thread_ptr = _tx_thread_execute_ptr;

                        /* Check this thread's stack.  */
                        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

                        /* Now determine if preemption should take place. This is only possible if the current thread pointer is
                           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
                        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
                        if (combined_flags == ((ULONG) 0))
                        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                            /* There is another thread ready to run and will be scheduled upon return.  */
                            _tx_thread_performance_non_idle_return_count++;
#endif

                            /* Preemption is needed - return to the system!  */
                            _tx_thread_system_return();
                        }

                        /* Return in-line when MISRA is not enabled.  */
                        return(TX_SUCCESS);
#endif
                    }
                }
            }
        }
        else
        {

            /* No, there are other threads at this priority already ready.  */

            /* Just add this thread to the end of the circular priority list
               (before head_ptr, so round-robin order is preserved).  */
            tail_ptr = head_ptr -> tx_thread_ready_previous;
            tail_ptr -> tx_thread_ready_next = thread_ptr;
            head_ptr -> tx_thread_ready_previous = thread_ptr;
            thread_ptr -> tx_thread_ready_previous = tail_ptr;
            thread_ptr -> tx_thread_ready_next = head_ptr;
        }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

        /* Determine if we should log the execute pointer.  */

        /* Is the execute pointer different?  */
        if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
        {

            /* Move to next entry.  */
            _tx_thread_performance__execute_log_index++;

            /* Check for wrap condition.  */
            if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
            {

                /* Set the index to the beginning.  */
                _tx_thread_performance__execute_log_index = ((UINT) 0);
            }

            /* Log the new execute pointer.  */
            _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
        }
#endif

#ifdef TX_ENABLE_EVENT_TRACE

        /* Check that the event time stamp is unchanged.  A different
           timestamp means that a later event wrote over the thread
           resume event.  In that case, do nothing here.  */
        if (entry_ptr != TX_NULL)
        {

            /* Is the timestamp the same?  */
            if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
            {

                /* Timestamp is the same, so update the resume event with the
                   new execute pointer, letting the trace analysis tool show
                   which thread runs next.  */
#ifdef TX_MISRA_ENABLE
                entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#else
                entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
#endif
            }
        }
#endif

        /* Determine if the thread's preemption-threshold needs to be restored.  */
        if (saved_thread_ptr != TX_NULL)
        {

            /* Yes, restore the previous highest-priority thread's preemption-threshold. This
               can only happen if this routine is called from initialization.  */
            saved_thread_ptr -> tx_thread_preempt_threshold = saved_threshold;
        }

        /* Setup successful return status.  */
        status = TX_SUCCESS;
#endif
    }
    else if (thread_ptr -> tx_thread_delayed_suspend == TX_TRUE)
    {

        /* The thread was not suspended yet but had a suspend pending —
           cancel it instead of resuming.  */

        /* Clear the delayed suspension.  */
        thread_ptr -> tx_thread_delayed_suspend = TX_FALSE;

        /* Setup delayed suspend lifted return status.  */
        status = TX_SUSPEND_LIFTED;
    }
    else
    {

        /* Thread was neither suspended nor pending suspension.  */

        /* Setup invalid resume return status.  */
        status = TX_RESUME_ERROR;
    }

    /* Restore interrupts.  */
    TX_RESTORE

#ifdef TX_INLINE_THREAD_RESUME_SUSPEND

    /* In-line build: check here (with interrupts enabled) whether the resume
       above requires a context switch.  */

    /* Pickup thread pointer.  */
    TX_THREAD_GET_CURRENT(current_thread)

    /* Determine if a preemption condition is present.  */
    if (current_thread != _tx_thread_execute_ptr)
    {

#ifdef TX_ENABLE_STACK_CHECKING

        /* Pickup the next execute pointer.  */
        thread_ptr = _tx_thread_execute_ptr;

        /* Check this thread's stack.  */
        TX_THREAD_STACK_CHECK(thread_ptr)
#endif

        /* Now determine if preemption should take place. This is only possible if the current thread pointer is
           not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
        TX_THREAD_SYSTEM_RETURN_CHECK(combined_flags)
        if (combined_flags == ((ULONG) 0))
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* There is another thread ready to run and will be scheduled upon return.  */
            _tx_thread_performance_non_idle_return_count++;
#endif

            /* Preemption is needed - return to the system!  */
            _tx_thread_system_return();
        }
    }
#endif

    /* Return completion status.  */
    return(status);
}
583
584