/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE
#define TX_THREAD_SMP_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_timer.h"
#include "tx_thread.h"
#include "tx_trace.h"


/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_system_resume                           PORTABLE SMP     */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function places the specified thread on the list of ready      */
/*    threads at the thread's specific priority. If thread preemption     */
/*    is detected, this function returns control to the system so the     */
/*    preemption can take place.                                          */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    thread_ptr                            Pointer to thread to resume   */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_smp_available_cores_get    Get available cores bitmap    */
/*    _tx_thread_smp_core_preempt           Preempt core for new thread   */
/*    _tx_thread_smp_core_wakeup            Wakeup other core             */
/*    _tx_thread_smp_execute_list_clear     Clear the thread execute list */
/*    _tx_thread_smp_execute_list_setup     Setup the thread execute list */
/*    _tx_thread_smp_core_interrupt         Interrupt other core          */
/*    _tx_thread_smp_lowest_priority_get    Get lowest priority scheduled */
/*                                            thread                      */
/*    _tx_thread_smp_next_priority_find     Find next priority with one   */
/*                                            or more ready threads       */
/*    _tx_thread_smp_possible_cores_get     Get possible cores bitmap     */
/*    _tx_thread_smp_preemptable_threads_get                              */
/*                                          Get list of thread preemption */
/*                                            possibilities               */
/*    [_tx_thread_smp_protect]              Get protection                */
/*    _tx_thread_smp_rebalance_execute_list Rebalance the execution list  */
/*    _tx_thread_smp_remap_solution_find    Attempt to remap threads to   */
/*                                            schedule another thread     */
/*    _tx_thread_smp_schedule_list_clear    Clear the thread schedule list*/
/*    _tx_thread_smp_schedule_list_setup    Inherit schedule list from    */
/*                                            execute list                */
/*    _tx_thread_system_return              Return to the system          */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_thread_create                     Thread create function        */
/*    _tx_thread_priority_change            Thread priority change        */
/*    _tx_thread_resume                     Application resume service    */
/*    _tx_thread_timeout                    Thread timeout                */
/*    _tx_thread_wait_abort                 Thread wait abort             */
/*    Other ThreadX Components                                            */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020     William E. Lamie         Initial Version 6.1           */
/*                                                                        */
/**************************************************************************/
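/* Note: in interruptable builds (TX_NOT_INTERRUPTABLE undefined), callers are
   expected to have incremented the preemption disable flag before invoking
   this function; the paths below decrement that flag and restore interrupts
   before returning.  */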
VOID  _tx_thread_system_resume(TX_THREAD *thread_ptr)
{

#ifndef TX_NOT_INTERRUPTABLE

TX_INTERRUPT_SAVE_AREA

#endif

UINT            priority;
ULONG           priority_bit;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
UINT            core_index;
#ifndef TX_THREAD_SMP_EQUAL_PRIORITY
UINT            j;
UINT            lowest_priority;
TX_THREAD       *next_thread;
ULONG           test_cores;
UINT            core;
UINT            thread_mapped;
TX_THREAD       *preempt_thread;
ULONG           possible_cores;
ULONG           thread_possible_cores;
ULONG           available_cores;
ULONG           test_possible_cores;
TX_THREAD       *possible_preemption_list[TX_THREAD_SMP_MAX_CORES];
#endif
TX_THREAD       *execute_thread;
UINT            i;
UINT            loop_finished;
UINT            processing_complete;

#ifdef TX_ENABLE_EVENT_TRACE
TX_TRACE_BUFFER_ENTRY   *entry_ptr;
ULONG                   time_stamp = ((ULONG) 0);
#endif

#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif

#ifndef TX_NO_TIMER
TX_TIMER_INTERNAL       *timer_ptr;
TX_TIMER_INTERNAL       **list_head;
TX_TIMER_INTERNAL       *next_timer;
TX_TIMER_INTERNAL       *previous_timer;
#endif


    /* Set the processing complete flag to false.  */
    processing_complete = TX_FALSE;

#ifndef TX_NOT_INTERRUPTABLE

    /* Lockout interrupts while the thread is being resumed.  */
    TX_DISABLE
#endif


#ifndef TX_NO_TIMER

    /* Deactivate the timeout timer if necessary.  */
    if ((thread_ptr -> tx_thread_timer.tx_timer_internal_list_head) != TX_NULL)
    {

        /* Deactivate the thread's timeout timer. This is now done in-line
           for ThreadX SMP so the additional protection logic can be avoided.  */

        /* Deactivate the timer.  */

        /* Pickup internal timer pointer.  */
        timer_ptr = &(thread_ptr -> tx_thread_timer);

        /* Pickup the list head pointer.  */
        list_head = timer_ptr -> tx_timer_internal_list_head;

        /* Pickup the next active timer.  */
        next_timer = timer_ptr -> tx_timer_internal_active_next;
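
        /* Active timers are kept on circular, doubly-linked expiration lists,
           so a timer whose next pointer refers back to itself is the only
           timer on its list.  */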

        /* See if this is the only timer in the list.  */
        if (timer_ptr == next_timer)
        {

            /* Yes, the only timer on the list.  */

            /* Determine if the head pointer needs to be updated.  */
            if (*(list_head) == timer_ptr)
            {

                /* Update the head pointer.  */
                *(list_head) = TX_NULL;
            }
        }
        else
        {

            /* At least one more timer is on the same expiration list.  */

            /* Update the links of the adjacent timers.  */
            previous_timer = timer_ptr -> tx_timer_internal_active_previous;
            next_timer -> tx_timer_internal_active_previous = previous_timer;
            previous_timer -> tx_timer_internal_active_next = next_timer;

            /* Determine if the head pointer needs to be updated.  */
            if (*(list_head) == timer_ptr)
            {

                /* Update the next timer in the list with the list head pointer.  */
                next_timer -> tx_timer_internal_list_head = list_head;

                /* Update the head pointer.  */
                *(list_head) = next_timer;
            }
        }

        /* Clear the timer's list head pointer.  */
        timer_ptr -> tx_timer_internal_list_head = TX_NULL;
    }
    else
    {

        /* Clear the remaining time to ensure timer doesn't get activated.  */
        thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks = ((ULONG) 0);
    }
#endif

#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif


    /* Pickup index.  */
    core_index = TX_SMP_CORE_ID;
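
    /* TX_SMP_CORE_ID resolves to the port-specific mechanism for querying the
       index of the core this code is currently executing on.  */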

#ifdef TX_ENABLE_EVENT_TRACE

    /* If trace is enabled, save the current event pointer.  */
    entry_ptr = _tx_trace_buffer_current_ptr;
#endif

    /* Log the thread status change.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&time_stamp), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]), TX_TRACE_INTERNAL_EVENTS)

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

    /* Debug entry.  */
    _tx_thread_smp_debug_entry_insert(4, 0, thread_ptr);
#endif

#ifdef TX_ENABLE_EVENT_TRACE

    /* Save the time stamp for later comparison to verify that
       the event hasn't been overwritten by the time we have
       computed the next thread to execute.  */
    if (entry_ptr != TX_NULL)
    {

        /* Save time stamp.  */
        time_stamp = entry_ptr -> tx_trace_buffer_entry_time_stamp;
    }
#endif


    /* Determine if the thread is in the process of suspending. If so, the thread
       control block is already on the linked list so nothing needs to be done.  */
    if (thread_ptr -> tx_thread_suspending == TX_TRUE)
    {

        /* Make sure the type of suspension under way is not a terminate or
           thread completion. In either of these cases, do not void the
           interrupted suspension processing.  */
        if (thread_ptr -> tx_thread_state != TX_COMPLETED)
        {

            /* Make sure the thread isn't terminated.  */
            if (thread_ptr -> tx_thread_state != TX_TERMINATED)
            {

                /* No, now check to see if the delayed suspension flag is set.  */
                if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
                {

                    /* Clear the suspending flag.  */
                    thread_ptr -> tx_thread_suspending = TX_FALSE;

                    /* Restore the state to ready.  */
                    thread_ptr -> tx_thread_state = TX_READY;

                    /* Thread state change.  */
                    TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

                    /* Log the thread status change.  */
                    TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)
                }
                else
                {

                    /* Clear the delayed suspend flag and change the state.  */
                    thread_ptr -> tx_thread_delayed_suspend = TX_FALSE;
                    thread_ptr -> tx_thread_state = TX_SUSPENDED;
                }

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the total number of thread resumptions.  */
                _tx_thread_performance_resume_count++;

                /* Increment this thread's resume count.  */
                thread_ptr -> tx_thread_performance_resume_count++;
#endif
            }
        }
    }
    else
    {

        /* Check to make sure the thread has not already been resumed.  */
        if (thread_ptr -> tx_thread_state != TX_READY)
        {

            /* Check for a delayed suspend flag.  */
            if (thread_ptr -> tx_thread_delayed_suspend == TX_TRUE)
            {

                /* Clear the delayed suspend flag and change the state.  */
                thread_ptr -> tx_thread_delayed_suspend = TX_FALSE;
                thread_ptr -> tx_thread_state = TX_SUSPENDED;
            }
            else
            {

                /* Thread state change.  */
                TX_THREAD_STATE_CHANGE(thread_ptr, TX_READY)

                /* Log the thread status change.  */
                TX_EL_THREAD_STATUS_CHANGE_INSERT(thread_ptr, TX_READY)

                /* Make this thread ready.  */

                /* Change the state to ready.  */
                thread_ptr -> tx_thread_state = TX_READY;

                /* Pickup priority of thread.  */
                priority = thread_ptr -> tx_thread_priority;

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the total number of thread resumptions.  */
                _tx_thread_performance_resume_count++;

                /* Increment this thread's resume count.  */
                thread_ptr -> tx_thread_performance_resume_count++;
#endif

                /* Determine if there are other threads at this priority that are
                   ready.  */
                head_ptr = _tx_thread_priority_list[priority];
                if (head_ptr != TX_NULL)
                {

                    /* Yes, there are other threads at this priority already ready.  */

                    /* Just add this thread to the priority list.  */
                    tail_ptr = head_ptr -> tx_thread_ready_previous;
                    tail_ptr -> tx_thread_ready_next = thread_ptr;
                    head_ptr -> tx_thread_ready_previous = thread_ptr;
                    thread_ptr -> tx_thread_ready_previous = tail_ptr;
                    thread_ptr -> tx_thread_ready_next = head_ptr;
                }
                else
                {

                    /* First thread at this priority ready. Add to the front of the list.  */
                    _tx_thread_priority_list[priority] = thread_ptr;
                    thread_ptr -> tx_thread_ready_next = thread_ptr;
                    thread_ptr -> tx_thread_ready_previous = thread_ptr;

#if TX_MAX_PRIORITIES > 32

                    /* Calculate the index into the bit map array.  */
                    map_index = priority/((UINT) 32);

                    /* Set the active bit to remember that the priority map has something set.  */
                    TX_DIV32_BIT_SET(priority, priority_bit)
                    _tx_thread_priority_map_active = _tx_thread_priority_map_active | priority_bit;
#endif

                    /* Or in the thread's priority bit.  */
                    TX_MOD32_BIT_SET(priority, priority_bit)
                    _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
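
                    /* MAP_INDEX expands to map_index when TX_MAX_PRIORITIES > 32
                       and to 0 otherwise, so the common single-map case indexes
                       element zero directly.  */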
                }

                /* Determine if a thread with preemption-threshold is currently scheduled.  */
                if (_tx_thread_preemption__threshold_scheduled != TX_NULL)
                {

                    /* Yes, there has been a thread with preemption-threshold scheduled.  */

                    /* Determine if this thread can run with the current preemption-threshold.  */
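                    /* Note: lower numeric values mean higher priority, so a priority
                       greater than or equal to the scheduled threshold is not high
                       enough to break through it.  */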
                    if (priority >= _tx_thread_preemption__threshold_scheduled -> tx_thread_preempt_threshold)
                    {

                        /* The thread cannot run because of the current preemption-threshold. Simply
                           return at this point.  */

#ifndef TX_NOT_INTERRUPTABLE

                        /* Decrement the preemption disable flag.  */
                        _tx_thread_preempt_disable--;
#endif

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

                        /* Debug entry.  */
                        _tx_thread_smp_debug_entry_insert(5, 0, thread_ptr);
#endif

#ifndef TX_NOT_INTERRUPTABLE

                        /* Restore interrupts.  */
                        TX_RESTORE
#endif

                        /* Processing is complete, set the complete flag.  */
                        processing_complete = TX_TRUE;
                    }
                }

                /* Is the processing complete at this point?  */
                if (processing_complete == TX_FALSE)
                {

                    /* Determine if this newly ready thread has preemption-threshold set. If so, determine
                       if any other threads would need to be unscheduled for this thread to execute.  */
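                    /* A preemption-threshold numerically below the thread's own
                       priority means the threshold is actually in use; by default
                       the threshold equals the priority.  */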
                    if (thread_ptr -> tx_thread_preempt_threshold < priority)
                    {

                        /* Is there a place in the execution list for the newly ready thread?  */
                        i = ((UINT) 0);
                        loop_finished = TX_FALSE;
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
                        while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
                        while (i < _tx_thread_smp_max_cores)
#endif
                        {

                            /* Pickup the current execute thread for this core.  */
                            execute_thread = _tx_thread_execute_ptr[i];

                            /* Is there a thread mapped to this core?  */
                            if (execute_thread == TX_NULL)
                            {

                                /* Get out of the loop.  */
                                loop_finished = TX_TRUE;
                            }
                            else
                            {

                                /* Determine if this thread should preempt the thread in the execution list.  */
                                if (priority < execute_thread -> tx_thread_preempt_threshold)
                                {

                                    /* Get out of the loop.  */
                                    loop_finished = TX_TRUE;
                                }
                            }

                            /* Determine if we need to get out of the loop.  */
                            if (loop_finished == TX_TRUE)
                            {

                                /* Get out of the loop.  */
                                break;
                            }

                            /* Move to next index.  */
                            i++;
                        }

                        /* Determine if there is a reason to rebalance the list.  */
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
                        if (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
                        if (i < _tx_thread_smp_max_cores)
#endif
                        {

                            /* Yes, the new thread has preemption-threshold set and there is a slot in the
                               execution list for it.  */

                            /* Call the rebalance routine. This routine maps cores and ready threads.  */
                            _tx_thread_smp_rebalance_execute_list(core_index);
                        }
                    }
#ifdef TX_THREAD_SMP_EQUAL_PRIORITY
                    else
                    {

                        /* For equal priority SMP, we simply use the rebalance list function.  */

                        /* Call the rebalance routine. This routine maps cores and ready threads.  */
                        _tx_thread_smp_rebalance_execute_list(core_index);
                    }
#else
                    else
                    {

                        /* Determine if this thread has any available cores to execute on.  */
                        if (thread_ptr -> tx_thread_smp_cores_allowed != ((ULONG) 0))
                        {

                            /* At this point we know that the newly ready thread does not have preemption-threshold set and that
                               any existing preemption-threshold is not blocking this thread from executing.  */

                            /* Pickup the core this thread was previously executing on.  */
                            i = thread_ptr -> tx_thread_smp_core_mapped;

                            /* Pickup the currently executing thread for the previously mapped core.  */
                            execute_thread = _tx_thread_execute_ptr[i];

                            /* First, let's see if the last core this thread executed on is available.  */
                            if (execute_thread == TX_NULL)
                            {

                                /* Yes, simply place this thread into the execute list at the same location.  */
                                _tx_thread_execute_ptr[i] = thread_ptr;

                                /* If necessary, interrupt the core with the new thread to schedule.  */
                                _tx_thread_smp_core_interrupt(thread_ptr, core_index, i);

                                /* If necessary, wakeup the target core.  */
                                _tx_thread_smp_core_wakeup(core_index, i);
                            }
                            else
                            {

                                /* This thread cannot execute on the core it last executed on
                                   because another thread is already scheduled on that core.  */

                                /* Pickup the available cores for the newly ready thread.  */
                                available_cores = thread_ptr -> tx_thread_smp_cores_allowed;

                                /* Isolate the lowest set bit so we can determine if more than one core is
                                   available.  */
                                available_cores = available_cores & ((~available_cores) + ((ULONG) 1));
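
                                /* Example: for an allowed-cores mask of 0110 (binary),
                                   ~0110 + 1 = 1010 and 0110 & 1010 = 0010, the lowest set
                                   bit. The result equals the original mask only when
                                   exactly one core is allowed.  */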

                                /* Determine if both this thread and the currently scheduled thread are
                                   restricted to this same single core, in which case preemption is the
                                   only way this thread can be scheduled.  */
                                if ((available_cores == thread_ptr -> tx_thread_smp_cores_allowed) &&
                                    (available_cores == execute_thread -> tx_thread_smp_cores_allowed))
                                {

                                    /* Both this thread and the execute thread can only execute on the same core,
                                       so this thread can only be scheduled if its priority is less. Otherwise,
                                       there is nothing else to examine.  */
                                    if (thread_ptr -> tx_thread_priority < execute_thread -> tx_thread_priority)
                                    {

                                        /* We know that we have to preempt the executing thread.  */

                                        /* Preempt the executing thread.  */
                                        _tx_thread_execute_ptr[i] = thread_ptr;

                                        /* If necessary, interrupt the core with the new thread to schedule.  */
                                        _tx_thread_smp_core_interrupt(thread_ptr, core_index, i);

                                        /* If necessary, wakeup the core.  */
                                        _tx_thread_smp_core_wakeup(core_index, i);
                                    }
                                }
                                else
                                {

                                    /* Determine if there are any available cores to execute on.  */
                                    available_cores = _tx_thread_smp_available_cores_get();

                                    /* Determine what the possible cores are for this thread.  */
                                    thread_possible_cores = thread_ptr -> tx_thread_smp_cores_allowed;

                                    /* Set the thread mapped flag to false.  */
                                    thread_mapped = TX_FALSE;

                                    /* Determine if there are available cores.  */
                                    if (available_cores != ((ULONG) 0))
                                    {

                                        /* Determine if one of the available cores is allowed for this thread.  */
                                        if ((available_cores & thread_possible_cores) != ((ULONG) 0))
                                        {

                                            /* Calculate the lowest set bit of allowed cores.  */
                                            test_cores = (thread_possible_cores & available_cores);
                                            TX_LOWEST_SET_BIT_CALCULATE(test_cores, i)
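
                                            /* TX_LOWEST_SET_BIT_CALCULATE is a port-defined macro
                                               that places the index of the lowest set bit in the
                                               second argument, typically via a count-trailing-zeros
                                               style instruction where the port provides one.  */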

                                            /* Remember this index in the thread control block.  */
                                            thread_ptr -> tx_thread_smp_core_mapped = i;

                                            /* Map this thread to the free slot.  */
                                            _tx_thread_execute_ptr[i] = thread_ptr;

                                            /* Indicate this thread was mapped.  */
                                            thread_mapped = TX_TRUE;

                                            /* If necessary, wakeup the target core.  */
                                            _tx_thread_smp_core_wakeup(core_index, i);
                                        }
                                        else
                                        {

                                            /* There are available cores, however, they are all excluded.  */

                                            /* Calculate the possible cores from the cores currently scheduled.  */
                                            possible_cores = _tx_thread_smp_possible_cores_get();

                                            /* Determine if it is worthwhile to try to remap the execution list.  */
                                            if ((available_cores & possible_cores) != ((ULONG) 0))
                                            {

                                                /* Yes, some of the currently scheduled threads can be moved.  */

                                                /* Now determine if there could be a remap solution that will allow us to schedule this thread.  */

                                                /* Narrow to the current possible cores.  */
                                                thread_possible_cores = thread_possible_cores & possible_cores;

                                                /* Now we need to see if one of the other threads in the non-excluded cores can be moved to make room
                                                   for this thread.  */

                                                /* Default the schedule list to the current execution list.  */
                                                _tx_thread_smp_schedule_list_setup();

                                                /* Determine the possible core mapping.  */
                                                test_possible_cores = possible_cores & ~(thread_possible_cores);
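
                                                /* test_possible_cores is the set of cores that the
                                                   currently scheduled threads could be moved to but
                                                   that this thread cannot use, which bounds the
                                                   remap search below.  */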

                                                /* Attempt to remap the cores in order to schedule this thread.  */
                                                core = _tx_thread_smp_remap_solution_find(thread_ptr, available_cores, thread_possible_cores, test_possible_cores);

                                                /* Determine if remapping was successful.  */
                                                if (core != ((UINT) TX_THREAD_SMP_MAX_CORES))
                                                {

                                                    /* Clear the execute list.  */
                                                    _tx_thread_smp_execute_list_clear();

                                                    /* Setup the execute list based on the updated schedule list.  */
                                                    _tx_thread_smp_execute_list_setup(core_index);

                                                    /* Indicate this thread was mapped.  */
                                                    thread_mapped = TX_TRUE;
                                                }
                                            }
                                        }
                                    }

                                    /* Determine if we need to investigate thread preemption.  */
                                    if (thread_mapped == TX_FALSE)
                                    {

                                        /* At this point, we need to first check for thread preemption possibilities.  */
                                        lowest_priority = _tx_thread_smp_lowest_priority_get();

                                        /* Does this thread have a higher priority?  */
                                        if (thread_ptr -> tx_thread_priority < lowest_priority)
                                        {

                                            /* Yes, preemption is possible.  */

                                            /* Pickup the thread to preempt.  */
                                            preempt_thread = _tx_thread_priority_list[lowest_priority];

                                            /* Determine if more than one thread is ready at this priority level.  */
                                            if (preempt_thread -> tx_thread_ready_next != preempt_thread)
                                            {

                                                /* Remember the list head.  */
                                                head_ptr = preempt_thread;

                                                /* Setup thread search pointer to the start of the list.  */
                                                next_thread = preempt_thread -> tx_thread_ready_next;

                                                /* Loop to find the last thread scheduled at this priority.  */
                                                i = ((UINT) 0);
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
                                                while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
                                                while (i < _tx_thread_smp_max_cores)
#endif
                                                {

                                                    /* Is this thread currently scheduled?  */
                                                    if (next_thread == _tx_thread_execute_ptr[next_thread -> tx_thread_smp_core_mapped])
                                                    {

                                                        /* Yes, this is the new preempt thread.  */
                                                        preempt_thread = next_thread;

                                                        /* Increment core count.  */
                                                        i++;
                                                    }

                                                    /* Move to the next thread.  */
                                                    next_thread = next_thread -> tx_thread_ready_next;

                                                    /* Are we at the head of the list?  */
                                                    if (next_thread == head_ptr)
                                                    {

                                                        /* End the loop.  */
                                                        i = ((UINT) TX_THREAD_SMP_MAX_CORES);
                                                    }
                                                }
                                            }

                                            /* Calculate the core that this thread is scheduled on.  */
                                            possible_cores = (((ULONG) 1) << preempt_thread -> tx_thread_smp_core_mapped);
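
                                            /* This builds a one-bit mask identifying the single core
                                               the preempt candidate currently occupies.  */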

                                            /* Determine if preemption is possible.  */
                                            if ((thread_possible_cores & possible_cores) != ((ULONG) 0))
                                            {

                                                /* Pickup the newly available core.  */
                                                i = preempt_thread -> tx_thread_smp_core_mapped;

                                                /* Remember this index in the thread control block.  */
                                                thread_ptr -> tx_thread_smp_core_mapped = i;

                                                /* Map this thread to the free slot.  */
                                                _tx_thread_execute_ptr[i] = thread_ptr;

                                                /* If necessary, interrupt the core with the new thread to schedule.  */
                                                _tx_thread_smp_core_interrupt(thread_ptr, core_index, i);

                                                /* If necessary, wakeup the target core.  */
                                                _tx_thread_smp_core_wakeup(core_index, i);
                                            }
                                            else
                                            {

                                                /* Build the list of possible thread preemptions, ordered lowest priority first.  */
                                                possible_cores = _tx_thread_smp_preemptable_threads_get(thread_ptr -> tx_thread_priority, possible_preemption_list);

                                                /* Determine if preemption is possible.  */

                                                /* Loop through the potential threads that can be preempted.  */
                                                i = ((UINT) 0);
                                                loop_finished = TX_FALSE;
                                                while (possible_preemption_list[i] != TX_NULL)
                                                {

                                                    /* Pickup the thread to preempt.  */
                                                    preempt_thread = possible_preemption_list[i];

                                                    /* Pickup the core this thread is mapped to.  */
                                                    j = preempt_thread -> tx_thread_smp_core_mapped;

                                                    /* Calculate the core that this thread is scheduled on.  */
                                                    available_cores = (((ULONG) 1) << j);

                                                    /* Can this thread execute on this core?  */
                                                    if ((thread_possible_cores & available_cores) != ((ULONG) 0))
                                                    {

                                                        /* Remember this index in the thread control block.  */
                                                        thread_ptr -> tx_thread_smp_core_mapped = j;

                                                        /* Map this thread to the free slot.  */
                                                        _tx_thread_execute_ptr[j] = thread_ptr;

                                                        /* If necessary, interrupt the core with the new thread to schedule.  */
                                                        _tx_thread_smp_core_interrupt(thread_ptr, core_index, j);

                                                        /* If necessary, wakeup the target core.  */
                                                        _tx_thread_smp_core_wakeup(core_index, j);

                                                        /* Finished with the preemption condition.  */
                                                        loop_finished = TX_TRUE;
                                                    }
                                                    else
                                                    {

                                                        /* No, the thread to preempt is not running on a core available to the new thread.
                                                           Attempt to find a remapping solution.  */

                                                        /* Narrow to the current possible cores.  */
                                                        thread_possible_cores = thread_possible_cores & possible_cores;

                                                        /* Now we need to see if one of the other threads in the non-excluded cores can be moved to make room
                                                           for this thread.  */

                                                        /* Temporarily set the execute thread to NULL.  */
                                                        _tx_thread_execute_ptr[j] = TX_NULL;
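
                                                        /* Clearing this slot lets the remap search
                                                           treat core j as free; the slot is restored
                                                           below if no solution is found.  */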

                                                        /* Default the schedule list to the current execution list.  */
                                                        _tx_thread_smp_schedule_list_setup();

                                                        /* Determine the possible core mapping.  */
                                                        test_possible_cores = possible_cores & ~(thread_possible_cores);

                                                        /* Attempt to remap the cores in order to schedule this thread.  */
                                                        core = _tx_thread_smp_remap_solution_find(thread_ptr, available_cores, thread_possible_cores, test_possible_cores);

                                                        /* Determine if remapping was successful.  */
                                                        if (core != ((UINT) TX_THREAD_SMP_MAX_CORES))
                                                        {

                                                            /* Clear the execute list.  */
                                                            _tx_thread_smp_execute_list_clear();

                                                            /* Setup the execute list based on the updated schedule list.  */
                                                            _tx_thread_smp_execute_list_setup(core_index);

                                                            /* Finished with the preemption condition.  */
                                                            loop_finished = TX_TRUE;
                                                        }
                                                        else
                                                        {

                                                            /* Restore the preempted thread and examine the next thread.  */
                                                            _tx_thread_execute_ptr[j] = preempt_thread;
                                                        }
                                                    }

                                                    /* Determine if we should get out of the loop.  */
                                                    if (loop_finished == TX_TRUE)
                                                    {

                                                        /* Yes, get out of the loop.  */
                                                        break;
                                                    }

                                                    /* Move to the next possible thread preemption.  */
                                                    i++;
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
#endif
                }
            }
        }
    }

    /* Determine if there is more processing.  */
    if (processing_complete == TX_FALSE)
    {

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

        /* Debug entry.  */
        _tx_thread_smp_debug_entry_insert(5, 0, thread_ptr);
#endif

#ifdef TX_ENABLE_EVENT_TRACE

        /* Check that the event time stamp is unchanged. A different
           timestamp means that a later event wrote over the thread
           resume event. In that case, do nothing here.  */
        if ((entry_ptr != TX_NULL) && (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp))
        {

            /* Timestamp is the same, set the "next thread pointer" to NULL. This can
               be used by the trace analysis tool to show idle system conditions.  */
#ifdef TX_MISRA_ENABLE
            entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
#else
            entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr[core_index]);
#endif
        }
#endif

#ifndef TX_NOT_INTERRUPTABLE

        /* Decrement the preemption disable flag.  */
        _tx_thread_preempt_disable--;
#endif

        if (_tx_thread_current_ptr[core_index] != _tx_thread_execute_ptr[core_index])
        {

#ifdef TX_ENABLE_STACK_CHECKING

            /* Pickup the next thread to execute.  */
            thread_ptr = _tx_thread_execute_ptr[core_index];

            /* Determine if there is a thread pointer.  */
            if (thread_ptr != TX_NULL)
            {

                /* Check this thread's stack.  */
                TX_THREAD_STACK_CHECK(thread_ptr)
            }
#endif

            /* Now determine if preemption should take place. This is only possible if the current thread pointer is
               not the same as the execute thread pointer AND the system state and preempt disable flags are clear.  */
            if (_tx_thread_system_state[core_index] == ((ULONG) 0))
            {

                /* Is the preempt disable flag set?  */
                if (_tx_thread_preempt_disable == ((UINT) 0))
                {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                    /* No, there is another thread ready to run and will be scheduled upon return.  */
                    _tx_thread_performance_non_idle_return_count++;
#endif

#ifndef TX_NOT_INTERRUPTABLE

                    /* Increment the preempt disable flag in order to keep the protection.  */
                    _tx_thread_preempt_disable++;

                    /* Restore interrupts.  */
                    TX_RESTORE
#endif

                    /* Preemption is needed - return to the system!  */
                    _tx_thread_system_return();
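
                    /* Control returns here once this thread is subsequently
                       scheduled again by the ThreadX scheduler.  */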

#ifdef TX_NOT_INTERRUPTABLE

                    /* Setup protection again since caller is expecting that it is still in force.  */
                    _tx_thread_smp_protect();
#endif

#ifndef TX_NOT_INTERRUPTABLE

                    /* Set the processing complete flag.  */
                    processing_complete = TX_TRUE;
#endif
                }
            }
        }

#ifndef TX_NOT_INTERRUPTABLE

        /* Determine if processing is complete. If so, no need to restore interrupts.  */
        if (processing_complete == TX_FALSE)
        {

            /* Restore interrupts.  */
            TX_RESTORE
        }
#endif
    }
}

#ifdef TX_NOT_INTERRUPTABLE
VOID  _tx_thread_system_ni_resume(TX_THREAD *thread_ptr)
{

    /* Call system resume.  */
    _tx_thread_system_resume(thread_ptr);
}
#endif
