1 /***************************************************************************
2 * Copyright (c) 2024 Microsoft Corporation
3 *
4 * This program and the accompanying materials are made available under the
5 * terms of the MIT License which is available at
6 * https://opensource.org/licenses/MIT.
7 *
8 * SPDX-License-Identifier: MIT
9 **************************************************************************/
10
11
12 /**************************************************************************/
13 /**************************************************************************/
14 /** */
15 /** ThreadX Component */
16 /** */
17 /** Thread */
18 /** */
19 /**************************************************************************/
20 /**************************************************************************/
21
22
23 /**************************************************************************/
24 /* */
25 /* COMPONENT DEFINITION RELEASE */
26 /* */
27 /* tx_thread.h PORTABLE SMP */
28 /* 6.3.0 */
29 /* AUTHOR */
30 /* */
31 /* William E. Lamie, Microsoft Corporation */
32 /* */
33 /* DESCRIPTION */
34 /* */
35 /* This file defines the ThreadX thread control component, including */
36 /* data types and external references. It is assumed that tx_api.h */
37 /* and tx_port.h have already been included. */
38 /* */
39 /* RELEASE HISTORY */
40 /* */
41 /* DATE NAME DESCRIPTION */
42 /* */
43 /* 09-30-2020 William E. Lamie Initial Version 6.1 */
44 /* 10-31-2023 Tiejun Zhou Fixed MISRA2012 rule 8.3, */
45 /* resulting in version 6.3.0 */
46 /* */
47 /**************************************************************************/
48
49 #ifndef TX_THREAD_H
50 #define TX_THREAD_H
51
52
53 /* Add include files needed for in-line macros. */
54
55 #include "tx_initialize.h"
56
57
58 /* Define thread control specific data definitions. */
59
/* Thread control block ID: "THRD" in ASCII (0x54 0x48 0x52 0x44), used to
   validate TX_THREAD pointers.  */
#define TX_THREAD_ID                            ((ULONG) 0x54485244)

/* Number of distinct byte values (size of a byte-indexed table).  */
#define TX_THREAD_MAX_BYTE_VALUES               256

/* Mask isolating the low 8 bits (one priority group) of a priority map.  */
#define TX_THREAD_PRIORITY_GROUP_MASK           ((ULONG) 0xFF)

/* Number of priorities per priority group.  */
#define TX_THREAD_PRIORITY_GROUP_SIZE           8

/* Number of entries in the thread execute log.  */
#define TX_THREAD_EXECUTE_LOG_SIZE              ((UINT) 8)

/* Size of the SMP protection wait list: one entry per core, plus one.  */
#define TX_THREAD_SMP_PROTECT_WAIT_LIST_SIZE    (TX_THREAD_SMP_MAX_CORES + 1)
66
67
68 /* Define the default thread stack checking. This can be overridden by
69 a particular port, which is necessary if the stack growth is from
70 low address to high address (the default logic is for stacks that
71    grow from high address to low address). */
72
#ifndef TX_THREAD_STACK_CHECK

/* Validate the supplied thread's stack.  With interrupts disabled, and only
   when the thread pointer is non-NULL with a valid thread ID, this:
     1. updates tx_thread_stack_highest_ptr when the current stack pointer is
        lower than the recorded highest-use point (stacks grow downward here),
     2. calls _tx_thread_stack_error_handler when the TX_STACK_FILL pattern at
        either end of the stack area is corrupted, or the highest-use pointer
        has moved below the stack start (overflow), and
     3. calls _tx_thread_stack_analyze when the word just below the recorded
        highest-use point no longer holds TX_STACK_FILL (deeper stack usage
        that needs to be measured).
   Interrupts are temporarily restored around the handler/analyze calls.  */
#define TX_THREAD_STACK_CHECK(thread_ptr)                                                                           \
    {                                                                                                               \
    TX_INTERRUPT_SAVE_AREA                                                                                          \
        TX_DISABLE                                                                                                  \
        if (((thread_ptr)) && ((thread_ptr) -> tx_thread_id == TX_THREAD_ID))                                       \
        {                                                                                                           \
            /* Track the deepest (lowest) stack pointer seen so far.  */                                            \
            if (((ULONG *) (thread_ptr) -> tx_thread_stack_ptr) < ((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr)) \
            {                                                                                                       \
                (thread_ptr) -> tx_thread_stack_highest_ptr =  (thread_ptr) -> tx_thread_stack_ptr;                 \
            }                                                                                                       \
            /* Fill pattern destroyed at either end, or overflow past the stack start?  */                          \
            if ((*((ULONG *) (thread_ptr) -> tx_thread_stack_start) != TX_STACK_FILL) ||                            \
                (*((ULONG *) (((UCHAR *) (thread_ptr) -> tx_thread_stack_end) + 1)) != TX_STACK_FILL) ||            \
                (((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr) < ((ULONG *) (thread_ptr) -> tx_thread_stack_start))) \
            {                                                                                                       \
                TX_RESTORE                                                                                          \
                _tx_thread_stack_error_handler((thread_ptr));                                                       \
                TX_DISABLE                                                                                          \
            }                                                                                                       \
            /* Stack used deeper than last recorded?  Re-measure it.  */                                            \
            if (*(((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr) - 1) != TX_STACK_FILL)                    \
            {                                                                                                       \
                TX_RESTORE                                                                                          \
                _tx_thread_stack_analyze((thread_ptr));                                                             \
                TX_DISABLE                                                                                          \
            }                                                                                                       \
        }                                                                                                           \
        TX_RESTORE                                                                                                  \
    }
#endif
102
103
104 /* Define default post thread delete macro to whitespace, if it hasn't been defined previously (typically in tx_port.h). */
105
106 #ifndef TX_THREAD_DELETE_PORT_COMPLETION
107 #define TX_THREAD_DELETE_PORT_COMPLETION(t)
108 #endif
109
110
111 /* Define default post thread reset macro to whitespace, if it hasn't been defined previously (typically in tx_port.h). */
112
113 #ifndef TX_THREAD_RESET_PORT_COMPLETION
114 #define TX_THREAD_RESET_PORT_COMPLETION(t)
115 #endif
116
117
118 /* Define the thread create internal extension macro to whitespace, if it hasn't been defined previously (typically in tx_port.h). */
119
120 #ifndef TX_THREAD_CREATE_INTERNAL_EXTENSION
121 #define TX_THREAD_CREATE_INTERNAL_EXTENSION(t)
122 #endif
123
124
125 /* Define internal thread control function prototypes. */
126
127 VOID _tx_thread_initialize(VOID);
128 VOID _tx_thread_schedule(VOID);
129 VOID _tx_thread_shell_entry(VOID);
130 VOID _tx_thread_stack_analyze(TX_THREAD *thread_ptr);
131 VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID));
132 VOID _tx_thread_stack_error(TX_THREAD *thread_ptr);
133 VOID _tx_thread_stack_error_handler(TX_THREAD *thread_ptr);
134 VOID _tx_thread_system_preempt_check(VOID);
135 VOID _tx_thread_system_resume(TX_THREAD *thread_ptr);
136 VOID _tx_thread_system_ni_resume(TX_THREAD *thread_ptr);
137 VOID _tx_thread_system_return(VOID);
138 VOID _tx_thread_system_suspend(TX_THREAD *thread_ptr);
139 VOID _tx_thread_system_ni_suspend(TX_THREAD *thread_ptr, ULONG timeout);
140 VOID _tx_thread_time_slice(VOID);
141 VOID _tx_thread_timeout(ULONG timeout_input);
142
143
144 /* Define all internal SMP prototypes. */
145
146 void _tx_thread_smp_current_state_set(ULONG new_state);
147 UINT _tx_thread_smp_find_next_priority(UINT priority);
148 void _tx_thread_smp_high_level_initialize(void);
149 void _tx_thread_smp_rebalance_execute_list(UINT core_index);
150
151
152 /* Define all internal ThreadX SMP low-level assembly routines. */
153
154 VOID _tx_thread_smp_core_wait(void);
155 void _tx_thread_smp_initialize_wait(void);
156 void _tx_thread_smp_low_level_initialize(UINT number_of_cores);
157 void _tx_thread_smp_core_preempt(UINT core);
158
159
160 /* Thread control component external data declarations follow. */
161
162 #define THREAD_DECLARE extern
163
164
165 /* Define the pointer that contains the system stack pointer. This is
166 utilized when control returns from a thread to the system to reset the
167 current stack. This is setup in the low-level initialization function. */
168
169 THREAD_DECLARE VOID * _tx_thread_system_stack_ptr[TX_THREAD_SMP_MAX_CORES];
170
171
172 /* Define the current thread pointer. This variable points to the currently
173 executing thread. If this variable is NULL, no thread is executing. */
174
175 THREAD_DECLARE TX_THREAD * _tx_thread_current_ptr[TX_THREAD_SMP_MAX_CORES];
176
177
178 /* Define the variable that holds the next thread to execute. It is important
179 to remember that this is not necessarily equal to the current thread
180 pointer. */
181
182 THREAD_DECLARE TX_THREAD * _tx_thread_execute_ptr[TX_THREAD_SMP_MAX_CORES];
183
184
185 /* Define the ThreadX SMP scheduling and mapping data structures. */
186
187 THREAD_DECLARE TX_THREAD * _tx_thread_smp_schedule_list[TX_THREAD_SMP_MAX_CORES];
188 THREAD_DECLARE ULONG _tx_thread_smp_reschedule_pending;
189 THREAD_DECLARE TX_THREAD_SMP_PROTECT _tx_thread_smp_protection;
190 THREAD_DECLARE volatile ULONG _tx_thread_smp_release_cores_flag;
191 THREAD_DECLARE ULONG _tx_thread_smp_system_error;
192 THREAD_DECLARE ULONG _tx_thread_smp_inter_core_interrupts[TX_THREAD_SMP_MAX_CORES];
193
194 THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list_size;
195 THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list[TX_THREAD_SMP_PROTECT_WAIT_LIST_SIZE];
196 THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_counts[TX_THREAD_SMP_MAX_CORES];
197 THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list_lock_protect_in_force;
198 THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list_tail;
199 THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list_head;
200
201
202 /* Define logic for conditional dynamic maximum number of cores. */
203
204 #ifdef TX_THREAD_SMP_DYNAMIC_CORE_MAX
205
206 THREAD_DECLARE ULONG _tx_thread_smp_max_cores;
207 THREAD_DECLARE ULONG _tx_thread_smp_detected_cores;
208
209 #endif
210
211
212
213 /* Define the head pointer of the created thread list. */
214
215 THREAD_DECLARE TX_THREAD * _tx_thread_created_ptr;
216
217
218 /* Define the variable that holds the number of created threads. */
219
220 THREAD_DECLARE ULONG _tx_thread_created_count;
221
222
223 /* Define the current state variable. When this value is 0, a thread
224 is executing or the system is idle. Other values indicate that
225 interrupt or initialization processing is active. This variable is
226 initialized to TX_INITIALIZE_IN_PROGRESS to indicate initialization is
227 active. */
228
229 THREAD_DECLARE volatile ULONG _tx_thread_system_state[TX_THREAD_SMP_MAX_CORES];
230
231
232 /* Determine if we need to remap system state to a function call. */
233
234 #ifndef TX_THREAD_SMP_SOURCE_CODE
235
236
237 /* Yes, remap system state to a function call so we can get the system state for the current core. */
238
239 #define _tx_thread_system_state _tx_thread_smp_current_state_get()
240
241
242 /* Yes, remap get current thread to a function call so we can get the current thread for the current core. */
243
244 #define _tx_thread_current_ptr _tx_thread_smp_current_thread_get()
245
246 #endif
247
248
249 /* Define the 32-bit priority bit-maps. There is one priority bit map for each
250 32 priority levels supported. If only 32 priorities are supported there is
251 only one bit map. Each bit within a priority bit map represents that one
252 or more threads at the associated thread priority are ready. */
253
254 THREAD_DECLARE ULONG _tx_thread_priority_maps[TX_MAX_PRIORITIES/32];
255
256
257 /* Define the priority map active bit map that specifies which of the previously
258 defined priority maps have something set. This is only necessary if more than
259 32 priorities are supported. */
260
261 #if TX_MAX_PRIORITIES > 32
262 THREAD_DECLARE ULONG _tx_thread_priority_map_active;
263 #endif
264
265
266 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
267
268 /* Define the 32-bit preempt priority bit maps. There is one preempt bit map
269 for each 32 priority levels supported. If only 32 priorities are supported
270    there is only one bit map. Each set bit corresponds to a preempted priority
271 level that had preemption-threshold active to protect against preemption of a
272 range of relatively higher priority threads. */
273
274 THREAD_DECLARE ULONG _tx_thread_preempted_maps[TX_MAX_PRIORITIES/32];
275
276
277 /* Define the preempt map active bit map that specifies which of the previously
278 defined preempt maps have something set. This is only necessary if more than
279 32 priorities are supported. */
280
281 #if TX_MAX_PRIORITIES > 32
282 THREAD_DECLARE ULONG _tx_thread_preempted_map_active;
283 #endif
284
285
286 /* Define the array that contains the thread at each priority level that was scheduled with
287 preemption-threshold enabled. This will be useful when returning from a nested
288 preemption-threshold condition. */
289
290 THREAD_DECLARE TX_THREAD *_tx_thread_preemption_threshold_list[TX_MAX_PRIORITIES];
291
292
293 #endif
294
295
296 /* Define the last thread scheduled with preemption-threshold. When preemption-threshold is
297 disabled, a thread with preemption-threshold set disables all other threads from running.
298 Effectively, its preemption-threshold is 0. */
299
300 THREAD_DECLARE TX_THREAD *_tx_thread_preemption__threshold_scheduled;
301
302
303 /* Define the array of thread pointers. Each entry represents the threads that
304 are ready at that priority group. For example, index 10 in this array
305 represents the first thread ready at priority 10. If this entry is NULL,
306 no threads are ready at that priority. */
307
308 THREAD_DECLARE TX_THREAD * _tx_thread_priority_list[TX_MAX_PRIORITIES];
309
310
311 /* Define the global preempt disable variable. If this is non-zero, preemption is
312 disabled. It is used internally by ThreadX to prevent preemption of a thread in
313 the middle of a service that is resuming or suspending another thread. */
314
315 THREAD_DECLARE volatile UINT _tx_thread_preempt_disable;
316
317
318 /* Define the global function pointer for mutex cleanup on thread completion or
319 termination. This pointer is setup during mutex initialization. */
320
321 THREAD_DECLARE VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_ptr);
322
323
324 /* Define the global build options variable. This contains a bit map representing
325 how the ThreadX library was built. The following are the bit field definitions:
326
327 Bit(s) Meaning
328
329 31 Reserved
330 30 TX_NOT_INTERRUPTABLE defined
331 29-24 Priority groups 1 -> 32 priorities
332 2 -> 64 priorities
333 3 -> 96 priorities
334
335 ...
336
337 32 -> 1024 priorities
338 23 TX_TIMER_PROCESS_IN_ISR defined
339 22 TX_REACTIVATE_INLINE defined
340 21 TX_DISABLE_STACK_FILLING defined
341 20 TX_ENABLE_STACK_CHECKING defined
342 19 TX_DISABLE_PREEMPTION_THRESHOLD defined
343 18 TX_DISABLE_REDUNDANT_CLEARING defined
344 17 TX_DISABLE_NOTIFY_CALLBACKS defined
345 16 TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO defined
346 15 TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO defined
347 14 TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO defined
348 13 TX_MUTEX_ENABLE_PERFORMANCE_INFO defined
349 12 TX_QUEUE_ENABLE_PERFORMANCE_INFO defined
350 11 TX_SEMAPHORE_ENABLE_PERFORMANCE_INFO defined
351 10 TX_THREAD_ENABLE_PERFORMANCE_INFO defined
352 9 TX_TIMER_ENABLE_PERFORMANCE_INFO defined
353 8 TX_ENABLE_EVENT_TRACE | TX_ENABLE_EVENT_LOGGING defined
354 7 Reserved
355 6 Reserved
356 5 Reserved
357 4 Reserved
358 3 Reserved
359 2 Reserved
360 1 64-bit FPU Enabled
361 0 Reserved */
362
363 THREAD_DECLARE ULONG _tx_build_options;
364
365
366 #ifdef TX_ENABLE_STACK_CHECKING
367
368 /* Define the global function pointer for stack error handling. If a stack error is
369 detected and the application has registered a stack error handler, it will be
370 called via this function pointer. */
371
372 THREAD_DECLARE VOID (*_tx_thread_application_stack_error_handler)(TX_THREAD *thread_ptr);
373
374 #endif
375
376 #ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
377
378 /* Define the total number of thread resumptions. Each time a thread enters the
379 ready state this variable is incremented. */
380
381 THREAD_DECLARE ULONG _tx_thread_performance_resume_count;
382
383
384 /* Define the total number of thread suspensions. Each time a thread enters a
385 suspended state this variable is incremented. */
386
387 THREAD_DECLARE ULONG _tx_thread_performance_suspend_count;
388
389
390 /* Define the total number of solicited thread preemptions. Each time a thread is
391 preempted by directly calling a ThreadX service, this variable is incremented. */
392
393 THREAD_DECLARE ULONG _tx_thread_performance_solicited_preemption_count;
394
395
396 /* Define the total number of interrupt thread preemptions. Each time a thread is
397 preempted as a result of an ISR calling a ThreadX service, this variable is
398 incremented. */
399
400 THREAD_DECLARE ULONG _tx_thread_performance_interrupt_preemption_count;
401
402
403 /* Define the total number of priority inversions. Each time a thread is blocked by
404 a mutex owned by a lower-priority thread, this variable is incremented. */
405
406 THREAD_DECLARE ULONG _tx_thread_performance_priority_inversion_count;
407
408
409 /* Define the total number of time-slices. Each time a time-slice operation is
410 actually performed (another thread is setup for running) this variable is
411 incremented. */
412
413 THREAD_DECLARE ULONG _tx_thread_performance_time_slice_count;
414
415
416 /* Define the total number of thread relinquish operations. Each time a thread
417 relinquish operation is actually performed (another thread is setup for running)
418 this variable is incremented. */
419
420 THREAD_DECLARE ULONG _tx_thread_performance_relinquish_count;
421
422
423 /* Define the total number of thread timeouts. Each time a thread has a
424 timeout this variable is incremented. */
425
426 THREAD_DECLARE ULONG _tx_thread_performance_timeout_count;
427
428
429 /* Define the total number of thread wait aborts. Each time a thread's suspension
430 is lifted by the tx_thread_wait_abort call this variable is incremented. */
431
432 THREAD_DECLARE ULONG _tx_thread_performance_wait_abort_count;
433
434
435 /* Define the total number of idle system thread returns. Each time a thread returns to
436 an idle system (no other thread is ready to run) this variable is incremented. */
437
438 THREAD_DECLARE ULONG _tx_thread_performance_idle_return_count;
439
440
441 /* Define the total number of non-idle system thread returns. Each time a thread returns to
442 a non-idle system (another thread is ready to run) this variable is incremented. */
443
444 THREAD_DECLARE ULONG _tx_thread_performance_non_idle_return_count;
445
446 #endif
447
448
449 /* Define macros and helper functions. */
450
451 /* Define the MOD32 bit set macro that is used to set/clear a priority bit within a specific
452 priority group. */
453
454 #if TX_MAX_PRIORITIES > 32
455 #define MAP_INDEX (map_index)
456 #ifndef TX_MOD32_BIT_SET
457 #define TX_MOD32_BIT_SET(a,b) (b) = (((ULONG) 1) << ((a)%((UINT) 32)));
458 #endif
459 #else
460 #define MAP_INDEX (0)
461 #ifndef TX_MOD32_BIT_SET
462 #define TX_MOD32_BIT_SET(a,b) (b) = (((ULONG) 1) << ((a)));
463 #endif
464 #endif
465
466
467 /* Define the DIV32 bit set macro that is used to set/clear a priority group bit and is
468 only necessary when using priorities greater than 32. */
469
470 #if TX_MAX_PRIORITIES > 32
471 #ifndef TX_DIV32_BIT_SET
472 #define TX_DIV32_BIT_SET(a,b) (b) = (((ULONG) 1) << ((a)/((UINT) 32)));
473 #endif
474 #endif
475
476
477 /* Define state change macro that can be used by run-mode debug agents to keep track of thread
478 state changes. By default, it is mapped to white space. */
479
480 #ifndef TX_THREAD_STATE_CHANGE
481 #define TX_THREAD_STATE_CHANGE(a, b)
482 #endif
483
484
485 /* Define the macro to set the current thread pointer. This is particularly useful in SMP
486    versions of ThreadX to add additional processing. The default implementation fills the
487    entire _tx_thread_current_ptr array via TX_MEMSET (a byte fill - presumably only ever
       used with a TX_NULL/zero argument to clear every core's entry; verify against callers). */
488
489 #ifndef TX_THREAD_SET_CURRENT
490 #define TX_THREAD_SET_CURRENT(a) TX_MEMSET(&_tx_thread_current_ptr[0], (a), sizeof(_tx_thread_current_ptr));
491 #endif
492
493
494 /* Define the get system state macro. By default, it is mapped to the function that returns the system state of the current core. */
495
496 #ifndef TX_THREAD_GET_SYSTEM_STATE
497 #define TX_THREAD_GET_SYSTEM_STATE() _tx_thread_smp_current_state_get()
498 #endif
499
500
501 /* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
502 indicates that _tx_thread_system_return should not be called. */
503
504 #ifndef TX_THREAD_SYSTEM_RETURN_CHECK
505 #define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = (ULONG) _tx_thread_preempt_disable; (c) = (c) | TX_THREAD_GET_SYSTEM_STATE();
506 #endif
507
508
509 /* Define the timeout setup macro used in _tx_thread_create. */
510
511 #ifndef TX_THREAD_CREATE_TIMEOUT_SETUP
512 #define TX_THREAD_CREATE_TIMEOUT_SETUP(t) (t) -> tx_thread_timer.tx_timer_internal_timeout_function = &(_tx_thread_timeout); \
513 (t) -> tx_thread_timer.tx_timer_internal_timeout_param = TX_POINTER_TO_ULONG_CONVERT((t));
514 #endif
515
516
517 /* Define the thread timeout pointer setup macro used in _tx_thread_timeout. */
518
519 #ifndef TX_THREAD_TIMEOUT_POINTER_SETUP
520 #define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = TX_ULONG_TO_THREAD_POINTER_CONVERT(timeout_input);
521 #endif
522
523
524 #ifdef TX_THREAD_SMP_SOURCE_CODE
525
526
527 /* Determine if the in-line capability has been disabled. */
528
529 #ifndef TX_DISABLE_INLINE
530
531
532 /* Define the inline option, which is compiler specific. If not defined, it will be resolved as
533 "inline". */
534
535 #ifndef INLINE_DECLARE
536 #define INLINE_DECLARE inline
537 #endif
538
539
540 /* Define the lowest bit set macro. Note, that this may be overridden
541 by a port specific definition if there is supporting assembly language
542 instructions in the architecture. */
543
544 #ifndef TX_LOWEST_SET_BIT_CALCULATE
545
/* Return the bit index (0-31) of the lowest set bit in map.  The result is
   meaningless when map is 0 - callers must supply a non-zero map.  A port may
   override this via TX_LOWEST_SET_BIT_CALCULATE when the architecture has a
   count-trailing-zeros style instruction.  */
static INLINE_DECLARE UINT _tx_thread_lowest_set_bit_calculate(ULONG map)
{
UINT    bit_set;

    /* Fast path: bit 0 is set.  */
    if ((map & ((ULONG) 0x1)) != ((ULONG) 0))
    {
        bit_set = ((UINT) 0);
    }
    else
    {
        /* Isolate the lowest set bit (two's-complement trick): map becomes a
           single-bit, power-of-two value.  */
        map =  map & (ULONG) ((~map) + ((ULONG) 1));

        /* Narrow the search to the containing byte.  The values 1, 9, 17, 25
           are each one greater than the byte's base bit index; the final
           subtraction below corrects for this off-by-one.  */
        if (map < ((ULONG) 0x100))
        {
            bit_set = ((UINT) 1);
        }
        else if (map < ((ULONG) 0x10000))
        {
            bit_set = ((UINT) 9);
            map = map >> ((UINT) 8);
        }
        else if (map < ((ULONG) 0x01000000))
        {
            bit_set = ((UINT) 17);
            map = map >> ((UINT) 16);
        }
        else
        {
            bit_set = ((UINT) 25);
            map = map >> ((UINT) 24);
        }

        /* Narrow to the containing nibble...  */
        if (map >= ((ULONG) 0x10))
        {
            map = map >> ((UINT) 4);
            bit_set = bit_set + ((UINT) 4);
        }

        /* ... then to the containing bit pair.  */
        if (map >= ((ULONG) 0x4))
        {
            map = map >> ((UINT) 2);
            bit_set = bit_set + ((UINT) 2);
        }

        /* map is now 1 or 2; subtract 1 when the bit is the lower of the pair,
           cancelling the +1 bias in the byte base values above.  */
        bit_set = bit_set - (UINT) (map & (ULONG) 0x1);
    }

    return(bit_set);
}
591
592
593 #define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = _tx_thread_lowest_set_bit_calculate((m));
594
595 #endif
596
597
598 /* Define the next priority macro. Note, that this may be overridden
599 by a port specific definition. */
600
601 #ifndef TX_NEXT_PRIORITY_FIND
602 #if TX_MAX_PRIORITIES > 32
/* Find the highest-priority (numerically lowest) ready priority at or below
   the supplied starting priority, searching across multiple 32-bit priority
   maps.  Returns TX_MAX_PRIORITIES when nothing is ready at or after the
   starting priority, or when the starting priority is out of range.  */
static INLINE_DECLARE UINT _tx_thread_smp_next_priority_find(UINT priority)
{

ULONG   map_index;
ULONG   local_priority_map_active;
ULONG   local_priority_map;
ULONG   priority_bit;
ULONG   first_bit_set;
ULONG   found_priority;

    /* Assume nothing ready until proven otherwise.  */
    found_priority = ((UINT) TX_MAX_PRIORITIES);
    if (priority < ((UINT) TX_MAX_PRIORITIES))
    {
        /* Select the 32-bit map containing the starting priority.  */
        map_index = priority/((UINT) 32);
        local_priority_map = _tx_thread_priority_maps[map_index];

        /* Mask off all bits below the starting priority within this map;
           ~(priority_bit - 1) keeps the starting bit and everything above.  */
        priority_bit = (((ULONG) 1) << (priority % ((UINT) 32)));
        local_priority_map = local_priority_map & ~(priority_bit - ((UINT)1));
        if (local_priority_map != ((ULONG) 0))
        {
            /* A ready priority exists in this map - locate its lowest set bit.  */
            TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
            found_priority = (map_index * ((UINT) 32)) + first_bit_set;
        }
        else
        {
            /* Move to next map index.  */
            map_index++;
            if (map_index < (((UINT) TX_MAX_PRIORITIES)/((UINT) 32)))
            {
                /* Mask the active-map bit map so only maps at or after the new
                   index are considered.  */
                priority_bit = (((ULONG) 1) << (map_index));
                local_priority_map_active = _tx_thread_priority_map_active & ~(priority_bit - ((UINT) 1));
                if (local_priority_map_active != ((ULONG) 0))
                {
                    /* Find the first map with a ready priority, then the first
                       ready priority within that map.  */
                    TX_LOWEST_SET_BIT_CALCULATE(local_priority_map_active, map_index)
                    local_priority_map = _tx_thread_priority_maps[map_index];
                    TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
                    found_priority = (map_index * ((UINT) 32)) + first_bit_set;
                }
            }
        }
    }
    return(found_priority);
}
644 #else
645
_tx_thread_smp_next_priority_find(UINT priority)646 static INLINE_DECLARE UINT _tx_thread_smp_next_priority_find(UINT priority)
647 {
648 UINT first_bit_set;
649 ULONG local_priority_map;
650 UINT next_priority;
651
652 local_priority_map = _tx_thread_priority_maps[0];
653 local_priority_map = local_priority_map >> priority;
654 next_priority = priority;
655 if (local_priority_map == ((ULONG) 0))
656 {
657 next_priority = ((UINT) TX_MAX_PRIORITIES);
658 }
659 else
660 {
661 if (next_priority >= ((UINT) TX_MAX_PRIORITIES))
662 {
663 next_priority = ((UINT) TX_MAX_PRIORITIES);
664 }
665 else
666 {
667 TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
668 next_priority = priority + first_bit_set;
669 }
670 }
671
672 return(next_priority);
673 }
674 #endif
675 #endif
676
/* Set every entry of the SMP schedule list to TX_NULL.  Entries for the first
   six cores are cleared individually (presumably unrolled to avoid loop
   overhead in common small-core builds); any remaining cores are cleared in a
   loop.  */
static INLINE_DECLARE void _tx_thread_smp_schedule_list_clear(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    i;
#endif


    /* Clear the schedule list.  */
    _tx_thread_smp_schedule_list[0] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_smp_schedule_list[1] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_smp_schedule_list[2] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_smp_schedule_list[3] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_smp_schedule_list[4] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_smp_schedule_list[5] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the schedule list.  */
    i = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    /* Static configuration: clear up to the compile-time core count.  */
    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    /* Dynamic configuration: clear up to the run-time core count.  */
    while (i < _tx_thread_smp_max_cores)
#endif
    {
        /* Clear entry in schedule list.  */
        _tx_thread_smp_schedule_list[i] =  TX_NULL;

        /* Move to next index.  */
        i++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}
722
/* Set every entry of the execute list to TX_NULL, so no thread is mapped to
   any core.  Entries for the first six cores are cleared individually; any
   remaining cores are cleared in a loop.  */
static INLINE_DECLARE VOID _tx_thread_smp_execute_list_clear(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif

    /* Clear the execute list.  */
    _tx_thread_execute_ptr[0] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_execute_ptr[1] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_execute_ptr[2] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_execute_ptr[3] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_execute_ptr[4] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_execute_ptr[5] =  TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the execute list.  */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    /* Static configuration: clear up to the compile-time core count.  */
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    /* Dynamic configuration: clear up to the run-time core count.  */
    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Clear entry in execute list.  */
        _tx_thread_execute_ptr[j] =  TX_NULL;

        /* Move to next index.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}
768
769
/* Copy the current execute list into the schedule list, entry by entry.
   Entries for the first six cores are copied individually; any remaining
   cores are copied in a loop.  */
static INLINE_DECLARE VOID _tx_thread_smp_schedule_list_setup(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif

    _tx_thread_smp_schedule_list[0] =  _tx_thread_execute_ptr[0];
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_smp_schedule_list[1] =  _tx_thread_execute_ptr[1];
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_smp_schedule_list[2] =  _tx_thread_execute_ptr[2];
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_smp_schedule_list[3] =  _tx_thread_execute_ptr[3];
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_smp_schedule_list[4] =  _tx_thread_execute_ptr[4];
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_smp_schedule_list[5] =  _tx_thread_execute_ptr[5];
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to setup the remainder of the schedule list.  */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    /* Static configuration: copy up to the compile-time core count.  */
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    /* Dynamic configuration: copy up to the run-time core count.  */
    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Setup entry in schedule list.  */
        _tx_thread_smp_schedule_list[j] =  _tx_thread_execute_ptr[j];

        /* Move to next index.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}
813
814
815 #ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
_tx_thread_smp_core_interrupt(TX_THREAD * thread_ptr,UINT current_core,UINT target_core)816 static INLINE_DECLARE VOID _tx_thread_smp_core_interrupt(TX_THREAD *thread_ptr, UINT current_core, UINT target_core)
817 {
818
819 TX_THREAD *current_thread;
820
821
822 /* Make sure this is a different core, since there is no need to interrupt the current core for
823 a scheduling change. */
824 if (current_core != target_core)
825 {
826
827 /* Yes, a different core is present. */
828
829 /* Pickup the currently executing thread. */
830 current_thread = _tx_thread_current_ptr[target_core];
831
832 /* Determine if they are the same. */
833 if ((current_thread != TX_NULL) && (thread_ptr != current_thread))
834 {
835
836 /* Not the same and not NULL... determine if the core is running at thread level. */
837 if (_tx_thread_system_state[target_core] < TX_INITIALIZE_IN_PROGRESS)
838 {
839
840 /* Preempt the mapped thread. */
841 _tx_thread_smp_core_preempt(target_core);
842 }
843 }
844 }
845 }
846 #else
847
848 /* Define to whitespace. */
849 #define _tx_thread_smp_core_interrupt(a,b,c)
850
851 #endif
852
853
854 #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
_tx_thread_smp_core_wakeup(UINT current_core,UINT target_core)855 static INLINE_DECLARE VOID _tx_thread_smp_core_wakeup(UINT current_core, UINT target_core)
856 {
857
858 /* Determine if the core specified is not the current core - no need to wakeup the
859 current core. */
860 if (target_core != current_core)
861 {
862
863 /* Wakeup based on application's macro. */
864 TX_THREAD_SMP_WAKEUP(target_core);
865 }
866 }
867 #else
868
869 /* Define to whitespace. */
870 #define _tx_thread_smp_core_wakeup(a,b)
871
872 #endif
873
874
_tx_thread_smp_execute_list_setup(UINT core_index)875 static INLINE_DECLARE VOID _tx_thread_smp_execute_list_setup(UINT core_index)
876 {
877
878 TX_THREAD *schedule_thread;
879 UINT i;
880
881
882 /* Loop to copy the schedule list into the execution list. */
883 i = ((UINT) 0);
884 #ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
885
886 while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
887 #else
888
889 while (i < _tx_thread_smp_max_cores)
890 #endif
891 {
892
893 /* Pickup the thread to schedule. */
894 schedule_thread = _tx_thread_smp_schedule_list[i];
895
896 /* Copy the schedule list into the execution list. */
897 _tx_thread_execute_ptr[i] = schedule_thread;
898
899 /* If necessary, interrupt the core with the new thread to schedule. */
900 _tx_thread_smp_core_interrupt(schedule_thread, core_index, i);
901
902 #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
903
904 /* Does this need to be waked up? */
905 if ((i != core_index) && (schedule_thread != TX_NULL))
906 {
907
908 /* Wakeup based on application's macro. */
909 TX_THREAD_SMP_WAKEUP(i);
910 }
911 #endif
912 /* Move to next index. */
913 i++;
914 }
915 }
916
917
/* Return a bit map of available cores: bit n is set when core n has no thread
   in its execute list entry.  The first six cores are tested individually;
   any remaining cores are tested in a loop.  */
static INLINE_DECLARE ULONG _tx_thread_smp_available_cores_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
ULONG   available_cores;

    available_cores = ((ULONG) 0);

    /* Core 0 idle?  */
    if (_tx_thread_execute_ptr[0] == TX_NULL)
    {
        available_cores = ((ULONG) 1);
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    /* Core 1 idle?  */
    if (_tx_thread_execute_ptr[1] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 2);
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    /* Core 2 idle?  */
    if (_tx_thread_execute_ptr[2] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 4);
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    /* Core 3 idle?  */
    if (_tx_thread_execute_ptr[3] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 8);
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    /* Core 4 idle?  */
    if (_tx_thread_execute_ptr[4] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 0x10);
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    /* Core 5 idle?  */
    if (_tx_thread_execute_ptr[5] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 0x20);
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to check the remaining cores.  */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if this core is available.  */
        if (_tx_thread_execute_ptr[j] == TX_NULL)
        {
            available_cores = available_cores | (((ULONG) 1) << j);
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(available_cores);
}
986
987
/* Return the OR of the tx_thread_smp_cores_allowed masks of every thread
   currently in the execute list, i.e. the set of cores on which at least one
   of the currently mapped threads is allowed to run.  The first six cores'
   entries are examined individually; any remaining cores in a loop.  */
static INLINE_DECLARE ULONG _tx_thread_smp_possible_cores_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
ULONG   possible_cores;
TX_THREAD   *thread_ptr;

    possible_cores = ((ULONG) 0);

    /* OR in the allowed-cores mask of the thread mapped to core 0, if any.  */
    thread_ptr = _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr = _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr = _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr = _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr = _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr = _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to OR in the allowed-cores masks for the remaining cores.  */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Pick up the thread mapped to this core, if any.  */
        thread_ptr = _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {
            possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(possible_cores);
}
1064
1065
_tx_thread_smp_lowest_priority_get(void)1066 static INLINE_DECLARE UINT _tx_thread_smp_lowest_priority_get(void)
1067 {
1068
1069 #if TX_THREAD_SMP_MAX_CORES > 6
1070 UINT j;
1071 #endif
1072 TX_THREAD *thread_ptr;
1073 UINT lowest_priority;
1074
1075 lowest_priority = ((UINT) 0);
1076 thread_ptr = _tx_thread_execute_ptr[0];
1077 if (thread_ptr != TX_NULL)
1078 {
1079 if (thread_ptr -> tx_thread_priority > lowest_priority)
1080 {
1081 lowest_priority = thread_ptr -> tx_thread_priority;
1082 }
1083 }
1084 #if TX_THREAD_SMP_MAX_CORES > 1
1085 thread_ptr = _tx_thread_execute_ptr[1];
1086 if (thread_ptr != TX_NULL)
1087 {
1088 if (thread_ptr -> tx_thread_priority > lowest_priority)
1089 {
1090 lowest_priority = thread_ptr -> tx_thread_priority;
1091 }
1092 }
1093 #if TX_THREAD_SMP_MAX_CORES > 2
1094 thread_ptr = _tx_thread_execute_ptr[2];
1095 if (thread_ptr != TX_NULL)
1096 {
1097 if (thread_ptr -> tx_thread_priority > lowest_priority)
1098 {
1099 lowest_priority = thread_ptr -> tx_thread_priority;
1100 }
1101 }
1102 #if TX_THREAD_SMP_MAX_CORES > 3
1103 thread_ptr = _tx_thread_execute_ptr[3];
1104 if (thread_ptr != TX_NULL)
1105 {
1106 if (thread_ptr -> tx_thread_priority > lowest_priority)
1107 {
1108 lowest_priority = thread_ptr -> tx_thread_priority;
1109 }
1110 }
1111 #if TX_THREAD_SMP_MAX_CORES > 4
1112 thread_ptr = _tx_thread_execute_ptr[4];
1113 if (thread_ptr != TX_NULL)
1114 {
1115 if (thread_ptr -> tx_thread_priority > lowest_priority)
1116 {
1117 lowest_priority = thread_ptr -> tx_thread_priority;
1118 }
1119 }
1120 #if TX_THREAD_SMP_MAX_CORES > 5
1121 thread_ptr = _tx_thread_execute_ptr[5];
1122 if (thread_ptr != TX_NULL)
1123 {
1124 if (thread_ptr -> tx_thread_priority > lowest_priority)
1125 {
1126 lowest_priority = thread_ptr -> tx_thread_priority;
1127 }
1128 }
1129 #if TX_THREAD_SMP_MAX_CORES > 6
1130
1131 /* Loop to setup the remainder of the schedule list. */
1132 j = ((UINT) 6);
1133
1134 #ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
1135 while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
1136 #else
1137
1138 while (j < _tx_thread_smp_max_cores)
1139 #endif
1140 {
1141
1142 /* Determine if this core has a thread scheduled. */
1143 thread_ptr = _tx_thread_execute_ptr[j];
1144 if (thread_ptr != TX_NULL)
1145 {
1146
1147 /* Is this the new lowest priority? */
1148 if (thread_ptr -> tx_thread_priority > lowest_priority)
1149 {
1150 lowest_priority = thread_ptr -> tx_thread_priority;
1151 }
1152 }
1153
1154 /* Move to next core. */
1155 j++;
1156 }
1157 #endif
1158 #endif
1159 #endif
1160 #endif
1161 #endif
1162 #endif
1163 return(lowest_priority);
1164 }
1165
1166
/* Attempt to make room for schedule_thread by remapping threads already
   present in _tx_thread_smp_schedule_list onto different cores.  This is a
   breadth-first search: candidate cores for schedule_thread seed a work
   queue; each dequeued core's currently mapped thread contributes its own
   allowed cores, until either a chain ending in a free core is found or the
   queue is exhausted.

   schedule_thread        - thread we are trying to place
   available_cores        - bit map of cores with nothing scheduled
   thread_possible_cores  - cores schedule_thread is allowed to run on
   test_possible_cores    - cores still eligible for examination in the search

   On success, _tx_thread_smp_schedule_list and the tx_thread_smp_core_mapped
   field of every moved thread are updated, and the index of the free core
   used to complete the chain is returned.  If no remapping solution exists,
   TX_THREAD_SMP_MAX_CORES is returned and no remapping is performed. */
static INLINE_DECLARE UINT _tx_thread_smp_remap_solution_find(TX_THREAD *schedule_thread, ULONG available_cores, ULONG thread_possible_cores, ULONG test_possible_cores)
{

UINT        core;
UINT        previous_core;
ULONG       test_cores;
ULONG       last_thread_cores;
UINT        queue_first, queue_last;
UINT        core_queue[TX_THREAD_SMP_MAX_CORES-1];
TX_THREAD   *thread_ptr;
TX_THREAD   *last_thread;
TX_THREAD   *thread_remap_list[TX_THREAD_SMP_MAX_CORES];


    /* Clear the last thread cores in the search. */
    last_thread_cores = ((ULONG) 0);

    /* Set the last thread pointer to NULL.  It remains NULL if no solution
       is found. */
    last_thread = TX_NULL;

    /* Setup the core queue indices. */
    queue_first = ((UINT) 0);
    queue_last = ((UINT) 0);

    /* Build a list of possible cores for this thread to execute on, starting
       with the previously mapped core (preferred, to minimize movement). */
    core = schedule_thread -> tx_thread_smp_core_mapped;
    if ((thread_possible_cores & (((ULONG) 1) << core)) != ((ULONG) 0))
    {

        /* Remember this potential mapping. */
        thread_remap_list[core] = schedule_thread;
        core_queue[queue_last] = core;

        /* Move to next slot. */
        queue_last++;

        /* Clear this core. */
        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);
    }

    /* Loop to add additional possible cores, lowest set bit first. */
    while (thread_possible_cores != ((ULONG) 0))
    {

        /* Determine the first possible core. */
        test_cores = thread_possible_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Clear this core. */
        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);

        /* Remember this potential mapping. */
        thread_remap_list[core] = schedule_thread;
        core_queue[queue_last] = core;

        /* Move to next slot. */
        queue_last++;
    }

    /* Breadth-first search loop: evaluate the potential thread mappings
       against what is already mapped. */
    do
    {

        /* Pickup the next entry. */
        core = core_queue[queue_first];

        /* Move to next slot. */
        queue_first++;

        /* Retrieve the thread from the current mapping. */
        thread_ptr = _tx_thread_smp_schedule_list[core];

        /* Determine if there is a thread currently mapped to this core. */
        if (thread_ptr != TX_NULL)
        {

            /* Determine the cores available for this thread, restricted to
               the cores not already examined. */
            thread_possible_cores = thread_ptr -> tx_thread_smp_cores_allowed;
            thread_possible_cores = test_possible_cores & thread_possible_cores;

            /* Are there any possible cores for this thread? */
            if (thread_possible_cores != ((ULONG) 0))
            {

                /* Determine if there are cores available for this thread. */
                if ((thread_possible_cores & available_cores) != ((ULONG) 0))
                {

                    /* Yes, remember the final thread and cores that are valid for this thread. */
                    last_thread_cores = thread_possible_cores & available_cores;
                    last_thread = thread_ptr;

                    /* We are done - get out of the loop! */
                    break;
                }
                else
                {

                    /* Remove cores that will be added to the list, so each
                       core is enqueued at most once. */
                    test_possible_cores = test_possible_cores & ~(thread_possible_cores);

                    /* Loop to add this thread to the potential mapping list. */
                    do
                    {

                        /* Calculate the core. */
                        test_cores = thread_possible_cores;
                        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

                        /* Clear this core. */
                        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);

                        /* Remember this thread for remapping. */
                        thread_remap_list[core] = thread_ptr;

                        /* Remember this core. */
                        core_queue[queue_last] = core;

                        /* Move to next slot. */
                        queue_last++;

                    } while (thread_possible_cores != ((ULONG) 0));
                }
            }
        }
    } while (queue_first != queue_last);

    /* Was a remapping solution found? */
    if (last_thread != TX_NULL)
    {

        /* Walk the remap chain backwards from the last thread, shifting each
           thread onto the core recorded for it, until we reach the thread we
           were originally trying to place. */

        /* Pickup the core of the last thread to remap. */
        core = last_thread -> tx_thread_smp_core_mapped;

        /* Pickup the thread from the remapping list. */
        thread_ptr = thread_remap_list[core];

        /* Loop until we arrive at the thread we have been trying to map. */
        while (thread_ptr != schedule_thread)
        {

            /* Move this thread in the schedule list. */
            _tx_thread_smp_schedule_list[core] = thread_ptr;

            /* Remember the previous core. */
            previous_core = core;

            /* Pickup the core of thread to remap. */
            core = thread_ptr -> tx_thread_smp_core_mapped;

            /* Save the new core mapping for this thread. */
            thread_ptr -> tx_thread_smp_core_mapped = previous_core;

            /* Move the next thread. */
            thread_ptr = thread_remap_list[core];
        }

        /* Save the remaining thread (schedule_thread) in the updated schedule list. */
        _tx_thread_smp_schedule_list[core] = thread_ptr;

        /* Update this thread's core mapping. */
        thread_ptr -> tx_thread_smp_core_mapped = core;

        /* Finally, setup the last thread in the remapping solution on the
           lowest-numbered free core it is allowed to use. */
        test_cores = last_thread_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Setup the last thread. */
        _tx_thread_smp_schedule_list[core] = last_thread;

        /* Remember the core mapping for this thread. */
        last_thread -> tx_thread_smp_core_mapped = core;
    }
    else
    {

        /* Set core to the maximum value in order to signal a remapping solution was not found. */
        core = ((UINT) TX_THREAD_SMP_MAX_CORES);
    }

    /* Return core to the caller. */
    return(core);
}
1351
1352
/* Build possible_preemption_list with every currently scheduled thread whose
   priority value is numerically greater than 'priority' (i.e. lower priority
   in ThreadX terms), sorted lowest-priority first.  Equal-priority entries
   are ordered by their position in the ready list (the thread found later
   when walking from the current entry's successor is placed earlier).
   Unused slots are set to TX_NULL.

   priority                  - priority of the thread seeking a core
   possible_preemption_list  - output array, one slot per core

   Returns the union of tx_thread_smp_cores_allowed for ALL scheduled
   threads (preemptable or not). */
static INLINE_DECLARE ULONG _tx_thread_smp_preemptable_threads_get(UINT priority, TX_THREAD *possible_preemption_list[TX_THREAD_SMP_MAX_CORES])
{

UINT        i, j, k;
TX_THREAD   *thread_ptr;
TX_THREAD   *next_thread;
TX_THREAD   *search_thread;
TX_THREAD   *list_head;
ULONG       possible_cores = ((ULONG) 0);


    /* Clear the possible preemption list (unrolled for the first six cores). */
    possible_preemption_list[0] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    possible_preemption_list[1] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    possible_preemption_list[2] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    possible_preemption_list[3] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    possible_preemption_list[4] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    possible_preemption_list[5] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the possible preemption list. */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Clear entry in possible preemption list. */
        possible_preemption_list[j] = TX_NULL;

        /* Move to next core. */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif

    /* Loop to build a list of threads of less priority.  'i' walks the
       cores, 'j' counts entries added to the preemption list. */
    i = ((UINT) 0);
    j = ((UINT) 0);
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (i < _tx_thread_smp_max_cores)
#endif
    {

        /* Pickup the currently mapped thread. */
        thread_ptr = _tx_thread_execute_ptr[i];

        /* Is there a thread scheduled for this core? */
        if (thread_ptr != TX_NULL)
        {

            /* Update the possible cores bit map (done for every scheduled
               thread, not just preemptable ones). */
            possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;

            /* Can this thread be preempted? */
            if (priority < thread_ptr -> tx_thread_priority)
            {

                /* Thread that can be added to the preemption possible list. */

                /* Yes, this scheduled thread is lower priority, so add it to the preemption possible list. */
                possible_preemption_list[j] = thread_ptr;

                /* Move to next entry in preemption possible list. */
                j++;
            }
        }

        /* Move to next core. */
        i++;
    }

    /* Determine if there are at least two threads that can be preempted
       (sorting is unnecessary otherwise). */
    if (j > ((UINT) 1))
    {

        /* Yes, loop through the preemption possible list and sort by priority
           (selection-sort style: each pass moves the lowest-priority thread
           to the front of the remaining range). */
        i = ((UINT) 0);
        do
        {

            /* Pickup preemptable thread. */
            thread_ptr = possible_preemption_list[i];

            /* Initialize the search index. */
            k = i + ((UINT) 1);

            /* Loop to get the lowest priority thread at the front of the list. */
            while (k < j)
            {

                /* Pickup the next thread to evaluate. */
                next_thread = possible_preemption_list[k];

                /* Is this thread lower priority? */
                if (next_thread -> tx_thread_priority > thread_ptr -> tx_thread_priority)
                {

                    /* Yes, swap the threads.  Note: 'k' is intentionally not
                       advanced here; the next iteration re-compares at the
                       same index and falls through to the else branch. */
                    possible_preemption_list[i] = next_thread;
                    possible_preemption_list[k] = thread_ptr;
                    thread_ptr = next_thread;
                }
                else
                {

                    /* Compare the thread priorities. */
                    if (next_thread -> tx_thread_priority == thread_ptr -> tx_thread_priority)
                    {

                        /* Equal priority threads... see which is in the ready list first. */
                        search_thread = thread_ptr -> tx_thread_ready_next;

                        /* Pickup the list head. */
                        list_head = _tx_thread_priority_list[thread_ptr -> tx_thread_priority];

                        /* Now loop to see if the next thread is after the current thread preemption. */
                        while (search_thread != list_head)
                        {

                            /* Have we found the next thread? */
                            if (search_thread == next_thread)
                            {

                                /* Yes, swap the threads so the one later in
                                   the ready list comes first. */
                                possible_preemption_list[i] = next_thread;
                                possible_preemption_list[k] = thread_ptr;
                                thread_ptr = next_thread;
                                break;
                            }

                            /* Move to the next thread. */
                            search_thread = search_thread -> tx_thread_ready_next;
                        }
                    }

                    /* Move to examine the next possible preemptable thread. */
                    k++;
                }
            }

            /* We have found the lowest priority thread to preempt, now find the next lowest. */
            i++;
        }
        while (i < (j-((UINT) 1)));
    }

    /* Return the possible cores. */
    return(possible_cores);
}
1520
/* Move a ready thread from its current priority list to a new priority
   level.  The effective priority is new_priority unless the thread's
   inherited priority is higher (numerically smaller), in which case the
   inherited priority is used.  Updates the priority lists and the ready
   priority bit map(s) accordingly.  NOTE(review): caller is presumably
   expected to hold the scheduler protection — confirm at call sites. */
static INLINE_DECLARE VOID _tx_thread_smp_simple_priority_change(TX_THREAD *thread_ptr, UINT new_priority)
{

UINT        priority;
ULONG       priority_bit;
TX_THREAD   *head_ptr;
TX_THREAD   *tail_ptr;
#if TX_MAX_PRIORITIES > 32
UINT        map_index;
#endif

    /* Pickup the thread's current priority. */
    priority = thread_ptr -> tx_thread_priority;

    /* Determine if there are other threads at this priority that are
       ready. */
    if (thread_ptr -> tx_thread_ready_next != thread_ptr)
    {

        /* Yes, there are other threads at this priority ready. */

        /* Just remove this thread from the (circular, doubly-linked)
           priority list. */
        (thread_ptr -> tx_thread_ready_next) -> tx_thread_ready_previous = thread_ptr -> tx_thread_ready_previous;
        (thread_ptr -> tx_thread_ready_previous) -> tx_thread_ready_next = thread_ptr -> tx_thread_ready_next;

        /* Determine if this is the head of the priority list. */
        if (_tx_thread_priority_list[priority] == thread_ptr)
        {

            /* Update the head pointer of this priority list. */
            _tx_thread_priority_list[priority] = thread_ptr -> tx_thread_ready_next;
        }
    }
    else
    {

        /* This is the only thread at this priority ready to run. Set the head
           pointer to NULL. */
        _tx_thread_priority_list[priority] = TX_NULL;

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array. */
        map_index = priority/((UINT) 32);
#endif

        /* Clear this priority bit in the ready priority bit map.
           NOTE(review): MAP_INDEX presumably expands to map_index when
           TX_MAX_PRIORITIES > 32 and to 0 otherwise — defined elsewhere in
           this header; confirm. */
        TX_MOD32_BIT_SET(priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

        /* Determine if there are any other bits set in this priority map. */
        if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
        {

            /* No, clear the active bit to signify this priority map has nothing set. */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_map_active = _tx_thread_priority_map_active & (~(priority_bit));
        }
#endif
    }

    /* Determine if the actual thread priority should be setup, which is the
       case if the new priority is higher than the priority inheritance. */
    if (new_priority < thread_ptr -> tx_thread_inherit_priority)
    {

        /* Change thread priority to the new user's priority. */
        thread_ptr -> tx_thread_priority = new_priority;
        thread_ptr -> tx_thread_preempt_threshold = new_priority;
    }
    else
    {

        /* Change thread priority to the priority inheritance. */
        thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
        thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
    }

    /* Now, place the thread at the new priority level. */

    /* Determine if there are other threads at this priority that are
       ready. */
    head_ptr = _tx_thread_priority_list[new_priority];
    if (head_ptr != TX_NULL)
    {

        /* Yes, there are other threads at this priority already ready. */

        /* Just add this thread to the end of the priority list. */
        tail_ptr = head_ptr -> tx_thread_ready_previous;
        tail_ptr -> tx_thread_ready_next = thread_ptr;
        head_ptr -> tx_thread_ready_previous = thread_ptr;
        thread_ptr -> tx_thread_ready_previous = tail_ptr;
        thread_ptr -> tx_thread_ready_next = head_ptr;
    }
    else
    {

        /* First thread at this priority ready. Add to the front of the list
           (a one-element circular list pointing at itself). */
        _tx_thread_priority_list[new_priority] = thread_ptr;
        thread_ptr -> tx_thread_ready_next = thread_ptr;
        thread_ptr -> tx_thread_ready_previous = thread_ptr;

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array. */
        map_index = new_priority/((UINT) 32);

        /* Set the active bit to remember that the priority map has something set. */
        TX_DIV32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_map_active = _tx_thread_priority_map_active | priority_bit;
#endif

        /* Or in the thread's priority bit. */
        TX_MOD32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
    }
}
1641 #else
1642
/* In-line was disabled. All of the above helper functions must be defined as actual functions. */
1644
1645 UINT _tx_thread_lowest_set_bit_calculate(ULONG map);
1646 #define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = _tx_thread_lowest_set_bit_calculate((m));
1647
1648 UINT _tx_thread_smp_next_priority_find(UINT priority);
1649 VOID _tx_thread_smp_schedule_list_clear(void);
1650 VOID _tx_thread_smp_execute_list_clear(void);
1651 VOID _tx_thread_smp_schedule_list_setup(void);
1652
1653 #ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
1654 VOID _tx_thread_smp_core_interrupt(TX_THREAD *thread_ptr, UINT current_core, UINT target_core);
1655 #else
1656 /* Define to whitespace. */
1657 #define _tx_thread_smp_core_interrupt(a,b,c)
1658 #endif
1659
1660 #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
1661 VOID _tx_thread_smp_core_wakeup(UINT current_core, UINT target_core);
1662 #else
1663 /* Define to whitespace. */
1664 #define _tx_thread_smp_core_wakeup(a,b)
1665 #endif
1666
1667 VOID _tx_thread_smp_execute_list_setup(UINT core_index);
1668 ULONG _tx_thread_smp_available_cores_get(void);
1669 ULONG _tx_thread_smp_possible_cores_get(void);
1670 UINT _tx_thread_smp_lowest_priority_get(void);
1671 UINT _tx_thread_smp_remap_solution_find(TX_THREAD *schedule_thread, ULONG available_cores, ULONG thread_possible_cores, ULONG test_possible_cores);
1672 ULONG _tx_thread_smp_preemptable_threads_get(UINT priority, TX_THREAD *possible_preemption_list[TX_THREAD_SMP_MAX_CORES]);
1673 VOID _tx_thread_smp_simple_priority_change(TX_THREAD *thread_ptr, UINT new_priority);
1674
1675 #endif
1676
1677
1678 #endif
1679
1680 #endif
1681
1682