/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/


/**************************************************************************/
/*                                                                        */
/*  COMPONENT DEFINITION                                   RELEASE        */
/*                                                                        */
/*    tx_thread.h                                         PORTABLE SMP    */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This file defines the ThreadX thread control component, including   */
/*    data types and external references.  It is assumed that tx_api.h    */
/*    and tx_port.h have already been included.                           */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020     William E. Lamie         Initial Version 6.1           */
/*                                                                        */
/**************************************************************************/

#ifndef TX_THREAD_H
#define TX_THREAD_H


/* Add include files needed for in-line macros. */

#include "tx_initialize.h"


/* Define thread control specific data definitions. */

#define TX_THREAD_ID                            ((ULONG) 0x54485244)
#define TX_THREAD_MAX_BYTE_VALUES               256
#define TX_THREAD_PRIORITY_GROUP_MASK           ((ULONG) 0xFF)
#define TX_THREAD_PRIORITY_GROUP_SIZE           8
#define TX_THREAD_EXECUTE_LOG_SIZE              ((UINT) 8)
#define TX_THREAD_SMP_PROTECT_WAIT_LIST_SIZE    (TX_THREAD_SMP_MAX_CORES + 1)


/* Define the default thread stack checking. This can be overridden by
   a particular port, which is necessary if the stack growth is from
   low address to high address (the default logic is for stacks that
   grow from high address to low address). */

#ifndef TX_THREAD_STACK_CHECK
#define TX_THREAD_STACK_CHECK(thread_ptr)                                                                           \
    {                                                                                                               \
    TX_INTERRUPT_SAVE_AREA                                                                                          \
        TX_DISABLE                                                                                                  \
        if (((thread_ptr)) && ((thread_ptr) -> tx_thread_id == TX_THREAD_ID))                                       \
        {                                                                                                           \
            if (((ULONG *) (thread_ptr) -> tx_thread_stack_ptr) < ((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr))      \
            {                                                                                                       \
                (thread_ptr) -> tx_thread_stack_highest_ptr = (thread_ptr) -> tx_thread_stack_ptr;                 \
            }                                                                                                       \
            if ((*((ULONG *) (thread_ptr) -> tx_thread_stack_start) != TX_STACK_FILL) ||                            \
                (*((ULONG *) (((UCHAR *) (thread_ptr) -> tx_thread_stack_end) + 1)) != TX_STACK_FILL) ||            \
                (((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr) < ((ULONG *) (thread_ptr) -> tx_thread_stack_start)))  \
            {                                                                                                       \
                TX_RESTORE                                                                                          \
                _tx_thread_stack_error_handler((thread_ptr));                                                       \
                TX_DISABLE                                                                                          \
            }                                                                                                       \
            if (*(((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr) - 1) != TX_STACK_FILL)                    \
            {                                                                                                       \
                TX_RESTORE                                                                                          \
                _tx_thread_stack_analyze((thread_ptr));                                                             \
                TX_DISABLE                                                                                          \
            }                                                                                                       \
        }                                                                                                           \
        TX_RESTORE                                                                                                  \
    }
#endif
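
/* Illustrative usage (a sketch, not part of this file's API surface): when
   TX_ENABLE_STACK_CHECKING is defined, ThreadX invokes TX_THREAD_STACK_CHECK
   internally and routes detected corruption to the handler registered via the
   public tx_thread_stack_error_notify service. The handler name below is a
   hypothetical application choice.

        void  my_stack_error_handler(TX_THREAD *thread_ptr)
        {
            // thread_ptr identifies the thread whose stack failed the check.
        }

        // During application initialization:
        //   tx_thread_stack_error_notify(my_stack_error_handler);
*/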


/* Define default post thread delete macro to whitespace, if it hasn't been defined previously (typically in tx_port.h). */

#ifndef TX_THREAD_DELETE_PORT_COMPLETION
#define TX_THREAD_DELETE_PORT_COMPLETION(t)
#endif


/* Define default post thread reset macro to whitespace, if it hasn't been defined previously (typically in tx_port.h). */

#ifndef TX_THREAD_RESET_PORT_COMPLETION
#define TX_THREAD_RESET_PORT_COMPLETION(t)
#endif


/* Define the thread create internal extension macro to whitespace, if it hasn't been defined previously (typically in tx_port.h). */

#ifndef TX_THREAD_CREATE_INTERNAL_EXTENSION
#define TX_THREAD_CREATE_INTERNAL_EXTENSION(t)
#endif


/* Define internal thread control function prototypes. */

VOID        _tx_thread_initialize(VOID);
VOID        _tx_thread_schedule(VOID);
VOID        _tx_thread_shell_entry(VOID);
VOID        _tx_thread_stack_analyze(TX_THREAD *thread_ptr);
VOID        _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID));
VOID        _tx_thread_stack_error(TX_THREAD *thread_ptr);
VOID        _tx_thread_stack_error_handler(TX_THREAD *thread_ptr);
VOID        _tx_thread_system_preempt_check(VOID);
VOID        _tx_thread_system_resume(TX_THREAD *thread_ptr);
VOID        _tx_thread_system_ni_resume(TX_THREAD *thread_ptr);
VOID        _tx_thread_system_return(VOID);
VOID        _tx_thread_system_suspend(TX_THREAD *thread_ptr);
VOID        _tx_thread_system_ni_suspend(TX_THREAD *thread_ptr, ULONG timeout);
VOID        _tx_thread_time_slice(VOID);
VOID        _tx_thread_timeout(ULONG timeout_input);


/* Define all internal SMP prototypes. */

void        _tx_thread_smp_current_state_set(ULONG new_state);
UINT        _tx_thread_smp_find_next_priority(UINT priority);
void        _tx_thread_smp_high_level_initialize(void);
void        _tx_thread_smp_rebalance_execute_list(UINT core_index);


/* Define all internal ThreadX SMP low-level assembly routines. */

VOID        _tx_thread_smp_core_wait(void);
void        _tx_thread_smp_initialize_wait(void);
void        _tx_thread_smp_low_level_initialize(UINT number_of_cores);
void        _tx_thread_smp_core_preempt(UINT core);


/* Thread control component external data declarations follow. */

#define THREAD_DECLARE  extern


/* Define the pointer that contains the system stack pointer. This is
   utilized when control returns from a thread to the system to reset the
   current stack. This is setup in the low-level initialization function. */

THREAD_DECLARE  VOID *          _tx_thread_system_stack_ptr[TX_THREAD_SMP_MAX_CORES];


/* Define the current thread pointer. This variable points to the currently
   executing thread. If this variable is NULL, no thread is executing. */

THREAD_DECLARE  TX_THREAD *     _tx_thread_current_ptr[TX_THREAD_SMP_MAX_CORES];


/* Define the variable that holds the next thread to execute. It is important
   to remember that this is not necessarily equal to the current thread
   pointer. */

THREAD_DECLARE  TX_THREAD *     _tx_thread_execute_ptr[TX_THREAD_SMP_MAX_CORES];


/* Define the ThreadX SMP scheduling and mapping data structures. */

THREAD_DECLARE  TX_THREAD *             _tx_thread_smp_schedule_list[TX_THREAD_SMP_MAX_CORES];
THREAD_DECLARE  ULONG                   _tx_thread_smp_reschedule_pending;
THREAD_DECLARE  TX_THREAD_SMP_PROTECT   _tx_thread_smp_protection;
THREAD_DECLARE  volatile ULONG          _tx_thread_smp_release_cores_flag;
THREAD_DECLARE  ULONG                   _tx_thread_smp_system_error;
THREAD_DECLARE  ULONG                   _tx_thread_smp_inter_core_interrupts[TX_THREAD_SMP_MAX_CORES];

THREAD_DECLARE  ULONG                   _tx_thread_smp_protect_wait_list_size;
THREAD_DECLARE  ULONG                   _tx_thread_smp_protect_wait_list[TX_THREAD_SMP_PROTECT_WAIT_LIST_SIZE];
THREAD_DECLARE  ULONG                   _tx_thread_smp_protect_wait_counts[TX_THREAD_SMP_MAX_CORES];
THREAD_DECLARE  ULONG                   _tx_thread_smp_protect_wait_list_lock_protect_in_force;
THREAD_DECLARE  ULONG                   _tx_thread_smp_protect_wait_list_tail;
THREAD_DECLARE  ULONG                   _tx_thread_smp_protect_wait_list_head;


/* Define logic for conditional dynamic maximum number of cores. */

#ifdef TX_THREAD_SMP_DYNAMIC_CORE_MAX

THREAD_DECLARE  ULONG           _tx_thread_smp_max_cores;
THREAD_DECLARE  ULONG           _tx_thread_smp_detected_cores;

#endif



/* Define the head pointer of the created thread list. */

THREAD_DECLARE  TX_THREAD *     _tx_thread_created_ptr;


/* Define the variable that holds the number of created threads. */

THREAD_DECLARE  ULONG           _tx_thread_created_count;


/* Define the current state variable. When this value is 0, a thread
   is executing or the system is idle. Other values indicate that
   interrupt or initialization processing is active. This variable is
   initialized to TX_INITIALIZE_IN_PROGRESS to indicate initialization is
   active. */

THREAD_DECLARE  volatile ULONG  _tx_thread_system_state[TX_THREAD_SMP_MAX_CORES];


/* Determine if we need to remap system state to a function call. */

#ifndef TX_THREAD_SMP_SOURCE_CODE


/* Yes, remap system state to a function call so we can get the system state for the current core. */

#define _tx_thread_system_state _tx_thread_smp_current_state_get()


/* Yes, remap get current thread to a function call so we can get the current thread for the current core. */

#define _tx_thread_current_ptr  _tx_thread_smp_current_thread_get()

#endif

/* Define the 32-bit priority bit maps. There is one priority bit map for each
   group of 32 priority levels supported. If only 32 priorities are supported, there
   is only one bit map. Each set bit within a priority bit map indicates that one
   or more threads at the associated thread priority are ready. */

THREAD_DECLARE  ULONG           _tx_thread_priority_maps[TX_MAX_PRIORITIES/32];
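
/* Worked example (assuming TX_MAX_PRIORITIES == 64, i.e. two maps): a thread
   becoming ready at priority 40 sets bit (40 % 32) == 8 of map (40 / 32) == 1,
   so _tx_thread_priority_maps[1] has the mask 0x00000100 OR'ed in. */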


/* Define the priority map active bit map that specifies which of the previously
   defined priority maps have something set. This is only necessary if more than
   32 priorities are supported. */

#if TX_MAX_PRIORITIES > 32
THREAD_DECLARE  ULONG           _tx_thread_priority_map_active;
#endif


#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

/* Define the 32-bit preempt priority bit maps. There is one preempt bit map
   for each group of 32 priority levels supported. If only 32 priorities are
   supported, there is only one bit map. Each set bit corresponds to a preempted priority
   level that had preemption-threshold active to protect against preemption of a
   range of relatively higher priority threads. */

THREAD_DECLARE  ULONG           _tx_thread_preempted_maps[TX_MAX_PRIORITIES/32];


/* Define the preempt map active bit map that specifies which of the previously
   defined preempt maps have something set. This is only necessary if more than
   32 priorities are supported. */

#if TX_MAX_PRIORITIES > 32
THREAD_DECLARE  ULONG           _tx_thread_preempted_map_active;
#endif


/* Define the array that contains the thread at each priority level that was scheduled with
   preemption-threshold enabled. This will be useful when returning from a nested
   preemption-threshold condition. */

THREAD_DECLARE  TX_THREAD       *_tx_thread_preemption_threshold_list[TX_MAX_PRIORITIES];


#endif


/* Define the last thread scheduled with preemption-threshold. When preemption-threshold is
   disabled, a thread with preemption-threshold set disables all other threads from running.
   Effectively, its preemption-threshold is 0. */

THREAD_DECLARE  TX_THREAD       *_tx_thread_preemption__threshold_scheduled;


/* Define the array of thread pointers. Each entry represents the threads that
   are ready at that priority group. For example, index 10 in this array
   represents the first thread ready at priority 10. If this entry is NULL,
   no threads are ready at that priority. */

THREAD_DECLARE  TX_THREAD *     _tx_thread_priority_list[TX_MAX_PRIORITIES];


/* Define the global preempt disable variable. If this is non-zero, preemption is
   disabled. It is used internally by ThreadX to prevent preemption of a thread in
   the middle of a service that is resuming or suspending another thread. */

THREAD_DECLARE  volatile UINT   _tx_thread_preempt_disable;


/* Define the global function pointer for mutex cleanup on thread completion or
   termination. This pointer is setup during mutex initialization. */

THREAD_DECLARE  VOID            (*_tx_thread_mutex_release)(TX_THREAD *thread_ptr);


/* Define the global build options variable. This contains a bit map representing
   how the ThreadX library was built. The following are the bit field definitions:

                    Bit(s)                   Meaning

                    31                  Reserved
                    30                  TX_NOT_INTERRUPTABLE defined
                    29-24               Priority groups 1  -> 32 priorities
                                                        2  -> 64 priorities
                                                        3  -> 96 priorities

                                                        ...

                                                        32 -> 1024 priorities
                    23                  TX_TIMER_PROCESS_IN_ISR defined
                    22                  TX_REACTIVATE_INLINE defined
                    21                  TX_DISABLE_STACK_FILLING defined
                    20                  TX_ENABLE_STACK_CHECKING defined
                    19                  TX_DISABLE_PREEMPTION_THRESHOLD defined
                    18                  TX_DISABLE_REDUNDANT_CLEARING defined
                    17                  TX_DISABLE_NOTIFY_CALLBACKS defined
                    16                  TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO defined
                    15                  TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO defined
                    14                  TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO defined
                    13                  TX_MUTEX_ENABLE_PERFORMANCE_INFO defined
                    12                  TX_QUEUE_ENABLE_PERFORMANCE_INFO defined
                    11                  TX_SEMAPHORE_ENABLE_PERFORMANCE_INFO defined
                    10                  TX_THREAD_ENABLE_PERFORMANCE_INFO defined
                    9                   TX_TIMER_ENABLE_PERFORMANCE_INFO defined
                    8                   TX_ENABLE_EVENT_TRACE | TX_ENABLE_EVENT_LOGGING defined
                    7                   Reserved
                    6                   Reserved
                    5                   Reserved
                    4                   Reserved
                    3                   Reserved
                    2                   Reserved
                    1                   64-bit FPU Enabled
                    0                   Reserved  */

THREAD_DECLARE  ULONG           _tx_build_options;
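
/* Illustrative decode (a sketch): application code could inspect individual
   option bits from the table above, e.g. bit 8 for event trace/logging:

        ULONG   options;

        options = _tx_build_options;
        if ((options >> 8) & ((ULONG) 1))
        {
            // Library was built with TX_ENABLE_EVENT_TRACE or TX_ENABLE_EVENT_LOGGING.
        }
*/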


#ifdef TX_ENABLE_STACK_CHECKING

/* Define the global function pointer for stack error handling. If a stack error is
   detected and the application has registered a stack error handler, it will be
   called via this function pointer. */

THREAD_DECLARE  VOID            (*_tx_thread_application_stack_error_handler)(TX_THREAD *thread_ptr);

#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

/* Define the total number of thread resumptions. Each time a thread enters the
   ready state this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_resume_count;


/* Define the total number of thread suspensions. Each time a thread enters a
   suspended state this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_suspend_count;


/* Define the total number of solicited thread preemptions. Each time a thread is
   preempted by directly calling a ThreadX service, this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_solicited_preemption_count;


/* Define the total number of interrupt thread preemptions. Each time a thread is
   preempted as a result of an ISR calling a ThreadX service, this variable is
   incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_interrupt_preemption_count;


/* Define the total number of priority inversions. Each time a thread is blocked by
   a mutex owned by a lower-priority thread, this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_priority_inversion_count;


/* Define the total number of time-slices. Each time a time-slice operation is
   actually performed (another thread is setup for running) this variable is
   incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_time_slice_count;


/* Define the total number of thread relinquish operations. Each time a thread
   relinquish operation is actually performed (another thread is setup for running)
   this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_relinquish_count;


/* Define the total number of thread timeouts. Each time a thread has a
   timeout this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_timeout_count;


/* Define the total number of thread wait aborts. Each time a thread's suspension
   is lifted by the tx_thread_wait_abort call this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_wait_abort_count;


/* Define the total number of idle system thread returns. Each time a thread returns to
   an idle system (no other thread is ready to run) this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_idle_return_count;


/* Define the total number of non-idle system thread returns. Each time a thread returns to
   a non-idle system (another thread is ready to run) this variable is incremented. */

THREAD_DECLARE  ULONG           _tx_thread_performance_non_idle_return_count;

#endif


/* Define macros and helper functions. */

/* Define the MOD32 bit set macro that is used to set/clear a priority bit within a specific
   priority group. */

#if TX_MAX_PRIORITIES > 32
#define MAP_INDEX                   (map_index)
#ifndef TX_MOD32_BIT_SET
#define TX_MOD32_BIT_SET(a,b)       (b) = (((ULONG) 1) << ((a)%((UINT) 32)));
#endif
#else
#define MAP_INDEX                   (0)
#ifndef TX_MOD32_BIT_SET
#define TX_MOD32_BIT_SET(a,b)       (b) = (((ULONG) 1) << ((a)));
#endif
#endif
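
/* Worked example: with more than 32 priorities, TX_MOD32_BIT_SET(40, b) sets
   b to (1 << (40 % 32)) == 0x100, the bit for priority 40 within its own
   32-priority group; with 32 or fewer priorities the priority value itself
   is used as the shift count. */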


/* Define the DIV32 bit set macro that is used to set/clear a priority group bit and is
   only necessary when using priorities greater than 32. */

#if TX_MAX_PRIORITIES > 32
#ifndef TX_DIV32_BIT_SET
#define TX_DIV32_BIT_SET(a,b)       (b) = (((ULONG) 1) << ((a)/((UINT) 32)));
#endif
#endif
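
/* Worked example: TX_DIV32_BIT_SET(40, b) sets b to (1 << (40 / 32)) == 0x2,
   marking priority group 1 (priorities 32-63) as active in
   _tx_thread_priority_map_active. */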


/* Define state change macro that can be used by run-mode debug agents to keep track of thread
   state changes. By default, it is mapped to white space. */

#ifndef TX_THREAD_STATE_CHANGE
#define TX_THREAD_STATE_CHANGE(a, b)
#endif


/* Define the macro to set the current thread pointer. This is particularly useful in SMP
   versions of ThreadX to add additional processing. The default implementation is to simply
   access the global current thread pointer directly. */

#ifndef TX_THREAD_SET_CURRENT
#define TX_THREAD_SET_CURRENT(a)            TX_MEMSET(&_tx_thread_current_ptr[0], (a), sizeof(_tx_thread_current_ptr));
#endif


/* Define the get system state macro. By default, it is mapped to the function
   that returns the system state of the current core. */

#ifndef TX_THREAD_GET_SYSTEM_STATE
#define TX_THREAD_GET_SYSTEM_STATE()        _tx_thread_smp_current_state_get()
#endif


/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
   indicates that _tx_thread_system_return should not be called. */

#ifndef TX_THREAD_SYSTEM_RETURN_CHECK
#define TX_THREAD_SYSTEM_RETURN_CHECK(c)    (c) = (ULONG) _tx_thread_preempt_disable; (c) = (c) | TX_THREAD_GET_SYSTEM_STATE();
#endif
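
/* Illustrative usage (a sketch of the internal idiom): the combined value is
   zero only when preemption is not disabled and the current core is at thread
   level, which is the condition for returning to the system:

        ULONG   combined;

        TX_THREAD_SYSTEM_RETURN_CHECK(combined)
        if (combined == ((ULONG) 0))
        {
            _tx_thread_system_return();
        }
*/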


/* Define the timeout setup macro used in _tx_thread_create. */

#ifndef TX_THREAD_CREATE_TIMEOUT_SETUP
#define TX_THREAD_CREATE_TIMEOUT_SETUP(t)   (t) -> tx_thread_timer.tx_timer_internal_timeout_function = &(_tx_thread_timeout);  \
                                            (t) -> tx_thread_timer.tx_timer_internal_timeout_param = TX_POINTER_TO_ULONG_CONVERT((t));
#endif


/* Define the thread timeout pointer setup macro used in _tx_thread_timeout. */

#ifndef TX_THREAD_TIMEOUT_POINTER_SETUP
#define TX_THREAD_TIMEOUT_POINTER_SETUP(t)  (t) = TX_ULONG_TO_THREAD_POINTER_CONVERT(timeout_input);
#endif


#ifdef TX_THREAD_SMP_SOURCE_CODE


/* Determine if the in-line capability has been disabled. */

#ifndef TX_DISABLE_INLINE


/* Define the inline option, which is compiler specific. If not defined, it will be resolved as
   "inline". */

#ifndef INLINE_DECLARE
#define INLINE_DECLARE  inline
#endif


/* Define the lowest set bit macro. Note that this may be overridden
   by a port specific definition if there are supporting assembly language
   instructions in the architecture. */

#ifndef TX_LOWEST_SET_BIT_CALCULATE

static INLINE_DECLARE UINT  _tx_thread_lowest_set_bit_calculate(ULONG map)
{
UINT    bit_set;

    if ((map & ((ULONG) 0x1)) != ((ULONG) 0))
    {
        bit_set = ((UINT) 0);
    }
    else
    {
        map = map & (ULONG) ((~map) + ((ULONG) 1));
        if (map < ((ULONG) 0x100))
        {
            bit_set = ((UINT) 1);
        }
        else if (map < ((ULONG) 0x10000))
        {
            bit_set = ((UINT) 9);
            map = map >> ((UINT) 8);
        }
        else if (map < ((ULONG) 0x01000000))
        {
            bit_set = ((UINT) 17);
            map = map >> ((UINT) 16);
        }
        else
        {
            bit_set = ((UINT) 25);
            map = map >> ((UINT) 24);
        }
        if (map >= ((ULONG) 0x10))
        {
            map = map >> ((UINT) 4);
            bit_set = bit_set + ((UINT) 4);
        }
        if (map >= ((ULONG) 0x4))
        {
            map = map >> ((UINT) 2);
            bit_set = bit_set + ((UINT) 2);
        }
        bit_set = bit_set - (UINT) (map & (ULONG) 0x1);
    }

    return(bit_set);
}


#define TX_LOWEST_SET_BIT_CALCULATE(m, b)   (b) = _tx_thread_lowest_set_bit_calculate((m));

#endif
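
/* Worked example: for map == 0x28 (binary 101000), the isolation step
   map & (~map + 1) leaves 0x8; the function then returns 3, the index of the
   lowest set bit, so TX_LOWEST_SET_BIT_CALCULATE(0x28, b) sets b to 3. */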


/* Define the next priority macro. Note that this may be overridden
   by a port specific definition. */

#ifndef TX_NEXT_PRIORITY_FIND
#if TX_MAX_PRIORITIES > 32
static INLINE_DECLARE UINT  _tx_thread_smp_next_priority_find(UINT priority)
{
ULONG   map_index;
ULONG   local_priority_map_active;
ULONG   local_priority_map;
ULONG   priority_bit;
ULONG   first_bit_set;
ULONG   found_priority;

    found_priority = ((UINT) TX_MAX_PRIORITIES);
    if (priority < ((UINT) TX_MAX_PRIORITIES))
    {
        map_index = priority/((UINT) 32);
        local_priority_map = _tx_thread_priority_maps[map_index];
        priority_bit = (((ULONG) 1) << (priority % ((UINT) 32)));
        local_priority_map = local_priority_map & ~(priority_bit - ((UINT) 1));
        if (local_priority_map != ((ULONG) 0))
        {
            TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
            found_priority = (map_index * ((UINT) 32)) + first_bit_set;
        }
        else
        {
            /* Move to next map index. */
            map_index++;
            if (map_index < (((UINT) TX_MAX_PRIORITIES)/((UINT) 32)))
            {
                priority_bit = (((ULONG) 1) << (map_index));
                local_priority_map_active = _tx_thread_priority_map_active & ~(priority_bit - ((UINT) 1));
                if (local_priority_map_active != ((ULONG) 0))
                {
                    TX_LOWEST_SET_BIT_CALCULATE(local_priority_map_active, map_index)
                    local_priority_map = _tx_thread_priority_maps[map_index];
                    TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
                    found_priority = (map_index * ((UINT) 32)) + first_bit_set;
                }
            }
        }
    }
    return(found_priority);
}
#else

static INLINE_DECLARE UINT  _tx_thread_smp_next_priority_find(UINT priority)
{
UINT    first_bit_set;
ULONG   local_priority_map;
UINT    next_priority;

    local_priority_map = _tx_thread_priority_maps[0];
    local_priority_map = local_priority_map >> priority;
    next_priority = priority;
    if (local_priority_map == ((ULONG) 0))
    {
        next_priority = ((UINT) TX_MAX_PRIORITIES);
    }
    else
    {
        if (next_priority >= ((UINT) TX_MAX_PRIORITIES))
        {
            next_priority = ((UINT) TX_MAX_PRIORITIES);
        }
        else
        {
            TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
            next_priority = priority + first_bit_set;
        }
    }

    return(next_priority);
}
#endif
#endif
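
/* Worked example (32-priority build): if _tx_thread_priority_maps[0] == 0x410
   (threads ready at priorities 4 and 10), _tx_thread_smp_next_priority_find(5)
   shifts the map right by 5, finds the lowest remaining set bit at offset 5,
   and returns priority 10. */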

static INLINE_DECLARE void  _tx_thread_smp_schedule_list_clear(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    i;
#endif


    /* Clear the schedule list. */
    _tx_thread_smp_schedule_list[0] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_smp_schedule_list[1] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_smp_schedule_list[2] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_smp_schedule_list[3] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_smp_schedule_list[4] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_smp_schedule_list[5] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the schedule list. */
    i = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (i < _tx_thread_smp_max_cores)
#endif
    {
        /* Clear entry in schedule list. */
        _tx_thread_smp_schedule_list[i] = TX_NULL;

        /* Move to next index. */
        i++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}

static INLINE_DECLARE VOID  _tx_thread_smp_execute_list_clear(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif

    /* Clear the execute list. */
    _tx_thread_execute_ptr[0] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_execute_ptr[1] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_execute_ptr[2] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_execute_ptr[3] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_execute_ptr[4] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_execute_ptr[5] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the execute list. */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Clear entry in execute list. */
        _tx_thread_execute_ptr[j] = TX_NULL;

        /* Move to next index. */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}


static INLINE_DECLARE VOID  _tx_thread_smp_schedule_list_setup(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif

    _tx_thread_smp_schedule_list[0] = _tx_thread_execute_ptr[0];
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_smp_schedule_list[1] = _tx_thread_execute_ptr[1];
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_smp_schedule_list[2] = _tx_thread_execute_ptr[2];
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_smp_schedule_list[3] = _tx_thread_execute_ptr[3];
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_smp_schedule_list[4] = _tx_thread_execute_ptr[4];
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_smp_schedule_list[5] = _tx_thread_execute_ptr[5];
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to setup the remainder of the schedule list. */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Setup entry in schedule list. */
        _tx_thread_smp_schedule_list[j] = _tx_thread_execute_ptr[j];

        /* Move to next index. */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}


#ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
static INLINE_DECLARE VOID  _tx_thread_smp_core_interrupt(TX_THREAD *thread_ptr, UINT current_core, UINT target_core)
{

TX_THREAD   *current_thread;


    /* Make sure this is a different core, since there is no need to interrupt the current core for
       a scheduling change. */
    if (current_core != target_core)
    {

        /* Yes, a different core is present. */

        /* Pickup the currently executing thread. */
        current_thread = _tx_thread_current_ptr[target_core];

        /* Determine if they are the same. */
        if ((current_thread != TX_NULL) && (thread_ptr != current_thread))
        {

            /* Not the same and not NULL... determine if the core is running at thread level. */
            if (_tx_thread_system_state[target_core] < TX_INITIALIZE_IN_PROGRESS)
            {

                /* Preempt the mapped thread. */
                _tx_thread_smp_core_preempt(target_core);
            }
        }
    }
}
#else

/* Define to whitespace. */
#define _tx_thread_smp_core_interrupt(a,b,c)

#endif


#ifdef TX_THREAD_SMP_WAKEUP_LOGIC
static INLINE_DECLARE VOID  _tx_thread_smp_core_wakeup(UINT current_core, UINT target_core)
{

    /* Determine if the core specified is not the current core - no need to wakeup the
       current core. */
    if (target_core != current_core)
    {

        /* Wakeup based on application's macro. */
        TX_THREAD_SMP_WAKEUP(target_core);
    }
}
#else

/* Define to whitespace. */
#define _tx_thread_smp_core_wakeup(a,b)

#endif


static INLINE_DECLARE VOID  _tx_thread_smp_execute_list_setup(UINT core_index)
{

TX_THREAD   *schedule_thread;
UINT        i;


    /* Loop to copy the schedule list into the execution list. */
    i = ((UINT) 0);
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (i < _tx_thread_smp_max_cores)
#endif
    {

        /* Pickup the thread to schedule. */
        schedule_thread = _tx_thread_smp_schedule_list[i];

        /* Copy the schedule list into the execution list. */
        _tx_thread_execute_ptr[i] = schedule_thread;

        /* If necessary, interrupt the core with the new thread to schedule. */
        _tx_thread_smp_core_interrupt(schedule_thread, core_index, i);

#ifdef TX_THREAD_SMP_WAKEUP_LOGIC

        /* Does this core need to be woken up? */
        if ((i != core_index) && (schedule_thread != TX_NULL))
        {

            /* Wakeup based on application's macro. */
            TX_THREAD_SMP_WAKEUP(i);
        }
#endif
        /* Move to next index. */
        i++;
    }
}


static INLINE_DECLARE ULONG  _tx_thread_smp_available_cores_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT    j;
#endif
ULONG   available_cores;

    available_cores = ((ULONG) 0);
    if (_tx_thread_execute_ptr[0] == TX_NULL)
    {
        available_cores = ((ULONG) 1);
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    if (_tx_thread_execute_ptr[1] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 2);
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    if (_tx_thread_execute_ptr[2] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 4);
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    if (_tx_thread_execute_ptr[3] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 8);
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    if (_tx_thread_execute_ptr[4] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 0x10);
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    if (_tx_thread_execute_ptr[5] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 0x20);
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to check the remaining cores. */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if this core is available. */
        if (_tx_thread_execute_ptr[j] == TX_NULL)
        {
            available_cores = available_cores | (((ULONG) 1) << j);
        }

        /* Move to next core. */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(available_cores);
}


static INLINE_DECLARE ULONG  _tx_thread_smp_possible_cores_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT        j;
#endif
ULONG       possible_cores;
TX_THREAD   *thread_ptr;

    possible_cores = ((ULONG) 0);
    thread_ptr = _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr = _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr = _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr = _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr = _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr = _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to examine the remaining cores. */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Is there a thread scheduled for this core? */
        thread_ptr = _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {
            possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
        }

        /* Move to next core. */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(possible_cores);
}


static INLINE_DECLARE UINT  _tx_thread_smp_lowest_priority_get(void)
{

#if TX_THREAD_SMP_MAX_CORES > 6
UINT        j;
#endif
TX_THREAD   *thread_ptr;
UINT        lowest_priority;

    lowest_priority = ((UINT) 0);
    thread_ptr = _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr = _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr = _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr = _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr = _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr = _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to examine the remaining cores. */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Determine if this core has a thread scheduled. */
        thread_ptr = _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {

            /* Is this the new lowest priority? */
            if (thread_ptr -> tx_thread_priority > lowest_priority)
            {
                lowest_priority = thread_ptr -> tx_thread_priority;
            }
        }

        /* Move to next core. */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
    return(lowest_priority);
}


static INLINE_DECLARE UINT  _tx_thread_smp_remap_solution_find(TX_THREAD *schedule_thread, ULONG available_cores, ULONG thread_possible_cores, ULONG test_possible_cores)
{

UINT        core;
UINT        previous_core;
ULONG       test_cores;
ULONG       last_thread_cores;
UINT        queue_first, queue_last;
UINT        core_queue[TX_THREAD_SMP_MAX_CORES-1];
TX_THREAD   *thread_ptr;
TX_THREAD   *last_thread;
TX_THREAD   *thread_remap_list[TX_THREAD_SMP_MAX_CORES];


    /* Clear the last thread cores in the search. */
    last_thread_cores = ((ULONG) 0);

    /* Set the last thread pointer to NULL. */
    last_thread = TX_NULL;

    /* Setup the core queue indices. */
    queue_first = ((UINT) 0);
    queue_last = ((UINT) 0);

    /* Build a list of possible cores for this thread to execute on, starting
       with the previously mapped core. */
    core = schedule_thread -> tx_thread_smp_core_mapped;
    if ((thread_possible_cores & (((ULONG) 1) << core)) != ((ULONG) 0))
    {

        /* Remember this potential mapping. */
        thread_remap_list[core] = schedule_thread;
        core_queue[queue_last] = core;

        /* Move to next slot. */
        queue_last++;

        /* Clear this core. */
        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);
    }

    /* Loop to add additional possible cores. */
    while (thread_possible_cores != ((ULONG) 0))
    {

        /* Determine the first possible core. */
        test_cores = thread_possible_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Clear this core. */
        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);

        /* Remember this potential mapping. */
        thread_remap_list[core] = schedule_thread;
        core_queue[queue_last] = core;

        /* Move to next slot. */
        queue_last++;
    }

    /* Loop to evaluate the potential thread mappings, against what is already mapped. */
    do
    {

        /* Pickup the next entry. */
        core = core_queue[queue_first];

        /* Move to next slot. */
        queue_first++;

        /* Retrieve the thread from the current mapping. */
        thread_ptr = _tx_thread_smp_schedule_list[core];

        /* Determine if there is a thread currently mapped to this core. */
        if (thread_ptr != TX_NULL)
        {

            /* Determine the cores available for this thread. */
            thread_possible_cores = thread_ptr -> tx_thread_smp_cores_allowed;
            thread_possible_cores = test_possible_cores & thread_possible_cores;

            /* Are there any possible cores for this thread? */
            if (thread_possible_cores != ((ULONG) 0))
            {

                /* Determine if there are cores available for this thread. */
                if ((thread_possible_cores & available_cores) != ((ULONG) 0))
                {

                    /* Yes, remember the final thread and cores that are valid for this thread. */
                    last_thread_cores = thread_possible_cores & available_cores;
                    last_thread = thread_ptr;

                    /* We are done - get out of the loop! */
                    break;
                }
                else
                {

                    /* Remove cores that will be added to the list. */
                    test_possible_cores = test_possible_cores & ~(thread_possible_cores);

                    /* Loop to add this thread to the potential mapping list. */
                    do
                    {

                        /* Calculate the core. */
                        test_cores = thread_possible_cores;
                        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

                        /* Clear this core. */
                        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);

                        /* Remember this thread for remapping. */
                        thread_remap_list[core] = thread_ptr;

                        /* Remember this core. */
                        core_queue[queue_last] = core;

                        /* Move to next slot. */
                        queue_last++;

                    } while (thread_possible_cores != ((ULONG) 0));
                }
            }
        }
    } while (queue_first != queue_last);

    /* Was a remapping solution found? */
    if (last_thread != TX_NULL)
    {

        /* Pickup the core of the last thread to remap. */
        core = last_thread -> tx_thread_smp_core_mapped;

        /* Pickup the thread from the remapping list. */
        thread_ptr = thread_remap_list[core];

        /* Loop until we arrive at the thread we have been trying to map. */
        while (thread_ptr != schedule_thread)
        {

            /* Move this thread in the schedule list. */
            _tx_thread_smp_schedule_list[core] = thread_ptr;

            /* Remember the previous core. */
            previous_core = core;

            /* Pickup the core of thread to remap. */
            core = thread_ptr -> tx_thread_smp_core_mapped;

            /* Save the new core mapping for this thread. */
            thread_ptr -> tx_thread_smp_core_mapped = previous_core;

            /* Move to the next thread. */
            thread_ptr = thread_remap_list[core];
        }

        /* Save the remaining thread in the updated schedule list. */
        _tx_thread_smp_schedule_list[core] = thread_ptr;

        /* Update this thread's core mapping. */
        thread_ptr -> tx_thread_smp_core_mapped = core;

        /* Finally, setup the last thread in the remapping solution. */
        test_cores = last_thread_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Setup the last thread. */
        _tx_thread_smp_schedule_list[core] = last_thread;

        /* Remember the core mapping for this thread. */
        last_thread -> tx_thread_smp_core_mapped = core;
    }
    else
    {

        /* Set core to the maximum value in order to signal a remapping solution was not found. */
        core = ((UINT) TX_THREAD_SMP_MAX_CORES);
    }

    /* Return core to the caller. */
    return(core);
}


static INLINE_DECLARE ULONG  _tx_thread_smp_preemptable_threads_get(UINT priority, TX_THREAD *possible_preemption_list[])
{

UINT        i, j, k;
TX_THREAD   *thread_ptr;
TX_THREAD   *next_thread;
TX_THREAD   *search_thread;
TX_THREAD   *list_head;
ULONG       possible_cores = ((ULONG) 0);


    /* Clear the possible preemption list. */
    possible_preemption_list[0] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    possible_preemption_list[1] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    possible_preemption_list[2] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    possible_preemption_list[3] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    possible_preemption_list[4] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    possible_preemption_list[5] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the possible preemption list. */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX

    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (j < _tx_thread_smp_max_cores)
#endif
    {

        /* Clear entry in possible preemption list. */
        possible_preemption_list[j] = TX_NULL;

        /* Move to next core. */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif

    /* Loop to build a list of threads of less priority. */
    i = ((UINT) 0);
    j = ((UINT) 0);
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else

    while (i < _tx_thread_smp_max_cores)
#endif
    {

        /* Pickup the currently mapped thread. */
        thread_ptr = _tx_thread_execute_ptr[i];

        /* Is there a thread scheduled for this core? */
        if (thread_ptr != TX_NULL)
        {

            /* Update the possible cores bit map. */
            possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;

            /* Can this thread be preempted? */
            if (priority < thread_ptr -> tx_thread_priority)
            {

                /* Yes, this scheduled thread is lower priority, so add it to the possible preemption list. */
                possible_preemption_list[j] = thread_ptr;

                /* Move to next entry in possible preemption list. */
                j++;
            }
        }

        /* Move to next core. */
        i++;
    }

    /* Check to see if there is more than one thread that can be preempted. */
    if (j > ((UINT) 1))
    {

        /* Yes, loop through the possible preemption list and sort by priority. */
        i = ((UINT) 0);
        do
        {

            /* Pickup preemptable thread. */
            thread_ptr = possible_preemption_list[i];

            /* Initialize the search index. */
            k = i + ((UINT) 1);

            /* Loop to get the lowest priority thread at the front of the list. */
            while (k < j)
            {

                /* Pickup the next thread to evaluate. */
                next_thread = possible_preemption_list[k];

                /* Is this thread lower priority? */
                if (next_thread -> tx_thread_priority > thread_ptr -> tx_thread_priority)
                {

                    /* Yes, swap the threads. */
                    possible_preemption_list[i] = next_thread;
                    possible_preemption_list[k] = thread_ptr;
                    thread_ptr = next_thread;
                }
                else
                {

                    /* Compare the thread priorities. */
                    if (next_thread -> tx_thread_priority == thread_ptr -> tx_thread_priority)
                    {

                        /* Equal priority threads... see which is in the ready list first. */
                        search_thread = thread_ptr -> tx_thread_ready_next;

                        /* Pickup the list head. */
                        list_head = _tx_thread_priority_list[thread_ptr -> tx_thread_priority];

                        /* Now loop to see if the next thread is after the current thread in the ready list. */
                        while (search_thread != list_head)
                        {

                            /* Have we found the next thread? */
                            if (search_thread == next_thread)
                            {

                                /* Yes, swap the threads. */
                                possible_preemption_list[i] = next_thread;
                                possible_preemption_list[k] = thread_ptr;
                                thread_ptr = next_thread;
                                break;
                            }

                            /* Move to the next thread. */
                            search_thread = search_thread -> tx_thread_ready_next;
                        }
                    }

                    /* Move to examine the next possible preemptable thread. */
                    k++;
                }
            }

            /* We have found the lowest priority thread to preempt, now find the next lowest. */
            i++;
        }
        while (i < (j-((UINT) 1)));
    }

    /* Return the possible cores. */
    return(possible_cores);
}
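
/* Worked example (illustrative): with threads of priorities {5, 9, 7} mapped to
   cores 0-2 and a new priority-3 thread arriving, the possible preemption list
   is built as {5, 9, 7} and then ordered lowest priority first (numerically
   largest), {9, 7, 5}, so the core running the priority-9 thread becomes the
   first preemption candidate. */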

static INLINE_DECLARE VOID  _tx_thread_smp_simple_priority_change(TX_THREAD *thread_ptr, UINT new_priority)
{

UINT        priority;
ULONG       priority_bit;
TX_THREAD   *head_ptr;
TX_THREAD   *tail_ptr;
#if TX_MAX_PRIORITIES > 32
UINT        map_index;
#endif

    /* Pickup the priority. */
    priority = thread_ptr -> tx_thread_priority;

    /* Determine if there are other threads at this priority that are
       ready. */
    if (thread_ptr -> tx_thread_ready_next != thread_ptr)
    {

        /* Yes, there are other threads at this priority ready. */

        /* Just remove this thread from the priority list. */
        (thread_ptr -> tx_thread_ready_next) -> tx_thread_ready_previous = thread_ptr -> tx_thread_ready_previous;
        (thread_ptr -> tx_thread_ready_previous) -> tx_thread_ready_next = thread_ptr -> tx_thread_ready_next;

        /* Determine if this is the head of the priority list. */
        if (_tx_thread_priority_list[priority] == thread_ptr)
        {

            /* Update the head pointer of this priority list. */
            _tx_thread_priority_list[priority] = thread_ptr -> tx_thread_ready_next;
        }
    }
    else
    {

        /* This is the only thread at this priority ready to run. Set the head
           pointer to NULL. */
        _tx_thread_priority_list[priority] = TX_NULL;

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array. */
        map_index = priority/((UINT) 32);
#endif

        /* Clear this priority bit in the ready priority bit map. */
        TX_MOD32_BIT_SET(priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

        /* Determine if there are any other bits set in this priority map. */
        if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
        {

            /* No, clear the active bit to signify this priority map has nothing set. */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_map_active = _tx_thread_priority_map_active & (~(priority_bit));
        }
#endif
    }

    /* Determine if the actual thread priority should be setup, which is the
       case if the new priority is higher than the priority inheritance. */
    if (new_priority < thread_ptr -> tx_thread_inherit_priority)
    {

        /* Change thread priority to the new user's priority. */
        thread_ptr -> tx_thread_priority = new_priority;
        thread_ptr -> tx_thread_preempt_threshold = new_priority;
    }
    else
    {

        /* Change thread priority to the priority inheritance. */
        thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
        thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
    }

    /* Now, place the thread at the new priority level. */

    /* Determine if there are other threads at this priority that are
       ready. */
    head_ptr = _tx_thread_priority_list[new_priority];
    if (head_ptr != TX_NULL)
    {

        /* Yes, there are other threads at this priority already ready. */

        /* Just add this thread to the priority list. */
        tail_ptr = head_ptr -> tx_thread_ready_previous;
        tail_ptr -> tx_thread_ready_next = thread_ptr;
        head_ptr -> tx_thread_ready_previous = thread_ptr;
        thread_ptr -> tx_thread_ready_previous = tail_ptr;
        thread_ptr -> tx_thread_ready_next = head_ptr;
    }
    else
    {

        /* First thread at this priority ready. Add to the front of the list. */
        _tx_thread_priority_list[new_priority] = thread_ptr;
        thread_ptr -> tx_thread_ready_next = thread_ptr;
        thread_ptr -> tx_thread_ready_previous = thread_ptr;

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array. */
        map_index = new_priority/((UINT) 32);

        /* Set the active bit to remember that the priority map has something set. */
        TX_DIV32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_map_active = _tx_thread_priority_map_active | priority_bit;
#endif

        /* Or in the thread's priority bit. */
        TX_MOD32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
    }
}
#else

/* In-line was disabled. All of the above helper functions must be defined as actual functions. */

UINT   _tx_thread_lowest_set_bit_calculate(ULONG map);
#define TX_LOWEST_SET_BIT_CALCULATE(m, b)   (b) = _tx_thread_lowest_set_bit_calculate((m));

UINT   _tx_thread_smp_next_priority_find(UINT priority);
VOID   _tx_thread_smp_schedule_list_clear(void);
VOID   _tx_thread_smp_execute_list_clear(void);
VOID   _tx_thread_smp_schedule_list_setup(void);

#ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
VOID   _tx_thread_smp_core_interrupt(TX_THREAD *thread_ptr, UINT current_core, UINT target_core);
#else
/* Define to whitespace. */
#define _tx_thread_smp_core_interrupt(a,b,c)
#endif

#ifdef TX_THREAD_SMP_WAKEUP_LOGIC
VOID   _tx_thread_smp_core_wakeup(UINT current_core, UINT target_core);
#else
/* Define to whitespace. */
#define _tx_thread_smp_core_wakeup(a,b)
#endif

VOID   _tx_thread_smp_execute_list_setup(UINT core_index);
ULONG  _tx_thread_smp_available_cores_get(void);
ULONG  _tx_thread_smp_possible_cores_get(void);
UINT   _tx_thread_smp_lowest_priority_get(void);
UINT   _tx_thread_smp_remap_solution_find(TX_THREAD *schedule_thread, ULONG available_cores, ULONG thread_possible_cores, ULONG test_possible_cores);
ULONG  _tx_thread_smp_preemptable_threads_get(UINT priority, TX_THREAD *possible_preemption_list[]);
VOID   _tx_thread_smp_simple_priority_change(TX_THREAD *thread_ptr, UINT new_priority);

#endif


#endif

#endif
