1 /**************************************************************************/
2 /* */
3 /* Copyright (c) Microsoft Corporation. All rights reserved. */
4 /* */
5 /* This software is licensed under the Microsoft Software License */
6 /* Terms for Microsoft Azure RTOS. Full text of the license can be */
7 /* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
8 /* and in the root directory of this software. */
9 /* */
10 /**************************************************************************/
11
12
13 /**************************************************************************/
14 /**************************************************************************/
15 /** */
16 /** ThreadX Component */
17 /** */
18 /** Thread */
19 /** */
20 /**************************************************************************/
21 /**************************************************************************/
22
23 #define TX_SOURCE_CODE
24 #define TX_THREAD_SMP_SOURCE_CODE
25
26
27 /* Include necessary system files. */
28
29 #include "tx_api.h"
30 #include "tx_thread.h"
31 #include "tx_timer.h"
32 #include "tx_trace.h"
33
34
35 /**************************************************************************/
36 /* */
37 /* FUNCTION RELEASE */
38 /* */
39 /* _tx_thread_relinquish PORTABLE SMP */
40 /* 6.1 */
41 /* AUTHOR */
42 /* */
43 /* William E. Lamie, Microsoft Corporation */
44 /* */
45 /* DESCRIPTION */
46 /* */
47 /* This function determines if there is another higher or equal */
48 /* priority, non-executing thread that can execute on this processor. */
49 /* If such a thread is found, the calling thread relinquishes control. */
50 /* Otherwise, this function simply returns. */
51 /* */
52 /* INPUT */
53 /* */
54 /* None */
55 /* */
56 /* OUTPUT */
57 /* */
58 /* None */
59 /* */
60 /* CALLS */
61 /* */
62 /* _tx_thread_smp_rebalance_execute_list Rebalance the execution list */
63 /* _tx_thread_system_return Return to the system */
64 /* */
65 /* CALLED BY */
66 /* */
67 /* Application Code */
68 /* */
69 /* RELEASE HISTORY */
70 /* */
71 /* DATE NAME DESCRIPTION */
72 /* */
73 /* 09-30-2020 William E. Lamie Initial Version 6.1 */
74 /* */
75 /**************************************************************************/
VOID  _tx_thread_relinquish(VOID)
{

TX_INTERRUPT_SAVE_AREA

UINT            priority;
TX_THREAD       *thread_ptr;
TX_THREAD       *head_ptr;
TX_THREAD       *tail_ptr;
TX_THREAD       *next_thread;
TX_THREAD       *previous_thread;
UINT            core_index;
UINT            rebalance;
UINT            mapped_core;
ULONG           excluded;

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD
UINT            base_priority;
UINT            priority_bit_set;
UINT            next_preempted;
ULONG           priority_bit;
ULONG           priority_map;
TX_THREAD       *preempted_thread;
#if TX_MAX_PRIORITIES > 32
UINT            map_index;
#endif
#endif
UINT            finished;


    /* Default finished to false.  Set to true once a context switch has been
       requested (or determined unnecessary) so the tail of the function is skipped.  */
    finished =  TX_FALSE;

    /* Initialize the rebalance flag to false.  When true, the execute list must be
       recomputed by _tx_thread_smp_rebalance_execute_list before returning.  */
    rebalance =  TX_FALSE;

    /* Lockout interrupts while thread attempts to relinquish control.  */
    TX_DISABLE

    /* Pickup the index of the core this code is executing on.  */
    core_index =  TX_SMP_CORE_ID;

    /* Pickup the current thread pointer.  */
    thread_ptr =  _tx_thread_current_ptr[core_index];

#ifndef TX_NO_TIMER

    /* Reset time slice for current thread, since relinquishing voluntarily
       gives up the remainder of the current slice.  */
    _tx_timer_time_slice[core_index] =  thread_ptr -> tx_thread_new_time_slice;
#endif

#ifdef TX_ENABLE_STACK_CHECKING

    /* Check this thread's stack.  */
    TX_THREAD_STACK_CHECK(thread_ptr)
#endif

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RELINQUISH, &thread_ptr, TX_POINTER_TO_ULONG_CONVERT(thread_ptr -> tx_thread_ready_next), 0, 0, TX_TRACE_THREAD_EVENTS)

    /* Log this kernel call.  */
    TX_EL_THREAD_RELINQUISH_INSERT

    /* Pickup the thread's priority.  */
    priority =  thread_ptr -> tx_thread_priority;

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

    /* Debug entry.  */
    _tx_thread_smp_debug_entry_insert(0, 0, thread_ptr);
#endif

    /* Pickup the next thread at this priority level.  */
    next_thread =  thread_ptr -> tx_thread_ready_next;

    /* Pickup the head of the circular ready list for this priority.  */
    head_ptr =  _tx_thread_priority_list[priority];

    /* Pickup the list tail.  */
    tail_ptr =  head_ptr -> tx_thread_ready_previous;

    /* Determine if this thread is not the tail pointer.  If it is the tail, it is
       already last at this priority and the list does not need to change.  */
    if (thread_ptr != tail_ptr)
    {

        /* Not the tail pointer, this thread must be moved to the end of the ready list.  */

        /* Determine if this thread is at the head of the list.  */
        if (head_ptr == thread_ptr)
        {

            /* Simply move the head pointer to put this thread at the end of the ready list at this priority
               (the list is circular, so the former head becomes the tail).  */
            _tx_thread_priority_list[priority] =  next_thread;
        }
        else
        {

            /* Now we need to remove this thread from its current position and place it at the end of the list.  */

            /* Pickup the previous thread pointer.  */
            previous_thread =  thread_ptr -> tx_thread_ready_previous;

            /* Remove the thread from the ready list.  */
            next_thread -> tx_thread_ready_previous =    previous_thread;
            previous_thread -> tx_thread_ready_next =    next_thread;

            /* Insert the thread at the end of the list.  */
            tail_ptr -> tx_thread_ready_next =         thread_ptr;
            head_ptr -> tx_thread_ready_previous =     thread_ptr;
            thread_ptr -> tx_thread_ready_previous =   tail_ptr;
            thread_ptr -> tx_thread_ready_next =       head_ptr;
        }

        /* Pickup the mapped core of the relinquishing thread - this can be different from the current core.  */
        mapped_core =  thread_ptr -> tx_thread_smp_core_mapped;

        /* Determine if the relinquishing thread is no longer present in the execute list.  */
        if (thread_ptr != _tx_thread_execute_ptr[mapped_core])
        {

            /* Yes, the thread is no longer mapped.  Set the rebalance flag to determine if there is a new mapping due to moving
               this thread to the end of the priority list.  */

            /* Set the rebalance flag to true.  */
            rebalance =  TX_TRUE;
        }

        /* Determine if preemption-threshold is in force. */
        else if (thread_ptr -> tx_thread_preempt_threshold == priority)
        {

            /* No preemption-threshold is in force (threshold equals the thread's own
               priority), so a same-priority successor may be scheduled directly.  */

            /* Determine if there is a thread at the same priority that isn't currently executing.  */
            do
            {

                /* Isolate the exclusion bit for the mapped core:  1 means the candidate
                   thread is not allowed to run on that core.  */
                excluded =  (next_thread -> tx_thread_smp_cores_excluded >> mapped_core) & ((ULONG) 1);

                /* Determine if the next thread has preemption-threshold set or is excluded from running on the
                   mapped core.  */
                if ((next_thread -> tx_thread_preempt_threshold < next_thread -> tx_thread_priority) ||
                    (excluded == ((ULONG) 1)))
                {

                    /* Set the rebalance flag.  */
                    rebalance =  TX_TRUE;

                    /* Get out of the loop.  We need to rebalance the list when we detect preemption-threshold.  */
                    break;
                }
                else
                {

                    /* Is the next thread already in the execute list?  */
                    if (next_thread != _tx_thread_execute_ptr[next_thread -> tx_thread_smp_core_mapped])
                    {

                        /* No, we can place this thread in the position the relinquishing thread
                           was in.  */

                        /* Remember this index in the thread control block.  */
                        next_thread -> tx_thread_smp_core_mapped =  mapped_core;

                        /* Setup the entry in the execution list.  */
                        _tx_thread_execute_ptr[mapped_core] =  next_thread;

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

                        /* Debug entry.  */
                        _tx_thread_smp_debug_entry_insert(1, 0, next_thread);
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                        /* Increment the number of thread relinquishes.  */
                        thread_ptr -> tx_thread_performance_relinquish_count++;

                        /* Increment the total number of thread relinquish operations.  */
                        _tx_thread_performance_relinquish_count++;

                        /* No, there is another thread ready to run and will be scheduled upon return.  */
                        _tx_thread_performance_non_idle_return_count++;
#endif

#ifdef TX_ENABLE_STACK_CHECKING

                        /* Check this thread's stack.  */
                        TX_THREAD_STACK_CHECK(next_thread)
#endif

#ifndef TX_NOT_INTERRUPTABLE

                        /* Increment the preempt disable flag in order to keep the protection
                           across the interrupt-enabled window until the scheduler runs.  */
                        _tx_thread_preempt_disable++;

                        /* Restore interrupts.  */
                        TX_RESTORE
#endif

                        /* Transfer control to the system so the scheduler can execute
                           the next thread.  */
                        _tx_thread_system_return();


#ifdef TX_NOT_INTERRUPTABLE

                        /* Restore interrupts.  */
                        TX_RESTORE
#endif

                        /* Set the finished flag.  */
                        finished =  TX_TRUE;

                    }

                    /* Move to the next thread at this priority.  */
                    next_thread =  next_thread -> tx_thread_ready_next;

                }
            } while ((next_thread != thread_ptr) && (finished == TX_FALSE));

            /* Determine if we are finished.  */
            if (finished == TX_FALSE)
            {

                /* No other thread is ready at this priority... simply return.  */

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

                /* Debug entry.  */
                _tx_thread_smp_debug_entry_insert(1, 0, thread_ptr);
#endif

                /* Restore interrupts.  */
                TX_RESTORE

                /* Set the finished flag.  */
                finished =  TX_TRUE;
            }
        }
        else
        {

            /* Preemption-threshold is in force.  */

            /* Set the rebalance flag.  */
            rebalance =  TX_TRUE;
        }
    }

    /* Determine if preemption-threshold is in force.  A threshold below the thread's
       priority means this thread was blocking lower-priority preemption; relinquishing
       lifts that, so the preempted-thread bookkeeping must be cleaned up.  */
    if (thread_ptr -> tx_thread_preempt_threshold < priority)
    {

        /* Set the rebalance flag.  */
        rebalance =  TX_TRUE;

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

#if TX_MAX_PRIORITIES > 32

        /* Calculate the index into the bit map array.  */
        map_index =  priority/((UINT) 32);
#endif

        /* Ensure that this thread's priority is clear in the preempt map.
           Note:  MAP_INDEX is a tx_thread.h macro - map_index when
           TX_MAX_PRIORITIES > 32, otherwise 0.  */
        TX_MOD32_BIT_SET(priority, priority_bit)
        _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

        /* Determine if there are any other bits set in this preempt map.  */
        if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
        {

            /* No, clear the active bit to signify this preempted map has nothing set.  */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active & (~(priority_bit));
        }
#endif

        /* Clear the entry in the preempted list.  */
        _tx_thread_preemption_threshold_list[priority] =  TX_NULL;

        /* Does this thread have preemption-threshold?  */
        if (_tx_thread_preemption__threshold_scheduled == thread_ptr)
        {

            /* Yes, set the preempted thread to NULL.  */
            _tx_thread_preemption__threshold_scheduled =  TX_NULL;
        }

        /* Calculate the first thread with preemption-threshold active.  */
#if TX_MAX_PRIORITIES > 32
        if (_tx_thread_preempted_map_active != ((ULONG) 0))
#else
        if (_tx_thread_preempted_maps[0] != ((ULONG) 0))
#endif
        {
#if TX_MAX_PRIORITIES > 32

            /* Calculate the index to find the next highest priority thread ready for execution.  */
            priority_map =    _tx_thread_preempted_map_active;

            /* Calculate the lowest bit set in the priority map.  */
            TX_LOWEST_SET_BIT_CALCULATE(priority_map, map_index)

            /* Calculate the base priority as well.  */
            base_priority =  map_index * ((UINT) 32);
#else

            /* Setup the base priority to zero.  */
            base_priority =   ((UINT) 0);
#endif

            /* Setup temporary preempted map.  */
            priority_map =  _tx_thread_preempted_maps[MAP_INDEX];

            /* Calculate the lowest bit set in the priority map.  */
            TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit_set)

            /* Move priority bit set into priority bit.  */
            priority_bit =  (ULONG) priority_bit_set;

            /* Setup the highest priority preempted thread.  */
            next_preempted =  base_priority + priority_bit;

            /* Pickup the previously preempted thread.  */
            preempted_thread =  _tx_thread_preemption_threshold_list[next_preempted];

            /* Setup the preempted thread.  */
            _tx_thread_preemption__threshold_scheduled =  preempted_thread;
        }
#else

        /* Determine if this thread has preemption-threshold disabled.  */
        if (thread_ptr == _tx_thread_preemption__threshold_scheduled)
        {

            /* Clear the global preemption disable flag.  */
            _tx_thread_preemption__threshold_scheduled =  TX_NULL;
        }
#endif
    }

    /* Check to see if there is still work to do.  */
    if (finished == TX_FALSE)
    {

#ifdef TX_THREAD_SMP_DEBUG_ENABLE

        /* Debug entry.  */
        _tx_thread_smp_debug_entry_insert(1, 0, thread_ptr);
#endif

        /* Determine if we need to rebalance the execute list.  */
        if (rebalance == TX_TRUE)
        {

            /* Rebalance the execute list.  */
            _tx_thread_smp_rebalance_execute_list(core_index);
        }

        /* Determine if this thread needs to return to the system, i.e., a different
           thread is now scheduled to run on this core.  */
        if (_tx_thread_execute_ptr[core_index] != thread_ptr)
        {

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

            /* Increment the number of thread relinquishes.  */
            thread_ptr -> tx_thread_performance_relinquish_count++;

            /* Increment the total number of thread relinquish operations.  */
            _tx_thread_performance_relinquish_count++;

            /* Determine if an idle system return is present.  */
            if (_tx_thread_execute_ptr[core_index] == TX_NULL)
            {

                /* Yes, increment the return to idle return count.  */
                _tx_thread_performance_idle_return_count++;
            }
            else
            {

                /* No, there is another thread ready to run and will be scheduled upon return.  */
                _tx_thread_performance_non_idle_return_count++;
            }
#endif

#ifdef TX_ENABLE_STACK_CHECKING

            /* Pickup new thread pointer.  */
            thread_ptr =  _tx_thread_execute_ptr[core_index];

            /* Check this thread's stack.  */
            TX_THREAD_STACK_CHECK(thread_ptr)
#endif

#ifndef TX_NOT_INTERRUPTABLE

            /* Increment the preempt disable flag in order to keep the protection
               across the interrupt-enabled window until the scheduler runs.  */
            _tx_thread_preempt_disable++;

            /* Restore interrupts.  */
            TX_RESTORE
#endif

            /* Transfer control to the system so the scheduler can execute
               the next thread.  */
            _tx_thread_system_return();

#ifdef TX_NOT_INTERRUPTABLE

            /* Restore interrupts.  */
            TX_RESTORE
#endif
        }
        else
        {

            /* This thread remains scheduled on this core - simply restore interrupts and return.  */
            TX_RESTORE
        }
    }
}
504
505