/***************************************************************************
 * Copyright (c) 2024 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 **************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/


#define TX_SOURCE_CODE
#define TX_THREAD_SMP_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_thread.h"
#include "tx_timer.h"
#include <stdio.h>
#include <errno.h>

extern sem_t _tx_linux_isr_semaphore;
extern UINT _tx_linux_timer_waiting;
extern pthread_t _tx_linux_timer_id;
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_schedule                                 SMP/Linux/GCC   */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function waits for a thread control block pointer to appear in */
/*    the _tx_thread_execute_ptr variable.  Once a thread pointer appears */
/*    in the variable, the corresponding thread is resumed.               */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_linux_mutex_obtain                                              */
/*    _tx_linux_debug_entry_insert                                        */
/*    _tx_linux_thread_resume                                             */
/*    tx_linux_sem_post                                                   */
/*    sem_trywait                                                         */
/*    tx_linux_sem_wait                                                   */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_initialize_kernel_enter          ThreadX entry function         */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020     William E. Lamie         Initial Version 6.1           */
/*                                                                        */
/**************************************************************************/
VOID _tx_thread_schedule(VOID)
{

UINT            core;
TX_THREAD       *current_thread;
TX_THREAD       *execute_thread;
struct timespec ts;
UCHAR           preemt_retry = TX_FALSE;

    /* Loop forever.  This is the scheduler for all virtual cores and never
       returns: each pass runs under the global Linux mutex, finishes any
       deferred preemptions, then maps ready threads onto idle cores.  */
    while(1)
    {

        /* Lock Linux mutex. */
        _tx_linux_mutex_obtain(&_tx_linux_mutex);

        /* Check for a system error condition.  The scheduler should never
           observe the simulated interrupt-disable flag set here.  */
        if (_tx_linux_global_int_disabled_flag != TX_FALSE)
        {

            /* This should not happen... increment the system error counter. */
            _tx_linux_system_error++;
        }

        /* Debug entry. */
        _tx_linux_debug_entry_insert("SCHEDULE-wake_up", __FILE__, __LINE__);

        /* Loop through each virtual core to look for an idle core. */
        for (core = 0; core < TX_THREAD_SMP_MAX_CORES; core++)
        {

            /* Pickup the current thread pointer for this core. */
            current_thread = _tx_thread_current_ptr[core];

            /* Determine if the thread's deferred preemption flag is set. */
            if ((current_thread) && (current_thread -> tx_thread_linux_deferred_preempt))
            {
                if (_tx_thread_preempt_disable)
                {

                    /* Preemption disabled.  Retry the whole scheduling pass
                       after briefly yielding (see preemt_retry handling
                       below).  */
                    preemt_retry = TX_TRUE;
                    break;
                }

                if (current_thread -> tx_thread_state != TX_TERMINATED)
                {

                    /* Suspend the thread to simulate preemption. Note that the thread is suspended BEFORE the protection get
                       flag is checked to ensure there is not a race condition between this thread and the update of that flag. */
                    _tx_linux_thread_suspend(current_thread -> tx_thread_linux_thread_id);

                    /* Clear the preemption flag. */
                    current_thread -> tx_thread_linux_deferred_preempt = TX_FALSE;

                    /* Indicate that this thread was suspended asynchronously.
                       Type 1 means the pthread was stopped externally, i.e.
                       it is NOT waiting on its run semaphore.  */
                    current_thread -> tx_thread_linux_suspension_type = 1;

                    /* Save the remaining time-slice and disable it. */
                    if (_tx_timer_time_slice[core])
                    {

                        current_thread -> tx_thread_time_slice = _tx_timer_time_slice[core];
                        _tx_timer_time_slice[core] = 0;
                    }
                }

                /* Clear the current thread pointer. */
                _tx_thread_current_ptr[core] = TX_NULL;

                /* Clear this mapping entry. */
                _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_thread = TX_NULL;
                _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_linux_thread_id = 0;

                /* Indicate that this thread is now ready for scheduling again by another core. */
                current_thread -> tx_thread_smp_core_control = 1;

                /* Debug entry. */
                _tx_linux_debug_entry_insert("SCHEDULE-core_preempt_complete", __FILE__, __LINE__);
            }

            /* Determine if this core is idle. */
            if (_tx_thread_current_ptr[core] == TX_NULL)
            {

                /* Yes, this core is idle, determine if there is a thread that can be scheduled for it. */

                /* Pickup the execute thread pointer. */
                execute_thread = _tx_thread_execute_ptr[core];

                /* Is there a thread that is ready to execute on this core?
                   tx_thread_smp_core_control must be nonzero, i.e. no other
                   core currently owns the thread.  */
                if ((execute_thread) && (execute_thread -> tx_thread_smp_core_control))
                {

                    /* Yes! We have a thread to execute. Note that the critical section is already
                       active from the scheduling loop above. */

                    /* Setup the current thread pointer. */
                    _tx_thread_current_ptr[core] = execute_thread;

                    /* Remember the virtual core in the thread control block. */
                    execute_thread -> tx_thread_linux_virtual_core = core;

                    /* Setup the virtual core mapping structure. */
                    _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_thread = execute_thread;
                    _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_linux_thread_id = execute_thread -> tx_thread_linux_thread_id;

                    /* Clear the execution control flag, marking the thread as
                       owned by this core.  */
                    execute_thread -> tx_thread_smp_core_control = 0;

                    /* Increment the run count for this thread. */
                    execute_thread -> tx_thread_run_count++;

                    /* Setup time-slice, if present. */
                    _tx_timer_time_slice[core] = execute_thread -> tx_thread_time_slice;

                    /* Determine how the thread was last suspended. */
                    if (execute_thread -> tx_thread_linux_suspension_type == 1)
                    {

                        /* Clear the suspension type. */
                        execute_thread -> tx_thread_linux_suspension_type = 0;

                        /* Debug entry. */
                        _tx_linux_debug_entry_insert("SCHEDULE-resume_thread", __FILE__, __LINE__);

                        /* Pseudo interrupt suspension. The thread is not waiting on
                           its run semaphore. */
                        _tx_linux_thread_resume(execute_thread -> tx_thread_linux_thread_id);
                    }
                    else if (execute_thread -> tx_thread_linux_suspension_type == 2)
                    {

                        /* Clear the suspension type.  Type 2 means the thread
                           suspended itself and is blocked on its run
                           semaphore.  */
                        execute_thread -> tx_thread_linux_suspension_type = 0;

                        /* Debug entry. */
                        _tx_linux_debug_entry_insert("SCHEDULE-release_sem", __FILE__, __LINE__);

                        /* Make sure semaphore is 0, draining any stale posts
                           before handing the thread exactly one token.  */
                        while(!sem_trywait(&execute_thread -> tx_thread_linux_thread_run_semaphore));

                        /* Let the thread run again by releasing its run semaphore. */
                        tx_linux_sem_post(&execute_thread -> tx_thread_linux_thread_run_semaphore);

                        /* Block timer ISR. */
                        if(_tx_linux_timer_waiting)
                        {

                            /* It is woken up by timer ISR. */
                            /* Let ThreadX thread wake up first. */
                            tx_linux_sem_wait(&_tx_linux_scheduler_semaphore);

                            /* Wake up timer ISR. */
                            tx_linux_sem_post(&_tx_linux_isr_semaphore);
                        }
                        else
                        {

                            /* It is woken up by TX_THREAD. */
                            /* Suspend timer thread and let ThreadX thread wake up first. */
                            _tx_linux_thread_suspend(_tx_linux_timer_id);
                            tx_linux_sem_wait(&_tx_linux_scheduler_semaphore);
                            _tx_linux_thread_resume(_tx_linux_timer_id);

                        }
                    }
                    else
                    {

                        /* System error, increment the counter.  Any other
                           suspension type is unexpected here.  */
                        _tx_linux_system_error++;
                    }
                }
            }
        }

        if (preemt_retry)
        {

            /* Unlock linux mutex. */
            _tx_linux_mutex_release_all(&_tx_linux_mutex);

            /* Let user thread run to reset _tx_thread_preempt_disable. */
            _tx_linux_thread_sleep(1);

            preemt_retry = TX_FALSE;

            continue;
        }

        /* Debug entry. */
        _tx_linux_debug_entry_insert("SCHEDULE-self_suspend_sem", __FILE__, __LINE__);

        /* Unlock linux mutex. */
        _tx_linux_mutex_release_all(&_tx_linux_mutex);

        /* Now suspend the main thread so the application thread can run.
           Wait on the scheduler semaphore with a 2ms timeout so the
           scheduler also wakes periodically even if nothing posts it.  */
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_nsec += 2000000;

        /* Normalize the timespec when the nanosecond field overflows one second.  */
        if (ts.tv_nsec >= 1000000000)
        {
            ts.tv_nsec -= 1000000000;
            ts.tv_sec++;
        }
        tx_linux_sem_timedwait(&_tx_linux_scheduler_semaphore, &ts);

        /* NOTE(review): the result of this second clock_gettime is never
           read before ts is overwritten on the next pass — appears vestigial;
           confirm before removing.  */
        clock_gettime(CLOCK_REALTIME, &ts);
    }
}
290
291
/* Define the ThreadX Linux mutex get, release, and release all functions.  */
293
/* Obtain the global ThreadX protection mutex, with nesting support.
   If the calling pthread already owns the mutex, only the nesting count is
   incremented.  Otherwise the ThreadX thread (if any) running on this
   pthread is flagged as attempting mutex access, the underlying Linux
   mutex is acquired, and ownership is recorded.

   Fixes versus the prior version: pthread_t values are compared with
   pthread_equal() as required by POSIX (pthread_t need not be an
   arithmetic type), and the redundant second pthread_self() call when
   recording the owner is removed — the ID is already in
   current_thread_id.  */
void _tx_linux_mutex_obtain(TX_LINUX_MUTEX *mutex)
{

TX_THREAD   *thread_ptr;
TX_THREAD   *scan_ptr;
pthread_t   current_thread_id;
UINT        i;

    /* Pickup the current thread ID. */
    current_thread_id = pthread_self();

    /* Is the protection already owned by this pthread? */
    if (pthread_equal(mutex -> tx_linux_mutex_owner, current_thread_id))
    {

        /* Simply increment the nested counter. */
        mutex -> tx_linux_mutex_nested_count++;
    }
    else
    {

        /* Search the virtual cores for the ThreadX thread running on this
           pthread.  Each core's pointer is read exactly once per iteration
           since the array can change concurrently.  */
        thread_ptr = TX_NULL;
        for (i = 0; i < TX_THREAD_SMP_MAX_CORES; i++)
        {

            /* Pickup the thread pointer for this core. */
            scan_ptr = _tx_thread_current_ptr[i];

            /* Is this the thread obtaining the mutex? */
            if ((scan_ptr) && (pthread_equal(scan_ptr -> tx_thread_linux_thread_id, current_thread_id)))
            {

                /* Found it - remember it and stop looking. */
                thread_ptr = scan_ptr;
                break;
            }
        }

        /* If a ThreadX thread was found, indicate it is attempting to access
           the mutex (consulted by the preemption logic). */
        if (thread_ptr)
        {

            /* Yes, set the mutex access flag. */
            thread_ptr -> tx_thread_linux_mutex_access = TX_TRUE;
        }

        /* Get the Linux mutex - blocks until available. */
        pthread_mutex_lock(&mutex -> tx_linux_mutex);

        /* At this point we have the mutex. */

        /* Clear the mutex access flag for the thread. */
        if (thread_ptr)
        {

            /* Yes, clear the mutex access flag. */
            thread_ptr -> tx_thread_linux_mutex_access = TX_FALSE;
        }

        /* Start the nesting counter at one. */
        mutex -> tx_linux_mutex_nested_count = 1;

        /* Remember the owner (ID already captured above). */
        mutex -> tx_linux_mutex_owner = current_thread_id;
    }
}
371
372
/* Release one nesting level of the global ThreadX protection mutex.
   Only the owning pthread may release; when the nesting count reaches
   zero the underlying Linux mutex is unlocked and the caller briefly
   sleeps to let other ready threads run.  */
void _tx_linux_mutex_release(TX_LINUX_MUTEX *mutex)
{

pthread_t caller_id;


    /* Identify the calling pthread. */
    caller_id = pthread_self();

    /* A release attempt by a non-owner is a system error. */
    if (mutex -> tx_linux_mutex_owner != caller_id)
    {

        /* Increment the system error counter. */
        _tx_linux_system_error++;
        return;
    }

    /* Nothing to do when no protection is currently held. */
    if (mutex -> tx_linux_mutex_nested_count == 0)
    {
        return;
    }

    /* Drop one nesting level. */
    mutex -> tx_linux_mutex_nested_count--;

    /* Still nested?  Then the critical section remains held. */
    if (mutex -> tx_linux_mutex_nested_count != 0)
    {
        return;
    }

    /* Final level released - clear the recorded owner. */
    mutex -> tx_linux_mutex_owner = 0;

    /* Release the underlying Linux mutex, counting any failure. */
    if (pthread_mutex_unlock(&mutex -> tx_linux_mutex) != 0)
    {

        /* Increment the system error counter. */
        _tx_linux_system_error++;
    }

    /* Defensive check: every further successful unlock indicates a
       bookkeeping error somewhere. */
    while (pthread_mutex_unlock(&mutex -> tx_linux_mutex) == 0)
    {

        /* Increment the system error counter. */
        _tx_linux_system_error++;
    }

    /* Relinquish to other ready threads. */
    _tx_linux_thread_sleep(1000);
}
428
429
/* Release ALL nesting levels of the global ThreadX protection mutex at
   once.  Only the owning pthread may do this; non-owner attempts bump
   the system error counter.  */
void _tx_linux_mutex_release_all(TX_LINUX_MUTEX *mutex)
{

    /* A release attempt by a non-owner is a system error. */
    if (mutex -> tx_linux_mutex_owner != pthread_self())
    {

        /* Increment the system error counter. */
        _tx_linux_system_error++;
        return;
    }

    /* Nothing to do when no protection is currently held. */
    if (mutex -> tx_linux_mutex_nested_count == 0)
    {
        return;
    }

    /* Discard every nesting level and clear the recorded owner. */
    mutex -> tx_linux_mutex_nested_count = 0;
    mutex -> tx_linux_mutex_owner = 0;

    /* Release the underlying Linux mutex, counting any failure. */
    if (pthread_mutex_unlock(&mutex -> tx_linux_mutex) != 0)
    {

        /* Increment the system error counter. */
        _tx_linux_system_error++;
    }

    /* Defensive check: every further successful unlock indicates a
       bookkeeping error somewhere. */
    while (pthread_mutex_unlock(&mutex -> tx_linux_mutex) == 0)
    {

        /* Increment the system error counter. */
        _tx_linux_system_error++;
    }
}
471
/* Port-specific completion of thread deletion: cancel the backing
   pthread (nudging it awake while pthread_cancel reports EAGAIN), join
   it, and destroy its run semaphore.  SMP protection is dropped for the
   duration of the wait and re-acquired at the end.  */
void _tx_thread_delete_port_completion(TX_THREAD *thread_ptr, UINT tx_interrupt_save)
{

INT         cancel_status;
sem_t       *run_semaphore;
pthread_t   victim_id;

    /* Capture the pthread ID and run semaphore of the thread being deleted. */
    victim_id = thread_ptr -> tx_thread_linux_thread_id;
    run_semaphore = &(thread_ptr -> tx_thread_linux_thread_run_semaphore);

    /* Drop SMP protection while waiting for the pthread to terminate. */
    _tx_thread_smp_unprotect(tx_interrupt_save);

    /* Keep nudging the pthread awake until cancellation is accepted. */
    while ((cancel_status = pthread_cancel(victim_id)) == EAGAIN)
    {
        _tx_linux_thread_resume(victim_id);
        tx_linux_sem_post(run_semaphore);
        _tx_linux_thread_sleep(1000000);
    }

    /* Reap the pthread and tear down its run semaphore. */
    pthread_join(victim_id, NULL);
    sem_destroy(run_semaphore);

    /* Re-acquire SMP protection.  NOTE(review): the new save value is stored
       into a by-value parameter and thus not visible to the caller - this
       mirrors the original code; confirm against the port macro contract. */
    tx_interrupt_save = _tx_thread_smp_protect();
}
495
/* Port-specific completion of thread reset: identical mechanics to the
   delete completion - cancel the backing pthread (retrying while
   pthread_cancel reports EAGAIN), join it, and destroy its run
   semaphore, with SMP protection released during the wait.  */
void _tx_thread_reset_port_completion(TX_THREAD *thread_ptr, UINT tx_interrupt_save)
{

INT         cancel_status;
sem_t       *run_semaphore;
pthread_t   target_id;

    /* Capture the pthread ID and run semaphore of the thread being reset. */
    target_id = thread_ptr -> tx_thread_linux_thread_id;
    run_semaphore = &(thread_ptr -> tx_thread_linux_thread_run_semaphore);

    /* Drop SMP protection while waiting for the pthread to terminate. */
    _tx_thread_smp_unprotect(tx_interrupt_save);

    /* Keep nudging the pthread awake until cancellation is accepted. */
    while ((cancel_status = pthread_cancel(target_id)) == EAGAIN)
    {
        _tx_linux_thread_resume(target_id);
        tx_linux_sem_post(run_semaphore);
        _tx_linux_thread_sleep(1000000);
    }

    /* Reap the pthread and tear down its run semaphore. */
    pthread_join(target_id, NULL);
    sem_destroy(run_semaphore);

    /* Re-acquire SMP protection.  NOTE(review): the new save value is stored
       into a by-value parameter and thus not visible to the caller - this
       mirrors the original code; confirm against the port macro contract. */
    tx_interrupt_save = _tx_thread_smp_protect();
}
519