/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/


#define TX_SOURCE_CODE
#define TX_THREAD_SMP_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_thread.h"
#include "tx_timer.h"
#include <stdio.h>
#include <errno.h>

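/* External references used to coordinate with the simulated timer interrupt thread.  */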
extern sem_t _tx_linux_isr_semaphore;
extern UINT _tx_linux_timer_waiting;
extern pthread_t _tx_linux_timer_id;
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_schedule                               SMP/Linux/GCC     */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function waits for a thread control block pointer to appear in */
/*    the _tx_thread_execute_ptr variable.  Once a thread pointer appears */
/*    in the variable, the corresponding thread is resumed.               */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_linux_mutex_obtain                                              */
/*    _tx_linux_debug_entry_insert                                        */
/*    _tx_linux_thread_resume                                             */
/*    tx_linux_sem_post                                                   */
/*    sem_trywait                                                         */
/*    tx_linux_sem_wait                                                   */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_initialize_kernel_enter          ThreadX entry function         */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020     William E. Lamie         Initial Version 6.1           */
/*                                                                        */
/**************************************************************************/
VOID   _tx_thread_schedule(VOID)
{
UINT            core;
TX_THREAD       *current_thread;
TX_THREAD       *execute_thread;
struct timespec ts;
UCHAR preempt_retry = TX_FALSE;

    /* Loop forever.  */
    while(1)
    {

        /* Lock the Linux mutex.  */
        _tx_linux_mutex_obtain(&_tx_linux_mutex);

        /* Check for a system error condition.  */
        if (_tx_linux_global_int_disabled_flag != TX_FALSE)
        {

            /* This should not happen... increment the system error counter.  */
            _tx_linux_system_error++;
        }

        /* Debug entry.  */
        _tx_linux_debug_entry_insert("SCHEDULE-wake_up", __FILE__, __LINE__);

        /* Loop through each virtual core to look for an idle core.  */
        for (core = 0; core <  TX_THREAD_SMP_MAX_CORES; core++)
        {

            /* Pickup the current thread pointer for this core.  */
            current_thread =  _tx_thread_current_ptr[core];

            /* Determine if the thread's deferred preemption flag is set.  */
            if ((current_thread) && (current_thread -> tx_thread_linux_deferred_preempt))
            {
                if (_tx_thread_preempt_disable)
                {

                    /* Preemption is disabled. Retry later.  */
                    preempt_retry = TX_TRUE;
                    break;
                }

                if (current_thread -> tx_thread_state != TX_TERMINATED)
                {

                    /* Suspend the thread to simulate preemption.  Note that the thread is suspended BEFORE the protection get
                       flag is checked to ensure there is not a race condition between this thread and the update of that flag.  */
                    _tx_linux_thread_suspend(current_thread -> tx_thread_linux_thread_id);

                    /* Clear the preemption flag.  */
                    current_thread -> tx_thread_linux_deferred_preempt =  TX_FALSE;

                    /* Indicate that this thread was suspended asynchronously.  */
                    current_thread -> tx_thread_linux_suspension_type =  1;

                    /* Save the remaining time-slice and disable it.  */
                    if (_tx_timer_time_slice[core])
                    {

                        current_thread -> tx_thread_time_slice =  _tx_timer_time_slice[core];
                        _tx_timer_time_slice[core] =  0;
                    }
                }

                /* Clear the current thread pointer.  */
                _tx_thread_current_ptr[core] =  TX_NULL;

                /* Clear this mapping entry.  */
                _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_thread =          TX_NULL;
                _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_linux_thread_id = 0;

                /* Indicate that this thread is now ready for scheduling again by another core.  */
                current_thread -> tx_thread_smp_core_control =  1;

                /* Debug entry.  */
                _tx_linux_debug_entry_insert("SCHEDULE-core_preempt_complete", __FILE__, __LINE__);
            }

            /* Determine if this core is idle.  */
            if (_tx_thread_current_ptr[core] == TX_NULL)
            {

                /* Yes, this core is idle, determine if there is a thread that can be scheduled for it.  */

                /* Pickup the execute thread pointer.  */
                execute_thread =  _tx_thread_execute_ptr[core];

                /* Is there a thread that is ready to execute on this core?  */
                if ((execute_thread) && (execute_thread -> tx_thread_smp_core_control))
                {

                    /* Yes! We have a thread to execute. Note that the critical section is already
                       active from the scheduling loop above.  */

                    /* Setup the current thread pointer.  */
                    _tx_thread_current_ptr[core] =  execute_thread;

                    /* Remember the virtual core in the thread control block.  */
                    execute_thread -> tx_thread_linux_virtual_core =  core;

                    /* Setup the virtual core mapping structure.  */
                    _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_thread =          execute_thread;
                    _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_linux_thread_id = execute_thread -> tx_thread_linux_thread_id;

                    /* Clear the execution control flag.  */
                    execute_thread -> tx_thread_smp_core_control =  0;

                    /* Increment the run count for this thread.  */
                    execute_thread -> tx_thread_run_count++;

                    /* Setup time-slice, if present.  */
                    _tx_timer_time_slice[core] =  execute_thread -> tx_thread_time_slice;

                    /* Determine how the thread was last suspended.  */
                    if (execute_thread -> tx_thread_linux_suspension_type == 1)
                    {

                        /* Clear the suspension type.  */
                        execute_thread -> tx_thread_linux_suspension_type =  0;

                        /* Debug entry.  */
                        _tx_linux_debug_entry_insert("SCHEDULE-resume_thread", __FILE__, __LINE__);

                        /* Pseudo interrupt suspension.  The thread is not waiting on
                           its run semaphore.  */
                        _tx_linux_thread_resume(execute_thread -> tx_thread_linux_thread_id);
                    }
                    else if (execute_thread -> tx_thread_linux_suspension_type == 2)
                    {

                        /* Clear the suspension type.  */
                        execute_thread -> tx_thread_linux_suspension_type =  0;

                        /* Debug entry.  */
                        _tx_linux_debug_entry_insert("SCHEDULE-release_sem", __FILE__, __LINE__);

                        /* Make sure the run semaphore count is 0 before posting.  */
                        while(!sem_trywait(&execute_thread -> tx_thread_linux_thread_run_semaphore));

                        /* Let the thread run again by releasing its run semaphore.  */
                        tx_linux_sem_post(&execute_thread -> tx_thread_linux_thread_run_semaphore);

                        /* Block the timer ISR.  */
                        if(_tx_linux_timer_waiting)
                        {

                            /* The scheduler was woken up by the timer ISR.
                               Let the ThreadX thread wake up first.  */
                            tx_linux_sem_wait(&_tx_linux_scheduler_semaphore);

                            /* Wake up the timer ISR.  */
                            tx_linux_sem_post(&_tx_linux_isr_semaphore);
                        }
                        else
                        {

                            /* The scheduler was woken up by a ThreadX thread.
                               Suspend the timer thread and let the ThreadX thread wake up first.  */
                            _tx_linux_thread_suspend(_tx_linux_timer_id);
                            tx_linux_sem_wait(&_tx_linux_scheduler_semaphore);
                            _tx_linux_thread_resume(_tx_linux_timer_id);

                        }
                    }
                    else
                    {

                        /* System error, increment the counter.  */
                        _tx_linux_system_error++;
                    }
                }
            }
        }

        if (preempt_retry)
        {

            /* Unlock the Linux mutex.  */
            _tx_linux_mutex_release_all(&_tx_linux_mutex);

            /* Let the user thread run to reset _tx_thread_preempt_disable.  */
            _tx_linux_thread_sleep(1);

            preempt_retry = TX_FALSE;

            continue;
        }

        /* Debug entry.  */
        _tx_linux_debug_entry_insert("SCHEDULE-self_suspend_sem", __FILE__, __LINE__);

        /* Unlock the Linux mutex.  */
        _tx_linux_mutex_release_all(&_tx_linux_mutex);

        /* Now suspend the main thread so the application thread can run.  */
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_nsec += 2000000;
        if (ts.tv_nsec >= 1000000000)
        {
            ts.tv_nsec -= 1000000000;
            ts.tv_sec++;
        }
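        /* Wait on the scheduler semaphore, but no longer than roughly 2 ms, so the
           scheduling loop still runs periodically even if the semaphore is never posted.  */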
        tx_linux_sem_timedwait(&_tx_linux_scheduler_semaphore, &ts);
        clock_gettime(CLOCK_REALTIME, &ts);
    }
}


/* Define the ThreadX Linux mutex get, release, and release all functions.  */

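/* These routines implement the critical-section protection used by this Linux port.
   Each TX_LINUX_MUTEX records its owner pthread ID and a nesting count so the owning
   thread can obtain it recursively; the underlying pthread mutex is locked and
   unlocked only at the outermost level.  */
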
void _tx_linux_mutex_obtain(TX_LINUX_MUTEX *mutex)
{

TX_THREAD       *thread_ptr;
pthread_t       current_thread_id;
UINT            i;

    /* Pickup the current thread ID.  */
    current_thread_id =  pthread_self();

    /* Is the protection owned?  */
    if (mutex -> tx_linux_mutex_owner == current_thread_id)
    {

        /* Simply increment the nested counter.  */
        mutex -> tx_linux_mutex_nested_count++;
    }
    else
    {

        /* Loop to find a thread matching this ID.  */
        i =  0;
        do
        {

            /* Pickup the thread pointer.  */
            thread_ptr =  _tx_thread_current_ptr[i];

            /* Is this thread obtaining the mutex?  */
            if ((thread_ptr) && (thread_ptr -> tx_thread_linux_thread_id == current_thread_id))
            {

                /* We have found the thread, get out of the loop.  */
                break;
            }

            /* Look at next core.  */
            i++;

        } while (i < TX_THREAD_SMP_MAX_CORES);

        /* Determine if we found a thread.  */
        if (i >= TX_THREAD_SMP_MAX_CORES)
        {

            /* Set the thread pointer to NULL to indicate a thread was not found.  */
            thread_ptr =  TX_NULL;
        }

        /* If a thread was found, indicate the thread is attempting to access the mutex.  */
        if (thread_ptr)
        {

            /* Yes, the current ThreadX thread is attempting to get the mutex - set the flag.  */
            thread_ptr -> tx_thread_linux_mutex_access =  TX_TRUE;
        }

        /* Get the Linux mutex.  */
        pthread_mutex_lock(&mutex -> tx_linux_mutex);

        /* At this point we have the mutex.  */

        /* Clear the mutex access flag for the thread.  */
        if (thread_ptr)
        {

            /* Yes, clear the flag indicating the current ThreadX thread is attempting to get the mutex.  */
            thread_ptr -> tx_thread_linux_mutex_access =  TX_FALSE;
        }

        /* Set the nesting counter to 1 for the new owner.  */
        mutex -> tx_linux_mutex_nested_count =  1;

        /* Remember the owner.  */
        mutex -> tx_linux_mutex_owner = pthread_self();
    }
}


void _tx_linux_mutex_release(TX_LINUX_MUTEX *mutex)
{

pthread_t   current_thread_id;


    /* Pickup the current thread ID.  */
    current_thread_id =  pthread_self();

    /* Ensure the caller is the mutex owner.  */
    if (mutex -> tx_linux_mutex_owner == current_thread_id)
    {

        /* Determine if there is protection.  */
        if (mutex -> tx_linux_mutex_nested_count)
        {

            /* Decrement the nesting counter.  */
            mutex -> tx_linux_mutex_nested_count--;

            /* Determine if the critical section is now being released.  */
            if (mutex -> tx_linux_mutex_nested_count == 0)
            {

                /* Yes, it is being released, so clear the owner.  */
                mutex -> tx_linux_mutex_owner =  0;

                /* Finally, release the mutex.  */
                if (pthread_mutex_unlock(&mutex -> tx_linux_mutex) != 0)
                {

                    /* Increment the system error counter.  */
                    _tx_linux_system_error++;
                }

                /* Just in case, make sure the mutex is not still owned.  */
                while (pthread_mutex_unlock(&mutex -> tx_linux_mutex) == 0)
                {

                    /* Increment the system error counter.  */
                    _tx_linux_system_error++;
                }

                /* Relinquish to other ready threads.  */
                _tx_linux_thread_sleep(1000);
            }
        }
    }
    else
    {

        /* Increment the system error counter.  */
        _tx_linux_system_error++;
    }
}


void _tx_linux_mutex_release_all(TX_LINUX_MUTEX *mutex)
{

    /* Ensure the caller is the mutex owner.  */
    if (mutex -> tx_linux_mutex_owner == pthread_self())
    {

        /* Determine if there is protection.  */
        if (mutex -> tx_linux_mutex_nested_count)
        {

            /* Clear the nesting counter.  */
            mutex -> tx_linux_mutex_nested_count =  0;

            /* Yes, it is being released, so clear the owner.  */
            mutex -> tx_linux_mutex_owner =  0;

            /* Finally, release the mutex.  */
            if (pthread_mutex_unlock(&mutex -> tx_linux_mutex) != 0)
            {

                /* Increment the system error counter.  */
                _tx_linux_system_error++;
            }

            /* Just in case, make sure the mutex is not still owned.  */
            while (pthread_mutex_unlock(&mutex -> tx_linux_mutex) == 0)
            {

                /* Increment the system error counter.  */
                _tx_linux_system_error++;
            }
        }
    }
    else
    {

        /* Increment the system error counter.  */
        _tx_linux_system_error++;
    }
}

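/* Define the port-specific completion of thread deletion: release the SMP protection,
   cancel and join the underlying Linux pthread, destroy its run semaphore, and then
   reacquire the protection before returning.  */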
void _tx_thread_delete_port_completion(TX_THREAD *thread_ptr, UINT tx_interrupt_save)
{
INT             linux_status;
sem_t           *threadrunsemaphore;
pthread_t       thread_id;

    /* Pickup the Linux thread ID and run semaphore for the thread being deleted.  */
    thread_id = thread_ptr -> tx_thread_linux_thread_id;
    threadrunsemaphore = &(thread_ptr -> tx_thread_linux_thread_run_semaphore);

    /* Release the SMP protection while waiting for the Linux thread to terminate.  */
    _tx_thread_smp_unprotect(tx_interrupt_save);

    /* Cancel the Linux thread, retrying while pthread_cancel returns EAGAIN: resume
       the thread, post its run semaphore, sleep briefly, and try again.  */
    do
    {
        linux_status = pthread_cancel(thread_id);
        if(linux_status != EAGAIN)
        {
            break;
        }
        _tx_linux_thread_resume(thread_id);
        tx_linux_sem_post(threadrunsemaphore);
        _tx_linux_thread_sleep(1000000);
    } while (1);

    /* Wait for the Linux thread to exit and clean up its run semaphore.  */
    pthread_join(thread_id, NULL);
    sem_destroy(threadrunsemaphore);

    /* Reacquire the SMP protection before returning to the caller.  */
    tx_interrupt_save =   _tx_thread_smp_protect();
}

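/* Define the port-specific completion of thread reset, which performs the same
   Linux pthread teardown as thread deletion.  */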
void _tx_thread_reset_port_completion(TX_THREAD *thread_ptr, UINT tx_interrupt_save)
{
INT             linux_status;
sem_t           *threadrunsemaphore;
pthread_t       thread_id;

    /* Pickup the Linux thread ID and run semaphore for the thread being reset.  */
    thread_id = thread_ptr -> tx_thread_linux_thread_id;
    threadrunsemaphore = &(thread_ptr -> tx_thread_linux_thread_run_semaphore);

    /* Release the SMP protection while waiting for the Linux thread to terminate.  */
    _tx_thread_smp_unprotect(tx_interrupt_save);

    /* Cancel the Linux thread, retrying while pthread_cancel returns EAGAIN.  */
    do
    {
        linux_status = pthread_cancel(thread_id);
        if(linux_status != EAGAIN)
        {
            break;
        }
        _tx_linux_thread_resume(thread_id);
        tx_linux_sem_post(threadrunsemaphore);
        _tx_linux_thread_sleep(1000000);
    } while (1);

    /* Wait for the Linux thread to exit and clean up its run semaphore.  */
    pthread_join(thread_id, NULL);
    sem_destroy(threadrunsemaphore);

    /* Reacquire the SMP protection before returning to the caller.  */
    tx_interrupt_save =   _tx_thread_smp_protect();
}