/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/


#define TX_SOURCE_CODE
#define TX_THREAD_SMP_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_thread.h"
#include "tx_timer.h"


/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_smp_protect                            SMP/Linux/GCC     */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function gets protection for running inside the ThreadX       */
/*    source. This is accomplished by a combination of a test-and-set    */
/*    flag and periodically disabling interrupts. In this Linux port     */
/*    the "interrupt disable" is simulated with a process-wide mutex     */
/*    (_tx_linux_mutex); the test-and-set flag lives in the global       */
/*    _tx_thread_smp_protection structure, keyed by virtual core.        */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    Previous Status Register                                            */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    pthread_self                          Get Linux thread ID           */
/*    pthread_exit                          Terminate this Linux thread   */
/*    _tx_thread_smp_core_get               Get the current core ID       */
/*    _tx_linux_mutex_obtain                Obtain the global mutex       */
/*    _tx_linux_mutex_release               Release one mutex nesting     */
/*    _tx_linux_mutex_release_all           Release all mutex nestings    */
/*    tx_linux_sem_post                     Wake the scheduler thread     */
/*    _tx_linux_thread_sleep                Sleep to let others run       */
/*    _tx_linux_debug_entry_insert          Insert debug log entry        */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    ThreadX Source                                                      */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020     William E. Lamie         Initial Version 6.1           */
/*                                                                        */
/**************************************************************************/
UINT  _tx_thread_smp_protect(void)
{

pthread_t           current_thread_id;
int                 exit_code = 0;      /* Dummy status passed to pthread_exit; its address
                                           outlives the call because the thread never returns.  */
struct sched_param  sp;                 /* NOTE(review): unused here — appears to be a leftover
                                           from a port that adjusted scheduling priority; confirm
                                           before removing.  */
UINT                core;
UINT                interrupt_posture;
TX_THREAD           *current_thread;
UINT                current_state;

    /* Loop to attempt to get the protection. The outer loop retries when the
       protection is currently owned by a different core.  */
    do
    {

        /* First, get the critical section (the global Linux mutex that stands
           in for disabling interrupts).  */
        do
        {


            /* Lock Linux mutex.  */
            _tx_linux_mutex_obtain(&_tx_linux_mutex);

            /* Pickup the current thread ID.  */
            current_thread_id = pthread_self();

            /* Pickup the current core.  */
            core = _tx_thread_smp_core_get();

            /* Pickup the current thread pointer.  */
            current_thread = _tx_thread_current_ptr[core];

            /* Determine if this is a thread (THREAD_PRIORITY_LOWEST) and it does not
               match the current thread pointer.  */
            if ((_tx_linux_threadx_thread) &&
                ((!current_thread) || (current_thread -> tx_thread_linux_thread_id != current_thread_id)))
            {

                /* This indicates the Linux thread was actually terminated by ThreadX and is
                   only being allowed to run in order to clean up its resources. Release every
                   nesting of the mutex before exiting so other threads are not blocked.  */
                _tx_linux_mutex_release_all(&_tx_linux_mutex);

                /* Exit this thread.  */
                pthread_exit((void *)&exit_code);
            }

            /* Determine if this is not actually a thread (e.g. the scheduler or a timer
               context). Non-thread callers keep the mutex and proceed directly.  */
            if (!_tx_linux_threadx_thread)
                break;

            /* Now check for terminated or completed state... and preempt disable is not set!  */
            if ((current_thread) && (_tx_thread_preempt_disable == 0))
            {

                /* Pickup current state.  */
                current_state = current_thread -> tx_thread_state;

                /* Now check for terminated or completed state.  */
                if ((current_state == TX_TERMINATED) || (current_state == TX_COMPLETED))
                {

                    /* Clear the preemption flag.  */
                    current_thread -> tx_thread_linux_deferred_preempt = TX_FALSE;

                    /* Indicate that this thread was suspended asynchronously.  */
                    current_thread -> tx_thread_linux_suspension_type = 1;

                    /* Save the remaining time-slice and disable it.  */
                    if (_tx_timer_time_slice[core])
                    {

                        current_thread -> tx_thread_time_slice = _tx_timer_time_slice[core];
                        _tx_timer_time_slice[core] = 0;
                    }

                    /* Clear the current thread pointer.  */
                    _tx_thread_current_ptr[core] = TX_NULL;

                    /* Clear this mapping entry.  */
                    _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_thread = TX_NULL;
                    _tx_linux_virtual_cores[core].tx_thread_smp_core_mapping_linux_thread_id = 0;

                    /* Indicate that this thread is now ready for scheduling again by another core.  */
                    current_thread -> tx_thread_smp_core_control = 1;

                    /* Debug entry.  */
                    _tx_linux_debug_entry_insert("SCHEDULE-thread_terminate_preempt_complete", __FILE__, __LINE__);

                    /* Release the scheduler's semaphore to immediately try again.  */
                    tx_linux_sem_post(&_tx_linux_scheduler_semaphore);

                    /* This indicates the Linux thread was actually terminated by ThreadX and is
                       only being allowed to run in order to clean up its resources.  */
                    _tx_linux_mutex_release_all(&_tx_linux_mutex);

                    /* Exit this thread.  */
                    pthread_exit((void *)&exit_code);
                }
            }

            /* Determine if the deferred preempt flag is set.  */
            if ((current_thread) && (current_thread -> tx_thread_linux_deferred_preempt))
            {

                /* Release the scheduler's semaphore to immediately try again.  */
                tx_linux_sem_post(&_tx_linux_scheduler_semaphore);

                /* Release the protection that is nested.  */
                _tx_linux_mutex_release_all(&_tx_linux_mutex);

                /* Sleep just to let other threads run.  */
                _tx_linux_thread_sleep(1000000);
            }
            else
            {

                /* Get out of the protection loop.  */
                break;
            }
        } while (1);

        /* Setup the returned interrupt posture. Captured BEFORE we set the disabled
           flag below, so the caller can later restore the previous state.  */
        interrupt_posture = _tx_linux_global_int_disabled_flag;

        /* Determine if the protection is already active for this core.  */
        if (_tx_thread_smp_protection.tx_thread_smp_protect_core == core)
        {

            /* Yes, we have the protection already.  */

            /* Increment the protection count.  */
            _tx_thread_smp_protection.tx_thread_smp_protect_count++;

            /* Set the global interrupt disable value.  */
            _tx_linux_global_int_disabled_flag = TX_TRUE;

            /* Debug entry.  */
            _tx_linux_debug_entry_insert("PROTECT-obtained-nested", __FILE__, __LINE__);

            /* Get out of the retry loop.  */
            break;
        }
        /* Determine if the protection is available (0xFFFFFFFF marks "no owner").  */
        else if (_tx_thread_smp_protection.tx_thread_smp_protect_core == 0xFFFFFFFF)
        {

            /* At this point we have the protection. Setup the protection structure.  */
            _tx_thread_smp_protection.tx_thread_smp_protect_in_force = TX_TRUE;
            _tx_thread_smp_protection.tx_thread_smp_protect_thread = current_thread;
            _tx_thread_smp_protection.tx_thread_smp_protect_core = core;
            _tx_thread_smp_protection.tx_thread_smp_protect_count = 1;
            _tx_thread_smp_protection.tx_thread_smp_protect_linux_thread_id = current_thread_id;

            /* Set the global interrupt disable value.  */
            _tx_linux_global_int_disabled_flag = TX_TRUE;

            /* Debug entry.  */
            _tx_linux_debug_entry_insert("PROTECT-obtained", __FILE__, __LINE__);

            /* Get out of the retry loop.  */
            break;
        }
        else
        {

            /* Protection is owned by another core.  */

            /* Release the protection and start over.  */
            _tx_linux_mutex_release(&_tx_linux_mutex);
        }
    } while (1);

    /* Set the global interrupt disable value.  */
    _tx_linux_global_int_disabled_flag = TX_TRUE;

    /* Return the interrupt posture.  */
    return(interrupt_posture);
}