/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_thread.h"


/* Define small routines used for the TX_DISABLE/TX_RESTORE macros.  */

UINT   _tx_thread_interrupt_disable(void)
{

UINT    previous_value;


    previous_value =  _tx_thread_interrupt_control(TX_INT_DISABLE);
    return(previous_value);
}


VOID   _tx_thread_interrupt_restore(UINT previous_posture)
{

    previous_posture =  _tx_thread_interrupt_control(previous_posture);
}


/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_interrupt_control                        Linux/GNU       */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function is responsible for changing the interrupt lockout     */
/*    posture of the system.                                              */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    new_posture                           New interrupt lockout posture */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    old_posture                           Old interrupt lockout posture */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    tx_linux_mutex_lock                                                 */
/*    pthread_self                                                        */
/*    pthread_equal                                                       */
/*    tx_linux_mutex_recursive_unlock                                     */
/*    pthread_exit                                                        */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    Application Code                                                    */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  09-30-2020     William E. Lamie         Initial Version 6.1           */
/*                                                                        */
/**************************************************************************/
UINT   _tx_thread_interrupt_control(UINT new_posture)
{

UINT        old_posture;
TX_THREAD   *thread_ptr;
pthread_t   thread_id;
int         exit_code = 0;


    /* Lock the Linux mutex.  */
    tx_linux_mutex_lock(_tx_linux_mutex);

    /* Pickup the id of the current Linux thread.  */
    thread_id =  pthread_self();

    /* Pickup the current ThreadX thread pointer.  */
    thread_ptr =  _tx_thread_current_ptr;

    /* Determine if this is a ThreadX thread that does not
       match the current thread pointer.  */
    if ((_tx_linux_threadx_thread) &&
        ((!thread_ptr) || (!pthread_equal(thread_ptr -> tx_thread_linux_thread_id, thread_id))))
    {

        /* This indicates the Linux thread was actually terminated by ThreadX and is only
           being allowed to run in order to clean up its resources.  */

        /* Unlock the Linux mutex and exit this Linux thread.  */
        tx_linux_mutex_recursive_unlock(_tx_linux_mutex);
        pthread_exit((void *) &exit_code);
    }

    /* Determine the current interrupt lockout condition.  */
    if (tx_linux_mutex_recursive_count == 1)
    {

        /* Interrupts are enabled.  */
        old_posture =  TX_INT_ENABLE;
    }
    else
    {

        /* Interrupts are disabled.  */
        old_posture =  TX_INT_DISABLE;
    }

    /* First, determine if this call is from a non-thread context.  */
    if (_tx_thread_system_state)
    {

        /* Determine how to apply the new posture.  */
        if (new_posture == TX_INT_ENABLE)
        {

            /* Clear the global disabled flag.  */
            _tx_linux_global_int_disabled_flag =  TX_FALSE;

            /* Release one level of the recursive critical section mutex.  */
            tx_linux_mutex_recursive_unlock(_tx_linux_mutex);
        }
        else if (new_posture == TX_INT_DISABLE)
        {

            /* Set the global disabled flag.  */
            _tx_linux_global_int_disabled_flag =  TX_TRUE;
        }
    }
    else if (thread_ptr)
    {

        /* Determine how to apply the new posture.  */
        if (new_posture == TX_INT_ENABLE)
        {

            /* Clear the thread's disabled flag.  */
            _tx_thread_current_ptr -> tx_thread_linux_int_disabled_flag =  TX_FALSE;

            /* Release one level of the recursive critical section mutex.  */
            tx_linux_mutex_recursive_unlock(_tx_linux_mutex);
        }
        else if (new_posture == TX_INT_DISABLE)
        {

            /* Set the thread's disabled flag.  */
            _tx_thread_current_ptr -> tx_thread_linux_int_disabled_flag =  TX_TRUE;
        }
    }

    /* Return the previous interrupt disable posture.  */
    return(old_posture);
}
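

/* The sketch below is illustrative only and is not part of this port.  It
   assumes a typical application build against tx_api.h and shows how
   application code might call the tx_interrupt_control service (which
   resolves to _tx_thread_interrupt_control above) to save and later restore
   the interrupt lockout posture around a critical section.  The names
   demo_counter and demo_update_counter are hypothetical and exist only for
   this example.  */
#if 0
#include "tx_api.h"

static ULONG    demo_counter;               /* Hypothetical shared data.  */

VOID    demo_update_counter(VOID)
{

UINT    old_posture;


    /* Lockout interrupts and remember the previous posture.  */
    old_posture =  tx_interrupt_control(TX_INT_DISABLE);

    /* Update the shared data while interrupts are locked out.  */
    demo_counter++;

    /* Restore the previous interrupt lockout posture.  */
    tx_interrupt_control(old_posture);
}
#endif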