1 ;/**************************************************************************/
2 ;/*                                                                        */
3 ;/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
4 ;/*                                                                        */
5 ;/*       This software is licensed under the Microsoft Software License   */
6 ;/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
7 ;/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
8 ;/*       and in the root directory of this software.                      */
9 ;/*                                                                        */
10 ;/**************************************************************************/
11 ;
12 ;
13 ;/**************************************************************************/
14 ;/**************************************************************************/
15 ;/**                                                                       */
16 ;/** ThreadX Component                                                     */
17 ;/**                                                                       */
18 ;/**   Thread - Low Level SMP Support                                      */
19 ;/**                                                                       */
20 ;/**************************************************************************/
21 ;/**************************************************************************/
22 
23     MACRO
24 $label _tx_thread_smp_protect_lock_got
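;
;    /* Note: on entry, r1 is assumed to hold this core's ID and r2 the address
;       of _tx_thread_smp_protection; r0 holds the caller's saved CPSR (used only
;       when TX_MPCORE_DEBUG_ENABLE is defined).  r3 (and r4 in the debug build)
;       are used as scratch.  */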
25 ;
26 ;    /* Set the currently owned core.  */
27 ;    _tx_thread_smp_protection.tx_thread_smp_protect_core = this_core;
28 ;
29     STR     r1, [r2, #8]                        ; Store this core
30 ;
31 ;    /* Increment the protection count. */
32 ;    _tx_thread_smp_protection.tx_thread_smp_protect_count++;
33 ;
34     LDR     r3, [r2, #12]                       ; Pickup ownership count
35     ADD     r3, r3, #1                          ; Increment ownership count
36     STR     r3, [r2, #12]                       ; Store ownership count
    DMB                                         ; Ensure the protection structure updates are visible to other cores
38 
39     IF :DEF:TX_MPCORE_DEBUG_ENABLE
40     LSL     r3, r1, #2                          ; Build offset to array indexes
41     LDR     r4, =_tx_thread_current_ptr         ; Pickup start of the current thread array
42     ADD     r4, r3, r4                          ; Build index into the current thread array
43     LDR     r3, [r4]                            ; Pickup current thread for this core
44     STR     r3, [r2, #4]                        ; Save current thread pointer
45     STR     LR, [r2, #16]                       ; Save caller's return address
46     STR     r0, [r2, #20]                       ; Save CPSR
47     ENDIF
48 
49     MEND
50 
51     MACRO
52 $label _tx_thread_smp_protect_remove_from_front_of_list
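;
;    /* Note: the entry at the head of the wait list is invalidated with
;       0xFFFFFFFF before the head is advanced (with wrap-around); r3-r6 are
;       used as scratch.  */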
53 ;
54 ;    /* Remove ourselves from the list.  */
55 ;    _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head++] =  0xFFFFFFFF;
56 ;
57     MOV     r3, #0xFFFFFFFF                     ; Build the invalid core value
58     LDR     r4, =_tx_thread_smp_protect_wait_list_head ; Get the address of the head
59     LDR     r5, [r4]                            ; Get the value of the head
60     LDR     r6, =_tx_thread_smp_protect_wait_list ; Get the address of the list
61     STR     r3, [r6, r5, LSL #2]                ; Store the invalid core value
62     ADD     r5, r5, #1                          ; Increment the head
63 ;
64 ;    /* Did we wrap?  */
;    if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_size)
66 ;    {
67 ;
68     LDR     r3, =_tx_thread_smp_protect_wait_list_size ; Load address of core list size
69     LDR     r3, [r3]                            ; Load the max cores value
70     CMP     r5, r3                              ; Compare the head to it
71     BNE     $label._store_new_head              ; Are we at the max?
72 ;
73 ;    _tx_thread_smp_protect_wait_list_head = 0;
74 ;
75     EOR     r5, r5, r5                          ; We're at the max. Set it to zero
76 ;
77 ;    }
78 ;
79 $label._store_new_head
80 
81     STR     r5, [r4]                            ; Store the new head
82 ;
83 ;    /* We have the lock!  */
84 ;    return;
85 ;
86     MEND
87 
88 
89     MACRO
90 $label _tx_thread_smp_protect_wait_list_lock_get
91 ;VOID  _tx_thread_smp_protect_wait_list_lock_get()
92 ;{
93 ;    /* We do this until we have the lock.  */
94 ;    while (1)
95 ;    {
96 ;
97 $label._tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock
98 ;
99 ;    /* Is the list lock available?  */
;    UINT protect_in_force = load_exclusive(&_tx_thread_smp_protect_wait_list_lock_protect_in_force);
101 ;
102     LDR     r1, =_tx_thread_smp_protect_wait_list_lock_protect_in_force
103     LDREX   r2, [r1]                            ; Pickup the protection flag
104 ;
105 ;    if (protect_in_force == 0)
106 ;    {
107 ;
    CMP     r2, #0                              ; Is the list lock available?
109     BNE     $label._tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock ; No, protection not available
110 ;
111 ;    /* Try to get the list.  */
112 ;    int status = store_exclusive(&_tx_thread_smp_protect_wait_list_lock_protect_in_force, 1);
113 ;
114     MOV     r2, #1                              ; Build lock value
115     STREX   r3, r2, [r1]                        ; Attempt to get the protection
116 ;
117 ;    if (status == SUCCESS)
118 ;
    CMP     r3, #0                              ; Check the result of the store exclusive
120     BNE     $label._tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock ; Did it fail? If so, try again.
121 ;
122 ;    /* We have the lock!  */
123 ;    return;
124 ;
125     MEND
126 
127 
128     MACRO
129 $label _tx_thread_smp_protect_wait_list_add
130 ;VOID  _tx_thread_smp_protect_wait_list_add(UINT new_core)
131 ;{
132 ;
133 ;    /* We're about to modify the list, so get the list lock.  */
134 ;    _tx_thread_smp_protect_wait_list_lock_get();
135 ;
    PUSH    {r1-r2}                             ; Save registers clobbered by the lock get

$label _tx_thread_smp_protect_wait_list_lock_get ; Get the list lock

    POP     {r1-r2}                             ; Restore registers
141 ;
142 ;    /* Add this core.  */
143 ;    _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_tail++] = new_core;
144 ;
145     LDR     r3, =_tx_thread_smp_protect_wait_list_tail ; Get the address of the tail
146     LDR     r4, [r3]                            ; Get the value of tail
147     LDR     r5, =_tx_thread_smp_protect_wait_list ; Get the address of the list
148     STR     r1, [r5, r4, LSL #2]                ; Store the new core value
149     ADD     r4, r4, #1                          ; Increment the tail
150 ;
151 ;    /* Did we wrap?  */
152 ;    if (_tx_thread_smp_protect_wait_list_tail == _tx_thread_smp_protect_wait_list_size)
153 ;    {
154 ;
155     LDR     r5, =_tx_thread_smp_protect_wait_list_size ; Load max cores address
156     LDR     r5, [r5]                            ; Load max cores value
    CMP     r4, r5                              ; Compare the tail to the max cores value
158     BNE     $label._tx_thread_smp_protect_wait_list_add__no_wrap ; Did we wrap?
159 ;
160 ;    _tx_thread_smp_protect_wait_list_tail = 0;
161 ;
    MOV     r4, #0                              ; Yes, wrap the tail back to zero
163 ;
164 ;    }
165 ;
166 $label._tx_thread_smp_protect_wait_list_add__no_wrap
167 
168     STR     r4, [r3]                            ; Store the new tail value.
169 ;
170 ;    /* Release the list lock.  */
171 ;    _tx_thread_smp_protect_wait_list_lock_protect_in_force = 0;
172 ;
173     MOV     r3, #0                              ; Build lock value
174     LDR     r4, =_tx_thread_smp_protect_wait_list_lock_protect_in_force
175     STR     r3, [r4]                            ; Store the new value
176 
177     MEND
178 
179 
180     MACRO
181 $label _tx_thread_smp_protect_wait_list_remove
182 ;VOID _tx_thread_smp_protect_wait_list_remove(UINT core)
183 ;{
184 ;
185 ;    /* Get the core index.  */
186 ;    UINT core_index;
187 ;    for (core_index = 0;; core_index++)
188 ;
189     EOR     r1, r1, r1                          ; Clear for 'core_index'
190     LDR     r2, =_tx_thread_smp_protect_wait_list ; Get the address of the list
191 ;
192 ;    {
193 ;
194 $label._tx_thread_smp_protect_wait_list_remove__check_cur_core
195 ;
196 ;    /* Is this the core?  */
197 ;    if (_tx_thread_smp_protect_wait_list[core_index] == core)
198 ;    {
199 ;        break;
200 ;
201     LDR     r3, [r2, r1, LSL #2]                ; Get the value at the current index
202     CMP     r3, r10                             ; Did we find the core?
    BEQ     $label._tx_thread_smp_protect_wait_list_remove__found_core ; Yes, we found the core
204 ;
205 ;    }
206 ;
207     ADD     r1, r1, #1                          ; Increment cur index
208     B       $label._tx_thread_smp_protect_wait_list_remove__check_cur_core ; Restart the loop
209 ;
210 ;    }
211 ;
212 $label._tx_thread_smp_protect_wait_list_remove__found_core
213 ;
214 ;    /* We're about to modify the list. Get the lock. We need the lock because another
215 ;       core could be simultaneously adding (a core is simultaneously trying to get
216 ;       the inter-core lock) or removing (a core is simultaneously being preempted,
217 ;       like what is currently happening).  */
218 ;    _tx_thread_smp_protect_wait_list_lock_get();
219 ;
    PUSH    {r1}                                ; Save the core index (clobbered by the lock get)

$label _tx_thread_smp_protect_wait_list_lock_get ; Get the list lock

    POP     {r1}                                ; Restore the core index
225 ;
226 ;    /* We remove by shifting.  */
227 ;    while (core_index != _tx_thread_smp_protect_wait_list_tail)
228 ;    {
229 ;
230 $label._tx_thread_smp_protect_wait_list_remove__compare_index_to_tail
231 
232     LDR     r2, =_tx_thread_smp_protect_wait_list_tail ; Load tail address
233     LDR     r2, [r2]                            ; Load tail value
234     CMP     r1, r2                              ; Compare cur index and tail
    BEQ     $label._tx_thread_smp_protect_wait_list_remove__removed ; If equal, we're done shifting
236 ;
237 ;    UINT next_index = core_index + 1;
238 ;
239     MOV     r2, r1                              ; Move current index to next index register
240     ADD     r2, r2, #1                          ; Add 1
241 ;
242 ;    if (next_index == _tx_thread_smp_protect_wait_list_size)
243 ;    {
244 ;
    LDR     r3, =_tx_thread_smp_protect_wait_list_size ; Load address of the list size
    LDR     r3, [r3]                            ; Load the list size value
    CMP     r2, r3                              ; Compare the next index to the list size
    BNE     $label._tx_thread_smp_protect_wait_list_remove__next_index_no_wrap ; If not equal, no wrap
249 ;
250 ;    next_index = 0;
251 ;
    MOV     r2, #0                              ; Yes, wrap the next index to zero
253 ;
254 ;    }
255 ;
256 $label._tx_thread_smp_protect_wait_list_remove__next_index_no_wrap
257 ;
258 ;    list_cores[core_index] = list_cores[next_index];
259 ;
260     LDR     r0, =_tx_thread_smp_protect_wait_list ; Get the address of the list
261     LDR     r3, [r0, r2, LSL #2]                ; Get the value at the next index
262     STR     r3, [r0, r1, LSL #2]                ; Store the value at the current index
263 ;
264 ;    core_index = next_index;
265 ;
    MOV     r1, r2                              ; Move next index to current index

    B       $label._tx_thread_smp_protect_wait_list_remove__compare_index_to_tail ; Continue the shift loop
269 ;
270 ;    }
271 ;
272 $label._tx_thread_smp_protect_wait_list_remove__removed
273 ;
274 ;    /* Now update the tail.  */
275 ;    if (_tx_thread_smp_protect_wait_list_tail == 0)
276 ;    {
277 ;
278     LDR     r0, =_tx_thread_smp_protect_wait_list_tail ; Load tail address
279     LDR     r1, [r0]                            ; Load tail value
    CMP     r1, #0                              ; Is the tail zero?
    BNE     $label._tx_thread_smp_protect_wait_list_remove__tail_not_zero ; If not, just decrement it
282 ;
283 ;    _tx_thread_smp_protect_wait_list_tail = _tx_thread_smp_protect_wait_list_size;
284 ;
    LDR     r2, =_tx_thread_smp_protect_wait_list_size ; Load address of the list size
    LDR     r1, [r2]                            ; Tail is zero; wrap it to the list size
287 ;
288 ;    }
289 ;
290 $label._tx_thread_smp_protect_wait_list_remove__tail_not_zero
291 ;
292 ;    _tx_thread_smp_protect_wait_list_tail--;
293 ;
    SUB     r1, r1, #1                          ; Decrement the tail
295     STR     r1, [r0]                            ; Store new tail value
296 ;
297 ;    /* Release the list lock.  */
298 ;    _tx_thread_smp_protect_wait_list_lock_protect_in_force = 0;
299 ;
300     MOV     r0, #0                              ; Build lock value
301     LDR     r1, =_tx_thread_smp_protect_wait_list_lock_protect_in_force ; Load lock address
302     STR     r0, [r1]                            ; Store the new value
303 ;
304 ;    /* We're no longer waiting. Note that this should be zero since, again,
305 ;       this function is only called when a thread preemption is occurring.  */
306 ;    _tx_thread_smp_protect_wait_counts[core]--;
307 ;
308     LDR     r1, =_tx_thread_smp_protect_wait_counts ; Load wait list counts
309     LDR     r2, [r1, r10, LSL #2]               ; Load waiting value
310     SUB     r2, r2, #1                          ; Subtract 1
311     STR     r2, [r1, r10, LSL #2]               ; Store new waiting value
312     MEND
313 
314