1 /*
2  * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * -----------------------------------------------------------------------------
19  *
20  * Project:     CMSIS-RTOS RTX
21  * Title:       Cortex-A Core definitions
22  *
23  * -----------------------------------------------------------------------------
24  */
25 
26 #ifndef RTX_CORE_CA_H_
27 #define RTX_CORE_CA_H_
28 
29 #ifndef RTX_CORE_C_H_
30 #ifndef RTE_COMPONENTS_H
31 #include "RTE_Components.h"
32 #endif
33 #include CMSIS_device_header
34 #endif
35 
#include <stdbool.h>
typedef bool bool_t;            // RTX boolean type, aliased to C99 bool

#ifndef FALSE
#define FALSE                   ((bool_t)0)
#endif

#ifndef TRUE
#define TRUE                    ((bool_t)1)
#endif

// Non-secure domain flag (no TrustZone-M style split in this A-profile port)
#define DOMAIN_NS               0
// LDREX/STREX-based atomic_* helpers below are available
#define EXCLUSIVE_ACCESS        1

// Kernel tick interrupt handler entry point
#define OS_TICK_HANDLER         osRtxTick_Handler

// CPSR bit definitions
#define CPSR_T_BIT              0x20U   // Thumb state bit
#define CPSR_I_BIT              0x80U   // IRQ mask bit
#define CPSR_F_BIT              0x40U   // FIQ mask bit

// CPSR mode bitmasks
#define CPSR_MODE_USER          0x10U   // User mode (unprivileged)
#define CPSR_MODE_SYSTEM        0x1FU   // System mode (privileged)
60 
61 /// xPSR_Initialization Value
62 /// \param[in]  privileged      true=privileged, false=unprivileged
63 /// \param[in]  thumb           true=Thumb, false=Arm
64 /// \return                     xPSR Init Value
xPSR_InitVal(bool_t privileged,bool_t thumb)65 __STATIC_INLINE uint32_t xPSR_InitVal (bool_t privileged, bool_t thumb) {
66   uint32_t psr;
67 
68   if (privileged) {
69     if (thumb) {
70       psr = CPSR_MODE_SYSTEM | CPSR_T_BIT;
71     } else {
72       psr = CPSR_MODE_SYSTEM;
73     }
74   } else {
75     if (thumb) {
76       psr = CPSR_MODE_USER   | CPSR_T_BIT;
77     } else {
78       psr = CPSR_MODE_USER;
79     }
80   }
81 
82   return psr;
83 }
84 
85 // Stack Frame:
86 //  - VFP-D32: D16-31, D0-D15, FPSCR, Reserved, R4-R11, R0-R3, R12, LR, PC, CPSR
87 //  - VFP-D16:         D0-D15, FPSCR, Reserved, R4-R11, R0-R3, R12, LR, PC, CPSR
88 //  - Basic:                                    R4-R11, R0-R3, R12, LR, PC, CPSR
89 
90 /// Stack Frame Initialization Value
91 #define STACK_FRAME_INIT_VAL    0x00U
92 
93 /// Stack Offset of Register R0
94 /// \param[in]  stack_frame     Stack Frame
95 /// \return                     R0 Offset
StackOffsetR0(uint8_t stack_frame)96 __STATIC_INLINE uint32_t StackOffsetR0 (uint8_t stack_frame) {
97   uint32_t offset;
98 
99   if        ((stack_frame & 0x04U) != 0U) {
100     offset = (32U*8U) + (2U*4U) + (8U*4U);
101   } else if ((stack_frame & 0x02U) != 0U) {
102     offset = (16U*8U) + (2U*4U) + (8U*4U);
103   } else {
104     offset =                      (8U*4U);
105   }
106   return offset;
107 }
108 
109 
110 //  ==== Emulated Cortex-M functions ====
111 
/// Get PSP (Process Stack Pointer) - emulate M profile: SP_usr - (8*4)
/// On Cortex-A the thread stack pointer is the banked SP_usr; subtracting
/// the 8-word frame portion (R0-R3, R12, LR, PC, CPSR - see the Stack Frame
/// layout above) yields the value a Cortex-M PSP would hold.
/// \return      emulated PSP Register value
#if defined(__CC_ARM)
#pragma push
#pragma arm
static __asm    uint32_t __get_PSP (void) {
  sub   sp, sp, #4
  stm   sp, {sp}^
  pop   {r0}
  sub   r0, r0, #32
  bx    lr
}
#pragma pop
#else
#ifdef __ICCARM__
__arm
#else
__attribute__((target("arm")))
#endif
__STATIC_INLINE uint32_t __get_PSP (void) {
  register uint32_t ret;

  // "stm sp,{sp}^" stores the banked User-mode SP to the stack; it is then
  // popped back and reduced by 32 bytes (8 registers) to emulate PSP.
  __ASM volatile (
    "sub  sp,sp,#4\n\t"
    "stm  sp,{sp}^\n\t"
    "pop  {%[ret]}\n\t"
    "sub  %[ret],%[ret],#32\n\t"
    : [ret] "=&l" (ret)
    :
    : "memory"
  );

  return ret;
}
#endif
147 
/// Set Control Register - not needed for A profile
/// \param[in]  control         Control Register value to set
__STATIC_INLINE void __set_CONTROL(uint32_t control) {
  // A-profile has no CONTROL register; accept and ignore the value so
  // profile-independent RTX code can call this unconditionally.
  (void)control;
}
153 
154 
155 //  ==== Core functions ====
156 
/// Check if running Privileged
/// \return     true=privileged, false=unprivileged
__STATIC_INLINE bool_t IsPrivileged (void) {
  // Every CPSR mode except User is privileged.
  return (__get_mode() != CPSR_MODE_USER);
}
162 
163 /// Check if in Exception
164 /// \return     true=exception, false=thread
IsException(void)165 __STATIC_INLINE bool_t IsException (void) {
166   return ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM));
167 }
168 
/// Check if IRQ is Masked
/// \return     true=masked, false=not masked
__STATIC_INLINE bool_t IsIrqMasked (void) {
  // IRQ masking is not tracked in this port; always report "not masked".
  return  FALSE;
}
174 
175 
//  ==== Core Peripherals functions ====

// Software PendSV flag: Cortex-A has no PendSV exception, so the Cortex-M
// pending bit is emulated with this variable (defined elsewhere in the
// IRQ support code, which polls it).
extern uint8_t IRQ_PendSV;

/// Setup SVC and PendSV System Service Calls (not needed on Cortex-A)
__STATIC_INLINE void SVC_Setup (void) {
  // Intentionally empty on A-profile.
}

/// Get Pending SV (Service Call) Flag
/// \return     Pending SV Flag
__STATIC_INLINE uint8_t GetPendSV (void) {
  return (IRQ_PendSV);
}

/// Clear Pending SV (Service Call) Flag
__STATIC_INLINE void ClrPendSV (void) {
  IRQ_PendSV = 0U;
}

/// Set Pending SV (Service Call) Flag
__STATIC_INLINE void SetPendSV (void) {
  IRQ_PendSV = 1U;
}
199 
200 
201 //  ==== Service Calls definitions ====
202 
#if defined(__CC_ARM)

// Arm Compiler 5: __svc_indirect(0) makes the compiler emit "SVC 0" and
// pass the address of the RTX service routine (svcRtx<f>) as the indirect
// call target.
#define __SVC_INDIRECT(n) __svc_indirect(n)

// SVC0_<x>N(f,t,...): wrapper __svc<f> with <x> user arguments, no return.
// SVC0_<x>(f,t,...):  wrapper __svc<f> with <x> user arguments returning t.
#define SVC0_0N(f,t)                                                           \
__SVC_INDIRECT(0) t    svc##f (t(*)());                                        \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (void) {                                         \
  svc##f(svcRtx##f);                                                           \
}

#define SVC0_0(f,t)                                                            \
__SVC_INDIRECT(0) t    svc##f (t(*)());                                        \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (void) {                                         \
  return svc##f(svcRtx##f);                                                    \
}

#define SVC0_1N(f,t,t1)                                                        \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1),t1);                                   \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1) {                                        \
  svc##f(svcRtx##f,a1);                                                        \
}

#define SVC0_1(f,t,t1)                                                         \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1),t1);                                   \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1) {                                        \
  return svc##f(svcRtx##f,a1);                                                 \
}

#define SVC0_2(f,t,t1,t2)                                                      \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2),t1,t2);                             \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2) {                                 \
  return svc##f(svcRtx##f,a1,a2);                                              \
}

#define SVC0_3(f,t,t1,t2,t3)                                                   \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2,t3),t1,t2,t3);                       \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2, t3 a3) {                          \
  return svc##f(svcRtx##f,a1,a2,a3);                                           \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                                                \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4);                 \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) {                   \
  return svc##f(svcRtx##f,a1,a2,a3,a4);                                        \
}
255 
#elif defined(__ICCARM__)

// IAR: load the service routine address into r12 immediately before the
// SVC instruction so the SVC handler can dispatch to it.
#define SVC_ArgF(f)                                                            \
  __asm(                                                                       \
    "mov r12,%0\n"                                                             \
    :: "r"(&f): "r12"                                                          \
  );

// __swi plus a #pragma-selected SWI number generates the "SVC n" call.
#define STRINGIFY(a) #a
#define __SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi

// SVC0_<x>N(f,t,...): wrapper __svc<f> with <x> user arguments, no return.
// SVC0_<x>(f,t,...):  wrapper __svc<f> with <x> user arguments returning t.
#define SVC0_0N(f,t)                                                           \
__SVC_INDIRECT(0) t    svc##f ();                                              \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (void) {                                         \
  SVC_ArgF(svcRtx##f);                                                         \
  svc##f();                                                                    \
}

#define SVC0_0(f,t)                                                            \
__SVC_INDIRECT(0) t    svc##f ();                                              \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (void) {                                         \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f();                                                             \
}

#define SVC0_1N(f,t,t1)                                                        \
__SVC_INDIRECT(0) t    svc##f (t1 a1);                                         \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1) {                                        \
  SVC_ArgF(svcRtx##f);                                                         \
  svc##f(a1);                                                                  \
}

#define SVC0_1(f,t,t1)                                                         \
__SVC_INDIRECT(0) t    svc##f (t1 a1);                                         \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1) {                                        \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f(a1);                                                           \
}

#define SVC0_2(f,t,t1,t2)                                                      \
__SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2);                                  \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2) {                                 \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f(a1,a2);                                                        \
}

#define SVC0_3(f,t,t1,t2,t3)                                                   \
__SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2, t3 a3);                           \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2, t3 a3) {                          \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f(a1,a2,a3);                                                     \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                                                \
__SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2, t3 a3, t4 a4);                    \
__attribute__((always_inline))                                                 \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) {                   \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f(a1,a2,a3,a4);                                                  \
}
322 
#else   // !(defined(__CC_ARM) || defined(__ICCARM__))

// GCC/Clang: bind user arguments to r0-r3 and the service routine address
// to r12 via explicit register variables, then issue "SVC 0" inline.
#define SVC_RegF "r12"

// Declare r<n> as an output-only register variable (result register).
#define SVC_ArgN(n) \
register uint32_t __r##n __ASM("r"#n)

// Bind argument value a to register r<n>.
#define SVC_ArgR(n,a) \
register uint32_t __r##n __ASM("r"#n) = (uint32_t)a

// Bind the service routine address f to r12.
#define SVC_ArgF(f) \
register uint32_t __rf   __ASM(SVC_RegF) = (uint32_t)f

// Input operand lists for 0..4 arguments (r12 is always an input).
#define SVC_In0 "r"(__rf)
#define SVC_In1 "r"(__rf),"r"(__r0)
#define SVC_In2 "r"(__rf),"r"(__r0),"r"(__r1)
#define SVC_In3 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2)
#define SVC_In4 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2),"r"(__r3)

// Output operand lists: no result, or result returned in r0.
#define SVC_Out0
#define SVC_Out1 "=r"(__r0)

// Clobber lists for argument registers not already listed as operands.
#define SVC_CL0
#define SVC_CL1 "r1"
#define SVC_CL2 "r0","r1"

#define SVC_Call0(in, out, cl)                                                 \
  __ASM volatile ("svc 0" : out : in : cl)

// SVC0_<x>N(f,t,...): wrapper __svc<f> with <x> user arguments, no return.
// SVC0_<x>(f,t,...):  wrapper __svc<f> with <x> user arguments returning t.
#define SVC0_0N(f,t)                                                           \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (void) {                                            \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In0, SVC_Out0, SVC_CL2);                                       \
}

#define SVC0_0(f,t)                                                            \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (void) {                                            \
  SVC_ArgN(0);                                                                 \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In0, SVC_Out1, SVC_CL1);                                       \
  return (t) __r0;                                                             \
}

#define SVC0_1N(f,t,t1)                                                        \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1) {                                           \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In1, SVC_Out0, SVC_CL1);                                       \
}

#define SVC0_1(f,t,t1)                                                         \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1) {                                           \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In1, SVC_Out1, SVC_CL1);                                       \
  return (t) __r0;                                                             \
}

#define SVC0_2(f,t,t1,t2)                                                      \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) {                                    \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgR(1,a2);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In2, SVC_Out1, SVC_CL0);                                       \
  return (t) __r0;                                                             \
}

#define SVC0_3(f,t,t1,t2,t3)                                                   \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) {                             \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgR(1,a2);                                                              \
  SVC_ArgR(2,a3);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In3, SVC_Out1, SVC_CL0);                                       \
  return (t) __r0;                                                             \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                                                \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) {                      \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgR(1,a2);                                                              \
  SVC_ArgR(2,a3);                                                              \
  SVC_ArgR(3,a4);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In4, SVC_Out1, SVC_CL0);                                       \
  return (t) __r0;                                                             \
}
419 
420 
421 //  ==== Exclusive Access Operation ====
422 
423 #if (EXCLUSIVE_ACCESS == 1)
424 
425 /// Atomic Access Operation: Write (8-bit)
426 /// \param[in]  mem             Memory address
427 /// \param[in]  val             Value to write
428 /// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
  mov    r2,r0
1
  ldrexb r0,[r2]
  strexb r3,r1,[r2]
  cmp    r3,#0
  bne    %B1
  bx     lr
}
#else
__STATIC_INLINE uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
// Pe550: "variable was set but never used" - res is only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint8_t  ret;

  // LDREXB/STREXB retry loop: load the old byte, attempt an exclusive store
  // of the new one, retry until the store succeeds (res == 0).
  // NOTE(review): the asm alters flags via CMP but the clobber list omits
  // "cc" (the chk/inc/dec variants below list it) - confirm intentional.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexb %[ret],[%[mem]]\n\t"
    "strexb %[res],%[val],[%[mem]]\n\t"
    "cmp    %[res],#0\n\t"
    "bne    1b\n\t"
  : [ret] "=&l" (ret),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [val] "l"   (val)
  : "memory"
  );

  return ret;
}
#endif
469 
470 /// Atomic Access Operation: Set bits (32-bit)
471 /// \param[in]  mem             Memory address
472 /// \param[in]  bits            Bit mask
473 /// \return                     New value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
  mov   r2,r0
1
  ldrex r0,[r2]
  orr   r0,r0,r1
  strex r3,r0,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  // LDREX/STREX retry loop: OR the mask into the loaded value and store it
  // back exclusively; ret holds the NEW value that was stored.
  // NOTE(review): CMP alters flags but "cc" is not in the clobber list here.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[val],[%[mem]]\n\t"
    "orr   %[ret],%[val],%[bits]\n\t"
    "strex %[res],%[ret],[%[mem]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "memory"
  );

  return ret;
}
#endif
517 
518 /// Atomic Access Operation: Clear bits (32-bit)
519 /// \param[in]  mem             Memory address
520 /// \param[in]  bits            Bit mask
521 /// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cmp   r3,#0
  bne   %B1
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  // LDREX/STREX retry loop: BIC-clear the mask and store back exclusively;
  // ret holds the PREVIOUS value (loaded before clearing).
  // NOTE(review): CMP alters flags but "cc" is not in the clobber list here.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "bic   %[val],%[ret],%[bits]\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "memory"
  );

  return ret;
}
#endif
566 
567 /// Atomic Access Operation: Check if all specified bits (32-bit) are active and clear them
568 /// \param[in]  mem             Memory address
569 /// \param[in]  bits            Bit mask
570 /// \return                     Active bits before clearing or 0 if not active
#if defined(__CC_ARM)
static __asm    uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  and   r4,r0,r1
  cmp   r4,r1
  beq   %F2
  clrex
  movs  r0,#0
  pop   {r4,pc}
2
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cmp   r3,#0
  bne   %B1
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  // If ALL requested bits are set, clear them exclusively and return the
  // previous value; otherwise drop the reservation (CLREX) and return 0.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "and   %[val],%[ret],%[bits]\n\t"
    "cmp   %[val],%[bits]\n\t"
    "beq   2f\n\t"
    "clrex\n\t"
    "movs  %[ret],#0\n\t"
    "b     3f\n"
  "2:\n\t"
    "bic   %[val],%[ret],%[bits]\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif
630 
631 /// Atomic Access Operation: Check if any specified bits (32-bit) are active and clear them
632 /// \param[in]  mem             Memory address
633 /// \param[in]  bits            Bit mask
634 /// \return                     Active bits before clearing or 0 if not active
#if defined(__CC_ARM)
static __asm    uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  tst   r0,r1
  bne   %F2
  clrex
  movs  r0,#0
  pop   {r4,pc}
2
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cmp    r3,#0
  bne   %B1
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  // If ANY requested bit is set, clear the requested bits exclusively and
  // return the previous value; otherwise CLREX and return 0.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "tst   %[ret],%[bits]\n\t"
    "bne   2f\n\t"
    "clrex\n\t"
    "movs  %[ret],#0\n\t"
    "b     3f\n"
  "2:\n\t"
    "bic   %[val],%[ret],%[bits]\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif
692 
693 /// Atomic Access Operation: Increment (32-bit)
694 /// \param[in]  mem             Memory address
695 /// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_inc32 (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  adds  r1,r0,#1
  strex r3,r1,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE uint32_t atomic_inc32 (uint32_t *mem) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  // LDREX/STREX retry loop: store loaded value + 1; returns PREVIOUS value.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "adds  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif
738 
739 /// Atomic Access Operation: Increment (16-bit) if Less Than
740 /// \param[in]  mem             Memory address
741 /// \param[in]  max             Maximum value
742 /// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrexh r0,[r2]
  cmp    r1,r0
  bhi    %F2
  clrex
  pop    {r4,pc}
2
  adds   r4,r0,#1
  strexh r3,r4,[r2]
  cmp    r3,#0
  bne    %B1
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  // Increment only while the current value is below max (unsigned compare);
  // otherwise CLREX and leave memory unchanged. Returns the PREVIOUS value.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "cmp    %[max],%[ret]\n\t"
    "bhi    2f\n\t"
    "clrex\n\t"
    "b      3f\n"
  "2:\n\t"
    "adds   %[val],%[ret],#1\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cmp    %[res],#0\n\t"
    "bne    1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [max] "l"   (max)
  : "cc", "memory"
  );

  return ret;
}
#endif
798 
799 /// Atomic Access Operation: Increment (16-bit) and clear on Limit
800 /// \param[in]  mem             Memory address
/// \param[in]  lim             Limit value (counter wraps to 0 on reaching it)
802 /// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrexh r0,[r2]
  adds   r4,r0,#1
  cmp    r1,r4
  bhi    %F2
  movs   r4,#0
2
  strexh r3,r4,[r2]
  cmp    r3,#0
  bne    %B1
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  // Increment; if the incremented value reaches lim it wraps to 0 before
  // being stored. Returns the PREVIOUS value.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "adds   %[val],%[ret],#1\n\t"
    "cmp    %[lim],%[val]\n\t"
    "bhi    2f\n\t"
    "movs   %[val],#0\n"
  "2:\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cmp    %[res],#0\n\t"
    "bne    1b\n"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [lim] "l"   (lim)
  : "cc", "memory"
  );

  return ret;
}
#endif
855 
856 /// Atomic Access Operation: Decrement (32-bit)
857 /// \param[in]  mem             Memory address
858 /// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_dec32 (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  subs  r1,r0,#1
  strex r3,r1,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32 (uint32_t *mem) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  // LDREX/STREX retry loop: store loaded value - 1; returns PREVIOUS value.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "subs  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif
901 
902 /// Atomic Access Operation: Decrement (32-bit) if Not Zero
903 /// \param[in]  mem             Memory address
904 /// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_dec32_nz (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  cmp   r0,#0
  bne   %F2
  clrex
  bx    lr
2
  subs  r1,r0,#1
  strex r3,r1,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32_nz (uint32_t *mem) {
// Pe550: "variable was set but never used" - only consumed inside asm
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  // Decrement only when the current value is non-zero; a zero value just
  // drops the reservation (CLREX). Returns the PREVIOUS value.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "cmp   %[ret],#0\n\t"
    "bne   2f\n"
    "clrex\n\t"
    "b     3f\n"
  "2:\n\t"
    "subs  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif
958 
/// Atomic Access Operation: Decrement (16-bit) if Not Zero
/// Halfword variant of atomic_dec32_nz using LDREXH/STREXH: atomically
/// decrements *mem unless it is already 0, in which case the exclusive
/// monitor is released (CLREX) and *mem is left unchanged.
/// \param[in]  mem             Memory address
/// \return                     Previous value (0 means no decrement happened)
#if defined(__CC_ARM)
// Arm Compiler 5 embedded-assembler variant.
// r0 = mem on entry; previous value is returned in r0.
static __asm    uint16_t atomic_dec16_nz (uint16_t *mem) {
  mov    r2,r0
1
  ldrexh r0,[r2]
  cmp    r0,#0
  bne    %F2
  clrex
  bx     lr
2
  subs   r1,r0,#1
  strexh r3,r1,[r2]
  cmp    r3,#0
  bne    %B1
  bx      lr
}
#else
// GNU/IAR extended inline-assembly variant of the same exclusive-access loop.
__STATIC_INLINE uint16_t atomic_dec16_nz (uint16_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550    /* IAR: 'val'/'res' written only inside asm */
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  // If the loaded halfword is 0, release the exclusive monitor and exit;
  // otherwise decrement and retry from label 1 until STREXH succeeds.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "cmp    %[ret],#0\n\t"
    "bne    2f\n\t"
    "clrex\n\t"
    "b      3f\n"
  "2:\n\t"
    "subs   %[val],%[ret],#1\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cmp    %[res],#0\n\t"
    "bne    1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif
1015 
/// Atomic Access Operation: Link Get
/// Atomically pops the head element of a singly-linked list whose next
/// pointer is stored in the first word of each element: *root is replaced
/// by the popped element's first word. An empty list (NULL root) releases
/// the exclusive monitor (CLREX) and returns NULL.
/// \param[in]  root            Root address
/// \return                     Link (NULL if the list was empty)
#if defined(__CC_ARM)
// Arm Compiler 5 embedded-assembler variant.
// r0 = root on entry; the popped link (or NULL) is returned in r0.
static __asm    void *atomic_link_get (void **root) {
  mov   r2,r0
1
  ldrex r0,[r2]
  cmp   r0,#0
  bne   %F2
  clrex
  bx    lr
2
  ldr   r1,[r0]
  strex r3,r1,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
// GNU/IAR extended inline-assembly variant of the same exclusive-access loop.
__STATIC_INLINE void *atomic_link_get (void **root) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550    /* IAR: 'val'/'res' written only inside asm */
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register void    *ret;

  // Load head exclusively; if NULL, release the monitor and exit. Otherwise
  // read the head's first word (next link) and store it as the new head,
  // retrying from label 1 until STREX succeeds.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[root]]\n\t"
    "cmp   %[ret],#0\n\t"
    "bne   2f\n\t"
    "clrex\n\t"
    "b     3f\n"
  "2:\n\t"
    "ldr   %[val],[%[ret]]\n\t"
    "strex %[res],%[val],[%[root]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [root] "l"   (root)
  : "cc", "memory"
  );

  return ret;
}
#endif
1072 
/// Atomic Access Operation: Link Put
/// Atomically pushes an element onto a singly-linked list: the current
/// *root is written into the element's first word, then *root is updated
/// to point at the element. Restarts from the top if *root changed between
/// the plain read and the exclusive store.
/// \param[in]  root            Root address
/// \param[in]  link            Link (element to push; its first word is
///                             overwritten with the old *root)
#if defined(__CC_ARM)
// Arm Compiler 5 embedded-assembler variant.
// r0 = root, r1 = link on entry.
static __asm    void atomic_link_put (void **root, void *link) {
1
  ldr   r2,[r0]
  str   r2,[r1]
  dmb
  ldrex r2,[r0]
  ldr   r3,[r1]
  cmp   r3,r2
  bne   %B1
  strex r3,r1,[r0]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
// GNU/IAR extended inline-assembly variant of the same exclusive-access loop.
__STATIC_INLINE void atomic_link_put (void **root, void *link) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550    /* IAR: values written only inside asm */
#endif
  register uint32_t val1, val2, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif

  // Store current head into link's first word, DMB to order that store,
  // then re-load the head exclusively and verify it still matches what was
  // stored into the link; on mismatch or STREX failure, restart at label 1.
  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldr   %[val1],[%[root]]\n\t"
    "str   %[val1],[%[link]]\n\t"
    "dmb\n\t"
    "ldrex %[val1],[%[root]]\n\t"
    "ldr   %[val2],[%[link]]\n\t"
    "cmp   %[val2],%[val1]\n\t"
    "bne   1b\n\t"
    "strex %[res],%[link],[%[root]]\n\t"
    "cmp   %[res],#0\n\t"
    "bne   1b\n"
  : [val1] "=&l" (val1),
    [val2] "=&l" (val2),
    [res]  "=&l" (res)
  : [root] "l"   (root),
    [link] "l"   (link)
  : "cc", "memory"
  );
}
#endif
1125 
1126 #endif  // (EXCLUSIVE_ACCESS == 1)
1127 
1128 
1129 #endif  // RTX_CORE_CA_H_
1130