/*
 * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * -----------------------------------------------------------------------------
 *
 * Project:     CMSIS-RTOS RTX
 * Title:       Cortex-M Core definitions
 *
 * -----------------------------------------------------------------------------
 */

#ifndef RTX_CORE_CM_H_
#define RTX_CORE_CM_H_

#ifndef RTX_CORE_C_H_
#ifndef RTE_COMPONENTS_H
#include "RTE_Components.h"
#endif
#include CMSIS_device_header
#endif

#include <stdbool.h>
typedef bool bool_t;

#ifndef FALSE
#define FALSE                   ((bool_t)0)
#endif

#ifndef TRUE
#define TRUE                    ((bool_t)1)
#endif

#ifndef DOMAIN_NS
#define DOMAIN_NS               0
#endif

#if    (DOMAIN_NS == 1)
#if   ((!defined(__ARM_ARCH_8M_BASE__)   || (__ARM_ARCH_8M_BASE__   == 0)) && \
       (!defined(__ARM_ARCH_8M_MAIN__)   || (__ARM_ARCH_8M_MAIN__   == 0)) && \
       (!defined(__ARM_ARCH_8_1M_MAIN__) || (__ARM_ARCH_8_1M_MAIN__ == 0)))
#error "Non-secure domain requires ARMv8-M Architecture!"
#endif
#endif

#ifndef EXCLUSIVE_ACCESS
#if   ((defined(__ARM_ARCH_7M__)        && (__ARM_ARCH_7M__        != 0)) || \
       (defined(__ARM_ARCH_7EM__)       && (__ARM_ARCH_7EM__       != 0)) || \
       (defined(__ARM_ARCH_8M_BASE__)   && (__ARM_ARCH_8M_BASE__   != 0)) || \
       (defined(__ARM_ARCH_8M_MAIN__)   && (__ARM_ARCH_8M_MAIN__   != 0)) || \
       (defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ != 0)))
#define EXCLUSIVE_ACCESS        1
#else
#define EXCLUSIVE_ACCESS        0
#endif
#endif

#define OS_TICK_HANDLER         SysTick_Handler

/// xPSR Initialization Value
/// \param[in]  privileged      true=privileged, false=unprivileged
/// \param[in]  thumb           true=Thumb, false=ARM
/// \return                     xPSR Init Value
__STATIC_INLINE uint32_t xPSR_InitVal (bool_t privileged, bool_t thumb) {
  (void)privileged;
  (void)thumb;
  return (0x01000000U);
}

// Stack Frame:
//  - Extended: S16-S31, R4-R11, R0-R3, R12, LR, PC, xPSR, S0-S15, FPSCR
//  - Basic:             R4-R11, R0-R3, R12, LR, PC, xPSR

/// Stack Frame Initialization Value (EXC_RETURN[7..0])
#if (DOMAIN_NS == 1)
#define STACK_FRAME_INIT_VAL    0xBCU
#else
#define STACK_FRAME_INIT_VAL    0xFDU
#endif
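
// Note: STACK_FRAME_INIT_VAL is the low byte of EXC_RETURN for an exception
// return to Thread mode using the Process Stack with a basic (no FP) frame:
// 0xFD in the Secure/no-TrustZone case, 0xBC when RTX itself runs in the
// Non-secure domain on ARMv8-M (EXC_RETURN bit 0 clear selects a Non-secure
// return).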

/// Stack Offset of Register R0
/// \param[in]  stack_frame     Stack Frame (EXC_RETURN[7..0])
/// \return                     R0 Offset
__STATIC_INLINE uint32_t StackOffsetR0 (uint8_t stack_frame) {
#if ((__FPU_USED == 1U) || \
     (defined(__ARM_FEATURE_MVE) && (__ARM_FEATURE_MVE > 0)))
  return (((stack_frame & 0x10U) == 0U) ? ((16U+8U)*4U) : (8U*4U));
#else
  (void)stack_frame;
  return (8U*4U);
#endif
}
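
// With the FPU or MVE in use, EXC_RETURN bit 4 (0x10) clear denotes an
// extended frame: the software-saved area then holds S16-S31 plus R4-R11
// (16+8 words), so R0 sits (16+8)*4 = 96 bytes above the saved stack
// pointer; for a basic frame only R4-R11 (8 words) precede it, giving the
// fixed offset 8*4 = 32 bytes (see the Stack Frame layout above).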


//  ==== Core functions ====

//lint -sem(__get_CONTROL, pure)
//lint -sem(__get_IPSR,    pure)
//lint -sem(__get_PRIMASK, pure)
//lint -sem(__get_BASEPRI, pure)

/// Check if running Privileged
/// \return     true=privileged, false=unprivileged
__STATIC_INLINE bool_t IsPrivileged (void) {
  return ((__get_CONTROL() & 1U) == 0U);
}

/// Check if in Exception
/// \return     true=exception, false=thread
__STATIC_INLINE bool_t IsException (void) {
  return (__get_IPSR() != 0U);
}

/// Check if IRQ is Masked
/// \return     true=masked, false=not masked
__STATIC_INLINE bool_t IsIrqMasked (void) {
#if   ((defined(__ARM_ARCH_7M__)        && (__ARM_ARCH_7M__        != 0)) || \
       (defined(__ARM_ARCH_7EM__)       && (__ARM_ARCH_7EM__       != 0)) || \
       (defined(__ARM_ARCH_8M_MAIN__)   && (__ARM_ARCH_8M_MAIN__   != 0)) || \
       (defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ != 0)))
  return ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U));
#else
  return  (__get_PRIMASK() != 0U);
#endif
}


//  ==== Core Peripherals functions ====

/// Setup SVC and PendSV System Service Calls
__STATIC_INLINE void SVC_Setup (void) {
#if   ((defined(__ARM_ARCH_8M_MAIN__)   && (__ARM_ARCH_8M_MAIN__   != 0)) || \
       (defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ != 0)) || \
       (defined(__CORTEX_M)             && (__CORTEX_M == 7U)))
  uint32_t p, n;

  SCB->SHPR[10] = 0xFFU;
  n = 32U - (uint32_t)__CLZ(~(SCB->SHPR[10] | 0xFFFFFF00U));
  p = NVIC_GetPriorityGrouping();
  if (p >= n) {
    n = p + 1U;
  }
  SCB->SHPR[7] = (uint8_t)(0xFEU << n);
#elif  (defined(__ARM_ARCH_8M_BASE__)   && (__ARM_ARCH_8M_BASE__   != 0))
  uint32_t n;

  SCB->SHPR[1] |= 0x00FF0000U;
  n = SCB->SHPR[1];
  SCB->SHPR[0] |= (n << (8+1)) & 0xFC000000U;
#elif ((defined(__ARM_ARCH_7M__)        && (__ARM_ARCH_7M__        != 0)) || \
       (defined(__ARM_ARCH_7EM__)       && (__ARM_ARCH_7EM__       != 0)))
  uint32_t p, n;

  SCB->SHP[10] = 0xFFU;
  n = 32U - (uint32_t)__CLZ(~(SCB->SHP[10] | 0xFFFFFF00U));
  p = NVIC_GetPriorityGrouping();
  if (p >= n) {
    n = p + 1U;
  }
  SCB->SHP[7] = (uint8_t)(0xFEU << n);
#elif  (defined(__ARM_ARCH_6M__)        && (__ARM_ARCH_6M__        != 0))
  uint32_t n;

  SCB->SHP[1] |= 0x00FF0000U;
  n = SCB->SHP[1];
  SCB->SHP[0] |= (n << (8+1)) & 0xFC000000U;
#endif
}
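
// Sketch of the priority setup above (Mainline path): PendSV is first forced
// to the lowest priority (0xFF); reading the byte back and counting the bits
// that stuck gives n, the number of unimplemented low-order priority bits,
// and (0xFE << n) truncated to a byte is then the second-lowest usable group
// priority. SVCall therefore ends up one priority group above PendSV, so
// service calls can preempt context switches but not application interrupts
// configured above them. The Baseline paths achieve the same effect with
// word-wide SHP/SHPR accesses.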

/// Get Pending SV (Service Call) Flag
/// \return     Pending SV Flag
__STATIC_INLINE uint8_t GetPendSV (void) {
  return ((uint8_t)((SCB->ICSR & (SCB_ICSR_PENDSVSET_Msk)) >> 24));
}

/// Clear Pending SV (Service Call) Flag
__STATIC_INLINE void ClrPendSV (void) {
  SCB->ICSR = SCB_ICSR_PENDSVCLR_Msk;
}

/// Set Pending SV (Service Call) Flag
__STATIC_INLINE void SetPendSV (void) {
  SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;
}
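
// Illustrative use (hypothetical scheduler state): a context switch is
// requested by pending PendSV, which the core takes once no higher-priority
// exception is active:
//
//   if (next_thread != cur_thread) {
//     SetPendSV();               // switch performed later in PendSV_Handler
//   }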


//  ==== Service Calls definitions ====

//lint -save -e9023 -e9024 -e9026 "Function-like macros using '#/##'" [MISRA Note 10]

#if defined(__CC_ARM)

#if   ((defined(__ARM_ARCH_7M__)        && (__ARM_ARCH_7M__        != 0)) ||   \
       (defined(__ARM_ARCH_7EM__)       && (__ARM_ARCH_7EM__       != 0)) ||   \
       (defined(__ARM_ARCH_8M_MAIN__)   && (__ARM_ARCH_8M_MAIN__   != 0)) ||   \
       (defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ != 0)))
#define SVC_INDIRECT(n) __svc_indirect(n)
#elif ((defined(__ARM_ARCH_6M__)        && (__ARM_ARCH_6M__        != 0)) ||   \
       (defined(__ARM_ARCH_8M_BASE__)   && (__ARM_ARCH_8M_BASE__   != 0)))
#define SVC_INDIRECT(n) __svc_indirect_r7(n)
#endif

#define SVC0_0N(f,t)                                                           \
SVC_INDIRECT(0) t    svc##f (t(*)());                                          \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (void) {                                           \
  svc##f(svcRtx##f);                                                           \
}

#define SVC0_0(f,t)                                                            \
SVC_INDIRECT(0) t    svc##f (t(*)());                                          \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (void) {                                           \
  return svc##f(svcRtx##f);                                                    \
}

#define SVC0_1N(f,t,t1)                                                        \
SVC_INDIRECT(0) t    svc##f (t(*)(t1),t1);                                     \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1) {                                          \
  svc##f(svcRtx##f,a1);                                                        \
}

#define SVC0_1(f,t,t1)                                                         \
SVC_INDIRECT(0) t    svc##f (t(*)(t1),t1);                                     \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1) {                                          \
  return svc##f(svcRtx##f,a1);                                                 \
}

#define SVC0_2(f,t,t1,t2)                                                      \
SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2),t1,t2);                               \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1, t2 a2) {                                   \
  return svc##f(svcRtx##f,a1,a2);                                              \
}

#define SVC0_3(f,t,t1,t2,t3)                                                   \
SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2,t3),t1,t2,t3);                         \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1, t2 a2, t3 a3) {                            \
  return svc##f(svcRtx##f,a1,a2,a3);                                           \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                                                \
SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4);                   \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) {                     \
  return svc##f(svcRtx##f,a1,a2,a3,a4);                                        \
}

#elif defined(__ICCARM__)

#if   ((defined(__ARM_ARCH_7M__)        && (__ARM_ARCH_7M__        != 0)) ||   \
       (defined(__ARM_ARCH_7EM__)       && (__ARM_ARCH_7EM__       != 0)) ||   \
       (defined(__ARM_ARCH_8M_MAIN__)   && (__ARM_ARCH_8M_MAIN__   != 0)) ||   \
       (defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ != 0)))
#define SVC_ArgF(f)                                                            \
  __asm(                                                                       \
    "mov r12,%0\n"                                                             \
    :: "r"(&f): "r12"                                                          \
  );
#elif ((defined(__ARM_ARCH_6M__)        && (__ARM_ARCH_6M__        != 0)) ||   \
       (defined(__ARM_ARCH_8M_BASE__)   && (__ARM_ARCH_8M_BASE__   != 0)))
#define SVC_ArgF(f)                                                            \
  __asm(                                                                       \
    "mov r7,%0\n"                                                              \
    :: "r"(&f): "r7"                                                           \
  );
#endif

#define STRINGIFY(a) #a
#define SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi

#define SVC0_0N(f,t)                                                           \
SVC_INDIRECT(0) t    svc##f ();                                                \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (void) {                                           \
  SVC_ArgF(svcRtx##f);                                                         \
  svc##f();                                                                    \
}

#define SVC0_0(f,t)                                                            \
SVC_INDIRECT(0) t    svc##f ();                                                \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (void) {                                           \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f();                                                             \
}

#define SVC0_1N(f,t,t1)                                                        \
SVC_INDIRECT(0) t    svc##f (t1 a1);                                           \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1) {                                          \
  SVC_ArgF(svcRtx##f);                                                         \
  svc##f(a1);                                                                  \
}

#define SVC0_1(f,t,t1)                                                         \
SVC_INDIRECT(0) t    svc##f (t1 a1);                                           \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1) {                                          \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f(a1);                                                           \
}

#define SVC0_2(f,t,t1,t2)                                                      \
SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2);                                    \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1, t2 a2) {                                   \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f(a1,a2);                                                        \
}

#define SVC0_3(f,t,t1,t2,t3)                                                   \
SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2, t3 a3);                             \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1, t2 a2, t3 a3) {                            \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f(a1,a2,a3);                                                     \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                                                \
SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2, t3 a3, t4 a4);                      \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t  __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) {                     \
  SVC_ArgF(svcRtx##f);                                                         \
  return svc##f(a1,a2,a3,a4);                                                  \
}

#else   // !(defined(__CC_ARM) || defined(__ICCARM__))

//lint -esym(522,__svc*) "Functions '__svc*' are impure (side-effects)"

#if   ((defined(__ARM_ARCH_7M__)        && (__ARM_ARCH_7M__        != 0)) ||   \
       (defined(__ARM_ARCH_7EM__)       && (__ARM_ARCH_7EM__       != 0)) ||   \
       (defined(__ARM_ARCH_8M_MAIN__)   && (__ARM_ARCH_8M_MAIN__   != 0)) ||   \
       (defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ != 0)))
#define SVC_RegF "r12"
#elif ((defined(__ARM_ARCH_6M__)        && (__ARM_ARCH_6M__        != 0)) ||   \
       (defined(__ARM_ARCH_8M_BASE__)   && (__ARM_ARCH_8M_BASE__   != 0)))
#define SVC_RegF "r7"
#endif

#define SVC_ArgN(n) \
register uint32_t __r##n __ASM("r"#n)

#define SVC_ArgR(n,a) \
register uint32_t __r##n __ASM("r"#n) = (uint32_t)a

#define SVC_ArgF(f) \
register uint32_t __rf   __ASM(SVC_RegF) = (uint32_t)f

#define SVC_In0 "r"(__rf)
#define SVC_In1 "r"(__rf),"r"(__r0)
#define SVC_In2 "r"(__rf),"r"(__r0),"r"(__r1)
#define SVC_In3 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2)
#define SVC_In4 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2),"r"(__r3)

#define SVC_Out0
#define SVC_Out1 "=r"(__r0)

#define SVC_CL0
#define SVC_CL1 "r1"
#define SVC_CL2 "r0","r1"

#define SVC_Call0(in, out, cl)                                                 \
  __ASM volatile ("svc 0" : out : in : cl)
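
// Convention used by these wrappers: the address of the RTX service function
// travels in r12 (r7 on ARMv6-M/ARMv8-M Baseline, where the 16-bit Thumb
// handler code favors a low register), arguments go in r0-r3, and the result
// returns in r0, mirroring the AAPCS so the SVC handler can branch to the
// service function directly. The clobber lists name the argument registers a
// call may overwrite beyond the declared outputs.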

#define SVC0_0N(f,t)                                                           \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (void) {                                            \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In0, SVC_Out0, SVC_CL2);                                       \
}

#define SVC0_0(f,t)                                                            \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (void) {                                            \
  SVC_ArgN(0);                                                                 \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In0, SVC_Out1, SVC_CL1);                                       \
  return (t) __r0;                                                             \
}

#define SVC0_1N(f,t,t1)                                                        \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1) {                                           \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In1, SVC_Out0, SVC_CL1);                                       \
}

#define SVC0_1(f,t,t1)                                                         \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1) {                                           \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In1, SVC_Out1, SVC_CL1);                                       \
  return (t) __r0;                                                             \
}

#define SVC0_2(f,t,t1,t2)                                                      \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) {                                    \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgR(1,a2);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In2, SVC_Out1, SVC_CL0);                                       \
  return (t) __r0;                                                             \
}

#define SVC0_3(f,t,t1,t2,t3)                                                   \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) {                             \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgR(1,a2);                                                              \
  SVC_ArgR(2,a3);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In3, SVC_Out1, SVC_CL0);                                       \
  return (t) __r0;                                                             \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                                                \
__attribute__((always_inline))                                                 \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) {                      \
  SVC_ArgR(0,a1);                                                              \
  SVC_ArgR(1,a2);                                                              \
  SVC_ArgR(2,a3);                                                              \
  SVC_ArgR(3,a4);                                                              \
  SVC_ArgF(svcRtx##f);                                                         \
  SVC_Call0(SVC_In4, SVC_Out1, SVC_CL0);                                       \
  return (t) __r0;                                                             \
}

#endif

//lint -restore [MISRA Note 10]
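
// Illustrative expansion (hypothetical service 'Foo'): given a service
// function 'svcRtxFoo' of type 'int32_t (uint32_t)', the instantiation
//
//   SVC0_1(Foo, int32_t, uint32_t)
//
// defines an inline wrapper '__svcFoo(uint32_t a1)' that places the argument
// in r0 and the address of svcRtxFoo in the function register, executes
// 'SVC 0', and returns r0 cast to int32_t. A single SVC number thus serves
// all services; the handler dispatches to the function address it received.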


//  ==== Exclusive Access Operation ====

#if (EXCLUSIVE_ACCESS == 1)

//lint ++flb "Library Begin" [MISRA Note 12]

/// Atomic Access Operation: Write (8-bit)
/// \param[in]  mem             Memory address
/// \param[in]  val             Value to write
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
  mov    r2,r0
1
  ldrexb r0,[r2]
  strexb r3,r1,[r2]
  cbz    r3,%F2
  b      %B1
2
  bx     lr
}
#else
__STATIC_INLINE uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint8_t  ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexb %[ret],[%[mem]]\n\t"
    "strexb %[res],%[val],[%[mem]]\n\t"
    "cbz    %[res],2f\n\t"
    "b      1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [val] "l"   (val)
  : "memory"
  );

  return ret;
}
#endif
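
// Illustrative use (hypothetical shared flag): read and replace a byte that
// an ISR may update concurrently, in one atomic step:
//
//   static uint8_t event_flag;
//
//   if (atomic_wr8(&event_flag, 0U) != 0U) {
//     // an event was pending and has now been consumed
//   }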

/// Atomic Access Operation: Set bits (32-bit)
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     New value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
  mov   r2,r0
1
  ldrex r0,[r2]
  orr   r0,r0,r1
  strex r3,r0,[r2]
  cbz   r3,%F2
  b     %B1
2
  bx     lr
}
#else
__STATIC_INLINE uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[val],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[ret],%[val]\n\t"
    "orrs  %[ret],%[bits]\n\t"
#else
    "orr   %[ret],%[val],%[bits]\n\t"
#endif
    "strex %[res],%[ret],[%[mem]]\n\t"
    "cbz   %[res],2f\n\t"
    "b     1b\n"
  "2:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  : "memory", "cc"
#else
  : "memory"
#endif
  );

  return ret;
}
#endif

/// Atomic Access Operation: Clear bits (32-bit)
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cbz   r3,%F2
  b     %B1
2
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[val],%[ret]\n\t"
    "bics  %[val],%[bits]\n\t"
#else
    "bic   %[val],%[ret],%[bits]\n\t"
#endif
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],2f\n\t"
    "b     1b\n"
  "2:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  : "memory", "cc"
#else
  : "memory"
#endif
  );

  return ret;
}
#endif

/// Atomic Access Operation: Check if all specified bits (32-bit) are active and clear them
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     Active bits before clearing or 0 if not active
#if defined(__CC_ARM)
static __asm    uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  and   r4,r0,r1
  cmp   r4,r1
  beq   %F2
  clrex
  movs  r0,#0
  pop   {r4,pc}
2
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cbz   r3,%F3
  b     %B1
3
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[val],%[ret]\n\t"
    "ands  %[val],%[bits]\n\t"
#else
    "and   %[val],%[ret],%[bits]\n\t"
#endif
    "cmp   %[val],%[bits]\n\t"
    "beq   2f\n\t"
    "clrex\n\t"
    "movs  %[ret],#0\n\t"
    "b     3f\n"
  "2:\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[val],%[ret]\n\t"
    "bics  %[val],%[bits]\n\t"
#else
    "bic   %[val],%[ret],%[bits]\n\t"
#endif
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],3f\n\t"
    "b     1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif
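
// Illustrative use (hypothetical flag word): consume a set of flags only if
// every requested bit is already set:
//
//   uint32_t prev = atomic_chk32_all(&thread_flags, 0x05U);
//   if (prev != 0U) {
//     // bits 0 and 2 were both set and have been cleared atomically
//   }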

/// Atomic Access Operation: Check if any specified bits (32-bit) are active and clear them
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     Active bits before clearing or 0 if not active
#if defined(__CC_ARM)
static __asm    uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  tst   r0,r1
  bne   %F2
  clrex
  movs  r0,#0
  pop   {r4,pc}
2
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cbz   r3,%F3
  b     %B1
3
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "tst   %[ret],%[bits]\n\t"
    "bne   2f\n\t"
    "clrex\n\t"
    "movs  %[ret],#0\n\t"
    "b     3f\n"
  "2:\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[val],%[ret]\n\t"
    "bics  %[val],%[bits]\n\t"
#else
    "bic   %[val],%[ret],%[bits]\n\t"
#endif
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],3f\n\t"
    "b     1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (32-bit)
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_inc32 (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  adds  r1,r0,#1
  strex r3,r1,[r2]
  cbz   r3,%F2
  b     %B1
2
  bx     lr
}
#else
__STATIC_INLINE uint32_t atomic_inc32 (uint32_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "adds  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],2f\n\t"
    "b     1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (16-bit) if Less Than
/// \param[in]  mem             Memory address
/// \param[in]  max             Maximum value
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrexh r0,[r2]
  cmp    r1,r0
  bhi    %F2
  clrex
  pop    {r4,pc}
2
  adds   r4,r0,#1
  strexh r3,r4,[r2]
  cbz    r3,%F3
  b      %B1
3
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "cmp    %[max],%[ret]\n\t"
    "bhi    2f\n\t"
    "clrex\n\t"
    "b      3f\n"
  "2:\n\t"
    "adds   %[val],%[ret],#1\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cbz    %[res],3f\n\t"
    "b      1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [max] "l"   (max)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (16-bit) and clear on Limit
/// \param[in]  mem             Memory address
/// \param[in]  lim             Limit value
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrexh r0,[r2]
  adds   r4,r0,#1
  cmp    r1,r4
  bhi    %F2
  movs   r4,#0
2
  strexh r3,r4,[r2]
  cbz    r3,%F3
  b      %B1
3
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "adds   %[val],%[ret],#1\n\t"
    "cmp    %[lim],%[val]\n\t"
    "bhi    2f\n\t"
    "movs   %[val],#0\n"
  "2:\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cbz    %[res],3f\n\t"
    "b      1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [lim] "l"   (lim)
  : "cc", "memory"
  );

  return ret;
}
#endif
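
// Illustrative use (hypothetical queue state): advance a ring-buffer index
// atomically, wrapping to zero when the limit is reached:
//
//   uint16_t idx = atomic_inc16_lim(&queue_head, QUEUE_SLOTS);
//   // idx is the slot to use; queue_head now holds (idx + 1) % QUEUE_SLOTS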

/// Atomic Access Operation: Decrement (32-bit)
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_dec32 (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  subs  r1,r0,#1
  strex r3,r1,[r2]
  cbz   r3,%F2
  b     %B1
2
  bx     lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32 (uint32_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "subs  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],2f\n\t"
    "b     1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Decrement (32-bit) if Not Zero
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_dec32_nz (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  cbnz  r0,%F2
  clrex
  bx    lr
2
  subs  r1,r0,#1
  strex r3,r1,[r2]
  cbz   r3,%F3
  b     %B1
3
  bx     lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32_nz (uint32_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "cbnz  %[ret],2f\n\t"
    "clrex\n\t"
    "b     3f\n"
  "2:\n\t"
    "subs  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],3f\n\t"
    "b     1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Decrement (16-bit) if Not Zero
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint16_t atomic_dec16_nz (uint16_t *mem) {
  mov    r2,r0
1
  ldrexh r0,[r2]
  cbnz   r0,%F2
  clrex
  bx     lr
2
  subs   r1,r0,#1
  strexh r3,r1,[r2]
  cbz    r3,%F3
  b      %B1
3
  bx      lr
}
#else
__STATIC_INLINE uint16_t atomic_dec16_nz (uint16_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "cbnz   %[ret],2f\n\t"
    "clrex\n\t"
    "b      3f\n"
  "2:\n\t"
    "subs   %[val],%[ret],#1\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cbz    %[res],3f\n\t"
    "b      1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Link Get
/// \param[in]  root            Root address
/// \return                     Link
#if defined(__CC_ARM)
static __asm    void *atomic_link_get (void **root) {
  mov   r2,r0
1
  ldrex r0,[r2]
  cbnz  r0,%F2
  clrex
  bx    lr
2
  ldr   r1,[r0]
  strex r3,r1,[r2]
  cbz   r3,%F3
  b     %B1
3
  bx     lr
}
#else
__STATIC_INLINE void *atomic_link_get (void **root) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register void    *ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[root]]\n\t"
    "cbnz  %[ret],2f\n\t"
    "clrex\n\t"
    "b     3f\n"
  "2:\n\t"
    "ldr   %[val],[%[ret]]\n\t"
    "strex %[res],%[val],[%[root]]\n\t"
    "cbz   %[res],3f\n\t"
    "b     1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [root] "l"   (root)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Link Put
/// \param[in]  root            Root address
/// \param[in]  link            Link
#if defined(__CC_ARM)
static __asm    void atomic_link_put (void **root, void *link) {
1
  ldr   r2,[r0]
  str   r2,[r1]
  dmb
  ldrex r2,[r0]
  ldr   r3,[r1]
  cmp   r3,r2
  bne   %B1
  strex r3,r1,[r0]
  cbz   r3,%F2
  b     %B1
2
  bx    lr
}
#else
__STATIC_INLINE void atomic_link_put (void **root, void *link) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val1, val2, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldr   %[val1],[%[root]]\n\t"
    "str   %[val1],[%[link]]\n\t"
    "dmb\n\t"
    "ldrex %[val1],[%[root]]\n\t"
    "ldr   %[val2],[%[link]]\n\t"
    "cmp   %[val2],%[val1]\n\t"
    "bne   1b\n\t"
    "strex %[res],%[link],[%[root]]\n\t"
    "cbz   %[res],2f\n\t"
    "b     1b\n"
  "2:"
  : [val1] "=&l" (val1),
    [val2] "=&l" (val2),
    [res]  "=&l" (res)
  : [root] "l"   (root),
    [link] "l"   (link)
  : "cc", "memory"
  );
}
#endif
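
// Illustrative use (hypothetical free list): together these two operations
// form a lock-free LIFO of singly-linked blocks whose first word is the
// 'next' pointer; atomic_link_put pushes a block, atomic_link_get pops one:
//
//   static void *free_list;                  // list head, NULL when empty
//
//   void  block_free (void *block) { atomic_link_put(&free_list, block); }
//   void *block_alloc (void)       { return atomic_link_get(&free_list); }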

//lint --flb "Library End" [MISRA Note 12]

#endif  // (EXCLUSIVE_ACCESS == 1)


#endif  // RTX_CORE_CM_H_