/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// Replacement for GCC built-in atomic functions

#include "sdkconfig.h"
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "soc/soc_caps.h"
#include "freertos/FreeRTOS.h"

#ifdef __XTENSA__
#include "xtensa/config/core-isa.h"

#ifndef XCHAL_HAVE_S32C1I
#error "XCHAL_HAVE_S32C1I not defined, include correct header!"
#endif

#define HAS_ATOMICS_32 (XCHAL_HAVE_S32C1I == 1)
// No 64-bit atomics on Xtensa
#define HAS_ATOMICS_64 0

#else // RISCV

// The GCC toolchain defines this preprocessor macro when the "A" (atomic) extension is supported
#ifndef __riscv_atomic
#define __riscv_atomic 0
#endif

#define HAS_ATOMICS_32 (__riscv_atomic == 1)
#define HAS_ATOMICS_64 ((__riscv_atomic == 1) && (__riscv_xlen == 64))
#endif // (__XTENSA__, __riscv)

#if SOC_CPU_CORES_NUM == 1

// Single core SoC: atomics can be implemented using portSET_INTERRUPT_MASK_FROM_ISR
// and portCLEAR_INTERRUPT_MASK_FROM_ISR, which disable and enable interrupts, respectively.
#if CONFIG_FREERTOS_SMP
#define _ATOMIC_ENTER_CRITICAL() ({ \
    unsigned state = portDISABLE_INTERRUPTS(); \
    state; \
})

#define _ATOMIC_EXIT_CRITICAL(state) do { \
    portRESTORE_INTERRUPTS(state); \
} while (0)
#else // CONFIG_FREERTOS_SMP
#define _ATOMIC_ENTER_CRITICAL() ({ \
    unsigned state = portSET_INTERRUPT_MASK_FROM_ISR(); \
    state; \
})

#define _ATOMIC_EXIT_CRITICAL(state) do { \
    portCLEAR_INTERRUPT_MASK_FROM_ISR(state); \
} while (0)
#endif
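
// Illustrative expansion (single-core, non-SMP build); a sketch of the resulting code, not the
// literal preprocessor output. An emulated operation such as __atomic_fetch_add_4(), defined
// below when the hardware lacks native 32-bit atomics, effectively behaves like:
//
//     unsigned state = portSET_INTERRUPT_MASK_FROM_ISR();
//     unsigned int ret = *(volatile unsigned int *)ptr;
//     *(volatile unsigned int *)ptr = ret + value;
//     portCLEAR_INTERRUPT_MASK_FROM_ISR(state);
//     return ret;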
#else // SOC_CPU_CORES_NUM

_Static_assert(HAS_ATOMICS_32, "32-bit atomics should be supported if SOC_CPU_CORES_NUM > 1");
// Only need to implement 64-bit atomics here. Use a single global portMUX_TYPE spinlock
// to emulate the atomics.
static portMUX_TYPE s_atomic_lock = portMUX_INITIALIZER_UNLOCKED;

// Return value is not used but kept for compatibility with the single-core version above.
#define _ATOMIC_ENTER_CRITICAL() ({ \
    portENTER_CRITICAL_SAFE(&s_atomic_lock); \
    0; \
})

#define _ATOMIC_EXIT_CRITICAL(state) do { \
    (void) (state); \
    portEXIT_CRITICAL_SAFE(&s_atomic_lock); \
} while (0)

#endif // SOC_CPU_CORES_NUM
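
// Illustrative expansion (multi-core build); again only a sketch. The emulated 64-bit operations
// below, e.g. __atomic_fetch_add_8(), effectively behave like:
//
//     portENTER_CRITICAL_SAFE(&s_atomic_lock);
//     long long unsigned int ret = *(volatile long long unsigned int *)ptr;
//     *(volatile long long unsigned int *)ptr = ret + value;
//     portEXIT_CRITICAL_SAFE(&s_atomic_lock);
//     return ret;
//
// The *_SAFE critical section variants are used so that the emulation also works when called
// from an ISR.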

#ifdef __clang__
// Clang does not allow the "__sync_*" built-ins to be defined directly as functions. The workaround is
// to define a function named "__sync_*_builtin" which implements the "__sync_*" atomic functionality,
// and to use an asm directive to make the symbol "__sync_*" an alias of that function.

#define CLANG_ATOMIC_SUFFIX(name_) name_ ## _builtin
#define CLANG_DECLARE_ALIAS(name_) \
__asm__(".type " # name_ ", @function\n" \
        ".global " #name_ "\n" \
        ".equ " #name_ ", " #name_ "_builtin");

#else // __clang__

#define CLANG_ATOMIC_SUFFIX(name_) name_
#define CLANG_DECLARE_ALIAS(name_)

#endif // __clang__
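
// Illustrative expansion of the two helper macros, e.g. for __sync_fetch_and_add_4:
//  - With GCC,   CLANG_ATOMIC_SUFFIX(__sync_fetch_and_add_4) is simply __sync_fetch_and_add_4 and
//    CLANG_DECLARE_ALIAS(__sync_fetch_and_add_4) expands to nothing.
//  - With Clang, the function is emitted as __sync_fetch_and_add_4_builtin and
//    CLANG_DECLARE_ALIAS(__sync_fetch_and_add_4) emits the assembly
//
//        .type __sync_fetch_and_add_4, @function
//        .global __sync_fetch_and_add_4
//        .equ __sync_fetch_and_add_4, __sync_fetch_and_add_4_builtin
//
//    which publishes the expected "__sync_*" symbol as an alias of the _builtin function.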

#define ATOMIC_LOAD(n, type) type __atomic_load_ ## n (const volatile void* mem, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(const volatile type*)mem; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define ATOMIC_STORE(n, type) void __atomic_store_ ## n (volatile void * mem, type val, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    *(volatile type *)mem = val; \
    _ATOMIC_EXIT_CRITICAL(state); \
}

#define ATOMIC_EXCHANGE(n, type) type __atomic_exchange_ ## n (volatile void* mem, type val, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)mem; \
    *(volatile type*)mem = val; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define CMP_EXCHANGE(n, type) bool __atomic_compare_exchange_ ## n (volatile void* mem, void* expect, type desired, bool weak, int success, int failure) \
{ \
    bool ret = false; \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    if (*(volatile type*)mem == *(type*)expect) { \
        ret = true; \
        *(volatile type*)mem = desired; \
    } else { \
        *(type*)expect = *(volatile type*)mem; \
    } \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define FETCH_ADD(n, type) type __atomic_fetch_add_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr; \
    *(volatile type*)ptr = *(volatile type*)ptr + value; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define ADD_FETCH(n, type) type __atomic_add_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr + value; \
    *(volatile type*)ptr = ret; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define FETCH_SUB(n, type) type __atomic_fetch_sub_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr; \
    *(volatile type*)ptr = *(volatile type*)ptr - value; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define SUB_FETCH(n, type) type __atomic_sub_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr - value; \
    *(volatile type*)ptr = ret; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define FETCH_AND(n, type) type __atomic_fetch_and_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr; \
    *(volatile type*)ptr = *(volatile type*)ptr & value; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define AND_FETCH(n, type) type __atomic_and_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr & value; \
    *(volatile type*)ptr = ret; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define FETCH_OR(n, type) type __atomic_fetch_or_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr; \
    *(volatile type*)ptr = *(volatile type*)ptr | value; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define OR_FETCH(n, type) type __atomic_or_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr | value; \
    *(volatile type*)ptr = ret; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define FETCH_XOR(n, type) type __atomic_fetch_xor_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr; \
    *(volatile type*)ptr = *(volatile type*)ptr ^ value; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define XOR_FETCH(n, type) type __atomic_xor_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr ^ value; \
    *(volatile type*)ptr = ret; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define FETCH_NAND(n, type) type __atomic_fetch_nand_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr; \
    *(volatile type*)ptr = ~(*(volatile type*)ptr & value); \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}

#define NAND_FETCH(n, type) type __atomic_nand_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = ~(*(volatile type*)ptr & value); \
    *(volatile type*)ptr = ret; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
}
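
// Each macro above, when instantiated further below, defines one __atomic_* emulation function for
// a given operand size "n". For example (a sketch of the expansion), FETCH_ADD(4, unsigned int)
// defines
//
//     unsigned int __atomic_fetch_add_4(volatile void* ptr, unsigned int value, int memorder);
//
// Note that the memory-order argument is intentionally ignored: the critical section makes every
// emulated operation sequentially consistent, which satisfies any weaker ordering the caller may
// have requested.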

#define SYNC_FETCH_OP(op, n, type) type CLANG_ATOMIC_SUFFIX(__sync_fetch_and_ ## op ##_ ## n) (volatile void* ptr, type value) \
{ \
    return __atomic_fetch_ ## op ##_ ## n (ptr, value, __ATOMIC_SEQ_CST); \
} \
CLANG_DECLARE_ALIAS( __sync_fetch_and_ ## op ##_ ## n )

#define SYNC_OP_FETCH(op, n, type) type CLANG_ATOMIC_SUFFIX(__sync_ ## op ##_and_fetch_ ## n) (volatile void* ptr, type value) \
{ \
    return __atomic_ ## op ##_fetch_ ## n (ptr, value, __ATOMIC_SEQ_CST); \
} \
CLANG_DECLARE_ALIAS( __sync_ ## op ##_and_fetch_ ## n )

#define SYNC_BOOL_CMP_EXCHANGE(n, type) bool CLANG_ATOMIC_SUFFIX(__sync_bool_compare_and_swap_ ## n) (volatile void* ptr, type oldval, type newval) \
{ \
    bool ret = false; \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    if (*(volatile type*)ptr == oldval) { \
        *(volatile type*)ptr = newval; \
        ret = true; \
    } \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
} \
CLANG_DECLARE_ALIAS( __sync_bool_compare_and_swap_ ## n )

#define SYNC_VAL_CMP_EXCHANGE(n, type) type CLANG_ATOMIC_SUFFIX(__sync_val_compare_and_swap_ ## n) (volatile void* ptr, type oldval, type newval) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr; \
    if (*(volatile type*)ptr == oldval) { \
        *(volatile type*)ptr = newval; \
    } \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
} \
CLANG_DECLARE_ALIAS( __sync_val_compare_and_swap_ ## n )

#define SYNC_LOCK_TEST_AND_SET(n, type) type CLANG_ATOMIC_SUFFIX(__sync_lock_test_and_set_ ## n) (volatile void* ptr, type val) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    type ret = *(volatile type*)ptr; \
    *(volatile type*)ptr = val; \
    _ATOMIC_EXIT_CRITICAL(state); \
    return ret; \
} \
CLANG_DECLARE_ALIAS( __sync_lock_test_and_set_ ## n )

#define SYNC_LOCK_RELEASE(n, type) void CLANG_ATOMIC_SUFFIX(__sync_lock_release_ ## n) (volatile void* ptr) \
{ \
    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
    *(volatile type*)ptr = 0; \
    _ATOMIC_EXIT_CRITICAL(state); \
} \
CLANG_DECLARE_ALIAS( __sync_lock_release_ ## n )

#if !HAS_ATOMICS_32

_Static_assert(sizeof(unsigned char) == 1, "atomics require a 1-byte type");
_Static_assert(sizeof(short unsigned int) == 2, "atomics require a 2-byte type");
_Static_assert(sizeof(unsigned int) == 4, "atomics require a 4-byte type");

ATOMIC_EXCHANGE(1, unsigned char)
ATOMIC_EXCHANGE(2, short unsigned int)
ATOMIC_EXCHANGE(4, unsigned int)

CMP_EXCHANGE(1, unsigned char)
CMP_EXCHANGE(2, short unsigned int)
CMP_EXCHANGE(4, unsigned int)

FETCH_ADD(1, unsigned char)
FETCH_ADD(2, short unsigned int)
FETCH_ADD(4, unsigned int)

ADD_FETCH(1, unsigned char)
ADD_FETCH(2, short unsigned int)
ADD_FETCH(4, unsigned int)

FETCH_SUB(1, unsigned char)
FETCH_SUB(2, short unsigned int)
FETCH_SUB(4, unsigned int)

SUB_FETCH(1, unsigned char)
SUB_FETCH(2, short unsigned int)
SUB_FETCH(4, unsigned int)

FETCH_AND(1, unsigned char)
FETCH_AND(2, short unsigned int)
FETCH_AND(4, unsigned int)

AND_FETCH(1, unsigned char)
AND_FETCH(2, short unsigned int)
AND_FETCH(4, unsigned int)

FETCH_OR(1, unsigned char)
FETCH_OR(2, short unsigned int)
FETCH_OR(4, unsigned int)

OR_FETCH(1, unsigned char)
OR_FETCH(2, short unsigned int)
OR_FETCH(4, unsigned int)

FETCH_XOR(1, unsigned char)
FETCH_XOR(2, short unsigned int)
FETCH_XOR(4, unsigned int)

XOR_FETCH(1, unsigned char)
XOR_FETCH(2, short unsigned int)
XOR_FETCH(4, unsigned int)

FETCH_NAND(1, unsigned char)
FETCH_NAND(2, short unsigned int)
FETCH_NAND(4, unsigned int)

NAND_FETCH(1, unsigned char)
NAND_FETCH(2, short unsigned int)
NAND_FETCH(4, unsigned int)

SYNC_FETCH_OP(add, 1, unsigned char)
SYNC_FETCH_OP(add, 2, short unsigned int)
SYNC_FETCH_OP(add, 4, unsigned int)

SYNC_OP_FETCH(add, 1, unsigned char)
SYNC_OP_FETCH(add, 2, short unsigned int)
SYNC_OP_FETCH(add, 4, unsigned int)

SYNC_FETCH_OP(sub, 1, unsigned char)
SYNC_FETCH_OP(sub, 2, short unsigned int)
SYNC_FETCH_OP(sub, 4, unsigned int)

SYNC_OP_FETCH(sub, 1, unsigned char)
SYNC_OP_FETCH(sub, 2, short unsigned int)
SYNC_OP_FETCH(sub, 4, unsigned int)

SYNC_FETCH_OP(and, 1, unsigned char)
SYNC_FETCH_OP(and, 2, short unsigned int)
SYNC_FETCH_OP(and, 4, unsigned int)

SYNC_OP_FETCH(and, 1, unsigned char)
SYNC_OP_FETCH(and, 2, short unsigned int)
SYNC_OP_FETCH(and, 4, unsigned int)

SYNC_FETCH_OP(or, 1, unsigned char)
SYNC_FETCH_OP(or, 2, short unsigned int)
SYNC_FETCH_OP(or, 4, unsigned int)

SYNC_OP_FETCH(or, 1, unsigned char)
SYNC_OP_FETCH(or, 2, short unsigned int)
SYNC_OP_FETCH(or, 4, unsigned int)

SYNC_FETCH_OP(xor, 1, unsigned char)
SYNC_FETCH_OP(xor, 2, short unsigned int)
SYNC_FETCH_OP(xor, 4, unsigned int)

SYNC_OP_FETCH(xor, 1, unsigned char)
SYNC_OP_FETCH(xor, 2, short unsigned int)
SYNC_OP_FETCH(xor, 4, unsigned int)

SYNC_FETCH_OP(nand, 1, unsigned char)
SYNC_FETCH_OP(nand, 2, short unsigned int)
SYNC_FETCH_OP(nand, 4, unsigned int)

SYNC_OP_FETCH(nand, 1, unsigned char)
SYNC_OP_FETCH(nand, 2, short unsigned int)
SYNC_OP_FETCH(nand, 4, unsigned int)

SYNC_BOOL_CMP_EXCHANGE(1, unsigned char)
SYNC_BOOL_CMP_EXCHANGE(2, short unsigned int)
SYNC_BOOL_CMP_EXCHANGE(4, unsigned int)

SYNC_VAL_CMP_EXCHANGE(1, unsigned char)
SYNC_VAL_CMP_EXCHANGE(2, short unsigned int)
SYNC_VAL_CMP_EXCHANGE(4, unsigned int)

SYNC_LOCK_TEST_AND_SET(1, unsigned char)
SYNC_LOCK_TEST_AND_SET(2, short unsigned int)
SYNC_LOCK_TEST_AND_SET(4, unsigned int)

SYNC_LOCK_RELEASE(1, unsigned char)
SYNC_LOCK_RELEASE(2, short unsigned int)
SYNC_LOCK_RELEASE(4, unsigned int)

// LLVM has not implemented native atomic load/stores for RISC-V targets without the Atomic extension. LLVM thread: https://reviews.llvm.org/D47553.
// Even though GCC does transform them, these libcalls need to be available for the case where an LLVM-based project links against IDF.
ATOMIC_LOAD(1, unsigned char)
ATOMIC_LOAD(2, short unsigned int)
ATOMIC_LOAD(4, unsigned int)
ATOMIC_STORE(1, unsigned char)
ATOMIC_STORE(2, short unsigned int)
ATOMIC_STORE(4, unsigned int)

#elif __riscv_atomic == 1

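// With the RISC-V "A" extension present, objects up to the native word size are handled by the
// hardware atomic instructions, so they are reported as lock-free here. These helpers are
// presumably only reached by lock-free queries (e.g. C11 atomic_is_lock_free()) that the compiler
// could not fold at compile time.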
bool CLANG_ATOMIC_SUFFIX(__atomic_always_lock_free) (unsigned int size, const volatile void *ptr) {
    (void) ptr;
    return size <= sizeof(int);
}
CLANG_DECLARE_ALIAS( __atomic_always_lock_free)

bool CLANG_ATOMIC_SUFFIX(__atomic_is_lock_free) (unsigned int size, const volatile void *ptr) {
    (void) ptr;
    return size <= sizeof(int);
}
CLANG_DECLARE_ALIAS( __atomic_is_lock_free)

#endif // !HAS_ATOMICS_32

#if !HAS_ATOMICS_64

_Static_assert(sizeof(long long unsigned int) == 8, "atomics require an 8-byte type");

ATOMIC_EXCHANGE(8, long long unsigned int)

CMP_EXCHANGE(8, long long unsigned int)

FETCH_ADD(8, long long unsigned int)

FETCH_SUB(8, long long unsigned int)

FETCH_AND(8, long long unsigned int)

FETCH_OR(8, long long unsigned int)

FETCH_XOR(8, long long unsigned int)

FETCH_NAND(8, long long unsigned int)

ADD_FETCH(8, long long unsigned int)

SUB_FETCH(8, long long unsigned int)

AND_FETCH(8, long long unsigned int)

OR_FETCH(8, long long unsigned int)

XOR_FETCH(8, long long unsigned int)

NAND_FETCH(8, long long unsigned int)

SYNC_FETCH_OP(add, 8, long long unsigned int)

SYNC_FETCH_OP(sub, 8, long long unsigned int)

SYNC_FETCH_OP(and, 8, long long unsigned int)

SYNC_FETCH_OP(or, 8, long long unsigned int)

SYNC_FETCH_OP(xor, 8, long long unsigned int)

SYNC_FETCH_OP(nand, 8, long long unsigned int)

SYNC_OP_FETCH(add, 8, long long unsigned int)

SYNC_OP_FETCH(sub, 8, long long unsigned int)

SYNC_OP_FETCH(and, 8, long long unsigned int)

SYNC_OP_FETCH(or, 8, long long unsigned int)

SYNC_OP_FETCH(xor, 8, long long unsigned int)

SYNC_OP_FETCH(nand, 8, long long unsigned int)

SYNC_BOOL_CMP_EXCHANGE(8, long long unsigned int)

SYNC_VAL_CMP_EXCHANGE(8, long long unsigned int)

SYNC_LOCK_TEST_AND_SET(8, long long unsigned int)
SYNC_LOCK_RELEASE(8, long long unsigned int)

// LLVM has not implemented native atomic load/stores for RISC-V targets without the Atomic extension. LLVM thread: https://reviews.llvm.org/D47553.
// Even though GCC does transform them, these libcalls need to be available for the case where an LLVM-based project links against IDF.
ATOMIC_LOAD(8, long long unsigned int)
ATOMIC_STORE(8, long long unsigned int)

#endif // !HAS_ATOMICS_64

// Clang generates calls to the generic __atomic_load/__atomic_store functions for objects larger than 4 bytes.
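//
// For example (illustrative only, not taken from real generated code), an access to an oversized
// atomic object such as
//
//     struct payload { uint32_t a, b, c; };   // 12 bytes, larger than the native word
//     _Atomic struct payload shared;
//     struct payload copy = shared;           // lowered to __atomic_load(12, &shared, &copy, ...)
//
// may be lowered to calls of the size_t-based generic functions defined below.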
void CLANG_ATOMIC_SUFFIX( __atomic_load ) (size_t size, const volatile void *src, void *dest, int model) {
    unsigned state = _ATOMIC_ENTER_CRITICAL();
    memcpy(dest, (const void *)src, size);
    _ATOMIC_EXIT_CRITICAL(state);
}
CLANG_DECLARE_ALIAS( __atomic_load )

void CLANG_ATOMIC_SUFFIX( __atomic_store ) (size_t size, volatile void *dest, void *src, int model) {
    unsigned state = _ATOMIC_ENTER_CRITICAL();
    memcpy((void *)dest, (const void *)src, size);
    _ATOMIC_EXIT_CRITICAL(state);
}
CLANG_DECLARE_ALIAS( __atomic_store )

bool CLANG_ATOMIC_SUFFIX(__atomic_compare_exchange) (size_t size, volatile void *ptr, void *expected, void *desired, int success_memorder, int failure_memorder) {
    bool ret = false;
    unsigned state = _ATOMIC_ENTER_CRITICAL();
    if (!memcmp((void *)ptr, expected, size)) {
        memcpy((void *)ptr, (const void *)desired, size);
        ret = true;
    } else {
        memcpy((void *)expected, (const void *)ptr, size);
    }
    _ATOMIC_EXIT_CRITICAL(state);
    return ret;
}
CLANG_DECLARE_ALIAS( __atomic_compare_exchange )