/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdatomic.h>
#include "pico/sync.h"

// We use the __builtin_mem* intrinsics to avoid a libc dependency.
#define memcpy __builtin_memcpy
#define memcmp __builtin_memcmp

static inline uint32_t atomic_lock(__unused const volatile void *ptr) {
    uint32_t save = save_and_disable_interrupts();
    // __dmb(); not necessary on RP2040
    return save;
}

static inline void atomic_unlock(__unused const volatile void *ptr, uint32_t save) {
    // __dmb(); not necessary on RP2040
    restore_interrupts_from_disabled(save);
}
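
// Note: the pointer argument is unused -- every helper below shares a single
// critical section (interrupts disabled on the calling core) rather than a
// per-address lock, so the address serves only to document what is protected.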

#if PICO_C_COMPILER_IS_GNU

_Bool __atomic_test_and_set_c(volatile void *mem, __unused int model) {
    uint32_t save = atomic_lock(mem);
    bool result = *(volatile bool *) mem;
    *(volatile bool *) mem = true;
    atomic_unlock(mem, save);
    return result;
}
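
// A sketch of how this helper is reached (hypothetical caller): the compiler
// lowers atomic_flag_test_and_set() to a call to __atomic_test_and_set()
// when it cannot inline the operation.
//
//     static atomic_flag flag = ATOMIC_FLAG_INIT;
//     bool was_set = atomic_flag_test_and_set(&flag);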

#define __atomic_load_c __atomic_load
#define __atomic_store_c __atomic_store
#define __atomic_exchange_c __atomic_exchange
#define __atomic_compare_exchange_c __atomic_compare_exchange
#define __atomic_is_lock_free_c __atomic_is_lock_free
#else
// Clang objects if you redefine a builtin, so map the _c names onto the
// builtin symbols with #pragma redefine_extname instead.
#pragma redefine_extname __atomic_load_c __atomic_load
#pragma redefine_extname __atomic_store_c __atomic_store
#pragma redefine_extname __atomic_exchange_c __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
#pragma redefine_extname __atomic_is_lock_free_c __atomic_is_lock_free
#endif
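
// The size-generic entry points renamed above (__atomic_load, __atomic_store,
// __atomic_exchange, __atomic_compare_exchange) are what GCC and Clang call
// when an _Atomic object is too large or misaligned for inline code, so the
// implementations below become the fallback path for such objects.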

// Whether atomic operations for the given size (and alignment) are lock-free.
bool __atomic_is_lock_free_c(__unused size_t size, __unused const volatile void *ptr) {
#if !__ARM_ARCH_6M__
    if (size == 1 || size == 2 || size == 4) {
        size_t align = size - 1;
        return (((uintptr_t)ptr) & align) == 0;
    }
#endif
    return false;
}
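
// Presumably the reason ARMv6-M always reports "not lock-free": without
// LDREX/STREX, read-modify-write operations must fall back to the
// interrupt-disable path above, and lock-freedom must hold for every
// operation on the type, not just plain loads and stores.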

// An atomic load operation. This is atomic with respect to the source pointer only.
void __atomic_load_c(uint size, const volatile void *src, void *dest, __unused int model) {
    uint32_t save = atomic_lock(src);
    memcpy(dest, remove_volatile_cast_no_barrier(const void *, src), size);
    atomic_unlock(src, save);
}

// An atomic store operation. This is atomic with respect to the destination
// pointer only.
void __atomic_store_c(uint size, volatile void *dest, void *src, __unused int model) {
    uint32_t save = atomic_lock(dest);
    memcpy(remove_volatile_cast_no_barrier(void *, dest), src, size);
    atomic_unlock(dest, save);
}

// Atomic compare and exchange operation. If the value at *ptr is identical
// to the value at *expected, then this copies the value at *desired to *ptr.
// If they are not, then this stores the current value from *ptr in *expected.
//
// This function returns 1 if the exchange takes place or 0 if it fails.
_Bool __atomic_compare_exchange_c(uint size, volatile void *ptr, void *expected,
                                  void *desired, __unused int success, __unused int failure) {
    uint32_t save = atomic_lock(ptr);
    if (memcmp(remove_volatile_cast_no_barrier(void *, ptr), expected, size) == 0) {
        memcpy(remove_volatile_cast_no_barrier(void *, ptr), desired, size);
        atomic_unlock(ptr, save);
        return 1;
    }
    memcpy(expected, remove_volatile_cast_no_barrier(void *, ptr), size);
    atomic_unlock(ptr, save);
    return 0;
}
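
// A sketch of a call that would land here (hypothetical 12-byte type, too
// large for the fixed-size helpers further down):
//
//     typedef struct { uint32_t a, b, c; } triple_t;
//     _Atomic triple_t obj;
//     triple_t expect = {0}, want = {1, 2, 3};
//     bool ok = atomic_compare_exchange_strong(&obj, &expect, want);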

// Performs an atomic exchange operation between two pointers. This is atomic
// with respect to the target address.
void __atomic_exchange_c(uint size, volatile void *ptr, void *val, void *old, __unused int model) {
    uint32_t save = atomic_lock(ptr);
    memcpy(old, remove_volatile_cast_no_barrier(void *, ptr), size);
    memcpy(remove_volatile_cast_no_barrier(void *, ptr), val, size);
    atomic_unlock(ptr, save);
}

#if __ARM_ARCH_6M__
#define ATOMIC_OPTIMIZED_CASES \
    ATOMIC_OPTIMIZED_CASE(1, uint8_t) \
    ATOMIC_OPTIMIZED_CASE(2, uint16_t) \
    ATOMIC_OPTIMIZED_CASE(4, uint) \
    ATOMIC_OPTIMIZED_CASE(8, uint64_t)
#else
#define ATOMIC_OPTIMIZED_CASES \
    ATOMIC_OPTIMIZED_CASE(8, uint64_t)
#endif
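
// Presumably the split above reflects what the compiler can inline: ARMv7-M
// and later have LDREX/STREX, so 1-, 2- and 4-byte atomics are generated
// inline and only the 8-byte helpers are needed out of line, whereas ARMv6-M
// (e.g. the RP2040's Cortex-M0+) has no exclusive accesses and needs helpers
// for every size.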

#define ATOMIC_OPTIMIZED_CASE(n, type) \
type __atomic_load_##n(const volatile void *src, __unused int memorder) { \
    uint32_t save = atomic_lock(src); \
    type val = *(const volatile type *)src; \
    atomic_unlock(src, save); \
    return val; \
}
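
// For reference, the n = 8 instantiation below expands to:
//
//     uint64_t __atomic_load_8(const volatile void *src, __unused int memorder) {
//         uint32_t save = atomic_lock(src);
//         uint64_t val = *(const volatile uint64_t *)src;
//         atomic_unlock(src, save);
//         return val;
//     }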

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type) \
void __atomic_store_##n(volatile void *dest, type val, __unused int model) { \
    uint32_t save = atomic_lock(dest); \
    *(volatile type *)dest = val; \
    atomic_unlock(dest, save); \
}

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type) \
bool __atomic_compare_exchange_##n(volatile void *ptr, void *expected, type desired, \
                                   __unused bool weak, __unused int success, __unused int failure) { \
    uint32_t save = atomic_lock(ptr); \
    if (*(volatile type *)ptr == *(type *)expected) { \
        *(volatile type *)ptr = desired; \
        atomic_unlock(ptr, save); \
        return true; \
    } \
    *(type *)expected = *(volatile type *)ptr; \
    atomic_unlock(ptr, save); \
    return false; \
}

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type) \
type __atomic_exchange_##n(volatile void *dest, type val, __unused int model) { \
    uint32_t save = atomic_lock(dest); \
    type tmp = *(volatile type *)dest; \
    *(volatile type *)dest = val; \
    atomic_unlock(dest, save); \
    return tmp; \
}

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

// Atomic read-modify-write operations for integers of various sizes.

#define ATOMIC_RMW(n, type, opname, op) \
type __atomic_fetch_##opname##_##n(volatile void *ptr, type val, __unused int model) { \
    uint32_t save = atomic_lock(ptr); \
    type tmp = *(volatile type *)ptr; \
    *(volatile type *)ptr = tmp op val; \
    atomic_unlock(ptr, save); \
    return tmp; \
}
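
// For reference, ATOMIC_RMW(8, uint64_t, add, +) below expands to
// __atomic_fetch_add_8(), which returns the old value and stores
// (old + val), matching the fetch-then-modify contract of
// atomic_fetch_add().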

#define ATOMIC_RMW_NAND(n, type) \
type __atomic_fetch_nand_##n(type *ptr, type val, __unused int model) { \
    uint32_t save = atomic_lock(ptr); \
    type tmp = *ptr; \
    *ptr = ~(tmp & val); \
    atomic_unlock(ptr, save); \
    return tmp; \
}

#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, add, +)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, sub, -)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, and, &)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, or, |)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, xor, ^)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

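// Presumably __has_builtin(__c11_atomic_fetch_nand) is true only for Clang
// (GCC does not provide the __c11_atomic_* builtins), so the nand helpers
// below are only built for compilers that expose that builtin.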
#if __has_builtin(__c11_atomic_fetch_nand)
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW_NAND(n, type)
ATOMIC_OPTIMIZED_CASES
#undef ATOMIC_OPTIMIZED_CASE
#endif