/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdatomic.h>
#include "pico/sync.h"

// We use __builtin_mem* to avoid a libc dependency.
#define memcpy __builtin_memcpy
#define memcmp __builtin_memcmp

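// Every helper in this file is serialized through a single hardware spin lock
// (PICO_SPINLOCK_ID_ATOMIC). spin_lock_blocking() disables interrupts on the
// calling core and returns the saved interrupt state, which atomic_unlock()
// restores via spin_unlock(), so each critical section is safe against both
// the other core and local interrupt handlers. The pointer argument is
// accepted but unused: one lock guards every atomic object.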
static inline uint32_t atomic_lock(__unused const volatile void *ptr) {
    return spin_lock_blocking(spin_lock_instance(PICO_SPINLOCK_ID_ATOMIC));
}

static inline void atomic_unlock(__unused const volatile void *ptr, uint32_t save) {
    spin_unlock(spin_lock_instance(PICO_SPINLOCK_ID_ATOMIC), save);
}

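// The generic helpers below are written as __atomic_*_c and then exposed under
// the names the compiler actually calls (__atomic_load, __atomic_store, ...).
// With GCC this is done by macro-renaming the definitions; Clang refuses to
// let a builtin name be redefined directly, so the externally visible symbols
// are renamed with #pragma redefine_extname instead.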
#if PICO_C_COMPILER_IS_GNU

_Bool __atomic_test_and_set_c(volatile void *mem, __unused int model) {
    uint32_t save = atomic_lock(mem);
    bool result = *(volatile bool *) mem;
    *(volatile bool *) mem = true;
    atomic_unlock(mem, save);
    return result;
}
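// Illustrative sketch (not part of the library): a C11 spin flag such as
//
//     static atomic_flag busy = ATOMIC_FLAG_INIT;
//     while (atomic_flag_test_and_set(&busy)) tight_loop_contents();
//
// relies on exactly the semantics above: read the old value, unconditionally
// store true, and report whether the flag was already set. Which out-of-line
// __atomic_* symbol the compiler emits for this depends on the toolchain.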

#define __atomic_load_c __atomic_load
#define __atomic_store_c __atomic_store
#define __atomic_exchange_c __atomic_exchange
#define __atomic_compare_exchange_c __atomic_compare_exchange
#define __atomic_is_lock_free_c __atomic_is_lock_free
#else
// Clang objects if you redefine a builtin, so rename the external symbols via pragma instead.
#pragma redefine_extname __atomic_load_c __atomic_load
#pragma redefine_extname __atomic_store_c __atomic_store
#pragma redefine_extname __atomic_exchange_c __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
#pragma redefine_extname __atomic_is_lock_free_c __atomic_is_lock_free
#endif

// Whether atomic operations for the given size (and alignment) are lock-free.
bool __atomic_is_lock_free_c(__unused size_t size, __unused const volatile void *ptr) {
#if !__ARM_ARCH_6M__
    if (size == 1 || size == 2 || size == 4) {
        size_t align = size - 1;
        return (((uintptr_t)ptr) & align) == 0;
    }
#endif
    return false;
}
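// Illustrative sketch: C11 code can query this at run time, e.g.
//
//     atomic_uint counter;
//     if (atomic_is_lock_free(&counter)) { /* fast path */ }
//
// When the compiler cannot fold the query at compile time it emits a call to
// the helper above. On Armv6-M (e.g. RP2040) every size reports false because
// the helpers in this file use a spin lock; on cores with native atomics,
// naturally aligned 1-, 2- and 4-byte objects report true.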

// An atomic load operation.  This is atomic with respect to the source pointer only.
void __atomic_load_c(uint size, const volatile void *src, void *dest, __unused int model) {
    uint32_t save = atomic_lock(src);
    memcpy(dest, remove_volatile_cast_no_barrier(const void *, src), size);
    atomic_unlock(src, save);
}

// An atomic store operation.  This is atomic with respect to the destination
// pointer only.
void __atomic_store_c(uint size, volatile void *dest, void *src, __unused int model) {
    uint32_t save = atomic_lock(dest);
    memcpy(remove_volatile_cast_no_barrier(void *, dest), src, size);
    atomic_unlock(dest, save);
}

// Atomic compare and exchange operation.  If the value at *ptr is identical
// to the value at *expected, then this copies the value at *desired to *ptr.
// If they are not equal, then this stores the current value from *ptr in
// *expected.
//
// This function returns 1 if the exchange takes place or 0 if it fails.
_Bool __atomic_compare_exchange_c(uint size, volatile void *ptr, void *expected,
                                  void *desired, __unused int success, __unused int failure) {
    uint32_t save = atomic_lock(ptr);
    if (memcmp(remove_volatile_cast_no_barrier(void *, ptr), expected, size) == 0) {
        memcpy(remove_volatile_cast_no_barrier(void *, ptr), desired, size);
        atomic_unlock(ptr, save);
        return 1;
    }
    memcpy(expected, remove_volatile_cast_no_barrier(void *, ptr), size);
    atomic_unlock(ptr, save);
    return 0;
}

// Performs an atomic exchange operation between two pointers.  This is atomic
// with respect to the target address.
void __atomic_exchange_c(uint size, volatile void *ptr, void *val, void *old, __unused int model) {
    uint32_t save = atomic_lock(ptr);
    memcpy(old, remove_volatile_cast_no_barrier(void *, ptr), size);
    memcpy(remove_volatile_cast_no_barrier(void *, ptr), val, size);
    atomic_unlock(ptr, save);
}
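// Illustrative sketch: the generic *_c routines above are what the compiler
// falls back to for _Atomic objects whose size has no fixed-width variant,
// for example
//
//     typedef struct { uint32_t seq; uint8_t payload[8]; } record_t;  // 12 bytes
//     _Atomic record_t shared;
//     record_t snapshot = atomic_load(&shared);  // lowers to the generic __atomic_load()
//
// The struct name and layout are invented purely for illustration.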

#if __ARM_ARCH_6M__
#define ATOMIC_OPTIMIZED_CASES       \
  ATOMIC_OPTIMIZED_CASE(1, uint8_t)  \
  ATOMIC_OPTIMIZED_CASE(2, uint16_t) \
  ATOMIC_OPTIMIZED_CASE(4, uint)     \
  ATOMIC_OPTIMIZED_CASE(8, uint64_t)
#else
#define ATOMIC_OPTIMIZED_CASES \
  ATOMIC_OPTIMIZED_CASE(8, uint64_t)
#endif
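// ATOMIC_OPTIMIZED_CASES is an X-macro: each block below defines
// ATOMIC_OPTIMIZED_CASE(n, type) and then expands the list to stamp out one
// fixed-size helper per entry. On Armv6-M the compiler has no native atomic
// instructions, so helpers are needed for every size; other cores only need
// the 8-byte (64-bit) versions.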

#define ATOMIC_OPTIMIZED_CASE(n, type)                                        \
  type __atomic_load_##n(const volatile void *src, __unused int memorder) {   \
    uint32_t save = atomic_lock(src);                                         \
    type val = *(const volatile type *)src;                                   \
    atomic_unlock(src, save);                                                 \
    return val;                                                               \
  }

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type)                                        \
  void __atomic_store_##n(volatile void *dest, type val, __unused int model) { \
    uint32_t save = atomic_lock(dest);                                        \
    *(volatile type *)dest = val;                                             \
    atomic_unlock(dest, save);                                                \
  }

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type)                                        \
  bool __atomic_compare_exchange_##n(volatile void *ptr, void *expected, type desired, \
                                     __unused bool weak, __unused int success, __unused int failure) { \
    uint32_t save = atomic_lock(ptr);                                         \
    if (*(volatile type *)ptr == *(type *)expected) {                         \
      *(volatile type *)ptr = desired;                                        \
      atomic_unlock(ptr, save);                                               \
      return true;                                                            \
    }                                                                         \
    *(type *)expected = *(volatile type *)ptr;                                \
    atomic_unlock(ptr, save);                                                 \
    return false;                                                             \
  }
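// The 'weak' flag above is ignored: the helper runs under the spin lock and
// can never fail spuriously, so weak and strong compare-exchange behave
// identically here, which is permitted behaviour for a weak CAS.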

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type)                                        \
  type __atomic_exchange_##n(volatile void *dest, type val, __unused int model) { \
    uint32_t save = atomic_lock(dest);                                        \
    type tmp = *(volatile type *)dest;                                        \
    *(volatile type *)dest = val;                                             \
    atomic_unlock(dest, save);                                                \
    return tmp;                                                               \
  }

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

// Atomic read-modify-write operations for integers of various sizes.

#define ATOMIC_RMW(n, type, opname, op)                                       \
  type __atomic_fetch_##opname##_##n(volatile void *ptr, type val, __unused int model) { \
    uint32_t save = atomic_lock(ptr);                                         \
    type tmp = *(volatile type *)ptr;                                         \
    *(volatile type *)ptr = tmp op val;                                       \
    atomic_unlock(ptr, save);                                                 \
    return tmp;                                                               \
  }

#define ATOMIC_RMW_NAND(n, type)                                              \
  type __atomic_fetch_nand_##n(type *ptr, type val, __unused int model) {     \
    uint32_t save = atomic_lock(ptr);                                         \
    type tmp = *ptr;                                                          \
    *ptr = ~(tmp & val);                                                      \
    atomic_unlock(ptr, save);                                                 \
    return tmp;                                                               \
  }
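// Each redefinition of ATOMIC_OPTIMIZED_CASE below instantiates one operation
// across all required sizes; for example, ATOMIC_RMW(4, uint, add, +) produces
// __atomic_fetch_add_4(), which returns the previous value and stores
// old + val. __atomic_fetch_nand follows the usual builtin definition and
// stores ~(old & val).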

#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, add, +)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, sub, -)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, and, &)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, or, |)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, xor, ^)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#if __has_builtin(__c11_atomic_fetch_nand)
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW_NAND(n, type)
ATOMIC_OPTIMIZED_CASES
#undef ATOMIC_OPTIMIZED_CASE
#endif
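// Illustrative sketch (not part of the library): with these helpers linked in,
// plain C11 code such as
//
//     static _Atomic uint64_t event_count;
//     void on_event(void) { atomic_fetch_add(&event_count, 1); }
//
// works even though the core has no 64-bit atomic instructions: the compiler
// lowers the call to __atomic_fetch_add_8(), which is defined above. The
// names event_count/on_event are invented for the example.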