/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2011-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file Atomic ops in pure C
 *
 * This module provides the atomic operators for processors
 * which do not support native atomic operations.
 *
 * The atomic operations are guaranteed to be atomic with respect
 * to interrupt service routines, and to operations performed by peer
 * processors.
 *
 * (originally from x86's atomic.c)
 */

#include <zephyr/toolchain.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/kernel_structs.h>

/* Single global spinlock for atomic operations.  This is fallback
 * code, not performance sensitive.  At least by not using irq_lock()
 * in SMP contexts we won't contend with legitimate users of the
 * global lock.
 */
static struct k_spinlock lock;

/* For those rare CPUs which support user mode, but not native atomic
 * operations, the best we can do for them is implement the atomic
 * functions as system calls, since in user mode locking a spinlock is
 * forbidden.
 */
#ifdef CONFIG_USERSPACE
#include <zephyr/internal/syscall_handler.h>

#define ATOMIC_SYSCALL_HANDLER_TARGET(name) \
	static inline atomic_val_t z_vrfy_##name(atomic_t *target) \
	{								\
		K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
		return z_impl_##name((atomic_t *)target); \
	}

#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) \
	static inline atomic_val_t z_vrfy_##name(atomic_t *target, \
						 atomic_val_t value) \
	{								\
		K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
		return z_impl_##name((atomic_t *)target, value); \
	}
#else
#define ATOMIC_SYSCALL_HANDLER_TARGET(name)
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
#endif
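
/*
 * For illustration, ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add) above
 * expands to roughly the following verification stub (a sketch of the
 * expansion, not an extra definition in this file):
 *
 *	static inline atomic_val_t z_vrfy_atomic_add(atomic_t *target,
 *						      atomic_val_t value)
 *	{
 *		K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
 *		return z_impl_atomic_add((atomic_t *)target, value);
 *	}
 *
 * i.e. the handler validates that the calling user thread may write the
 * word at <target> before forwarding to the kernel-mode implementation.
 */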

/**
 *
 * @brief Atomic compare-and-set primitive
 *
 * This routine provides the compare-and-set operator. If the original value at
 * <target> equals <old_value>, then <new_value> is stored at <target> and the
 * function returns true.
 *
 * If the original value at <target> does not equal <old_value>, then the store
 * is not done and the function returns false.
 *
 * The reading of the original value at <target>, the comparison,
 * and the write of the new value (if it occurs) all happen atomically with
 * respect to both interrupts and accesses of other processors to <target>.
 *
 * @param target address to be tested
 * @param old_value value to compare against
 * @param new_value value to store if the comparison succeeds
 * @return Returns true if <new_value> is written, false otherwise.
 */
bool z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	k_spinlock_key_t key;
	bool ret = false;

	/*
	 * On SMP the k_spin_lock() definition calls atomic_cas().
	 * Using k_spin_lock() here would create infinite recursion and
	 * massive stack overflow. Consider CONFIG_ATOMIC_OPERATIONS_ARCH
	 * or CONFIG_ATOMIC_OPERATIONS_BUILTIN instead.
	 */
	BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP));

	key = k_spin_lock(&lock);

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}

	k_spin_unlock(&lock, key);

	return ret;
}
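
/*
 * Note for SMP targets: this file cannot serve as the atomic backend there
 * (see the BUILD_ASSERT above).  The usual alternative is one of the
 * options named in the comment, for example (sketch of a possible Kconfig
 * selection; how the choice is actually wired up depends on the
 * architecture):
 *
 *	CONFIG_ATOMIC_OPERATIONS_BUILTIN=y
 */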

#ifdef CONFIG_USERSPACE
bool z_vrfy_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));

	return z_impl_atomic_cas((atomic_t *)target, old_value, new_value);
}
#include <syscalls/atomic_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */
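
/*
 * Illustrative use of the CAS primitive from application code (a sketch
 * only; "init_done" and do_init() are hypothetical, not part of this file):
 *
 *	static atomic_t init_done = ATOMIC_INIT(0);
 *
 *	void lazy_init(void)
 *	{
 *		if (atomic_cas(&init_done, 0, 1)) {
 *			do_init();	// exactly one caller wins the race
 *		}
 *	}
 */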

bool z_impl_atomic_ptr_cas(atomic_ptr_t *target, atomic_ptr_val_t old_value,
			   atomic_ptr_val_t new_value)
{
	k_spinlock_key_t key;
	bool ret = false;

	key = k_spin_lock(&lock);

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}

	k_spin_unlock(&lock, key);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline bool z_vrfy_atomic_ptr_cas(atomic_ptr_t *target,
					 atomic_ptr_val_t old_value,
					 atomic_ptr_val_t new_value)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));

	return z_impl_atomic_ptr_cas(target, old_value, new_value);
}
#include <syscalls/atomic_ptr_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */

/**
 *
 * @brief Atomic addition primitive
 *
 * This routine provides the atomic addition operator. The <value> is
 * atomically added to the value at <target>, placing the result at <target>,
 * and the old value from <target> is returned.
 *
 * @param target memory location to add to
 * @param value the value to add
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target += value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add);
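
/*
 * Illustrative use (sketch; "rx_bytes" and "len" are hypothetical, not part
 * of this file): an ISR can account for received data while other contexts
 * keep reading a consistent running total.
 *
 *	static atomic_t rx_bytes = ATOMIC_INIT(0);
 *
 *	// in the ISR
 *	atomic_add(&rx_bytes, (atomic_val_t)len);
 */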

/**
 *
 * @brief Atomic subtraction primitive
 *
 * This routine provides the atomic subtraction operator. The <value> is
 * atomically subtracted from the value at <target>, placing the result at
 * <target>, and the old value from <target> is returned.
 *
 * @param target the memory location to subtract from
 * @param value the value to subtract
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_sub(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target -= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_sub);
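
/*
 * Illustrative use of the returned old value (sketch; "refcount" and
 * release_object() are hypothetical): because atomic_sub() returns the
 * value from *before* the subtraction, the caller that observes 1 knows it
 * just dropped the last reference.
 *
 *	if (atomic_sub(&refcount, 1) == 1) {
 *		release_object();
 *	}
 */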

/**
 *
 * @brief Atomic get primitive
 *
 * This routine provides the atomic get primitive to atomically read
 * a value from <target>. It simply does an ordinary load.  Note that <target>
 * is expected to be aligned to a 4-byte boundary.
 *
 * @param target memory location to read from
 *
 * @return The value read from <target>
 */
atomic_val_t atomic_get(const atomic_t *target)
{
	return *target;
}

atomic_ptr_val_t atomic_ptr_get(const atomic_ptr_t *target)
{
	return *target;
}
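
/*
 * Illustrative read side for the atomic_add() sketch above ("rx_bytes" is
 * hypothetical): a thread can sample the counter without taking the lock
 * because the read is a single aligned word load.
 *
 *	atomic_val_t total = atomic_get(&rx_bytes);
 */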

/**
 *
 * @brief Atomic get-and-set primitive
 *
 * This routine provides the atomic set operator. The <value> is atomically
 * written at <target> and the previous value at <target> is returned.
 *
 * @param target the memory location to write to
 * @param value the value to write
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_set(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_set);
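
/*
 * Illustrative use of the get-and-set semantics (sketch; "error_count" is
 * hypothetical): reading and clearing a statistic in one atomic step, so no
 * concurrent update can be lost between the read and the reset.
 *
 *	atomic_val_t errors = atomic_set(&error_count, 0);
 */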

atomic_ptr_val_t z_impl_atomic_ptr_set(atomic_ptr_t *target,
				       atomic_ptr_val_t value)
{
	k_spinlock_key_t key;
	atomic_ptr_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = value;

	k_spin_unlock(&lock, key);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline atomic_ptr_val_t z_vrfy_atomic_ptr_set(atomic_ptr_t *target,
						     atomic_ptr_val_t value)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));

	return z_impl_atomic_ptr_set(target, value);
}
#include <syscalls/atomic_ptr_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

/**
 *
 * @brief Atomic bitwise inclusive OR primitive
 *
 * This routine provides the atomic bitwise inclusive OR operator. The <value>
 * is atomically bitwise OR'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to OR
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_or(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target |= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_or);
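
/*
 * Illustrative use (sketch; "flags" and DATA_READY are hypothetical, BIT()
 * is the usual Zephyr helper assumed available in the caller): OR is the
 * typical way to set event bits that another context later consumes.
 *
 *	atomic_or(&flags, BIT(DATA_READY));
 */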

/**
 *
 * @brief Atomic bitwise exclusive OR (XOR) primitive
 *
 * This routine provides the atomic bitwise exclusive OR operator. The <value>
 * is atomically bitwise XOR'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to XOR
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_xor(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target ^= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_xor);
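
/*
 * Illustrative use (sketch; "led_state" and LED0 are hypothetical): XOR
 * toggles the selected bit while leaving every other bit unchanged.
 *
 *	atomic_xor(&led_state, BIT(LED0));
 */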

/**
 *
 * @brief Atomic bitwise AND primitive
 *
 * This routine provides the atomic bitwise AND operator. The <value> is
 * atomically bitwise AND'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to AND
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_and(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target &= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_and);
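
/*
 * Illustrative use (sketch; "flags" and DATA_READY are hypothetical): AND
 * with the complement of a mask clears those bits, and the returned old
 * value tells the caller whether the bit had been set.
 *
 *	if (atomic_and(&flags, ~BIT(DATA_READY)) & BIT(DATA_READY)) {
 *		// the bit was set before we cleared it
 *	}
 */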

/**
 *
 * @brief Atomic bitwise NAND primitive
 *
 * This routine provides the atomic bitwise NAND operator. The <value> is
 * atomically bitwise NAND'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to NAND
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_nand(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = ~(*target & value);

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);
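
/*
 * Worked example of the NAND semantics (values are illustrative, assuming a
 * 32-bit atomic_val_t): with an old value of 0x0000000C and <value> of
 * 0x0000000A, the stored result is ~(0x0000000C & 0x0000000A) = ~0x00000008
 * = 0xFFFFFFF7, and the old value 0x0000000C is returned.
 */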

#ifdef CONFIG_USERSPACE
#include <syscalls/atomic_add_mrsh.c>
#include <syscalls/atomic_sub_mrsh.c>
#include <syscalls/atomic_set_mrsh.c>
#include <syscalls/atomic_or_mrsh.c>
#include <syscalls/atomic_xor_mrsh.c>
#include <syscalls/atomic_and_mrsh.c>
#include <syscalls/atomic_nand_mrsh.c>
#endif