/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2011-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file Atomic ops in pure C
 *
 * This module provides the atomic operators for processors
 * which do not support native atomic operations.
 *
 * The atomic operations are guaranteed to be atomic with respect
 * to interrupt service routines, and to operations performed by peer
 * processors.
 *
 * (originally from x86's atomic.c)
 */

#include <toolchain.h>
#include <arch/cpu.h>
#include <spinlock.h>
#include <sys/atomic.h>
#include <kernel_structs.h>

/* Single global spinlock for atomic operations.  This is fallback
 * code, not performance sensitive.  At least by not using irq_lock()
 * in SMP contexts we won't contend with legitimate users of the
 * global lock.
 */
static struct k_spinlock lock;

/* For those rare CPUs which support user mode but not native atomic
 * operations, the best we can do is implement the atomic functions as
 * system calls, since locking a spinlock is forbidden in user mode.
 */
#ifdef CONFIG_USERSPACE
#include <syscall_handler.h>

#define ATOMIC_SYSCALL_HANDLER_TARGET(name) \
	static inline atomic_val_t z_vrfy_##name(atomic_t *target) \
	{								\
		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
		return z_impl_##name((atomic_t *)target); \
	}

#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) \
	static inline atomic_val_t z_vrfy_##name(atomic_t *target, \
						 atomic_val_t value) \
	{								\
		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
		return z_impl_##name((atomic_t *)target, value); \
	}
#else
#define ATOMIC_SYSCALL_HANDLER_TARGET(name)
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
#endif
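
/* For illustration only: with CONFIG_USERSPACE enabled, an invocation
 * such as ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add) expands to
 * roughly the following verification stub (sketch shown as a comment,
 * not additional code in this file):
 *
 *	static inline atomic_val_t z_vrfy_atomic_add(atomic_t *target,
 *						      atomic_val_t value)
 *	{
 *		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
 *		return z_impl_atomic_add((atomic_t *)target, value);
 *	}
 */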

/**
 *
 * @brief Atomic compare-and-set primitive
 *
 * This routine provides the compare-and-set operator. If the original value at
 * <target> equals <old_value>, then <new_value> is stored at <target> and the
 * function returns true.
 *
 * If the original value at <target> does not equal <old_value>, then the store
 * is not done and the function returns false.
 *
 * The reading of the original value at <target>, the comparison,
 * and the write of the new value (if it occurs) all happen atomically with
 * respect to both interrupts and accesses of other processors to <target>.
 *
 * @param target address to be tested
 * @param old_value value to compare against
 * @param new_value value to be stored if the comparison succeeds
 * @return Returns true if <new_value> is written, false otherwise.
 */
bool z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	k_spinlock_key_t key;
	bool ret = false;

	key = k_spin_lock(&lock);

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}

	k_spin_unlock(&lock, key);

	return ret;
}

#ifdef CONFIG_USERSPACE
bool z_vrfy_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));

	return z_impl_atomic_cas((atomic_t *)target, old_value, new_value);
}
#include <syscalls/atomic_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */
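
/* Illustrative usage sketch (not part of this file's API): callers
 * typically wrap atomic_cas() in a retry loop to build lock-free
 * updates.  The names "my_counter" and "MY_LIMIT" below are
 * hypothetical.
 *
 *	static atomic_t my_counter = ATOMIC_INIT(0);
 *
 *	static bool bounded_increment(void)
 *	{
 *		atomic_val_t old;
 *
 *		do {
 *			old = atomic_get(&my_counter);
 *			if (old >= MY_LIMIT) {
 *				return false;
 *			}
 *		} while (!atomic_cas(&my_counter, old, old + 1));
 *
 *		return true;
 *	}
 */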

bool z_impl_atomic_ptr_cas(atomic_ptr_t *target, atomic_ptr_val_t old_value,
			   atomic_ptr_val_t new_value)
{
	k_spinlock_key_t key;
	bool ret = false;

	key = k_spin_lock(&lock);

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}

	k_spin_unlock(&lock, key);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline bool z_vrfy_atomic_ptr_cas(atomic_ptr_t *target,
					 atomic_ptr_val_t old_value,
					 atomic_ptr_val_t new_value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));

	return z_impl_atomic_ptr_cas(target, old_value, new_value);
}
#include <syscalls/atomic_ptr_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */

/**
 *
 * @brief Atomic addition primitive
 *
 * This routine provides the atomic addition operator. The <value> is
 * atomically added to the value at <target>, placing the result at <target>,
 * and the old value from <target> is returned.
 *
 * @param target memory location to add to
 * @param value the value to add
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target += value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add);

/**
 *
 * @brief Atomic subtraction primitive
 *
 * This routine provides the atomic subtraction operator. The <value> is
 * atomically subtracted from the value at <target>, placing the result at
 * <target>, and the old value from <target> is returned.
 *
 * @param target the memory location to subtract from
 * @param value the value to subtract
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_sub(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target -= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_sub);

/**
 *
 * @brief Atomic get primitive
 *
 * This routine provides the atomic get primitive to atomically read
 * a value from <target>. It simply does an ordinary load.  Note that <target>
 * is expected to be aligned to a 4-byte boundary.
 *
 * @param target memory location to read from
 *
 * @return The value read from <target>
 */
atomic_val_t atomic_get(const atomic_t *target)
{
	return *target;
}

atomic_ptr_val_t atomic_ptr_get(const atomic_ptr_t *target)
{
	return *target;
}

/**
 *
 * @brief Atomic get-and-set primitive
 *
 * This routine provides the atomic set operator. The <value> is atomically
 * written at <target> and the previous value at <target> is returned.
 *
 * @param target the memory location to write to
 * @param value the value to write
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_set(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_set);

atomic_ptr_val_t z_impl_atomic_ptr_set(atomic_ptr_t *target,
				       atomic_ptr_val_t value)
{
	k_spinlock_key_t key;
	atomic_ptr_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = value;

	k_spin_unlock(&lock, key);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline atomic_ptr_val_t z_vrfy_atomic_ptr_set(atomic_ptr_t *target,
						     atomic_ptr_val_t value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));

	return z_impl_atomic_ptr_set(target, value);
}
#include <syscalls/atomic_ptr_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

/**
 *
 * @brief Atomic bitwise inclusive OR primitive
 *
 * This routine provides the atomic bitwise inclusive OR operator. The <value>
 * is atomically bitwise OR'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to OR
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_or(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target |= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_or);

/**
 *
 * @brief Atomic bitwise exclusive OR (XOR) primitive
 *
 * This routine provides the atomic bitwise exclusive OR operator. The <value>
 * is atomically bitwise XOR'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to XOR
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_xor(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target ^= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_xor);

/**
 *
 * @brief Atomic bitwise AND primitive
 *
 * This routine provides the atomic bitwise AND operator. The <value> is
 * atomically bitwise AND'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to AND
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_and(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target &= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_and);

/**
 *
 * @brief Atomic bitwise NAND primitive
 *
 * This routine provides the atomic bitwise NAND operator. The <value> is
 * atomically bitwise NAND'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to NAND
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_nand(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = ~(*target & value);

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);
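
/* Worked example for the NAND semantics above (assuming a 32-bit
 * atomic_val_t): with *target == 0x0F and value == 0x03, the value
 * stored back is ~(0x0F & 0x03) == ~0x03 == 0xFFFFFFFC, and the
 * returned previous value is 0x0F.
 */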

#ifdef CONFIG_USERSPACE
#include <syscalls/atomic_add_mrsh.c>
#include <syscalls/atomic_sub_mrsh.c>
#include <syscalls/atomic_set_mrsh.c>
#include <syscalls/atomic_or_mrsh.c>
#include <syscalls/atomic_xor_mrsh.c>
#include <syscalls/atomic_and_mrsh.c>
#include <syscalls/atomic_nand_mrsh.c>
#endif