/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
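/*
 * For illustration only (not part of this header): a minimal
 * message-passing sketch using the _release/_acquire variants. The
 * functions and the 'data'/'ready' variables below are hypothetical.
 *
 *	static int data;
 *	static atomic_t ready = ATOMIC_INIT(0);
 *
 *	static void producer(void)
 *	{
 *		data = 42;
 *		atomic_set_release(&ready, 1);	// RELEASE: orders the
 *						// store to data before
 *						// the store to ready
 *	}
 *
 *	static void consumer(void)
 *	{
 *		if (atomic_read_acquire(&ready))	// ACQUIRE: orders the
 *			WARN_ON(data != 42);		// load of ready before
 *							// the load of data
 *	}
 */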

#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
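/*
 * For illustration only: spin until another CPU makes the value nonzero,
 * with ACQUIRE ordering on the final load. Inside the condition, VAL is
 * bound to the current value of the atomic (see smp_cond_load_acquire()
 * in asm/barrier.h). The variable 'v' below is hypothetical.
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *	...
 *	atomic_cond_read_acquire(&v, VAL != 0);
 */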

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture overrides __atomic_acquire_fence() it will probably
 * want to define smp_mb__after_spinlock().
 */
#ifndef __atomic_acquire_fence
#define __atomic_acquire_fence		smp_mb__after_atomic
#endif

#ifndef __atomic_release_fence
#define __atomic_release_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence	smp_mb__after_atomic
#endif
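/*
 * A sketch of an architecture override, loosely modeled on arch/riscv
 * (assumption: RISCV_ACQUIRE_BARRIER expands to a "fence"-style
 * instruction string). An architecture whose _relaxed atomics can be
 * upgraded with something cheaper than a full smp_mb() may provide:
 *
 *	#define __atomic_acquire_fence()				\
 *		__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
 */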

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	__atomic_pre_full_fence();					\
	__ret = op##_relaxed(args);					\
	__atomic_post_full_fence();					\
	__ret;								\
})
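/*
 * For illustration: the generated fallback headers included below use
 * these helpers to derive the ordered variants from the _relaxed ones
 * when an architecture only provides the latter. A sketch of what the
 * generated code amounts to:
 *
 *	static __always_inline int
 *	atomic_fetch_add_acquire(int i, atomic_t *v)
 *	{
 *		return __atomic_op_acquire(atomic_fetch_add, i, v);
 *	}
 */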

#ifdef ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>
#else
#include <linux/atomic-fallback.h>
#endif
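/*
 * With ARCH_ATOMIC the architecture supplies arch_atomic_*() operations
 * and the instrumented wrappers layer sanitizer checks (KASAN/KCSAN) on
 * top. A sketch of such a wrapper, in the style of
 * asm-generic/atomic-instrumented.h:
 *
 *	static __always_inline int
 *	atomic_read(const atomic_t *v)
 *	{
 *		instrument_atomic_read(v, sizeof(*v));
 *		return arch_atomic_read(v);
 *	}
 */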

#include <asm-generic/atomic-long.h>
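/*
 * asm-generic/atomic-long.h provides atomic_long_t, which wraps
 * atomic64_t on 64-bit kernels and atomic_t on 32-bit ones. A sketch of
 * its use; the 'nr_items' variable below is hypothetical:
 *
 *	static atomic_long_t nr_items = ATOMIC_LONG_INIT(0);
 *	...
 *	atomic_long_inc(&nr_items);
 *	pr_info("items: %ld\n", atomic_long_read(&nr_items));
 */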

#endif /* _LINUX_ATOMIC_H */