/**
 * Copyright (c) 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_
#define ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_

/* Included from <sys/atomic.h> */

/* Recent GCC versions actually do have working atomics support on
 * Xtensa (and so should work with CONFIG_ATOMIC_OPERATIONS_BUILTIN),
 * but existing versions of Xtensa's XCC do not.  So we define an
 * inline implementation here that is more or less identical to the
 * compiler builtins.
 */
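
/* For comparison, the CONFIG_ATOMIC_OPERATIONS_BUILTIN path would
 * implement the load below roughly as (illustrative sketch, not part
 * of this header):
 *
 *	return __atomic_load_n(target, __ATOMIC_SEQ_CST);
 */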

static ALWAYS_INLINE atomic_val_t atomic_get(const atomic_t *target)
{
	atomic_val_t ret;

	/* Actual Xtensa hardware seems to have only in-order
	 * pipelines, but the architecture does define a barrier load,
	 * so use it.  There is a matching s32ri instruction, but
	 * nothing in the Zephyr API requires a barrier store (all the
	 * atomic write ops have exchange semantics).
	 */
	__asm__ volatile("l32ai %0, %1, 0"
			 : "=r"(ret) : "r"(target) : "memory");
	return ret;
}

static ALWAYS_INLINE
atomic_val_t xtensa_cas(atomic_t *addr, atomic_val_t oldval,
			atomic_val_t newval)
{
	__asm__ volatile("wsr %1, SCOMPARE1; s32c1i %0, %2, 0"
			 : "+r"(newval), "+r"(oldval) : "r"(addr) : "memory");

	return newval; /* got swapped with the old memory by s32c1i */
}

static ALWAYS_INLINE
bool atomic_cas(atomic_t *target, atomic_val_t oldval, atomic_val_t newval)
{
	return oldval == xtensa_cas(target, oldval, newval);
}
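
/* Illustrative usage (a sketch, not part of this header): claiming a
 * lock word exactly once across racing CPUs.  ATOMIC_INIT() is the
 * standard Zephyr initializer.
 *
 *	atomic_t lock = ATOMIC_INIT(0);
 *
 *	if (atomic_cas(&lock, 0, 1)) {
 *		// this context observed 0 and atomically stored 1
 *	}
 */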

static ALWAYS_INLINE
bool atomic_ptr_cas(atomic_ptr_t *target, void *oldval, void *newval)
{
	return (atomic_val_t) oldval
		== xtensa_cas((atomic_t *) target, (atomic_val_t) oldval,
			      (atomic_val_t) newval);
}

/* Generates an atomic exchange sequence that swaps the value at
 * address "target", whose old value is read to be "cur", with the
 * specified expression.  Evaluates to the old value which was
 * atomically replaced.
 */

#define Z__GEN_ATOMXCHG(expr) ({			\
	atomic_val_t res, cur;				\
	do {						\
		cur = *target;				\
		res = xtensa_cas(target, cur, (expr));	\
	} while (res != cur);				\
	res; })
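
/* For example, atomic_add() below expands (conceptually) to a
 * classic compare-and-swap retry loop:
 *
 *	atomic_val_t res, cur;
 *	do {
 *		cur = *target;
 *		res = xtensa_cas(target, cur, cur + value);
 *	} while (res != cur);
 *	return res;
 */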

static ALWAYS_INLINE
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(value);
}

static ALWAYS_INLINE
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur + value);
}

static ALWAYS_INLINE
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur - value);
}

static ALWAYS_INLINE
atomic_val_t atomic_inc(atomic_t *target)
{
	return Z__GEN_ATOMXCHG(cur + 1);
}

static ALWAYS_INLINE
atomic_val_t atomic_dec(atomic_t *target)
{
	return Z__GEN_ATOMXCHG(cur - 1);
}

static ALWAYS_INLINE atomic_val_t atomic_or(atomic_t *target,
					    atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur | value);
}

static ALWAYS_INLINE atomic_val_t atomic_xor(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur ^ value);
}

static ALWAYS_INLINE atomic_val_t atomic_and(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur & value);
}

static ALWAYS_INLINE atomic_val_t atomic_nand(atomic_t *target,
					      atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(~(cur & value));
}
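
/* Illustrative usage of the read-modify-write helpers above (a
 * sketch, not part of this header): setting a flag bit and testing
 * whether it was already set, using the returned old value.
 *
 *	atomic_t flags = ATOMIC_INIT(0);
 *
 *	if ((atomic_or(&flags, 0x1) & 0x1) == 0) {
 *		// the bit was clear before this call
 *	}
 */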

static ALWAYS_INLINE void *atomic_ptr_get(const atomic_ptr_t *target)
{
	return (void *) atomic_get((atomic_t *)target);
}

static ALWAYS_INLINE void *atomic_ptr_set(atomic_ptr_t *target, void *value)
{
	return (void *) atomic_set((atomic_t *) target, (atomic_val_t) value);
}

static ALWAYS_INLINE atomic_val_t atomic_clear(atomic_t *target)
{
	return atomic_set(target, 0);
}

static ALWAYS_INLINE void *atomic_ptr_clear(atomic_ptr_t *target)
{
	return (void *) atomic_set((atomic_t *) target, 0);
}

#endif /* ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_ */