/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

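/*
 * ATOMIC_OP generates the value-discarding LSE atomics (STCLR, STSET,
 * STEOR, STADD): the operation is performed directly on memory, nothing
 * is returned and no ordering is implied.
 */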
#define ATOMIC_OP(op, asm_op) \
static inline void __lse_atomic_##op(int i, atomic_t *v) \
{ \
        asm volatile( \
        " " #asm_op " %w[i], %[v]\n" \
        : [i] "+r" (i), [v] "+Q" (v->counter) \
        : "r" (v)); \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

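/*
 * ATOMIC_FETCH_OP generates the LD<OP> forms, which return the value the
 * counter held before the operation. The 'mb' suffix selects the ordering:
 * none for relaxed, 'a' for acquire, 'l' for release and 'al' for fully
 * ordered; the ordered variants also clobber "memory".
 */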
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
        asm volatile( \
        " " #asm_op #mb " %w[i], %w[i], %[v]" \
        : [i] "+r" (i), [v] "+Q" (v->counter) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

#define ATOMIC_FETCH_OPS(op, asm_op) \
        ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
        ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
        ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
        ATOMIC_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

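/*
 * No LSE instruction returns the *new* value, so add_return is built from
 * LDADD (which returns the old value) followed by an ADD to recompute the
 * result.
 */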
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
        u32 tmp; \
\
        asm volatile( \
        " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
        " add %w[i], %w[i], %w[tmp]" \
        : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

ATOMIC_OP_ADD_RETURN(_relaxed, )
ATOMIC_OP_ADD_RETURN(_acquire, a, "memory")
ATOMIC_OP_ADD_RETURN(_release, l, "memory")
ATOMIC_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

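/*
 * LSE has no atomic AND instruction: AND is implemented by inverting the
 * operand with MVN and then clearing those bits with STCLR/LDCLR.
 */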
static inline void __lse_atomic_and(int i, atomic_t *v)
{
        asm volatile(
        " mvn %w[i], %w[i]\n"
        " stclr %w[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
        : "r" (v));
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
        asm volatile( \
        " mvn %w[i], %w[i]\n" \
        " ldclr" #mb " %w[i], %w[i], %[v]" \
        : [i] "+&r" (i), [v] "+Q" (v->counter) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

ATOMIC_FETCH_OP_AND(_relaxed, )
ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC_FETCH_OP_AND(_release, l, "memory")
ATOMIC_FETCH_OP_AND( , al, "memory")

#undef ATOMIC_FETCH_OP_AND

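/*
 * Likewise there is no atomic subtract instruction: SUB is implemented by
 * negating the operand and using STADD/LDADD.
 */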
static inline void __lse_atomic_sub(int i, atomic_t *v)
{
        asm volatile(
        " neg %w[i], %w[i]\n"
        " stadd %w[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
        : "r" (v));
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \
        u32 tmp; \
\
        asm volatile( \
        " neg %w[i], %w[i]\n" \
        " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
        " add %w[i], %w[i], %w[tmp]" \
        : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

ATOMIC_OP_SUB_RETURN(_relaxed, )
ATOMIC_OP_SUB_RETURN(_acquire, a, "memory")
ATOMIC_OP_SUB_RETURN(_release, l, "memory")
ATOMIC_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
        asm volatile( \
        " neg %w[i], %w[i]\n" \
        " ldadd" #mb " %w[i], %w[i], %[v]" \
        : [i] "+&r" (i), [v] "+Q" (v->counter) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

ATOMIC_FETCH_OP_SUB(_relaxed, )
ATOMIC_FETCH_OP_SUB(_acquire, a, "memory")
ATOMIC_FETCH_OP_SUB(_release, l, "memory")
ATOMIC_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC_FETCH_OP_SUB

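/*
 * The atomic64_t operations below mirror the 32-bit versions above, using
 * the full 64-bit X registers instead of the W views.
 */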
#define ATOMIC64_OP(op, asm_op) \
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
        asm volatile( \
        " " #asm_op " %[i], %[v]\n" \
        : [i] "+r" (i), [v] "+Q" (v->counter) \
        : "r" (v)); \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{ \
        asm volatile( \
        " " #asm_op #mb " %[i], %[i], %[v]" \
        : [i] "+r" (i), [v] "+Q" (v->counter) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

#define ATOMIC64_FETCH_OPS(op, asm_op) \
        ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
        ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
        ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
        ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{ \
        unsigned long tmp; \
\
        asm volatile( \
        " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
        " add %[i], %[i], %x[tmp]" \
        : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

ATOMIC64_OP_ADD_RETURN(_relaxed, )
ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory")
ATOMIC64_OP_ADD_RETURN(_release, l, "memory")
ATOMIC64_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
        asm volatile(
        " mvn %[i], %[i]\n"
        " stclr %[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
        : "r" (v));
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
        asm volatile( \
        " mvn %[i], %[i]\n" \
        " ldclr" #mb " %[i], %[i], %[v]" \
        : [i] "+&r" (i), [v] "+Q" (v->counter) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

ATOMIC64_FETCH_OP_AND(_relaxed, )
ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC64_FETCH_OP_AND(_release, l, "memory")
ATOMIC64_FETCH_OP_AND( , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
        asm volatile(
        " neg %[i], %[i]\n"
        " stadd %[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
        : "r" (v));
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
{ \
        unsigned long tmp; \
\
        asm volatile( \
        " neg %[i], %[i]\n" \
        " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
        " add %[i], %[i], %x[tmp]" \
        : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

ATOMIC64_OP_SUB_RETURN(_relaxed, )
ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory")
ATOMIC64_OP_SUB_RETURN(_release, l, "memory")
ATOMIC64_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \
        asm volatile( \
        " neg %[i], %[i]\n" \
        " ldadd" #mb " %[i], %[i], %[v]" \
        : [i] "+&r" (i), [v] "+Q" (v->counter) \
        : "r" (v) \
        : cl); \
\
        return i; \
}

ATOMIC64_FETCH_OP_SUB(_relaxed, )
ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
ATOMIC64_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

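/*
 * dec_if_positive has no direct LSE equivalent, so it is implemented as a
 * CAS loop: load the counter, bail out if decrementing would make it
 * negative, then try to install the decremented value with CASAL and
 * retry if the counter changed between the load and the CAS.
 */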
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
        unsigned long tmp;

        asm volatile(
        "1: ldr %x[tmp], %[v]\n"
        " subs %[ret], %x[tmp], #1\n"
        " b.lt 2f\n"
        " casal %x[tmp], %[ret], %[v]\n"
        " sub %x[tmp], %x[tmp], #1\n"
        " sub %x[tmp], %x[tmp], %[ret]\n"
        " cbnz %x[tmp], 1b\n"
        "2:"
        : [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
        :
        : "cc", "memory");

        return (long)v;
}

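/*
 * Generate the cmpxchg() family from the CAS instruction for 8-, 16-, 32-
 * and 64-bit quantities, in relaxed, acquire, release and fully ordered
 * variants. The value observed in memory is returned to the caller.
 */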
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
static __always_inline u##sz \
__lse__cmpxchg_case_##name##sz(volatile void *ptr, \
                               u##sz old, \
                               u##sz new) \
{ \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
        register u##sz x1 asm ("x1") = old; \
        register u##sz x2 asm ("x2") = new; \
        unsigned long tmp; \
\
        asm volatile( \
        " mov %" #w "[tmp], %" #w "[old]\n" \
        " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
        " mov %" #w "[ret], %" #w "[tmp]" \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr), \
          [tmp] "=&r" (tmp) \
        : [old] "r" (x1), [new] "r" (x2) \
        : cl); \
\
        return x0; \
}

__CMPXCHG_CASE(w, b, , 8, )
__CMPXCHG_CASE(w, h, , 16, )
__CMPXCHG_CASE(w, , , 32, )
__CMPXCHG_CASE(x, , , 64, )
__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
__CMPXCHG_CASE(x, , mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

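/*
 * cmpxchg_double() uses CASP to compare and swap a pair of adjacent
 * 64-bit words. The EOR/ORR sequence folds the two comparisons into a
 * single value that is zero only if both words matched the expected
 * values, i.e. only if the swap was performed.
 */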
#define __CMPXCHG_DBL(name, mb, cl...) \
static __always_inline long \
__lse__cmpxchg_double##name(unsigned long old1, \
                            unsigned long old2, \
                            unsigned long new1, \
                            unsigned long new2, \
                            volatile void *ptr) \
{ \
        unsigned long oldval1 = old1; \
        unsigned long oldval2 = old2; \
        register unsigned long x0 asm ("x0") = old1; \
        register unsigned long x1 asm ("x1") = old2; \
        register unsigned long x2 asm ("x2") = new1; \
        register unsigned long x3 asm ("x3") = new2; \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
        asm volatile( \
        " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
        " eor %[old1], %[old1], %[oldval1]\n" \
        " eor %[old2], %[old2], %[oldval2]\n" \
        " orr %[old1], %[old1], %[old2]" \
        : [old1] "+&r" (x0), [old2] "+&r" (x1), \
          [v] "+Q" (*(unsigned long *)ptr) \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
        : cl); \
\
        return x0; \
}

__CMPXCHG_DBL( , )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif /* __ASM_ATOMIC_LSE_H */