Lines Matching +full:3 +full:v

26 static __inline__ int arch_atomic_read(const atomic_t *v)
30 __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
35 static __inline__ void arch_atomic_set(atomic_t *v, int i)
37 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
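arch_atomic_read() and arch_atomic_set() are single lwz/stw accesses wrapped in inline asm so the compiler can neither tear nor elide them; they impose no ordering on their own. Below is a minimal sketch of the same semantics in portable C11, using an invented my_atomic_t type rather than the kernel's atomic_t and asm:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for atomic_t. */
typedef struct { atomic_int counter; } my_atomic_t;

static int my_atomic_read(my_atomic_t *v)
{
	/* Relaxed load: atomic and untearable, but no ordering, like the lwz above. */
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

static void my_atomic_set(my_atomic_t *v, int i)
{
	/* Relaxed store, matching the stw above. */
	atomic_store_explicit(&v->counter, i, memory_order_relaxed);
}

int main(void)
{
	my_atomic_t v = { 0 };

	my_atomic_set(&v, 3);
	printf("%d\n", my_atomic_read(&v));	/* prints 3 */
	return 0;
}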
41 static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
46 "1: lwarx %0,0,%3 # atomic_" #op "\n" \
48 " stwcx. %0,0,%3 \n" \
50 : "=&r" (t), "+m" (v->counter) \
51 : "r" (a), "r" (&v->counter) \
56 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
61 "1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
63 " stwcx. %0,0,%3\n" \
65 : "=&r" (t), "+m" (v->counter) \
66 : "r" (a), "r" (&v->counter) \
73 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
79 #asm_op " %1,%3,%0\n" \
82 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
83 : "r" (a), "r" (&v->counter) \
121 static __inline__ void arch_atomic_inc(atomic_t *v)
130 : "=&r" (t), "+m" (v->counter)
131 : "r" (&v->counter)
136 static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
145 : "=&r" (t), "+m" (v->counter)
146 : "r" (&v->counter)
152 static __inline__ void arch_atomic_dec(atomic_t *v)
161 : "=&r" (t), "+m" (v->counter)
162 : "r" (&v->counter)
167 static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
176 : "=&r" (t), "+m" (v->counter)
177 : "r" (&v->counter)
186 #define arch_atomic_cmpxchg(v, o, n) \
187 (arch_cmpxchg(&((v)->counter), (o), (n)))
188 #define arch_atomic_cmpxchg_relaxed(v, o, n) \
189 arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
190 #define arch_atomic_cmpxchg_acquire(v, o, n) \
191 arch_cmpxchg_acquire(&((v)->counter), (o), (n))
193 #define arch_atomic_xchg(v, new) \
194 (arch_xchg(&((v)->counter), new))
195 #define arch_atomic_xchg_relaxed(v, new) \
196 arch_xchg_relaxed(&((v)->counter), (new))
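The cmpxchg/xchg family is not open-coded here; each macro just forwards &v->counter to the generic arch_cmpxchg()/arch_xchg() machinery in its full-barrier, acquire, or relaxed flavour. A hedged kernel-style usage sketch of the instrumented wrappers that callers normally use (the state encoding and names are invented for illustration):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t my_state = ATOMIC_INIT(0);	/* hypothetical: 0 = idle, 1 = busy */

/* Claim the state only if it is still idle. atomic_cmpxchg() returns the
 * value it found, so the claim succeeded only if that value was 0. */
static bool my_claim(void)
{
	return atomic_cmpxchg(&my_state, 0, 1) == 0;
}

/* Take over unconditionally, reporting what was there before. */
static int my_steal(void)
{
	return atomic_xchg(&my_state, 1);
}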
205 arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
211 " cmpw 0,%0,%3 \n"
217 : "=&r" (r), "+m" (v->counter)
218 : "r" (&v->counter), "r" (o), "r" (new), "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
228 * @v: pointer of type atomic_t
229 * @a: the amount to add to v...
230 * @u: ...unless v is equal to u.
232 * Atomically adds @a to @v, so long as it was not @u.
233 * Returns the old value of @v.
235 static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
242 cmpw 0,%0,%3 \n\
251 : "r" (&v->counter), "r" (a), "r" (u)
260 * @v: pointer of type atomic_t
262 * Atomically increments @v by 1, so long as @v is non-zero.
263 * Returns non-zero if @v was non-zero, and zero otherwise.
265 static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
281 : "r" (&v->counter)
286 #define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
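arch_atomic_inc_not_zero() is semantically the a = 1, u = 0 case of fetch_add_unless(), open-coded here; the generic atomic_inc_not_zero() wrapper is the usual way for a lookup path to pin an object only while somebody still holds a reference. A minimal sketch with invented names:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical lookup path: take a reference only if the count has not
 * already dropped to zero (i.e. the object is not already going away). */
static bool my_tryget(atomic_t *refs)
{
	return atomic_inc_not_zero(refs);
}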
289 * Atomically test *v and decrement if it is greater than 0.
290 * The function returns the old value of *v minus 1, even if
291 * the atomic variable, v, was not decremented.
293 static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
308 : "r" (&v->counter)
319 static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
323 __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
328 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
330 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
334 static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
339 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
341 " stdcx. %0,0,%3 \n" \
343 : "=&r" (t), "+m" (v->counter) \
344 : "r" (a), "r" (&v->counter) \
350 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
355 "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
357 " stdcx. %0,0,%3\n" \
359 : "=&r" (t), "+m" (v->counter) \
360 : "r" (a), "r" (&v->counter) \
368 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
374 #asm_op " %1,%3,%0\n" \
377 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
378 : "r" (a), "r" (&v->counter) \
416 static __inline__ void arch_atomic64_inc(atomic64_t *v)
425 : "=&r" (t), "+m" (v->counter)
426 : "r" (&v->counter)
431 static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
440 : "=&r" (t), "+m" (v->counter)
441 : "r" (&v->counter)
447 static __inline__ void arch_atomic64_dec(atomic64_t *v)
456 : "=&r" (t), "+m" (v->counter)
457 : "r" (&v->counter)
462 static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
471 : "=&r" (t), "+m" (v->counter)
472 : "r" (&v->counter)
482 * Atomically test *v and decrement if it is greater than 0.
483 * The function returns the old value of *v minus 1.
485 static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
499 : "r" (&v->counter)
506 #define arch_atomic64_cmpxchg(v, o, n) \
507 (arch_cmpxchg(&((v)->counter), (o), (n)))
508 #define arch_atomic64_cmpxchg_relaxed(v, o, n) \
509 arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
510 #define arch_atomic64_cmpxchg_acquire(v, o, n) \
511 arch_cmpxchg_acquire(&((v)->counter), (o), (n))
513 #define arch_atomic64_xchg(v, new) \
514 (arch_xchg(&((v)->counter), new))
515 #define arch_atomic64_xchg_relaxed(v, new) \
516 arch_xchg_relaxed(&((v)->counter), (new))
520 * @v: pointer of type atomic64_t
521 * @a: the amount to add to v...
522 * @u: ...unless v is equal to u.
524 * Atomically adds @a to @v, so long as it was not @u.
525 * Returns the old value of @v.
527 static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
534 cmpd 0,%0,%3 \n\
543 : "r" (&v->counter), "r" (a), "r" (u)
552 * @v: pointer of type atomic64_t
554 * Atomically increments @v by 1, so long as @v is non-zero.
555 * Returns non-zero if @v was non-zero, and zero otherwise.
557 static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
573 : "r" (&v->counter)
578 #define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
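The atomic64_* conditional operations above (dec_if_positive, fetch_add_unless, inc_not_zero) mirror their 32-bit counterparts over s64, built from ldarx/stdcx. instead of lwarx/stwcx. A short kernel-style sketch of the generic 64-bit wrappers, with an invented in-flight counter and a sentinel of -1:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical in-flight request counter; -1 marks the queue as dead. */
static atomic64_t my_inflight = ATOMIC64_INIT(0);

static bool my_start_request(void)
{
	/* Bump the count unless the queue has already been marked dead. */
	return atomic64_fetch_add_unless(&my_inflight, 1, -1) != -1;
}

static bool my_retire_request(void)
{
	/* Drop one request if any are outstanding; a negative result means
	 * none were and the counter was left untouched. */
	return atomic64_dec_if_positive(&my_inflight) >= 0;
}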