Lines Matching +full:2 +full:v

26 static __inline__ int arch_atomic_read(const atomic_t *v)
30 __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
35 static __inline__ void arch_atomic_set(atomic_t *v, int i)
37 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
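These two accessors stay deliberately simple: reading or writing an atomic_t needs no lwarx/stwcx. sequence, only a single load or store that the inline asm keeps the compiler from tearing, caching or reordering away. A minimal usage sketch, assuming a kernel build context and an illustrative counter name:

    #include <linux/atomic.h>

    static atomic_t nr_events = ATOMIC_INIT(0);     /* illustrative counter */

    static void reset_if_busy(void)
    {
            /* one plain load and one plain store, no read-modify-write loop */
            if (atomic_read(&nr_events) != 0)
                    atomic_set(&nr_events, 0);
    }

Callers normally go through the generic atomic_read()/atomic_set() wrappers, which end up in the arch_ variants shown above.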
41 static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
47 #asm_op " %0,%2,%0\n" \
50 : "=&r" (t), "+m" (v->counter) \
51 : "r" (a), "r" (&v->counter) \
56 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
62 #asm_op " %0,%2,%0\n" \
65 : "=&r" (t), "+m" (v->counter) \
66 : "r" (a), "r" (&v->counter) \
73 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
82 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
83 : "r" (a), "r" (&v->counter) \
121 static __inline__ void arch_atomic_inc(atomic_t *v)
126 "1: lwarx %0,0,%2 # atomic_inc\n\
128 " stwcx. %0,0,%2 \n\
130 : "=&r" (t), "+m" (v->counter)
131 : "r" (&v->counter)
136 static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
141 "1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
143 " stwcx. %0,0,%2\n"
145 : "=&r" (t), "+m" (v->counter)
146 : "r" (&v->counter)
152 static __inline__ void arch_atomic_dec(atomic_t *v)
157 "1: lwarx %0,0,%2 # atomic_dec\n\
159 " stwcx. %0,0,%2\n\
161 : "=&r" (t), "+m" (v->counter)
162 : "r" (&v->counter)
167 static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
172 "1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
174 " stwcx. %0,0,%2\n"
176 : "=&r" (t), "+m" (v->counter)
177 : "r" (&v->counter)
186 #define arch_atomic_cmpxchg(v, o, n) \
187 (arch_cmpxchg(&((v)->counter), (o), (n)))
188 #define arch_atomic_cmpxchg_relaxed(v, o, n) \
189 arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
190 #define arch_atomic_cmpxchg_acquire(v, o, n) \
191 arch_cmpxchg_acquire(&((v)->counter), (o), (n))
193 #define arch_atomic_xchg(v, new) \
194 (arch_xchg(&((v)->counter), new))
195 #define arch_atomic_xchg_relaxed(v, new) \
196 arch_xchg_relaxed(&((v)->counter), (new))
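The cmpxchg/xchg family simply forwards v->counter to the generic arch_cmpxchg*/arch_xchg* helpers, in fully ordered, _relaxed and _acquire flavours. A hedged usage sketch through the usual atomic_cmpxchg() wrapper, with an illustrative one-shot flag (kernel context assumed):

    #include <linux/atomic.h>

    static atomic_t init_state = ATOMIC_INIT(0);    /* 0 = unclaimed, 1 = claimed (illustrative) */

    static bool claim_init_once(void)
    {
            /* exactly one caller sees the old value 0 and wins the 0 -> 1 swap */
            return atomic_cmpxchg(&init_state, 0, 1) == 0;
    }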
205 arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
210 "1: lwarx %0,0,%2,%5 # atomic_try_cmpxchg_acquire \n"
212 " bne- 2f \n"
213 " stwcx. %4,0,%2 \n"
216 "2: \n"
217 : "=&r" (r), "+m" (v->counter)
218 : "r" (&v->counter), "r" (o), "r" (new), "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
228 * @v: pointer of type atomic_t
229 * @a: the amount to add to v...
230 * @u: ...unless v is equal to u.
232 * Atomically adds @a to @v, so long as it was not @u.
233 * Returns the old value of @v.
235 static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
243 beq 2f \n\
244 add %0,%2,%0 \n"
248 " subf %0,%2,%0 \n\
249 2:"
251 : "r" (&v->counter), "r" (a), "r" (u)
260 * @v: pointer of type atomic_t
262 * Atomically increments @v by 1, so long as @v is non-zero.
263 * Returns non-zero if @v was non-zero, and zero otherwise.
265 static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
271 "1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
273 beq- 2f\n\
275 " stwcx. %1,0,%2\n\
279 2:"
281 : "r" (&v->counter)
286 #define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
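arch_atomic_inc_not_zero() is the specialised form of that same pattern, and its usual consumer is a lookup that must only pin objects that are still live. A hedged sketch (the struct and helper are illustrative, not kernel code):

    struct obj {
            atomic_t refs;
            /* payload elided */
    };

    static struct obj *obj_tryget(struct obj *o)
    {
            /* returns o with an extra reference, or NULL if it was already dying */
            return atomic_inc_not_zero(&o->refs) ? o : NULL;
    }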
289 * Atomically test *v and decrement if it is greater than 0.
290 * The function returns the old value of *v minus 1, even if
291 * the atomic variable, v, was not decremented.
293 static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
302 blt- 2f\n"
307 2:" : "=&b" (t)
308 : "r" (&v->counter)
319 static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
323 __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
328 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
330 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
334 static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
340 #asm_op " %0,%2,%0\n" \
343 : "=&r" (t), "+m" (v->counter) \
344 : "r" (a), "r" (&v->counter) \
350 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
356 #asm_op " %0,%2,%0\n" \
359 : "=&r" (t), "+m" (v->counter) \
360 : "r" (a), "r" (&v->counter) \
368 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
377 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
378 : "r" (a), "r" (&v->counter) \
416 static __inline__ void arch_atomic64_inc(atomic64_t *v)
421 "1: ldarx %0,0,%2 # atomic64_inc\n\
423 stdcx. %0,0,%2 \n\
425 : "=&r" (t), "+m" (v->counter)
426 : "r" (&v->counter)
431 static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
436 "1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
438 " stdcx. %0,0,%2\n"
440 : "=&r" (t), "+m" (v->counter)
441 : "r" (&v->counter)
447 static __inline__ void arch_atomic64_dec(atomic64_t *v)
452 "1: ldarx %0,0,%2 # atomic64_dec\n\
454 stdcx. %0,0,%2\n\
456 : "=&r" (t), "+m" (v->counter)
457 : "r" (&v->counter)
462 static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
467 "1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
469 " stdcx. %0,0,%2\n"
471 : "=&r" (t), "+m" (v->counter)
472 : "r" (&v->counter)
482 * Atomically test *v and decrement if it is greater than 0.
483 * The function returns the old value of *v minus 1.
485 static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
493 blt- 2f\n\
498 2:" : "=&r" (t)
499 : "r" (&v->counter)
506 #define arch_atomic64_cmpxchg(v, o, n) \
507 (arch_cmpxchg(&((v)->counter), (o), (n)))
508 #define arch_atomic64_cmpxchg_relaxed(v, o, n) \
509 arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
510 #define arch_atomic64_cmpxchg_acquire(v, o, n) \
511 arch_cmpxchg_acquire(&((v)->counter), (o), (n))
513 #define arch_atomic64_xchg(v, new) \
514 (arch_xchg(&((v)->counter), new))
515 #define arch_atomic64_xchg_relaxed(v, new) \
516 arch_xchg_relaxed(&((v)->counter), (new))
520 * @v: pointer of type atomic64_t
521 * @a: the amount to add to v...
522 * @u: ...unless v is equal to u.
524 * Atomically adds @a to @v, so long as it was not @u.
525 * Returns the old value of @v.
527 static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
535 beq 2f \n\
536 add %0,%2,%0 \n"
540 " subf %0,%2,%0 \n\
541 2:"
543 : "r" (&v->counter), "r" (a), "r" (u)
552 * @v: pointer of type atomic64_t
554 * Atomically increments @v by 1, so long as @v is non-zero.
555 * Returns non-zero if @v was non-zero, and zero otherwise.
557 static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
563 "1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
565 beq- 2f\n\
567 stdcx. %1,0,%2\n\
571 2:"
573 : "r" (&v->counter)
578 #define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
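The atomic64_* block mirrors the 32-bit one almost line for line: ldarx/stdcx. replace lwarx/stwcx., s64 replaces int, and the cmpxchg/xchg, fetch_add_unless, inc_not_zero and dec_if_positive helpers keep the same shape. Callers only differ in the types they pass; a hedged sketch with an illustrative 64-bit statistic (kernel context assumed):

    #include <linux/atomic.h>

    static atomic64_t bytes_rx = ATOMIC64_INIT(0);  /* illustrative statistic */

    static void account_rx(u64 len)
    {
            atomic64_add(len, &bytes_rx);

            /* a 64-bit read is a single ld here, no loop or lock required */
            if (atomic64_read(&bytes_rx) > (s64)(1ULL << 40))
                    atomic64_set(&bytes_rx, 0);
    }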