Lines matching +full:2 +full:v (selected lines from the PowerPC arch_atomic/arch_atomic64 implementation; non-matching lines are omitted, so the fragments below are not contiguous)
26 static __inline__ int arch_atomic_read(const atomic_t *v)
30 __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
35 static __inline__ void arch_atomic_set(atomic_t *v, int i)
37 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
41 static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
47 #asm_op "%I2" suffix " %0,%0,%2\n" \
50 : "=&r" (t), "+m" (v->counter) \
51 : "r"#sign (a), "r" (&v->counter) \
56 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
62 #asm_op "%I2" suffix " %0,%0,%2\n" \
65 : "=&r" (t), "+m" (v->counter) \
66 : "r"#sign (a), "r" (&v->counter) \
73 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
82 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
83 : "r"#sign (a), "r" (&v->counter) \
121 #define arch_atomic_cmpxchg(v, o, n) \
122 (arch_cmpxchg(&((v)->counter), (o), (n)))
123 #define arch_atomic_cmpxchg_relaxed(v, o, n) \
124 arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
125 #define arch_atomic_cmpxchg_acquire(v, o, n) \
126 arch_cmpxchg_acquire(&((v)->counter), (o), (n))
128 #define arch_atomic_xchg(v, new) \
129 (arch_xchg(&((v)->counter), new))
130 #define arch_atomic_xchg_relaxed(v, new) \
131 arch_xchg_relaxed(&((v)->counter), (new))
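These defines route atomic_cmpxchg()/atomic_xchg() (and their _relaxed/_acquire forms) for atomic_t onto the generic cmpxchg machinery applied to v->counter. A small compare-and-swap sketch through the generic API, with invented names:

#include <linux/atomic.h>

static atomic_t claim = ATOMIC_INIT(0);	/* hypothetical: 0 = idle, 1 = busy */

static bool try_claim(void)
{
	/* true only for the caller that moved the state from 0 to 1 */
	return atomic_cmpxchg(&claim, 0, 1) == 0;
}

static void release_claim(void)
{
	atomic_xchg(&claim, 0);	/* unconditional swap back to idle, old value discarded */
}
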
140 arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
146 "1: lwarx %0,0,%2,%[eh] # atomic_try_cmpxchg_acquire \n"
148 " bne- 2f \n"
149 " stwcx. %4,0,%2 \n"
152 "2: \n"
153 : "=&r" (r), "+m" (v->counter)
154 : "r" (&v->counter), "r" (o), "r" (new), [eh] "n" (eh)
164 * @v: pointer of type atomic_t
165 * @a: the amount to add to v...
166 * @u: ...unless v is equal to u.
168 * Atomically adds @a to @v, so long as it was not @u.
169 * Returns the old value of @v.
171 static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
179 beq 2f \n\
180 add%I2c %0,%0,%2 \n"
184 " sub%I2c %0,%0,%2 \n\
185 2:"
187 : "r" (&v->counter), "rI" (a), "r" (u)
195 * Atomically test *v and decrement if it is greater than 0.
196 * The function returns the old value of *v minus 1, even if
197 * the atomic variable, v, was not decremented.
199 static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
208 blt- 2f\n"
213 2:" : "=&b" (t)
214 : "r" (&v->counter)
225 static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
229 __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
234 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
236 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
240 static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
246 #asm_op " %0,%2,%0\n" \
249 : "=&r" (t), "+m" (v->counter) \
250 : "r" (a), "r" (&v->counter) \
256 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
262 #asm_op " %0,%2,%0\n" \
265 : "=&r" (t), "+m" (v->counter) \
266 : "r" (a), "r" (&v->counter) \
274 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
283 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
284 : "r" (a), "r" (&v->counter) \
322 static __inline__ void arch_atomic64_inc(atomic64_t *v)
327 "1: ldarx %0,0,%2 # atomic64_inc\n\
329 stdcx. %0,0,%2 \n\
331 : "=&r" (t), "+m" (v->counter)
332 : "r" (&v->counter)
337 static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
342 "1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
344 " stdcx. %0,0,%2\n"
346 : "=&r" (t), "+m" (v->counter)
347 : "r" (&v->counter)
353 static __inline__ void arch_atomic64_dec(atomic64_t *v)
358 "1: ldarx %0,0,%2 # atomic64_dec\n\
360 stdcx. %0,0,%2\n\
362 : "=&r" (t), "+m" (v->counter)
363 : "r" (&v->counter)
368 static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
373 "1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
375 " stdcx. %0,0,%2\n"
377 : "=&r" (t), "+m" (v->counter)
378 : "r" (&v->counter)
388 * Atomically test *v and decrement if it is greater than 0.
389 * The function returns the old value of *v minus 1.
391 static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
399 blt- 2f\n\
404 2:" : "=&r" (t)
405 : "r" (&v->counter)
412 #define arch_atomic64_cmpxchg(v, o, n) \
413 (arch_cmpxchg(&((v)->counter), (o), (n)))
414 #define arch_atomic64_cmpxchg_relaxed(v, o, n) \
415 arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
416 #define arch_atomic64_cmpxchg_acquire(v, o, n) \
417 arch_cmpxchg_acquire(&((v)->counter), (o), (n))
419 #define arch_atomic64_xchg(v, new) \
420 (arch_xchg(&((v)->counter), new))
421 #define arch_atomic64_xchg_relaxed(v, new) \
422 arch_xchg_relaxed(&((v)->counter), (new))
426 * @v: pointer of type atomic64_t
427 * @a: the amount to add to v...
428 * @u: ...unless v is equal to u.
430 * Atomically adds @a to @v, so long as it was not @u.
431 * Returns the old value of @v.
433 static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
441 beq 2f \n\
442 add %0,%2,%0 \n"
446 " subf %0,%2,%0 \n\
447 2:"
449 : "r" (&v->counter), "r" (a), "r" (u)
458 * @v: pointer of type atomic64_t
460 * Atomically increments @v by 1, so long as @v is non-zero.
461 * Returns non-zero if @v was non-zero, and zero otherwise.
463 static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
469 "1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
471 beq- 2f\n\
473 stdcx. %1,0,%2\n\
477 2:"
479 : "r" (&v->counter)
484 #define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
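inc_not_zero is the classic "take a reference only while the object is still alive" primitive, and the final #define simply tells the generic atomic layer that an architecture implementation is present. A closing sketch using the generic atomic64_inc_not_zero(), with a hypothetical object type:

#include <linux/atomic.h>

struct big_obj {			/* hypothetical */
	atomic64_t refs;
};

/* Succeeds only if the refcount has not already dropped to zero. */
static bool big_obj_tryget(struct big_obj *o)
{
	return atomic64_inc_not_zero(&o->refs);
}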