Lines matching "+full:1 +full:v" (powerpc atomic_t / atomic64_t primitives)

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	/* plain (relaxed) load of the counter, no barrier */
	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	/* plain (relaxed) store to the counter, no barrier */
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
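/*
 * For orientation: atomic_read()/atomic_set() compile to an ordinary
 * load/store with no ordering. A minimal C11 sketch of the same
 * semantics, assuming only that behaviour (the my_* names are
 * illustrative, not part of this header):
 */
#include <stdatomic.h>

typedef struct { atomic_int counter; } my_atomic_t;

static inline int my_atomic_read(const my_atomic_t *v)
{
	/* ordinary load; memory_order_relaxed imposes no ordering */
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

static inline void my_atomic_set(my_atomic_t *v, int i)
{
	/* ordinary store, likewise unordered */
	atomic_store_explicit(&v->counter, i, memory_order_relaxed);
}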
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic_cmpxchg(v, o, n)		(cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new)	xchg_relaxed(&((v)->counter), (new))
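/*
 * Typical use of the cmpxchg wrappers is a compare-and-swap retry
 * loop. Hypothetical example (not from this header): increment a
 * counter but never beyond limit, returning the value observed
 * before the attempt.
 */
static inline int bounded_inc(atomic_t *v, int limit)
{
	int old = atomic_read(v);

	while (old < limit) {
		int prev = atomic_cmpxchg(v, old, old + 1);
		if (prev == old)
			break;		/* CAS won: increment applied */
		old = prev;		/* lost a race: retry from new value */
	}
	return old;
}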
static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:\t"	PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"\t"	PPC_ACQUIRE_BARRIER "\n"
"2:\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
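/*
 * Hypothetical lookup-path usage: an object that may be concurrently
 * released is only handed out if a reference can still be taken
 * (struct obj and its refs field are illustrative, not from this
 * header):
 */
struct obj { atomic_t refs; };

static inline struct obj *obj_tryget(struct obj *o)
{
	if (o && !atomic_inc_not_zero(&o->refs))
		return NULL;	/* refcount already dropped to zero */
	return o;
}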
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}
static __inline__ void atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
static __inline__ void atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new)		(xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new)	xchg_relaxed(&((v)->counter), (new))
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))