Lines matching the query +full:2 +full:v, excerpted from the Linux kernel's Alpha atomic-operations header, arch/alpha/include/asm/atomic.h. Only lines matching the query are shown, so every definition below is a fragment with its surrounding lines elided.
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
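On Alpha an aligned load or store is already single-copy atomic, so read and set reduce to plain READ_ONCE()/WRITE_ONCE() accesses; neither implies a memory barrier. A minimal usage sketch (the counter and values are illustrative, and callers normally go through the generic atomic_read()/atomic_set() wrappers rather than these arch_ primitives):

static atomic_t example_count = ATOMIC_INIT(0);	/* hypothetical counter */

static int example_reset(void)
{
	arch_atomic_set(&example_count, 2);	 /* plain WRITE_ONCE() underneath */
	return arch_atomic_read(&example_count); /* plain READ_ONCE(), no barrier */
}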
static __inline__ void arch_atomic_##op(int i, atomic_t * v)		\
	"	" #asm_op " %0,%2,%0\n"					\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
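These are the matched lines of the ATOMIC_OP() macro, which builds each void atomic operation from Alpha's load-locked/store-conditional pair: ldl_l loads v->counter and arms the lock flag, stl_c stores the updated value only if nothing else wrote the location in between, and a failed store (stl_c leaves 0 in its register) branches to an out-of-line retry stub. The stub sits in .subsection 2 so the forward beq is statically predicted not taken on the fast path. Filling in the elided lines from my reading of the kernel's Alpha atomics, the whole macro looks like this (a sketch, not a verbatim quote):

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"		/* load-locked old value */	\
	"	" #asm_op " %0,%2,%0\n"	/* temp = temp <op> i */	\
	"	stl_c %0,%1\n"		/* store-conditional */		\
	"	beq %0,2f\n"		/* 0: lost the race, retry */	\
	".subsection 2\n"						\
	"2:	br 1b\n"		/* out-of-line retry stub */	\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}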
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
	"	" #asm_op " %0,%3,%2\n"					\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
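The _return_relaxed variant computes the new value twice: once into result (%2), which survives as the return value, and once into temp (%0), which stl_c then consumes, since stl_c overwrites its source register with the success flag and the return value cannot live there. Being _relaxed, it adds no barriers, only a "memory" clobber. Reconstructed under the same caveat as above:

#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"	/* result = temp <op> i */	\
	"	" #asm_op " %0,%3,%0\n"	/* temp = temp <op> i */	\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}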
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
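fetch_##op returns the value the counter held before the operation, so here ldl_l loads straight into result (%2) and the op writes the new value into temp (%0) for the store; result is never touched again. Reconstructed, same caveat:

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"		/* result = old value */	\
	"	" #asm_op " %2,%3,%0\n"	/* temp = result <op> i */	\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}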
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v)	\
	"	" #asm_op " %0,%2,%0\n"					\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\

arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)		\
	"	" #asm_op " %0,%3,%2\n"					\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\

arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)		\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
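The three ATOMIC64_*() macros mirror their 32-bit counterparts line for line; the only differences are the s64/atomic64_t types and the quadword ldq_l/stq_c in place of the longword ldl_l/stl_c. All six macros are then stamped out per operation through an ATOMIC_OPS() wrapper; sketched from my reading of the file (Alpha spells the 32- and 64-bit forms of an op as, e.g., addl and addq):

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

/* The bitwise ops (and, andnot, or, xor) are generated the same way,
 * from a second ATOMIC_OPS() variant that also takes the asm mnemonic. */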
#define arch_atomic64_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))

#define arch_atomic_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
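cmpxchg and xchg simply forward to the generic arch_cmpxchg()/arch_xchg() on the bare counter. The classic caller-side pattern built on them is a compare-and-swap loop; the function below is an illustrative sketch, not from the source:

/* Sketch: increment v unless it already holds max (name is hypothetical). */
static bool inc_unless_max(atomic_t *v, int max)
{
	int old = arch_atomic_read(v);

	for (;;) {
		int prev;

		if (old == max)
			return false;			/* saturated */
		prev = arch_atomic_cmpxchg(v, old, old + 1);
		if (prev == old)
			return true;			/* CAS won */
		old = prev;				/* lost a race, retry */
	}
}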
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
	"	bne %[c],2f\n"
	"2:\n"
	".subsection 2\n"
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
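fetch_add_unless keeps the comparison inside the load-locked/store-conditional section: cmpeq sets c when the old value equals u, in which case bne skips the store and the unmodified old value is returned. Unlike the _relaxed ops above, this one is fully ordered, hence the smp_mb() on both sides. Filling in the elided lines from my reading of the file (a sketch):

static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"	/* load-locked old value */
	"	cmpeq	%[old],%[u],%[c]\n"	/* c = (old == u) */
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"		/* old == u: skip the store */
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"		/* store failed: retry */
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}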
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
	"	bne %[c],2f\n"
	"2:\n"
	".subsection 2\n"
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
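The 64-bit version is the same algorithm widened to a quadword. The only changes from the 32-bit asm are ldq_l/addq/stq_c, and u needs no (long) cast since it is already 64-bit; the sketched core, under the same caveat:

	"1:	ldq_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"	/* c = (old == u) */
	"	addq	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"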
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
	"	ble %[old],2f\n"
	"2:\n"
	".subsection 2\n"
	: [mem] "m"(*v)
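dec_if_positive leaves the LL/SC section early via ble when the old value is already zero or negative, so the counter stays untouched; it returns old - 1 either way, and a negative result tells the caller that no decrement happened. Reconstructed (a sketch, elided lines filled in from my reading of the file):

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"	/* load-locked old value */
	"	subq	%[old],1,%[tmp]\n"	/* tmp = old - 1 */
	"	ble	%[old],2f\n"		/* old <= 0: don't store */
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"		/* store failed: retry */
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}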