Lines matching 'ref' in lib/percpu-refcount.c (Linux kernel percpu reference counting):
/*
 * Design notes (from the file header comment):
 *
 * Initially, we don't try to detect the ref hitting 0 - which means that
 * get/put can just increment or decrement the local percpu counter.
 *
 * We can't detect the ref hitting 0 on every put - this would require global
 * synchronization and defeat the whole purpose of using percpu refs.
 *
 * Instead, the user is required to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 */
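A sketch of the lifecycle those notes describe (hypothetical struct and function names, not from this file): the user embeds a percpu_ref in an object, takes gets/puts on the fast path, and kills the ref - which switches it to atomic mode and drops the initial reference - so the release callback can only run once every outstanding get has been put.

/* Hypothetical usage sketch - not part of lib/percpu-refcount.c. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

struct foo {
        struct percpu_ref ref;
        /* ... payload ... */
};

static void foo_release(struct percpu_ref *ref)
{
        struct foo *foo = container_of(ref, struct foo, ref);

        /* last reference dropped: free the percpu counters, then the object */
        percpu_ref_exit(&foo->ref);
        kfree(foo);
}

static struct foo *foo_create(void)
{
        struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

        if (!foo)
                return NULL;
        if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
                kfree(foo);
                return NULL;
        }
        return foo;     /* caller holds the initial reference */
}

static void foo_destroy(struct foo *foo)
{
        /*
         * Switch to atomic mode and drop the initial reference;
         * foo_release() runs once all outstanding gets have been put.
         */
        percpu_ref_kill(&foo->ref);
}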
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
        /* mask off the ATOMIC/DEAD flag bits stored in the pointer's low bits */
        return (unsigned long __percpu *)
                (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref.  If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; analogous to atomic_long_set(ref, 1).  See the
 * definitions of the PERCPU_REF_INIT_* flags for flag behaviors.
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
                    unsigned int flags, gfp_t gfp)
{
        size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                             __alignof__(unsigned long));
        unsigned long start_count = 0;

        ref->percpu_count_ptr = (unsigned long)
                __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
        if (!ref->percpu_count_ptr)
                return -ENOMEM;

        ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
        ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

        if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
                ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
                ref->allow_reinit = true;
        } else {
                start_count += PERCPU_COUNT_BIAS;
        }

        if (flags & PERCPU_REF_INIT_DEAD)
                ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        else
                start_count++;

        atomic_long_set(&ref->count, start_count);

        ref->release = release;
        ref->confirm_switch = NULL;
        return 0;
}
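A sketch of how the init flags change the starting state (hypothetical names, not from this file): PERCPU_REF_INIT_ATOMIC starts the ref in atomic mode, PERCPU_REF_INIT_DEAD starts it dead with a count of 0, and PERCPU_REF_ALLOW_REINIT keeps the percpu counters around so the ref can be reinitialized after it is killed.

/* Hypothetical init sketch - not part of lib/percpu-refcount.c. */
#include <linux/percpu-refcount.h>

static void bar_release(struct percpu_ref *ref)
{
        /* runs once the count finally drops to zero */
}

static int bar_setup(struct percpu_ref *ref)
{
        int ret;

        /*
         * Start in atomic mode and keep the percpu counters across kill,
         * so the ref can later be switched to percpu mode and reinitialized.
         */
        ret = percpu_ref_init(ref, bar_release,
                              PERCPU_REF_INIT_ATOMIC | PERCPU_REF_ALLOW_REINIT,
                              GFP_KERNEL);
        if (ret)
                return ret;     /* -ENOMEM: percpu allocation failed */

        return 0;
}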
/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or in init failure path
 * where percpu_ref_init() succeeded but a later part of initializing the
 * embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

        if (percpu_count) {
                /* non-NULL confirm_switch indicates switching in progress */
                WARN_ON_ONCE(ref->confirm_switch);
                free_percpu(percpu_count);
                ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
        }
}
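The doc above names two usual call sites; here is a sketch of the init-failure one (hypothetical names): if a step after percpu_ref_init() fails while bringing up the embedding object, the owner exits the ref before unwinding.

/* Hypothetical init-failure sketch - not part of lib/percpu-refcount.c. */
#include <linux/gfp.h>
#include <linux/percpu-refcount.h>

struct baz {
        struct percpu_ref ref;
        /* ... other members set up after the ref ... */
};

static void baz_release(struct percpu_ref *ref)
{
}

/* stand-in for whatever follows percpu_ref_init() in the real init path */
static int baz_setup_rest(struct baz *baz)
{
        return 0;
}

static int baz_init(struct baz *baz)
{
        int ret;

        ret = percpu_ref_init(&baz->ref, baz_release, 0, GFP_KERNEL);
        if (ret)
                return ret;

        ret = baz_setup_rest(baz);
        if (ret) {
                /* the ref was never published, so tear it down right here */
                percpu_ref_exit(&baz->ref);
                return ret;
        }
        return 0;
}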
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

        ref->confirm_switch(ref);
        ref->confirm_switch = NULL;
        wake_up_all(&percpu_ref_switch_waitq);

        if (!ref->allow_reinit)
                percpu_ref_exit(ref);

        /* drop ref from percpu_ref_switch_to_atomic() */
        percpu_ref_put(ref);
}
static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        unsigned long count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                count += *per_cpu_ptr(percpu_count, cpu);

        pr_debug("global %ld percpu %ld",
                 atomic_long_read(&ref->count), (long)count);

        /*
         * It's crucial that we sum the percpu counters _before_ adding the sum
         * to &ref->count; since gets could be happening on one cpu while puts
         * happen on another, adding a single cpu's count could cause
         * @ref->count to hit 0 before we've got a consistent value - but the
         * sum of all the counts will be consistent and correct.
         *
         * Subtracting the bias value then has to happen _after_ adding count to
         * &ref->count; we need the bias value to prevent &ref->count from
         * reaching 0 before we've got a consistent value.
         */
        atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

        WARN_ONCE(atomic_long_read(&ref->count) <= 0,
                  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
                  ref->release, atomic_long_read(&ref->count));

        /* @ref is viewed as dead on all CPUs, send out switch confirmation */
        percpu_ref_call_confirm_rcu(rcu);
}
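A toy model of the arithmetic in the comment above (plain userspace C, not kernel code): the atomic counter carries the bias plus the initial ref from init time, so it stays positive no matter how the per-CPU deltas are distributed, and folding in the summed deltas and then removing the bias leaves exactly the number of live references.

/* Toy model of the bias arithmetic - not kernel code. */
#include <stdio.h>

int main(void)
{
        long bias = 1L << 30;           /* stands in for PERCPU_COUNT_BIAS */
        long atomic_count = bias + 1;   /* bias + initial ref, set at init */
        long cpu_delta[2] = { -5, 7 };  /* per-CPU gets minus puts; may be negative */
        long sum = 0;
        int cpu;

        /* sum all percpu counters first, then fold the sum in and drop the bias */
        for (cpu = 0; cpu < 2; cpu++)
                sum += cpu_delta[cpu];
        atomic_count += sum - bias;

        /* 1 initial ref + 2 net gets = 3 live references */
        printf("count after switch = %ld\n", atomic_count);
        return 0;
}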
static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                          percpu_ref_func_t *confirm_switch)
{
        if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
                if (confirm_switch)
                        confirm_switch(ref);
                return;
        }

        /* switching from percpu to atomic */
        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

        /*
         * Non-NULL ->confirm_switch is used to indicate that switching is
         * in progress.  Use noop one if unspecified.
         */
        ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

        percpu_ref_get(ref);    /* put after confirmation */
        call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        int cpu;

        BUG_ON(!percpu_count);

        if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
                return;

        if (WARN_ON_ONCE(!ref->allow_reinit))
                return;

        atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

        /*
         * Restore per-cpu operation.  smp_store_release() is paired with
         * READ_ONCE() in __ref_is_percpu() and guarantees that the zeroing is
         * visible to all percpu accesses which can see the following
         * __PERCPU_REF_ATOMIC clearing.
         */
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(percpu_count, cpu) = 0;

        smp_store_release(&ref->percpu_count_ptr,
                          ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
static void __percpu_ref_switch_mode(struct percpu_ref *ref,
                                     percpu_ref_func_t *confirm_switch)
{
        lockdep_assert_held(&percpu_ref_switch_lock);

        /*
         * If the previous ATOMIC switching hasn't finished yet, wait for its
         * completion.  If the caller ensures that ATOMIC switching isn't in
         * progress, this function can be called from any context.
         */
        wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
                            percpu_ref_switch_lock);

        if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
}
/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.  Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->force_atomic = true;
        __percpu_ref_switch_mode(ref, confirm_switch);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
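A sketch of using the confirmation callback (hypothetical names): the callback may not block, so a common pattern is to complete a completion from it and wait elsewhere.

/* Hypothetical confirm-switch sketch - not part of lib/percpu-refcount.c. */
#include <linux/completion.h>
#include <linux/percpu-refcount.h>

static DECLARE_COMPLETION(qux_switched);

static void qux_confirm_switch(struct percpu_ref *ref)
{
        /* called once no CPU can still be using the percpu counters */
        complete(&qux_switched);
}

static void qux_go_atomic(struct percpu_ref *ref)
{
        percpu_ref_switch_to_atomic(ref, qux_confirm_switch);
        wait_for_completion(&qux_switched);
        /* from here on, gets/puts hit the shared atomic counter */
}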
/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete.  Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
        percpu_ref_switch_to_atomic(ref, NULL);
        wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
}
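The synchronous variant does that wait internally, so the completion boilerplate from the previous sketch collapses to a single call (hypothetical name):

/* Hypothetical sketch - not part of lib/percpu-refcount.c. */
static void qux_go_atomic_sync(struct percpu_ref *ref)
{
        percpu_ref_switch_to_atomic_sync(ref);
        /* the switch is fully visible on all CPUs when this returns */
}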
/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode.  This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations.  It reverses the atomic mode imposed
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
 * dying or dead, the actual switch takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->force_atomic = false;
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
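A sketch of the common pairing with PERCPU_REF_INIT_ATOMIC (hypothetical names): keep the ref in atomic mode while the object is being brought up, then enable the percpu fast path once it goes live.

/* Hypothetical sketch - not part of lib/percpu-refcount.c. */
static void qux_go_live(struct percpu_ref *ref)
{
        /*
         * @ref was initialized with PERCPU_REF_INIT_ATOMIC; now that the
         * object is fully set up, drop the sticky atomic mode so get/put
         * only touch the local CPU's counter.
         */
        percpu_ref_switch_to_percpu(ref);
}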
/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
                  "%s called more than once on %ps!", __func__, ref->release);

        ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        __percpu_ref_switch_mode(ref, confirm_kill);
        percpu_ref_put(ref);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
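A sketch of the drain pattern the confirmation callback enables (hypothetical names): once @confirm_kill runs, percpu_ref_tryget_live() fails everywhere, so new users are fenced off and the caller only has to wait for the existing references to go away.

/* Hypothetical drain sketch - not part of lib/percpu-refcount.c. */
#include <linux/completion.h>
#include <linux/percpu-refcount.h>

static DECLARE_COMPLETION(quux_confirmed);
static DECLARE_COMPLETION(quux_released);

static void quux_confirm_kill(struct percpu_ref *ref)
{
        complete(&quux_confirmed);      /* no new tryget_live() can succeed */
}

/* registered as the release callback at percpu_ref_init() time */
static void quux_release(struct percpu_ref *ref)
{
        complete(&quux_released);       /* the last reference was dropped */
}

static void quux_shutdown(struct percpu_ref *ref)
{
        percpu_ref_kill_and_confirm(ref, quux_confirm_kill);
        wait_for_completion(&quux_confirmed);
        /* ... fail fast paths, flush pending work, etc. ... */
        wait_for_completion(&quux_released);
}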
/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
        WARN_ON_ONCE(!percpu_ref_is_zero(ref));

        percpu_ref_resurrect(ref);
}
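A sketch of reuse (hypothetical name): this only works for refs created with PERCPU_REF_ALLOW_REINIT (or started with PERCPU_REF_INIT_DEAD), since otherwise the percpu counters are freed automatically when the ref switches to atomic mode.

/* Hypothetical reuse sketch - not part of lib/percpu-refcount.c. */
static void quux_recycle(struct percpu_ref *ref)
{
        /*
         * @ref was previously killed and its count has reached zero
         * (percpu_ref_is_zero() is true); bring it back to the state
         * percpu_ref_init() left it in so the object can be reused.
         */
        percpu_ref_reinit(ref);
}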
/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called.  @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
        WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

        ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
        percpu_ref_get(ref);
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
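A sketch of aborting a shutdown (hypothetical name): unlike percpu_ref_reinit(), the count does not have to be zero - resurrect just undoes the kill, re-adding the initial reference while outstanding gets may still exist.

/* Hypothetical abort-shutdown sketch - not part of lib/percpu-refcount.c. */
static void quux_abort_shutdown(struct percpu_ref *ref)
{
        /*
         * percpu_ref_kill() was called but the teardown is being abandoned
         * before release() has run; make the ref live again so
         * percpu_ref_tryget_live() succeeds once more.
         */
        percpu_ref_resurrect(ref);
}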