// SPDX-License-Identifier: GPL-2.0+
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 * For detailed explanation of Read-Copy Update mechanism see -
/* Holdoff in nanoseconds for auto-expediting. */
/* Overflow-check frequency. N bits roughly says every 2**N grace periods. */
/* Early-boot callback-management, so early that no lock is required! */
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
/* in init_srcu_struct_nodes() */
int i;
ssp->level[0] = &ssp->node[0];
for (i = 1; i < rcu_num_lvls; i++)
ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
snp->srcu_have_cbs[i] = 0;
snp->srcu_data_have_cbs[i] = 0;
snp->srcu_gp_seq_needed_exp = 0;
snp->grplo = -1;
snp->grphi = -1;
if (snp == &ssp->node[0]) {
snp->srcu_parent = NULL;
/* Non-root node. */
if (snp == ssp->level[level + 1])
snp->srcu_parent = ssp->level[level - 1] +
(snp - ssp->level[level]) /
levelspread[level - 1];
 * Initialize the per-CPU srcu_data array, which feeds into the
WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
ARRAY_SIZE(sdp->srcu_unlock_count));
level = rcu_num_lvls - 1;
snp_first = ssp->level[level];
sdp = per_cpu_ptr(ssp->sda, cpu);
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
sdp->mynode = &snp_first[cpu / levelspread[level]];
for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
if (snp->grplo < 0)
snp->grplo = cpu;
snp->grphi = cpu;
sdp->cpu = cpu;
INIT_WORK(&sdp->work, srcu_invoke_callbacks);
timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
sdp->ssp = ssp;
sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
 * Initialize non-compile-time initialized fields, including the
 * also tells us that ->sda has already been wired up to srcu_data.
/* in init_srcu_struct_fields() */
mutex_init(&ssp->srcu_cb_mutex);
mutex_init(&ssp->srcu_gp_mutex);
ssp->srcu_idx = 0;
ssp->srcu_gp_seq = 0;
ssp->srcu_barrier_seq = 0;
mutex_init(&ssp->srcu_barrier_mutex);
atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
INIT_DELAYED_WORK(&ssp->work, process_srcu);
ssp->sda = alloc_percpu(struct srcu_data);
if (!ssp->sda)
return -ENOMEM;
ssp->srcu_gp_seq_needed_exp = 0;
ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
/* in __init_srcu_struct() */
/* Don't re-initialize a lock while it is held. */
lockdep_init_map(&ssp->dep_map, name, key, 0);

 * init_srcu_struct - initialize a sleep-RCU structure
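/*
 * Illustrative usage sketch (not from this file): pairing init_srcu_struct()
 * with cleanup_srcu_struct() for a dynamically initialized srcu_struct.
 * The names "my_srcu", "my_subsys_init" and "my_subsys_exit" are
 * hypothetical; statically allocated instances can instead be defined
 * with DEFINE_SRCU() or DEFINE_STATIC_SRCU().
 */
#include <linux/srcu.h>

static struct srcu_struct my_srcu;

static int my_subsys_init(void)
{
        return init_srcu_struct(&my_srcu);      /* 0 on success, -ENOMEM otherwise */
}

static void my_subsys_exit(void)
{
        cleanup_srcu_struct(&my_srcu);          /* readers and callbacks must be done */
}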
 * First-use initialization of statically allocated srcu_struct
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive. Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
/* in check_init_srcu_struct() */
if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {

 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
/* in srcu_readers_lock_idx() */
struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_lock_count[idx]);

 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
/* in srcu_readers_unlock_idx() */
struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);

 * Return true if the number of pre-existing readers is determined to
/* in srcu_readers_active_idx_check() */
 * the current index but not have incremented the lock counter yet.
 * that more than ULONG_MAX non-nested readers come and go in
 * above, then that reader's next load of ->srcu_idx is guaranteed
 * of 2*NR_CPUS increments, which cannot overflow given current
 * systems, especially not on 64-bit systems.
 * especially on 64-bit systems.
 * srcu_readers_active - returns true if there are readers, and false
/* in srcu_readers_active() */
struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_lock_count[0]);
sum += READ_ONCE(cpuc->srcu_lock_count[1]);
sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
 * Return grace-period delay, zero if there are expedited grace
/* in srcu_get_delay() */
if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
READ_ONCE(ssp->srcu_gp_seq_needed_exp)))

 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * was initialized via init_srcu_struct(), else you leak memory.
/* in cleanup_srcu_struct() */
return; /* Just leak it! */
return; /* Just leak it! */
flush_delayed_work(&ssp->work);
struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
del_timer_sync(&sdp->delay_work);
flush_work(&sdp->work);
if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
return; /* Forgot srcu_barrier(), so just leak it! */
if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
free_percpu(ssp->sda);
ssp->sda = NULL;
 * Counts the new reader in the appropriate per-CPU element of the
/* in __srcu_read_lock() */
idx = READ_ONCE(ssp->srcu_idx) & 0x1;
this_cpu_inc(ssp->sda->srcu_lock_count[idx]);

 * Removes the count for the old reader from the appropriate per-CPU
/* in __srcu_read_unlock() */
this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
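/*
 * Illustrative reader-side sketch (not from this file): the per-CPU
 * ->srcu_lock_count[]/->srcu_unlock_count[] updates above are driven by
 * srcu_read_lock() and srcu_read_unlock(). "my_srcu", "my_ptr" and
 * "struct my_data" are hypothetical and are reused by the later sketches.
 */
struct my_data {
        int val;
        struct rcu_head rh;
};

static struct my_data __rcu *my_ptr;

static int my_read_val(void)
{
        struct my_data *p;
        int idx, v = -1;

        idx = srcu_read_lock(&my_srcu);                 /* bumps ->srcu_lock_count[idx] */
        p = srcu_dereference(my_ptr, &my_srcu);         /* SRCU-protected dereference */
        if (p)
                v = p->val;
        srcu_read_unlock(&my_srcu, idx);                /* bumps ->srcu_unlock_count[idx] */
        return v;
}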
 * (defined below) to allow SRCU readers to exit their read-side critical
 * we repeatedly block for 1-millisecond time periods.
/* in srcu_gp_start() */
struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&ssp->srcu_gp_seq));
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
rcu_seq_snap(&ssp->srcu_gp_seq));
smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
rcu_seq_start(&ssp->srcu_gp_seq);
state = rcu_seq_state(ssp->srcu_gp_seq);

/* in srcu_delay_timer() */
queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);

/* in srcu_queue_delayed_work_on() */
queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
timer_reduce(&sdp->delay_work, jiffies + delay);

 * just-completed grace period, the one corresponding to idx. If possible,
/* in srcu_schedule_cbs_snp() */
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
if (!(mask & (1 << (cpu - snp->grplo))))
srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);

 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
/* in srcu_gp_end() */
mutex_lock(&ssp->srcu_cb_mutex);
/* End the current grace period. */
idx = rcu_seq_state(ssp->srcu_gp_seq);
WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
rcu_seq_end(&ssp->srcu_gp_seq);
gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
mutex_unlock(&ssp->srcu_gp_mutex);
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
cbs = snp->srcu_have_cbs[idx] == gpseq;
snp->srcu_have_cbs[idx] = gpseq;
rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
mask = snp->srcu_data_have_cbs[idx];
snp->srcu_data_have_cbs[idx] = 0;
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
sdp = per_cpu_ptr(ssp->sda, cpu);
sdp->srcu_gp_seq_needed + 100))
sdp->srcu_gp_seq_needed = gpseq;
sdp->srcu_gp_seq_needed_exp + 100))
sdp->srcu_gp_seq_needed_exp = gpseq;
mutex_unlock(&ssp->srcu_cb_mutex);
gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests. This function is invoked for the first known
/* in srcu_funnel_exp_start() */
for (; snp != NULL; snp = snp->srcu_parent) {
if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * period s. Losers must either ensure that their desired grace-period
/* in srcu_funnel_gp_start() */
int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
struct srcu_node *snp = sdp->mynode;
for (; snp != NULL; snp = snp->srcu_parent) {
if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
snp_seq = snp->srcu_have_cbs[idx];
if (snp == sdp->mynode && snp_seq == s)
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
if (snp == sdp->mynode && snp_seq != s) {
snp->srcu_have_cbs[idx] = s;
if (snp == sdp->mynode)
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
queue_delayed_work(rcu_gp_wq, &ssp->work,
else if (list_empty(&ssp->work.work.entry))
list_add(&ssp->work.work.entry, &srcu_boot_list);
 * The caller must ensure that ->srcu_idx is not changed while checking.
/* in try_check_zero() */
if (--trycount + !srcu_get_delay(ssp) <= 0)

 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
 * us to wait for pre-existing readers in a starvation-free manner.
/* in srcu_flip() */
 * of ->srcu_idx. Also ensure that if a given reader sees the
 * new value of ->srcu_idx, this updater's earlier scans cannot
WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

 * Note that it is OK for several current from-idle requests for a new
 * Note also that if any CPU (including the current one) is still invoking
 * This function is also subject to counter-wrap errors, but let's face
 * of a needlessly non-expedited grace period is similarly negligible.
/* in srcu_might_be_idle() */
sdp = raw_cpu_ptr(ssp->sda);
if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
tlast = READ_ONCE(ssp->srcu_last_gp_end);
curseq = rcu_seq_current(&ssp->srcu_gp_seq);
smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
smp_mb(); /* Order ->srcu_gp_seq with prior access. */
if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
 * SRCU callback function to leak a callback.

 * Start an SRCU grace period, and also queue the callback if non-NULL.
/* in srcu_gp_start_if_needed() */
sdp = raw_cpu_ptr(ssp->sda);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&ssp->srcu_gp_seq));
s = rcu_seq_snap(&ssp->srcu_gp_seq);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
sdp->srcu_gp_seq_needed = s;
if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
sdp->srcu_gp_seq_needed_exp = s;
srcu_funnel_exp_start(ssp, sdp->mynode, s);
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 * all pre-existing SRCU read-side critical sections. On systems with
 * its last corresponding SRCU read-side critical section whose beginning
 * an SRCU read-side critical section that continues beyond the start of
 * but before the beginning of that SRCU read-side critical section.
/* in __call_srcu() */
/* Probable double call_srcu(), so leak the callback. */
WRITE_ONCE(rhp->func, srcu_leak_callback);
rhp->func = func;
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed. However, the callback
 * function might well execute concurrently with other SRCU read-side
 * read-side critical sections are delimited by srcu_read_lock() and
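/*
 * Illustrative call_srcu() sketch (not from this file), continuing the
 * hypothetical my_srcu/my_data example: the rcu_head embedded in the
 * protected object is passed to call_srcu(), and the object is freed
 * from the callback once a grace period has elapsed.
 */
#include <linux/slab.h>

static void my_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct my_data, rh));
}

static void my_retire(struct my_data *p)
{
        call_srcu(&my_srcu, &p->rh, my_free_cb);        /* does not block; cb runs later */
}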
921 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); in __synchronize_srcu()
937 * because the current CPU might have been totally uninvolved with in __synchronize_srcu()
944 * synchronize_srcu_expedited - Brute-force SRCU grace period
951 * memory-ordering properties as does synchronize_srcu().
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * SRCU read-side critical section; doing so will result in deadlock.
 * srcu_struct from some other srcu_struct's read-side critical section,
 * There are memory-ordering constraints implied by synchronize_srcu().
 * the end of its last corresponding SRCU read-side critical section
 * each CPU having an SRCU read-side critical section that extends beyond
 * the beginning of that SRCU read-side critical section. Note that these
 * Of course, these memory-ordering guarantees apply only when
 * Implementation of these memory-ordering guarantees is similar to
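/*
 * Illustrative updater sketch (not from this file), continuing the
 * hypothetical example: publish a new version, wait for pre-existing
 * readers with synchronize_srcu(), then free the old version. "my_lock"
 * is hypothetical; synchronize_srcu() may block, so it must not be called
 * from within a corresponding SRCU read-side critical section.
 */
static DEFINE_MUTEX(my_lock);

static void my_update(struct my_data *newp)
{
        struct my_data *oldp;

        mutex_lock(&my_lock);
        oldp = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
        rcu_assign_pointer(my_ptr, newp);
        mutex_unlock(&my_lock);

        synchronize_srcu(&my_srcu);     /* wait for readers that might see oldp */
        kfree(oldp);
}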
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
/* in get_state_synchronize_srcu() */
// Any prior manipulation of SRCU-protected data must happen
// before the load from ->srcu_gp_seq.
return rcu_seq_snap(&ssp->srcu_gp_seq);

 * start_poll_synchronize_srcu - Provide cookie and start grace period

 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * 25-microsecond expedited SRCU grace periods. However, a more likely
 * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
/* in poll_state_synchronize_srcu() */
if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
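/*
 * Illustrative polled grace-period sketch (not from this file), continuing
 * the hypothetical example: take a cookie while starting a grace period,
 * then check for completion later without blocking.
 */
static unsigned long my_cookie;

static void my_start_deferred(void)
{
        my_cookie = start_poll_synchronize_srcu(&my_srcu);      /* starts GP if needed */
}

static bool my_deferred_done(void)
{
        return poll_state_synchronize_srcu(&my_srcu, my_cookie);
}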
/* in srcu_barrier_cb() */
ssp = sdp->ssp;
if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
complete(&ssp->srcu_barrier_completion);

 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
/* in srcu_barrier() */
unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
mutex_lock(&ssp->srcu_barrier_mutex);
if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
mutex_unlock(&ssp->srcu_barrier_mutex);
rcu_seq_start(&ssp->srcu_barrier_seq);
init_completion(&ssp->srcu_barrier_completion);
atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
sdp = per_cpu_ptr(ssp->sda, cpu);
atomic_inc(&ssp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
&sdp->srcu_barrier_head)) {
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
atomic_dec(&ssp->srcu_barrier_cpu_cnt);
if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
complete(&ssp->srcu_barrier_completion);
wait_for_completion(&ssp->srcu_barrier_completion);
rcu_seq_end(&ssp->srcu_barrier_seq);
mutex_unlock(&ssp->srcu_barrier_mutex);
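/*
 * Illustrative teardown sketch (not from this file), continuing the
 * hypothetical example: srcu_barrier() waits for all previously queued
 * call_srcu() callbacks to be invoked, after which cleanup_srcu_struct()
 * is safe.
 */
static void my_subsys_teardown(void)
{
        /* ...ensure no new call_srcu() invocations can be queued... */
        srcu_barrier(&my_srcu);         /* flush in-flight callbacks */
        cleanup_srcu_struct(&my_srcu);
}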
 * srcu_batches_completed - return batches completed.
/* in srcu_batches_completed() */
return READ_ONCE(ssp->srcu_idx);

 * Core SRCU state machine. Push state bits of ->srcu_gp_seq
/* in srcu_advance_state() */
mutex_lock(&ssp->srcu_gp_mutex);
 * fetching ->srcu_idx for their index, at any point in time there
 * The load-acquire ensures that we see the accesses performed
idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
mutex_unlock(&ssp->srcu_gp_mutex);
idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
mutex_unlock(&ssp->srcu_gp_mutex);
if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
idx = 1 ^ (ssp->srcu_idx & 1);
mutex_unlock(&ssp->srcu_gp_mutex);
rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
 * SRCU read-side critical sections are normally short,
idx = 1 ^ (ssp->srcu_idx & 1);
mutex_unlock(&ssp->srcu_gp_mutex);
srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
/* in srcu_invoke_callbacks() */
ssp = sdp->ssp;
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&ssp->srcu_gp_seq));
if (sdp->srcu_cblist_invoking ||
!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
sdp->srcu_cblist_invoking = true;
rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
rhp->func(rhp);
rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
rcu_seq_snap(&ssp->srcu_gp_seq));
sdp->srcu_cblist_invoking = false;
more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);

 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
/* in srcu_reschedule() */
if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
 * This is the work-queue function that handles SRCU grace periods.

/* in srcutorture_get_gp_data() */
*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);

/* in srcu_torture_stats_print() */
idx = ssp->srcu_idx & 0x1;
pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
sdp = per_cpu_ptr(ssp->sda, cpu);
u0 = data_race(sdp->srcu_unlock_count[!idx]);
u1 = data_race(sdp->srcu_unlock_count[idx]);
l0 = data_race(sdp->srcu_lock_count[!idx]);
l1 = data_race(sdp->srcu_lock_count[idx]);
c0 = l0 - u0;
c1 = l1 - u1;
"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
/* in srcu_bootup_announce() */
pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);

/* in srcu_init() */
list_del_init(&ssp->work.work.entry);
queue_work(rcu_gp_wq, &ssp->work.work);

/* Initialize any global-scope srcu_struct structures used by this module. */
/* in srcu_module_coming() */
int i;
struct srcu_struct **sspp = mod->srcu_struct_ptrs;
for (i = 0; i < mod->num_srcu_structs; i++) {

/* Clean up any global-scope srcu_struct structures used by this module. */
/* in srcu_module_going() */
int i;
struct srcu_struct **sspp = mod->srcu_struct_ptrs;
for (i = 0; i < mod->num_srcu_structs; i++)