Lines Matching full:ssp

46 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
83 static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static) in init_srcu_struct_nodes() argument
94 ssp->level[0] = &ssp->node[0]; in init_srcu_struct_nodes()
96 ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1]; in init_srcu_struct_nodes()
100 srcu_for_each_node_breadth_first(ssp, snp) { in init_srcu_struct_nodes()
111 if (snp == &ssp->node[0]) { in init_srcu_struct_nodes()
118 if (snp == ssp->level[level + 1]) in init_srcu_struct_nodes()
120 snp->srcu_parent = ssp->level[level - 1] + in init_srcu_struct_nodes()
121 (snp - ssp->level[level]) / in init_srcu_struct_nodes()
132 snp_first = ssp->level[level]; in init_srcu_struct_nodes()
134 sdp = per_cpu_ptr(ssp->sda, cpu); in init_srcu_struct_nodes()
138 sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq; in init_srcu_struct_nodes()
139 sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq; in init_srcu_struct_nodes()
149 sdp->ssp = ssp; in init_srcu_struct_nodes()
168 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) in init_srcu_struct_fields() argument
170 mutex_init(&ssp->srcu_cb_mutex); in init_srcu_struct_fields()
171 mutex_init(&ssp->srcu_gp_mutex); in init_srcu_struct_fields()
172 ssp->srcu_idx = 0; in init_srcu_struct_fields()
173 ssp->srcu_gp_seq = 0; in init_srcu_struct_fields()
174 ssp->srcu_barrier_seq = 0; in init_srcu_struct_fields()
175 mutex_init(&ssp->srcu_barrier_mutex); in init_srcu_struct_fields()
176 atomic_set(&ssp->srcu_barrier_cpu_cnt, 0); in init_srcu_struct_fields()
177 INIT_DELAYED_WORK(&ssp->work, process_srcu); in init_srcu_struct_fields()
179 ssp->sda = alloc_percpu(struct srcu_data); in init_srcu_struct_fields()
180 init_srcu_struct_nodes(ssp, is_static); in init_srcu_struct_fields()
181 ssp->srcu_gp_seq_needed_exp = 0; in init_srcu_struct_fields()
182 ssp->srcu_last_gp_end = ktime_get_mono_fast_ns(); in init_srcu_struct_fields()
183 smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */ in init_srcu_struct_fields()
184 return ssp->sda ? 0 : -ENOMEM; in init_srcu_struct_fields()
189 int __init_srcu_struct(struct srcu_struct *ssp, const char *name, in __init_srcu_struct() argument
193 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp)); in __init_srcu_struct()
194 lockdep_init_map(&ssp->dep_map, name, key, 0); in __init_srcu_struct()
195 spin_lock_init(&ACCESS_PRIVATE(ssp, lock)); in __init_srcu_struct()
196 return init_srcu_struct_fields(ssp, false); in __init_srcu_struct()
204 * @ssp: structure to initialize.
210 int init_srcu_struct(struct srcu_struct *ssp) in init_srcu_struct() argument
212 spin_lock_init(&ACCESS_PRIVATE(ssp, lock)); in init_srcu_struct()
213 return init_srcu_struct_fields(ssp, false); in init_srcu_struct()
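
The __init_srcu_struct()/init_srcu_struct() matches above are the public initializers. As a hedged sketch (struct my_driver, struct my_state, and their fields are made-up names, not from this file), dynamic initialization would look roughly like:

#include <linux/srcu.h>
#include <linux/slab.h>

/* Hypothetical SRCU-protected state; the rcu_head is used later with call_srcu(). */
struct my_state {
	struct rcu_head rh;
	int value;
};

struct my_driver {
	struct srcu_struct srcu;	/* per-driver SRCU domain */
	struct my_state __rcu *state;	/* pointer protected by ->srcu */
};

static struct my_driver *my_driver_create(void)
{
	struct my_driver *drv = kzalloc(sizeof(*drv), GFP_KERNEL);

	if (!drv)
		return NULL;
	if (init_srcu_struct(&drv->srcu)) {	/* -ENOMEM if the ->sda allocation fails */
		kfree(drv);
		return NULL;
	}
	return drv;
}

Statically allocated domains instead use DEFINE_SRCU()/DEFINE_STATIC_SRCU(); as the next group of matches shows, those are finished lazily by check_init_srcu_struct() rather than by an explicit init_srcu_struct() call.
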
223 * to each update-side SRCU primitive. Use ssp->lock, which -is-
227 static void check_init_srcu_struct(struct srcu_struct *ssp) in check_init_srcu_struct() argument
232 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/ in check_init_srcu_struct()
234 spin_lock_irqsave_rcu_node(ssp, flags); in check_init_srcu_struct()
235 if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) { in check_init_srcu_struct()
236 spin_unlock_irqrestore_rcu_node(ssp, flags); in check_init_srcu_struct()
239 init_srcu_struct_fields(ssp, true); in check_init_srcu_struct()
240 spin_unlock_irqrestore_rcu_node(ssp, flags); in check_init_srcu_struct()
247 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx) in srcu_readers_lock_idx() argument
253 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_lock_idx()
264 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx) in srcu_readers_unlock_idx() argument
270 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_unlock_idx()
281 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx) in srcu_readers_active_idx_check() argument
285 unlocks = srcu_readers_unlock_idx(ssp, idx); in srcu_readers_active_idx_check()
321 return srcu_readers_lock_idx(ssp, idx) == unlocks; in srcu_readers_active_idx_check()
327 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
333 static bool srcu_readers_active(struct srcu_struct *ssp) in srcu_readers_active() argument
339 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_active()
355 static unsigned long srcu_get_delay(struct srcu_struct *ssp) in srcu_get_delay() argument
357 if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), in srcu_get_delay()
358 READ_ONCE(ssp->srcu_gp_seq_needed_exp))) in srcu_get_delay()
365 * @ssp: structure to clean up.
370 void cleanup_srcu_struct(struct srcu_struct *ssp) in cleanup_srcu_struct() argument
374 if (WARN_ON(!srcu_get_delay(ssp))) in cleanup_srcu_struct()
376 if (WARN_ON(srcu_readers_active(ssp))) in cleanup_srcu_struct()
378 flush_delayed_work(&ssp->work); in cleanup_srcu_struct()
380 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); in cleanup_srcu_struct()
387 if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) || in cleanup_srcu_struct()
388 WARN_ON(srcu_readers_active(ssp))) { in cleanup_srcu_struct()
390 __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))); in cleanup_srcu_struct()
393 free_percpu(ssp->sda); in cleanup_srcu_struct()
394 ssp->sda = NULL; in cleanup_srcu_struct()
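
The WARN_ONs in cleanup_srcu_struct() above fire if callbacks or readers are still outstanding, so teardown has to drain both first. A sketch of the usual ordering, continuing the hypothetical my_driver example:

static void my_driver_destroy(struct my_driver *drv)
{
	/* Callers are assumed to have unpublished drv so no new readers can start. */
	synchronize_srcu(&drv->srcu);	/* wait out pre-existing readers */
	srcu_barrier(&drv->srcu);	/* wait for pending call_srcu() callbacks */
	cleanup_srcu_struct(&drv->srcu);/* frees ->sda; the WARN_ONs above stay quiet */
	kfree(drv);
}
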
403 int __srcu_read_lock(struct srcu_struct *ssp) in __srcu_read_lock() argument
407 idx = READ_ONCE(ssp->srcu_idx) & 0x1; in __srcu_read_lock()
408 this_cpu_inc(ssp->sda->srcu_lock_count[idx]); in __srcu_read_lock()
419 void __srcu_read_unlock(struct srcu_struct *ssp, int idx) in __srcu_read_unlock() argument
422 this_cpu_inc(ssp->sda->srcu_unlock_count[idx]); in __srcu_read_unlock()
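
__srcu_read_lock()/__srcu_read_unlock() are what the public srcu_read_lock()/srcu_read_unlock() wrappers call. A typical reader section (again using the hypothetical my_driver names) pairs them with srcu_dereference():

static int my_read_value(struct my_driver *drv)
{
	struct my_state *st;
	int idx, ret;

	idx = srcu_read_lock(&drv->srcu);	/* returns the current ->srcu_idx bit */
	st = srcu_dereference(drv->state, &drv->srcu);
	ret = st ? st->value : -ENOENT;
	srcu_read_unlock(&drv->srcu, idx);	/* must be handed back the same idx */
	return ret;
}
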
438 static void srcu_gp_start(struct srcu_struct *ssp) in srcu_gp_start() argument
440 struct srcu_data *sdp = this_cpu_ptr(ssp->sda); in srcu_gp_start()
443 lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock)); in srcu_gp_start()
444 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)); in srcu_gp_start()
447 rcu_seq_current(&ssp->srcu_gp_seq)); in srcu_gp_start()
449 rcu_seq_snap(&ssp->srcu_gp_seq)); in srcu_gp_start()
452 rcu_seq_start(&ssp->srcu_gp_seq); in srcu_gp_start()
453 state = rcu_seq_state(ssp->srcu_gp_seq); in srcu_gp_start()
491 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp, in srcu_schedule_cbs_snp() argument
499 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay); in srcu_schedule_cbs_snp()
512 static void srcu_gp_end(struct srcu_struct *ssp) in srcu_gp_end() argument
526 mutex_lock(&ssp->srcu_cb_mutex); in srcu_gp_end()
529 spin_lock_irq_rcu_node(ssp); in srcu_gp_end()
530 idx = rcu_seq_state(ssp->srcu_gp_seq); in srcu_gp_end()
532 cbdelay = srcu_get_delay(ssp); in srcu_gp_end()
533 WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns()); in srcu_gp_end()
534 rcu_seq_end(&ssp->srcu_gp_seq); in srcu_gp_end()
535 gpseq = rcu_seq_current(&ssp->srcu_gp_seq); in srcu_gp_end()
536 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq)) in srcu_gp_end()
537 WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq); in srcu_gp_end()
538 spin_unlock_irq_rcu_node(ssp); in srcu_gp_end()
539 mutex_unlock(&ssp->srcu_gp_mutex); in srcu_gp_end()
544 srcu_for_each_node_breadth_first(ssp, snp) { in srcu_gp_end()
547 last_lvl = snp >= ssp->level[rcu_num_lvls - 1]; in srcu_gp_end()
558 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay); in srcu_gp_end()
563 sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_gp_end()
576 mutex_unlock(&ssp->srcu_cb_mutex); in srcu_gp_end()
579 spin_lock_irq_rcu_node(ssp); in srcu_gp_end()
580 gpseq = rcu_seq_current(&ssp->srcu_gp_seq); in srcu_gp_end()
582 ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) { in srcu_gp_end()
583 srcu_gp_start(ssp); in srcu_gp_end()
584 spin_unlock_irq_rcu_node(ssp); in srcu_gp_end()
585 srcu_reschedule(ssp, 0); in srcu_gp_end()
587 spin_unlock_irq_rcu_node(ssp); in srcu_gp_end()
598 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp, in srcu_funnel_exp_start() argument
604 if (rcu_seq_done(&ssp->srcu_gp_seq, s) || in srcu_funnel_exp_start()
615 spin_lock_irqsave_rcu_node(ssp, flags); in srcu_funnel_exp_start()
616 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s)) in srcu_funnel_exp_start()
617 WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s); in srcu_funnel_exp_start()
618 spin_unlock_irqrestore_rcu_node(ssp, flags); in srcu_funnel_exp_start()
631 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp, in srcu_funnel_gp_start() argument
641 if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode) in srcu_funnel_gp_start()
656 srcu_funnel_exp_start(ssp, snp, s); in srcu_funnel_gp_start()
668 spin_lock_irqsave_rcu_node(ssp, flags); in srcu_funnel_gp_start()
669 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) { in srcu_funnel_gp_start()
674 smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/ in srcu_funnel_gp_start()
676 if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s)) in srcu_funnel_gp_start()
677 WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s); in srcu_funnel_gp_start()
680 if (!rcu_seq_done(&ssp->srcu_gp_seq, s) && in srcu_funnel_gp_start()
681 rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) { in srcu_funnel_gp_start()
682 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)); in srcu_funnel_gp_start()
683 srcu_gp_start(ssp); in srcu_funnel_gp_start()
685 queue_delayed_work(rcu_gp_wq, &ssp->work, in srcu_funnel_gp_start()
686 srcu_get_delay(ssp)); in srcu_funnel_gp_start()
687 else if (list_empty(&ssp->work.work.entry)) in srcu_funnel_gp_start()
688 list_add(&ssp->work.work.entry, &srcu_boot_list); in srcu_funnel_gp_start()
690 spin_unlock_irqrestore_rcu_node(ssp, flags); in srcu_funnel_gp_start()
698 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount) in try_check_zero() argument
701 if (srcu_readers_active_idx_check(ssp, idx)) in try_check_zero()
703 if (--trycount + !srcu_get_delay(ssp) <= 0) in try_check_zero()
714 static void srcu_flip(struct srcu_struct *ssp) in srcu_flip() argument
726 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); in srcu_flip()
759 static bool srcu_might_be_idle(struct srcu_struct *ssp) in srcu_might_be_idle() argument
767 check_init_srcu_struct(ssp); in srcu_might_be_idle()
769 sdp = raw_cpu_ptr(ssp->sda); in srcu_might_be_idle()
785 tlast = READ_ONCE(ssp->srcu_last_gp_end); in srcu_might_be_idle()
791 curseq = rcu_seq_current(&ssp->srcu_gp_seq); in srcu_might_be_idle()
793 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed))) in srcu_might_be_idle()
796 if (curseq != rcu_seq_current(&ssp->srcu_gp_seq)) in srcu_might_be_idle()
836 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in __call_srcu() argument
846 check_init_srcu_struct(ssp); in __call_srcu()
854 idx = srcu_read_lock(ssp); in __call_srcu()
855 sdp = raw_cpu_ptr(ssp->sda); in __call_srcu()
859 rcu_seq_current(&ssp->srcu_gp_seq)); in __call_srcu()
860 s = rcu_seq_snap(&ssp->srcu_gp_seq); in __call_srcu()
872 srcu_funnel_gp_start(ssp, sdp, s, do_norm); in __call_srcu()
874 srcu_funnel_exp_start(ssp, sdp->mynode, s); in __call_srcu()
875 srcu_read_unlock(ssp, idx); in __call_srcu()
880 * @ssp: srcu_struct in which to queue the callback
895 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in call_srcu() argument
898 __call_srcu(ssp, rhp, func, true); in call_srcu()
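
call_srcu() enqueues the callback on this CPU's srcu_data and funnels a grace-period request up the tree via srcu_funnel_gp_start(), as the __call_srcu() matches above show. An asynchronous-update sketch under the same hypothetical names (updaters assumed serialized by the caller):

static void my_state_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_state, rh));
}

static void my_update_async(struct my_driver *drv, struct my_state *new_st)
{
	struct my_state *old;

	old = rcu_dereference_protected(drv->state, 1);	/* "1": serialization assumed */
	rcu_assign_pointer(drv->state, new_st);
	if (old)	/* old is freed only after a full SRCU grace period */
		call_srcu(&drv->srcu, &old->rh, my_state_free_cb);
}
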
905 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm) in __synchronize_srcu() argument
909 RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) || in __synchronize_srcu()
918 check_init_srcu_struct(ssp); in __synchronize_srcu()
921 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); in __synchronize_srcu()
937 * @ssp: srcu_struct with which to synchronize.
945 void synchronize_srcu_expedited(struct srcu_struct *ssp) in synchronize_srcu_expedited() argument
947 __synchronize_srcu(ssp, rcu_gp_is_normal()); in synchronize_srcu_expedited()
953 * @ssp: srcu_struct with which to synchronize.
995 void synchronize_srcu(struct srcu_struct *ssp) in synchronize_srcu() argument
997 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) in synchronize_srcu()
998 synchronize_srcu_expedited(ssp); in synchronize_srcu()
1000 __synchronize_srcu(ssp, true); in synchronize_srcu()
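
synchronize_srcu() is the blocking counterpart of call_srcu(): publish the new pointer, wait for a grace period on that srcu_struct, then free the old data directly. A sketch with the same assumed names:

static void my_update_sync(struct my_driver *drv, struct my_state *new_st)
{
	struct my_state *old;

	old = rcu_dereference_protected(drv->state, 1);	/* updaters serialized by caller */
	rcu_assign_pointer(drv->state, new_st);
	synchronize_srcu(&drv->srcu);	/* every reader that could see "old" has finished */
	kfree(old);
}

As the matches above show, synchronize_srcu() upgrades itself to synchronize_srcu_expedited() when srcu_might_be_idle() says the srcu_struct looks idle or when grace periods are globally expedited.
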
1010 struct srcu_struct *ssp; in srcu_barrier_cb() local
1013 ssp = sdp->ssp; in srcu_barrier_cb()
1014 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) in srcu_barrier_cb()
1015 complete(&ssp->srcu_barrier_completion); in srcu_barrier_cb()
1020 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1022 void srcu_barrier(struct srcu_struct *ssp) in srcu_barrier() argument
1026 unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq); in srcu_barrier()
1028 check_init_srcu_struct(ssp); in srcu_barrier()
1029 mutex_lock(&ssp->srcu_barrier_mutex); in srcu_barrier()
1030 if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) { in srcu_barrier()
1032 mutex_unlock(&ssp->srcu_barrier_mutex); in srcu_barrier()
1035 rcu_seq_start(&ssp->srcu_barrier_seq); in srcu_barrier()
1036 init_completion(&ssp->srcu_barrier_completion); in srcu_barrier()
1039 atomic_set(&ssp->srcu_barrier_cpu_cnt, 1); in srcu_barrier()
1050 sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_barrier()
1052 atomic_inc(&ssp->srcu_barrier_cpu_cnt); in srcu_barrier()
1058 atomic_dec(&ssp->srcu_barrier_cpu_cnt); in srcu_barrier()
1064 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) in srcu_barrier()
1065 complete(&ssp->srcu_barrier_completion); in srcu_barrier()
1066 wait_for_completion(&ssp->srcu_barrier_completion); in srcu_barrier()
1068 rcu_seq_end(&ssp->srcu_barrier_seq); in srcu_barrier()
1069 mutex_unlock(&ssp->srcu_barrier_mutex); in srcu_barrier()
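
srcu_barrier() waits only for callbacks already queued with call_srcu(); it neither waits for readers nor blocks new callbacks. A module-teardown flavored sketch with a statically defined domain (my_srcu and my_module_exit are illustrative names):

#include <linux/module.h>
#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);	/* build-time initialized SRCU domain */

static void __exit my_module_exit(void)
{
	/* The module is assumed to have stopped issuing call_srcu() by now. */
	srcu_barrier(&my_srcu);	/* all previously queued callbacks have run */
	/* Resources those callbacks referenced may now be released. */
}
module_exit(my_module_exit);
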
1075 * @ssp: srcu_struct on which to report batch completion.
1080 unsigned long srcu_batches_completed(struct srcu_struct *ssp) in srcu_batches_completed() argument
1082 return READ_ONCE(ssp->srcu_idx); in srcu_batches_completed()
1091 static void srcu_advance_state(struct srcu_struct *ssp) in srcu_advance_state() argument
1095 mutex_lock(&ssp->srcu_gp_mutex); in srcu_advance_state()
1107 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ in srcu_advance_state()
1109 spin_lock_irq_rcu_node(ssp); in srcu_advance_state()
1110 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { in srcu_advance_state()
1111 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); in srcu_advance_state()
1112 spin_unlock_irq_rcu_node(ssp); in srcu_advance_state()
1113 mutex_unlock(&ssp->srcu_gp_mutex); in srcu_advance_state()
1116 idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); in srcu_advance_state()
1118 srcu_gp_start(ssp); in srcu_advance_state()
1119 spin_unlock_irq_rcu_node(ssp); in srcu_advance_state()
1121 mutex_unlock(&ssp->srcu_gp_mutex); in srcu_advance_state()
1126 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { in srcu_advance_state()
1127 idx = 1 ^ (ssp->srcu_idx & 1); in srcu_advance_state()
1128 if (!try_check_zero(ssp, idx, 1)) { in srcu_advance_state()
1129 mutex_unlock(&ssp->srcu_gp_mutex); in srcu_advance_state()
1132 srcu_flip(ssp); in srcu_advance_state()
1133 spin_lock_irq_rcu_node(ssp); in srcu_advance_state()
1134 rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); in srcu_advance_state()
1135 spin_unlock_irq_rcu_node(ssp); in srcu_advance_state()
1138 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { in srcu_advance_state()
1144 idx = 1 ^ (ssp->srcu_idx & 1); in srcu_advance_state()
1145 if (!try_check_zero(ssp, idx, 2)) { in srcu_advance_state()
1146 mutex_unlock(&ssp->srcu_gp_mutex); in srcu_advance_state()
1149 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ in srcu_advance_state()
1165 struct srcu_struct *ssp; in srcu_invoke_callbacks() local
1169 ssp = sdp->ssp; in srcu_invoke_callbacks()
1173 rcu_seq_current(&ssp->srcu_gp_seq)); in srcu_invoke_callbacks()
1199 rcu_seq_snap(&ssp->srcu_gp_seq)); in srcu_invoke_callbacks()
1211 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) in srcu_reschedule() argument
1215 spin_lock_irq_rcu_node(ssp); in srcu_reschedule()
1216 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { in srcu_reschedule()
1217 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { in srcu_reschedule()
1221 } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { in srcu_reschedule()
1223 srcu_gp_start(ssp); in srcu_reschedule()
1225 spin_unlock_irq_rcu_node(ssp); in srcu_reschedule()
1228 queue_delayed_work(rcu_gp_wq, &ssp->work, delay); in srcu_reschedule()
1236 struct srcu_struct *ssp; in process_srcu() local
1238 ssp = container_of(work, struct srcu_struct, work.work); in process_srcu()
1240 srcu_advance_state(ssp); in process_srcu()
1241 srcu_reschedule(ssp, srcu_get_delay(ssp)); in process_srcu()
1245 struct srcu_struct *ssp, int *flags, in srcutorture_get_gp_data() argument
1251 *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); in srcutorture_get_gp_data()
1255 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) in srcu_torture_stats_print() argument
1261 idx = ssp->srcu_idx & 0x1; in srcu_torture_stats_print()
1263 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); in srcu_torture_stats_print()
1270 sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_torture_stats_print()
1306 struct srcu_struct *ssp; in srcu_init() local
1310 ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, in srcu_init()
1312 check_init_srcu_struct(ssp); in srcu_init()
1313 list_del_init(&ssp->work.work.entry); in srcu_init()
1314 queue_work(rcu_gp_wq, &ssp->work.work); in srcu_init()