Lines matching refs: sp — references to the struct srcu_struct pointer argument/local named sp in kernel/rcu/srcutree.c (Tree SRCU). Each entry gives the source line number, the matching line, and the enclosing function.

55 static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
91 static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) in init_srcu_struct_nodes() argument
102 sp->level[0] = &sp->node[0]; in init_srcu_struct_nodes()
104 sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1]; in init_srcu_struct_nodes()
108 rcu_for_each_node_breadth_first(sp, snp) { in init_srcu_struct_nodes()
119 if (snp == &sp->node[0]) { in init_srcu_struct_nodes()
126 if (snp == sp->level[level + 1]) in init_srcu_struct_nodes()
128 snp->srcu_parent = sp->level[level - 1] + in init_srcu_struct_nodes()
129 (snp - sp->level[level]) / in init_srcu_struct_nodes()
140 snp_first = sp->level[level]; in init_srcu_struct_nodes()
142 sdp = per_cpu_ptr(sp->sda, cpu); in init_srcu_struct_nodes()
146 sdp->srcu_gp_seq_needed = sp->srcu_gp_seq; in init_srcu_struct_nodes()
147 sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq; in init_srcu_struct_nodes()
156 sdp->sp = sp; in init_srcu_struct_nodes()
175 static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) in init_srcu_struct_fields() argument
177 mutex_init(&sp->srcu_cb_mutex); in init_srcu_struct_fields()
178 mutex_init(&sp->srcu_gp_mutex); in init_srcu_struct_fields()
179 sp->srcu_idx = 0; in init_srcu_struct_fields()
180 sp->srcu_gp_seq = 0; in init_srcu_struct_fields()
181 sp->srcu_barrier_seq = 0; in init_srcu_struct_fields()
182 mutex_init(&sp->srcu_barrier_mutex); in init_srcu_struct_fields()
183 atomic_set(&sp->srcu_barrier_cpu_cnt, 0); in init_srcu_struct_fields()
184 INIT_DELAYED_WORK(&sp->work, process_srcu); in init_srcu_struct_fields()
186 sp->sda = alloc_percpu(struct srcu_data); in init_srcu_struct_fields()
187 init_srcu_struct_nodes(sp, is_static); in init_srcu_struct_fields()
188 sp->srcu_gp_seq_needed_exp = 0; in init_srcu_struct_fields()
189 sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); in init_srcu_struct_fields()
190 smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ in init_srcu_struct_fields()
191 return sp->sda ? 0 : -ENOMEM; in init_srcu_struct_fields()
196 int __init_srcu_struct(struct srcu_struct *sp, const char *name, in __init_srcu_struct() argument
200 debug_check_no_locks_freed((void *)sp, sizeof(*sp)); in __init_srcu_struct()
201 lockdep_init_map(&sp->dep_map, name, key, 0); in __init_srcu_struct()
202 spin_lock_init(&ACCESS_PRIVATE(sp, lock)); in __init_srcu_struct()
203 return init_srcu_struct_fields(sp, false); in __init_srcu_struct()
217 int init_srcu_struct(struct srcu_struct *sp) in init_srcu_struct() argument
219 spin_lock_init(&ACCESS_PRIVATE(sp, lock)); in init_srcu_struct()
220 return init_srcu_struct_fields(sp, false); in init_srcu_struct()
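For orientation, a minimal usage sketch of the dynamic-initialization path reached through init_srcu_struct() above; struct my_ctx, my_ctx_init(), and my_ctx_destroy() are hypothetical names, not lines from the file:

        #include <linux/srcu.h>

        struct my_ctx {
                struct srcu_struct srcu;
                /* ... state read under SRCU, updated elsewhere ... */
        };

        static int my_ctx_init(struct my_ctx *ctx)
        {
                /* 0 on success, -ENOMEM if the per-CPU srcu_data allocation fails. */
                return init_srcu_struct(&ctx->srcu);
        }

        static void my_ctx_destroy(struct my_ctx *ctx)
        {
                /* Caller must ensure no readers or queued callbacks remain. */
                cleanup_srcu_struct(&ctx->srcu);
        }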
234 static void check_init_srcu_struct(struct srcu_struct *sp) in check_init_srcu_struct() argument
240 if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/ in check_init_srcu_struct()
242 spin_lock_irqsave_rcu_node(sp, flags); in check_init_srcu_struct()
243 if (!rcu_seq_state(sp->srcu_gp_seq_needed)) { in check_init_srcu_struct()
244 spin_unlock_irqrestore_rcu_node(sp, flags); in check_init_srcu_struct()
247 init_srcu_struct_fields(sp, true); in check_init_srcu_struct()
248 spin_unlock_irqrestore_rcu_node(sp, flags); in check_init_srcu_struct()
255 static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) in srcu_readers_lock_idx() argument
261 struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); in srcu_readers_lock_idx()
272 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) in srcu_readers_unlock_idx() argument
278 struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); in srcu_readers_unlock_idx()
289 static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) in srcu_readers_active_idx_check() argument
293 unlocks = srcu_readers_unlock_idx(sp, idx); in srcu_readers_active_idx_check()
329 return srcu_readers_lock_idx(sp, idx) == unlocks; in srcu_readers_active_idx_check()
341 static bool srcu_readers_active(struct srcu_struct *sp) in srcu_readers_active() argument
347 struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); in srcu_readers_active()
363 static unsigned long srcu_get_delay(struct srcu_struct *sp) in srcu_get_delay() argument
365 if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq), in srcu_get_delay()
366 READ_ONCE(sp->srcu_gp_seq_needed_exp))) in srcu_get_delay()
372 void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced) in _cleanup_srcu_struct() argument
376 if (WARN_ON(!srcu_get_delay(sp))) in _cleanup_srcu_struct()
378 if (WARN_ON(srcu_readers_active(sp))) in _cleanup_srcu_struct()
381 if (WARN_ON(delayed_work_pending(&sp->work))) in _cleanup_srcu_struct()
384 flush_delayed_work(&sp->work); in _cleanup_srcu_struct()
388 if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work))) in _cleanup_srcu_struct()
391 flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work); in _cleanup_srcu_struct()
393 if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || in _cleanup_srcu_struct()
394 WARN_ON(srcu_readers_active(sp))) { in _cleanup_srcu_struct()
396 __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); in _cleanup_srcu_struct()
399 free_percpu(sp->sda); in _cleanup_srcu_struct()
400 sp->sda = NULL; in _cleanup_srcu_struct()
409 int __srcu_read_lock(struct srcu_struct *sp) in __srcu_read_lock() argument
413 idx = READ_ONCE(sp->srcu_idx) & 0x1; in __srcu_read_lock()
414 this_cpu_inc(sp->sda->srcu_lock_count[idx]); in __srcu_read_lock()
425 void __srcu_read_unlock(struct srcu_struct *sp, int idx) in __srcu_read_unlock() argument
428 this_cpu_inc(sp->sda->srcu_unlock_count[idx]); in __srcu_read_unlock()
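For orientation, a reader-side sketch using the public srcu_read_lock()/srcu_read_unlock() wrappers that land in __srcu_read_lock()/__srcu_read_unlock() above; my_srcu, my_ptr, struct foo, and read_foo() are hypothetical:

        #include <linux/srcu.h>

        struct foo {
                int val;
                struct rcu_head rh;     /* used by the call_srcu() sketch further down */
        };

        static struct foo __rcu *my_ptr;        /* hypothetical SRCU-protected pointer */
        DEFINE_SRCU(my_srcu);                   /* hypothetical statically allocated srcu_struct */

        static int read_foo(void)
        {
                int idx, val = -1;
                struct foo *p;

                idx = srcu_read_lock(&my_srcu);         /* returns the index to hand back at unlock */
                p = srcu_dereference(my_ptr, &my_srcu); /* fetch the SRCU-protected pointer */
                if (p)
                        val = p->val;
                srcu_read_unlock(&my_srcu, idx);        /* must pass the idx from srcu_read_lock() */
                return val;
        }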
444 static void srcu_gp_start(struct srcu_struct *sp) in srcu_gp_start() argument
446 struct srcu_data *sdp = this_cpu_ptr(sp->sda); in srcu_gp_start()
449 lockdep_assert_held(&ACCESS_PRIVATE(sp, lock)); in srcu_gp_start()
450 WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); in srcu_gp_start()
452 rcu_seq_current(&sp->srcu_gp_seq)); in srcu_gp_start()
454 rcu_seq_snap(&sp->srcu_gp_seq)); in srcu_gp_start()
456 rcu_seq_start(&sp->srcu_gp_seq); in srcu_gp_start()
457 state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); in srcu_gp_start()
511 static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, in srcu_schedule_cbs_snp() argument
519 srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay); in srcu_schedule_cbs_snp()
532 static void srcu_gp_end(struct srcu_struct *sp) in srcu_gp_end() argument
546 mutex_lock(&sp->srcu_cb_mutex); in srcu_gp_end()
549 spin_lock_irq_rcu_node(sp); in srcu_gp_end()
550 idx = rcu_seq_state(sp->srcu_gp_seq); in srcu_gp_end()
552 cbdelay = srcu_get_delay(sp); in srcu_gp_end()
553 sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); in srcu_gp_end()
554 rcu_seq_end(&sp->srcu_gp_seq); in srcu_gp_end()
555 gpseq = rcu_seq_current(&sp->srcu_gp_seq); in srcu_gp_end()
556 if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq)) in srcu_gp_end()
557 sp->srcu_gp_seq_needed_exp = gpseq; in srcu_gp_end()
558 spin_unlock_irq_rcu_node(sp); in srcu_gp_end()
559 mutex_unlock(&sp->srcu_gp_mutex); in srcu_gp_end()
564 rcu_for_each_node_breadth_first(sp, snp) { in srcu_gp_end()
567 last_lvl = snp >= sp->level[rcu_num_lvls - 1]; in srcu_gp_end()
578 srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); in srcu_gp_end()
583 sdp = per_cpu_ptr(sp->sda, cpu); in srcu_gp_end()
596 mutex_unlock(&sp->srcu_cb_mutex); in srcu_gp_end()
599 spin_lock_irq_rcu_node(sp); in srcu_gp_end()
600 gpseq = rcu_seq_current(&sp->srcu_gp_seq); in srcu_gp_end()
602 ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) { in srcu_gp_end()
603 srcu_gp_start(sp); in srcu_gp_end()
604 spin_unlock_irq_rcu_node(sp); in srcu_gp_end()
605 srcu_reschedule(sp, 0); in srcu_gp_end()
607 spin_unlock_irq_rcu_node(sp); in srcu_gp_end()
618 static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, in srcu_funnel_exp_start() argument
624 if (rcu_seq_done(&sp->srcu_gp_seq, s) || in srcu_funnel_exp_start()
635 spin_lock_irqsave_rcu_node(sp, flags); in srcu_funnel_exp_start()
636 if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) in srcu_funnel_exp_start()
637 sp->srcu_gp_seq_needed_exp = s; in srcu_funnel_exp_start()
638 spin_unlock_irqrestore_rcu_node(sp, flags); in srcu_funnel_exp_start()
651 static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, in srcu_funnel_gp_start() argument
661 if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode) in srcu_funnel_gp_start()
676 srcu_funnel_exp_start(sp, snp, s); in srcu_funnel_gp_start()
688 spin_lock_irqsave_rcu_node(sp, flags); in srcu_funnel_gp_start()
689 if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) { in srcu_funnel_gp_start()
694 smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/ in srcu_funnel_gp_start()
696 if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) in srcu_funnel_gp_start()
697 sp->srcu_gp_seq_needed_exp = s; in srcu_funnel_gp_start()
700 if (!rcu_seq_done(&sp->srcu_gp_seq, s) && in srcu_funnel_gp_start()
701 rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) { in srcu_funnel_gp_start()
702 WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); in srcu_funnel_gp_start()
703 srcu_gp_start(sp); in srcu_funnel_gp_start()
704 queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp)); in srcu_funnel_gp_start()
706 spin_unlock_irqrestore_rcu_node(sp, flags); in srcu_funnel_gp_start()
714 static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) in try_check_zero() argument
717 if (srcu_readers_active_idx_check(sp, idx)) in try_check_zero()
719 if (--trycount + !srcu_get_delay(sp) <= 0) in try_check_zero()
730 static void srcu_flip(struct srcu_struct *sp) in srcu_flip() argument
742 WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1); in srcu_flip()
775 static bool srcu_might_be_idle(struct srcu_struct *sp) in srcu_might_be_idle() argument
784 sdp = this_cpu_ptr(sp->sda); in srcu_might_be_idle()
800 time_in_range_open(t, sp->srcu_last_gp_end, in srcu_might_be_idle()
801 sp->srcu_last_gp_end + exp_holdoff)) in srcu_might_be_idle()
805 curseq = rcu_seq_current(&sp->srcu_gp_seq); in srcu_might_be_idle()
807 if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed))) in srcu_might_be_idle()
810 if (curseq != rcu_seq_current(&sp->srcu_gp_seq)) in srcu_might_be_idle()
850 void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, in __call_srcu() argument
859 check_init_srcu_struct(sp); in __call_srcu()
868 sdp = this_cpu_ptr(sp->sda); in __call_srcu()
872 rcu_seq_current(&sp->srcu_gp_seq)); in __call_srcu()
873 s = rcu_seq_snap(&sp->srcu_gp_seq); in __call_srcu()
885 srcu_funnel_gp_start(sp, sdp, s, do_norm); in __call_srcu()
887 srcu_funnel_exp_start(sp, sdp->mynode, s); in __call_srcu()
907 void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, in call_srcu() argument
910 __call_srcu(sp, rhp, func, true); in call_srcu()
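For orientation, an asynchronous-update sketch around call_srcu(), reusing the hypothetical struct foo, my_ptr, and my_srcu from the reader sketch; retire_foo() and free_foo_cb() are likewise hypothetical:

        #include <linux/slab.h>

        static void free_foo_cb(struct rcu_head *rhp)
        {
                kfree(container_of(rhp, struct foo, rh));
        }

        static void retire_foo(void)
        {
                struct foo *old;

                old = rcu_dereference_protected(my_ptr, 1);     /* updater side; serialization assumed */
                rcu_assign_pointer(my_ptr, NULL);               /* unpublish */
                if (old)
                        call_srcu(&my_srcu, &old->rh, free_foo_cb);     /* free after an SRCU grace period */
        }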
917 static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) in __synchronize_srcu() argument
921 RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || in __synchronize_srcu()
930 check_init_srcu_struct(sp); in __synchronize_srcu()
933 __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); in __synchronize_srcu()
957 void synchronize_srcu_expedited(struct srcu_struct *sp) in synchronize_srcu_expedited() argument
959 __synchronize_srcu(sp, rcu_gp_is_normal()); in synchronize_srcu_expedited()
1007 void synchronize_srcu(struct srcu_struct *sp) in synchronize_srcu() argument
1009 if (srcu_might_be_idle(sp) || rcu_gp_is_expedited()) in synchronize_srcu()
1010 synchronize_srcu_expedited(sp); in synchronize_srcu()
1012 __synchronize_srcu(sp, true); in synchronize_srcu()
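For orientation, the synchronous counterpart: unpublish, wait with synchronize_srcu() (or synchronize_srcu_expedited() when latency matters more than disruption), then free directly; replace_foo() and the other names remain hypothetical:

        static void replace_foo(struct foo *newp)
        {
                struct foo *old;

                old = rcu_dereference_protected(my_ptr, 1);     /* updater side; serialization assumed */
                rcu_assign_pointer(my_ptr, newp);               /* publish the replacement */
                synchronize_srcu(&my_srcu);                     /* wait for all pre-existing readers */
                kfree(old);                                     /* no reader can still hold the old version */
        }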
1022 struct srcu_struct *sp; in srcu_barrier_cb() local
1025 sp = sdp->sp; in srcu_barrier_cb()
1026 if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) in srcu_barrier_cb()
1027 complete(&sp->srcu_barrier_completion); in srcu_barrier_cb()
1034 void srcu_barrier(struct srcu_struct *sp) in srcu_barrier() argument
1038 unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq); in srcu_barrier()
1040 check_init_srcu_struct(sp); in srcu_barrier()
1041 mutex_lock(&sp->srcu_barrier_mutex); in srcu_barrier()
1042 if (rcu_seq_done(&sp->srcu_barrier_seq, s)) { in srcu_barrier()
1044 mutex_unlock(&sp->srcu_barrier_mutex); in srcu_barrier()
1047 rcu_seq_start(&sp->srcu_barrier_seq); in srcu_barrier()
1048 init_completion(&sp->srcu_barrier_completion); in srcu_barrier()
1051 atomic_set(&sp->srcu_barrier_cpu_cnt, 1); in srcu_barrier()
1062 sdp = per_cpu_ptr(sp->sda, cpu); in srcu_barrier()
1064 atomic_inc(&sp->srcu_barrier_cpu_cnt); in srcu_barrier()
1070 atomic_dec(&sp->srcu_barrier_cpu_cnt); in srcu_barrier()
1076 if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) in srcu_barrier()
1077 complete(&sp->srcu_barrier_completion); in srcu_barrier()
1078 wait_for_completion(&sp->srcu_barrier_completion); in srcu_barrier()
1080 rcu_seq_end(&sp->srcu_barrier_seq); in srcu_barrier()
1081 mutex_unlock(&sp->srcu_barrier_mutex); in srcu_barrier()
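For orientation, srcu_barrier() as the teardown-side companion of call_srcu(): it waits for every previously queued SRCU callback to run; my_module_exit() is hypothetical:

        static void my_module_exit(void)
        {
                /* Stop posting new call_srcu() callbacks before this point. */
                synchronize_srcu(&my_srcu);     /* no reader still sees unpublished data */
                srcu_barrier(&my_srcu);         /* every queued SRCU callback has now run */
                /* Safe to tear down: no callback can reference this code or data. */
        }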
1092 unsigned long srcu_batches_completed(struct srcu_struct *sp) in srcu_batches_completed() argument
1094 return sp->srcu_idx; in srcu_batches_completed()
1103 static void srcu_advance_state(struct srcu_struct *sp) in srcu_advance_state() argument
1107 mutex_lock(&sp->srcu_gp_mutex); in srcu_advance_state()
1119 idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ in srcu_advance_state()
1121 spin_lock_irq_rcu_node(sp); in srcu_advance_state()
1122 if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { in srcu_advance_state()
1123 WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq)); in srcu_advance_state()
1124 spin_unlock_irq_rcu_node(sp); in srcu_advance_state()
1125 mutex_unlock(&sp->srcu_gp_mutex); in srcu_advance_state()
1128 idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); in srcu_advance_state()
1130 srcu_gp_start(sp); in srcu_advance_state()
1131 spin_unlock_irq_rcu_node(sp); in srcu_advance_state()
1133 mutex_unlock(&sp->srcu_gp_mutex); in srcu_advance_state()
1138 if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { in srcu_advance_state()
1139 idx = 1 ^ (sp->srcu_idx & 1); in srcu_advance_state()
1140 if (!try_check_zero(sp, idx, 1)) { in srcu_advance_state()
1141 mutex_unlock(&sp->srcu_gp_mutex); in srcu_advance_state()
1144 srcu_flip(sp); in srcu_advance_state()
1145 rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); in srcu_advance_state()
1148 if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { in srcu_advance_state()
1154 idx = 1 ^ (sp->srcu_idx & 1); in srcu_advance_state()
1155 if (!try_check_zero(sp, idx, 2)) { in srcu_advance_state()
1156 mutex_unlock(&sp->srcu_gp_mutex); in srcu_advance_state()
1159 srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */ in srcu_advance_state()
1175 struct srcu_struct *sp; in srcu_invoke_callbacks() local
1178 sp = sdp->sp; in srcu_invoke_callbacks()
1182 rcu_seq_current(&sp->srcu_gp_seq)); in srcu_invoke_callbacks()
1208 rcu_seq_snap(&sp->srcu_gp_seq)); in srcu_invoke_callbacks()
1220 static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) in srcu_reschedule() argument
1224 spin_lock_irq_rcu_node(sp); in srcu_reschedule()
1225 if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { in srcu_reschedule()
1226 if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) { in srcu_reschedule()
1230 } else if (!rcu_seq_state(sp->srcu_gp_seq)) { in srcu_reschedule()
1232 srcu_gp_start(sp); in srcu_reschedule()
1234 spin_unlock_irq_rcu_node(sp); in srcu_reschedule()
1237 queue_delayed_work(rcu_gp_wq, &sp->work, delay); in srcu_reschedule()
1245 struct srcu_struct *sp; in process_srcu() local
1247 sp = container_of(work, struct srcu_struct, work.work); in process_srcu()
1249 srcu_advance_state(sp); in process_srcu()
1250 srcu_reschedule(sp, srcu_get_delay(sp)); in process_srcu()
1254 struct srcu_struct *sp, int *flags, in srcutorture_get_gp_data() argument
1260 *gp_seq = rcu_seq_current(&sp->srcu_gp_seq); in srcutorture_get_gp_data()
1264 void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf) in srcu_torture_stats_print() argument
1270 idx = sp->srcu_idx & 0x1; in srcu_torture_stats_print()
1272 tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx); in srcu_torture_stats_print()
1279 sdp = per_cpu_ptr(sp->sda, cpu); in srcu_torture_stats_print()