Lines matching refs:rdp in kernel/rcu/tree.c

149 static void rcu_report_exp_rdp(struct rcu_data *rdp);
151 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
152 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
153 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
235 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu() local
237 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
238 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
290 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) in rcu_dynticks_in_eqs_since() argument
292 return snap != rcu_dynticks_snap(rdp->cpu); in rcu_dynticks_in_eqs_since()
475 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
548 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_irq_work_resched() local
557 if (do_nocb_deferred_wakeup(rdp) && need_resched()) { in rcu_irq_work_resched()
611 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick() local
620 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
621 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
622 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
634 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
635 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
638 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
639 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
641 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
668 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs() argument
670 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
671 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
672 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
673 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
674 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
675 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
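The two fragments above pair up: __rcu_irq_enter_check_tick() sets the TICK_DEP_BIT_RCU dependency at most once per grace period (guarded by rdp->rcu_forced_tick), and rcu_disable_urgency_upon_qs() clears it once the CPU's quiescent state is reported. A minimal userspace sketch of that pairing follows; the tick_dep_set_cpu()/tick_dep_clear_cpu() calls are modeled as prints and the single-CPU harness is illustrative, not kernel code.

/* Sketch of the forced-tick flag pairing seen in the fragments above.
 * One boolean pair stands in for the per-CPU rcu_urgent_qs and
 * rcu_forced_tick fields; tick dependency operations become prints.
 */
#include <stdbool.h>
#include <stdio.h>

static bool rcu_urgent_qs, rcu_forced_tick;

static void irq_enter_check_tick(void)
{
	/* Set the dependency only once per urgent period. */
	if (rcu_urgent_qs && !rcu_forced_tick) {
		rcu_forced_tick = true;
		printf("tick_dep_set_cpu(TICK_DEP_BIT_RCU)\n");
	}
}

static void disable_urgency_upon_qs(void)
{
	rcu_urgent_qs = false;
	if (rcu_forced_tick) {
		printf("tick_dep_clear_cpu(TICK_DEP_BIT_RCU)\n");
		rcu_forced_tick = false;
	}
}

int main(void)
{
	rcu_urgent_qs = true;	   /* grace period needs this CPU */
	irq_enter_check_tick();	   /* sets the dependency once */
	irq_enter_check_tick();	   /* no-op: already forced */
	disable_urgency_upon_qs(); /* QS reported: clear it again */
	return 0;
}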
730 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
733 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, in rcu_gpnum_ovf()
735 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
736 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
737 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
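rcu_gpnum_ovf() above guards against grace-period counter wrap: rdp->gpwrap is set once the per-CPU rdp->gp_seq falls more than ULONG_MAX/4 behind rnp->gp_seq. Below is a minimal userspace sketch of the same wrap-safe test; ULONG_CMP_LT() is reproduced from its definition in include/linux/rcupdate.h, while the scenario values are invented for illustration.

/* Wrap-safe "is a more than ULONG_MAX/4 behind b" test, as used by
 * rcu_gpnum_ovf().  Unsigned subtraction makes the comparison immune
 * to counter wrap.
 */
#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long rdp_gp_seq = 8;	/* stale per-CPU snapshot */
	unsigned long rnp_gp_seq = ULONG_MAX / 4 + 16; /* node far ahead */

	/* Same test as rcu_gpnum_ovf(). */
	if (ULONG_CMP_LT(rdp_gp_seq + ULONG_MAX / 4, rnp_gp_seq))
		printf("gpwrap: per-CPU gp_seq is dangerously stale\n");
	else
		printf("gp_seq still within a safe distance\n");
	return 0;
}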
745 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter() argument
747 rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu); in dyntick_save_progress_counter()
748 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { in dyntick_save_progress_counter()
749 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in dyntick_save_progress_counter()
750 rcu_gpnum_ovf(rdp->mynode, rdp); in dyntick_save_progress_counter()
762 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs() argument
765 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs()
775 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { in rcu_implicit_dynticks_qs()
776 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_implicit_dynticks_qs()
777 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
799 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) { in rcu_implicit_dynticks_qs()
809 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)], in rcu_implicit_dynticks_qs()
810 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, in rcu_implicit_dynticks_qs()
811 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); in rcu_implicit_dynticks_qs()
827 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && in rcu_implicit_dynticks_qs()
831 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); in rcu_implicit_dynticks_qs()
833 smp_store_release(&rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
835 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
846 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_implicit_dynticks_qs()
847 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_implicit_dynticks_qs()
849 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
850 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
851 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
863 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_implicit_dynticks_qs()
864 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
865 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
868 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
869 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
870 rdp->rcu_iw_pending = true; in rcu_implicit_dynticks_qs()
871 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
872 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_implicit_dynticks_qs()
875 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) { in rcu_implicit_dynticks_qs()
876 int cpu = rdp->cpu; in rcu_implicit_dynticks_qs()
882 rsrp = &rdp->snap_record; in rcu_implicit_dynticks_qs()
886 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu); in rcu_implicit_dynticks_qs()
887 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu); in rcu_implicit_dynticks_qs()
888 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
890 rsrp->gp_seq = rdp->gp_seq; in rcu_implicit_dynticks_qs()
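dyntick_save_progress_counter() and rcu_implicit_dynticks_qs() above implement a snapshot-and-recheck protocol: record the CPU's dynticks counter at force-quiescent-state time, then later treat any movement (or an idle value) as proof of a quiescent state. The following is a deliberately simplified single-CPU model of that protocol, not the kernel implementation; the real counter lives in the context-tracking code, and DYNTICKS_IDX here merely stands in for RCU_DYNTICKS_IDX.

/* Simplified model of the dyntick snapshot protocol.  A clear low bit
 * means the CPU is in an extended quiescent state (EQS); any change
 * since the snapshot means the CPU passed through one.
 */
#include <stdbool.h>
#include <stdio.h>

#define DYNTICKS_IDX 0x1	/* stand-in for RCU_DYNTICKS_IDX */

static unsigned int dynticks;	/* per-CPU in the kernel; one CPU here */

static bool in_eqs(unsigned int snap)	    { return !(snap & DYNTICKS_IDX); }
static bool in_eqs_since(unsigned int snap) { return snap != dynticks; }

int main(void)
{
	dynticks = DYNTICKS_IDX;	/* CPU busy (not in EQS) */
	unsigned int snap = dynticks;	/* dyntick_save_progress_counter() */

	if (in_eqs(snap)) {
		printf("CPU idle at snapshot time: immediate QS\n");
		return 0;
	}
	dynticks += DYNTICKS_IDX;	/* CPU enters idle... */
	dynticks += DYNTICKS_IDX;	/* ...and leaves it again */

	/* rcu_implicit_dynticks_qs(): any movement means a QS happened. */
	printf("QS observed since snapshot: %s\n",
	       in_eqs_since(snap) ? "yes" : "no");
	return 0;
}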
898 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
922 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp() argument
938 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); in rcu_start_this_gp()
946 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
958 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, in rcu_start_this_gp()
970 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
973 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
977 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
986 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1000 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup() local
1005 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1049 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1054 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs()
1058 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1061 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); in rcu_accelerate_cbs()
1074 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1075 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1078 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1083 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); in rcu_accelerate_cbs()
1096 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked() argument
1101 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs_unlocked()
1103 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1105 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1109 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
1125 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1127 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs()
1131 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1138 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1141 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1149 struct rcu_data *rdp) in rcu_advance_cbs_nowake() argument
1151 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs_nowake()
1156 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
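The rcu_accelerate_cbs()/rcu_advance_cbs() fragments above manage the per-CPU segmented callback list: "accelerate" assigns waiting callbacks the grace-period number they must wait for, and "advance" makes callbacks whose grace period has completed invocable. The toy below flattens the real four-segment rcu_segcblist into one array to show just that division of labor; all names and values in it are illustrative and it ignores counter wrap.

/* Toy model of the accelerate/advance pattern.  Each callback records
 * the grace-period number it waits for (assigned by "accelerate") and
 * becomes invocable once "advance" sees that number complete.
 */
#include <stdio.h>

#define NCB 4
#define GP_NONE 0UL

static unsigned long cb_wait_gp[NCB];	/* 0 = not yet accelerated */

/* rcu_accelerate_cbs() analogue: tag unassigned callbacks. */
static void accelerate(unsigned long gp_seq_req)
{
	for (int i = 0; i < NCB; i++)
		if (cb_wait_gp[i] == GP_NONE)
			cb_wait_gp[i] = gp_seq_req;
}

/* rcu_advance_cbs() analogue: completed callbacks are ready. */
static void advance(unsigned long completed_gp)
{
	for (int i = 0; i < NCB; i++)
		if (cb_wait_gp[i] != GP_NONE && cb_wait_gp[i] <= completed_gp)
			printf("cb %d ready (waited for gp %lu)\n",
			       i, cb_wait_gp[i]);
}

int main(void)
{
	accelerate(2);	/* all four callbacks now wait for gp 2 */
	advance(1);	/* nothing ready yet */
	advance(2);	/* all four become invocable */
	return 0;
}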
1179 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1183 const bool offloaded = rcu_rdp_is_offloaded(rdp); in __note_gp_changes()
1187 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1191 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1192 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1194 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1195 rdp->core_needs_qs = false; in __note_gp_changes()
1196 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1199 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1200 if (rdp->core_needs_qs) in __note_gp_changes()
1201 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1205 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1206 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1213 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1214 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1215 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1216 zero_cpu_stall_ticks(rdp); in __note_gp_changes()
1218 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1219 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1220 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1221 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap)) in __note_gp_changes()
1222 WRITE_ONCE(rdp->last_sched_clock, jiffies); in __note_gp_changes()
1223 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1224 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1228 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes() argument
1235 rnp = rdp->mynode; in note_gp_changes()
1236 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1237 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1242 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
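__note_gp_changes() above keys off rcu_seq_completed_gp() and rcu_seq_new_gp(), which compare wrap-safe gp_seq values whose low RCU_SEQ_STATE_MASK bits carry the grace-period phase. The helpers below are reproduced from kernel/rcu/rcu.h; the main() walk-through values follow the rcu_seq_start()/rcu_seq_end() encoding (gp #1 idle = 4, gp #2 running = 5, gp #2 done = 8) but the harness itself is only a sketch.

/* gp_seq phase/counter encoding and the comparisons that drive
 * __note_gp_changes(), in a standalone userspace form.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define ULONG_CMP_LT(a, b)  (ULONG_MAX / 2 < (a) - (b))
#define RCU_SEQ_CTR_SHIFT   2
#define RCU_SEQ_STATE_MASK  ((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Has the grace period current at "old" completed by "new"? */
static bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/* Has a new grace period started since "old"? */
static bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

int main(void)
{
	unsigned long snap = 4;	/* rdp->gp_seq: gp #1 done, idle */

	printf("gp started?   %d\n", rcu_seq_new_gp(snap, 5));	     /* 1 */
	printf("gp completed? %d\n", rcu_seq_completed_gp(snap, 5)); /* 0 */
	printf("gp completed? %d\n", rcu_seq_completed_gp(snap, 8)); /* 1 */
	return 0;
}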
1395 struct rcu_data *rdp; in rcu_gp_init() local
1502 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1506 if (rnp == rdp->mynode) in rcu_gp_init()
1507 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
1675 struct rcu_data *rdp; in rcu_gp_cleanup() local
1716 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
1717 if (rnp == rdp->mynode) in rcu_gp_cleanup()
1718 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
1724 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
1725 check_cb_ovld_locked(rdp, rnp); in rcu_gp_cleanup()
1743 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
1745 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
1750 offloaded = rcu_rdp_is_offloaded(rdp); in rcu_gp_cleanup()
1751 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
1959 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp() argument
1966 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
1967 rnp = rdp->mynode; in rcu_report_qs_rdp()
1969 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
1970 rdp->gpwrap) { in rcu_report_qs_rdp()
1978 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
1982 mask = rdp->grpmask; in rcu_report_qs_rdp()
1983 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
1993 if (!rcu_rdp_is_offloaded(rdp)) { in rcu_report_qs_rdp()
1999 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp)); in rcu_report_qs_rdp()
2000 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) { in rcu_report_qs_rdp()
2008 rcu_disable_urgency_upon_qs(rdp); in rcu_report_qs_rdp()
2013 rcu_nocb_lock_irqsave(rdp, flags); in rcu_report_qs_rdp()
2014 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_report_qs_rdp()
2015 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_report_qs_rdp()
2027 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state() argument
2030 note_gp_changes(rdp); in rcu_check_quiescent_state()
2036 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2043 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2050 rcu_report_qs_rdp(rdp); in rcu_check_quiescent_state()
2069 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch() argument
2084 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2086 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2088 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2090 rcu_is_callbacks_kthread(rdp)); in rcu_do_batch()
2099 rcu_nocb_lock_irqsave(rdp, flags); in rcu_do_batch()
2101 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); in rcu_do_batch()
2104 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2105 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && in rcu_do_batch()
2116 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2117 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2118 if (rcu_rdp_is_offloaded(rdp)) in rcu_do_batch()
2119 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2121 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); in rcu_do_batch()
2122 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2165 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && in rcu_do_batch()
2167 rdp->rcu_cpu_has_work = 1; in rcu_do_batch()
2173 rcu_nocb_lock_irqsave(rdp, flags); in rcu_do_batch()
2174 rdp->n_cbs_invoked += count; in rcu_do_batch()
2176 is_idle_task(current), rcu_is_callbacks_kthread(rdp)); in rcu_do_batch()
2179 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2180 rcu_segcblist_add_len(&rdp->cblist, -count); in rcu_do_batch()
2183 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2184 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2185 rdp->blimit = blimit; in rcu_do_batch()
2188 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2189 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2190 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcu_do_batch()
2191 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2192 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2198 empty = rcu_segcblist_empty(&rdp->cblist); in rcu_do_batch()
2202 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); in rcu_do_batch()
2203 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); in rcu_do_batch()
2205 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
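The rcu_do_batch() fragments above cap each invocation pass at bl = max(rdp->blimit, pending >> div) callbacks, so a CPU with a long backlog still makes proportional progress while yielding regularly. A small sketch of just that arithmetic, assuming the default blimit of 10 and rcu_divisor of 7 (the sample counts are invented):

/* Batch-limit arithmetic from rcu_do_batch(): run at most
 * max(blimit, pending >> div) callbacks per pass.
 */
#include <stdio.h>

static long batch_limit(long blimit, long pending, int div)
{
	long bl = pending >> div;

	return bl > blimit ? bl : blimit;
}

int main(void)
{
	/* Kernel defaults: blimit = 10, rcu_divisor = 7. */
	printf("pending=50    -> bl=%ld\n", batch_limit(10, 50, 7));    /* 10 */
	printf("pending=10000 -> bl=%ld\n", batch_limit(10, 10000, 7)); /* 78 */
	return 0;
}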
2256 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp() argument
2261 struct rcu_data *rdp; in force_qs_rnp() local
2286 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2287 if (f(rdp)) { in force_qs_rnp()
2288 mask |= rdp->grpmask; in force_qs_rnp()
2289 rcu_disable_urgency_upon_qs(rdp); in force_qs_rnp()
2352 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core() local
2353 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2371 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); in rcu_core()
2376 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2387 rcu_check_quiescent_state(rdp); in rcu_core()
2391 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) { in rcu_core()
2392 rcu_nocb_lock_irqsave(rdp, flags); in rcu_core()
2393 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2394 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2395 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_core()
2398 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2401 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2403 rcu_do_batch(rdp); in rcu_core()
2405 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_core()
2410 do_nocb_deferred_wakeup(rdp); in rcu_core()
2415 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2535 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core() argument
2556 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in __call_rcu_core()
2557 rdp->qlen_last_fqs_check + qhimark)) { in __call_rcu_core()
2560 note_gp_changes(rdp); in __call_rcu_core()
2564 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in __call_rcu_core()
2567 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in __call_rcu_core()
2568 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && in __call_rcu_core()
2569 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in __call_rcu_core()
2571 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in __call_rcu_core()
2572 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_core()
2590 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked() argument
2595 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
2596 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
2598 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
2613 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld() argument
2615 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
2618 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
2619 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
2622 check_cb_ovld_locked(rdp, rnp); in check_cb_ovld()
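check_cb_ovld_locked() above gives each CPU one bit (its grpmask) in the leaf node's cbovldmask, set while its callback count is at or above qovld_calc and cleared otherwise; check_cb_ovld() only takes the lock when the bit would actually change. The sketch below mirrors that bookkeeping in userspace; the harness, CPU numbers, and threshold are illustrative.

/* Per-CPU overload bit maintenance, modeled on check_cb_ovld_locked().
 */
#include <stdio.h>

static unsigned long cbovldmask;	/* rnp->cbovldmask analogue */

static void check_cb_ovld_locked(unsigned long grpmask, long n_cbs,
				 long qovld_calc)
{
	if (n_cbs >= qovld_calc)
		cbovldmask |= grpmask;	/* CPU is overloaded */
	else
		cbovldmask &= ~grpmask;	/* CPU is back under the limit */
}

int main(void)
{
	check_cb_ovld_locked(1UL << 3, 12000, 10000); /* CPU 3 overloaded */
	check_cb_ovld_locked(1UL << 5, 200, 10000);   /* CPU 5 fine */
	printf("cbovldmask = %#lx\n", cbovldmask);    /* prints 0x8 */
	return 0;
}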
2632 struct rcu_data *rdp; in __call_rcu_common() local
2655 rdp = this_cpu_ptr(&rcu_data); in __call_rcu_common()
2659 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu_common()
2665 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu_common()
2666 rcu_segcblist_init(&rdp->cblist); in __call_rcu_common()
2669 check_cb_ovld(rdp); in __call_rcu_common()
2670 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) in __call_rcu_common()
2673 rcu_segcblist_enqueue(&rdp->cblist, head); in __call_rcu_common()
2677 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu_common()
2680 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu_common()
2682 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); in __call_rcu_common()
2685 if (unlikely(rcu_rdp_is_offloaded(rdp))) { in __call_rcu_common()
2686 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ in __call_rcu_common()
2688 __call_rcu_core(rdp, head, flags); in __call_rcu_common()
3638 struct rcu_data *rdp; in start_poll_synchronize_rcu_common() local
3643 rdp = this_cpu_ptr(&rcu_data); in start_poll_synchronize_rcu_common()
3644 rnp = rdp->mynode; in start_poll_synchronize_rcu_common()
3652 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq)); in start_poll_synchronize_rcu_common()
3856 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending() local
3857 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
3862 check_cpu_stall(rdp); in rcu_pending()
3865 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) in rcu_pending()
3874 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
3878 if (!rcu_rdp_is_offloaded(rdp) && in rcu_pending()
3879 rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
3883 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
3884 !rcu_rdp_is_offloaded(rdp) && in rcu_pending()
3885 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
3889 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
3890 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
3932 static void rcu_barrier_entrain(struct rcu_data *rdp) in rcu_barrier_entrain() argument
3935 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); in rcu_barrier_entrain()
3943 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_entrain()
3944 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_entrain()
3945 rcu_nocb_lock(rdp); in rcu_barrier_entrain()
3951 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
3952 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false)); in rcu_barrier_entrain()
3953 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
3954 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_entrain()
3957 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_entrain()
3960 rcu_nocb_unlock(rdp); in rcu_barrier_entrain()
3962 wake_nocb_gp(rdp, false); in rcu_barrier_entrain()
3963 smp_store_release(&rdp->barrier_seq_snap, gseq); in rcu_barrier_entrain()
3972 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_handler() local
3975 WARN_ON_ONCE(cpu != rdp->cpu); in rcu_barrier_handler()
3978 rcu_barrier_entrain(rdp); in rcu_barrier_handler()
3995 struct rcu_data *rdp; in rcu_barrier() local
4034 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4036 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) in rcu_barrier()
4039 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { in rcu_barrier()
4040 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
4045 if (!rcu_rdp_cpu_online(rdp)) { in rcu_barrier()
4046 rcu_barrier_entrain(rdp); in rcu_barrier()
4047 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
4057 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
4076 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4078 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
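The rcu_barrier()/rcu_barrier_entrain() fragments above use rdp->barrier_seq_snap as a handshake: the entrain side publishes the barrier sequence with smp_store_release() once the barrier callback is queued, and the scanning side's smp_load_acquire() can then safely skip that CPU. The sketch below substitutes C11 atomics for the kernel primitives and runs single-threaded, so it only demonstrates the release/acquire pairing, not real cross-CPU concurrency.

/* barrier_seq_snap handshake, modeled with C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long barrier_seq_snap;

static void entrain(unsigned long gseq)
{
	/* ...queue rdp->barrier_head here, then publish... */
	atomic_store_explicit(&barrier_seq_snap, gseq, memory_order_release);
}

static int already_entrained(unsigned long gseq)
{
	return atomic_load_explicit(&barrier_seq_snap,
				    memory_order_acquire) == gseq;
}

int main(void)
{
	unsigned long gseq = 42;	/* illustrative barrier sequence */

	printf("before entrain: skip? %d\n", already_entrained(gseq));
	entrain(gseq);
	printf("after entrain:  skip? %d\n", already_entrained(gseq));
	return 0;
}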
4102 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) in rcu_rdp_cpu_online() argument
4104 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); in rcu_rdp_cpu_online()
4124 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online() local
4130 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
4138 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock)) in rcu_lockdep_current_cpu_online()
4161 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dying_cpu() local
4162 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
4167 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); in rcutree_dying_cpu()
4270 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data() local
4273 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
4274 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
4277 rdp->barrier_seq_snap = rcu_state.barrier_sequence; in rcu_boot_init_percpu_data()
4278 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4279 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4280 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4281 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4282 rdp->last_sched_clock = jiffies; in rcu_boot_init_percpu_data()
4283 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
4284 rcu_boot_init_nocb_percpu_data(rdp); in rcu_boot_init_percpu_data()
4301 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu() local
4306 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
4307 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcutree_prepare_cpu()
4308 rdp->blimit = blimit; in rcutree_prepare_cpu()
4316 if (!rcu_segcblist_is_enabled(&rdp->cblist)) in rcutree_prepare_cpu()
4317 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
4324 rnp = rdp->mynode; in rcutree_prepare_cpu()
4326 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4327 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
4328 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
4329 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
4330 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
4331 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); in rcutree_prepare_cpu()
4332 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
4333 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
4347 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting() local
4349 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); in rcutree_affinity_setting()
4357 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_beenfullyonline() local
4359 return smp_load_acquire(&rdp->beenonline); in rcu_cpu_beenfullyonline()
4369 struct rcu_data *rdp; in rcutree_online_cpu() local
4372 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
4373 rnp = rdp->mynode; in rcutree_online_cpu()
4375 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4394 struct rcu_data *rdp; in rcutree_offline_cpu() local
4397 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4398 rnp = rdp->mynode; in rcutree_offline_cpu()
4400 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4425 struct rcu_data *rdp; in rcu_cpu_starting() local
4430 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_starting()
4431 if (rdp->cpu_started) in rcu_cpu_starting()
4433 rdp->cpu_started = true; in rcu_cpu_starting()
4435 rnp = rdp->mynode; in rcu_cpu_starting()
4436 mask = rdp->grpmask; in rcu_cpu_starting()
4448 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
4449 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_cpu_starting()
4450 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_cpu_starting()
4458 rcu_disable_urgency_upon_qs(rdp); in rcu_cpu_starting()
4465 smp_store_release(&rdp->beenonline, true); in rcu_cpu_starting()
4481 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_dead() local
4482 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_report_dead()
4485 do_nocb_deferred_wakeup(rdp); in rcu_report_dead()
4490 mask = rdp->grpmask; in rcu_report_dead()
4494 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_report_dead()
4495 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_report_dead()
4498 rcu_disable_urgency_upon_qs(rdp); in rcu_report_dead()
4507 rdp->cpu_started = false; in rcu_report_dead()
4521 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks() local
4524 if (rcu_rdp_is_offloaded(rdp) || in rcutree_migrate_callbacks()
4525 rcu_segcblist_empty(&rdp->cblist)) in rcutree_migrate_callbacks()
4529 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); in rcutree_migrate_callbacks()
4530 rcu_barrier_entrain(rdp); in rcutree_migrate_callbacks()
4537 needwake = rcu_advance_cbs(my_rnp, rdp) || in rcutree_migrate_callbacks()
4539 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
4542 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
4555 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
4556 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
4558 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
4559 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()
4641 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_spawn_gp_kthread() local
4666 rcu_spawn_one_boost_kthread(rdp->mynode); in rcu_spawn_gp_kthread()