Lines matching refs:rdp — references to the per-CPU rcu_data pointer (rdp), listed by source line number and enclosing function.

151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
228 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu() local
230 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
231 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
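
Lines 228–231 are essentially the whole of rcu_get_n_cbs_cpu(). A hedged reconstruction for orientation; only the three rdp lines above are taken from the listing, while the signature and the final return 0 are inferred:

static long rcu_get_n_cbs_cpu(int cpu)		/* signature inferred from usage */
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;				/* assumed: a disabled list means no callbacks */
}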
249 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_enter() local
258 seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); in rcu_dynticks_eqs_enter()
274 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_exit() local
282 seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); in rcu_dynticks_eqs_exit()
288 arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); in rcu_dynticks_eqs_exit()
305 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_online() local
307 if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR) in rcu_dynticks_eqs_online()
309 atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); in rcu_dynticks_eqs_online()
319 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_curr_cpu_in_eqs() local
321 return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); in rcu_dynticks_curr_cpu_in_eqs()
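
Lines 249–321 (rcu_dynticks_eqs_enter/exit/online and rcu_dynticks_curr_cpu_in_eqs) all work on the per-CPU ->dynticks counter. Below is a compilable userspace toy of the protocol those lines suggest, under the assumption that RCU_DYNTICK_CTRL_MASK is the low "request special work" bit and RCU_DYNTICK_CTRL_CTR the next bit up, so that each EQS enter/exit toggles the counter bit; all names here are mine, not the kernel's:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define CTRL_MASK 0x1			/* stand-in for RCU_DYNTICK_CTRL_MASK */
#define CTRL_CTR  (CTRL_MASK + 1)	/* stand-in for RCU_DYNTICK_CTRL_CTR */

static atomic_int dynticks = CTRL_CTR;	/* CPU starts out running, i.e. not in an EQS */

static bool in_eqs(int seq)
{
	return !(seq & CTRL_CTR);	/* counter bit clear: extended quiescent state */
}

static void eqs_enter(void)		/* e.g. going idle */
{
	int seq = atomic_fetch_add(&dynticks, CTRL_CTR) + CTRL_CTR;

	assert(in_eqs(seq));		/* the add must have toggled the counter bit to 0 */
}

static void eqs_exit(void)		/* e.g. leaving idle */
{
	int seq = atomic_fetch_add(&dynticks, CTRL_CTR) + CTRL_CTR;

	assert(!in_eqs(seq));		/* ...and back to 1 */
	if (seq & CTRL_MASK)		/* special work was requested while we were idle */
		atomic_fetch_and(&dynticks, ~CTRL_MASK);
}

int main(void)
{
	eqs_enter();
	eqs_exit();
	return 0;
}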
328 static int rcu_dynticks_snap(struct rcu_data *rdp) in rcu_dynticks_snap() argument
330 int snap = atomic_add_return(0, &rdp->dynticks); in rcu_dynticks_snap()
349 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) in rcu_dynticks_in_eqs_since() argument
351 return snap != rcu_dynticks_snap(rdp); in rcu_dynticks_in_eqs_since()
360 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_dynticks_zero_in_eqs() local
364 snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK | in rcu_dynticks_zero_in_eqs()
373 return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK); in rcu_dynticks_zero_in_eqs()
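
rcu_dynticks_snap() and its consumers (lines 328–373) implement the snapshot test used later by the forced-quiescent-state scan: record ->dynticks once, and treat either an in-EQS snapshot or any subsequent change in the counter as proof that the CPU cannot still be inside a pre-existing read-side critical section. A minimal sketch of that decision, reusing the assumed counter-bit value 0x2 from the toy above:

#include <stdbool.h>

static bool in_eqs(int snap)
{
	return !(snap & 0x2);		/* assumed RCU_DYNTICK_CTRL_CTR value */
}

/* Mirrors dyntick_save_progress_counter() followed by rcu_dynticks_in_eqs_since(). */
static bool qs_detected(int snap_then, int snap_now)
{
	return in_eqs(snap_then) || snap_then != snap_now;
}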
388 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); in rcu_eqs_special_set() local
390 new_old = atomic_read(&rdp->dynticks); in rcu_eqs_special_set()
396 new_old = atomic_cmpxchg(&rdp->dynticks, old, new); in rcu_eqs_special_set()
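
Lines 388–396 come from rcu_eqs_special_set(), which tries to flag special work for a CPU only while that CPU is in an extended quiescent state. A hedged reconstruction of the cmpxchg retry loop; the body between the two listed lines is inferred from the usual pattern, not copied from the listing:

static bool rcu_eqs_special_set(int cpu)
{
	int old, new, new_old;
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	new_old = atomic_read(&rdp->dynticks);
	do {
		old = new_old;
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;			/* not in an EQS: caller must use other means */
		new = old | RCU_DYNTICK_CTRL_MASK;	/* request special work on EQS exit */
		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
	} while (new_old != old);			/* retry if the counter moved under us */
	return true;
}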
563 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
622 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_enter() local
624 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); in rcu_eqs_enter()
625 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); in rcu_eqs_enter()
627 rdp->dynticks_nesting == 0); in rcu_eqs_enter()
628 if (rdp->dynticks_nesting != 1) { in rcu_eqs_enter()
630 rdp->dynticks_nesting--; in rcu_eqs_enter()
636 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); in rcu_eqs_enter()
638 rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_enter()
639 do_nocb_deferred_wakeup(rdp); in rcu_eqs_enter()
644 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_eqs_enter()
647 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ in rcu_eqs_enter()
704 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_exit() local
712 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); in rcu_nmi_exit()
719 if (rdp->dynticks_nmi_nesting != 1) { in rcu_nmi_exit()
720 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, in rcu_nmi_exit()
721 atomic_read(&rdp->dynticks)); in rcu_nmi_exit()
722 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ in rcu_nmi_exit()
723 rdp->dynticks_nmi_nesting - 2); in rcu_nmi_exit()
729 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); in rcu_nmi_exit()
730 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ in rcu_nmi_exit()
736 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_exit()
837 struct rcu_data *rdp; in rcu_eqs_exit() local
841 rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_exit()
842 oldval = rdp->dynticks_nesting; in rcu_eqs_exit()
846 rdp->dynticks_nesting++; in rcu_eqs_exit()
856 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_eqs_exit()
859 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); in rcu_eqs_exit()
861 WRITE_ONCE(rdp->dynticks_nesting, 1); in rcu_eqs_exit()
862 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); in rcu_eqs_exit()
863 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); in rcu_eqs_exit()
929 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick() local
938 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
939 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
940 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
952 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
953 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
956 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
957 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
959 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
978 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_enter() local
981 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); in rcu_nmi_enter()
1008 instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_enter()
1010 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_enter()
1022 rdp->dynticks_nmi_nesting, in rcu_nmi_enter()
1023 rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); in rcu_nmi_enter()
1025 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ in rcu_nmi_enter()
1026 rdp->dynticks_nmi_nesting + incby); in rcu_nmi_enter()
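
Taken together, rcu_nmi_exit() (lines 704–736) and rcu_nmi_enter() (lines 978–1026) maintain ->dynticks_nmi_nesting so that only the outermost entry and exit toggle the EQS state: the outermost entry from idle records 1, each nested entry adds 2, and each exit subtracts 2 until the value 1 is reached, at which point it is zeroed and the CPU re-enters its extended quiescent state (hence the "-2" at line 723 and the "!= 1" test at line 719). A compilable toy model of that bookkeeping for a CPU that starts out idle; all names are mine:

#include <assert.h>
#include <stdbool.h>

static long nmi_nesting;	/* models rdp->dynticks_nmi_nesting on an idle CPU */
static bool idle_eqs = true;	/* models the ->dynticks EQS state */

static void nmi_enter(void)
{
	long incby = 2;		/* ordinary nested entry */

	if (idle_eqs) {		/* outermost entry from idle: leave EQS, record 1 */
		idle_eqs = false;
		incby = 1;
	}
	nmi_nesting += incby;
}

static void nmi_exit(void)
{
	assert(nmi_nesting > 0);
	if (nmi_nesting != 1) {	/* still nested: just drop by 2 */
		nmi_nesting -= 2;
		return;
	}
	nmi_nesting = 0;	/* outermost exit: return to EQS */
	idle_eqs = true;
}

int main(void)
{
	nmi_enter();		/* nesting == 1 */
	nmi_enter();		/* nesting == 3 */
	nmi_exit();		/* nesting == 1 */
	nmi_exit();		/* nesting == 0, back in EQS */
	assert(nmi_nesting == 0 && idle_eqs);
	return 0;
}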
1078 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs() argument
1080 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
1081 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
1082 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
1083 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
1084 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
1085 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
1143 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online() local
1150 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
1151 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
1152 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) in rcu_lockdep_current_cpu_online()
1168 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
1171 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, in rcu_gpnum_ovf()
1173 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
1174 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
1175 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
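
rcu_gpnum_ovf() (lines 1171–1175) guards against the grace-period sequence counter wrapping while a CPU stays idle or offline, using the wraparound-safe ULONG_CMP_LT() comparison rather than a plain "<". A compilable sketch of the idea with a stand-in helper; ulong_lt() is my own name and definition, not the kernel macro:

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/* "a is circularly before b": true when the forward distance from a to b
 * is within half the counter space, even across a wrap. */
static bool ulong_lt(unsigned long a, unsigned long b)
{
	return (a - b) > ULONG_MAX / 2;
}

int main(void)
{
	unsigned long just_before_wrap = ULONG_MAX - 10;
	unsigned long just_after_wrap  = 10;

	assert(just_before_wrap > just_after_wrap);		/* plain "<" gets this wrong */
	assert(ulong_lt(just_before_wrap, just_after_wrap));	/* circular compare gets it right */
	assert(!ulong_lt(just_after_wrap, just_before_wrap));
	return 0;
}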
1183 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter() argument
1185 rdp->dynticks_snap = rcu_dynticks_snap(rdp); in dyntick_save_progress_counter()
1186 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { in dyntick_save_progress_counter()
1187 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in dyntick_save_progress_counter()
1188 rcu_gpnum_ovf(rdp->mynode, rdp); in dyntick_save_progress_counter()
1200 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs() argument
1205 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs()
1215 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { in rcu_implicit_dynticks_qs()
1216 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_implicit_dynticks_qs()
1217 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1239 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) { in rcu_implicit_dynticks_qs()
1249 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in rcu_implicit_dynticks_qs()
1251 __func__, rdp->cpu, ".o"[onl], in rcu_implicit_dynticks_qs()
1252 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, in rcu_implicit_dynticks_qs()
1253 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); in rcu_implicit_dynticks_qs()
1269 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1270 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1290 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_implicit_dynticks_qs()
1291 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_implicit_dynticks_qs()
1294 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
1295 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
1307 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_implicit_dynticks_qs()
1308 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
1309 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
1312 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
1313 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
1314 init_irq_work(&rdp->rcu_iw, rcu_iw_handler); in rcu_implicit_dynticks_qs()
1315 atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ); in rcu_implicit_dynticks_qs()
1316 rdp->rcu_iw_pending = true; in rcu_implicit_dynticks_qs()
1317 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
1318 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_implicit_dynticks_qs()
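
Lines 1312–1318 show the last-resort nudge in rcu_implicit_dynticks_qs(): if a CPU still has not reported a quiescent state, RCU queues an irq_work on it (also marking it hard-irq at line 1315) so that the CPU's handling of the interrupt provides fresh evidence of progress. A minimal sketch of that queue-on-a-remote-CPU pattern using the same two irq_work calls; the handler name and poke_cpu() wrapper are hypothetical:

#include <linux/irq_work.h>

/* Runs in interrupt context on the target CPU; in the RCU case the handler
 * records that the CPU took the interrupt, which is later used as evidence
 * that the CPU is making progress. */
static void nudge_handler(struct irq_work *iwp)
{
}

static struct irq_work nudge_work;

static void poke_cpu(int cpu)
{
	init_irq_work(&nudge_work, nudge_handler);	/* as at line 1314 */
	irq_work_queue_on(&nudge_work, cpu);		/* as at line 1318 */
}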
1326 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
1350 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp() argument
1366 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); in rcu_start_this_gp()
1374 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1386 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, in rcu_start_this_gp()
1398 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1401 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1405 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1414 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1428 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup() local
1433 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1477 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1482 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs()
1486 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1500 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1501 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1504 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1520 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked() argument
1525 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs_unlocked()
1527 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1529 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1533 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
1549 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1551 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs()
1555 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1562 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1565 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1573 struct rcu_data *rdp) in rcu_advance_cbs_nowake() argument
1575 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs_nowake()
1579 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
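
Lines 1477–1579 (rcu_accelerate_cbs(), rcu_advance_cbs() and their wrappers) move callbacks between the segments of the per-CPU ->cblist: "accelerate" assigns not-yet-assigned callbacks to a future grace-period number, and "advance" moves callbacks whose grace period has completed into the done segment. A comment-only mirror of the segment layout as I understand it; the MY_* names are mine, the kernel uses RCU_*_TAIL indexes, one of which (RCU_WAIT_TAIL) appears at line 1504:

enum my_cblist_segment {
	MY_DONE,	/* grace period already ended: ready for rcu_do_batch() */
	MY_WAIT,	/* waiting for the current grace period to end */
	MY_NEXT_READY,	/* assigned to the grace period after the current one */
	MY_NEXT,	/* not yet assigned to any grace period */
};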
1602 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1607 rcu_segcblist_is_offloaded(&rdp->cblist); in __note_gp_changes()
1611 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1615 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1616 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1618 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1619 rdp->core_needs_qs = false; in __note_gp_changes()
1620 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1623 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1624 if (rdp->core_needs_qs) in __note_gp_changes()
1625 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1629 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1630 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1637 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1638 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1639 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1640 zero_cpu_stall_ticks(rdp); in __note_gp_changes()
1642 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1643 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1644 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1645 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1646 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1650 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes() argument
1657 rnp = rdp->mynode; in note_gp_changes()
1658 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1659 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1664 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
1721 struct rcu_data *rdp; in rcu_gp_init() local
1823 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1827 if (rnp == rdp->mynode) in rcu_gp_init()
1828 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
1979 struct rcu_data *rdp; in rcu_gp_cleanup() local
2017 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2018 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2019 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
2025 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
2026 check_cb_ovld_locked(rdp, rnp); in rcu_gp_cleanup()
2044 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2046 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2052 rcu_segcblist_is_offloaded(&rdp->cblist); in rcu_gp_cleanup()
2053 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
2246 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp() argument
2252 rcu_segcblist_is_offloaded(&rdp->cblist); in rcu_report_qs_rdp()
2255 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
2256 rnp = rdp->mynode; in rcu_report_qs_rdp()
2258 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2259 rdp->gpwrap) { in rcu_report_qs_rdp()
2267 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2271 mask = rdp->grpmask; in rcu_report_qs_rdp()
2272 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2281 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_report_qs_rdp()
2283 rcu_disable_urgency_upon_qs(rdp); in rcu_report_qs_rdp()
2298 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state() argument
2301 note_gp_changes(rdp); in rcu_check_quiescent_state()
2307 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2314 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2321 rcu_report_qs_rdp(rdp); in rcu_check_quiescent_state()
2331 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcutree_dying_cpu() local
2332 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
2337 blkd = !!(rnp->qsmask & rdp->grpmask); in rcutree_dying_cpu()
2396 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dead_cpu() local
2397 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_dead_cpu()
2416 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch() argument
2421 rcu_segcblist_is_offloaded(&rdp->cblist); in rcu_do_batch()
2428 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2430 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2432 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2444 rcu_nocb_lock(rdp); in rcu_do_batch()
2446 pending = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2449 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2457 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2458 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2460 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2461 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2506 rcu_nocb_lock(rdp); in rcu_do_batch()
2508 rdp->n_cbs_invoked += count; in rcu_do_batch()
2513 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2515 rcu_segcblist_insert_count(&rdp->cblist, &rcl); in rcu_do_batch()
2518 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2519 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2520 rdp->blimit = blimit; in rcu_do_batch()
2523 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2524 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2525 rdp->n_force_qs_snap = rcu_state.n_force_qs; in rcu_do_batch()
2526 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2527 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2533 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist)); in rcu_do_batch()
2535 count != 0 && rcu_segcblist_empty(&rdp->cblist)); in rcu_do_batch()
2537 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2540 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_do_batch()
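
Within rcu_do_batch() (lines 2416–2540), line 2449 sets the per-pass invocation budget to bl = max(rdp->blimit, pending >> div): small backlogs are throttled to blimit callbacks per pass, while large backlogs get a fixed fraction so a flooded CPU can still catch up. A small standalone program showing how that budget scales, assuming the usual defaults of blimit = 10 and a divisor of 7 (1/128th of the backlog); both defaults are assumptions, not read from the listing:

#include <stdio.h>

int main(void)
{
	long blimit = 10;	/* assumed default rdp->blimit */
	int div = 7;		/* assumed default divisor: budget is backlog/128 */
	long pending;

	for (pending = 0; pending <= 100000; pending += 25000) {
		long bl = blimit > (pending >> div) ? blimit : (pending >> div);

		printf("pending=%6ld  batch limit bl=%ld\n", pending, bl);
	}
	return 0;
}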
2580 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp() argument
2585 struct rcu_data *rdp; in force_qs_rnp() local
2610 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2611 if (f(rdp)) { in force_qs_rnp()
2612 mask |= rdp->grpmask; in force_qs_rnp()
2613 rcu_disable_urgency_upon_qs(rdp); in force_qs_rnp()
2676 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core() local
2677 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2679 rcu_segcblist_is_offloaded(&rdp->cblist); in rcu_core()
2684 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2695 rcu_check_quiescent_state(rdp); in rcu_core()
2699 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) { in rcu_core()
2701 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2702 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2706 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2709 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2711 rcu_do_batch(rdp); in rcu_core()
2714 do_nocb_deferred_wakeup(rdp); in rcu_core()
2719 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2837 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core() argument
2858 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in __call_rcu_core()
2859 rdp->qlen_last_fqs_check + qhimark)) { in __call_rcu_core()
2862 note_gp_changes(rdp); in __call_rcu_core()
2866 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in __call_rcu_core()
2869 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in __call_rcu_core()
2870 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && in __call_rcu_core()
2871 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in __call_rcu_core()
2873 rdp->n_force_qs_snap = rcu_state.n_force_qs; in __call_rcu_core()
2874 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_core()
2892 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked() argument
2897 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
2898 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
2900 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
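
Lines 2892–2900 are check_cb_ovld_locked(), which sets or clears this CPU's bit in the leaf rcu_node's ->cbovldmask depending on whether its callback count has crossed the overload threshold. A hedged reconstruction; the lockdep assertion and the early return for an unset qovld_calc are inferred, not shown in the listing:

static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);		/* assumed: caller holds rnp->lock */
	if (qovld_calc <= 0)
		return;					/* assumed: overload checking disabled */
	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
	else
		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
}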
2915 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld() argument
2917 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
2920 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
2921 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
2924 check_cb_ovld_locked(rdp, rnp); in check_cb_ovld()
2933 struct rcu_data *rdp; in __call_rcu() local
2954 rdp = this_cpu_ptr(&rcu_data); in __call_rcu()
2957 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu()
2963 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu()
2964 rcu_segcblist_init(&rdp->cblist); in __call_rcu()
2967 check_cb_ovld(rdp); in __call_rcu()
2968 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) in __call_rcu()
2971 rcu_segcblist_enqueue(&rdp->cblist, head); in __call_rcu()
2975 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu()
2978 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu()
2982 unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) { in __call_rcu()
2983 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ in __call_rcu()
2985 __call_rcu_core(rdp, head, flags); in __call_rcu()
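
Everything enqueued through __call_rcu() (lines 2933–2985) ultimately comes from callers of call_rcu() and friends. For context, the canonical caller-side pattern that produces the rcu_head entries these ->cblist lines manage; struct foo and its functions are illustrative names only:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_free_cb(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);		/* runs after a full grace period has elapsed */
}

static void foo_retire(struct foo *fp)
{
	/* Enqueues onto this CPU's rcu_data ->cblist via __call_rcu(). */
	call_rcu(&fp->rh, foo_free_cb);
}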
3690 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending() local
3691 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
3694 check_cpu_stall(rdp); in rcu_pending()
3697 if (rcu_nocb_need_deferred_wakeup(rdp)) in rcu_pending()
3706 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
3710 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
3714 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
3716 !rcu_segcblist_is_offloaded(&rdp->cblist)) && in rcu_pending()
3717 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
3721 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
3722 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
3767 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_func() local
3770 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_func()
3771 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_func()
3772 rcu_nocb_lock(rdp); in rcu_barrier_func()
3773 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); in rcu_barrier_func()
3774 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_func()
3777 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_func()
3781 rcu_nocb_unlock(rdp); in rcu_barrier_func()
3795 struct rcu_data *rdp; in rcu_barrier() local
3833 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
3835 !rcu_segcblist_is_offloaded(&rdp->cblist)) in rcu_barrier()
3837 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) { in rcu_barrier()
3841 } else if (rcu_segcblist_n_cbs(&rdp->cblist) && in rcu_barrier()
3911 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data() local
3914 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
3915 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
3916 WARN_ON_ONCE(rdp->dynticks_nesting != 1); in rcu_boot_init_percpu_data()
3917 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); in rcu_boot_init_percpu_data()
3918 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
3919 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
3920 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
3921 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
3922 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
3923 rcu_boot_init_nocb_percpu_data(rdp); in rcu_boot_init_percpu_data()
3939 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu() local
3944 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
3945 rdp->n_force_qs_snap = rcu_state.n_force_qs; in rcutree_prepare_cpu()
3946 rdp->blimit = blimit; in rcutree_prepare_cpu()
3947 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ in rcutree_prepare_cpu()
3948 !rcu_segcblist_is_offloaded(&rdp->cblist)) in rcutree_prepare_cpu()
3949 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
3950 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ in rcutree_prepare_cpu()
3959 rnp = rdp->mynode; in rcutree_prepare_cpu()
3961 rdp->beenonline = true; /* We have now been online. */ in rcutree_prepare_cpu()
3962 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
3963 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
3964 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
3965 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
3966 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
3967 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
3968 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
3981 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting() local
3983 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); in rcutree_affinity_setting()
3993 struct rcu_data *rdp; in rcutree_online_cpu() local
3996 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
3997 rnp = rdp->mynode; in rcutree_online_cpu()
3999 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4018 struct rcu_data *rdp; in rcutree_offline_cpu() local
4021 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4022 rnp = rdp->mynode; in rcutree_offline_cpu()
4024 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4049 struct rcu_data *rdp; in rcu_cpu_starting() local
4053 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_starting()
4054 if (rdp->cpu_started) in rcu_cpu_starting()
4056 rdp->cpu_started = true; in rcu_cpu_starting()
4058 rnp = rdp->mynode; in rcu_cpu_starting()
4059 mask = rdp->grpmask; in rcu_cpu_starting()
4067 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
4068 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_cpu_starting()
4069 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_cpu_starting()
4071 rcu_disable_urgency_upon_qs(rdp); in rcu_cpu_starting()
4092 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_dead() local
4093 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_report_dead()
4102 mask = rdp->grpmask; in rcu_report_dead()
4105 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_report_dead()
4106 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_report_dead()
4116 rdp->cpu_started = false; in rcu_report_dead()
4130 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks() local
4133 if (rcu_segcblist_is_offloaded(&rdp->cblist) || in rcutree_migrate_callbacks()
4134 rcu_segcblist_empty(&rdp->cblist)) in rcutree_migrate_callbacks()
4144 needwake = rcu_advance_cbs(my_rnp, rdp) || in rcutree_migrate_callbacks()
4146 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
4148 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
4161 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
4162 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
4164 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
4165 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()