Lines matching refs:rdp (references to rdp, the per-CPU struct rcu_data pointer, in kernel/rcu/tree.c; each entry gives the source line number, the matching line, and the enclosing function)
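Nearly every reference below follows one of two access patterns: this_cpu_ptr(&rcu_data) to reach the running CPU's rcu_data, or per_cpu_ptr(&rcu_data, cpu) to reach a named CPU's. The stand-alone C sketch below mimics that pattern in plain user-space terms; rcu_data_like, per_cpu_ptr_sim, and NR_CPUS are illustrative stand-ins, not the kernel's definitions. (A second sketch, of the dynticks snapshot idiom from lines 337-388, follows the listing.)

/*
 * Plain-C analogue of the per-CPU access idiom seen throughout the
 * listing: an array stands in for real per-CPU storage, and "this
 * CPU" is passed explicitly instead of being implied by context.
 */
#include <stdio.h>

#define NR_CPUS 4

struct rcu_data_like {
    int cpu;       /* owning CPU, cf. rdp->cpu */
    long n_cbs;    /* queued callbacks, cf. rcu_segcblist_n_cbs() */
};

static struct rcu_data_like rcu_data[NR_CPUS];

/* Analogue of per_cpu_ptr(&rcu_data, cpu). */
static struct rcu_data_like *per_cpu_ptr_sim(int cpu)
{
    return &rcu_data[cpu];
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        struct rcu_data_like *rdp = per_cpu_ptr_sim(cpu);

        rdp->cpu = cpu;
        rdp->n_cbs = 10 * cpu;    /* fabricated load */
    }
    /* cf. rcu_get_n_cbs_cpu() at line 240: fetch one CPU's rdp, read it. */
    struct rcu_data_like *rdp = per_cpu_ptr_sim(2);
    printf("cpu %d has %ld callbacks\n", rdp->cpu, rdp->n_cbs);
    return 0;
}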

151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
240 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu() local
242 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
243 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
316 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_online() local
318 if (atomic_read(&rdp->dynticks) & 0x1) in rcu_dynticks_eqs_online()
337 static int rcu_dynticks_snap(struct rcu_data *rdp) in rcu_dynticks_snap() argument
340 return atomic_read_acquire(&rdp->dynticks); in rcu_dynticks_snap()
355 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_is_idle_cpu() local
357 return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)); in rcu_is_idle_cpu()
365 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) in rcu_dynticks_in_eqs_since() argument
367 return snap != rcu_dynticks_snap(rdp); in rcu_dynticks_in_eqs_since()
376 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_dynticks_zero_in_eqs() local
380 snap = atomic_read(&rdp->dynticks) & ~0x1; in rcu_dynticks_zero_in_eqs()
388 return snap == atomic_read(&rdp->dynticks); in rcu_dynticks_zero_in_eqs()
552 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
611 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_enter() local
613 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); in rcu_eqs_enter()
614 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); in rcu_eqs_enter()
616 rdp->dynticks_nesting == 0); in rcu_eqs_enter()
617 if (rdp->dynticks_nesting != 1) { in rcu_eqs_enter()
619 rdp->dynticks_nesting--; in rcu_eqs_enter()
625 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); in rcu_eqs_enter()
631 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_eqs_enter()
634 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ in rcu_eqs_enter()
685 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_irq_work_resched() local
694 if (do_nocb_deferred_wakeup(rdp) && need_resched()) { in rcu_irq_work_resched()
743 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_exit() local
751 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); in rcu_nmi_exit()
758 if (rdp->dynticks_nmi_nesting != 1) { in rcu_nmi_exit()
759 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, in rcu_nmi_exit()
760 atomic_read(&rdp->dynticks)); in rcu_nmi_exit()
761 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ in rcu_nmi_exit()
762 rdp->dynticks_nmi_nesting - 2); in rcu_nmi_exit()
768 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); in rcu_nmi_exit()
769 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ in rcu_nmi_exit()
775 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_exit()
854 struct rcu_data *rdp; in rcu_eqs_exit() local
858 rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_exit()
859 oldval = rdp->dynticks_nesting; in rcu_eqs_exit()
863 rdp->dynticks_nesting++; in rcu_eqs_exit()
873 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_eqs_exit()
876 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); in rcu_eqs_exit()
878 WRITE_ONCE(rdp->dynticks_nesting, 1); in rcu_eqs_exit()
879 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); in rcu_eqs_exit()
880 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); in rcu_eqs_exit()
946 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick() local
955 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
956 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
957 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
969 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
970 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
973 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
974 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
976 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
995 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_enter() local
998 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); in rcu_nmi_enter()
1025 instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_enter()
1027 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_enter()
1038 rdp->dynticks_nmi_nesting, in rcu_nmi_enter()
1039 rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); in rcu_nmi_enter()
1041 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ in rcu_nmi_enter()
1042 rdp->dynticks_nmi_nesting + incby); in rcu_nmi_enter()
1094 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs() argument
1096 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
1097 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
1098 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
1099 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
1100 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
1101 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
1162 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online() local
1169 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
1170 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
1171 if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1) in rcu_lockdep_current_cpu_online()
1187 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
1190 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, in rcu_gpnum_ovf()
1192 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
1193 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
1194 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
1202 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter() argument
1204 rdp->dynticks_snap = rcu_dynticks_snap(rdp); in dyntick_save_progress_counter()
1205 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { in dyntick_save_progress_counter()
1206 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in dyntick_save_progress_counter()
1207 rcu_gpnum_ovf(rdp->mynode, rdp); in dyntick_save_progress_counter()
1219 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs() argument
1224 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs()
1234 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { in rcu_implicit_dynticks_qs()
1235 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_implicit_dynticks_qs()
1236 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1258 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) { in rcu_implicit_dynticks_qs()
1268 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in rcu_implicit_dynticks_qs()
1270 __func__, rdp->cpu, ".o"[onl], in rcu_implicit_dynticks_qs()
1271 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, in rcu_implicit_dynticks_qs()
1272 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); in rcu_implicit_dynticks_qs()
1288 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1289 rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1309 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_implicit_dynticks_qs()
1310 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_implicit_dynticks_qs()
1313 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
1314 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
1326 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_implicit_dynticks_qs()
1327 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
1328 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
1331 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
1332 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
1333 rdp->rcu_iw_pending = true; in rcu_implicit_dynticks_qs()
1334 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
1335 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_implicit_dynticks_qs()
1343 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
1367 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp() argument
1383 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); in rcu_start_this_gp()
1391 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1403 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, in rcu_start_this_gp()
1415 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1418 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1422 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1431 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1445 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup() local
1450 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1494 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1499 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs()
1503 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1506 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); in rcu_accelerate_cbs()
1519 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1520 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1523 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1528 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); in rcu_accelerate_cbs()
1541 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked() argument
1546 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs_unlocked()
1548 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1550 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1554 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
1570 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1572 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs()
1576 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1583 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1586 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1594 struct rcu_data *rdp) in rcu_advance_cbs_nowake() argument
1596 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs_nowake()
1600 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
1623 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1627 const bool offloaded = rcu_rdp_is_offloaded(rdp); in __note_gp_changes()
1631 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1635 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1636 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1638 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1639 rdp->core_needs_qs = false; in __note_gp_changes()
1640 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1643 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1644 if (rdp->core_needs_qs) in __note_gp_changes()
1645 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1649 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1650 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1657 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1658 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1659 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1660 zero_cpu_stall_ticks(rdp); in __note_gp_changes()
1662 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1663 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1664 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1665 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1666 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1670 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes() argument
1677 rnp = rdp->mynode; in note_gp_changes()
1678 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1679 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1684 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
1742 struct rcu_data *rdp; in rcu_gp_init() local
1850 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1854 if (rnp == rdp->mynode) in rcu_gp_init()
1855 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
2011 struct rcu_data *rdp; in rcu_gp_cleanup() local
2049 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2050 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2051 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
2057 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
2058 check_cb_ovld_locked(rdp, rnp); in rcu_gp_cleanup()
2076 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2078 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2083 offloaded = rcu_rdp_is_offloaded(rdp); in rcu_gp_cleanup()
2084 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
2277 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp() argument
2282 const bool offloaded = rcu_rdp_is_offloaded(rdp); in rcu_report_qs_rdp()
2285 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
2286 rnp = rdp->mynode; in rcu_report_qs_rdp()
2288 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2289 rdp->gpwrap) { in rcu_report_qs_rdp()
2297 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2301 mask = rdp->grpmask; in rcu_report_qs_rdp()
2302 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2311 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_report_qs_rdp()
2313 rcu_disable_urgency_upon_qs(rdp); in rcu_report_qs_rdp()
2328 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state() argument
2331 note_gp_changes(rdp); in rcu_check_quiescent_state()
2337 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2344 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2351 rcu_report_qs_rdp(rdp); in rcu_check_quiescent_state()
2361 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcutree_dying_cpu() local
2362 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
2367 blkd = !!(rnp->qsmask & rdp->grpmask); in rcutree_dying_cpu()
2426 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dead_cpu() local
2427 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_dead_cpu()
2444 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch() argument
2449 const bool offloaded = rcu_rdp_is_offloaded(rdp); in rcu_do_batch()
2456 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2458 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2460 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2472 rcu_nocb_lock(rdp); in rcu_do_batch()
2474 pending = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2477 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2485 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2486 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2488 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2490 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); in rcu_do_batch()
2491 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2536 rcu_nocb_lock(rdp); in rcu_do_batch()
2537 rdp->n_cbs_invoked += count; in rcu_do_batch()
2542 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2543 rcu_segcblist_add_len(&rdp->cblist, -count); in rcu_do_batch()
2546 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2547 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2548 rdp->blimit = blimit; in rcu_do_batch()
2551 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2552 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2553 rdp->n_force_qs_snap = rcu_state.n_force_qs; in rcu_do_batch()
2554 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2555 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2561 empty = rcu_segcblist_empty(&rdp->cblist); in rcu_do_batch()
2565 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); in rcu_do_batch()
2566 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); in rcu_do_batch()
2568 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2571 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_do_batch()
2613 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp() argument
2618 struct rcu_data *rdp; in force_qs_rnp() local
2643 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2644 if (f(rdp)) { in force_qs_rnp()
2645 mask |= rdp->grpmask; in force_qs_rnp()
2646 rcu_disable_urgency_upon_qs(rdp); in force_qs_rnp()
2709 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core() local
2710 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2711 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); in rcu_core()
2716 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2727 rcu_check_quiescent_state(rdp); in rcu_core()
2731 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) { in rcu_core()
2732 rcu_nocb_lock_irqsave(rdp, flags); in rcu_core()
2733 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2734 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2735 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_core()
2738 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2741 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2743 rcu_do_batch(rdp); in rcu_core()
2746 do_nocb_deferred_wakeup(rdp); in rcu_core()
2751 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2868 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core() argument
2889 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in __call_rcu_core()
2890 rdp->qlen_last_fqs_check + qhimark)) { in __call_rcu_core()
2893 note_gp_changes(rdp); in __call_rcu_core()
2897 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in __call_rcu_core()
2900 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in __call_rcu_core()
2901 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && in __call_rcu_core()
2902 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in __call_rcu_core()
2904 rdp->n_force_qs_snap = rcu_state.n_force_qs; in __call_rcu_core()
2905 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_core()
2923 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked() argument
2928 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
2929 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
2931 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
2946 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld() argument
2948 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
2951 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
2952 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
2955 check_cb_ovld_locked(rdp, rnp); in check_cb_ovld()
2965 struct rcu_data *rdp; in __call_rcu() local
2988 rdp = this_cpu_ptr(&rcu_data); in __call_rcu()
2991 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu()
2997 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu()
2998 rcu_segcblist_init(&rdp->cblist); in __call_rcu()
3001 check_cb_ovld(rdp); in __call_rcu()
3002 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) in __call_rcu()
3005 rcu_segcblist_enqueue(&rdp->cblist, head); in __call_rcu()
3009 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu()
3012 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu()
3014 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); in __call_rcu()
3017 if (unlikely(rcu_rdp_is_offloaded(rdp))) { in __call_rcu()
3018 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ in __call_rcu()
3020 __call_rcu_core(rdp, head, flags); in __call_rcu()
3787 struct rcu_data *rdp; in start_poll_synchronize_rcu() local
3792 rdp = this_cpu_ptr(&rcu_data); in start_poll_synchronize_rcu()
3793 rnp = rdp->mynode; in start_poll_synchronize_rcu()
3795 needwake = rcu_start_this_gp(rnp, rdp, gp_seq); in start_poll_synchronize_rcu()
3874 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending() local
3875 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
3880 check_cpu_stall(rdp); in rcu_pending()
3883 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) in rcu_pending()
3892 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
3896 if (!rcu_rdp_is_offloaded(rdp) && in rcu_pending()
3897 rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
3901 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
3902 !rcu_rdp_is_offloaded(rdp) && in rcu_pending()
3903 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
3907 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
3908 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
3953 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_func() local
3956 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_func()
3957 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_func()
3958 rcu_nocb_lock(rdp); in rcu_barrier_func()
3959 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); in rcu_barrier_func()
3960 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_func()
3963 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_func()
3967 rcu_nocb_unlock(rdp); in rcu_barrier_func()
3981 struct rcu_data *rdp; in rcu_barrier() local
4019 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4021 !rcu_rdp_is_offloaded(rdp)) in rcu_barrier()
4023 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) { in rcu_barrier()
4027 } else if (rcu_segcblist_n_cbs(&rdp->cblist) && in rcu_barrier()
4097 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data() local
4100 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
4101 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
4102 WARN_ON_ONCE(rdp->dynticks_nesting != 1); in rcu_boot_init_percpu_data()
4103 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); in rcu_boot_init_percpu_data()
4104 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4105 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4106 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4107 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4108 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
4109 rcu_boot_init_nocb_percpu_data(rdp); in rcu_boot_init_percpu_data()
4125 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu() local
4130 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
4131 rdp->n_force_qs_snap = rcu_state.n_force_qs; in rcutree_prepare_cpu()
4132 rdp->blimit = blimit; in rcutree_prepare_cpu()
4133 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ in rcutree_prepare_cpu()
4141 if (!rcu_segcblist_is_enabled(&rdp->cblist)) in rcutree_prepare_cpu()
4142 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
4149 rnp = rdp->mynode; in rcutree_prepare_cpu()
4151 rdp->beenonline = true; /* We have now been online. */ in rcutree_prepare_cpu()
4152 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4153 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
4154 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
4155 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
4156 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
4157 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); in rcutree_prepare_cpu()
4158 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
4159 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
4173 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting() local
4175 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); in rcutree_affinity_setting()
4185 struct rcu_data *rdp; in rcutree_online_cpu() local
4188 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
4189 rnp = rdp->mynode; in rcutree_online_cpu()
4191 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4210 struct rcu_data *rdp; in rcutree_offline_cpu() local
4213 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4214 rnp = rdp->mynode; in rcutree_offline_cpu()
4216 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4241 struct rcu_data *rdp; in rcu_cpu_starting() local
4245 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_starting()
4246 if (rdp->cpu_started) in rcu_cpu_starting()
4248 rdp->cpu_started = true; in rcu_cpu_starting()
4250 rnp = rdp->mynode; in rcu_cpu_starting()
4251 mask = rdp->grpmask; in rcu_cpu_starting()
4262 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
4263 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_cpu_starting()
4264 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_cpu_starting()
4268 rcu_disable_urgency_upon_qs(rdp); in rcu_cpu_starting()
4292 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_dead() local
4293 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_report_dead()
4296 do_nocb_deferred_wakeup(rdp); in rcu_report_dead()
4305 mask = rdp->grpmask; in rcu_report_dead()
4311 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_report_dead()
4312 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_report_dead()
4325 rdp->cpu_started = false; in rcu_report_dead()
4339 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks() local
4342 if (rcu_rdp_is_offloaded(rdp) || in rcutree_migrate_callbacks()
4343 rcu_segcblist_empty(&rdp->cblist)) in rcutree_migrate_callbacks()
4353 needwake = rcu_advance_cbs(my_rnp, rdp) || in rcutree_migrate_callbacks()
4355 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
4357 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
4370 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
4371 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
4373 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
4374 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()
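
The dynticks references above (lines 337-388, 1202-1235) share one idiom: take an acquire-ordered snapshot of a per-CPU counter, then later infer an extended quiescent state (EQS) from the counter's parity (even value = in EQS, per rcu_dynticks_eqs_online and rcu_dynticks_zero_in_eqs above) or from any movement since the snapshot. A minimal C11 sketch of that idiom, with dynticks_snap, in_eqs, and in_eqs_since as illustrative stand-ins for the kernel helpers:

/*
 * C11 sketch of the snapshot/compare idiom behind rcu_dynticks_snap(),
 * rcu_dynticks_in_eqs(), and rcu_dynticks_in_eqs_since(): the counter
 * is bumped on every EQS transition, so an even value means "in EQS
 * now" and any change since a snapshot means "passed through an EQS".
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int dynticks = 1;    /* odd: CPU starts non-idle */

static int dynticks_snap(void)
{
    /* cf. rcu_dynticks_snap() at line 340: acquire-ordered read. */
    return atomic_load_explicit(&dynticks, memory_order_acquire);
}

static bool in_eqs(int snap)
{
    return !(snap & 0x1);          /* even value = idle/EQS */
}

static bool in_eqs_since(int snap)
{
    /* cf. rcu_dynticks_in_eqs_since() at line 365. */
    return snap != dynticks_snap();
}

int main(void)
{
    int snap = dynticks_snap();

    printf("in EQS at snapshot? %d\n", in_eqs(snap));        /* 0 */
    atomic_fetch_add(&dynticks, 1);                          /* CPU enters idle */
    printf("EQS since snapshot? %d\n", in_eqs_since(snap));  /* 1 */
    return 0;
}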