Lines matching refs:rnp (identifier cross-reference over kernel/rcu/tree.c). Each entry gives the tree.c source line number, the matched line, and the enclosing function; "argument" and "local" mark the sites where rnp is declared as a function parameter or a local variable.

145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
219 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) in rcu_rnp_online_cpus() argument
221 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
1163 struct rcu_node *rnp; in rcu_lockdep_current_cpu_online() local
1170 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
1171 if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1) in rcu_lockdep_current_cpu_online()
1187 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
1189 raw_lockdep_assert_held_rcu_node(rnp); in rcu_gpnum_ovf()
1191 rnp->gp_seq)) in rcu_gpnum_ovf()
1193 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
1194 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
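
The two fragments above guard against sequence-counter wrap: ULONG_CMP_LT() compares free-running unsigned counters modulo ULONG_MAX + 1, and rcu_gpnum_ovf() drags a stale rdp->rcu_iw_gp_seq back into the comparison-safe window. A minimal standalone sketch of the same idiom, with ULONG_CMP_LT() re-created from kernel/rcu/rcu.h for illustration:

    #include <stdio.h>
    #include <limits.h>

    /* Wrap-safe "a < b" for free-running unsigned counters: the
     * subtraction wraps, so the test is correct whenever the two
     * values are within ULONG_MAX / 2 of each other. */
    #define ULONG_CMP_LT(a, b)  (ULONG_MAX / 2 < (unsigned long)((a) - (b)))

    int main(void)
    {
        unsigned long gp_seq = 5;              /* counter just wrapped */
        unsigned long stale = ULONG_MAX - 10;  /* sampled before the wrap */

        /* A naive "<" gets this backwards; the modular compare does not. */
        printf("naive: %d  wrap-safe: %d\n",
               stale < gp_seq, ULONG_CMP_LT(stale, gp_seq));

        /* rcu_gpnum_ovf()-style clamp: once the recorded value drifts
         * more than ULONG_MAX / 4 away, pull it forward so later
         * comparisons stay inside the safe window. */
        if (ULONG_CMP_LT(stale + ULONG_MAX / 4, gp_seq))
            stale = gp_seq + ULONG_MAX / 4;
        return 0;
    }
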
1224 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs() local
1236 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1258 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) { in rcu_implicit_dynticks_qs()
1263 __func__, rnp->grplo, rnp->grphi, rnp->level, in rcu_implicit_dynticks_qs()
1264 (long)rnp->gp_seq, (long)rnp->completedqs); in rcu_implicit_dynticks_qs()
1265 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in rcu_implicit_dynticks_qs()
1268 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in rcu_implicit_dynticks_qs()
1331 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
1332 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
1334 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
1343 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
1346 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in trace_rcu_this_gp()
1347 gp_seq_req, rnp->level, in trace_rcu_this_gp()
1348 rnp->grplo, rnp->grphi, s); in trace_rcu_this_gp()
1371 struct rcu_node *rnp; in rcu_start_this_gp() local
1384 for (rnp = rnp_start; 1; rnp = rnp->parent) { in rcu_start_this_gp()
1385 if (rnp != rnp_start) in rcu_start_this_gp()
1386 raw_spin_lock_rcu_node(rnp); in rcu_start_this_gp()
1387 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || in rcu_start_this_gp()
1388 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || in rcu_start_this_gp()
1389 (rnp != rnp_start && in rcu_start_this_gp()
1390 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { in rcu_start_this_gp()
1391 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1395 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); in rcu_start_this_gp()
1396 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { in rcu_start_this_gp()
1407 if (rnp != rnp_start && rnp->parent != NULL) in rcu_start_this_gp()
1408 raw_spin_unlock_rcu_node(rnp); in rcu_start_this_gp()
1409 if (!rnp->parent) in rcu_start_this_gp()
1415 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1418 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1422 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1429 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { in rcu_start_this_gp()
1430 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1431 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1433 if (rnp != rnp_start) in rcu_start_this_gp()
1434 raw_spin_unlock_rcu_node(rnp); in rcu_start_this_gp()
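
The rcu_start_this_gp() fragments show RCU's funnel locking: start at the leaf with its lock already held, walk toward the root taking each rcu_node lock in turn, and bail out as soon as an ancestor's gp_seq_needed already covers the request, so contending CPUs are filtered out near the leaves and rarely reach the root. A simplified sketch under stated assumptions: a hypothetical struct node, pthread mutexes standing in for raw_spin_lock_rcu_node(), and the kernel's extra in-progress checks and gp_seq_needed write-back omitted.

    #include <pthread.h>
    #include <stdbool.h>
    #include <limits.h>

    /* Hypothetical stand-in for struct rcu_node: parent pointer, lock,
     * and the furthest-requested grace-period number. */
    struct node {
        struct node *parent;
        pthread_mutex_t lock;
        unsigned long gp_seq_needed;
    };

    #define ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (unsigned long)((a) - (b)))

    /* Funnel-locked request, shaped like rcu_start_this_gp(): the caller
     * already holds start->lock; intermediate locks are dropped before
     * moving up, so at most two locks are held at any time. */
    static bool start_gp(struct node *start, unsigned long gp_seq_req)
    {
        struct node *np;

        for (np = start; ; np = np->parent) {
            if (np != start)
                pthread_mutex_lock(&np->lock);
            if (ULONG_CMP_GE(np->gp_seq_needed, gp_seq_req)) {
                /* A prior request already covers us: stop early. */
                if (np != start)
                    pthread_mutex_unlock(&np->lock);
                return false;
            }
            np->gp_seq_needed = gp_seq_req;  /* record the request */
            if (np != start && np->parent)
                pthread_mutex_unlock(&np->lock);
            if (!np->parent)                 /* reached the root */
                break;
        }
        if (np != start)
            pthread_mutex_unlock(&np->lock);
        return true;    /* caller should wake the GP kthread */
    }
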
1442 static bool rcu_future_gp_cleanup(struct rcu_node *rnp) in rcu_future_gp_cleanup() argument
1447 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); in rcu_future_gp_cleanup()
1449 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ in rcu_future_gp_cleanup()
1450 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1494 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1500 raw_lockdep_assert_held_rcu_node(rnp); in rcu_accelerate_cbs()
1520 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1540 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, in rcu_accelerate_cbs_unlocked() argument
1553 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_accelerate_cbs_unlocked()
1554 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
1555 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_accelerate_cbs_unlocked()
1570 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1573 raw_lockdep_assert_held_rcu_node(rnp); in rcu_advance_cbs()
1583 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1586 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1593 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, in rcu_advance_cbs_nowake() argument
1597 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || in rcu_advance_cbs_nowake()
1598 !raw_spin_trylock_rcu_node(rnp)) in rcu_advance_cbs_nowake()
1600 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
1601 raw_spin_unlock_rcu_node(rnp); in rcu_advance_cbs_nowake()
1623 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1629 raw_lockdep_assert_held_rcu_node(rnp); in __note_gp_changes()
1631 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1635 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1638 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1643 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1645 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1649 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1656 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); in __note_gp_changes()
1657 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1662 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1663 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1664 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1666 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1674 struct rcu_node *rnp; in note_gp_changes() local
1677 rnp = rdp->mynode; in note_gp_changes()
1678 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1680 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ in note_gp_changes()
1684 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
1685 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in note_gp_changes()
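
note_gp_changes() and its callees lean on the ->gp_seq encoding throughout: the bottom RCU_SEQ_CTR_SHIFT bits carry the grace-period phase and the upper bits count grace periods, which is what rcu_seq_current(), rcu_seq_state(), rcu_seq_completed_gp(), and rcu_seq_new_gp() decode. A standalone sketch with the relevant helpers re-created from kernel/rcu/rcu.h (simplified: no READ_ONCE/WRITE_ONCE, no memory barriers, no debug checks):

    #include <stdio.h>
    #include <limits.h>

    /* ->gp_seq encoding: low bits = phase (0 idle, 1 in progress),
     * remaining bits = grace-period count. */
    #define RCU_SEQ_CTR_SHIFT   2
    #define RCU_SEQ_STATE_MASK  ((1UL << RCU_SEQ_CTR_SHIFT) - 1)
    #define ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (unsigned long)((a) - (b)))

    static unsigned long rcu_seq_state(unsigned long s)
    {
        return s & RCU_SEQ_STATE_MASK;
    }

    static void rcu_seq_start(unsigned long *sp)  /* idle -> in progress */
    {
        *sp += 1;
    }

    static void rcu_seq_end(unsigned long *sp)    /* bump count, back to idle */
    {
        *sp = (*sp | RCU_SEQ_STATE_MASK) + 1;
    }

    /* Has a full grace period elapsed since "old" was sampled? */
    static int rcu_seq_completed_gp(unsigned long old, unsigned long cur)
    {
        return ULONG_CMP_GE(cur, (old | RCU_SEQ_STATE_MASK) + 1);
    }

    int main(void)
    {
        unsigned long gp_seq = 0, snap = gp_seq;

        rcu_seq_start(&gp_seq);   /* phase becomes 1: GP in progress */
        printf("state=%lu completed=%d\n",
               rcu_seq_state(gp_seq), rcu_seq_completed_gp(snap, gp_seq));
        rcu_seq_end(&gp_seq);     /* count advances, phase back to 0 */
        printf("state=%lu completed=%d\n",
               rcu_seq_state(gp_seq), rcu_seq_completed_gp(snap, gp_seq));
        return 0;
    }
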
1743 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_init() local
1746 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_init()
1749 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1759 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1769 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1781 rcu_for_each_leaf_node(rnp) { in rcu_gp_init()
1783 firstseq = READ_ONCE(rnp->ofl_seq); in rcu_gp_init()
1785 while (firstseq == READ_ONCE(rnp->ofl_seq)) in rcu_gp_init()
1789 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_init()
1790 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1791 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1793 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1799 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1800 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1803 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1805 if (!rnp->wait_blkd_tasks) /* Ever offline? */ in rcu_gp_init()
1806 rcu_init_new_rnp(rnp); in rcu_gp_init()
1807 } else if (rcu_preempt_has_tasks(rnp)) { in rcu_gp_init()
1808 rnp->wait_blkd_tasks = true; /* blocked tasks */ in rcu_gp_init()
1810 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1822 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1823 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { in rcu_gp_init()
1824 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1825 if (!rnp->qsmaskinit) in rcu_gp_init()
1826 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1829 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1847 rcu_for_each_node_breadth_first(rnp) { in rcu_gp_init()
1849 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_gp_init()
1851 rcu_preempt_check_blocked_tasks(rnp); in rcu_gp_init()
1852 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1853 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); in rcu_gp_init()
1854 if (rnp == rdp->mynode) in rcu_gp_init()
1855 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
1856 rcu_preempt_boost_start_gp(rnp); in rcu_gp_init()
1857 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, in rcu_gp_init()
1858 rnp->level, rnp->grplo, in rcu_gp_init()
1859 rnp->grphi, rnp->qsmask); in rcu_gp_init()
1861 mask = rnp->qsmask & ~rnp->qsmaskinitnext; in rcu_gp_init()
1862 rnp->rcu_gp_init_mask = mask; in rcu_gp_init()
1863 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) in rcu_gp_init()
1864 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_gp_init()
1866 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
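
The second rcu_gp_init() loop relies on the rcu_node array layout: levels are stored contiguously, root first, so rcu_for_each_node_breadth_first() is a plain linear scan, and the root-first order guarantees that by the time a leaf shows the new ->gp_seq, every ancestor already carries it. A toy sketch with a hypothetical node type; the per-node locking, preempt bookkeeping, and the hotplug mask transfer of the first loop are omitted:

    #include <stdio.h>

    struct node {
        unsigned long gp_seq;      /* this node's view of the GP number */
        unsigned long qsmask;      /* children/CPUs still owing a QS */
        unsigned long qsmaskinit;  /* online children/CPUs at GP start */
    };

    #define NUM_NODES 5                 /* e.g. one root plus four leaves */
    static struct node tree[NUM_NODES]; /* level order: breadth-first == linear */

    static void gp_init_phase2(unsigned long new_gp_seq)
    {
        for (struct node *np = &tree[0]; np < &tree[NUM_NODES]; np++) {
            np->qsmask = np->qsmaskinit;  /* arm the QS bookkeeping */
            np->gp_seq = new_gp_seq;      /* publish the new GP, root first */
        }
    }

    int main(void)
    {
        tree[0].qsmaskinit = 0xf;     /* root waits on all four leaves */
        gp_init_phase2(4);            /* GP #1 in the gp_seq encoding */
        printf("root gp_seq=%lu qsmask=0x%lx\n",
               tree[0].gp_seq, tree[0].qsmask);
        return 0;
    }
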
1884 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs_check_wake() local
1896 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_check_wake()
1907 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs() local
1920 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_fqs()
1923 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_fqs()
1936 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs_loop() local
1963 if (!READ_ONCE(rnp->qsmask) && in rcu_gp_fqs_loop()
1964 !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_loop()
2012 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_cleanup() local
2016 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2030 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2043 rcu_for_each_node_breadth_first(rnp) { in rcu_gp_cleanup()
2044 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2045 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) in rcu_gp_cleanup()
2046 dump_blkd_tasks(rnp, 10); in rcu_gp_cleanup()
2047 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
2048 WRITE_ONCE(rnp->gp_seq, new_gp_seq); in rcu_gp_cleanup()
2050 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2051 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
2053 needgp = rcu_future_gp_cleanup(rnp) || needgp; in rcu_gp_cleanup()
2055 if (rcu_is_leaf_node(rnp)) in rcu_gp_cleanup()
2056 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { in rcu_gp_cleanup()
2058 check_cb_ovld_locked(rdp, rnp); in rcu_gp_cleanup()
2060 sq = rcu_nocb_gp_get(rnp); in rcu_gp_cleanup()
2061 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2067 rnp = rcu_get_root(); in rcu_gp_cleanup()
2068 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ in rcu_gp_cleanup()
2077 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { in rcu_gp_cleanup()
2078 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2084 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
2094 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2173 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, in rcu_report_qs_rnp() argument
2175 __releases(rnp->lock) in rcu_report_qs_rnp()
2180 raw_lockdep_assert_held_rcu_node(rnp); in rcu_report_qs_rnp()
2184 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { in rcu_report_qs_rnp()
2190 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2194 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && in rcu_report_qs_rnp()
2195 rcu_preempt_blocked_readers_cgp(rnp)); in rcu_report_qs_rnp()
2196 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); in rcu_report_qs_rnp()
2197 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, in rcu_report_qs_rnp()
2198 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
2199 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
2200 !!rnp->gp_tasks); in rcu_report_qs_rnp()
2201 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
2204 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2207 rnp->completedqs = rnp->gp_seq; in rcu_report_qs_rnp()
2208 mask = rnp->grpmask; in rcu_report_qs_rnp()
2209 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
2215 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2216 rnp_c = rnp; in rcu_report_qs_rnp()
2217 rnp = rnp->parent; in rcu_report_qs_rnp()
2218 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_qs_rnp()
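
rcu_report_qs_rnp() is the upward half of the quiescent-state protocol: clear the reporting bits in the leaf's ->qsmask and, each time that empties a node, clear that node's own ->grpmask bit one level up, releasing the child's lock before taking the parent's. A toy sketch with hypothetical types and no locking (the kernel holds each rnp->lock across its step and also checks for readers blocked in preemptible RCU):

    #include <stdio.h>

    struct node {
        struct node *parent;
        unsigned long qsmask;   /* children/CPUs still owing a QS */
        unsigned long grpmask;  /* this node's bit in parent->qsmask */
    };

    /* Returns 1 when the root empties, i.e. the grace period may end. */
    static int report_qs(struct node *np, unsigned long mask)
    {
        for (;;) {
            np->qsmask &= ~mask;
            if (np->qsmask)       /* others still owe a QS: stop here */
                return 0;
            if (!np->parent)      /* root emptied */
                return 1;
            mask = np->grpmask;   /* propagate one level up */
            np = np->parent;
        }
    }

    int main(void)
    {
        struct node root = { .parent = NULL, .qsmask = 0x1 };
        struct node leaf = { .parent = &root, .qsmask = 0x3, .grpmask = 0x1 };

        printf("%d", report_qs(&leaf, 0x1));   /* CPU 1 still owes: 0 */
        printf("%d\n", report_qs(&leaf, 0x2)); /* leaf and root empty: 1 */
        return 0;
    }
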
2238 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) in rcu_report_unblock_qs_rnp() argument
2239 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
2245 raw_lockdep_assert_held_rcu_node(rnp); in rcu_report_unblock_qs_rnp()
2247 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || in rcu_report_unblock_qs_rnp()
2248 rnp->qsmask != 0) { in rcu_report_unblock_qs_rnp()
2249 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_unblock_qs_rnp()
2253 rnp->completedqs = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2254 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
2265 gps = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2266 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
2267 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_report_unblock_qs_rnp()
2283 struct rcu_node *rnp; in rcu_report_qs_rdp() local
2286 rnp = rdp->mynode; in rcu_report_qs_rdp()
2287 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2288 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2298 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2303 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
2304 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2311 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_report_qs_rdp()
2314 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_qs_rdp()
2362 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu() local
2367 blkd = !!(rnp->qsmask & rdp->grpmask); in rcutree_dying_cpu()
2368 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in rcutree_dying_cpu()
2393 struct rcu_node *rnp = rnp_leaf; in rcu_cleanup_dead_rnp() local
2401 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
2402 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
2403 if (!rnp) in rcu_cleanup_dead_rnp()
2405 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_cleanup_dead_rnp()
2406 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
2408 WARN_ON_ONCE(rnp->qsmask); in rcu_cleanup_dead_rnp()
2409 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
2410 raw_spin_unlock_rcu_node(rnp); in rcu_cleanup_dead_rnp()
2414 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_cleanup_dead_rnp()
2427 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_dead_cpu() local
2434 rcu_boost_kthread_setaffinity(rnp, -1); in rcutree_dead_cpu()
2619 struct rcu_node *rnp; in force_qs_rnp() local
2623 rcu_for_each_leaf_node(rnp) { in force_qs_rnp()
2626 raw_spin_lock_irqsave_rcu_node(rnp, flags); in force_qs_rnp()
2627 rcu_state.cbovldnext |= !!rnp->cbovldmask; in force_qs_rnp()
2628 if (rnp->qsmask == 0) { in force_qs_rnp()
2629 if (rcu_preempt_blocked_readers_cgp(rnp)) { in force_qs_rnp()
2635 rcu_initiate_boost(rnp, flags); in force_qs_rnp()
2639 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in force_qs_rnp()
2642 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { in force_qs_rnp()
2651 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in force_qs_rnp()
2654 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in force_qs_rnp()
2667 struct rcu_node *rnp; in rcu_force_quiescent_state() local
2671 rnp = __this_cpu_read(rcu_data.mynode); in rcu_force_quiescent_state()
2672 for (; rnp != NULL; rnp = rnp->parent) { in rcu_force_quiescent_state()
2674 !raw_spin_trylock(&rnp->fqslock); in rcu_force_quiescent_state()
2679 rnp_old = rnp; in rcu_force_quiescent_state()
2710 struct rcu_node *rnp = rdp->mynode; in rcu_core() local
2734 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2738 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2923 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked() argument
2925 raw_lockdep_assert_held_rcu_node(rnp); in check_cb_ovld_locked()
2929 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
2931 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
2948 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld() local
2952 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
2954 raw_spin_lock_rcu_node(rnp); in check_cb_ovld()
2955 check_cb_ovld_locked(rdp, rnp); in check_cb_ovld()
2956 raw_spin_unlock_rcu_node(rnp); in check_cb_ovld()
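
check_cb_ovld() pairs a lockless test with a locked update: a READ_ONCE() of ->cbovldmask decides whether the bit needs to change at all, and only then is the rnp lock taken so check_cb_ovld_locked() can flip it with WRITE_ONCE(). A hedged user-space sketch of the same pattern, with a pthread mutex and C11 atomics standing in for the rnp lock and READ_ONCE/WRITE_ONCE:

    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic unsigned long cbovldmask;   /* per-leaf overload bits */

    static void check_ovld(unsigned long grpmask, int overloaded)
    {
        /* Fast path: the bit already matches the overload state. */
        if (overloaded == !!(atomic_load(&cbovldmask) & grpmask))
            return;
        pthread_mutex_lock(&node_lock);        /* slow path: change the bit */
        if (overloaded)
            atomic_store(&cbovldmask, atomic_load(&cbovldmask) | grpmask);
        else
            atomic_store(&cbovldmask, atomic_load(&cbovldmask) & ~grpmask);
        pthread_mutex_unlock(&node_lock);
    }

    int main(void)
    {
        check_ovld(0x4, 1);   /* mark this CPU's callbacks overloaded */
        check_ovld(0x4, 1);   /* fast path: already set, lock avoided */
        check_ovld(0x4, 0);   /* clear it again */
        return 0;
    }
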
3788 struct rcu_node *rnp; in start_poll_synchronize_rcu() local
3793 rnp = rdp->mynode; in start_poll_synchronize_rcu()
3794 raw_spin_lock_rcu_node(rnp); // irqs already disabled. in start_poll_synchronize_rcu()
3795 needwake = rcu_start_this_gp(rnp, rdp, gp_seq); in start_poll_synchronize_rcu()
3796 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in start_poll_synchronize_rcu()
3875 struct rcu_node *rnp = rdp->mynode; in rcu_pending() local
3907 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
4073 struct rcu_node *rnp = rnp_leaf; in rcu_init_new_rnp() local
4076 WARN_ON_ONCE(rnp->wait_blkd_tasks); in rcu_init_new_rnp()
4078 mask = rnp->grpmask; in rcu_init_new_rnp()
4079 rnp = rnp->parent; in rcu_init_new_rnp()
4080 if (rnp == NULL) in rcu_init_new_rnp()
4082 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ in rcu_init_new_rnp()
4083 oldmask = rnp->qsmaskinit; in rcu_init_new_rnp()
4084 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
4085 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ in rcu_init_new_rnp()
4126 struct rcu_node *rnp = rcu_get_root(); in rcutree_prepare_cpu() local
4129 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_prepare_cpu()
4135 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcutree_prepare_cpu()
4149 rnp = rdp->mynode; in rcutree_prepare_cpu()
4150 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcutree_prepare_cpu()
4152 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4160 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_prepare_cpu()
4161 rcu_spawn_one_boost_kthread(rnp); in rcutree_prepare_cpu()
4186 struct rcu_node *rnp; in rcutree_online_cpu() local
4189 rnp = rdp->mynode; in rcutree_online_cpu()
4190 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_online_cpu()
4191 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4192 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_online_cpu()
4211 struct rcu_node *rnp; in rcutree_offline_cpu() local
4214 rnp = rdp->mynode; in rcutree_offline_cpu()
4215 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_offline_cpu()
4216 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4217 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_offline_cpu()
4242 struct rcu_node *rnp; in rcu_cpu_starting() local
4250 rnp = rdp->mynode; in rcu_cpu_starting()
4252 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); in rcu_cpu_starting()
4253 WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); in rcu_cpu_starting()
4255 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_cpu_starting()
4256 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); in rcu_cpu_starting()
4257 newcpu = !(rnp->expmaskinitnext & mask); in rcu_cpu_starting()
4258 rnp->expmaskinitnext |= mask; in rcu_cpu_starting()
4262 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
4267 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */ in rcu_cpu_starting()
4270 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_cpu_starting()
4272 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_cpu_starting()
4275 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); in rcu_cpu_starting()
4276 WARN_ON_ONCE(rnp->ofl_seq & 0x1); in rcu_cpu_starting()
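
rcu_cpu_starting() brackets the ->qsmaskinitnext update with two increments of ->ofl_seq, so the value is odd exactly while a hotplug transition is mid-flight; rcu_gp_init() (the tree.c 1783-1785 fragments above) and rcu_lockdep_current_cpu_online() key off that odd bit. A user-space sketch of the handshake, with C11 atomics and sched_yield() standing in for the kernel's WRITE_ONCE/smp_mb() and schedule_timeout_idle(1):

    #include <stdatomic.h>
    #include <sched.h>

    /* Stand-in for rnp->ofl_seq: odd while a CPU-hotplug operation is
     * in flight on this leaf, even otherwise. */
    static atomic_ulong ofl_seq;

    /* Hotplug side, cf. rcu_cpu_starting()/rcu_report_dead(). */
    static void hotplug_update(void)
    {
        atomic_fetch_add_explicit(&ofl_seq, 1, memory_order_release); /* odd */
        /* ... update qsmaskinitnext under the leaf's lock ... */
        atomic_fetch_add_explicit(&ofl_seq, 1, memory_order_release); /* even */
    }

    /* GP-initialization side, cf. rcu_gp_init(): if a hotplug operation
     * is mid-flight, wait for it before trusting qsmaskinitnext. */
    static void wait_for_hotplug(void)
    {
        unsigned long firstseq =
            atomic_load_explicit(&ofl_seq, memory_order_acquire);

        if (firstseq & 0x1)
            while (firstseq ==
                   atomic_load_explicit(&ofl_seq, memory_order_acquire))
                sched_yield();   /* kernel: schedule_timeout_idle(1) */
    }

    int main(void)
    {
        hotplug_update();    /* single-threaded demo: seq goes 1, then 2 */
        wait_for_hotplug();  /* now even, so no waiting is needed */
        return 0;
    }
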
4293 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_report_dead() local
4306 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); in rcu_report_dead()
4307 WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); in rcu_report_dead()
4310 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ in rcu_report_dead()
4313 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ in rcu_report_dead()
4315 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_dead()
4316 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_dead()
4318 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); in rcu_report_dead()
4319 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_dead()
4322 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); in rcu_report_dead()
4323 WARN_ON_ONCE(rnp->ofl_seq & 0x1); in rcu_report_dead()
4407 struct rcu_node *rnp; in rcu_spawn_gp_kthread() local
4434 rnp = rcu_get_root(); in rcu_spawn_gp_kthread()
4435 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_spawn_gp_kthread()
4440 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_spawn_gp_kthread()
4482 struct rcu_node *rnp; in rcu_init_one() local
4501 rnp = rcu_state.level[i]; in rcu_init_one()
4502 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { in rcu_init_one()
4503 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); in rcu_init_one()
4504 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), in rcu_init_one()
4506 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
4507 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
4509 rnp->gp_seq = rcu_state.gp_seq; in rcu_init_one()
4510 rnp->gp_seq_needed = rcu_state.gp_seq; in rcu_init_one()
4511 rnp->completedqs = rcu_state.gp_seq; in rcu_init_one()
4512 rnp->qsmask = 0; in rcu_init_one()
4513 rnp->qsmaskinit = 0; in rcu_init_one()
4514 rnp->grplo = j * cpustride; in rcu_init_one()
4515 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
4516 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4517 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4519 rnp->grpnum = 0; in rcu_init_one()
4520 rnp->grpmask = 0; in rcu_init_one()
4521 rnp->parent = NULL; in rcu_init_one()
4523 rnp->grpnum = j % levelspread[i - 1]; in rcu_init_one()
4524 rnp->grpmask = BIT(rnp->grpnum); in rcu_init_one()
4525 rnp->parent = rcu_state.level[i - 1] + in rcu_init_one()
4528 rnp->level = i; in rcu_init_one()
4529 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
4530 rcu_init_one_nocb(rnp); in rcu_init_one()
4531 init_waitqueue_head(&rnp->exp_wq[0]); in rcu_init_one()
4532 init_waitqueue_head(&rnp->exp_wq[1]); in rcu_init_one()
4533 init_waitqueue_head(&rnp->exp_wq[2]); in rcu_init_one()
4534 init_waitqueue_head(&rnp->exp_wq[3]); in rcu_init_one()
4535 spin_lock_init(&rnp->exp_lock); in rcu_init_one()
4541 rnp = rcu_first_leaf_node(); in rcu_init_one()
4543 while (i > rnp->grphi) in rcu_init_one()
4544 rnp++; in rcu_init_one()
4545 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
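
rcu_init_one() computes the tree geometry arithmetically: at each level, node j spans CPUs [j*cpustride, (j+1)*cpustride - 1] with the last node clamped to nr_cpu_ids, owns bit grpnum = j % levelspread in its parent's masks, and the final loop binds each CPU to the first leaf whose grphi covers it. A toy calculation assuming a two-level tree with fanout 16 and 40 CPUs (the kernel derives levelspread from RCU_FANOUT and nr_cpu_ids):

    #include <stdio.h>

    #define NR_CPUS  40
    #define FANOUT   16

    int main(void)
    {
        int nleaves = (NR_CPUS + FANOUT - 1) / FANOUT;  /* 3 leaves */
        int stride = FANOUT;

        for (int j = 0; j < nleaves; j++) {
            int grplo = j * stride;
            int grphi = (j + 1) * stride - 1;

            if (grphi >= NR_CPUS)    /* clamp the last leaf */
                grphi = NR_CPUS - 1;
            printf("leaf %d: cpus %d-%d, grpmask 0x%lx in root\n",
                   j, grplo, grphi, 1UL << j);
        }
        return 0;
    }
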
4651 struct rcu_node *rnp; in rcu_dump_rcu_node_tree() local
4655 rcu_for_each_node_breadth_first(rnp) { in rcu_dump_rcu_node_tree()
4656 if (rnp->level != level) { in rcu_dump_rcu_node_tree()
4659 level = rnp->level; in rcu_dump_rcu_node_tree()
4661 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); in rcu_dump_rcu_node_tree()