Lines Matching refs:rnp
145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
191 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) in rcu_rnp_online_cpus() argument
193 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
943 struct rcu_node *rnp; in rcu_lockdep_current_cpu_online() local
950 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
951 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) in rcu_lockdep_current_cpu_online()
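
The two lines above test whether a CPU counts as online by ANDing its rdp->grpmask bit against its leaf node's online mask. A minimal user-space sketch of that bit test, with simplified stand-in structures (the field names echo the listing; everything else here is an assumption for illustration):

#include <stdio.h>

/* Simplified stand-ins for the per-CPU and per-node structures. */
struct node  { unsigned long qsmaskinitnext; };   /* online CPUs below this node */
struct cpu_d { unsigned long grpmask; struct node *mynode; };

static int cpu_is_online_in_node(struct cpu_d *rdp)
{
        /* Each CPU owns exactly one bit in its leaf node's mask. */
        return (rdp->grpmask & rdp->mynode->qsmaskinitnext) != 0;
}

int main(void)
{
        struct node  n    = { .qsmaskinitnext = 0x5 };            /* CPUs 0 and 2 online */
        struct cpu_d cpu1 = { .grpmask = 1UL << 1, .mynode = &n };

        printf("cpu1 online in node? %d\n", cpu_is_online_in_node(&cpu1)); /* 0 */
        return 0;
}
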
967 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
969 raw_lockdep_assert_held_rcu_node(rnp); in rcu_gpnum_ovf()
971 rnp->gp_seq)) in rcu_gpnum_ovf()
973 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
974 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
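
rcu_gpnum_ovf() guards against the grace-period counter wrapping by using the kernel's modular comparisons rather than plain < and >. The sketch below reproduces ULONG_CMP_LT()/ULONG_CMP_GE() from memory (treat the exact definitions as an approximation) and shows why a plain comparison gives the wrong answer once the counter wraps:

#include <limits.h>
#include <stdio.h>

/* Wraparound-safe comparisons in the spirit of the kernel's helpers. */
#define ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)  (ULONG_MAX / 2 <  (a) - (b))

int main(void)
{
        unsigned long old = ULONG_MAX - 1;   /* counter just before wrapping */
        unsigned long cur = old + 3;         /* wrapped past zero, now 1 */

        printf("plain   old<cur: %d\n", old < cur);               /* 0: wrap fooled the plain compare */
        printf("modular old<cur: %d\n", ULONG_CMP_LT(old, cur));  /* 1: old is correctly behind cur */
        return 0;
}
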
1004 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs() local
1016 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1021 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && in rcu_implicit_dynticks_qs()
1028 __func__, rnp->grplo, rnp->grphi, rnp->level, in rcu_implicit_dynticks_qs()
1029 (long)rnp->gp_seq, (long)rnp->completedqs); in rcu_implicit_dynticks_qs()
1030 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in rcu_implicit_dynticks_qs()
1033 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in rcu_implicit_dynticks_qs()
1094 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
1095 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
1098 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
1107 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
1110 trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req, in trace_rcu_this_gp()
1111 rnp->level, rnp->grplo, rnp->grphi, s); in trace_rcu_this_gp()
1134 struct rcu_node *rnp; in rcu_start_this_gp() local
1147 for (rnp = rnp_start; 1; rnp = rnp->parent) { in rcu_start_this_gp()
1148 if (rnp != rnp_start) in rcu_start_this_gp()
1149 raw_spin_lock_rcu_node(rnp); in rcu_start_this_gp()
1150 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || in rcu_start_this_gp()
1151 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || in rcu_start_this_gp()
1152 (rnp != rnp_start && in rcu_start_this_gp()
1153 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { in rcu_start_this_gp()
1154 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1158 rnp->gp_seq_needed = gp_seq_req; in rcu_start_this_gp()
1159 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { in rcu_start_this_gp()
1170 if (rnp != rnp_start && rnp->parent != NULL) in rcu_start_this_gp()
1171 raw_spin_unlock_rcu_node(rnp); in rcu_start_this_gp()
1172 if (!rnp->parent) in rcu_start_this_gp()
1178 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1181 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1185 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1192 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { in rcu_start_this_gp()
1193 rnp_start->gp_seq_needed = rnp->gp_seq_needed; in rcu_start_this_gp()
1194 rdp->gp_seq_needed = rnp->gp_seq_needed; in rcu_start_this_gp()
1196 if (rnp != rnp_start) in rcu_start_this_gp()
1197 raw_spin_unlock_rcu_node(rnp); in rcu_start_this_gp()
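
rcu_start_this_gp() records a request for a future grace period by walking from its starting leaf node up the parent pointers, stopping as soon as some node already covers the request. A minimal lock-free sketch of that walk, with simplified stand-in types (the real code holds each node's ->lock during the update and also bails out when a grace period is already in progress):

#include <stdio.h>

/* Simplified stand-in for an rcu_node: only the fields this sketch needs. */
struct node {
        struct node  *parent;
        unsigned long gp_seq_needed;   /* furthest grace period requested here */
};

/* Walk leaf-to-root, recording the request in each node, stopping early once
 * an ancestor already covers it.  '>=' stands in for ULONG_CMP_GE(). */
static int request_gp(struct node *leaf, unsigned long gp_seq_req)
{
        struct node *rnp;

        for (rnp = leaf; rnp; rnp = rnp->parent) {
                if (rnp->gp_seq_needed >= gp_seq_req)
                        return 0;               /* already requested */
                rnp->gp_seq_needed = gp_seq_req;
        }
        return 1;                               /* reached root: caller must start a GP */
}

int main(void)
{
        struct node root = { .parent = NULL, .gp_seq_needed = 0 };
        struct node leaf = { .parent = &root, .gp_seq_needed = 0 };

        printf("%d\n", request_gp(&leaf, 8));   /* 1: new request reaches the root */
        printf("%d\n", request_gp(&leaf, 4));   /* 0: already covered */
        return 0;
}
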
1205 static bool rcu_future_gp_cleanup(struct rcu_node *rnp) in rcu_future_gp_cleanup() argument
1210 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); in rcu_future_gp_cleanup()
1212 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ in rcu_future_gp_cleanup()
1213 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1256 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1262 raw_lockdep_assert_held_rcu_node(rnp); in rcu_accelerate_cbs()
1280 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1297 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, in rcu_accelerate_cbs_unlocked() argument
1310 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_accelerate_cbs_unlocked()
1311 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
1312 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_accelerate_cbs_unlocked()
1327 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1330 raw_lockdep_assert_held_rcu_node(rnp); in rcu_advance_cbs()
1340 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1343 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1350 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, in rcu_advance_cbs_nowake() argument
1354 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || in rcu_advance_cbs_nowake()
1355 !raw_spin_trylock_rcu_node(rnp)) in rcu_advance_cbs_nowake()
1357 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
1358 raw_spin_unlock_rcu_node(rnp); in rcu_advance_cbs_nowake()
1367 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1374 raw_lockdep_assert_held_rcu_node(rnp); in __note_gp_changes()
1376 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1380 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1383 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1387 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1391 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1398 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); in __note_gp_changes()
1399 need_gp = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1404 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1405 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1406 rdp->gp_seq_needed = rnp->gp_seq_needed; in __note_gp_changes()
1408 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
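
The rcu_seq_*() helpers used above (rcu_seq_state(), rcu_seq_started(), rcu_seq_completed_gp(), rcu_seq_new_gp()) all rely on ->gp_seq packing a counter and a small in-progress state into one unsigned long. A rough sketch of that encoding, written from memory and simplified (the constants and helper names here are assumptions for illustration):

#include <stdio.h>

/* Low bits hold the state (nonzero while a grace period is in progress),
 * the remaining bits hold the grace-period counter. */
#define SEQ_STATE_BITS  2
#define SEQ_STATE_MASK  ((1UL << SEQ_STATE_BITS) - 1)

static unsigned long seq_state(unsigned long s) { return s & SEQ_STATE_MASK; }
static unsigned long seq_ctr(unsigned long s)   { return s >> SEQ_STATE_BITS; }
static unsigned long seq_start(unsigned long s) { return s + 1; }                     /* idle -> in progress */
static unsigned long seq_end(unsigned long s)   { return (s | SEQ_STATE_MASK) + 1; }  /* round up to idle */

int main(void)
{
        unsigned long s = 0;

        s = seq_start(s);
        printf("ctr=%lu state=%lu\n", seq_ctr(s), seq_state(s)); /* ctr=0 state=1 */
        s = seq_end(s);
        printf("ctr=%lu state=%lu\n", seq_ctr(s), seq_state(s)); /* ctr=1 state=0 */
        return 0;
}
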
1416 struct rcu_node *rnp; in note_gp_changes() local
1419 rnp = rdp->mynode; in note_gp_changes()
1420 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1422 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ in note_gp_changes()
1426 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
1427 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in note_gp_changes()
1449 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_init() local
1452 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_init()
1455 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1465 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1474 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1483 rcu_for_each_leaf_node(rnp) { in rcu_gp_init()
1485 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_init()
1486 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1487 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1489 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1495 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1496 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1499 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1501 if (!rnp->wait_blkd_tasks) /* Ever offline? */ in rcu_gp_init()
1502 rcu_init_new_rnp(rnp); in rcu_gp_init()
1503 } else if (rcu_preempt_has_tasks(rnp)) { in rcu_gp_init()
1504 rnp->wait_blkd_tasks = true; /* blocked tasks */ in rcu_gp_init()
1506 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1518 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1519 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { in rcu_gp_init()
1520 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1521 if (!rnp->qsmaskinit) in rcu_gp_init()
1522 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1525 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1543 rcu_for_each_node_breadth_first(rnp) { in rcu_gp_init()
1545 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_gp_init()
1547 rcu_preempt_check_blocked_tasks(rnp); in rcu_gp_init()
1548 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1549 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); in rcu_gp_init()
1550 if (rnp == rdp->mynode) in rcu_gp_init()
1551 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
1552 rcu_preempt_boost_start_gp(rnp); in rcu_gp_init()
1553 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, in rcu_gp_init()
1554 rnp->level, rnp->grplo, in rcu_gp_init()
1555 rnp->grphi, rnp->qsmask); in rcu_gp_init()
1557 mask = rnp->qsmask & ~rnp->qsmaskinitnext; in rcu_gp_init()
1558 rnp->rcu_gp_init_mask = mask; in rcu_gp_init()
1559 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) in rcu_gp_init()
1560 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_gp_init()
1562 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
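
All rcu_node structures sit in one array laid out level by level, so the breadth-first pass above is a linear walk that publishes the new ->gp_seq and arms ->qsmask from ->qsmaskinit at every node. A stripped-down sketch of that per-node work (locking, the preempt checks, and the offline-CPU reporting are omitted; names are stand-ins):

#include <stdio.h>

struct node {
        unsigned long gp_seq;
        unsigned long qsmask;       /* CPUs/children still owing a QS */
        unsigned long qsmaskinit;   /* CPUs/children that must report one */
};

static void gp_init_pass(struct node *nodes, int nnodes, unsigned long new_gp_seq)
{
        for (int i = 0; i < nnodes; i++) {          /* breadth-first == array order */
                nodes[i].qsmask = nodes[i].qsmaskinit;
                nodes[i].gp_seq = new_gp_seq;
        }
}

int main(void)
{
        struct node tree[3] = { { .qsmaskinit = 0x3 }, { .qsmaskinit = 0x1 }, { .qsmaskinit = 0x2 } };

        gp_init_pass(tree, 3, 4);
        printf("root qsmask=%#lx gp_seq=%lu\n", tree[0].qsmask, tree[0].gp_seq);
        return 0;
}
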
1576 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs_check_wake() local
1584 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_check_wake()
1595 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs() local
1608 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_fqs()
1611 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_fqs()
1624 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs_loop() local
1644 if (!READ_ONCE(rnp->qsmask) && in rcu_gp_fqs_loop()
1645 !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_loop()
1690 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_cleanup() local
1694 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_cleanup()
1708 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
1721 rcu_for_each_node_breadth_first(rnp) { in rcu_gp_cleanup()
1722 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_cleanup()
1723 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) in rcu_gp_cleanup()
1724 dump_blkd_tasks(rnp, 10); in rcu_gp_cleanup()
1725 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
1726 WRITE_ONCE(rnp->gp_seq, new_gp_seq); in rcu_gp_cleanup()
1728 if (rnp == rdp->mynode) in rcu_gp_cleanup()
1729 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
1731 needgp = rcu_future_gp_cleanup(rnp) || needgp; in rcu_gp_cleanup()
1732 sq = rcu_nocb_gp_get(rnp); in rcu_gp_cleanup()
1733 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
1739 rnp = rcu_get_root(); in rcu_gp_cleanup()
1740 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ in rcu_gp_cleanup()
1748 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { in rcu_gp_cleanup()
1749 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
1756 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
1766 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
1842 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, in rcu_report_qs_rnp() argument
1844 __releases(rnp->lock) in rcu_report_qs_rnp()
1849 raw_lockdep_assert_held_rcu_node(rnp); in rcu_report_qs_rnp()
1853 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { in rcu_report_qs_rnp()
1859 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
1863 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && in rcu_report_qs_rnp()
1864 rcu_preempt_blocked_readers_cgp(rnp)); in rcu_report_qs_rnp()
1865 rnp->qsmask &= ~mask; in rcu_report_qs_rnp()
1866 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, in rcu_report_qs_rnp()
1867 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
1868 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
1869 !!rnp->gp_tasks); in rcu_report_qs_rnp()
1870 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
1873 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
1876 rnp->completedqs = rnp->gp_seq; in rcu_report_qs_rnp()
1877 mask = rnp->grpmask; in rcu_report_qs_rnp()
1878 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
1884 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
1885 rnp_c = rnp; in rcu_report_qs_rnp()
1886 rnp = rnp->parent; in rcu_report_qs_rnp()
1887 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_qs_rnp()
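
rcu_report_qs_rnp() is the heart of quiescent-state propagation: clear the reporter's bit at the current node, stop if bits (or blocked readers) remain, otherwise clear this node's own bit in its parent and repeat until the root empties. A minimal sketch of that loop, with locking, the gp_seq check, and the blocked-reader test left out and simplified stand-in types:

#include <stdio.h>

struct node {
        struct node  *parent;
        unsigned long grpmask;   /* this node's bit in its parent's qsmask */
        unsigned long qsmask;    /* children/CPUs still owing a QS */
};

/* Returns 1 when the report emptied the root, i.e. the grace period may end. */
static int report_qs(struct node *rnp, unsigned long mask)
{
        for (;;) {
                rnp->qsmask &= ~mask;
                if (rnp->qsmask)         /* other CPUs/children still pending */
                        return 0;
                if (!rnp->parent)        /* root is empty: GP can complete */
                        return 1;
                mask = rnp->grpmask;     /* propagate: clear our bit upstairs */
                rnp = rnp->parent;
        }
}

int main(void)
{
        struct node root = { .parent = NULL, .qsmask = 0x1 };
        struct node leaf = { .parent = &root, .grpmask = 0x1, .qsmask = 0x3 };

        printf("%d\n", report_qs(&leaf, 0x1));   /* 0: another CPU still pending */
        printf("%d\n", report_qs(&leaf, 0x2));   /* 1: leaf empty, root empty */
        return 0;
}
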
1907 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) in rcu_report_unblock_qs_rnp() argument
1908 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
1914 raw_lockdep_assert_held_rcu_node(rnp); in rcu_report_unblock_qs_rnp()
1916 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || in rcu_report_unblock_qs_rnp()
1917 rnp->qsmask != 0) { in rcu_report_unblock_qs_rnp()
1918 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_unblock_qs_rnp()
1922 rnp->completedqs = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
1923 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
1934 gps = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
1935 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
1936 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_report_unblock_qs_rnp()
1953 struct rcu_node *rnp; in rcu_report_qs_rdp() local
1955 rnp = rdp->mynode; in rcu_report_qs_rdp()
1956 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_qs_rdp()
1957 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
1967 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rdp()
1972 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
1973 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rdp()
1980 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_report_qs_rdp()
1982 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_qs_rdp()
2030 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu() local
2035 blkd = !!(rnp->qsmask & rdp->grpmask); in rcutree_dying_cpu()
2036 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, in rcutree_dying_cpu()
2061 struct rcu_node *rnp = rnp_leaf; in rcu_cleanup_dead_rnp() local
2069 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
2070 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
2071 if (!rnp) in rcu_cleanup_dead_rnp()
2073 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_cleanup_dead_rnp()
2074 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
2076 WARN_ON_ONCE(rnp->qsmask); in rcu_cleanup_dead_rnp()
2077 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
2078 raw_spin_unlock_rcu_node(rnp); in rcu_cleanup_dead_rnp()
2082 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_cleanup_dead_rnp()
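
rcu_cleanup_dead_rnp() propagates the loss of a leaf's last online CPU upward: clear the leaf's bit in each ancestor's ->qsmaskinit until an ancestor still has other online subtrees. rcu_init_new_rnp(), later in this listing, is the mirror image for the first CPU coming online. A lock-free sketch with stand-in types:

#include <stdio.h>

struct node {
        struct node  *parent;
        unsigned long grpmask;     /* this node's bit in its parent */
        unsigned long qsmaskinit;  /* children with online CPUs */
};

static void cleanup_dead(struct node *leaf)
{
        unsigned long mask;

        for (struct node *rnp = leaf; rnp->parent; ) {
                mask = rnp->grpmask;
                rnp = rnp->parent;
                rnp->qsmaskinit &= ~mask;
                if (rnp->qsmaskinit)       /* other subtrees still online: stop here */
                        return;
        }
}

int main(void)
{
        struct node root = { .parent = NULL, .qsmaskinit = 0x3 };
        struct node leaf = { .parent = &root, .grpmask = 0x1, .qsmaskinit = 0 };

        cleanup_dead(&leaf);
        printf("root qsmaskinit=%#lx\n", root.qsmaskinit);   /* 0x2 */
        return 0;
}
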
2095 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_dead_cpu() local
2101 rcu_boost_kthread_setaffinity(rnp, -1); in rcutree_dead_cpu()
2262 struct rcu_node *rnp; in force_qs_rnp() local
2264 rcu_for_each_leaf_node(rnp) { in force_qs_rnp()
2267 raw_spin_lock_irqsave_rcu_node(rnp, flags); in force_qs_rnp()
2268 if (rnp->qsmask == 0) { in force_qs_rnp()
2270 rcu_preempt_blocked_readers_cgp(rnp)) { in force_qs_rnp()
2276 rcu_initiate_boost(rnp, flags); in force_qs_rnp()
2280 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in force_qs_rnp()
2283 for_each_leaf_node_possible_cpu(rnp, cpu) { in force_qs_rnp()
2284 unsigned long bit = leaf_node_cpu_bit(rnp, cpu); in force_qs_rnp()
2285 if ((rnp->qsmask & bit) != 0) { in force_qs_rnp()
2292 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in force_qs_rnp()
2295 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in force_qs_rnp()
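
force_qs_rnp() scans each leaf node's CPUs; CPU c owns bit (c - grplo) in that leaf's masks, and CPUs found quiescent are collected into a local mask that is reported in one call afterwards. A sketch of that scan, with the dyntick/idle test replaced by a caller-supplied predicate and stand-in names:

#include <stdio.h>

struct leaf {
        int grplo, grphi;       /* range of CPUs covered by this leaf node */
        unsigned long qsmask;   /* CPUs still owing a quiescent state */
};

static unsigned long scan_leaf(struct leaf *rnp, int (*cpu_quiescent)(int cpu))
{
        unsigned long mask = 0;

        for (int cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
                unsigned long bit = 1UL << (cpu - rnp->grplo);

                if ((rnp->qsmask & bit) && cpu_quiescent(cpu))
                        mask |= bit;       /* remember it; report after the loop */
        }
        return mask;                       /* caller would pass this to report_qs() */
}

static int idle_cpu_stub(int cpu) { return cpu == 2; }   /* pretend only CPU 2 is idle */

int main(void)
{
        struct leaf rnp = { .grplo = 0, .grphi = 3, .qsmask = 0xf };

        printf("mask=%#lx\n", scan_leaf(&rnp, idle_cpu_stub));   /* 0x4 */
        return 0;
}
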
2308 struct rcu_node *rnp; in rcu_force_quiescent_state() local
2312 rnp = __this_cpu_read(rcu_data.mynode); in rcu_force_quiescent_state()
2313 for (; rnp != NULL; rnp = rnp->parent) { in rcu_force_quiescent_state()
2315 !raw_spin_trylock(&rnp->fqslock); in rcu_force_quiescent_state()
2320 rnp_old = rnp; in rcu_force_quiescent_state()
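
rcu_force_quiescent_state() uses a funnel lock: walk leaf-to-root trying each level's ->fqslock and dropping the previously held one; losing any trylock means another CPU is already pushing the same request upward, so back off. A user-space sketch of that funnel using pthread mutexes as stand-ins for the raw spinlocks (the real code additionally checks the FQS flag and keeps the root locked while setting it):

#include <pthread.h>
#include <stdio.h>

struct node {
        struct node    *parent;
        pthread_mutex_t fqslock;
};

/* Returns 1 if this caller won the funnel all the way to the root. */
static int funnel_to_root(struct node *leaf)
{
        struct node *old = NULL;

        for (struct node *rnp = leaf; rnp; rnp = rnp->parent) {
                int lost = pthread_mutex_trylock(&rnp->fqslock) != 0;

                if (old)
                        pthread_mutex_unlock(&old->fqslock);
                if (lost)
                        return 0;                      /* someone else got there first */
                old = rnp;
        }
        pthread_mutex_unlock(&old->fqslock);           /* release the root-level lock */
        return 1;
}

int main(void)
{
        struct node root = { .parent = NULL, .fqslock = PTHREAD_MUTEX_INITIALIZER };
        struct node leaf = { .parent = &root, .fqslock = PTHREAD_MUTEX_INITIALIZER };

        printf("%d\n", funnel_to_root(&leaf));         /* 1: uncontended walk wins */
        return 0;
}
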
2343 struct rcu_node *rnp = rdp->mynode; in rcu_core() local
2368 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2372 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2792 struct rcu_node *rnp = rdp->mynode; in rcu_pending() local
2822 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
2969 struct rcu_node *rnp = rnp_leaf; in rcu_init_new_rnp() local
2972 WARN_ON_ONCE(rnp->wait_blkd_tasks); in rcu_init_new_rnp()
2974 mask = rnp->grpmask; in rcu_init_new_rnp()
2975 rnp = rnp->parent; in rcu_init_new_rnp()
2976 if (rnp == NULL) in rcu_init_new_rnp()
2978 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ in rcu_init_new_rnp()
2979 oldmask = rnp->qsmaskinit; in rcu_init_new_rnp()
2980 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
2981 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ in rcu_init_new_rnp()
3021 struct rcu_node *rnp = rcu_get_root(); in rcutree_prepare_cpu() local
3024 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_prepare_cpu()
3033 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcutree_prepare_cpu()
3040 rnp = rdp->mynode; in rcutree_prepare_cpu()
3041 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcutree_prepare_cpu()
3043 rdp->gp_seq = rnp->gp_seq; in rcutree_prepare_cpu()
3044 rdp->gp_seq_needed = rnp->gp_seq; in rcutree_prepare_cpu()
3048 rdp->rcu_iw_gp_seq = rnp->gp_seq - 1; in rcutree_prepare_cpu()
3050 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_prepare_cpu()
3075 struct rcu_node *rnp; in rcutree_online_cpu() local
3078 rnp = rdp->mynode; in rcutree_online_cpu()
3079 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_online_cpu()
3080 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
3081 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_online_cpu()
3097 struct rcu_node *rnp; in rcutree_offline_cpu() local
3100 rnp = rdp->mynode; in rcutree_offline_cpu()
3101 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_offline_cpu()
3102 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
3103 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_offline_cpu()
3129 struct rcu_node *rnp; in rcu_cpu_starting() local
3137 rnp = rdp->mynode; in rcu_cpu_starting()
3139 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_cpu_starting()
3140 rnp->qsmaskinitnext |= mask; in rcu_cpu_starting()
3141 oldmask = rnp->expmaskinitnext; in rcu_cpu_starting()
3142 rnp->expmaskinitnext |= mask; in rcu_cpu_starting()
3143 oldmask ^= rnp->expmaskinitnext; in rcu_cpu_starting()
3147 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
3150 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ in rcu_cpu_starting()
3152 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_cpu_starting()
3154 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_cpu_starting()
3173 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_report_dead() local
3184 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ in rcu_report_dead()
3187 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ in rcu_report_dead()
3189 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_dead()
3190 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_dead()
3192 rnp->qsmaskinitnext &= ~mask; in rcu_report_dead()
3193 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_dead()
3277 struct rcu_node *rnp; in rcu_spawn_gp_kthread() local
3304 rnp = rcu_get_root(); in rcu_spawn_gp_kthread()
3305 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_spawn_gp_kthread()
3307 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_spawn_gp_kthread()
3348 struct rcu_node *rnp; in rcu_init_one() local
3367 rnp = rcu_state.level[i]; in rcu_init_one()
3368 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { in rcu_init_one()
3369 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); in rcu_init_one()
3370 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), in rcu_init_one()
3372 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
3373 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
3375 rnp->gp_seq = rcu_state.gp_seq; in rcu_init_one()
3376 rnp->gp_seq_needed = rcu_state.gp_seq; in rcu_init_one()
3377 rnp->completedqs = rcu_state.gp_seq; in rcu_init_one()
3378 rnp->qsmask = 0; in rcu_init_one()
3379 rnp->qsmaskinit = 0; in rcu_init_one()
3380 rnp->grplo = j * cpustride; in rcu_init_one()
3381 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
3382 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
3383 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
3385 rnp->grpnum = 0; in rcu_init_one()
3386 rnp->grpmask = 0; in rcu_init_one()
3387 rnp->parent = NULL; in rcu_init_one()
3389 rnp->grpnum = j % levelspread[i - 1]; in rcu_init_one()
3390 rnp->grpmask = BIT(rnp->grpnum); in rcu_init_one()
3391 rnp->parent = rcu_state.level[i - 1] + in rcu_init_one()
3394 rnp->level = i; in rcu_init_one()
3395 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
3396 rcu_init_one_nocb(rnp); in rcu_init_one()
3397 init_waitqueue_head(&rnp->exp_wq[0]); in rcu_init_one()
3398 init_waitqueue_head(&rnp->exp_wq[1]); in rcu_init_one()
3399 init_waitqueue_head(&rnp->exp_wq[2]); in rcu_init_one()
3400 init_waitqueue_head(&rnp->exp_wq[3]); in rcu_init_one()
3401 spin_lock_init(&rnp->exp_lock); in rcu_init_one()
3407 rnp = rcu_first_leaf_node(); in rcu_init_one()
3409 while (i > rnp->grphi) in rcu_init_one()
3410 rnp++; in rcu_init_one()
3411 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
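
rcu_init_one() lays the tree out by arithmetic: each node's grplo/grphi comes from a per-level CPU stride, its grpmask is the bit it occupies in its parent, and the final loop points every CPU at the leaf whose range contains it. A sketch of that geometry for a hypothetical two-level tree (CPU count and fanout are made up; field names follow the listing, the rest is a stand-in):

#include <stdio.h>

#define NCPUS   6
#define FANOUT  4          /* CPUs per leaf */
#define NLEAVES ((NCPUS + FANOUT - 1) / FANOUT)

struct node {
        struct node *parent;
        int grplo, grphi, grpnum;
        unsigned long grpmask;
};

int main(void)
{
        struct node root = { .parent = NULL, .grplo = 0, .grphi = NCPUS - 1 };
        struct node leaves[NLEAVES];
        struct node *mynode[NCPUS];

        for (int j = 0; j < NLEAVES; j++) {
                leaves[j].parent  = &root;
                leaves[j].grpnum  = j;
                leaves[j].grpmask = 1UL << j;              /* bit owned in the parent */
                leaves[j].grplo   = j * FANOUT;
                leaves[j].grphi   = (j + 1) * FANOUT - 1;
                if (leaves[j].grphi >= NCPUS)
                        leaves[j].grphi = NCPUS - 1;       /* clamp the last leaf */
        }

        /* Point each CPU at its leaf, mirroring the final loop shown above. */
        struct node *rnp = &leaves[0];
        for (int cpu = 0; cpu < NCPUS; cpu++) {
                while (cpu > rnp->grphi)
                        rnp++;
                mynode[cpu] = rnp;
        }

        printf("cpu5 -> leaf %d [%d..%d]\n",
               mynode[5]->grpnum, mynode[5]->grplo, mynode[5]->grphi); /* leaf 1 [4..5] */
        return 0;
}
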
3503 struct rcu_node *rnp; in rcu_dump_rcu_node_tree() local
3507 rcu_for_each_node_breadth_first(rnp) { in rcu_dump_rcu_node_tree()
3508 if (rnp->level != level) { in rcu_dump_rcu_node_tree()
3511 level = rnp->level; in rcu_dump_rcu_node_tree()
3513 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); in rcu_dump_rcu_node_tree()