Lines matching refs:rnp — identifier cross-reference for the rcu_node pointer rnp. Each entry shows the source line number, the fragment that matched, and the enclosing function; the rsp-taking signatures and function names correspond to kernel/rcu/tree.c in the v4.18/v4.19-era RCU tree implementation.

163 		  struct rcu_node *rnp, unsigned long gps, unsigned long flags);
166 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
210 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) in rcu_rnp_online_cpus() argument
212 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
636 struct rcu_node *rnp; in show_rcu_gp_kthreads() local
642 rcu_for_each_node_breadth_first(rsp, rnp) { in show_rcu_gp_kthreads()
643 if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed)) in show_rcu_gp_kthreads()
646 rnp->grplo, rnp->grphi, rnp->gp_seq, in show_rcu_gp_kthreads()
647 rnp->gp_seq_needed); in show_rcu_gp_kthreads()
648 if (!rcu_is_leaf_node(rnp)) in show_rcu_gp_kthreads()
650 for_each_leaf_node_possible_cpu(rnp, cpu) { in show_rcu_gp_kthreads()
1071 struct rcu_node *rnp; in rcu_lockdep_current_cpu_online() local
1079 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
1080 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) { in rcu_lockdep_current_cpu_online()
1112 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
1114 raw_lockdep_assert_held_rcu_node(rnp); in rcu_gpnum_ovf()
1116 rnp->gp_seq)) in rcu_gpnum_ovf()
1118 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
1119 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
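rcu_gpnum_ovf() (lines 1112-1119) defends against ->gp_seq wrap by clamping rdp->rcu_iw_gp_seq whenever it falls more than ULONG_MAX/4 behind. That only works because ULONG_CMP_LT()/ULONG_CMP_GE() compare modulo 2^BITS_PER_LONG; here is a standalone demonstration, with the macro bodies matching kernel/rcu/rcu.h as I recall them and illustrative test values:

```c
/* Wrap-safe unsigned-long comparisons, as relied on by rcu_gpnum_ovf().
 * Two counters are ordered by their difference modulo 2^BITS_PER_LONG,
 * so the ordering survives the counter wrapping past zero. */
#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long near_wrap = ULONG_MAX - 1;
	unsigned long wrapped = near_wrap + 10;	/* wraps past zero to 8 */

	/* Plain "<" gets this wrong; the modular comparison does not. */
	printf("plain:   %d\n", near_wrap < wrapped);		    /* 0 */
	printf("modular: %d\n", ULONG_CMP_LT(near_wrap, wrapped)); /* 1 */
	return 0;
}
```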
1147 struct rcu_node *rnp; in rcu_iw_handler() local
1150 rnp = rdp->mynode; in rcu_iw_handler()
1151 raw_spin_lock_rcu_node(rnp); in rcu_iw_handler()
1153 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_iw_handler()
1156 raw_spin_unlock_rcu_node(rnp); in rcu_iw_handler()
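rcu_iw_handler() (lines 1147-1156) runs as an irq-work callback on a CPU the forced-quiescent-state scan suspects is stalled, and records the current grace period under the rcu_node lock once that CPU actually takes an interrupt. A schematic sketch of the plumbing, reconstructed from these fragments plus the rcu_implicit_dynticks_qs() lines below (1263-1267); the init_irq_work()/irq_work_queue_on() calls are the generic <linux/irq_work.h> API and my assumption about the surrounding code, and rcu_poke_cpu() is a hypothetical wrapper, not a kernel function:

```c
#include <linux/irq_work.h>

/* Handler: runs in hard-irq context on the target CPU. */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp = container_of(iwp, struct rcu_data, rcu_iw);
	struct rcu_node *rnp = rdp->mynode;

	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;	/* CPU responded. */
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

/* Hypothetical wrapper showing how the FQS scan posts the work. */
static void rcu_poke_cpu(struct rcu_data *rdp, struct rcu_node *rnp)
{
	if (!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq) {
		init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
		rdp->rcu_iw_pending = true;
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); /* interrupt the laggard */
	}
}
```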
1170 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs() local
1183 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1197 rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) { in rcu_implicit_dynticks_qs()
1199 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1207 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && in rcu_implicit_dynticks_qs()
1214 __func__, rnp->grplo, rnp->grphi, rnp->level, in rcu_implicit_dynticks_qs()
1215 (long)rnp->gp_seq, (long)rnp->completedqs); in rcu_implicit_dynticks_qs()
1216 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in rcu_implicit_dynticks_qs()
1219 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in rcu_implicit_dynticks_qs()
1263 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
1264 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
1267 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
1334 struct rcu_node *rnp; in rcu_dump_cpu_stacks() local
1336 rcu_for_each_leaf_node(rsp, rnp) { in rcu_dump_cpu_stacks()
1337 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_dump_cpu_stacks()
1338 for_each_leaf_node_possible_cpu(rnp, cpu) in rcu_dump_cpu_stacks()
1339 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) in rcu_dump_cpu_stacks()
1342 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_dump_cpu_stacks()
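rcu_dump_cpu_stacks() (lines 1334-1342) walks each leaf's CPUs and tests per-CPU bits in ->qsmask. A standalone model of that bit mapping: leaf_node_cpu_bit() below matches the helper in kernel/rcu/rcu.h as I recall it (1UL << (cpu - grplo)), while the real for_each_leaf_node_possible_cpu() additionally skips CPUs absent from cpu_possible_mask:

```c
#include <stdio.h>

struct rcu_node {
	unsigned long qsmask;	/* CPUs still owing a quiescent state */
	int grplo, grphi;	/* lowest/highest CPU covered by this leaf */
};

/* Per-CPU bit within a leaf node's masks. */
static unsigned long leaf_node_cpu_bit(const struct rcu_node *rnp, int cpu)
{
	return 1UL << (cpu - rnp->grplo);
}

int main(void)
{
	struct rcu_node leaf = { .qsmask = 0x5, .grplo = 16, .grphi = 31 };

	for (int cpu = leaf.grplo; cpu <= leaf.grphi; cpu++)
		if (leaf.qsmask & leaf_node_cpu_bit(&leaf, cpu))
			printf("CPU %d still blocking the grace period\n", cpu);
	/* prints CPUs 16 and 18 */
	return 0;
}
```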
1379 struct rcu_node *rnp = rcu_get_root(rsp); in print_other_cpu_stall() local
1394 rcu_for_each_leaf_node(rsp, rnp) { in print_other_cpu_stall()
1395 raw_spin_lock_irqsave_rcu_node(rnp, flags); in print_other_cpu_stall()
1396 ndetected += rcu_print_task_stall(rnp); in print_other_cpu_stall()
1397 if (rnp->qsmask != 0) { in print_other_cpu_stall()
1398 for_each_leaf_node_possible_cpu(rnp, cpu) in print_other_cpu_stall()
1399 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) { in print_other_cpu_stall()
1404 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in print_other_cpu_stall()
1450 struct rcu_node *rnp = rcu_get_root(rsp); in print_cpu_stall() local
1480 raw_spin_lock_irqsave_rcu_node(rnp, flags); in print_cpu_stall()
1485 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in print_cpu_stall()
1507 struct rcu_node *rnp; in check_cpu_stall() local
1543 rnp = rdp->mynode; in check_cpu_stall()
1546 (READ_ONCE(rnp->qsmask) & rdp->grpmask) && in check_cpu_stall()
1579 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
1582 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req, in trace_rcu_this_gp()
1583 rnp->level, rnp->grplo, rnp->grphi, s); in trace_rcu_this_gp()
1607 struct rcu_node *rnp; in rcu_start_this_gp() local
1620 for (rnp = rnp_start; 1; rnp = rnp->parent) { in rcu_start_this_gp()
1621 if (rnp != rnp_start) in rcu_start_this_gp()
1622 raw_spin_lock_rcu_node(rnp); in rcu_start_this_gp()
1623 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || in rcu_start_this_gp()
1624 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || in rcu_start_this_gp()
1625 (rnp != rnp_start && in rcu_start_this_gp()
1626 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { in rcu_start_this_gp()
1627 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1631 rnp->gp_seq_needed = gp_seq_req; in rcu_start_this_gp()
1632 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { in rcu_start_this_gp()
1643 if (rnp != rnp_start && rnp->parent != NULL) in rcu_start_this_gp()
1644 raw_spin_unlock_rcu_node(rnp); in rcu_start_this_gp()
1645 if (!rnp->parent) in rcu_start_this_gp()
1651 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1654 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1658 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1665 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { in rcu_start_this_gp()
1666 rnp_start->gp_seq_needed = rnp->gp_seq_needed; in rcu_start_this_gp()
1667 rdp->gp_seq_needed = rnp->gp_seq_needed; in rcu_start_this_gp()
1669 if (rnp != rnp_start) in rcu_start_this_gp()
1670 raw_spin_unlock_rcu_node(rnp); in rcu_start_this_gp()
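Lines 1620-1670 above are the funnel-locking walk of rcu_start_this_gp(): start at a leaf whose lock the caller already holds, lock each ancestor only long enough to record gp_seq_needed, and bail as soon as an earlier request or an in-flight grace period makes further climbing pointless. A condensed reconstruction of just that shape (tracing and the wakeup decision omitted):

```c
/*
 * Funnel-lock walk: rnp_start's lock is held throughout by the caller;
 * each interior node is locked only while being marked.  An early
 * ULONG_CMP_GE() hit means a prior request already covers gp_seq_req,
 * so contention funnels out near the leaves instead of at the root.
 */
for (rnp = rnp_start; 1; rnp = rnp->parent) {
	if (rnp != rnp_start)
		raw_spin_lock_rcu_node(rnp);
	if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
	    rcu_seq_started(&rnp->gp_seq, gp_seq_req))
		goto unlock_out;	/* request already recorded */
	rnp->gp_seq_needed = gp_seq_req;	/* record our request */
	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
		goto unlock_out;	/* in-flight GP will see the mark */
	if (rnp != rnp_start && rnp->parent != NULL)
		raw_spin_unlock_rcu_node(rnp);
	if (!rnp->parent)
		break;			/* at the root: request a new GP */
}
/* ... set RCU_GP_FLAG_INIT and decide whether to wake the GP kthread ... */
unlock_out:
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
```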
1678 static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_future_gp_cleanup() argument
1683 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); in rcu_future_gp_cleanup()
1685 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ in rcu_future_gp_cleanup()
1686 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
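The tests above in rcu_start_this_gp() and rcu_future_gp_cleanup() (rcu_seq_started(), rcu_seq_state(), rcu_seq_current(), ULONG_CMP_LT()) all read the same ->gp_seq encoding: the low two bits hold grace-period state, the rest is a counter. A standalone sketch of that encoding, following the rcu_seq_*() helpers in kernel/rcu/rcu.h as I understand them, with WRITE_ONCE()/barrier and WARN decoration dropped:

```c
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Low two bits: 0 = idle, nonzero = grace period in progress. */
static unsigned long rcu_seq_ctr(unsigned long s) { return s >> RCU_SEQ_CTR_SHIFT; }
static int rcu_seq_state(unsigned long s)	  { return s & RCU_SEQ_STATE_MASK; }
static void rcu_seq_start(unsigned long *sp)	  { *sp += 1; }
static void rcu_seq_end(unsigned long *sp)	  { *sp = (*sp | RCU_SEQ_STATE_MASK) + 1; }

int main(void)
{
	unsigned long gp_seq = 0;

	rcu_seq_start(&gp_seq);	/* counter 0, state 1: GP running */
	printf("ctr=%lu state=%d\n", rcu_seq_ctr(gp_seq), rcu_seq_state(gp_seq));
	rcu_seq_end(&gp_seq);	/* counter 1, state 0: GP done */
	printf("ctr=%lu state=%d\n", rcu_seq_ctr(gp_seq), rcu_seq_state(gp_seq));
	return 0;
}
```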
1719 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_accelerate_cbs() argument
1725 raw_lockdep_assert_held_rcu_node(rnp); in rcu_accelerate_cbs()
1743 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1761 struct rcu_node *rnp, in rcu_accelerate_cbs_unlocked() argument
1774 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_accelerate_cbs_unlocked()
1775 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_accelerate_cbs_unlocked()
1776 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_accelerate_cbs_unlocked()
1791 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_advance_cbs() argument
1794 raw_lockdep_assert_held_rcu_node(rnp); in rcu_advance_cbs()
1804 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1807 return rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_advance_cbs()
1816 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, in __note_gp_changes() argument
1822 raw_lockdep_assert_held_rcu_node(rnp); in __note_gp_changes()
1824 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1828 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1830 ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */ in __note_gp_changes()
1833 ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */ in __note_gp_changes()
1837 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1844 trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart")); in __note_gp_changes()
1845 need_gp = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1851 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1852 if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1853 rdp->gp_seq_needed = rnp->gp_seq_needed; in __note_gp_changes()
1855 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1863 struct rcu_node *rnp; in note_gp_changes() local
1866 rnp = rdp->mynode; in note_gp_changes()
1867 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1869 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ in note_gp_changes()
1873 needwake = __note_gp_changes(rsp, rnp, rdp); in note_gp_changes()
1874 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in note_gp_changes()
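note_gp_changes() (lines 1863-1874) shows a lock-avoidance fast path: if this CPU's rdp->gp_seq already matches the leaf's and no wrap is suspected, skip locking entirely; otherwise only trylock, deferring to a later pass rather than contending with the grace-period kthread. A reconstruction of that shape from the fragments above; the unlikely()/READ_ONCE() decoration is from memory:

```c
local_irq_save(flags);
rnp = rdp->mynode;
if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
     !unlikely(READ_ONCE(rdp->gpwrap))) ||	/* checked without the lock */
    !raw_spin_trylock_rcu_node(rnp)) {		/* irqs already off, so later. */
	local_irq_restore(flags);
	return;
}
needwake = __note_gp_changes(rsp, rnp, rdp);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (needwake)
	rcu_gp_kthread_wake(rsp);
```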
1896 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_init() local
1899 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_init()
1902 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1912 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1921 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1930 rcu_for_each_leaf_node(rsp, rnp) { in rcu_gp_init()
1932 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_init()
1933 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1934 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1936 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1942 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1943 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1946 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1948 if (!rnp->wait_blkd_tasks) /* Ever offline? */ in rcu_gp_init()
1949 rcu_init_new_rnp(rnp); in rcu_gp_init()
1950 } else if (rcu_preempt_has_tasks(rnp)) { in rcu_gp_init()
1951 rnp->wait_blkd_tasks = true; /* blocked tasks */ in rcu_gp_init()
1953 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1965 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1966 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { in rcu_gp_init()
1967 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1968 if (!rnp->qsmaskinit) in rcu_gp_init()
1969 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1972 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1990 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_gp_init()
1992 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_gp_init()
1994 rcu_preempt_check_blocked_tasks(rsp, rnp); in rcu_gp_init()
1995 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1996 WRITE_ONCE(rnp->gp_seq, rsp->gp_seq); in rcu_gp_init()
1997 if (rnp == rdp->mynode) in rcu_gp_init()
1998 (void)__note_gp_changes(rsp, rnp, rdp); in rcu_gp_init()
1999 rcu_preempt_boost_start_gp(rnp); in rcu_gp_init()
2000 trace_rcu_grace_period_init(rsp->name, rnp->gp_seq, in rcu_gp_init()
2001 rnp->level, rnp->grplo, in rcu_gp_init()
2002 rnp->grphi, rnp->qsmask); in rcu_gp_init()
2004 mask = rnp->qsmask & ~rnp->qsmaskinitnext; in rcu_gp_init()
2005 rnp->rcu_gp_init_mask = mask; in rcu_gp_init()
2006 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) in rcu_gp_init()
2007 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); in rcu_gp_init()
2009 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
2023 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_fqs_check_wake() local
2031 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_check_wake()
2042 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_fqs() local
2055 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_fqs()
2058 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_fqs()
2071 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_cleanup() local
2075 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2088 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2101 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_gp_cleanup()
2102 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2103 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) in rcu_gp_cleanup()
2104 dump_blkd_tasks(rsp, rnp, 10); in rcu_gp_cleanup()
2105 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
2106 WRITE_ONCE(rnp->gp_seq, new_gp_seq); in rcu_gp_cleanup()
2108 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2109 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; in rcu_gp_cleanup()
2111 needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp; in rcu_gp_cleanup()
2112 sq = rcu_nocb_gp_get(rnp); in rcu_gp_cleanup()
2113 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2119 rnp = rcu_get_root(rsp); in rcu_gp_cleanup()
2120 raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */ in rcu_gp_cleanup()
2128 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { in rcu_gp_cleanup()
2129 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2134 if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) { in rcu_gp_cleanup()
2142 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2155 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_kthread() local
2199 if (!READ_ONCE(rnp->qsmask) && in rcu_gp_kthread()
2200 !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_kthread()
2276 struct rcu_node *rnp, unsigned long gps, unsigned long flags) in rcu_report_qs_rnp() argument
2277 __releases(rnp->lock) in rcu_report_qs_rnp()
2282 raw_lockdep_assert_held_rcu_node(rnp); in rcu_report_qs_rnp()
2286 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { in rcu_report_qs_rnp()
2292 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2296 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && in rcu_report_qs_rnp()
2297 rcu_preempt_blocked_readers_cgp(rnp)); in rcu_report_qs_rnp()
2298 rnp->qsmask &= ~mask; in rcu_report_qs_rnp()
2299 trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq, in rcu_report_qs_rnp()
2300 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
2301 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
2302 !!rnp->gp_tasks); in rcu_report_qs_rnp()
2303 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
2306 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2309 rnp->completedqs = rnp->gp_seq; in rcu_report_qs_rnp()
2310 mask = rnp->grpmask; in rcu_report_qs_rnp()
2311 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
2317 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2318 rnp_c = rnp; in rcu_report_qs_rnp()
2319 rnp = rnp->parent; in rcu_report_qs_rnp()
2320 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_qs_rnp()
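rcu_report_qs_rnp() (lines 2276-2320) clears ->qsmask bits at one node and, whenever a node's mask empties, moves the report one level up using the node's own grpmask bit; the grace period can end only when this reaches the root with an empty mask. A standalone toy model of that propagation (locking, gp_seq checks, and blocked-reader handling omitted):

```c
#include <stdio.h>

struct rcu_node {
	unsigned long qsmask;		/* children/CPUs still to report */
	unsigned long grpmask;		/* this node's bit in its parent */
	struct rcu_node *parent;
};

/* Report that the CPUs/groups in @mask beneath @rnp are quiescent. */
static void report_qs_rnp(struct rcu_node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;
		if (rnp->qsmask || !rnp->parent)
			break;		/* subtree not done, or at root */
		mask = rnp->grpmask;	/* one bit in the parent */
		rnp = rnp->parent;
	}
	if (!rnp->parent && !rnp->qsmask)
		printf("grace period can end\n");
}

int main(void)
{
	struct rcu_node root = { .qsmask = 0x3 };
	struct rcu_node leaf0 = { .qsmask = 0x1, .grpmask = 0x1, .parent = &root };
	struct rcu_node leaf1 = { .qsmask = 0x1, .grpmask = 0x2, .parent = &root };

	report_qs_rnp(&leaf0, 0x1);	/* root still waits on leaf1 */
	report_qs_rnp(&leaf1, 0x1);	/* now the whole tree is quiet */
	return 0;
}
```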
2341 struct rcu_node *rnp, unsigned long flags) in rcu_report_unblock_qs_rnp() argument
2342 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
2348 raw_lockdep_assert_held_rcu_node(rnp); in rcu_report_unblock_qs_rnp()
2351 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || in rcu_report_unblock_qs_rnp()
2352 rnp->qsmask != 0) { in rcu_report_unblock_qs_rnp()
2353 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_unblock_qs_rnp()
2357 rnp->completedqs = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2358 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
2369 gps = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2370 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
2371 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_report_unblock_qs_rnp()
2386 struct rcu_node *rnp; in rcu_report_qs_rdp() local
2388 rnp = rdp->mynode; in rcu_report_qs_rdp()
2389 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2390 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2401 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2405 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
2406 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2414 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_report_qs_rdp()
2416 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); in rcu_report_qs_rdp()
2463 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;) in rcu_cleanup_dying_cpu()
2468 RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);) in rcu_cleanup_dying_cpu()
2469 trace_rcu_grace_period(rsp->name, rnp->gp_seq, in rcu_cleanup_dying_cpu()
2493 struct rcu_node *rnp = rnp_leaf; in rcu_cleanup_dead_rnp() local
2501 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
2502 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
2503 if (!rnp) in rcu_cleanup_dead_rnp()
2505 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_cleanup_dead_rnp()
2506 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
2508 WARN_ON_ONCE(rnp->qsmask); in rcu_cleanup_dead_rnp()
2509 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
2510 raw_spin_unlock_rcu_node(rnp); in rcu_cleanup_dead_rnp()
2514 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_cleanup_dead_rnp()
2527 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_cleanup_dead_cpu() local
2533 rcu_boost_kthread_setaffinity(rnp, -1); in rcu_cleanup_dead_cpu()
2683 struct rcu_node *rnp; in force_qs_rnp() local
2685 rcu_for_each_leaf_node(rsp, rnp) { in force_qs_rnp()
2688 raw_spin_lock_irqsave_rcu_node(rnp, flags); in force_qs_rnp()
2689 if (rnp->qsmask == 0) { in force_qs_rnp()
2692 rcu_preempt_blocked_readers_cgp(rnp)) { in force_qs_rnp()
2698 rcu_initiate_boost(rnp, flags); in force_qs_rnp()
2702 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in force_qs_rnp()
2705 for_each_leaf_node_possible_cpu(rnp, cpu) { in force_qs_rnp()
2706 unsigned long bit = leaf_node_cpu_bit(rnp, cpu); in force_qs_rnp()
2707 if ((rnp->qsmask & bit) != 0) { in force_qs_rnp()
2714 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); in force_qs_rnp()
2717 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in force_qs_rnp()
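force_qs_rnp() (lines 2683-2717) scans each leaf, collects the still-unreported CPUs that the checker f() can vouch for (e.g. dyntick-idle or offline), and hands the resulting mask to rcu_report_qs_rnp(), which releases the node lock itself. A condensed reconstruction of that loop's shape from the fragments above (the qsmask==0 boost path omitted):

```c
rcu_for_each_leaf_node(rsp, rnp) {
	unsigned long mask = 0;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	for_each_leaf_node_possible_cpu(rnp, cpu) {
		unsigned long bit = leaf_node_cpu_bit(rnp, cpu);

		/* Ask f() only about CPUs that still owe a QS. */
		if ((rnp->qsmask & bit) != 0 &&
		    f(per_cpu_ptr(rsp->rda, cpu)))
			mask |= bit;
	}
	if (mask)
		/* Releases rnp->lock on our behalf. */
		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
	else
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
```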
2730 struct rcu_node *rnp; in force_quiescent_state() local
2734 rnp = __this_cpu_read(rsp->rda->mynode); in force_quiescent_state()
2735 for (; rnp != NULL; rnp = rnp->parent) { in force_quiescent_state()
2737 !raw_spin_trylock(&rnp->fqslock); in force_quiescent_state()
2742 rnp_old = rnp; in force_quiescent_state()
2763 rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_check_gp_start_stall() argument
2781 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_check_gp_start_stall()
2788 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_check_gp_start_stall()
2793 if (rnp_root != rnp) in rcu_check_gp_start_stall()
2802 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_check_gp_start_stall()
2812 if (rnp_root != rnp) in rcu_check_gp_start_stall()
2814 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_check_gp_start_stall()
2827 struct rcu_node *rnp = rdp->mynode; in __rcu_process_callbacks() local
2839 rcu_accelerate_cbs_unlocked(rsp, rnp, rdp); in __rcu_process_callbacks()
2843 rcu_check_gp_start_stall(rsp, rnp, rdp); in __rcu_process_callbacks()
3273 struct rcu_node *rnp = rdp->mynode; in __rcu_pending() local
3297 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in __rcu_pending()
3516 struct rcu_node *rnp = rnp_leaf; in rcu_init_new_rnp() local
3519 WARN_ON_ONCE(rnp->wait_blkd_tasks); in rcu_init_new_rnp()
3521 mask = rnp->grpmask; in rcu_init_new_rnp()
3522 rnp = rnp->parent; in rcu_init_new_rnp()
3523 if (rnp == NULL) in rcu_init_new_rnp()
3525 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ in rcu_init_new_rnp()
3526 oldmask = rnp->qsmaskinit; in rcu_init_new_rnp()
3527 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
3528 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ in rcu_init_new_rnp()
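rcu_init_new_rnp() (lines 3516-3528) and rcu_cleanup_dead_rnp() (lines 2493-2514, earlier) are mirror images: the first propagates a newly set ->qsmaskinit bit rootward when the first CPU under a leaf appears, the second clears bits rootward when the last one disappears, and both stop at the first ancestor whose mask doesn't change state. A standalone toy of the two walks (per-node locking and the rcu_state bookkeeping omitted):

```c
struct rcu_node {
	unsigned long qsmaskinit;	/* children with online CPUs */
	unsigned long grpmask;		/* this node's bit in its parent */
	struct rcu_node *parent;
};

/* rcu_init_new_rnp() shape: first CPU under a leaf comes online. */
static void propagate_online(struct rcu_node *rnp_leaf)
{
	unsigned long mask, oldmask;
	struct rcu_node *rnp = rnp_leaf;

	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			return;
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit |= mask;
		if (oldmask)
			return;		/* parent already knew; stop early */
	}
}

/* rcu_cleanup_dead_rnp() shape: last CPU under a leaf goes offline. */
static void propagate_offline(struct rcu_node *rnp_leaf)
{
	unsigned long mask;
	struct rcu_node *rnp = rnp_leaf;

	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			return;
		rnp->qsmaskinit &= ~mask;
		if (rnp->qsmaskinit)
			return;		/* other children still online */
	}
}
```

In both directions the early return bounds the work: a hotplug event touches only the ancestors whose masks actually flip.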
3567 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_init_percpu_data() local
3570 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_init_percpu_data()
3579 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_init_percpu_data()
3586 rnp = rdp->mynode; in rcu_init_percpu_data()
3587 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_init_percpu_data()
3589 rdp->gp_seq = rnp->gp_seq; in rcu_init_percpu_data()
3590 rdp->gp_seq_needed = rnp->gp_seq; in rcu_init_percpu_data()
3595 rdp->rcu_iw_gp_seq = rnp->gp_seq - 1; in rcu_init_percpu_data()
3597 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_init_percpu_data()
3635 struct rcu_node *rnp; in rcutree_online_cpu() local
3640 rnp = rdp->mynode; in rcutree_online_cpu()
3641 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_online_cpu()
3642 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
3643 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_online_cpu()
3662 struct rcu_node *rnp; in rcutree_offline_cpu() local
3667 rnp = rdp->mynode; in rcutree_offline_cpu()
3668 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_offline_cpu()
3669 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
3670 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_offline_cpu()
3725 struct rcu_node *rnp; in rcu_cpu_starting() local
3735 rnp = rdp->mynode; in rcu_cpu_starting()
3737 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_cpu_starting()
3738 rnp->qsmaskinitnext |= mask; in rcu_cpu_starting()
3739 oldmask = rnp->expmaskinitnext; in rcu_cpu_starting()
3740 rnp->expmaskinitnext |= mask; in rcu_cpu_starting()
3741 oldmask ^= rnp->expmaskinitnext; in rcu_cpu_starting()
3745 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
3748 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ in rcu_cpu_starting()
3750 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); in rcu_cpu_starting()
3752 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_cpu_starting()
3769 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_cleanup_dying_idle_cpu() local
3774 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ in rcu_cleanup_dying_idle_cpu()
3777 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ in rcu_cleanup_dying_idle_cpu()
3779 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); in rcu_cleanup_dying_idle_cpu()
3780 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_cleanup_dying_idle_cpu()
3782 rnp->qsmaskinitnext &= ~mask; in rcu_cleanup_dying_idle_cpu()
3783 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_cleanup_dying_idle_cpu()
3890 struct rcu_node *rnp; in rcu_spawn_gp_kthread() local
3914 rnp = rcu_get_root(rsp); in rcu_spawn_gp_kthread()
3915 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_spawn_gp_kthread()
3921 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_spawn_gp_kthread()
3963 struct rcu_node *rnp; in rcu_init_one() local
3981 rnp = rsp->level[i]; in rcu_init_one()
3982 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { in rcu_init_one()
3983 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); in rcu_init_one()
3984 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), in rcu_init_one()
3986 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
3987 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
3989 rnp->gp_seq = rsp->gp_seq; in rcu_init_one()
3990 rnp->gp_seq_needed = rsp->gp_seq; in rcu_init_one()
3991 rnp->completedqs = rsp->gp_seq; in rcu_init_one()
3992 rnp->qsmask = 0; in rcu_init_one()
3993 rnp->qsmaskinit = 0; in rcu_init_one()
3994 rnp->grplo = j * cpustride; in rcu_init_one()
3995 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
3996 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
3997 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
3999 rnp->grpnum = 0; in rcu_init_one()
4000 rnp->grpmask = 0; in rcu_init_one()
4001 rnp->parent = NULL; in rcu_init_one()
4003 rnp->grpnum = j % levelspread[i - 1]; in rcu_init_one()
4004 rnp->grpmask = 1UL << rnp->grpnum; in rcu_init_one()
4005 rnp->parent = rsp->level[i - 1] + in rcu_init_one()
4008 rnp->level = i; in rcu_init_one()
4009 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
4010 rcu_init_one_nocb(rnp); in rcu_init_one()
4011 init_waitqueue_head(&rnp->exp_wq[0]); in rcu_init_one()
4012 init_waitqueue_head(&rnp->exp_wq[1]); in rcu_init_one()
4013 init_waitqueue_head(&rnp->exp_wq[2]); in rcu_init_one()
4014 init_waitqueue_head(&rnp->exp_wq[3]); in rcu_init_one()
4015 spin_lock_init(&rnp->exp_lock); in rcu_init_one()
4021 rnp = rcu_first_leaf_node(rsp); in rcu_init_one()
4023 while (i > rnp->grphi) in rcu_init_one()
4024 rnp++; in rcu_init_one()
4025 per_cpu_ptr(rsp->rda, i)->mynode = rnp; in rcu_init_one()
4117 struct rcu_node *rnp; in rcu_dump_rcu_node_tree() local
4121 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_dump_rcu_node_tree()
4122 if (rnp->level != level) { in rcu_dump_rcu_node_tree()
4125 level = rnp->level; in rcu_dump_rcu_node_tree()
4127 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); in rcu_dump_rcu_node_tree()
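The rcu_init_one() fragments (lines 3981-4025) lay out the tree geometry: leaf j covers CPUs j*cpustride through (j+1)*cpustride-1, clamped to nr_cpu_ids-1, with grpnum giving a node's slot within its parent; rcu_dump_rcu_node_tree() then prints each node as "grplo:grphi ^grpnum". A standalone model of that arithmetic for one root plus leaves; FANOUT and NR_CPUS are illustrative stand-ins for the real RCU_FANOUT/nr_cpu_ids-derived levelspread[] and cpustride:

```c
#include <stdio.h>

#define FANOUT	4	/* illustrative, not the real RCU_FANOUT */
#define NR_CPUS	14

int main(void)
{
	int nleaves = (NR_CPUS + FANOUT - 1) / FANOUT;

	/* Root spans everything, like level 0 in rcu_init_one(). */
	printf("%d:%d ^%d\n", 0, NR_CPUS - 1, 0);

	/* Leaves: grplo = j * cpustride, grphi clamped to nr_cpu_ids - 1,
	 * grpnum = position within the parent, as in lines 3994-4004. */
	for (int j = 0; j < nleaves; j++) {
		int grplo = j * FANOUT;
		int grphi = (j + 1) * FANOUT - 1;

		if (grphi >= NR_CPUS)
			grphi = NR_CPUS - 1;
		printf("%d:%d ^%d ", grplo, grphi, j % FANOUT);
	}
	printf("\n");	/* prints: 0:3 ^0 4:7 ^1 8:11 ^2 12:13 ^3 */
	return 0;
}
```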