Lines Matching refs:rnp

88 struct rcu_node *rnp; in sync_exp_reset_tree_hotplug() local
100 rcu_for_each_leaf_node(rsp, rnp) { in sync_exp_reset_tree_hotplug()
101 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
102 if (rnp->expmaskinit == rnp->expmaskinitnext) { in sync_exp_reset_tree_hotplug()
103 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
108 oldmask = rnp->expmaskinit; in sync_exp_reset_tree_hotplug()
109 rnp->expmaskinit = rnp->expmaskinitnext; in sync_exp_reset_tree_hotplug()
110 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
117 mask = rnp->grpmask; in sync_exp_reset_tree_hotplug()
118 rnp_up = rnp->parent; in sync_exp_reset_tree_hotplug()
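
The sync_exp_reset_tree_hotplug() references above cover the hotplug pass: when a leaf's ->expmaskinitnext differs from ->expmaskinit, the leaf copies the new value and, if it previously had no CPUs contributing (oldmask was zero), its ->grpmask bit is pushed up through the parents. A minimal user-space sketch of that one-leaf update and upward propagation, with invented struct/field names mirroring the fragments and all locking omitted; this is not the kernel implementation:

struct node {
        unsigned long expmaskinit;      /* CPUs/children expected by expedited GPs */
        unsigned long expmaskinitnext;  /* updated at CPU-hotplug time */
        unsigned long grpmask;          /* this node's bit in its parent */
        struct node *parent;
};

/* One leaf's slice of the hotplug pass in the listing above. */
static void leaf_hotplug_update(struct node *leaf)
{
        unsigned long oldmask = leaf->expmaskinit;
        unsigned long mask;
        struct node *up;

        if (leaf->expmaskinit == leaf->expmaskinitnext)
                return;                 /* no change on this leaf */
        leaf->expmaskinit = leaf->expmaskinitnext;
        if (oldmask)
                return;                 /* ancestors already count this leaf */

        /* First CPUs under this leaf: propagate its bit toward the root. */
        mask = leaf->grpmask;
        for (up = leaf->parent; up; up = up->parent) {
                int already = (up->expmaskinit != 0);

                up->expmaskinit |= mask;
                if (already)
                        break;          /* higher levels already initialized */
                mask = up->grpmask;
        }
}

int main(void)
{
        struct node root = { 0 };
        struct node leaf = { .expmaskinitnext = 0x1, .grpmask = 0x1,
                             .parent = &root };

        leaf_hotplug_update(&leaf);     /* root.expmaskinit now includes the leaf */
        return 0;
}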
141 struct rcu_node *rnp; in sync_exp_reset_tree() local
144 rcu_for_each_node_breadth_first(rsp, rnp) { in sync_exp_reset_tree()
145 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_exp_reset_tree()
146 WARN_ON_ONCE(rnp->expmask); in sync_exp_reset_tree()
147 rnp->expmask = rnp->expmaskinit; in sync_exp_reset_tree()
148 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree()
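
The sync_exp_reset_tree() lines are the start-of-GP arming pass: every rcu_node, breadth first, gets ->expmask loaded from ->expmaskinit, after a warning if stale bits from a previous expedited GP remain. A compact sketch of the same pass over nodes kept in breadth-first order (user-space model, not the kernel code):

#include <assert.h>

struct node {
        unsigned long expmask;      /* CPUs/children still to report a QS */
        unsigned long expmaskinit;  /* value to arm at expedited-GP start */
};

/* Arm every node for a new expedited GP; nodes[] is in breadth-first order. */
void reset_tree(struct node *nodes, int n)
{
        for (int i = 0; i < n; i++) {
                assert(nodes[i].expmask == 0);  /* prior GP must be finished */
                nodes[i].expmask = nodes[i].expmaskinit;
        }
}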
161 static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp) in sync_rcu_preempt_exp_done() argument
163 raw_lockdep_assert_held_rcu_node(rnp); in sync_rcu_preempt_exp_done()
165 return rnp->exp_tasks == NULL && in sync_rcu_preempt_exp_done()
166 READ_ONCE(rnp->expmask) == 0; in sync_rcu_preempt_exp_done()
174 static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp) in sync_rcu_preempt_exp_done_unlocked() argument
179 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_rcu_preempt_exp_done_unlocked()
180 ret = sync_rcu_preempt_exp_done(rnp); in sync_rcu_preempt_exp_done_unlocked()
181 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_preempt_exp_done_unlocked()
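
sync_rcu_preempt_exp_done() and its _unlocked wrapper are the per-node completion test: the expedited GP is done for this subtree once no tasks block it (->exp_tasks is NULL) and no ->expmask bits remain, and the wrapper takes the node lock so the two fields are seen consistently. A small pthread sketch of the same pattern, with the mutex standing in for the rcu_node lock:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
        pthread_mutex_t lock;
        void *exp_tasks;        /* non-NULL while blocked tasks remain */
        unsigned long expmask;  /* CPUs/children still to report a QS  */
};

/* Caller must hold np->lock so both fields are read consistently. */
static bool exp_done(struct node *np)
{
        return np->exp_tasks == NULL && np->expmask == 0;
}

/* Lock, test, unlock: the _unlocked variant from the listing. */
bool exp_done_unlocked(struct node *np)
{
        bool ret;

        pthread_mutex_lock(&np->lock);
        ret = exp_done(np);
        pthread_mutex_unlock(&np->lock);
        return ret;
}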
197 static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, in __rcu_report_exp_rnp() argument
199 __releases(rnp->lock) in __rcu_report_exp_rnp()
204 if (!sync_rcu_preempt_exp_done(rnp)) { in __rcu_report_exp_rnp()
205 if (!rnp->expmask) in __rcu_report_exp_rnp()
206 rcu_initiate_boost(rnp, flags); in __rcu_report_exp_rnp()
208 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __rcu_report_exp_rnp()
211 if (rnp->parent == NULL) { in __rcu_report_exp_rnp()
212 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __rcu_report_exp_rnp()
219 mask = rnp->grpmask; in __rcu_report_exp_rnp()
220 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */ in __rcu_report_exp_rnp()
221 rnp = rnp->parent; in __rcu_report_exp_rnp()
222 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */ in __rcu_report_exp_rnp()
223 WARN_ON_ONCE(!(rnp->expmask & mask)); in __rcu_report_exp_rnp()
224 rnp->expmask &= ~mask; in __rcu_report_exp_rnp()
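
__rcu_report_exp_rnp() walks a freshly quieted node toward the root: while the current node is completely done, its ->grpmask bit is cleared in the parent, and the walk stops either at the first node that still has work outstanding or at the root, where the task driving the expedited GP is woken. A user-space sketch of that upward walk; locking and the priority-boost path are omitted and the wake is reduced to an output flag, so this is a model rather than the kernel function:

#include <stdbool.h>
#include <stddef.h>

struct node {
        unsigned long expmask;  /* children/CPUs still to report  */
        unsigned long grpmask;  /* this node's bit in its parent  */
        void *exp_tasks;        /* non-NULL: blocked tasks remain */
        struct node *parent;
};

static bool exp_done(struct node *np)
{
        return np->exp_tasks == NULL && np->expmask == 0;
}

/* Call after clearing the reporter's own bit(s) in np->expmask. */
void report_exp_up(struct node *np, bool *wake_gp_driver)
{
        *wake_gp_driver = false;
        for (;;) {
                if (!exp_done(np))
                        return;          /* someone below still owes a QS */
                if (np->parent == NULL) {
                        *wake_gp_driver = true;  /* whole tree is quiet */
                        return;
                }
                /* Subtree quiet: clear our bit in the parent and climb. */
                np->parent->expmask &= ~np->grpmask;
                np = np->parent;
        }
}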
233 struct rcu_node *rnp, bool wake) in rcu_report_exp_rnp() argument
237 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_exp_rnp()
238 __rcu_report_exp_rnp(rsp, rnp, wake, flags); in rcu_report_exp_rnp()
245 static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_report_exp_cpu_mult() argument
250 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_exp_cpu_mult()
251 if (!(rnp->expmask & mask)) { in rcu_report_exp_cpu_mult()
252 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_exp_cpu_mult()
255 rnp->expmask &= ~mask; in rcu_report_exp_cpu_mult()
256 __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */ in rcu_report_exp_cpu_mult()
290 struct rcu_node *rnp = rdp->mynode; in exp_funnel_lock() local
294 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) && in exp_funnel_lock()
295 (rnp == rnp_root || in exp_funnel_lock()
307 for (; rnp != NULL; rnp = rnp->parent) { in exp_funnel_lock()
312 spin_lock(&rnp->exp_lock); in exp_funnel_lock()
313 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) { in exp_funnel_lock()
316 spin_unlock(&rnp->exp_lock); in exp_funnel_lock()
317 trace_rcu_exp_funnel_lock(rsp->name, rnp->level, in exp_funnel_lock()
318 rnp->grplo, rnp->grphi, in exp_funnel_lock()
320 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], in exp_funnel_lock()
324 rnp->exp_seq_rq = s; /* Followers can wait on us. */ in exp_funnel_lock()
325 spin_unlock(&rnp->exp_lock); in exp_funnel_lock()
326 trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo, in exp_funnel_lock()
327 rnp->grphi, TPS("nxtlvl")); in exp_funnel_lock()
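
The exp_funnel_lock() references show the funnel walk from a task's leaf toward the root: at each node the task either finds ->exp_seq_rq already at or beyond the sequence snapshot s it needs, in which case it parks on that node's exp_wq bucket and lets the earlier requester drive the grace period, or it records s and moves up; reaching the root means it must drive the expedited GP itself. A sketch of just that decision logic, with the actual sleeping left out; the >> 2 reflects the assumed rcu_seq_ctr() counter/state split, so treat the bucket math as an assumption:

#include <stdbool.h>
#include <stddef.h>

struct node {
        unsigned long exp_seq_rq;  /* highest expedited sequence requested here */
        struct node *parent;
};

/* Wrap-safe "a >= b" on sequence numbers, as ULONG_CMP_GE does. */
static bool seq_ge(unsigned long a, unsigned long b)
{
        return (long)(a - b) >= 0;
}

/*
 * Walk leaf -> root. Returns true if an earlier requester already covers
 * sequence s; *bucket then names the per-node wait queue to sleep on.
 * Returns false if the caller reached the root and must drive the GP.
 */
bool funnel(struct node *leaf, unsigned long s, int *bucket)
{
        struct node *np;

        for (np = leaf; np; np = np->parent) {
                if (seq_ge(np->exp_seq_rq, s)) {
                        *bucket = (int)((s >> 2) & 0x3);  /* assumed rcu_seq_ctr(s) & 0x3 */
                        return true;    /* wait on np->exp_wq[*bucket] */
                }
                np->exp_seq_rq = s;     /* followers can now wait on this node */
        }
        return false;                   /* we own the expedited grace period */
}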
344 struct rcu_node *rnp; in sync_sched_exp_handler() local
348 rnp = rdp->mynode; in sync_sched_exp_handler()
349 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || in sync_sched_exp_handler()
368 struct rcu_node *rnp; in sync_sched_exp_online_cleanup() local
372 rnp = rdp->mynode; in sync_sched_exp_online_cleanup()
373 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) in sync_sched_exp_online_cleanup()
393 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew); in sync_rcu_exp_select_node_cpus() local
397 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
401 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { in sync_rcu_exp_select_node_cpus()
402 unsigned long mask = leaf_node_cpu_bit(rnp, cpu); in sync_rcu_exp_select_node_cpus()
408 !(rnp->qsmaskinitnext & mask)) { in sync_rcu_exp_select_node_cpus()
418 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test; in sync_rcu_exp_select_node_cpus()
425 if (rcu_preempt_has_tasks(rnp)) in sync_rcu_exp_select_node_cpus()
426 rnp->exp_tasks = rnp->blkd_tasks.next; in sync_rcu_exp_select_node_cpus()
427 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
430 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { in sync_rcu_exp_select_node_cpus()
431 unsigned long mask = leaf_node_cpu_bit(rnp, cpu); in sync_rcu_exp_select_node_cpus()
448 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
449 if ((rnp->qsmaskinitnext & mask) && in sync_rcu_exp_select_node_cpus()
450 (rnp->expmask & mask)) { in sync_rcu_exp_select_node_cpus()
452 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
458 if (!(rnp->expmask & mask)) in sync_rcu_exp_select_node_cpus()
460 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
465 rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false); in sync_rcu_exp_select_node_cpus()
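
sync_rcu_exp_select_node_cpus() partitions one leaf's ->expmask: CPUs that are offline (no longer in ->qsmaskinitnext) or sitting in an extended quiescent state can be reported immediately (mask_ofl_test, handed to rcu_report_exp_cpu_mult()), while the rest get an IPI (mask_ofl_ipi), with a re-check under the node lock when an IPI fails because its target raced offline. A simplified sketch of the partitioning step only; the idle predicate is a caller-supplied stand-in for the kernel's dynticks check, and bit i simply means the i-th CPU covered by the leaf:

#include <stdbool.h>

struct leaf {
        unsigned long expmask;        /* CPUs that still owe a quiescent state */
        unsigned long qsmaskinitnext; /* CPUs currently online under this leaf */
};

/* Stand-in predicate: is this CPU in an extended quiescent state? */
typedef bool (*idle_fn)(int cpu);

/*
 * Split lf->expmask into CPUs that can be reported right away (returned)
 * and CPUs that need an IPI (*ipi_mask).
 */
unsigned long select_cpus(struct leaf *lf, idle_fn cpu_is_idle,
                          unsigned long *ipi_mask)
{
        unsigned long test_mask = 0;
        int i;

        for (i = 0; i < (int)(8 * sizeof(unsigned long)); i++) {
                unsigned long bit = 1UL << i;

                if (!(lf->expmask & bit))
                        continue;
                if (!(lf->qsmaskinitnext & bit) || cpu_is_idle(i))
                        test_mask |= bit;   /* offline or idle: already quiet */
        }
        *ipi_mask = lf->expmask & ~test_mask;
        return test_mask;   /* report these immediately (cf. rcu_report_exp_cpu_mult() above) */
}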
476 struct rcu_node *rnp; in sync_rcu_exp_select_cpus() local
483 rcu_for_each_leaf_node(rsp, rnp) { in sync_rcu_exp_select_cpus()
484 rnp->exp_need_flush = false; in sync_rcu_exp_select_cpus()
485 if (!READ_ONCE(rnp->expmask)) in sync_rcu_exp_select_cpus()
487 rnp->rew.rew_func = func; in sync_rcu_exp_select_cpus()
488 rnp->rew.rew_rsp = rsp; in sync_rcu_exp_select_cpus()
491 rcu_is_last_leaf_node(rsp, rnp)) { in sync_rcu_exp_select_cpus()
493 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus()
496 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); in sync_rcu_exp_select_cpus()
498 cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask); in sync_rcu_exp_select_cpus()
500 if (unlikely(cpu > rnp->grphi)) in sync_rcu_exp_select_cpus()
502 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); in sync_rcu_exp_select_cpus()
504 rnp->exp_need_flush = true; in sync_rcu_exp_select_cpus()
508 rcu_for_each_leaf_node(rsp, rnp) in sync_rcu_exp_select_cpus()
509 if (rnp->exp_need_flush) in sync_rcu_exp_select_cpus()
510 flush_work(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus()
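
sync_rcu_exp_select_cpus() fans that per-leaf work out: each leaf with outstanding ->expmask bits gets its selection work queued on the rcu_par_gp_wq workqueue, preferably on an online CPU inside the leaf's grplo..grphi range, with a direct inline call in the fallback cases (the last leaf node among them); a second pass then flushes whatever was queued. A pthread sketch of the same fan-out/flush shape, with threads standing in for queue_work_on()/flush_work(); this is not the kernel mechanism:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct leaf {
        unsigned long expmask;     /* CPUs still to be selected/IPIed */
        bool need_flush;           /* did we hand work to a helper?   */
        pthread_t worker;
};

static void *select_node_cpus(void *arg)
{
        struct leaf *lf = arg;

        /* Placeholder for the per-leaf selection work sketched earlier. */
        printf("selecting CPUs for leaf with mask %#lx\n", lf->expmask);
        return NULL;
}

int main(void)
{
        struct leaf leaves[3] = { { .expmask = 0x3 }, { 0 }, { .expmask = 0x4 } };

        /* Pass 1: fan out, skipping leaves with nothing outstanding. */
        for (int i = 0; i < 3; i++) {
                leaves[i].need_flush = false;
                if (!leaves[i].expmask)
                        continue;
                if (pthread_create(&leaves[i].worker, NULL,
                                   select_node_cpus, &leaves[i]) == 0)
                        leaves[i].need_flush = true;
                else
                        select_node_cpus(&leaves[i]);  /* fallback: run inline */
        }

        /* Pass 2: wait for every helper we actually started. */
        for (int i = 0; i < 3; i++)
                if (leaves[i].need_flush)
                        pthread_join(leaves[i].worker, NULL);
        return 0;
}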
520 struct rcu_node *rnp; in synchronize_sched_expedited_wait() local
542 rcu_for_each_leaf_node(rsp, rnp) { in synchronize_sched_expedited_wait()
543 ndetected += rcu_print_task_exp_stall(rnp); in synchronize_sched_expedited_wait()
544 for_each_leaf_node_possible_cpu(rnp, cpu) { in synchronize_sched_expedited_wait()
547 mask = leaf_node_cpu_bit(rnp, cpu); in synchronize_sched_expedited_wait()
548 if (!(rnp->expmask & mask)) in synchronize_sched_expedited_wait()
554 "o."[!!(rdp->grpmask & rnp->expmaskinit)], in synchronize_sched_expedited_wait()
555 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]); in synchronize_sched_expedited_wait()
563 rcu_for_each_node_breadth_first(rsp, rnp) { in synchronize_sched_expedited_wait()
564 if (rnp == rnp_root) in synchronize_sched_expedited_wait()
566 if (sync_rcu_preempt_exp_done_unlocked(rnp)) in synchronize_sched_expedited_wait()
569 rnp->level, rnp->grplo, rnp->grphi, in synchronize_sched_expedited_wait()
570 rnp->expmask, in synchronize_sched_expedited_wait()
571 ".T"[!!rnp->exp_tasks]); in synchronize_sched_expedited_wait()
575 rcu_for_each_leaf_node(rsp, rnp) { in synchronize_sched_expedited_wait()
576 for_each_leaf_node_possible_cpu(rnp, cpu) { in synchronize_sched_expedited_wait()
577 mask = leaf_node_cpu_bit(rnp, cpu); in synchronize_sched_expedited_wait()
578 if (!(rnp->expmask & mask)) in synchronize_sched_expedited_wait()
595 struct rcu_node *rnp; in rcu_exp_wait_wake() local
607 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_exp_wait_wake()
608 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) { in rcu_exp_wait_wake()
609 spin_lock(&rnp->exp_lock); in rcu_exp_wait_wake()
611 if (ULONG_CMP_LT(rnp->exp_seq_rq, s)) in rcu_exp_wait_wake()
612 rnp->exp_seq_rq = s; in rcu_exp_wait_wake()
613 spin_unlock(&rnp->exp_lock); in rcu_exp_wait_wake()
616 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]); in rcu_exp_wait_wake()
656 struct rcu_node *rnp; in _synchronize_rcu_expedited() local
685 rnp = rcu_get_root(rsp); in _synchronize_rcu_expedited()
686 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], in _synchronize_rcu_expedited()
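
The final groups, rcu_exp_wait_wake() and _synchronize_rcu_expedited(), are the two ends of one rendezvous: a requester sleeps on one of four root-node wait queues chosen by the counter portion of its sequence snapshot s, and the task that drove the GP pushes ->exp_seq_rq forward on every node (breadth first) and wakes exactly that bucket. A sketch of the bookkeeping only, showing the wrap-safe comparison and the bucket choice; the >> 2 is again the assumed rcu_seq_ctr() split, and real sleeping/waking is replaced by printouts:

#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe "a < b" on sequence numbers, as ULONG_CMP_LT does. */
static bool seq_lt(unsigned long a, unsigned long b)
{
        return (long)(a - b) < 0;
}

/* Which of the four exp_wq[] buckets serves snapshot s. */
static int wq_bucket(unsigned long s)
{
        return (int)((s >> 2) & 0x3);   /* assumed rcu_seq_ctr(s) & 0x3 */
}

struct node { unsigned long exp_seq_rq; };

/* Wake side: advance each node's request number, then wake s's bucket. */
static void wait_wake(struct node *nodes, int n, unsigned long s)
{
        for (int i = 0; i < n; i++)
                if (seq_lt(nodes[i].exp_seq_rq, s))
                        nodes[i].exp_seq_rq = s;  /* later funnel walkers see this GP */
        printf("wake_up_all(exp_wq[%d])\n", wq_bucket(s));
}

int main(void)
{
        struct node tree[4] = { { 0 } };
        unsigned long s = 8;            /* some completed GP sequence */

        wait_wake(tree, 4, s);
        /* Wait side: a requester with snapshot s blocks on the same bucket. */
        printf("waiter sleeps on exp_wq[%d] until its GP is seen done\n",
               wq_bucket(s));
        return 0;
}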