Lines matching refs: rnp (uses of the rcu_node pointer rnp in the RCU expedited grace-period code, kernel/rcu/tree_exp.h; the leading number on each line is the source line in that file)
13 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
80 struct rcu_node *rnp; in sync_exp_reset_tree_hotplug() local
92 rcu_for_each_leaf_node(rnp) { in sync_exp_reset_tree_hotplug()
93 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
94 if (rnp->expmaskinit == rnp->expmaskinitnext) { in sync_exp_reset_tree_hotplug()
95 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
100 oldmask = rnp->expmaskinit; in sync_exp_reset_tree_hotplug()
101 rnp->expmaskinit = rnp->expmaskinitnext; in sync_exp_reset_tree_hotplug()
102 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
109 mask = rnp->grpmask; in sync_exp_reset_tree_hotplug()
110 rnp_up = rnp->parent; in sync_exp_reset_tree_hotplug()
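sync_exp_reset_tree_hotplug() brings the tree's initial expedited masks up to date after CPU hotplug: each leaf folds ->expmaskinitnext into ->expmaskinit, and a leaf that just gained its first CPU propagates its ->grpmask bit up through ancestors that do not yet have it. A minimal userspace sketch of that walk (struct node is hypothetical and the per-node locking shown above is elided):

    struct node {
        unsigned long expmaskinit;      /* mask to wait on at GP start */
        unsigned long expmaskinitnext;  /* updated as CPUs come and go */
        unsigned long grpmask;          /* our bit in the parent's masks */
        struct node *parent;
    };

    static void reset_tree_hotplug(struct node **leaves, int nleaves)
    {
        for (int i = 0; i < nleaves; i++) {
            struct node *np = leaves[i];
            unsigned long oldmask = np->expmaskinit;
            unsigned long mask;

            if (np->expmaskinit == np->expmaskinitnext)
                continue;                       /* no hotplug change here */
            np->expmaskinit = np->expmaskinitnext;

            if (oldmask)                        /* ancestors already know us */
                continue;
            mask = np->grpmask;                 /* first CPU ever: tell parents */
            for (struct node *up = np->parent; up; up = up->parent) {
                if (up->expmaskinit & mask)
                    break;                      /* already recorded here */
                up->expmaskinit |= mask;
                mask = up->grpmask;             /* bit for the next level up */
            }
        }
    }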
133 struct rcu_node *rnp; in sync_exp_reset_tree() local
136 rcu_for_each_node_breadth_first(rnp) { in sync_exp_reset_tree()
137 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_exp_reset_tree()
138 WARN_ON_ONCE(rnp->expmask); in sync_exp_reset_tree()
139 WRITE_ONCE(rnp->expmask, rnp->expmaskinit); in sync_exp_reset_tree()
140 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree()
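sync_exp_reset_tree() then arms each new expedited grace period: a breadth-first pass copies every node's ->expmaskinit into ->expmask, the working mask that the rest of this machinery clears bit by bit; the WARN_ON_ONCE() checks that the previous grace period really drained it. The hotplug pass above must run first so ->expmaskinit is current.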
150 static bool sync_rcu_exp_done(struct rcu_node *rnp) in sync_rcu_exp_done() argument
152 raw_lockdep_assert_held_rcu_node(rnp); in sync_rcu_exp_done()
153 return READ_ONCE(rnp->exp_tasks) == NULL && in sync_rcu_exp_done()
154 READ_ONCE(rnp->expmask) == 0; in sync_rcu_exp_done()
161 static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp) in sync_rcu_exp_done_unlocked() argument
166 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_rcu_exp_done_unlocked()
167 ret = sync_rcu_exp_done(rnp); in sync_rcu_exp_done_unlocked()
168 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_exp_done_unlocked()
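sync_rcu_exp_done() is the done-ness predicate, valid only with the node lock held (hence the lockdep assertion); sync_rcu_exp_done_unlocked() merely brackets it with the lock. The same pattern in plain pthreads, as a sketch rather than kernel code:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct xnode {
        pthread_mutex_t lock;    /* stands in for the rcu_node spinlock */
        unsigned long expmask;   /* CPUs/subtrees still blocking the GP */
        void *exp_tasks;         /* first preempted reader, or NULL */
    };

    /* Caller must hold np->lock. */
    static bool exp_done(struct xnode *np)
    {
        return np->exp_tasks == NULL && np->expmask == 0;
    }

    static bool exp_done_unlocked(struct xnode *np)
    {
        bool ret;

        pthread_mutex_lock(&np->lock);
        ret = exp_done(np);
        pthread_mutex_unlock(&np->lock);
        return ret;
    }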
182 static void __rcu_report_exp_rnp(struct rcu_node *rnp, in __rcu_report_exp_rnp() argument
184 __releases(rnp->lock) in __rcu_report_exp_rnp()
188 raw_lockdep_assert_held_rcu_node(rnp); in __rcu_report_exp_rnp()
190 if (!sync_rcu_exp_done(rnp)) { in __rcu_report_exp_rnp()
191 if (!rnp->expmask) in __rcu_report_exp_rnp()
192 rcu_initiate_boost(rnp, flags); in __rcu_report_exp_rnp()
194 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __rcu_report_exp_rnp()
197 if (rnp->parent == NULL) { in __rcu_report_exp_rnp()
198 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __rcu_report_exp_rnp()
205 mask = rnp->grpmask; in __rcu_report_exp_rnp()
206 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */ in __rcu_report_exp_rnp()
207 rnp = rnp->parent; in __rcu_report_exp_rnp()
208 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */ in __rcu_report_exp_rnp()
209 WARN_ON_ONCE(!(rnp->expmask & mask)); in __rcu_report_exp_rnp()
210 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); in __rcu_report_exp_rnp()
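__rcu_report_exp_rnp() is the heart of completion reporting: when a node becomes quiet, its ->grpmask bit is cleared in the parent, and the climb continues as long as each newly visited node is also quiet; whoever quiets the root ends the grace period and wakes the waiter. A sketch with the per-level lock handoff collapsed away (struct enode is hypothetical):

    struct enode {
        unsigned long expmask;   /* children/CPUs not yet done */
        unsigned long grpmask;   /* our bit in parent->expmask */
        struct enode *parent;
    };

    /* The ->exp_tasks check and the irqs-disabled lock juggling at each
     * level are elided; wake_root stands in for waking the GP waiter. */
    static void report_exp_node(struct enode *np, void (*wake_root)(void))
    {
        for (;;) {
            if (np->expmask)
                return;             /* this subtree is not yet quiet */
            if (!np->parent) {
                wake_root();        /* root is quiet: GP complete */
                return;
            }
            np->parent->expmask &= ~np->grpmask;  /* report one level up */
            np = np->parent;
        }
    }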
218 static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake) in rcu_report_exp_rnp() argument
222 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_exp_rnp()
223 __rcu_report_exp_rnp(rnp, wake, flags); in rcu_report_exp_rnp()
230 static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, in rcu_report_exp_cpu_mult() argument
237 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_exp_cpu_mult()
238 if (!(rnp->expmask & mask)) { in rcu_report_exp_cpu_mult()
239 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_exp_cpu_mult()
242 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); in rcu_report_exp_cpu_mult()
243 for_each_leaf_node_cpu_mask(rnp, cpu, mask) { in rcu_report_exp_cpu_mult()
250 __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */ in rcu_report_exp_cpu_mult()
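rcu_report_exp_cpu_mult() is the multi-CPU entry into that climb: it clears a whole mask of CPU bits in a leaf's ->expmask (bailing out early if none were set) and then hands the leaf to __rcu_report_exp_rnp(), which propagates any resulting all-quiet state upward and releases the node lock.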
283 struct rcu_node *rnp = rdp->mynode; in exp_funnel_lock() local
287 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) && in exp_funnel_lock()
288 (rnp == rnp_root || in exp_funnel_lock()
300 for (; rnp != NULL; rnp = rnp->parent) { in exp_funnel_lock()
305 spin_lock(&rnp->exp_lock); in exp_funnel_lock()
306 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) { in exp_funnel_lock()
309 spin_unlock(&rnp->exp_lock); in exp_funnel_lock()
310 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level, in exp_funnel_lock()
311 rnp->grplo, rnp->grphi, in exp_funnel_lock()
313 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], in exp_funnel_lock()
317 WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */ in exp_funnel_lock()
318 spin_unlock(&rnp->exp_lock); in exp_funnel_lock()
319 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level, in exp_funnel_lock()
320 rnp->grplo, rnp->grphi, TPS("nxtlvl")); in exp_funnel_lock()
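exp_funnel_lock() is what keeps many concurrent synchronize_rcu_expedited() callers from each starting a grace period: a caller needing sequence s climbs from its leaf, and the moment it meets a node whose ->exp_seq_rq already covers s it parks on that node's wait queue instead, so at most one caller per needed grace period reaches the root. A pthread sketch of the climb (the fast-path checks and ULONG_CMP_GE() wraparound handling are omitted; wait_for_seq() is a hypothetical stand-in for the wait_event() call above):

    #include <pthread.h>
    #include <stdbool.h>

    struct fnode {
        pthread_mutex_t exp_lock;
        unsigned long exp_seq_rq;   /* highest GP sequence requested here */
        struct fnode *parent;
    };

    extern void wait_for_seq(struct fnode *np, unsigned long s); /* hypothetical */

    /* Returns true if an earlier request already covers sequence s (the
     * caller just waits); false if the caller must drive the GP itself. */
    static bool funnel_lock(struct fnode *np, unsigned long s)
    {
        for (; np; np = np->parent) {
            pthread_mutex_lock(&np->exp_lock);
            if (np->exp_seq_rq >= s) {          /* kernel: ULONG_CMP_GE() */
                pthread_mutex_unlock(&np->exp_lock);
                wait_for_seq(np, s);            /* ride the earlier request */
                return true;
            }
            np->exp_seq_rq = s;                 /* followers can wait on us */
            pthread_mutex_unlock(&np->exp_lock);
        }
        return false;                           /* reached the root: we drive */
    }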
346 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew); in sync_rcu_exp_select_node_cpus() local
348 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
352 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { in sync_rcu_exp_select_node_cpus()
358 !(rnp->qsmaskinitnext & mask)) { in sync_rcu_exp_select_node_cpus()
368 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test; in sync_rcu_exp_select_node_cpus()
375 if (rcu_preempt_has_tasks(rnp)) in sync_rcu_exp_select_node_cpus()
376 WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next); in sync_rcu_exp_select_node_cpus()
377 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
380 for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) { in sync_rcu_exp_select_node_cpus()
400 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
401 if ((rnp->qsmaskinitnext & mask) && in sync_rcu_exp_select_node_cpus()
402 (rnp->expmask & mask)) { in sync_rcu_exp_select_node_cpus()
404 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
410 if (rnp->expmask & mask) in sync_rcu_exp_select_node_cpus()
412 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_exp_select_node_cpus()
416 rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false); in sync_rcu_exp_select_node_cpus()
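This worker decides, per leaf, which ->expmask CPUs can be ticked off immediately (idle, or gone offline per ->qsmaskinitnext) and which must be IPIed; it also snapshots ->blkd_tasks into ->exp_tasks so preempted readers are waited on. The partition step, sketched with hypothetical cpu_idle[]/cpu_online[] arrays standing in for the dynticks and ->qsmaskinitnext state:

    #include <stdbool.h>

    #define NCPUS 64

    static void select_node_cpus(unsigned long expmask,
                                 const bool cpu_idle[NCPUS],
                                 const bool cpu_online[NCPUS],
                                 unsigned long *mask_ofl_test,  /* report now */
                                 unsigned long *mask_ofl_ipi)   /* must IPI */
    {
        *mask_ofl_test = 0;
        for (int cpu = 0; cpu < NCPUS; cpu++) {
            unsigned long bit = 1UL << cpu;

            if (!(expmask & bit))
                continue;                    /* not blocking this GP */
            if (cpu_idle[cpu] || !cpu_online[cpu])
                *mask_ofl_test |= bit;       /* quiescent without an IPI */
        }
        *mask_ofl_ipi = expmask & ~*mask_ofl_test;
    }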
426 struct rcu_node *rnp; in sync_rcu_exp_select_cpus() local
433 rcu_for_each_leaf_node(rnp) { in sync_rcu_exp_select_cpus()
434 rnp->exp_need_flush = false; in sync_rcu_exp_select_cpus()
435 if (!READ_ONCE(rnp->expmask)) in sync_rcu_exp_select_cpus()
439 rcu_is_last_leaf_node(rnp)) { in sync_rcu_exp_select_cpus()
441 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus()
444 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); in sync_rcu_exp_select_cpus()
445 cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1); in sync_rcu_exp_select_cpus()
447 if (unlikely(cpu > rnp->grphi - rnp->grplo)) in sync_rcu_exp_select_cpus()
450 cpu += rnp->grplo; in sync_rcu_exp_select_cpus()
451 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); in sync_rcu_exp_select_cpus()
452 rnp->exp_need_flush = true; in sync_rcu_exp_select_cpus()
456 rcu_for_each_leaf_node(rnp) in sync_rcu_exp_select_cpus()
457 if (rnp->exp_need_flush) in sync_rcu_exp_select_cpus()
458 flush_work(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus()
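The driver fans the per-leaf scan out across workqueues, via queue_work_on() onto a CPU inside each leaf's grplo..grphi range, then flushes every queued work item; leaves with an empty ->expmask are skipped and the last leaf is handled inline. The same fan-out/join shape with plain threads (a sketch: one thread per leaf stands in for the workqueue, and thread-creation failure stands in for the inline path):

    #include <pthread.h>

    struct leaf_work {
        pthread_t thr;
        int need_flush;                  /* mirrors ->exp_need_flush */
        /* per-leaf scan state would live here */
    };

    extern void *leaf_scan(void *arg);   /* hypothetical per-leaf worker */

    static void select_cpus(struct leaf_work *leaves, int nleaves)
    {
        for (int i = 0; i < nleaves; i++) {
            leaves[i].need_flush = 0;
            if (pthread_create(&leaves[i].thr, NULL, leaf_scan, &leaves[i]))
                leaf_scan(&leaves[i]);       /* fall back to inline scan */
            else
                leaves[i].need_flush = 1;
        }
        for (int i = 0; i < nleaves; i++)    /* flush_work() analogue */
            if (leaves[i].need_flush)
                pthread_join(leaves[i].thr, NULL);
    }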
494 struct rcu_node *rnp; in synchronize_rcu_expedited_wait() local
503 rcu_for_each_leaf_node(rnp) { in synchronize_rcu_expedited_wait()
504 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { in synchronize_rcu_expedited_wait()
528 rcu_for_each_leaf_node(rnp) { in synchronize_rcu_expedited_wait()
529 ndetected += rcu_print_task_exp_stall(rnp); in synchronize_rcu_expedited_wait()
530 for_each_leaf_node_possible_cpu(rnp, cpu) { in synchronize_rcu_expedited_wait()
533 mask = leaf_node_cpu_bit(rnp, cpu); in synchronize_rcu_expedited_wait()
534 if (!(READ_ONCE(rnp->expmask) & mask)) in synchronize_rcu_expedited_wait()
540 "o."[!!(rdp->grpmask & rnp->expmaskinit)], in synchronize_rcu_expedited_wait()
541 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]); in synchronize_rcu_expedited_wait()
550 rcu_for_each_node_breadth_first(rnp) { in synchronize_rcu_expedited_wait()
551 if (rnp == rnp_root) in synchronize_rcu_expedited_wait()
553 if (sync_rcu_exp_done_unlocked(rnp)) in synchronize_rcu_expedited_wait()
556 rnp->level, rnp->grplo, rnp->grphi, in synchronize_rcu_expedited_wait()
557 data_race(rnp->expmask), in synchronize_rcu_expedited_wait()
558 ".T"[!!data_race(rnp->exp_tasks)]); in synchronize_rcu_expedited_wait()
562 rcu_for_each_leaf_node(rnp) { in synchronize_rcu_expedited_wait()
563 for_each_leaf_node_possible_cpu(rnp, cpu) { in synchronize_rcu_expedited_wait()
564 mask = leaf_node_cpu_bit(rnp, cpu); in synchronize_rcu_expedited_wait()
565 if (!(READ_ONCE(rnp->expmask) & mask)) in synchronize_rcu_expedited_wait()
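On timeout, the stall report built here makes three passes: a leaf pass that prints preempted tasks via rcu_print_task_exp_stall() and each holdout CPU with flag characters derived from its bits in ->expmaskinit and ->expmaskinitnext; a breadth-first pass (skipping the root) showing which inner nodes still have ->expmask bits or ->exp_tasks outstanding; and a final leaf pass that dumps the stacks of CPUs whose ->expmask bits remain set.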
582 struct rcu_node *rnp; in rcu_exp_wait_wake() local
593 rcu_for_each_node_breadth_first(rnp) { in rcu_exp_wait_wake()
594 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) { in rcu_exp_wait_wake()
595 spin_lock(&rnp->exp_lock); in rcu_exp_wait_wake()
597 if (ULONG_CMP_LT(rnp->exp_seq_rq, s)) in rcu_exp_wait_wake()
598 WRITE_ONCE(rnp->exp_seq_rq, s); in rcu_exp_wait_wake()
599 spin_unlock(&rnp->exp_lock); in rcu_exp_wait_wake()
602 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]); in rcu_exp_wait_wake()
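rcu_exp_wait_wake() first walks the tree breadth-first advancing every node's ->exp_seq_rq to s, so that funnel climbers arriving late see the grace period as already requested, then wakes the matching wait-queue slot on each node. The & 0x3 indexing is worth spelling out: each rcu_node carries four exp_wq[] queues, selected by the low bits of the grace-period counter (the bottom two bits of the raw sequence number are state), so a wakeup for sequence s cannot wake waiters queued for a later period:

    /* Mirrors the kernel's rcu_seq_ctr(): strip the two low-order
     * state bits, then use the next two counter bits to pick one of
     * the four per-node wait queues. */
    #define RCU_SEQ_CTR_SHIFT 2

    static inline unsigned long rcu_seq_ctr(unsigned long s)
    {
        return s >> RCU_SEQ_CTR_SHIFT;
    }

    static inline int exp_wq_index(unsigned long s)
    {
        return (int)(rcu_seq_ctr(s) & 0x3);
    }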
646 struct rcu_node *rnp = rdp->mynode; in rcu_exp_handler() local
679 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_exp_handler()
680 if (rnp->expmask & rdp->grpmask) { in rcu_exp_handler()
684 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_exp_handler()
702 static int rcu_print_task_exp_stall(struct rcu_node *rnp) in rcu_print_task_exp_stall() argument
708 if (!READ_ONCE(rnp->exp_tasks)) in rcu_print_task_exp_stall()
710 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_print_task_exp_stall()
711 t = list_entry(rnp->exp_tasks->prev, in rcu_print_task_exp_stall()
713 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_exp_stall()
717 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_print_task_exp_stall()
737 struct rcu_node *rnp = rdp->mynode; in rcu_exp_handler() local
739 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || in rcu_exp_handler()
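Two builds, two rcu_exp_handler() IPI handlers: the preemptible-RCU variant (the block at line 646) must cope with readers preempted inside a critical section, deferring the quiescent-state report through ->exp_tasks and the read-unlock path when ->expmask still shows this CPU; the !PREEMPT_RCU variant (line 737) bails out unless this CPU's ->grpmask bit is still set in ->expmask, and otherwise reports the quiescent state immediately or defers it depending on whether the interrupted context was quiescent.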
756 struct rcu_node *rnp; in sync_sched_exp_online_cleanup() local
759 rnp = rdp->mynode; in sync_sched_exp_online_cleanup()
762 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || in sync_sched_exp_online_cleanup()
786 static int rcu_print_task_exp_stall(struct rcu_node *rnp) in rcu_print_task_exp_stall() argument
817 struct rcu_node *rnp; in synchronize_rcu_expedited() local
852 rnp = rcu_get_root(); in synchronize_rcu_expedited()
853 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], in synchronize_rcu_expedited()
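Read together, these references trace the entire expedited path: synchronize_rcu_expedited() snapshots the sequence number it needs, races through exp_funnel_lock(), and the funnel winner's work selects holdout CPUs (sync_rcu_exp_select_cpus()), IPIs them (rcu_exp_handler()), waits for the tree to drain (synchronize_rcu_expedited_wait()), and finally fans out wakeups (rcu_exp_wait_wake()). As the last two lines show, every caller ends up sleeping on the root rcu_node's exp_wq slot for its sequence number until that wakeup arrives.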