Lines Matching refs:rnp
Cross-reference hits for rnp, the conventional name for a struct rcu_node pointer in the kernel's tree-RCU code; the functions below are evidently from kernel/rcu/tree_plugin.h. Each hit is prefixed with its file line number, and the trailing "in func()" names the enclosing function, with "argument"/"local" noting how rnp is bound there.
130 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
177 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_preempt_ctxt_queue() argument
178 __releases(rnp->lock) /* But leaves rrupts disabled. */ in rcu_preempt_ctxt_queue()
180 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + in rcu_preempt_ctxt_queue()
181 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + in rcu_preempt_ctxt_queue()
182 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + in rcu_preempt_ctxt_queue()
183 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); in rcu_preempt_ctxt_queue()
186 raw_lockdep_assert_held_rcu_node(rnp); in rcu_preempt_ctxt_queue()
187 WARN_ON_ONCE(rdp->mynode != rnp); in rcu_preempt_ctxt_queue()
188 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); in rcu_preempt_ctxt_queue()
190 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask & in rcu_preempt_ctxt_queue()
211 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
229 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
242 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
253 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
269 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { in rcu_preempt_ctxt_queue()
270 rnp->gp_tasks = &t->rcu_node_entry; in rcu_preempt_ctxt_queue()
271 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); in rcu_preempt_ctxt_queue()
273 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) in rcu_preempt_ctxt_queue()
274 rnp->exp_tasks = &t->rcu_node_entry; in rcu_preempt_ctxt_queue()
276 !(rnp->qsmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
278 !(rnp->expmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
279 raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */ in rcu_preempt_ctxt_queue()
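
The hits at file lines 177-279 above are rcu_preempt_ctxt_queue(), which folds four facts about a newly preempted reader into a single blkd_state value, uses it to pick the task's position on rnp->blkd_tasks, and then sets the ->gp_tasks/->exp_tasks anchors if this task is the first blocker. A minimal userspace sketch of that encoding and the anchor step follows; the bit values are illustrative assumptions, not the kernel's actual constants.

#include <stdbool.h>
#include <stdio.h>

#define RCU_GP_TASKS	0x1	/* ->gp_tasks anchor already set (assumed value) */
#define RCU_EXP_TASKS	0x2	/* ->exp_tasks anchor already set (assumed value) */
#define RCU_GP_BLKD	0x4	/* this task blocks the current normal GP */
#define RCU_EXP_BLKD	0x8	/* this task blocks the current expedited GP */

struct toy_node {
	bool gp_tasks_set;	/* stands in for rnp->gp_tasks != NULL */
	bool exp_tasks_set;	/* stands in for rnp->exp_tasks != NULL */
	unsigned long qsmask;	/* CPUs still owing a normal QS */
	unsigned long expmask;	/* CPUs still owing an expedited QS */
};

/* Mirror of file lines 180-183: fold four facts into one state value. */
static int blkd_state(const struct toy_node *rnp, unsigned long grpmask)
{
	return (rnp->gp_tasks_set ? RCU_GP_TASKS : 0) +
	       (rnp->exp_tasks_set ? RCU_EXP_TASKS : 0) +
	       ((rnp->qsmask & grpmask) ? RCU_GP_BLKD : 0) +
	       ((rnp->expmask & grpmask) ? RCU_EXP_BLKD : 0);
}

int main(void)
{
	struct toy_node n = { .qsmask = 0x3 };	/* CPUs 0-1 owe a QS */
	int s = blkd_state(&n, 0x1);		/* this CPU is bit 0 */

	printf("blkd_state = %#x\n", s);

	/* Mirror of file lines 269-274: the first GP-blocking task queued
	 * while no anchor exists becomes the new ->gp_tasks anchor. */
	if (!n.gp_tasks_set && (s & RCU_GP_BLKD))
		n.gp_tasks_set = true;
	printf("->gp_tasks anchor is now %s\n", n.gp_tasks_set ? "set" : "unset");
	return 0;
}
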
339 struct rcu_node *rnp; in rcu_preempt_note_context_switch() local
348 rnp = rdp->mynode; in rcu_preempt_note_context_switch()
349 raw_spin_lock_rcu_node(rnp); in rcu_preempt_note_context_switch()
351 t->rcu_blocked_node = rnp; in rcu_preempt_note_context_switch()
358 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); in rcu_preempt_note_context_switch()
362 (rnp->qsmask & rdp->grpmask) in rcu_preempt_note_context_switch()
363 ? rnp->gp_seq in rcu_preempt_note_context_switch()
364 : rcu_seq_snap(&rnp->gp_seq)); in rcu_preempt_note_context_switch()
365 rcu_preempt_ctxt_queue(rnp, rdp); in rcu_preempt_note_context_switch()
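
File lines 339-365 are rcu_preempt_note_context_switch(): the outgoing reader records which grace period it blocks, using rnp->gp_seq directly when this CPU still owes the current GP a quiescent state, and rcu_seq_snap() otherwise. Below is a userspace rendition of the snap arithmetic, modeled on my reading of the helpers in kernel/rcu/rcu.h (the low two bits of gp_seq carry phase state, the rest counts grace periods); treat the details as approximate.

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Smallest sequence value at which a full grace period, started no
 * earlier than now, is guaranteed complete.  The kernel version also
 * issues a full memory barrier, omitted in this sketch. */
static unsigned long rcu_seq_snap(const unsigned long *sp)
{
	return (*sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

int main(void)
{
	/* Grace period 5 currently in progress (state bits nonzero). */
	unsigned long gp_seq = (5UL << RCU_SEQ_CTR_SHIFT) | 1;
	unsigned long s = rcu_seq_snap(&gp_seq);

	/* Snap lands at counter 7: the in-flight GP must end, then one
	 * more full GP must complete. */
	printf("gp_seq=%#lx snap=%#lx (counter %lu)\n",
	       gp_seq, s, s >> RCU_SEQ_CTR_SHIFT);
	return 0;
}
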
393 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
395 return rnp->gp_tasks != NULL; in rcu_preempt_blocked_readers_cgp()
447 struct rcu_node *rnp) in rcu_next_node_entry() argument
452 if (np == &rnp->blkd_tasks) in rcu_next_node_entry()
461 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
463 return !list_empty(&rnp->blkd_tasks); in rcu_preempt_has_tasks()
480 struct rcu_node *rnp; in rcu_read_unlock_special() local
545 rnp = t->rcu_blocked_node; in rcu_read_unlock_special()
546 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_read_unlock_special()
547 WARN_ON_ONCE(rnp != t->rcu_blocked_node); in rcu_read_unlock_special()
548 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); in rcu_read_unlock_special()
549 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); in rcu_read_unlock_special()
550 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq && in rcu_read_unlock_special()
551 (!empty_norm || rnp->qsmask)); in rcu_read_unlock_special()
552 empty_exp = sync_rcu_preempt_exp_done(rnp); in rcu_read_unlock_special()
554 np = rcu_next_node_entry(t, rnp); in rcu_read_unlock_special()
558 rnp->gp_seq, t->pid); in rcu_read_unlock_special()
559 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_read_unlock_special()
560 rnp->gp_tasks = np; in rcu_read_unlock_special()
561 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_read_unlock_special()
562 rnp->exp_tasks = np; in rcu_read_unlock_special()
565 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; in rcu_read_unlock_special()
566 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_read_unlock_special()
567 rnp->boost_tasks = np; in rcu_read_unlock_special()
576 empty_exp_now = sync_rcu_preempt_exp_done(rnp); in rcu_read_unlock_special()
577 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_read_unlock_special()
579 rnp->gp_seq, in rcu_read_unlock_special()
580 0, rnp->qsmask, in rcu_read_unlock_special()
581 rnp->level, in rcu_read_unlock_special()
582 rnp->grplo, in rcu_read_unlock_special()
583 rnp->grphi, in rcu_read_unlock_special()
584 !!rnp->gp_tasks); in rcu_read_unlock_special()
585 rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags); in rcu_read_unlock_special()
587 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_read_unlock_special()
592 rt_mutex_futex_unlock(&rnp->boost_mtx); in rcu_read_unlock_special()
599 rcu_report_exp_rnp(rcu_state_p, rnp, true); in rcu_read_unlock_special()
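
File lines 480-599 are rcu_read_unlock_special(), which dequeues the task from rnp->blkd_tasks and, when any of the ->gp_tasks/->exp_tasks/->boost_tasks anchors pointed at it, advances the anchor to the next entry, NULL if the list head is reached (rcu_next_node_entry(), file lines 447-452). A self-contained sketch of that fix-up; the intrusive list here is written for the sketch, and only the names mirror the kernel.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev; e->next = h;
	h->prev->next = e; h->prev = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next; e->next->prev = e->prev;
}

struct task { int pid; struct list_head rcu_node_entry; };

/* Cf. rcu_next_node_entry(): next entry, or NULL at the list head. */
static struct list_head *next_or_null(struct list_head *e,
				      struct list_head *head)
{
	return e->next == head ? NULL : e->next;
}

int main(void)
{
	struct list_head blkd_tasks, *gp_tasks, *np;
	struct task a = { .pid = 1 }, b = { .pid = 2 };

	list_init(&blkd_tasks);
	list_add_tail(&a.rcu_node_entry, &blkd_tasks);
	list_add_tail(&b.rcu_node_entry, &blkd_tasks);
	gp_tasks = &a.rcu_node_entry;		/* anchor at task a */

	/* Task a leaves its read-side critical section (cf. file lines
	 * 554-562): advance the anchor before unlinking the entry. */
	np = next_or_null(&a.rcu_node_entry, &blkd_tasks);
	if (gp_tasks == &a.rcu_node_entry)
		gp_tasks = np;			/* now points at task b */
	list_del(&a.rcu_node_entry);

	printf("gp_tasks now %s\n", gp_tasks ? "task b" : "NULL");
	return 0;
}

The stall printers further down (file lines 619-621 and 668-670) walk the same list from the anchor onward by seeding list_for_each_entry_continue() with gp_tasks->prev, so iteration starts exactly at the anchored entry.
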
609 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) in rcu_print_detail_task_stall_rnp() argument
614 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_print_detail_task_stall_rnp()
615 if (!rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_print_detail_task_stall_rnp()
616 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_print_detail_task_stall_rnp()
619 t = list_entry(rnp->gp_tasks->prev, in rcu_print_detail_task_stall_rnp()
621 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_detail_task_stall_rnp()
629 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_print_detail_task_stall_rnp()
638 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_print_detail_task_stall() local
640 rcu_print_detail_task_stall_rnp(rnp); in rcu_print_detail_task_stall()
641 rcu_for_each_leaf_node(rsp, rnp) in rcu_print_detail_task_stall()
642 rcu_print_detail_task_stall_rnp(rnp); in rcu_print_detail_task_stall()
645 static void rcu_print_task_stall_begin(struct rcu_node *rnp) in rcu_print_task_stall_begin() argument
648 rnp->level, rnp->grplo, rnp->grphi); in rcu_print_task_stall_begin()
660 static int rcu_print_task_stall(struct rcu_node *rnp) in rcu_print_task_stall() argument
665 if (!rcu_preempt_blocked_readers_cgp(rnp)) in rcu_print_task_stall()
667 rcu_print_task_stall_begin(rnp); in rcu_print_task_stall()
668 t = list_entry(rnp->gp_tasks->prev, in rcu_print_task_stall()
670 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_stall()
683 static int rcu_print_task_exp_stall(struct rcu_node *rnp) in rcu_print_task_exp_stall() argument
688 if (!rnp->exp_tasks) in rcu_print_task_exp_stall()
690 t = list_entry(rnp->exp_tasks->prev, in rcu_print_task_exp_stall()
692 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_exp_stall()
710 rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
715 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) in rcu_preempt_check_blocked_tasks()
716 dump_blkd_tasks(rsp, rnp, 10); in rcu_preempt_check_blocked_tasks()
717 if (rcu_preempt_has_tasks(rnp) && in rcu_preempt_check_blocked_tasks()
718 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { in rcu_preempt_check_blocked_tasks()
719 rnp->gp_tasks = rnp->blkd_tasks.next; in rcu_preempt_check_blocked_tasks()
720 t = container_of(rnp->gp_tasks, struct task_struct, in rcu_preempt_check_blocked_tasks()
723 rnp->gp_seq, t->pid); in rcu_preempt_check_blocked_tasks()
725 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
869 dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) in dump_blkd_tasks() argument
878 raw_lockdep_assert_held_rcu_node(rnp); in dump_blkd_tasks()
880 __func__, rnp->grplo, rnp->grphi, rnp->level, in dump_blkd_tasks()
881 (long)rnp->gp_seq, (long)rnp->completedqs); in dump_blkd_tasks()
882 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in dump_blkd_tasks()
886 __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks); in dump_blkd_tasks()
889 list_for_each(lhp, &rnp->blkd_tasks) { in dump_blkd_tasks()
895 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { in dump_blkd_tasks()
897 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in dump_blkd_tasks()
930 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
938 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
955 static int rcu_print_task_stall(struct rcu_node *rnp) in rcu_print_task_stall() argument
965 static int rcu_print_task_exp_stall(struct rcu_node *rnp) in rcu_print_task_exp_stall() argument
976 rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
978 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
1018 dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) in dump_blkd_tasks() argument
1020 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); in dump_blkd_tasks()
1045 static int rcu_boost(struct rcu_node *rnp) in rcu_boost() argument
1051 if (READ_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
1052 READ_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
1055 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_boost()
1061 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { in rcu_boost()
1062 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_boost()
1072 if (rnp->exp_tasks != NULL) in rcu_boost()
1073 tb = rnp->exp_tasks; in rcu_boost()
1075 tb = rnp->boost_tasks; in rcu_boost()
1094 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); in rcu_boost()
1095 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_boost()
1097 rt_mutex_lock(&rnp->boost_mtx); in rcu_boost()
1098 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ in rcu_boost()
1100 return READ_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1101 READ_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
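
File lines 1045-1101 are rcu_boost(); the cluster at file lines 930-1020 just above it appears to be the !CONFIG_PREEMPT_RCU stub versions of the earlier functions. rcu_boost() peeks at ->exp_tasks and ->boost_tasks without the lock so the common nothing-to-boost case stays cheap, re-checks under the lock, prefers expedited blockers, and then boosts by handing the chosen task a proxy-locked rt_mutex that the booster immediately blocks on, letting priority inheritance lift the blocker's priority. The C11/pthreads sketch below models only the peek-then-recheck pattern; the kernel uses READ_ONCE() and the rcu_node spinlock instead, and the rt_mutex handoff is not modeled here.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(void *) exp_tasks, boost_tasks;

static bool try_boost(void)
{
	/* Cheap unlocked peek (cf. file lines 1051-1052). */
	if (!atomic_load(&exp_tasks) && !atomic_load(&boost_tasks))
		return false;

	pthread_mutex_lock(&node_lock);
	/* Re-check under the lock; the pointers may have been cleared
	 * since the peek (cf. file line 1061). */
	if (!atomic_load(&exp_tasks) && !atomic_load(&boost_tasks)) {
		pthread_mutex_unlock(&node_lock);
		return false;
	}
	/* Prefer expedited blockers (cf. file lines 1072-1075). */
	void *tb = atomic_load(&exp_tasks) ? atomic_load(&exp_tasks)
					   : atomic_load(&boost_tasks);
	printf("would boost the task at %p\n", tb);
	pthread_mutex_unlock(&node_lock);
	return true;
}

int main(void)
{
	static int dummy;

	printf("empty:  %d\n", try_boost());
	atomic_store(&boost_tasks, (void *)&dummy);
	printf("queued: %d\n", try_boost());
	return 0;
}
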
1109 struct rcu_node *rnp = (struct rcu_node *)arg; in rcu_boost_kthread() local
1115 rnp->boost_kthread_status = RCU_KTHREAD_WAITING; in rcu_boost_kthread()
1117 rcu_wait(rnp->boost_tasks || rnp->exp_tasks); in rcu_boost_kthread()
1119 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; in rcu_boost_kthread()
1120 more2boost = rcu_boost(rnp); in rcu_boost_kthread()
1126 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; in rcu_boost_kthread()
1148 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1149 __releases(rnp->lock) in rcu_initiate_boost()
1153 raw_lockdep_assert_held_rcu_node(rnp); in rcu_initiate_boost()
1154 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { in rcu_initiate_boost()
1155 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1158 if (rnp->exp_tasks != NULL || in rcu_initiate_boost()
1159 (rnp->gp_tasks != NULL && in rcu_initiate_boost()
1160 rnp->boost_tasks == NULL && in rcu_initiate_boost()
1161 rnp->qsmask == 0 && in rcu_initiate_boost()
1162 ULONG_CMP_GE(jiffies, rnp->boost_time))) { in rcu_initiate_boost()
1163 if (rnp->exp_tasks == NULL) in rcu_initiate_boost()
1164 rnp->boost_tasks = rnp->gp_tasks; in rcu_initiate_boost()
1165 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1166 t = rnp->boost_kthread_task; in rcu_initiate_boost()
1168 rcu_wake_cond(t, rnp->boost_kthread_status); in rcu_initiate_boost()
1170 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
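
File lines 1148-1170 are rcu_initiate_boost(): the boost kthread is woken when expedited blockers exist, or when blocked readers are all that still holds up the grace period (rnp->qsmask == 0) and the boost delay has passed. The jiffies test at file line 1162 uses ULONG_CMP_GE(), which stays correct across counter wraparound; the macro body below matches my reading of the kernel's definition, and the demo shows why a plain >= fails once the counter wraps.

#include <limits.h>
#include <stdio.h>

/* True when a is at or after b, even across wraparound, provided the
 * two values are within half the counter range of each other. */
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long boost_time = ULONG_MAX - 5;	/* set just before wrap */
	unsigned long jiffies = 10;			/* sampled after wrap */

	printf("plain >=     : %d\n", jiffies >= boost_time);		/* 0: wrong */
	printf("ULONG_CMP_GE : %d\n", ULONG_CMP_GE(jiffies, boost_time)); /* 1 */
	return 0;
}
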
1205 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1207 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; in rcu_preempt_boost_start_gp()
1216 struct rcu_node *rnp) in rcu_spawn_one_boost_kthread() argument
1218 int rnp_index = rnp - &rsp->node[0]; in rcu_spawn_one_boost_kthread()
1226 if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) in rcu_spawn_one_boost_kthread()
1230 if (rnp->boost_kthread_task != NULL) in rcu_spawn_one_boost_kthread()
1232 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1236 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_spawn_one_boost_kthread()
1237 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1238 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_spawn_one_boost_kthread()
1315 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1317 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity()
1318 unsigned long mask = rcu_rnp_online_cpus(rnp); in rcu_boost_kthread_setaffinity()
1326 for_each_leaf_node_possible_cpu(rnp, cpu) in rcu_boost_kthread_setaffinity()
1327 if ((mask & leaf_node_cpu_bit(rnp, cpu)) && in rcu_boost_kthread_setaffinity()
1350 struct rcu_node *rnp; in rcu_spawn_boost_kthreads() local
1356 rcu_for_each_leaf_node(rcu_state_p, rnp) in rcu_spawn_boost_kthreads()
1357 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); in rcu_spawn_boost_kthreads()
1363 struct rcu_node *rnp = rdp->mynode; in rcu_prepare_kthreads() local
1367 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); in rcu_prepare_kthreads()
1372 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1373 __releases(rnp->lock) in rcu_initiate_boost()
1375 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1388 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1392 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1490 struct rcu_node *rnp; in rcu_try_advance_all_cbs() local
1500 rnp = rdp->mynode; in rcu_try_advance_all_cbs()
1508 rcu_seq_current(&rnp->gp_seq)) || in rcu_try_advance_all_cbs()
1577 struct rcu_node *rnp; in rcu_prepare_for_idle() local
1620 rnp = rdp->mynode; in rcu_prepare_for_idle()
1621 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_prepare_for_idle()
1622 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_prepare_for_idle()
1623 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_prepare_for_idle()
1890 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) in rcu_nocb_gp_get() argument
1892 return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1]; in rcu_nocb_gp_get()
1895 static void rcu_init_one_nocb(struct rcu_node *rnp) in rcu_init_one_nocb() argument
1897 init_swait_queue_head(&rnp->nocb_gp_wq[0]); in rcu_init_one_nocb()
1898 init_swait_queue_head(&rnp->nocb_gp_wq[1]); in rcu_init_one_nocb()
2142 struct rcu_node *rnp = rdp->mynode; in rcu_nocb_wait_gp() local
2149 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_nocb_wait_gp()
2150 needwake = rcu_start_this_gp(rnp, rdp, c); in rcu_nocb_wait_gp()
2151 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_nocb_wait_gp()
2160 trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait")); in rcu_nocb_wait_gp()
2163 rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1], in rcu_nocb_wait_gp()
2164 (d = rcu_seq_done(&rnp->gp_seq, c))); in rcu_nocb_wait_gp()
2168 trace_rcu_this_gp(rnp, rdp, c, TPS("ResumeWait")); in rcu_nocb_wait_gp()
2170 trace_rcu_this_gp(rnp, rdp, c, TPS("EndWait")); in rcu_nocb_wait_gp()
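
The no-CBs hits (file lines 1890-2171) show how callback-offload kthreads wait for a grace period: each leaf rcu_node carries two swait queues, and a waiter for sequence c sleeps on nocb_gp_wq[rcu_seq_ctr(c) & 0x1] (file lines 1892 and 2163), so consecutive grace periods alternate queues and the wakeup at the end of one GP cannot disturb waiters for the next; completion is tested with rcu_seq_done() (file line 2164). A sketch of the indexing and the done test, with helper bodies per my reading of kernel/rcu/rcu.h; treat them as approximate.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

static unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/* Has the grace period with target sequence s completed? */
static bool rcu_seq_done(const unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(*sp, s);
}

int main(void)
{
	unsigned long gp_seq = 4UL << RCU_SEQ_CTR_SHIFT;	/* GP 4 done, idle */
	unsigned long c1 = 5UL << RCU_SEQ_CTR_SHIFT;		/* wait for GP 5 */
	unsigned long c2 = 6UL << RCU_SEQ_CTR_SHIFT;		/* wait for GP 6 */

	/* Back-to-back grace periods hash to alternating queues. */
	printf("c1 -> queue %lu, c2 -> queue %lu\n",
	       rcu_seq_ctr(c1) & 0x1, rcu_seq_ctr(c2) & 0x1);
	printf("GP 5 done yet? %d\n", rcu_seq_done(&gp_seq, c1));
	gp_seq = 5UL << RCU_SEQ_CTR_SHIFT;			/* GP 5 completes */
	printf("GP 5 done now? %d\n", rcu_seq_done(&gp_seq, c1));
	return 0;
}
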
2598 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) in rcu_nocb_gp_get() argument
2603 static void rcu_init_one_nocb(struct rcu_node *rnp) in rcu_init_one_nocb() argument