Lines matching refs: rdp (references to struct rcu_data * in kernel/rcu/tree.c; the leading number on each hit is that file's own line number)

168 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
170 struct rcu_data *rdp, bool wake);
635 struct rcu_data *rdp; in show_rcu_gp_kthreads() local
651 rdp = per_cpu_ptr(rsp->rda, cpu); in show_rcu_gp_kthreads()
652 if (rdp->gpwrap || in show_rcu_gp_kthreads()
654 rdp->gp_seq_needed)) in show_rcu_gp_kthreads()
657 cpu, rdp->gp_seq_needed); in show_rcu_gp_kthreads()
712 struct rcu_data *rdp; in rcu_eqs_enter() local
728 rdp = this_cpu_ptr(rsp->rda); in rcu_eqs_enter()
729 do_nocb_deferred_wakeup(rdp); in rcu_eqs_enter()
1070 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online() local
1078 rdp = this_cpu_ptr(rsp->rda); in rcu_lockdep_current_cpu_online()
1079 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
1080 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) { in rcu_lockdep_current_cpu_online()
1112 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
1115 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, in rcu_gpnum_ovf()
1117 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
1118 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
1119 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
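
The rcu_gpnum_ovf() hits above guard against grace-period counter wrap: a CPU that idles long enough could be lapped by ->gp_seq, making its cached snapshot ambiguous. These checks, like the time_after() checks further down, rely on wrap-safe modular comparison rather than plain `<`. A minimal standalone sketch of the idiom; the macro bodies are transcribed from kernel/rcu/rcu.h and a simplified include/linux/jiffies.h (the kernel version adds typechecks) as best recalled, so treat them as assumptions:

#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a < b" for free-running unsigned long counters,
 * believed to match ULONG_CMP_LT() in kernel/rcu/rcu.h. */
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

/* Wrap-safe jiffies comparison, simplified from include/linux/jiffies.h. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long old = ULONG_MAX - 1;	/* snapshot taken just before wrap */
	unsigned long cur = old + 4;		/* live counter just after wrap */

	printf("plain <      : %d\n", old < cur);		/* 0: fooled by wrap */
	printf("ULONG_CMP_LT : %d\n", ULONG_CMP_LT(old, cur));	/* 1: still ordered */
	printf("time_after   : %d\n", time_after(cur, old));	/* 1: cur is later */
	return 0;
}

The same arithmetic is why rcu_gpnum_ovf() can use "snapshot + ULONG_MAX / 4 still compares below rnp->gp_seq" as a quarter-range staleness test.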
1127 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter() argument
1129 rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks); in dyntick_save_progress_counter()
1130 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { in dyntick_save_progress_counter()
1131 trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti")); in dyntick_save_progress_counter()
1132 rcu_gpnum_ovf(rdp->mynode, rdp); in dyntick_save_progress_counter()
1146 struct rcu_data *rdp; in rcu_iw_handler() local
1149 rdp = container_of(iwp, struct rcu_data, rcu_iw); in rcu_iw_handler()
1150 rnp = rdp->mynode; in rcu_iw_handler()
1152 if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) { in rcu_iw_handler()
1153 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_iw_handler()
1154 rdp->rcu_iw_pending = false; in rcu_iw_handler()
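
rcu_iw_handler() above is an irq_work callback: rcu_implicit_dynticks_qs() (next group) queues rdp->rcu_iw on a holdout CPU to force it through an interrupt, and the handler recovers its rcu_data from the embedded work item with container_of(). A sketch of that embed-and-recover pattern using the standard irq_work APIs; the my_data/my_handler/poke_cpu names are hypothetical:

#include <linux/irq_work.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU state embedding its work item, the way
 * struct rcu_data embeds ->rcu_iw. */
struct my_data {
	struct irq_work iw;
	bool iw_pending;
	unsigned long iw_gp_seq;
};

static DEFINE_PER_CPU(struct my_data, my_data);

/* Runs in hardirq context on the CPU the work was queued for. */
static void my_handler(struct irq_work *iwp)
{
	struct my_data *mdp = container_of(iwp, struct my_data, iw);

	mdp->iw_gp_seq++;		/* record that this CPU responded */
	mdp->iw_pending = false;	/* work item may now be reused */
}

/* Force @cpu through an interrupt, one outstanding poke at a time. */
static void poke_cpu(int cpu)
{
	struct my_data *mdp = per_cpu_ptr(&my_data, cpu);

	if (mdp->iw_pending)
		return;
	mdp->iw_pending = true;
	init_irq_work(&mdp->iw, my_handler);
	irq_work_queue_on(&mdp->iw, cpu);
}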
1165 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs() argument
1170 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs()
1180 if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) { in rcu_implicit_dynticks_qs()
1181 trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_implicit_dynticks_qs()
1182 rdp->dynticks_fqs++; in rcu_implicit_dynticks_qs()
1183 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1194 ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1195 if (time_after(jiffies, rdp->rsp->gp_start + jtsq) && in rcu_implicit_dynticks_qs()
1196 READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) && in rcu_implicit_dynticks_qs()
1197 rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) { in rcu_implicit_dynticks_qs()
1198 trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc")); in rcu_implicit_dynticks_qs()
1199 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1201 } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) { in rcu_implicit_dynticks_qs()
1207 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && in rcu_implicit_dynticks_qs()
1208 time_after(jiffies, rdp->rsp->gp_start + HZ)) { in rcu_implicit_dynticks_qs()
1219 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in rcu_implicit_dynticks_qs()
1221 __func__, rdp->cpu, ".o"[onl], in rcu_implicit_dynticks_qs()
1222 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, in rcu_implicit_dynticks_qs()
1223 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); in rcu_implicit_dynticks_qs()
1244 rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1246 (time_after(jiffies, rdp->rsp->gp_start + jtsq) || in rcu_implicit_dynticks_qs()
1247 time_after(jiffies, rdp->rsp->jiffies_resched))) { in rcu_implicit_dynticks_qs()
1251 rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */ in rcu_implicit_dynticks_qs()
1260 if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) { in rcu_implicit_dynticks_qs()
1261 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
1263 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
1264 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
1265 init_irq_work(&rdp->rcu_iw, rcu_iw_handler); in rcu_implicit_dynticks_qs()
1266 rdp->rcu_iw_pending = true; in rcu_implicit_dynticks_qs()
1267 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
1268 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_implicit_dynticks_qs()
1449 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); in print_cpu_stall() local
1465 raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); in print_cpu_stall()
1467 raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); in print_cpu_stall()
1499 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) in check_cpu_stall() argument
1543 rnp = rdp->mynode; in check_cpu_stall()
1546 (READ_ONCE(rnp->qsmask) & rdp->grpmask) && in check_cpu_stall()
1579 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
1582 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req, in trace_rcu_this_gp()
1602 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp() argument
1606 struct rcu_state *rsp = rdp->rsp; in rcu_start_this_gp()
1619 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); in rcu_start_this_gp()
1627 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1639 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, in rcu_start_this_gp()
1651 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1654 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1658 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1667 rdp->gp_seq_needed = rnp->gp_seq_needed; in rcu_start_this_gp()
1681 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); in rcu_future_gp_cleanup() local
1686 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1720 struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1728 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1742 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1743 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1746 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1747 trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB")); in rcu_accelerate_cbs()
1749 trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB")); in rcu_accelerate_cbs()
1762 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked() argument
1769 if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1771 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1775 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_accelerate_cbs_unlocked()
1792 struct rcu_data *rdp) in rcu_advance_cbs() argument
1797 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1804 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1807 return rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_advance_cbs()
1817 struct rcu_data *rdp) in __note_gp_changes() argument
1824 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1828 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1829 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1830 ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */ in __note_gp_changes()
1831 trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1833 ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */ in __note_gp_changes()
1837 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1838 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1845 need_gp = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1846 rdp->cpu_no_qs.b.norm = need_gp; in __note_gp_changes()
1847 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr); in __note_gp_changes()
1848 rdp->core_needs_qs = need_gp; in __note_gp_changes()
1849 zero_cpu_stall_ticks(rdp); in __note_gp_changes()
1851 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1852 if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1853 rdp->gp_seq_needed = rnp->gp_seq_needed; in __note_gp_changes()
1854 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1855 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
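
__note_gp_changes() reconciles a CPU's cached ->gp_seq with its leaf node's: rcu_seq_completed_gp() detects grace periods that ended while the CPU was not looking, rcu_seq_new_gp() that a new one began. The sequence number packs grace-period state into its low-order bits; the constants below model that encoding in plain userspace C and are transcribed from kernel/rcu/rcu.h from memory, so treat the exact values as assumptions:

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT	2	/* bottom bits carry GP state */
#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

static unsigned long seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;	/* completed-GP count */
}

static unsigned long seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;	/* 0 = idle, nonzero = in progress */
}

int main(void)
{
	unsigned long gp_seq = 0;

	gp_seq++;	/* GP start: state 0 -> 1, counter untouched */
	printf("ctr=%lu state=%lu\n", seq_ctr(gp_seq), seq_state(gp_seq));

	/* GP end: round up to the next counter value with state 0. */
	gp_seq = (gp_seq | RCU_SEQ_STATE_MASK) + 1;
	printf("ctr=%lu state=%lu\n", seq_ctr(gp_seq), seq_state(gp_seq));
	return 0;
}

Because the counter occupies the high bits, ordering questions reduce to the wrap-safe ULONG_CMP_*() comparisons shown earlier.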
1859 static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) in note_gp_changes() argument
1866 rnp = rdp->mynode; in note_gp_changes()
1867 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1868 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1873 needwake = __note_gp_changes(rsp, rnp, rdp); in note_gp_changes()
1895 struct rcu_data *rdp; in rcu_gp_init() local
1993 rdp = this_cpu_ptr(rsp->rda); in rcu_gp_init()
1997 if (rnp == rdp->mynode) in rcu_gp_init()
1998 (void)__note_gp_changes(rsp, rnp, rdp); in rcu_gp_init()
2070 struct rcu_data *rdp; in rcu_gp_cleanup() local
2107 rdp = this_cpu_ptr(rsp->rda); in rcu_gp_cleanup()
2108 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2109 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; in rcu_gp_cleanup()
2127 rdp = this_cpu_ptr(rsp->rda); in rcu_gp_cleanup()
2129 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2134 if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) { in rcu_gp_cleanup()
2381 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) in rcu_report_qs_rdp() argument
2388 rnp = rdp->mynode; in rcu_report_qs_rdp()
2390 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2391 rdp->gpwrap) { in rcu_report_qs_rdp()
2399 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2400 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr); in rcu_report_qs_rdp()
2404 mask = rdp->grpmask; in rcu_report_qs_rdp()
2408 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2414 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_report_qs_rdp()
2430 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) in rcu_check_quiescent_state() argument
2433 note_gp_changes(rsp, rdp); in rcu_check_quiescent_state()
2439 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2446 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2453 rcu_report_qs_rdp(rdp->cpu, rsp, rdp); in rcu_check_quiescent_state()
2462 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);) in rcu_cleanup_dying_cpu()
2463 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;) in rcu_cleanup_dying_cpu()
2468 RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);) in rcu_cleanup_dying_cpu()
2526 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cleanup_dead_cpu() local
2527 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_cleanup_dead_cpu()
2540 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) in rcu_do_batch() argument
2548 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2550 rcu_segcblist_n_lazy_cbs(&rdp->cblist), in rcu_do_batch()
2551 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2553 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2566 bl = rdp->blimit; in rcu_do_batch()
2567 trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist), in rcu_do_batch()
2568 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2569 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2594 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2596 rcu_segcblist_insert_count(&rdp->cblist, &rcl); in rcu_do_batch()
2599 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2600 if (rdp->blimit == LONG_MAX && count <= qlowmark) in rcu_do_batch()
2601 rdp->blimit = blimit; in rcu_do_batch()
2604 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2605 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2606 rdp->n_force_qs_snap = rsp->n_force_qs; in rcu_do_batch()
2607 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2608 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2614 WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0)); in rcu_do_batch()
2619 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_do_batch()
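
rcu_do_batch() invokes at most ->blimit ready callbacks per pass, and the hits above show the hysteresis around that limit: __call_rcu_core() (further down) opens it to LONG_MAX when a CPU is flooded, and rcu_do_batch() restores the normal blimit once the queue drains to qlowmark. A schematic userspace model of that load-shedding behavior; names and thresholds here are illustrative, not the kernel's:

#include <limits.h>
#include <stdio.h>

#define QLOWMARK	100	/* illustrative stand-in for qlowmark */
#define BLIMIT_DEFAULT	10	/* illustrative stand-in for blimit */

static long cur_blimit = BLIMIT_DEFAULT;

/* Invoke at most cur_blimit of @count pending callbacks; return the
 * number left, re-tightening the limit once the flood has drained. */
static long do_batch(long count)
{
	long invoked = count < cur_blimit ? count : cur_blimit;

	count -= invoked;
	if (cur_blimit == LONG_MAX && count <= QLOWMARK)
		cur_blimit = BLIMIT_DEFAULT;
	return count;
}

int main(void)
{
	long count = 100000;

	cur_blimit = LONG_MAX;		/* flood detected: open the limit */
	count = do_batch(count);	/* one pass drains everything */
	printf("left=%ld blimit=%ld\n", count, cur_blimit);	/* 0, 10 */
	return 0;
}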
2764 struct rcu_data *rdp) in rcu_check_gp_start_stall() argument
2826 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); in __rcu_process_callbacks() local
2827 struct rcu_node *rnp = rdp->mynode; in __rcu_process_callbacks()
2829 WARN_ON_ONCE(!rdp->beenonline); in __rcu_process_callbacks()
2832 rcu_check_quiescent_state(rsp, rdp); in __rcu_process_callbacks()
2836 rcu_segcblist_is_enabled(&rdp->cblist)) { in __rcu_process_callbacks()
2838 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in __rcu_process_callbacks()
2839 rcu_accelerate_cbs_unlocked(rsp, rnp, rdp); in __rcu_process_callbacks()
2843 rcu_check_gp_start_stall(rsp, rnp, rdp); in __rcu_process_callbacks()
2846 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in __rcu_process_callbacks()
2847 invoke_rcu_callbacks(rsp, rdp); in __rcu_process_callbacks()
2850 do_nocb_deferred_wakeup(rdp); in __rcu_process_callbacks()
2875 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) in invoke_rcu_callbacks() argument
2880 rcu_do_batch(rsp, rdp); in invoke_rcu_callbacks()
2895 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, in __call_rcu_core() argument
2916 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in __call_rcu_core()
2917 rdp->qlen_last_fqs_check + qhimark)) { in __call_rcu_core()
2920 note_gp_changes(rsp, rdp); in __call_rcu_core()
2924 rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp); in __call_rcu_core()
2927 rdp->blimit = LONG_MAX; in __call_rcu_core()
2928 if (rsp->n_force_qs == rdp->n_force_qs_snap && in __call_rcu_core()
2929 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in __call_rcu_core()
2931 rdp->n_force_qs_snap = rsp->n_force_qs; in __call_rcu_core()
2932 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_core()
2955 struct rcu_data *rdp; in __call_rcu() local
2974 rdp = this_cpu_ptr(rsp->rda); in __call_rcu()
2977 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) { in __call_rcu()
2981 rdp = per_cpu_ptr(rsp->rda, cpu); in __call_rcu()
2982 if (likely(rdp->mynode)) { in __call_rcu()
2984 offline = !__call_rcu_nocb(rdp, head, lazy, flags); in __call_rcu()
2996 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu()
2997 rcu_segcblist_init(&rdp->cblist); in __call_rcu()
2999 rcu_segcblist_enqueue(&rdp->cblist, head, lazy); in __call_rcu()
3005 rcu_segcblist_n_lazy_cbs(&rdp->cblist), in __call_rcu()
3006 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu()
3009 rcu_segcblist_n_lazy_cbs(&rdp->cblist), in __call_rcu()
3010 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu()
3013 __call_rcu_core(rsp, rdp, head, flags); in __call_rcu()
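
__call_rcu() is the enqueue path behind call_rcu(): it picks this CPU's rcu_data, enqueues onto ->cblist, and lets __call_rcu_core() decide whether to poke the grace-period machinery. The caller-side pattern it services embeds an rcu_head in the protected object and reclaims from the callback; a minimal sketch with a hypothetical struct foo (the update-side lock is assumed held across foo_update()):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
	struct rcu_head rcu;	/* embedded: no allocation at call_rcu() time */
};

/* Invoked after a grace period: no reader can still see the old copy. */
static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

/* Publish a replacement, then defer freeing of the old version. */
static void foo_update(struct foo __rcu **slot, struct foo *newfp)
{
	struct foo *old = rcu_dereference_protected(*slot, 1);

	rcu_assign_pointer(*slot, newfp);
	if (old)
		call_rcu(&old->rcu, foo_reclaim);	/* lands in __call_rcu() */
}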
3271 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) in __rcu_pending() argument
3273 struct rcu_node *rnp = rdp->mynode; in __rcu_pending()
3276 check_cpu_stall(rsp, rdp); in __rcu_pending()
3283 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) in __rcu_pending()
3287 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in __rcu_pending()
3292 rcu_segcblist_is_enabled(&rdp->cblist) && in __rcu_pending()
3293 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in __rcu_pending()
3297 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in __rcu_pending()
3298 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in __rcu_pending()
3302 if (rcu_nocb_need_deferred_wakeup(rdp)) in __rcu_pending()
3333 struct rcu_data *rdp; in rcu_cpu_has_callbacks() local
3337 rdp = this_cpu_ptr(rsp->rda); in rcu_cpu_has_callbacks()
3338 if (rcu_segcblist_empty(&rdp->cblist)) in rcu_cpu_has_callbacks()
3341 if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) { in rcu_cpu_has_callbacks()
3368 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head); in rcu_barrier_callback() local
3369 struct rcu_state *rsp = rdp->rsp; in rcu_barrier_callback()
3386 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); in rcu_barrier_func() local
3389 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_func()
3390 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_func()
3391 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { in rcu_barrier_func()
3394 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_func()
3407 struct rcu_data *rdp; in _rcu_barrier() local
3446 rdp = per_cpu_ptr(rsp->rda, cpu); in _rcu_barrier()
3456 __call_rcu(&rdp->barrier_head, in _rcu_barrier()
3459 } else if (rcu_segcblist_n_cbs(&rdp->cblist)) { in _rcu_barrier()
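
_rcu_barrier() entrains rdp->barrier_head onto every CPU that has callbacks queued and waits until all of those barrier callbacks have run, which is why rcu_barrier() guarantees callback *invocation*, not merely the end of a grace period. The classic consumer is module unload; a hedged sketch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>

static int __init mymod_init(void)
{
	return 0;	/* module posts call_rcu() callbacks while live */
}
module_init(mymod_init);

static void __exit mymod_exit(void)
{
	/* Wait for every previously queued callback to finish executing;
	 * synchronize_rcu() alone would not wait for invocation. */
	rcu_barrier();

	/* Only now may the callback function and its data disappear. */
}
module_exit(mymod_exit);

MODULE_LICENSE("GPL");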
3540 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_boot_init_percpu_data() local
3543 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
3544 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); in rcu_boot_init_percpu_data()
3545 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1); in rcu_boot_init_percpu_data()
3546 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks))); in rcu_boot_init_percpu_data()
3547 rdp->rcu_ofl_gp_seq = rsp->gp_seq; in rcu_boot_init_percpu_data()
3548 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
3549 rdp->rcu_onl_gp_seq = rsp->gp_seq; in rcu_boot_init_percpu_data()
3550 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
3551 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
3552 rdp->rsp = rsp; in rcu_boot_init_percpu_data()
3553 rcu_boot_init_nocb_percpu_data(rdp); in rcu_boot_init_percpu_data()
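
rcu_boot_init_percpu_data() and the hotplug hits that follow all reach a given CPU's rcu_data through rsp->rda, using per_cpu_ptr() for an arbitrary CPU or this_cpu_ptr() for the current one. A minimal model of that dynamically allocated per-CPU layout, assuming only the standard percpu allocator API; the my_state/my_pcpu names are hypothetical:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/percpu.h>

struct my_pcpu {
	int cpu;
	unsigned long gp_seq;
};

/* One global state object pointing at per-CPU data, the way
 * rcu_state's ->rda points at the rcu_data instances. */
struct my_state {
	struct my_pcpu __percpu *pda;
};

static struct my_state mstate;

static int __init my_state_init(void)
{
	int cpu;

	mstate.pda = alloc_percpu(struct my_pcpu);
	if (!mstate.pda)
		return -ENOMEM;

	/* Boot-time initialization of every possible CPU's slot. */
	for_each_possible_cpu(cpu) {
		struct my_pcpu *pdp = per_cpu_ptr(mstate.pda, cpu);

		pdp->cpu = cpu;
		pdp->gp_seq = 0;
	}
	return 0;
}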
3566 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_init_percpu_data() local
3571 rdp->qlen_last_fqs_check = 0; in rcu_init_percpu_data()
3572 rdp->n_force_qs_snap = rsp->n_force_qs; in rcu_init_percpu_data()
3573 rdp->blimit = blimit; in rcu_init_percpu_data()
3574 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ in rcu_init_percpu_data()
3575 !init_nocb_callback_list(rdp)) in rcu_init_percpu_data()
3576 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcu_init_percpu_data()
3577 rdp->dynticks->dynticks_nesting = 1; /* CPU not up, no tearing. */ in rcu_init_percpu_data()
3586 rnp = rdp->mynode; in rcu_init_percpu_data()
3588 rdp->beenonline = true; /* We have now been online. */ in rcu_init_percpu_data()
3589 rdp->gp_seq = rnp->gp_seq; in rcu_init_percpu_data()
3590 rdp->gp_seq_needed = rnp->gp_seq; in rcu_init_percpu_data()
3591 rdp->cpu_no_qs.b.norm = true; in rcu_init_percpu_data()
3592 rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu); in rcu_init_percpu_data()
3593 rdp->core_needs_qs = false; in rcu_init_percpu_data()
3594 rdp->rcu_iw_pending = false; in rcu_init_percpu_data()
3595 rdp->rcu_iw_gp_seq = rnp->gp_seq - 1; in rcu_init_percpu_data()
3596 trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl")); in rcu_init_percpu_data()
3622 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); in rcutree_affinity_setting() local
3624 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); in rcutree_affinity_setting()
3634 struct rcu_data *rdp; in rcutree_online_cpu() local
3639 rdp = per_cpu_ptr(rsp->rda, cpu); in rcutree_online_cpu()
3640 rnp = rdp->mynode; in rcutree_online_cpu()
3642 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
3661 struct rcu_data *rdp; in rcutree_offline_cpu() local
3666 rdp = per_cpu_ptr(rsp->rda, cpu); in rcutree_offline_cpu()
3667 rnp = rdp->mynode; in rcutree_offline_cpu()
3669 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
3724 struct rcu_data *rdp; in rcu_cpu_starting() local
3734 rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cpu_starting()
3735 rnp = rdp->mynode; in rcu_cpu_starting()
3736 mask = rdp->grpmask; in rcu_cpu_starting()
3745 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
3746 rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq); in rcu_cpu_starting()
3747 rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags); in rcu_cpu_starting()
3768 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cleanup_dying_idle_cpu() local
3769 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_cleanup_dying_idle_cpu()
3772 mask = rdp->grpmask; in rcu_cleanup_dying_idle_cpu()
3775 rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq); in rcu_cleanup_dying_idle_cpu()
3776 rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags); in rcu_cleanup_dying_idle_cpu()
3815 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_migrate_callbacks() local
3816 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp); in rcu_migrate_callbacks()
3819 if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist)) in rcu_migrate_callbacks()
3824 if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) { in rcu_migrate_callbacks()
3830 needwake = rcu_advance_cbs(rsp, rnp_root, rdp) || in rcu_migrate_callbacks()
3832 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcu_migrate_callbacks()
3838 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcu_migrate_callbacks()
3839 !rcu_segcblist_empty(&rdp->cblist), in rcu_migrate_callbacks()
3841 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcu_migrate_callbacks()
3842 rcu_segcblist_first_cb(&rdp->cblist)); in rcu_migrate_callbacks()