Lines matching refs:rcu_data

77 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
240 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu()
260 return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks)); in rcu_dynticks_inc()
316 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_online()
330 return !(atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1); in rcu_dynticks_curr_cpu_in_eqs()
337 static int rcu_dynticks_snap(struct rcu_data *rdp) in rcu_dynticks_snap()
355 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_is_idle_cpu()
365 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) in rcu_dynticks_in_eqs_since()
376 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_dynticks_zero_in_eqs()
406 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); in rcu_momentary_dyntick_idle()
434 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0, in rcu_is_cpu_rrupt_from_idle()
436 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0, in rcu_is_cpu_rrupt_from_idle()
440 nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting); in rcu_is_cpu_rrupt_from_idle()
450 return __this_cpu_read(rcu_data.dynticks_nesting) == 0; in rcu_is_cpu_rrupt_from_idle()
552 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
611 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_enter()
685 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_irq_work_resched()
743 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_exit()
819 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0, in rcu_irq_exit_check_preempt()
821 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != in rcu_irq_exit_check_preempt()
854 struct rcu_data *rdp; in rcu_eqs_exit()
858 rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_exit()
946 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick()
995 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_enter()
1094 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs()
1142 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); in rcu_request_urgent_qs_task()
1162 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online()
1169 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
1187 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf()
1202 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter()
1219 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs()
1288 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1289 rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1343 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp()
1367 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp()
1445 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup()
1494 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs()
1541 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked()
1570 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs()
1594 struct rcu_data *rdp) in rcu_advance_cbs_nowake()
1623 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes()
1670 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes()
1742 struct rcu_data *rdp; in rcu_gp_init()
1850 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
2011 struct rcu_data *rdp; in rcu_gp_cleanup()
2049 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2057 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
2076 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2277 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp()
2328 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state()
2361 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcutree_dying_cpu()
2426 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dead_cpu()
2444 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch()
2588 raw_cpu_inc(rcu_data.ticks_this_gp); in rcu_sched_clock_irq()
2590 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { in rcu_sched_clock_irq()
2596 __this_cpu_write(rcu_data.rcu_urgent_qs, false); in rcu_sched_clock_irq()
2613 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp()
2618 struct rcu_data *rdp; in force_qs_rnp()
2643 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2671 rnp = __this_cpu_read(rcu_data.mynode); in rcu_force_quiescent_state()
2709 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core()
2775 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); in invoke_rcu_core_kthread()
2776 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); in invoke_rcu_core_kthread()
2778 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); in invoke_rcu_core_kthread()
2797 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; in rcu_cpu_kthread_park()
2802 return __this_cpu_read(rcu_data.rcu_cpu_has_work); in rcu_cpu_kthread_should_run()
2812 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); in rcu_cpu_kthread()
2813 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); in rcu_cpu_kthread()
2841 .store = &rcu_data.rcu_cpu_kthread_task,
2857 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; in rcu_spawn_core_kthreads()
2868 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core()
2923 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked()
2946 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld()
2965 struct rcu_data *rdp; in __call_rcu()
2988 rdp = this_cpu_ptr(&rcu_data); in __call_rcu()
3787 struct rcu_data *rdp; in start_poll_synchronize_rcu()
3792 rdp = this_cpu_ptr(&rcu_data); in start_poll_synchronize_rcu()
3874 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending()
3953 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_func()
3981 struct rcu_data *rdp; in rcu_barrier()
4019 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4097 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data()
4125 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu()
4173 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting()
4185 struct rcu_data *rdp; in rcutree_online_cpu()
4188 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
4210 struct rcu_data *rdp; in rcutree_offline_cpu()
4213 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4241 struct rcu_data *rdp; in rcu_cpu_starting()
4245 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_starting()
4292 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_dead()
4300 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); in rcu_report_dead()
4337 struct rcu_data *my_rdp; in rcutree_migrate_callbacks()
4339 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks()
4347 my_rdp = this_cpu_ptr(&rcu_data); in rcutree_migrate_callbacks()
4545 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
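
The listing above repeatedly exercises three per-CPU accessor idioms on the rcu_data structure: per_cpu_ptr() to reach another CPU's instance (lines 240, 2643, 4019), this_cpu_ptr() for the local CPU's instance (lines 316, 2988), and field-granular operations such as __this_cpu_read()/__this_cpu_write()/raw_cpu_inc() (lines 440, 2588, 2596). Below is a minimal sketch of those idioms; struct demo_data and the demo_* functions are hypothetical stand-ins, not the real struct rcu_data (whose layout lives elsewhere in the RCU tree code). The even/odd test mirrors the `& 0x1` check on dynticks at line 330.

/*
 * Illustrative sketch only: struct demo_data and the demo_* helpers are
 * hypothetical stand-ins for struct rcu_data and the accessors in the
 * listing above. They show the per-CPU idioms, not RCU's actual logic.
 */
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/types.h>

struct demo_data {
	atomic_t dynticks;	/* even value: CPU is in an extended quiescent state */
	bool rcu_urgent_qs;
	long ticks_this_gp;
};

/* Cache-line-aligned per-CPU instance, as defined at line 77. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct demo_data, demo_data);

/* Cross-CPU access: take a pointer to some other CPU's copy. */
static bool demo_cpu_in_eqs(int cpu)
{
	struct demo_data *ddp = per_cpu_ptr(&demo_data, cpu);

	/* An even counter means the CPU is in an EQS (cf. line 330). */
	return !(atomic_read(&ddp->dynticks) & 0x1);
}

/* Local-CPU access: the caller must already have preemption disabled. */
static void demo_note_tick(void)
{
	struct demo_data *ddp = this_cpu_ptr(&demo_data);

	ddp->ticks_this_gp++;	/* field form: raw_cpu_inc(demo_data.ticks_this_gp); */
	__this_cpu_write(demo_data.rcu_urgent_qs, false);
}

Note also the acquire/release pairing visible in the listing: the smp_store_release() of rcu_urgent_qs at line 1142 publishes an urgent-quiescent-state request, and the smp_load_acquire() at line 2590 consumes it from the scheduler-tick path without any lock.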