Lines matching refs: rcu_data (kernel/rcu/tree.c); a short per-CPU access-pattern sketch follows the listing.
82 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
228 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu()
249 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_enter()
274 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_exit()
305 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_online()
319 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_curr_cpu_in_eqs()
328 static int rcu_dynticks_snap(struct rcu_data *rdp) in rcu_dynticks_snap()
349 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) in rcu_dynticks_in_eqs_since()
360 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_dynticks_zero_in_eqs()
388 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); in rcu_eqs_special_set()
416 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); in rcu_momentary_dyntick_idle()
418 &this_cpu_ptr(&rcu_data)->dynticks); in rcu_momentary_dyntick_idle()
445 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0, in rcu_is_cpu_rrupt_from_idle()
447 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0, in rcu_is_cpu_rrupt_from_idle()
451 nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting); in rcu_is_cpu_rrupt_from_idle()
461 return __this_cpu_read(rcu_data.dynticks_nesting) == 0; in rcu_is_cpu_rrupt_from_idle()
563 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
622 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_enter()
638 rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_enter()
704 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_exit()
785 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0, in rcu_irq_exit_preempt()
787 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != in rcu_irq_exit_preempt()
802 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0, in rcu_irq_exit_check_preempt()
804 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != in rcu_irq_exit_check_preempt()
837 struct rcu_data *rdp; in rcu_eqs_exit()
841 rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_exit()
929 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick()
978 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_enter()
1078 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs()
1123 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); in rcu_request_urgent_qs_task()
1143 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online()
1150 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
1168 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf()
1183 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter()
1200 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs()
1269 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1270 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu); in rcu_implicit_dynticks_qs()
1326 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp()
1350 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp()
1428 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup()
1477 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs()
1520 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked()
1549 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs()
1573 struct rcu_data *rdp) in rcu_advance_cbs_nowake()
1602 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes()
1650 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes()
1721 struct rcu_data *rdp; in rcu_gp_init()
1823 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1979 struct rcu_data *rdp; in rcu_gp_cleanup()
2017 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2025 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
2044 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2246 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp()
2298 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state()
2331 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcutree_dying_cpu()
2396 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dead_cpu()
2405 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); in rcutree_dead_cpu()
2416 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch()
2556 raw_cpu_inc(rcu_data.ticks_this_gp); in rcu_sched_clock_irq()
2558 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { in rcu_sched_clock_irq()
2564 __this_cpu_write(rcu_data.rcu_urgent_qs, false); in rcu_sched_clock_irq()
2580 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp()
2585 struct rcu_data *rdp; in force_qs_rnp()
2610 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2638 rnp = __this_cpu_read(rcu_data.mynode); in rcu_force_quiescent_state()
2676 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core()
2743 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); in invoke_rcu_core_kthread()
2744 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); in invoke_rcu_core_kthread()
2746 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); in invoke_rcu_core_kthread()
2765 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; in rcu_cpu_kthread_park()
2770 return __this_cpu_read(rcu_data.rcu_cpu_has_work); in rcu_cpu_kthread_should_run()
2780 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); in rcu_cpu_kthread()
2781 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); in rcu_cpu_kthread()
2809 .store = &rcu_data.rcu_cpu_kthread_task,
2825 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; in rcu_spawn_core_kthreads()
2837 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core()
2892 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked()
2915 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld()
2933 struct rcu_data *rdp; in __call_rcu()
2954 rdp = this_cpu_ptr(&rcu_data); in __call_rcu()
3690 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending()
3767 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_func()
3795 struct rcu_data *rdp; in rcu_barrier()
3833 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
3911 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data()
3939 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu()
3981 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting()
3993 struct rcu_data *rdp; in rcutree_online_cpu()
3996 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
4018 struct rcu_data *rdp; in rcutree_offline_cpu()
4021 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4049 struct rcu_data *rdp; in rcu_cpu_starting()
4053 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_starting()
4092 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_dead()
4097 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); in rcu_report_dead()
4128 struct rcu_data *my_rdp; in rcutree_migrate_callbacks()
4130 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks()
4138 my_rdp = this_cpu_ptr(&rcu_data); in rcutree_migrate_callbacks()
4335 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
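
Nearly every match above is one of a handful of per-CPU access idioms: the single DEFINE_PER_CPU_SHARED_ALIGNED() definition (line 82), this_cpu_ptr()/raw_cpu_ptr() for the running CPU's rcu_data, per_cpu_ptr()/per_cpu() for a named CPU's copy, and __this_cpu_read()/raw_cpu_write() for single-field accesses. Below is a minimal userspace sketch of that pattern, not kernel code: it models per-CPU storage as a plain array indexed by CPU id, the struct is trimmed to a few of the fields seen above, and the fake_*() helpers are hypothetical stand-ins for the kernel macros.

	/* Hypothetical model: the kernel's per-CPU rcu_data as an array. */
	#include <stdio.h>
	#include <stdbool.h>

	#define NR_CPUS 4

	struct rcu_data {                 /* trimmed stand-in for the real struct */
		long dynticks_nesting;
		bool rcu_urgent_qs;
		bool rcu_need_heavy_qs;
		int  cpu;
	};

	/* models DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) */
	static struct rcu_data rcu_data[NR_CPUS];

	/* models per_cpu_ptr(&rcu_data, cpu): reach a named CPU's copy */
	static struct rcu_data *fake_per_cpu_ptr(int cpu)
	{
		return &rcu_data[cpu];
	}

	/*
	 * models this_cpu_ptr(&rcu_data): reach the running CPU's copy
	 * (the real macro takes no cpu argument; here we pass it explicitly)
	 */
	static struct rcu_data *fake_this_cpu_ptr(int this_cpu)
	{
		return &rcu_data[this_cpu];
	}

	int main(void)
	{
		int this_cpu = 0;
		int cpu;

		/* cross-CPU write, as in rcu_request_urgent_qs_task() (line 1123) */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			fake_per_cpu_ptr(cpu)->rcu_urgent_qs = true;

		/* local access, as in rcu_momentary_dyntick_idle() (line 416) */
		struct rcu_data *rdp = fake_this_cpu_ptr(this_cpu);
		rdp->rcu_need_heavy_qs = false;

		printf("cpu %d: urgent_qs=%d need_heavy_qs=%d\n",
		       this_cpu, rdp->rcu_urgent_qs, rdp->rcu_need_heavy_qs);
		return 0;
	}

Note that in the kernel the cross-CPU store at line 1123 uses smp_store_release(), paired with the smp_load_acquire() at line 2558, to order the urgent-quiescent-state flag against the scheduler-clock interrupt path; the plain stores in this sketch elide that ordering.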