Lines matching refs: rcu_data

rcu_data names both the type (struct rcu_data, RCU's per-CPU state) and the per-CPU variable of that type defined in kernel/rcu/tree.c; every accessor below (this_cpu_ptr(), __this_cpu_read(), per_cpu_ptr(), and friends) operates on that variable. Each entry gives the source line number, the matching line, and the enclosing function.
16 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp) in rcu_rdp_is_offloaded()
31 (rdp == this_cpu_ptr(&rcu_data) && in rcu_rdp_is_offloaded()
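
rcu_rdp_is_offloaded() compares a passed-in rcu_data pointer against this CPU's own instance. A minimal sketch of that check, using a hypothetical demo_data per-CPU variable rather than the kernel's real declarations:

#include <linux/percpu.h>
#include <linux/types.h>

struct demo_data {
	unsigned long gp_seq;
};

static DEFINE_PER_CPU(struct demo_data, demo_data);

/* True iff @dp is the instance owned by the CPU we are running on. */
static bool demo_is_local(struct demo_data *dp)
{
	/* Caller must keep preemption disabled for a stable answer. */
	return dp == this_cpu_ptr(&demo_data);
}
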
153 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_preempt_ctxt_queue()
285 if (__this_cpu_read(rcu_data.cpu_no_qs.s)) { in rcu_qs()
287 __this_cpu_read(rcu_data.gp_seq), in rcu_qs()
289 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); in rcu_qs()
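
The rcu_qs() lines above read and clear per-CPU flags without ever forming a pointer. A sketch of that accessor pattern, assuming a union shaped like the kernel's union rcu_noqs (the surrounding names here are stand-ins):

#include <linux/percpu.h>
#include <linux/types.h>

struct demo_qs_state {
	union {
		struct {
			u8 norm;
			u8 exp;
		} b;
		u16 s;	/* reads both flags in one access */
	} cpu_no_qs;
};

static DEFINE_PER_CPU(struct demo_qs_state, demo_qs_state);

static void demo_note_qs(void)
{
	/* The __this_cpu_*() forms assume preemption is already off. */
	if (!__this_cpu_read(demo_qs_state.cpu_no_qs.s))
		return;	/* no quiescent state was requested */
	__this_cpu_write(demo_qs_state.cpu_no_qs.b.norm, 0);
}
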
311 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_note_context_switch()
469 struct rcu_data *rdp; in rcu_preempt_deferred_qs_irqrestore()
479 rdp = this_cpu_ptr(&rcu_data); in rcu_preempt_deferred_qs_irqrestore()
583 return (__this_cpu_read(rcu_data.exp_deferred_qs) || in rcu_preempt_need_deferred_qs()
610 struct rcu_data *rdp; in rcu_preempt_deferred_qs_handler()
612 rdp = container_of(iwp, struct rcu_data, defer_qs_iw); in rcu_preempt_deferred_qs_handler()
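
rcu_preempt_deferred_qs_handler() receives only the embedded irq_work and uses container_of() to get back to the enclosing rcu_data. A sketch of that pattern with a hypothetical demo_dq structure; the pending flag is the kind of guard the kernel uses to avoid double-queueing:

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

struct demo_dq {
	struct irq_work defer_qs_iw;
	bool defer_qs_iw_pending;	/* avoids double-queueing */
};

static DEFINE_PER_CPU(struct demo_dq, demo_dq);

static void demo_handler(struct irq_work *iwp)
{
	/* Recover the enclosing structure from the member pointer. */
	struct demo_dq *dp = container_of(iwp, struct demo_dq,
					  defer_qs_iw);

	dp->defer_qs_iw_pending = false;
}

/* Queue deferred work against this CPU's instance. */
static void demo_defer(void)
{
	struct demo_dq *dp = this_cpu_ptr(&demo_dq);

	if (!dp->defer_qs_iw_pending) {
		dp->defer_qs_iw_pending = true;
		init_irq_work(&dp->defer_qs_iw, demo_handler);
		irq_work_queue(&dp->defer_qs_iw);
	}
}
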
636 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_read_unlock_special()
732 __this_cpu_read(rcu_data.core_needs_qs) && in rcu_flavor_sched_clock_irq()
733 __this_cpu_read(rcu_data.cpu_no_qs.b.norm) && in rcu_flavor_sched_clock_irq()
775 struct rcu_data *rdp; in dump_blkd_tasks()
797 rdp = per_cpu_ptr(&rcu_data, cpu); in dump_blkd_tasks()
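
dump_blkd_tasks() is the one place in this list that addresses other CPUs' instances, via per_cpu_ptr() with an explicit CPU number. A sketch of that remote-walk pattern (demo_stat is a made-up structure):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/printk.h>

struct demo_stat {
	unsigned long gp_seq;
};

static DEFINE_PER_CPU(struct demo_stat, demo_stat);

static void demo_dump_all(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Unlike this_cpu_ptr(), this may name any CPU. */
		struct demo_stat *sp = per_cpu_ptr(&demo_stat, cpu);

		pr_info("cpu %d: gp_seq %lu\n", cpu, sp->gp_seq);
	}
}
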
815 struct rcu_data *rdp; in rcu_read_unlock_strict()
820 rdp = this_cpu_ptr(&rcu_data); in rcu_read_unlock_strict()
844 if (!__this_cpu_read(rcu_data.cpu_no_qs.s)) in rcu_qs()
847 __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs")); in rcu_qs()
848 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); in rcu_qs()
849 if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) in rcu_qs()
851 __this_cpu_write(rcu_data.cpu_no_qs.b.exp, false); in rcu_qs()
852 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); in rcu_qs()
866 if (!raw_cpu_read(rcu_data.rcu_urgent_qs)) in rcu_all_qs()
870 if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { in rcu_all_qs()
874 this_cpu_write(rcu_data.rcu_urgent_qs, false); in rcu_all_qs()
875 if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) { in rcu_all_qs()
893 if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) in rcu_note_context_switch()
895 this_cpu_write(rcu_data.rcu_urgent_qs, false); in rcu_note_context_switch()
896 if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) in rcu_note_context_switch()
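
rcu_all_qs() and rcu_note_context_switch() both screen rcu_urgent_qs with a cheap raw_cpu_read() before repeating the test as smp_load_acquire(), which pairs with a store-release by whoever set the flag. A sketch of that two-step protocol under those assumptions (demo_urgent_qs and demo_set_urgent() are illustrative names):

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(bool, demo_urgent_qs);

/* Remote writer: request a quiescent state from @cpu. */
static void demo_set_urgent(int cpu)
{
	smp_store_release(per_cpu_ptr(&demo_urgent_qs, cpu), true);
}

/* Local reader: acknowledge and clear the request. */
static void demo_check_urgent(void)
{
	if (!raw_cpu_read(demo_urgent_qs))
		return;	/* fast path, no memory barrier */
	if (!smp_load_acquire(this_cpu_ptr(&demo_urgent_qs)))
		return;	/* raced with the flag being cleared */
	this_cpu_write(demo_urgent_qs, false);
}
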
1141 return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current; in rcu_is_callbacks_kthread()
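
rcu_is_callbacks_kthread() answers "am I this CPU's callback kthread?" by comparing a cached per-CPU task pointer with current. A minimal sketch (demo_kthread_task is a stand-in field):

#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, demo_kthread_task);

static bool demo_is_helper_kthread(void)
{
	return __this_cpu_read(demo_kthread_task) == current;
}
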
1271 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && in rcu_needs_cpu()
1272 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); in rcu_needs_cpu()
1325 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_try_advance_all_cbs()
1360 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_needs_cpu()
1398 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_prepare_for_idle()
1441 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_cleanup_after_idle()