Lines Matching defs:rcu_data
152 struct rcu_data {
154 unsigned long gp_seq; /* Track rsp->gp_seq counter. */
155 unsigned long gp_seq_needed; /* Track furthest future GP request. */
156 union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
157 bool core_needs_qs; /* Core waits for quiescent state. */
158 bool beenonline; /* CPU online at least once. */
159 bool gpwrap; /* Possible ->gp_seq wrap. */
160 bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
161 bool cpu_started; /* RCU watching this onlining CPU. */
162 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
163 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
164 unsigned long ticks_this_gp; /* The number of scheduling-clock ticks this CPU has handled during and after the last grace period it is aware of. */
168 struct irq_work defer_qs_iw; /* Obtain later scheduler attention. */
169 bool defer_qs_iw_pending; /* Scheduler attention pending? */
170 struct work_struct strict_work; /* Schedule readers for strict GPs. */
173 struct rcu_segcblist cblist; /* Segmented callback list, with different callbacks waiting for different grace periods. */
176 long qlen_last_fqs_check; /* qlen at last check for QS forcing. */
178 unsigned long n_cbs_invoked; /* # callbacks invoked since boot. */
179 unsigned long n_force_qs_snap; /* Did another CPU force QS recently? */
181 long blimit; /* Upper limit on a processed batch */
184 int dynticks_snap; /* Per-GP tracking for dynticks. */
185 long dynticks_nesting; /* Track process nesting level. */
186 long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
187 atomic_t dynticks; /* Even value for idle, else odd. */
188 bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
189 bool rcu_urgent_qs; /* GP old, so light quiescent state needed. */
190 bool rcu_forced_tick; /* Forced tick to provide QS. */
191 bool rcu_forced_tick_exp; /* ... provide QS to expedited GP. */
193 unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */
194 unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */
195 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
199 struct rcu_head barrier_head; /* Callback queued on this CPU by rcu_barrier(). */
200 int exp_dynticks_snap; /* Double-check need for IPI. */
204 struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
205 struct swait_queue_head nocb_state_wq; /* For offloading state changes */
206 struct task_struct *nocb_gp_kthread; /* GP kthread handling this CPU's offloaded callbacks. */
230 struct rcu_data *nocb_next_cb_rdp; /* Next rcu_data in wakeup chain. */
234 struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp; /* Used by the GP kthread, hence its own cacheline. */
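The ->dynticks and ->dynticks_snap fields above carry the idle-detection state described by their comments: the counter is even while the CPU is idle and odd otherwise, and the grace-period machinery compares a snapshot taken at grace-period start against the current value. The following is a minimal user-space sketch of that idea, using a toy_cpu type and toy_* helpers invented purely for illustration; it is not the kernel's actual implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_cpu {
	atomic_int dynticks;      /* even while idle, odd while running */
	int dynticks_snap;        /* value remembered at grace-period start */
};

static void toy_idle_enter(struct toy_cpu *cpu)
{
	atomic_fetch_add(&cpu->dynticks, 1);   /* odd -> even */
}

static void toy_idle_exit(struct toy_cpu *cpu)
{
	atomic_fetch_add(&cpu->dynticks, 1);   /* even -> odd */
}

/* Called when a grace period starts: remember what the CPU looked like. */
static void toy_gp_snapshot(struct toy_cpu *cpu)
{
	cpu->dynticks_snap = atomic_load(&cpu->dynticks);
}

/*
 * Called later by the grace-period machinery: the CPU has had a quiescent
 * state if it was idle at snapshot time (even value), or if the counter
 * has moved since then (it passed through idle at least once).
 */
static bool toy_cpu_had_qs(struct toy_cpu *cpu)
{
	int cur = atomic_load(&cpu->dynticks);

	return !(cpu->dynticks_snap & 1) || cur != cpu->dynticks_snap;
}

int main(void)
{
	struct toy_cpu cpu;

	atomic_init(&cpu.dynticks, 1);          /* CPU starts out non-idle (odd) */
	toy_gp_snapshot(&cpu);
	printf("no idle period yet: qs=%d\n", toy_cpu_had_qs(&cpu));  /* prints 0 */

	toy_idle_enter(&cpu);
	toy_idle_exit(&cpu);
	printf("after an idle pass: qs=%d\n", toy_cpu_had_qs(&cpu));  /* prints 1 */
	return 0;
}

The key point is the check in toy_cpu_had_qs(): a CPU that was idle when the snapshot was taken, or whose counter has changed since, must have passed through a quiescent state and need not be disturbed further for this grace period.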
259 /* Values for nocb_defer_wakeup field in struct rcu_data. */
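The comment above refers to the deferred-wakeup mechanism for offloaded (no-CBs) callbacks: when the enqueue path cannot safely wake the nocb GP kthread directly, it records the desired wakeup strength in ->nocb_defer_wakeup, and a later, safe context performs the actual wakeup. Below is a minimal sketch of that record-now, wake-later pattern; the enum values and function names are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative wakeup levels, modeled loosely on the idea behind ->nocb_defer_wakeup. */
enum toy_defer_wake {
	TOY_WAKE_NOT,    /* no wakeup pending */
	TOY_WAKE,        /* ordinary wakeup wanted */
	TOY_WAKE_FORCE,  /* urgent wakeup wanted */
};

struct toy_rdp {
	enum toy_defer_wake nocb_defer_wakeup;
};

/* Callback-enqueue path: cannot wake a kthread here, so just record the need. */
static void toy_defer_wakeup(struct toy_rdp *rdp, enum toy_defer_wake level)
{
	if (level > rdp->nocb_defer_wakeup)
		rdp->nocb_defer_wakeup = level;   /* keep the strongest request */
}

/* Later, from a context where waking a kthread is safe. */
static void toy_do_deferred_wakeup(struct toy_rdp *rdp)
{
	if (rdp->nocb_defer_wakeup == TOY_WAKE_NOT)
		return;
	printf("waking nocb GP kthread (level %d)\n", rdp->nocb_defer_wakeup);
	rdp->nocb_defer_wakeup = TOY_WAKE_NOT;
}

int main(void)
{
	struct toy_rdp rdp = { .nocb_defer_wakeup = TOY_WAKE_NOT };

	toy_defer_wakeup(&rdp, TOY_WAKE);        /* e.g. from interrupt context */
	toy_defer_wakeup(&rdp, TOY_WAKE_FORCE);  /* a more urgent request arrives */
	toy_do_deferred_wakeup(&rdp);            /* later, from process context */
	return 0;
}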