Lines matching the full word "gp" (grace period) in the Linux RCU header kernel/rcu/tree.h:
50 unsigned long gp_seq_needed; /* Track furthest future GP request. */
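Both this rcu_node field and its rcu_data twin at line 165 below record the furthest-future grace period that has been requested, using RCU's packed sequence numbers: the low two bits of a gp_seq value hold the phase (nonzero while a GP is in progress) and the remaining bits count grace periods. Below is a minimal user-space model of that encoding; the names mirror the kernel's rcu_seq_*() helpers in kernel/rcu/rcu.h, but this is a sketch, not the kernel's code (which adds memory barriers and READ_ONCE()/WRITE_ONCE() annotations).

#include <stdbool.h>

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Start a GP: the low bits become nonzero, marking "in progress". */
static void rcu_seq_start(unsigned long *sp)
{
	*sp += 1;
}

/* End a GP: round up to the next counter value with zero state bits. */
static void rcu_seq_end(unsigned long *sp)
{
	*sp = (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Snapshot: the smallest gp_seq at which a full GP will have elapsed. */
static unsigned long rcu_seq_snap(unsigned long *sp)
{
	return (*sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

/* Has gp_seq reached the snapshot s, allowing for wraparound? */
static bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return (long)(*sp - s) >= 0;
}

In these terms, recording a future request is roughly gp_seq_needed = max(gp_seq_needed, rcu_seq_snap(&gp_seq)).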
58 unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
60 /* Per-GP initial value for qsmask. */  (comment on ->qsmaskinit)
65 /* to allow the current expedited GP */  (continues the ->expmask comment)
68 /* Per-GP initial values for expmask. */  (comment on ->expmaskinit)
70 /* beginning of each expedited GP. */  (continues the ->expmaskinit comment)
72 /* Online CPUs for next expedited GP. */  (comment on ->expmaskinitnext)
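Lines 58-72 are the per-rcu_node masks that are re-armed at the start of every grace period: ->qsmask is reloaded from ->qsmaskinit, which itself is refreshed from ->qsmaskinitnext, and the exp* trio plays the same role for expedited GPs. Here is a hedged sketch of that two-stage hand-off, using a pared-down struct rather than the kernel's rcu_node; gp_init_masks() is an invented name, not a kernel function.

/* Pared-down stand-in for the rcu_node masks listed above. */
struct gp_masks {
	unsigned long qsmask;		/* CPUs still owing a QS this GP. */
	unsigned long qsmaskinit;	/* Per-GP initial value for qsmask. */
	unsigned long qsmaskinitnext;	/* Online CPUs for the next GP. */
};

/*
 * Propagate the "who is online now" mask one stage per grace period,
 * so a CPU that comes online mid-GP is not charged with a quiescent
 * state it never owed.  Sketch only; invented helper name.
 */
static void gp_init_masks(struct gp_masks *np)
{
	np->qsmaskinit = np->qsmaskinitnext;	/* Hotplug-stable copy. */
	np->qsmask = np->qsmaskinit;		/* Everyone must report. */
}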
127 /* Place for rcu_nocb_kthread() to wait GP. */  (comment on ->nocb_gp_wq[])
165 unsigned long gp_seq_needed; /* Track furthest future GP request. */
193 int dynticks_snap; /* Per-GP tracking for dynticks. */
194 bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
195 bool rcu_urgent_qs; /* GP old need light quiescent state. */
197 bool rcu_forced_tick_exp; /* ... provide QS to expedited GP. */  (the "..." is verbatim in the source, echoing the ->rcu_forced_tick comment just above)
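These per-CPU rcu_data flags (lines 193-197) are the knobs that grace-period aging turns: ->rcu_urgent_qs asks the CPU's scheduler-tick path to report a quiescent state cheaply, and ->rcu_need_heavy_qs later demands a heavyweight report. A hedged model of that escalation follows; the helper name and the jiffies thresholds are invented for illustration.

#include <stdbool.h>

/* Pared-down stand-in for the rcu_data flags listed above. */
struct cpu_qs_hints {
	bool rcu_urgent_qs;	/* GP old: please report a QS soon. */
	bool rcu_need_heavy_qs;	/* GP very old: heavyweight QS required. */
};

/* As a grace period ages, escalate from no hint to light to heavy. */
static void gp_age_escalate(struct cpu_qs_hints *hints,
			    unsigned long jiffies_since_gp_start)
{
	if (jiffies_since_gp_start > 2)		/* Invented threshold. */
		hints->rcu_urgent_qs = true;
	if (jiffies_since_gp_start > 100)	/* Invented threshold. */
		hints->rcu_need_heavy_qs = true;
}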
214 struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */  (comment continues: "spawning")
224 /* The following fields are used by GP kthread, hence own cacheline. */
226 u8 nocb_gp_sleep; /* Is the nocb GP thread asleep? */
228 u8 nocb_gp_gp; /* GP to wait for on last scan? */
243 /* GP rdp takes GP-end wakeups. */  (comment on ->nocb_gp_rdp)
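Lines 214-243 belong to the offloaded-callback (rcu_nocb) machinery, in which one rcu_data per group acts as the "GP rdp" and its kthread sleeps until a grace period is needed, with ->nocb_gp_sleep as the wakeup condition. Below is a hedged user-space model of that sleep/wake handshake using POSIX threads; everything here except the nocb_gp_sleep flag name is invented.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t nocb_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t nocb_cond = PTHREAD_COND_INITIALIZER;
static bool nocb_gp_sleep = true;	/* Mirrors ->nocb_gp_sleep above. */

/* The GP kthread's wait: sleep until someone needs a grace period. */
static void nocb_gp_wait_model(void)
{
	pthread_mutex_lock(&nocb_lock);
	while (nocb_gp_sleep)
		pthread_cond_wait(&nocb_cond, &nocb_lock);
	nocb_gp_sleep = true;		/* Re-arm for the next cycle. */
	pthread_mutex_unlock(&nocb_lock);
}

/* A CPU enqueueing a callback wakes the group's GP kthread. */
static void nocb_gp_kick_model(void)
{
	pthread_mutex_lock(&nocb_lock);
	nocb_gp_sleep = false;
	pthread_cond_signal(&nocb_cond);
	pthread_mutex_unlock(&nocb_lock);
}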
320 unsigned long gp_max; /* Maximum GP duration in */  (comment continues: "jiffies")
323 struct swait_queue_head gp_wq; /* Where GP task waits. */
324 short gp_flags; /* Commands for GP task. */
325 short gp_state; /* GP kthread sleep state. */
326 unsigned long gp_wake_time; /* Last GP kthread wake. */
328 unsigned long gp_seq_polled; /* GP seq for polled API. */
329 unsigned long gp_seq_polled_snap; /* ->gp_seq_polled at normal GP start. */
330 unsigned long gp_seq_polled_exp_snap; /* ->gp_seq_polled at expedited GP start. */
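The three gp_seq_polled fields (lines 328-330) back RCU's polled grace-period interface: a caller takes a cookie, does other work, and later asks whether a full grace period has elapsed since the cookie was taken. get_state_synchronize_rcu() and poll_state_synchronize_rcu() are the real kernel entry points; the deferred-free list wrapped around them here is invented for illustration.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Invented example: defer freeing an object until a GP has elapsed. */
struct defer {
	struct defer *next;
	unsigned long gp_cookie;	/* From get_state_synchronize_rcu(). */
};

/* Record "a full GP after this point must elapse before d is freed". */
static void defer_free(struct defer *d, struct defer **list)
{
	d->gp_cookie = get_state_synchronize_rcu();
	d->next = *list;
	*list = d;
}

/* Reap the head entry once its grace period has fully elapsed.  The
 * list is LIFO, so if the newest cookie has expired, all have. */
static void defer_reap(struct defer **list)
{
	struct defer *d = *list;

	if (d && poll_state_synchronize_rcu(d->gp_cookie)) {
		*list = d->next;
		kfree(d);
	}
}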
343 struct mutex exp_mutex; /* Serialize expedited GP. */
358 unsigned long gp_start; /* Time at which GP started, */  (comment continues: "but in jiffies")
360 unsigned long gp_end; /* Time last GP ended, again */  (comment continues: "in jiffies")
362 unsigned long gp_activity; /* Time of last GP kthread */  (comment continues: "activity in jiffies")
364 unsigned long gp_req_activity; /* Time of last GP request */  (comment continues: "in jiffies")
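Lines 358-364 are all jiffies timestamps, consumed mainly by the stall-warning and debugging code: when the current GP began, when the last one ended, and when the GP kthread last ran or was last asked to run. Together with ->gp_max at line 320 they support simple end-of-GP bookkeeping, sketched here under invented names.

/* Pared-down stand-in for the rcu_state timestamps listed above. */
struct gp_times {
	unsigned long gp_start;	/* Jiffies when the current GP started. */
	unsigned long gp_end;	/* Jiffies when the last GP ended. */
	unsigned long gp_max;	/* Longest GP duration seen, in jiffies. */
};

/* Hedged sketch of end-of-GP bookkeeping; the helper name is invented. */
static void gp_cleanup_times(struct gp_times *t, unsigned long now_jiffies)
{
	unsigned long gp_duration;

	t->gp_end = now_jiffies;
	gp_duration = t->gp_end - t->gp_start;
	if (gp_duration > t->gp_max)
		t->gp_max = gp_duration;	/* New record GP length. */
}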
371 /* GP start. */  (continues the ->n_force_qs_gpstart comment: "Snapshot of n_force_qs at GP start")
377 /* GP pre-initialization. */  (continues the ->ofl_lock comment: "Synchronize offline with GP pre-initialization")
387 #define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */
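RCU_GP_IDLE is the initial value of ->gp_state (line 325), which records where in its loop the GP kthread is currently sleeping; ->gp_flags (line 324) carries the commands that move it along. A hedged model of the kthread's top-level cycle follows: only RCU_GP_IDLE comes from this listing, the other state names are illustrative stand-ins for the sibling defines in the same header, and run_one_gp_step() is invented.

/* Illustrative states; MODEL_GP_IDLE mirrors RCU_GP_IDLE above. */
enum gp_state_model {
	MODEL_GP_IDLE,		/* Initial state and no GP in progress. */
	MODEL_GP_WAIT_GPS,	/* Sleeping until a GP is requested. */
	MODEL_GP_INIT,		/* Initializing a new GP. */
	MODEL_GP_WAIT_FQS,	/* Sleeping between force-QS scans. */
	MODEL_GP_CLEANUP,	/* Marking the GP as ended. */
};

/* One step of the GP kthread's cycle (invented helper). */
static enum gp_state_model run_one_gp_step(enum gp_state_model s)
{
	switch (s) {
	case MODEL_GP_IDLE:
	case MODEL_GP_WAIT_GPS:
		return MODEL_GP_INIT;		/* A GP was requested. */
	case MODEL_GP_INIT:
		return MODEL_GP_WAIT_FQS;	/* Wait for quiescent states. */
	case MODEL_GP_WAIT_FQS:
		return MODEL_GP_CLEANUP;	/* All QSes reported. */
	case MODEL_GP_CLEANUP:
		return MODEL_GP_WAIT_GPS;	/* Idle until next request. */
	}
	return MODEL_GP_IDLE;
}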