1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  *
5  * Copyright IBM Corporation, 2008
6  *
7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8  *	    Manfred Spraul <manfred@colorfullife.com>
9  *	    Paul E. McKenney <paulmck@linux.ibm.com>
10  *
11  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13  *
14  * For detailed explanation of Read-Copy Update mechanism see -
15  *	Documentation/RCU
16  */
17 
18 #define pr_fmt(fmt) "rcu: " fmt
19 
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/moduleparam.h>
35 #include <linux/percpu.h>
36 #include <linux/notifier.h>
37 #include <linux/cpu.h>
38 #include <linux/mutex.h>
39 #include <linux/time.h>
40 #include <linux/kernel_stat.h>
41 #include <linux/wait.h>
42 #include <linux/kthread.h>
43 #include <uapi/linux/sched/types.h>
44 #include <linux/prefetch.h>
45 #include <linux/delay.h>
46 #include <linux/random.h>
47 #include <linux/trace_events.h>
48 #include <linux/suspend.h>
49 #include <linux/ftrace.h>
50 #include <linux/tick.h>
51 #include <linux/sysrq.h>
52 #include <linux/kprobes.h>
53 #include <linux/gfp.h>
54 #include <linux/oom.h>
55 #include <linux/smpboot.h>
56 #include <linux/jiffies.h>
57 #include <linux/slab.h>
58 #include <linux/sched/isolation.h>
59 #include <linux/sched/clock.h>
60 #include <linux/vmalloc.h>
61 #include <linux/mm.h>
62 #include <linux/kasan.h>
63 #include "../time/tick-internal.h"
64 
65 #include "tree.h"
66 #include "rcu.h"
67 
68 #ifdef MODULE_PARAM_PREFIX
69 #undef MODULE_PARAM_PREFIX
70 #endif
71 #define MODULE_PARAM_PREFIX "rcutree."
72 
73 /* Data structures. */
74 
75 /*
76  * Steal a bit from the bottom of ->dynticks for idle entry/exit
77  * control.  Initially this is for TLB flushing.
78  */
79 #define RCU_DYNTICK_CTRL_MASK 0x1
80 #define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
81 
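/*
 * For illustration, assuming the values defined above (RCU_DYNTICK_CTRL_MASK
 * is 0x1 and RCU_DYNTICK_CTRL_CTR is 0x2), a ->dynticks value decomposes as:
 *
 *	bit 0:      special-action request (for example, a deferred TLB flush)
 *	bits 1..N:  extended-quiescent-state transition counter, bumped by
 *		    RCU_DYNTICK_CTRL_CTR on each idle entry and each idle exit
 *
 * Because every transition adds 0x2, bit 1 alternates between "RCU is
 * watching" (set) and "extended quiescent state" (clear), which is exactly
 * what rcu_dynticks_curr_cpu_in_eqs() tests further below.
 */
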
82 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
83 	.dynticks_nesting = 1,
84 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
85 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
86 };
87 static struct rcu_state rcu_state = {
88 	.level = { &rcu_state.node[0] },
89 	.gp_state = RCU_GP_IDLE,
90 	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
91 	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
92 	.name = RCU_NAME,
93 	.abbr = RCU_ABBR,
94 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
95 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
96 	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
97 };
98 
99 /* Dump rcu_node combining tree at boot to verify correct setup. */
100 static bool dump_tree;
101 module_param(dump_tree, bool, 0444);
102 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
103 static bool use_softirq = true;
104 module_param(use_softirq, bool, 0444);
105 /* Control rcu_node-tree auto-balancing at boot time. */
106 static bool rcu_fanout_exact;
107 module_param(rcu_fanout_exact, bool, 0444);
108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
110 module_param(rcu_fanout_leaf, int, 0444);
111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
112 /* Number of rcu_nodes at specified level. */
113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
115 
116 /*
117  * The rcu_scheduler_active variable is initialized to the value
118  * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
119  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
120  * RCU can assume that there is but one task, allowing RCU to (for example)
121  * optimize synchronize_rcu() to a simple barrier().  When this variable
122  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
123  * to detect real grace periods.  This variable is also used to suppress
124  * boot-time false positives from lockdep-RCU error checking.  Finally, it
125  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
126  * is fully initialized, including all of its kthreads having been spawned.
127  */
128 int rcu_scheduler_active __read_mostly;
129 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
130 
131 /*
132  * The rcu_scheduler_fully_active variable transitions from zero to one
133  * during the early_initcall() processing, which is after the scheduler
134  * is capable of creating new tasks.  So RCU processing (for example,
135  * creating tasks for RCU priority boosting) must be delayed until after
136  * rcu_scheduler_fully_active transitions from zero to one.  We also
137  * currently delay invocation of any RCU callbacks until after this point.
138  *
139  * It might later prove better for people registering RCU callbacks during
140  * early boot to take responsibility for these callbacks, but one step at
141  * a time.
142  */
143 static int rcu_scheduler_fully_active __read_mostly;
144 
145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146 			      unsigned long gps, unsigned long flags);
147 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
148 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
150 static void invoke_rcu_core(void);
151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
152 static void sync_sched_exp_online_cleanup(int cpu);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154 
155 /* rcuc/rcub kthread realtime priority */
156 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
157 module_param(kthread_prio, int, 0444);
158 
159 /* Delay in jiffies for grace-period initialization delays, debug only. */
160 
161 static int gp_preinit_delay;
162 module_param(gp_preinit_delay, int, 0444);
163 static int gp_init_delay;
164 module_param(gp_init_delay, int, 0444);
165 static int gp_cleanup_delay;
166 module_param(gp_cleanup_delay, int, 0444);
167 
168 // Add delay to rcu_read_unlock() for strict grace periods.
169 static int rcu_unlock_delay;
170 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
171 module_param(rcu_unlock_delay, int, 0444);
172 #endif
173 
174 /*
175  * This rcu parameter is runtime-read-only. It reflects
176  * the minimum number of objects which can be cached
177  * per CPU. Object size is equal to one page. This value
178  * can be changed at boot time.
179  */
180 static int rcu_min_cached_objs = 2;
181 module_param(rcu_min_cached_objs, int, 0444);
182 
183 /* Retrieve RCU kthreads priority for rcutorture */
184 int rcu_get_gp_kthreads_prio(void)
185 {
186 	return kthread_prio;
187 }
188 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
189 
190 /*
191  * Number of grace periods between delays, normalized by the duration of
192  * the delay.  The longer the delay, the more grace periods between
193  * each delay.  The reason for this normalization is that it means that,
194  * for non-zero delays, the overall slowdown of grace periods is constant
195  * regardless of the duration of the delay.  This arrangement balances
196  * the need for long delays to increase some race probabilities with the
197  * need for fast grace periods to increase other race probabilities.
198  */
199 #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
200 
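/*
 * Illustrative arithmetic only: if gp_init_delay were set to D jiffies,
 * a delay of D jiffies would be inserted only about once every
 * PER_RCU_NODE_PERIOD * D grace periods, so the average added latency
 * per grace period stays roughly constant whether D is 1 or 100.  Larger
 * values of D therefore widen race windows without slowing overall
 * grace-period progress proportionally.
 */
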
201 /*
202  * Compute the mask of online CPUs for the specified rcu_node structure.
203  * This will not be stable unless the rcu_node structure's ->lock is
204  * held, but the bit corresponding to the current CPU will be stable
205  * in most contexts.
206  */
207 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
208 {
209 	return READ_ONCE(rnp->qsmaskinitnext);
210 }
211 
212 /*
213  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
214  * permit this function to be invoked without holding the root rcu_node
215  * structure's ->lock, but of course results can be subject to change.
216  */
217 static int rcu_gp_in_progress(void)
218 {
219 	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
220 }
221 
222 /*
223  * Return the number of callbacks queued on the specified CPU.
224  * Handles both the nocbs and normal cases.
225  */
226 static long rcu_get_n_cbs_cpu(int cpu)
227 {
228 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
229 
230 	if (rcu_segcblist_is_enabled(&rdp->cblist))
231 		return rcu_segcblist_n_cbs(&rdp->cblist);
232 	return 0;
233 }
234 
235 void rcu_softirq_qs(void)
236 {
237 	rcu_qs();
238 	rcu_preempt_deferred_qs(current);
239 }
240 
241 /*
242  * Record entry into an extended quiescent state.  This is only to be
243  * called when not already in an extended quiescent state, that is,
244  * RCU is watching prior to the call to this function and is no longer
245  * watching upon return.
246  */
247 static noinstr void rcu_dynticks_eqs_enter(void)
248 {
249 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
250 	int seq;
251 
252 	/*
253 	 * CPUs seeing atomic_add_return() must see prior RCU read-side
254 	 * critical sections, and we also must force ordering with the
255 	 * next idle sojourn.
256 	 */
257 	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
258 	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
259 	// RCU is no longer watching.  Better be in extended quiescent state!
260 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
261 		     (seq & RCU_DYNTICK_CTRL_CTR));
262 	/* Better not have special action (TLB flush) pending! */
263 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
264 		     (seq & RCU_DYNTICK_CTRL_MASK));
265 }
266 
267 /*
268  * Record exit from an extended quiescent state.  This is only to be
269  * called from an extended quiescent state, that is, RCU is not watching
270  * prior to the call to this function and is watching upon return.
271  */
272 static noinstr void rcu_dynticks_eqs_exit(void)
273 {
274 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
275 	int seq;
276 
277 	/*
278 	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
279 	 * and we also must force ordering with the next RCU read-side
280 	 * critical section.
281 	 */
282 	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
283 	// RCU is now watching.  Better not be in an extended quiescent state!
284 	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
285 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
286 		     !(seq & RCU_DYNTICK_CTRL_CTR));
287 	if (seq & RCU_DYNTICK_CTRL_MASK) {
288 		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
289 		smp_mb__after_atomic(); /* _exit after clearing mask. */
290 	}
291 }
292 
293 /*
294  * Reset the current CPU's ->dynticks counter to indicate that the
295  * newly onlined CPU is no longer in an extended quiescent state.
296  * This will either leave the counter unchanged, or increment it
297  * to the next non-quiescent value.
298  *
299  * The non-atomic test/increment sequence works because the upper bits
300  * of the ->dynticks counter are manipulated only by the corresponding CPU,
301  * or when the corresponding CPU is offline.
302  */
303 static void rcu_dynticks_eqs_online(void)
304 {
305 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
306 
307 	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
308 		return;
309 	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
310 }
311 
312 /*
313  * Is the current CPU in an extended quiescent state?
314  *
315  * No ordering, as we are sampling CPU-local information.
316  */
317 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
318 {
319 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
320 
321 	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
322 }
323 
324 /*
325  * Snapshot the ->dynticks counter with full ordering so as to allow
326  * stable comparison of this counter with past and future snapshots.
327  */
328 static int rcu_dynticks_snap(struct rcu_data *rdp)
329 {
330 	int snap = atomic_add_return(0, &rdp->dynticks);
331 
332 	return snap & ~RCU_DYNTICK_CTRL_MASK;
333 }
334 
335 /*
336  * Return true if the snapshot returned from rcu_dynticks_snap()
337  * indicates that RCU is in an extended quiescent state.
338  */
339 static bool rcu_dynticks_in_eqs(int snap)
340 {
341 	return !(snap & RCU_DYNTICK_CTRL_CTR);
342 }
343 
344 /*
345  * Return true if the CPU corresponding to the specified rcu_data
346  * structure has spent some time in an extended quiescent state since
347  * rcu_dynticks_snap() returned the specified snapshot.
348  */
349 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
350 {
351 	return snap != rcu_dynticks_snap(rdp);
352 }
353 
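/*
 * The helpers above are used as a snapshot/recheck pair by the
 * force-quiescent-state scan: dyntick_save_progress_counter() (later in
 * this file) records rcu_dynticks_snap(), and a subsequent pass asks
 * rcu_dynticks_in_eqs_since() whether the counter has since moved.  A
 * sketch of that pattern:
 *
 *	snap = rcu_dynticks_snap(rdp);
 *	if (rcu_dynticks_in_eqs(snap))
 *		return 1;	(already idle, hence an implicit QS)
 *	...			(time passes, CPU may enter and leave idle)
 *	if (rcu_dynticks_in_eqs_since(rdp, snap))
 *		return 1;	(passed through idle since the snapshot)
 *
 * Any change in the snapshot implies at least one extended quiescent
 * state, and thus full ordering, since the snapshot was taken.
 */
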
354 /*
355  * Return true if the referenced integer is zero while the specified
356  * CPU remains within a single extended quiescent state.
357  */
358 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
359 {
360 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
361 	int snap;
362 
363 	// If not quiescent, force back to earlier extended quiescent state.
364 	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
365 					       RCU_DYNTICK_CTRL_CTR);
366 
367 	smp_rmb(); // Order ->dynticks and *vp reads.
368 	if (READ_ONCE(*vp))
369 		return false;  // Non-zero, so report failure;
370 	smp_rmb(); // Order *vp read and ->dynticks re-read.
371 
372 	// If still in the same extended quiescent state, we are good!
373 	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
374 }
375 
376 /*
377  * Set the special (bottom) bit of the specified CPU so that it
378  * will take special action (such as flushing its TLB) on the
379  * next exit from an extended quiescent state.  Returns true if
380  * the bit was successfully set, or false if the CPU was not in
381  * an extended quiescent state.
382  */
383 bool rcu_eqs_special_set(int cpu)
384 {
385 	int old;
386 	int new;
387 	int new_old;
388 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
389 
390 	new_old = atomic_read(&rdp->dynticks);
391 	do {
392 		old = new_old;
393 		if (old & RCU_DYNTICK_CTRL_CTR)
394 			return false;
395 		new = old | RCU_DYNTICK_CTRL_MASK;
396 		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
397 	} while (new_old != old);
398 	return true;
399 }
400 
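/*
 * A hypothetical caller (sketch only; flush_my_tlb() is a made-up handler,
 * not an existing kernel function) might use rcu_eqs_special_set() to avoid
 * IPIing idle CPUs:
 *
 *	for_each_online_cpu(cpu) {
 *		if (rcu_eqs_special_set(cpu))
 *			continue;	(idle CPU acts on its next EQS exit)
 *		smp_call_function_single(cpu, flush_my_tlb, NULL, 1);
 *	}
 *
 * The eventual consumer of the special bit would hook into
 * rcu_dynticks_eqs_exit() above, which is where RCU_DYNTICK_CTRL_MASK
 * gets cleared.
 */
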
401 /*
402  * Let the RCU core know that this CPU has gone through the scheduler,
403  * which is a quiescent state.  This is called when the need for a
404  * quiescent state is urgent, so we burn an atomic operation and full
405  * memory barriers to let the RCU core know about it, regardless of what
406  * this CPU might (or might not) do in the near future.
407  *
408  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
409  *
410  * The caller must have disabled interrupts and must not be idle.
411  */
412 notrace void rcu_momentary_dyntick_idle(void)
413 {
414 	int special;
415 
416 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
417 	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
418 				    &this_cpu_ptr(&rcu_data)->dynticks);
419 	/* It is illegal to call this from idle state. */
420 	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
421 	rcu_preempt_deferred_qs(current);
422 }
423 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
424 
425 /**
426  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
427  *
428  * If the current CPU is idle and running at a first-level (not nested)
429  * interrupt, or directly from idle, return true.
430  *
431  * The caller must have at least disabled IRQs.
432  */
433 static int rcu_is_cpu_rrupt_from_idle(void)
434 {
435 	long nesting;
436 
437 	/*
438 	 * Usually called from the tick, but also used from smp_call_function_single()
439 	 * for expedited grace periods. This latter can result in running from
440 	 * the idle task, instead of an actual IPI.
441 	 */
442 	lockdep_assert_irqs_disabled();
443 
444 	/* Check for counter underflows */
445 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
446 			 "RCU dynticks_nesting counter underflow!");
447 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
448 			 "RCU dynticks_nmi_nesting counter underflow/zero!");
449 
450 	/* Are we at first interrupt nesting level? */
451 	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
452 	if (nesting > 1)
453 		return false;
454 
455 	/*
456 	 * If we're not in an interrupt, we must be in the idle task!
457 	 */
458 	WARN_ON_ONCE(!nesting && !is_idle_task(current));
459 
460 	/* Does CPU appear to be idle from an RCU standpoint? */
461 	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
462 }
463 
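/*
 * For illustration, typical counter states seen by rcu_is_cpu_rrupt_from_idle()
 * (example values, not compiled code):
 *
 *	context				->dynticks_nesting	->dynticks_nmi_nesting
 *	idle loop				0			0
 *	irq taken from idle			0			1
 *	nested irq/NMI from idle		0			3, 5, ...
 *	non-idle task context			1		DYNTICK_IRQ_NONIDLE
 *
 * Only the "idle loop" and "irq taken from idle" rows satisfy both checks
 * above, so only they cause the function to return true.
 */
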
464 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
465 				// Maximum callbacks per rcu_do_batch ...
466 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
467 static long blimit = DEFAULT_RCU_BLIMIT;
468 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
469 static long qhimark = DEFAULT_RCU_QHIMARK;
470 #define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
471 static long qlowmark = DEFAULT_RCU_QLOMARK;
472 #define DEFAULT_RCU_QOVLD_MULT 2
473 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
474 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
475 static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!
476 
477 module_param(blimit, long, 0444);
478 module_param(qhimark, long, 0444);
479 module_param(qlowmark, long, 0444);
480 module_param(qovld, long, 0444);
481 
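/*
 * With the default (non-CONFIG_RCU_STRICT_GRACE_PERIOD) values above,
 * callback processing behaves roughly as follows (summary only):
 *
 *	pending < qlowmark (100):	invoke at most blimit (10) callbacks
 *					per rcu_do_batch() pass
 *	pending > qhimark (10000):	ignore blimit and keep invoking
 *	pending > qovld (20000):	additionally flag callback overload and
 *					start hammering CPUs for quiescent states
 */
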
482 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
483 static ulong jiffies_till_next_fqs = ULONG_MAX;
484 static bool rcu_kick_kthreads;
485 static int rcu_divisor = 7;
486 module_param(rcu_divisor, int, 0644);
487 
488 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
489 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
490 module_param(rcu_resched_ns, long, 0644);
491 
492 /*
493  * How long the grace period must be before we start recruiting
494  * quiescent-state help from rcu_note_context_switch().
495  */
496 static ulong jiffies_till_sched_qs = ULONG_MAX;
497 module_param(jiffies_till_sched_qs, ulong, 0444);
498 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
499 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
500 
501 /*
502  * Make sure that we give the grace-period kthread time to detect any
503  * idle CPUs before taking active measures to force quiescent states.
504  * However, don't go below 100 milliseconds, adjusted upwards for really
505  * large systems.
506  */
507 static void adjust_jiffies_till_sched_qs(void)
508 {
509 	unsigned long j;
510 
511 	/* If jiffies_till_sched_qs was specified, respect the request. */
512 	if (jiffies_till_sched_qs != ULONG_MAX) {
513 		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
514 		return;
515 	}
516 	/* Otherwise, set to third fqs scan, but bound below on large system. */
517 	j = READ_ONCE(jiffies_till_first_fqs) +
518 		      2 * READ_ONCE(jiffies_till_next_fqs);
519 	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
520 		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
521 	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
522 	WRITE_ONCE(jiffies_to_sched_qs, j);
523 }
524 
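/*
 * Worked example with illustrative numbers: at HZ=1000 on a 64-CPU system
 * with jiffies_till_first_fqs=1 and jiffies_till_next_fqs=1, the computed
 * j = 1 + 2 * 1 = 3 jiffies, which is below the floor of HZ/10 = 100 jiffies
 * (plus a small nr_cpu_ids/RCU_JIFFIES_FQS_DIV adjustment that only matters
 * on very large systems), so jiffies_to_sched_qs ends up being 100.
 */
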
525 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
526 {
527 	ulong j;
528 	int ret = kstrtoul(val, 0, &j);
529 
530 	if (!ret) {
531 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
532 		adjust_jiffies_till_sched_qs();
533 	}
534 	return ret;
535 }
536 
537 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
538 {
539 	ulong j;
540 	int ret = kstrtoul(val, 0, &j);
541 
542 	if (!ret) {
543 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
544 		adjust_jiffies_till_sched_qs();
545 	}
546 	return ret;
547 }
548 
549 static struct kernel_param_ops first_fqs_jiffies_ops = {
550 	.set = param_set_first_fqs_jiffies,
551 	.get = param_get_ulong,
552 };
553 
554 static struct kernel_param_ops next_fqs_jiffies_ops = {
555 	.set = param_set_next_fqs_jiffies,
556 	.get = param_get_ulong,
557 };
558 
559 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
560 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
561 module_param(rcu_kick_kthreads, bool, 0644);
562 
563 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
564 static int rcu_pending(int user);
565 
566 /*
567  * Return the number of RCU GPs completed thus far for debug & stats.
568  */
569 unsigned long rcu_get_gp_seq(void)
570 {
571 	return READ_ONCE(rcu_state.gp_seq);
572 }
573 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
574 
575 /*
576  * Return the number of RCU expedited batches completed thus far for
577  * debug & stats.  Odd numbers mean that a batch is in progress, even
578  * numbers mean idle.  The value returned will thus be roughly double
579  * the cumulative batches since boot.
580  */
581 unsigned long rcu_exp_batches_completed(void)
582 {
583 	return rcu_state.expedited_sequence;
584 }
585 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
586 
587 /*
588  * Return the root node of the rcu_state structure.
589  */
590 static struct rcu_node *rcu_get_root(void)
591 {
592 	return &rcu_state.node[0];
593 }
594 
595 /*
596  * Send along grace-period-related data for rcutorture diagnostics.
597  */
598 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
599 			    unsigned long *gp_seq)
600 {
601 	switch (test_type) {
602 	case RCU_FLAVOR:
603 		*flags = READ_ONCE(rcu_state.gp_flags);
604 		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
605 		break;
606 	default:
607 		break;
608 	}
609 }
610 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
611 
612 /*
613  * Enter an RCU extended quiescent state, which can be either the
614  * idle loop or adaptive-tickless usermode execution.
615  *
616  * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
617  * the possibility of usermode upcalls having messed up our count
618  * of interrupt nesting level during the prior busy period.
619  */
620 static noinstr void rcu_eqs_enter(bool user)
621 {
622 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
623 
624 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
625 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
626 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
627 		     rdp->dynticks_nesting == 0);
628 	if (rdp->dynticks_nesting != 1) {
629 		// RCU will still be watching, so just do accounting and leave.
630 		rdp->dynticks_nesting--;
631 		return;
632 	}
633 
634 	lockdep_assert_irqs_disabled();
635 	instrumentation_begin();
636 	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
637 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
638 	rdp = this_cpu_ptr(&rcu_data);
639 	do_nocb_deferred_wakeup(rdp);
640 	rcu_prepare_for_idle();
641 	rcu_preempt_deferred_qs(current);
642 
643 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
644 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
645 
646 	instrumentation_end();
647 	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
648 	// RCU is watching here ...
649 	rcu_dynticks_eqs_enter();
650 	// ... but is no longer watching here.
651 	rcu_dynticks_task_enter();
652 }
653 
654 /**
655  * rcu_idle_enter - inform RCU that current CPU is entering idle
656  *
657  * Enter idle mode, in other words, -leave- the mode in which RCU
658  * read-side critical sections can occur.  (Though RCU read-side
659  * critical sections can occur in irq handlers in idle, a possibility
660  * handled by irq_enter() and irq_exit().)
661  *
662  * If you add or remove a call to rcu_idle_enter(), be sure to test with
663  * CONFIG_RCU_EQS_DEBUG=y.
664  */
665 void rcu_idle_enter(void)
666 {
667 	lockdep_assert_irqs_disabled();
668 	rcu_eqs_enter(false);
669 }
670 EXPORT_SYMBOL_GPL(rcu_idle_enter);
671 
672 #ifdef CONFIG_NO_HZ_FULL
673 /**
674  * rcu_user_enter - inform RCU that we are resuming userspace.
675  *
676  * Enter RCU idle mode right before resuming userspace.  No use of RCU
677  * is permitted between this call and rcu_user_exit(). This way the
678  * CPU doesn't need to maintain the tick for RCU maintenance purposes
679  * when the CPU runs in userspace.
680  *
681  * If you add or remove a call to rcu_user_enter(), be sure to test with
682  * CONFIG_RCU_EQS_DEBUG=y.
683  */
684 noinstr void rcu_user_enter(void)
685 {
686 	lockdep_assert_irqs_disabled();
687 	rcu_eqs_enter(true);
688 }
689 #endif /* CONFIG_NO_HZ_FULL */
690 
691 /**
692  * rcu_nmi_exit - inform RCU of exit from NMI context
693  *
694  * If we are returning from the outermost NMI handler that interrupted an
695  * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
696  * to let the RCU grace-period handling know that the CPU is back to
697  * being RCU-idle.
698  *
699  * If you add or remove a call to rcu_nmi_exit(), be sure to test
700  * with CONFIG_RCU_EQS_DEBUG=y.
701  */
702 noinstr void rcu_nmi_exit(void)
703 {
704 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
705 
706 	instrumentation_begin();
707 	/*
708 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
709 	 * (We are exiting an NMI handler, so RCU better be paying attention
710 	 * to us!)
711 	 */
712 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
713 	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
714 
715 	/*
716 	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
717 	 * leave it in non-RCU-idle state.
718 	 */
719 	if (rdp->dynticks_nmi_nesting != 1) {
720 		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
721 				  atomic_read(&rdp->dynticks));
722 		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
723 			   rdp->dynticks_nmi_nesting - 2);
724 		instrumentation_end();
725 		return;
726 	}
727 
728 	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
729 	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
730 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
731 
732 	if (!in_nmi())
733 		rcu_prepare_for_idle();
734 
735 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
736 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
737 	instrumentation_end();
738 
739 	// RCU is watching here ...
740 	rcu_dynticks_eqs_enter();
741 	// ... but is no longer watching here.
742 
743 	if (!in_nmi())
744 		rcu_dynticks_task_enter();
745 }
746 
747 /**
748  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
749  *
750  * Exit from an interrupt handler, which might possibly result in entering
751  * idle mode, in other words, leaving the mode in which read-side critical
752  * sections can occur.  The caller must have disabled interrupts.
753  *
754  * This code assumes that the idle loop never does anything that might
755  * result in unbalanced calls to irq_enter() and irq_exit().  If your
756  * architecture's idle loop violates this assumption, RCU will give you what
757  * you deserve, good and hard.  But very infrequently and irreproducibly.
758  *
759  * Use things like work queues to work around this limitation.
760  *
761  * You have been warned.
762  *
763  * If you add or remove a call to rcu_irq_exit(), be sure to test with
764  * CONFIG_RCU_EQS_DEBUG=y.
765  */
766 void noinstr rcu_irq_exit(void)
767 {
768 	lockdep_assert_irqs_disabled();
769 	rcu_nmi_exit();
770 }
771 
772 /**
773  * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
774  *			  towards in-kernel preemption
775  *
776  * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
777  * from RCU point of view. Invoked from return from interrupt before kernel
778  * preemption.
779  */
780 void rcu_irq_exit_preempt(void)
781 {
782 	lockdep_assert_irqs_disabled();
783 	rcu_nmi_exit();
784 
785 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
786 			 "RCU dynticks_nesting counter underflow/zero!");
787 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
788 			 DYNTICK_IRQ_NONIDLE,
789 			 "Bad RCU  dynticks_nmi_nesting counter\n");
790 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
791 			 "RCU in extended quiescent state!");
792 }
793 
794 #ifdef CONFIG_PROVE_RCU
795 /**
796  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
797  */
798 void rcu_irq_exit_check_preempt(void)
799 {
800 	lockdep_assert_irqs_disabled();
801 
802 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
803 			 "RCU dynticks_nesting counter underflow/zero!");
804 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
805 			 DYNTICK_IRQ_NONIDLE,
806 			 "Bad RCU  dynticks_nmi_nesting counter\n");
807 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
808 			 "RCU in extended quiescent state!");
809 }
810 #endif /* #ifdef CONFIG_PROVE_RCU */
811 
812 /*
813  * Wrapper for rcu_irq_exit() where interrupts are enabled.
814  *
815  * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
816  * with CONFIG_RCU_EQS_DEBUG=y.
817  */
818 void rcu_irq_exit_irqson(void)
819 {
820 	unsigned long flags;
821 
822 	local_irq_save(flags);
823 	rcu_irq_exit();
824 	local_irq_restore(flags);
825 }
826 
827 /*
828  * Exit an RCU extended quiescent state, which can be either the
829  * idle loop or adaptive-tickless usermode execution.
830  *
831  * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
832  * allow for the possibility of usermode upcalls messing up our count of
833  * interrupt nesting level during the busy period that is just now starting.
834  */
835 static void noinstr rcu_eqs_exit(bool user)
836 {
837 	struct rcu_data *rdp;
838 	long oldval;
839 
840 	lockdep_assert_irqs_disabled();
841 	rdp = this_cpu_ptr(&rcu_data);
842 	oldval = rdp->dynticks_nesting;
843 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
844 	if (oldval) {
845 		// RCU was already watching, so just do accounting and leave.
846 		rdp->dynticks_nesting++;
847 		return;
848 	}
849 	rcu_dynticks_task_exit();
850 	// RCU is not watching here ...
851 	rcu_dynticks_eqs_exit();
852 	// ... but is watching here.
853 	instrumentation_begin();
854 
855 	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
856 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
857 
858 	rcu_cleanup_after_idle();
859 	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
860 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
861 	WRITE_ONCE(rdp->dynticks_nesting, 1);
862 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
863 	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
864 	instrumentation_end();
865 }
866 
867 /**
868  * rcu_idle_exit - inform RCU that current CPU is leaving idle
869  *
870  * Exit idle mode, in other words, -enter- the mode in which RCU
871  * read-side critical sections can occur.
872  *
873  * If you add or remove a call to rcu_idle_exit(), be sure to test with
874  * CONFIG_RCU_EQS_DEBUG=y.
875  */
876 void rcu_idle_exit(void)
877 {
878 	unsigned long flags;
879 
880 	local_irq_save(flags);
881 	rcu_eqs_exit(false);
882 	local_irq_restore(flags);
883 }
884 EXPORT_SYMBOL_GPL(rcu_idle_exit);
885 
886 #ifdef CONFIG_NO_HZ_FULL
887 /**
888  * rcu_user_exit - inform RCU that we are exiting userspace.
889  *
890  * Exit RCU idle mode while entering the kernel because it can
891  * run an RCU read-side critical section anytime.
892  *
893  * If you add or remove a call to rcu_user_exit(), be sure to test with
894  * CONFIG_RCU_EQS_DEBUG=y.
895  */
896 void noinstr rcu_user_exit(void)
897 {
898 	rcu_eqs_exit(1);
899 }
900 
901 /**
902  * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
903  *
904  * The scheduler tick is not normally enabled when CPUs enter the kernel
905  * from nohz_full userspace execution.  After all, nohz_full userspace
906  * execution is an RCU quiescent state and the time executing in the kernel
907  * is quite short.  Except of course when it isn't.  And it is not hard to
908  * cause a large system to spend tens of seconds or even minutes looping
909  * in the kernel, which can cause a number of problems, including RCU CPU
910  * stall warnings.
911  *
912  * Therefore, if a nohz_full CPU fails to report a quiescent state
913  * in a timely manner, the RCU grace-period kthread sets that CPU's
914  * ->rcu_urgent_qs flag with the expectation that the next interrupt or
915  * exception will invoke this function, which will turn on the scheduler
916  * tick, which will enable RCU to detect that CPU's quiescent states,
917  * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
918  * The tick will be disabled once a quiescent state is reported for
919  * this CPU.
920  *
921  * Of course, in carefully tuned systems, there might never be an
922  * interrupt or exception.  In that case, the RCU grace-period kthread
923  * will eventually cause one to happen.  However, in less carefully
924  * controlled environments, this function allows RCU to get what it
925  * needs without creating otherwise useless interruptions.
926  */
927 void __rcu_irq_enter_check_tick(void)
928 {
929 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
930 
931 	 // Enabling the tick is unsafe in NMI handlers.
932 	if (WARN_ON_ONCE(in_nmi()))
933 		return;
934 
935 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
936 			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
937 
938 	if (!tick_nohz_full_cpu(rdp->cpu) ||
939 	    !READ_ONCE(rdp->rcu_urgent_qs) ||
940 	    READ_ONCE(rdp->rcu_forced_tick)) {
941 		// RCU doesn't need nohz_full help from this CPU, or it is
942 		// already getting that help.
943 		return;
944 	}
945 
946 	// We get here only when not in an extended quiescent state and
947 	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
948 	// already watching and (2) The fact that we are in an interrupt
949 	// handler and that the rcu_node lock is an irq-disabled lock
950 	// prevents self-deadlock.  So we can safely recheck under the lock.
951 	// Note that the nohz_full state currently cannot change.
952 	raw_spin_lock_rcu_node(rdp->mynode);
953 	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
954 		// A nohz_full CPU is in the kernel and RCU needs a
955 		// quiescent state.  Turn on the tick!
956 		WRITE_ONCE(rdp->rcu_forced_tick, true);
957 		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
958 	}
959 	raw_spin_unlock_rcu_node(rdp->mynode);
960 }
961 #endif /* CONFIG_NO_HZ_FULL */
962 
963 /**
964  * rcu_nmi_enter - inform RCU of entry to NMI context
965  *
966  * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
967  * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
968  * that the CPU is active.  This implementation permits nested NMIs, as
969  * long as the nesting level does not overflow an int.  (You will probably
970  * run out of stack space first.)
971  *
972  * If you add or remove a call to rcu_nmi_enter(), be sure to test
973  * with CONFIG_RCU_EQS_DEBUG=y.
974  */
975 noinstr void rcu_nmi_enter(void)
976 {
977 	long incby = 2;
978 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
979 
980 	/* Complain about underflow. */
981 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
982 
983 	/*
984 	 * If idle from RCU viewpoint, atomically increment ->dynticks
985 	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
986 	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
987 	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
988 	 * to be in the outermost NMI handler that interrupted an RCU-idle
989 	 * period (observation due to Andy Lutomirski).
990 	 */
991 	if (rcu_dynticks_curr_cpu_in_eqs()) {
992 
993 		if (!in_nmi())
994 			rcu_dynticks_task_exit();
995 
996 		// RCU is not watching here ...
997 		rcu_dynticks_eqs_exit();
998 		// ... but is watching here.
999 
1000 		if (!in_nmi()) {
1001 			instrumentation_begin();
1002 			rcu_cleanup_after_idle();
1003 			instrumentation_end();
1004 		}
1005 
1006 		instrumentation_begin();
1007 		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
1008 		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
1009 		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
1010 		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
1011 
1012 		incby = 1;
1013 	} else if (!in_nmi()) {
1014 		instrumentation_begin();
1015 		rcu_irq_enter_check_tick();
1016 		instrumentation_end();
1017 	} else  {
1018 		instrumentation_begin();
1019 	}
1020 
1021 	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
1022 			  rdp->dynticks_nmi_nesting,
1023 			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
1024 	instrumentation_end();
1025 	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
1026 		   rdp->dynticks_nmi_nesting + incby);
1027 	barrier();
1028 }
1029 
1030 /**
1031  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
1032  *
1033  * Enter an interrupt handler, which might possibly result in exiting
1034  * idle mode, in other words, entering the mode in which read-side critical
1035  * sections can occur.  The caller must have disabled interrupts.
1036  *
1037  * Note that the Linux kernel is fully capable of entering an interrupt
1038  * handler that it never exits, for example when doing upcalls to user mode!
1039  * This code assumes that the idle loop never does upcalls to user mode.
1040  * If your architecture's idle loop does do upcalls to user mode (or does
1041  * anything else that results in unbalanced calls to the irq_enter() and
1042  * irq_exit() functions), RCU will give you what you deserve, good and hard.
1043  * But very infrequently and irreproducibly.
1044  *
1045  * Use things like work queues to work around this limitation.
1046  *
1047  * You have been warned.
1048  *
1049  * If you add or remove a call to rcu_irq_enter(), be sure to test with
1050  * CONFIG_RCU_EQS_DEBUG=y.
1051  */
1052 noinstr void rcu_irq_enter(void)
1053 {
1054 	lockdep_assert_irqs_disabled();
1055 	rcu_nmi_enter();
1056 }
1057 
1058 /*
1059  * Wrapper for rcu_irq_enter() where interrupts are enabled.
1060  *
1061  * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1062  * with CONFIG_RCU_EQS_DEBUG=y.
1063  */
1064 void rcu_irq_enter_irqson(void)
1065 {
1066 	unsigned long flags;
1067 
1068 	local_irq_save(flags);
1069 	rcu_irq_enter();
1070 	local_irq_restore(flags);
1071 }
1072 
1073 /*
1074  * If any sort of urgency was applied to the current CPU (for example,
1075  * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
1076  * to get to a quiescent state, disable it.
1077  */
1078 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
1079 {
1080 	raw_lockdep_assert_held_rcu_node(rdp->mynode);
1081 	WRITE_ONCE(rdp->rcu_urgent_qs, false);
1082 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
1083 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
1084 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
1085 		WRITE_ONCE(rdp->rcu_forced_tick, false);
1086 	}
1087 }
1088 
1089 /**
1090  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
1091  *
1092  * Return true if RCU is watching the running CPU, which means that this
1093  * CPU can safely enter RCU read-side critical sections.  In other words,
1094  * if the current CPU is not in its idle loop or is in an interrupt or
1095  * NMI handler, return true.
1096  */
1097 bool rcu_is_watching(void)
1098 {
1099 	bool ret;
1100 
1101 	preempt_disable_notrace();
1102 	ret = !rcu_dynticks_curr_cpu_in_eqs();
1103 	preempt_enable_notrace();
1104 	return ret;
1105 }
1106 EXPORT_SYMBOL_GPL(rcu_is_watching);
1107 
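/*
 * Typical use (sketch only) is to guard RCU readers in code that can run
 * from the idle loop or another extended quiescent state, for example in
 * tracing or debugging hooks:
 *
 *	if (!rcu_is_watching())
 *		return;		(an RCU read-side critical section would
 *				 be illegal here)
 *	rcu_read_lock();
 *	...
 *	rcu_read_unlock();
 */
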
1108 /*
1109  * If a holdout task is actually running, request an urgent quiescent
1110  * state from its CPU.  This is unsynchronized, so migrations can cause
1111  * the request to go to the wrong CPU.  Which is OK, all that will happen
1112  * is that the CPU's next context switch will be a bit slower and next
1113  * time around this task will generate another request.
1114  */
1115 void rcu_request_urgent_qs_task(struct task_struct *t)
1116 {
1117 	int cpu;
1118 
1119 	barrier();
1120 	cpu = task_cpu(t);
1121 	if (!task_curr(t))
1122 		return; /* This task is not running on that CPU. */
1123 	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
1124 }
1125 
1126 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1127 
1128 /*
1129  * Is the current CPU online as far as RCU is concerned?
1130  *
1131  * Disable preemption to avoid false positives that could otherwise
1132  * happen due to the current CPU number being sampled, this task being
1133  * preempted, its old CPU being taken offline, resuming on some other CPU,
1134  * then determining that its old CPU is now offline.
1135  *
1136  * Disable checking if in an NMI handler because we cannot safely
1137  * report errors from NMI handlers anyway.  In addition, it is OK to use
1138  * RCU on an offline processor during initial boot, hence the check for
1139  * rcu_scheduler_fully_active.
1140  */
1141 bool rcu_lockdep_current_cpu_online(void)
1142 {
1143 	struct rcu_data *rdp;
1144 	struct rcu_node *rnp;
1145 	bool ret = false;
1146 
1147 	if (in_nmi() || !rcu_scheduler_fully_active)
1148 		return true;
1149 	preempt_disable_notrace();
1150 	rdp = this_cpu_ptr(&rcu_data);
1151 	rnp = rdp->mynode;
1152 	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
1153 		ret = true;
1154 	preempt_enable_notrace();
1155 	return ret;
1156 }
1157 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1158 
1159 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1160 
1161 /*
1162  * We are reporting a quiescent state on behalf of some other CPU, so
1163  * it is our responsibility to check for and handle potential overflow
1164  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
1165  * After all, the CPU might be in deep idle state, and thus executing no
1166  * code whatsoever.
1167  */
1168 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1169 {
1170 	raw_lockdep_assert_held_rcu_node(rnp);
1171 	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
1172 			 rnp->gp_seq))
1173 		WRITE_ONCE(rdp->gpwrap, true);
1174 	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
1175 		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
1176 }
1177 
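/*
 * In other words, rcu_gpnum_ovf() treats a CPU whose rdp->gp_seq has fallen
 * more than ULONG_MAX/4 behind rnp->gp_seq as having wrapped: ->gpwrap is
 * set so that the stale per-CPU numbers are resynchronized rather than
 * trusted, and the same quarter-of-the-counter-space margin is applied to
 * ->rcu_iw_gp_seq.
 */
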
1178 /*
1179  * Snapshot the specified CPU's dynticks counter so that we can later
1180  * credit them with an implicit quiescent state.  Return 1 if this CPU
1181  * is in dynticks idle mode, which is an extended quiescent state.
1182  */
1183 static int dyntick_save_progress_counter(struct rcu_data *rdp)
1184 {
1185 	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
1186 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1187 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1188 		rcu_gpnum_ovf(rdp->mynode, rdp);
1189 		return 1;
1190 	}
1191 	return 0;
1192 }
1193 
1194 /*
1195  * Return true if the specified CPU has passed through a quiescent
1196  * state by virtue of being in or having passed through a dynticks
1197  * idle state since the last call to dyntick_save_progress_counter()
1198  * for this same CPU, or by virtue of having been offline.
1199  */
1200 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1201 {
1202 	unsigned long jtsq;
1203 	bool *rnhqp;
1204 	bool *ruqp;
1205 	struct rcu_node *rnp = rdp->mynode;
1206 
1207 	/*
1208 	 * If the CPU passed through or entered a dynticks idle phase with
1209 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
1210 	 * already acknowledged the request to pass through a quiescent
1211 	 * state.  Either way, that CPU cannot possibly be in an RCU
1212 	 * read-side critical section that started before the beginning
1213 	 * of the current RCU grace period.
1214 	 */
1215 	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1216 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1217 		rcu_gpnum_ovf(rnp, rdp);
1218 		return 1;
1219 	}
1220 
1221 	/*
1222 	 * Complain if a CPU that is considered to be offline from RCU's
1223 	 * perspective has not yet reported a quiescent state.  After all,
1224 	 * the offline CPU should have reported a quiescent state during
1225 	 * the CPU-offline process, or, failing that, by rcu_gp_init()
1226 	 * if it ran concurrently with either the CPU going offline or the
1227 	 * last task on a leaf rcu_node structure exiting its RCU read-side
1228 	 * critical section while all CPUs corresponding to that structure
1229 	 * are offline.  This added warning detects bugs in any of these
1230 	 * code paths.
1231 	 *
1232 	 * The rcu_node structure's ->lock is held here, which excludes
1233 	 * the relevant portions of the CPU-hotplug code, the grace-period
1234 	 * initialization code, and the rcu_read_unlock() code paths.
1235 	 *
1236 	 * For more detail, please refer to the "Hotplug CPU" section
1237 	 * of RCU's Requirements documentation.
1238 	 */
1239 	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1240 		bool onl;
1241 		struct rcu_node *rnp1;
1242 
1243 		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1244 			__func__, rnp->grplo, rnp->grphi, rnp->level,
1245 			(long)rnp->gp_seq, (long)rnp->completedqs);
1246 		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1247 			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1248 				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1249 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1250 		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1251 			__func__, rdp->cpu, ".o"[onl],
1252 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1253 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1254 		return 1; /* Break things loose after complaining. */
1255 	}
1256 
1257 	/*
1258 	 * A CPU running for an extended time within the kernel can
1259 	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1260 	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1261 	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
1262 	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1263 	 * variable are safe because the assignments are repeated if this
1264 	 * CPU failed to pass through a quiescent state.  This code
1265 	 * also checks .jiffies_resched in case jiffies_to_sched_qs
1266 	 * is set way high.
1267 	 */
1268 	jtsq = READ_ONCE(jiffies_to_sched_qs);
1269 	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1270 	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1271 	if (!READ_ONCE(*rnhqp) &&
1272 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1273 	     time_after(jiffies, rcu_state.jiffies_resched) ||
1274 	     rcu_state.cbovld)) {
1275 		WRITE_ONCE(*rnhqp, true);
1276 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1277 		smp_store_release(ruqp, true);
1278 	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1279 		WRITE_ONCE(*ruqp, true);
1280 	}
1281 
1282 	/*
1283 	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1284 	 * The above code handles this, but only for straight cond_resched().
1285 	 * And some in-kernel loops check need_resched() before calling
1286 	 * cond_resched(), which defeats the above code for CPUs that are
1287 	 * running in-kernel with scheduling-clock interrupts disabled.
1288 	 * So hit them over the head with the resched_cpu() hammer!
1289 	 */
1290 	if (tick_nohz_full_cpu(rdp->cpu) &&
1291 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1292 	     rcu_state.cbovld)) {
1293 		WRITE_ONCE(*ruqp, true);
1294 		resched_cpu(rdp->cpu);
1295 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1296 	}
1297 
1298 	/*
1299 	 * If more than halfway to RCU CPU stall-warning time, invoke
1300 	 * resched_cpu() more frequently to try to loosen things up a bit.
1301 	 * Also check to see if the CPU is getting hammered with interrupts,
1302 	 * but only once per grace period, just to keep the IPIs down to
1303 	 * a dull roar.
1304 	 */
1305 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
1306 		if (time_after(jiffies,
1307 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1308 			resched_cpu(rdp->cpu);
1309 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1310 		}
1311 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1312 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1313 		    (rnp->ffmask & rdp->grpmask)) {
1314 			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1315 			atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
1316 			rdp->rcu_iw_pending = true;
1317 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
1318 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1319 		}
1320 	}
1321 
1322 	return 0;
1323 }
1324 
1325 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
1326 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1327 			      unsigned long gp_seq_req, const char *s)
1328 {
1329 	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1330 				      gp_seq_req, rnp->level,
1331 				      rnp->grplo, rnp->grphi, s);
1332 }
1333 
1334 /*
1335  * rcu_start_this_gp - Request the start of a particular grace period
1336  * @rnp_start: The leaf node of the CPU from which to start.
1337  * @rdp: The rcu_data corresponding to the CPU from which to start.
1338  * @gp_seq_req: The gp_seq of the grace period to start.
1339  *
1340  * Start the specified grace period, as needed to handle newly arrived
1341  * callbacks.  The required future grace periods are recorded in each
1342  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
1343  * is reason to awaken the grace-period kthread.
1344  *
1345  * The caller must hold the specified rcu_node structure's ->lock, which
1346  * is why the caller is responsible for waking the grace-period kthread.
1347  *
1348  * Returns true if the GP thread needs to be awakened else false.
1349  */
1350 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1351 			      unsigned long gp_seq_req)
1352 {
1353 	bool ret = false;
1354 	struct rcu_node *rnp;
1355 
1356 	/*
1357 	 * Use funnel locking to either acquire the root rcu_node
1358 	 * structure's lock or bail out if the need for this grace period
1359 	 * has already been recorded -- or if that grace period has in
1360 	 * fact already started.  If there is already a grace period in
1361 	 * progress in a non-leaf node, no recording is needed because the
1362 	 * end of the grace period will scan the leaf rcu_node structures.
1363 	 * Note that rnp_start->lock must not be released.
1364 	 */
1365 	raw_lockdep_assert_held_rcu_node(rnp_start);
1366 	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1367 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
1368 		if (rnp != rnp_start)
1369 			raw_spin_lock_rcu_node(rnp);
1370 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1371 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1372 		    (rnp != rnp_start &&
1373 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1374 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1375 					  TPS("Prestarted"));
1376 			goto unlock_out;
1377 		}
1378 		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1379 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1380 			/*
1381 			 * We just marked the leaf or internal node, and a
1382 			 * grace period is in progress, which means that
1383 			 * rcu_gp_cleanup() will see the marking.  Bail to
1384 			 * reduce contention.
1385 			 */
1386 			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1387 					  TPS("Startedleaf"));
1388 			goto unlock_out;
1389 		}
1390 		if (rnp != rnp_start && rnp->parent != NULL)
1391 			raw_spin_unlock_rcu_node(rnp);
1392 		if (!rnp->parent)
1393 			break;  /* At root, and perhaps also leaf. */
1394 	}
1395 
1396 	/* If GP already in progress, just leave, otherwise start one. */
1397 	if (rcu_gp_in_progress()) {
1398 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1399 		goto unlock_out;
1400 	}
1401 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1402 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1403 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1404 	if (!READ_ONCE(rcu_state.gp_kthread)) {
1405 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1406 		goto unlock_out;
1407 	}
1408 	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1409 	ret = true;  /* Caller must wake GP kthread. */
1410 unlock_out:
1411 	/* Push furthest requested GP to leaf node and rcu_data structure. */
1412 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1413 		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1414 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1415 	}
1416 	if (rnp != rnp_start)
1417 		raw_spin_unlock_rcu_node(rnp);
1418 	return ret;
1419 }
1420 
1421 /*
1422  * Clean up any old requests for the just-ended grace period.  Also return
1423  * whether any additional grace periods have been requested.
1424  */
1425 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1426 {
1427 	bool needmore;
1428 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1429 
1430 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1431 	if (!needmore)
1432 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1433 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1434 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1435 	return needmore;
1436 }
1437 
1438 /*
1439  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1440  * interrupt or softirq handler, in which case we just might immediately
1441  * sleep upon return, resulting in a grace-period hang), and don't bother
1442  * awakening when there is nothing for the grace-period kthread to do
1443  * (as in several CPUs raced to awaken, and we lost), and finally don't try
1444  * to awaken a kthread that has not yet been created.  If all those checks
1445  * are passed, track some debug information and awaken.
1446  *
1447  * So why do the self-wakeup when in an interrupt or softirq handler
1448  * in the grace-period kthread's context?  Because the kthread might have
1449  * been interrupted just as it was going to sleep, and just after the final
1450  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1451  * is required, and is therefore supplied.
1452  */
1453 static void rcu_gp_kthread_wake(void)
1454 {
1455 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1456 
1457 	if ((current == t && !in_irq() && !in_serving_softirq()) ||
1458 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1459 		return;
1460 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1461 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1462 	swake_up_one(&rcu_state.gp_wq);
1463 }
1464 
1465 /*
1466  * If there is room, assign a ->gp_seq number to any callbacks on this
1467  * CPU that have not already been assigned.  Also accelerate any callbacks
1468  * that were previously assigned a ->gp_seq number that has since proven
1469  * to be too conservative, which can happen if callbacks get assigned a
1470  * ->gp_seq number while RCU is idle, but with reference to a non-root
1471  * rcu_node structure.  This function is idempotent, so it does not hurt
1472  * to call it repeatedly.  Returns a flag saying whether we should awaken
1473  * the RCU grace-period kthread.
1474  *
1475  * The caller must hold rnp->lock with interrupts disabled.
1476  */
1477 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1478 {
1479 	unsigned long gp_seq_req;
1480 	bool ret = false;
1481 
1482 	rcu_lockdep_assert_cblist_protected(rdp);
1483 	raw_lockdep_assert_held_rcu_node(rnp);
1484 
1485 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1486 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1487 		return false;
1488 
1489 	/*
1490 	 * Callbacks are often registered with incomplete grace-period
1491 	 * information.  Something about the fact that getting exact
1492 	 * information requires acquiring a global lock...  RCU therefore
1493 	 * makes a conservative estimate of the grace period number at which
1494 	 * a given callback will become ready to invoke.  The following
1495 	 * code checks this estimate and improves it when possible, thus
1496 	 * accelerating callback invocation to an earlier grace-period
1497 	 * number.
1498 	 */
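	/*
	 * Side note: rcu_seq_snap() returns (roughly speaking) the earliest
	 * ->gp_seq value that is guaranteed to follow a full grace period
	 * beginning after this point, making it a safe, if conservative,
	 * target for these callbacks.
	 */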
1499 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1500 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1501 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1502 
1503 	/* Trace depending on how much we were able to accelerate. */
1504 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1505 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1506 	else
1507 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1508 
1509 	return ret;
1510 }
1511 
1512 /*
1513  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1514  * rcu_node structure's ->lock be held.  It consults the cached value
1515  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1516  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1517  * while holding the leaf rcu_node structure's ->lock.
1518  */
1519 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1520 					struct rcu_data *rdp)
1521 {
1522 	unsigned long c;
1523 	bool needwake;
1524 
1525 	rcu_lockdep_assert_cblist_protected(rdp);
1526 	c = rcu_seq_snap(&rcu_state.gp_seq);
1527 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1528 		/* Old request still live, so mark recent callbacks. */
1529 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1530 		return;
1531 	}
1532 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1533 	needwake = rcu_accelerate_cbs(rnp, rdp);
1534 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1535 	if (needwake)
1536 		rcu_gp_kthread_wake();
1537 }
1538 
1539 /*
1540  * Move any callbacks whose grace period has completed to the
1541  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1542  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1543  * sublist.  This function is idempotent, so it does not hurt to
1544  * invoke it repeatedly.  As long as it is not invoked -too- often...
1545  * Returns true if the RCU grace-period kthread needs to be awakened.
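 *
 * For reference (a simplified sketch; see rcu_segcblist.h for the
 * authoritative definitions): the segmented callback list has four
 * sublists -- RCU_DONE_TAIL (callbacks ready to invoke), RCU_WAIT_TAIL
 * (waiting for the current grace period), RCU_NEXT_READY_TAIL (waiting
 * for the next grace period), and RCU_NEXT_TAIL (not yet associated
 * with any grace period).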
1546  *
1547  * The caller must hold rnp->lock with interrupts disabled.
1548  */
1549 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1550 {
1551 	rcu_lockdep_assert_cblist_protected(rdp);
1552 	raw_lockdep_assert_held_rcu_node(rnp);
1553 
1554 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1555 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1556 		return false;
1557 
1558 	/*
1559 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1560 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1561 	 */
1562 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1563 
1564 	/* Classify any remaining callbacks. */
1565 	return rcu_accelerate_cbs(rnp, rdp);
1566 }
1567 
1568 /*
1569  * Move and classify callbacks, but only if doing so won't require
1570  * that the RCU grace-period kthread be awakened.
1571  */
1572 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1573 						  struct rcu_data *rdp)
1574 {
1575 	rcu_lockdep_assert_cblist_protected(rdp);
1576 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1577 	    !raw_spin_trylock_rcu_node(rnp))
1578 		return;
1579 	WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1580 	raw_spin_unlock_rcu_node(rnp);
1581 }
1582 
1583 /*
1584  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1585  * quiescent state.  This is intended to be invoked when the CPU notices
1586  * a new grace period.
1587  */
1588 static void rcu_strict_gp_check_qs(void)
1589 {
1590 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1591 		rcu_read_lock();
1592 		rcu_read_unlock();
1593 	}
1594 }
1595 
1596 /*
1597  * Update CPU-local rcu_data state to record the beginnings and ends of
1598  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1599  * structure corresponding to the current CPU, and must have irqs disabled.
1600  * Returns true if the grace-period kthread needs to be awakened.
1601  */
1602 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1603 {
1604 	bool ret = false;
1605 	bool need_qs;
1606 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1607 			       rcu_segcblist_is_offloaded(&rdp->cblist);
1608 
1609 	raw_lockdep_assert_held_rcu_node(rnp);
1610 
1611 	if (rdp->gp_seq == rnp->gp_seq)
1612 		return false; /* Nothing to do. */
1613 
1614 	/* Handle the ends of any preceding grace periods first. */
1615 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1616 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1617 		if (!offloaded)
1618 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1619 		rdp->core_needs_qs = false;
1620 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1621 	} else {
1622 		if (!offloaded)
1623 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1624 		if (rdp->core_needs_qs)
1625 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1626 	}
1627 
1628 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1629 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1630 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1631 		/*
1632 		 * If the current grace period is waiting for this CPU,
1633 		 * set up to detect a quiescent state, otherwise don't
1634 		 * go looking for one.
1635 		 */
1636 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1637 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1638 		rdp->cpu_no_qs.b.norm = need_qs;
1639 		rdp->core_needs_qs = need_qs;
1640 		zero_cpu_stall_ticks(rdp);
1641 	}
1642 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1643 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1644 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1645 	WRITE_ONCE(rdp->gpwrap, false);
1646 	rcu_gpnum_ovf(rnp, rdp);
1647 	return ret;
1648 }
1649 
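/*
 * Check for grace-period beginnings and ends on this CPU's leaf rcu_node
 * structure, acquiring that structure's ->lock only if something appears
 * to have changed, and awaken the grace-period kthread if
 * __note_gp_changes() says to do so.
 */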
1650 static void note_gp_changes(struct rcu_data *rdp)
1651 {
1652 	unsigned long flags;
1653 	bool needwake;
1654 	struct rcu_node *rnp;
1655 
1656 	local_irq_save(flags);
1657 	rnp = rdp->mynode;
1658 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1659 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1660 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1661 		local_irq_restore(flags);
1662 		return;
1663 	}
1664 	needwake = __note_gp_changes(rnp, rdp);
1665 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1666 	rcu_strict_gp_check_qs();
1667 	if (needwake)
1668 		rcu_gp_kthread_wake();
1669 }
1670 
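/*
 * If the specified delay is nonzero, slow grace-period processing by
 * sleeping for that many jiffies, but only once in a while (at a rate
 * scaled by the size of the rcu_node tree).  As seen at its call sites,
 * the delays come from the gp_preinit_delay, gp_init_delay, and
 * gp_cleanup_delay parameters, which are intended for testing and debug.
 */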
1671 static void rcu_gp_slow(int delay)
1672 {
1673 	if (delay > 0 &&
1674 	    !(rcu_seq_ctr(rcu_state.gp_seq) %
1675 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1676 		schedule_timeout_idle(delay);
1677 }
1678 
1679 static unsigned long sleep_duration;
1680 
1681 /* Allow rcutorture to stall the grace-period kthread. */
1682 void rcu_gp_set_torture_wait(int duration)
1683 {
1684 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1685 		WRITE_ONCE(sleep_duration, duration);
1686 }
1687 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1688 
1689 /* Actually implement the aforementioned wait. */
1690 static void rcu_gp_torture_wait(void)
1691 {
1692 	unsigned long duration;
1693 
1694 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1695 		return;
1696 	duration = xchg(&sleep_duration, 0UL);
1697 	if (duration > 0) {
1698 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1699 		schedule_timeout_idle(duration);
1700 		pr_alert("%s: Wait complete\n", __func__);
1701 	}
1702 }
1703 
1704 /*
1705  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1706  * processing.
1707  */
1708 static void rcu_strict_gp_boundary(void *unused)
1709 {
1710 	invoke_rcu_core();
1711 }
1712 
1713 /*
1714  * Initialize a new grace period.  Return false if no grace period required.
1715  */
1716 static bool rcu_gp_init(void)
1717 {
1718 	unsigned long flags;
1719 	unsigned long oldmask;
1720 	unsigned long mask;
1721 	struct rcu_data *rdp;
1722 	struct rcu_node *rnp = rcu_get_root();
1723 
1724 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1725 	raw_spin_lock_irq_rcu_node(rnp);
1726 	if (!READ_ONCE(rcu_state.gp_flags)) {
1727 		/* Spurious wakeup, tell caller to go back to sleep.  */
1728 		raw_spin_unlock_irq_rcu_node(rnp);
1729 		return false;
1730 	}
1731 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1732 
1733 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1734 		/*
1735 		 * Grace period already in progress, don't start another.
1736 		 * Not supposed to be able to happen.
1737 		 */
1738 		raw_spin_unlock_irq_rcu_node(rnp);
1739 		return false;
1740 	}
1741 
1742 	/* Advance to a new grace period and initialize state. */
1743 	record_gp_stall_check_time();
1744 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1745 	rcu_seq_start(&rcu_state.gp_seq);
1746 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1747 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1748 	raw_spin_unlock_irq_rcu_node(rnp);
1749 
1750 	/*
1751 	 * Apply per-leaf buffered online and offline operations to
1752 	 * the rcu_node tree. Note that this new grace period need not
1753 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1754 	 * offlining path, when combined with checks in this function,
1755 	 * will handle CPUs that are currently going offline or that will
1756 	 * go offline later.  Please also refer to "Hotplug CPU" section
1757 	 * of RCU's Requirements documentation.
1758 	 */
1759 	rcu_state.gp_state = RCU_GP_ONOFF;
1760 	rcu_for_each_leaf_node(rnp) {
1761 		raw_spin_lock(&rcu_state.ofl_lock);
1762 		raw_spin_lock_irq_rcu_node(rnp);
1763 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1764 		    !rnp->wait_blkd_tasks) {
1765 			/* Nothing to do on this leaf rcu_node structure. */
1766 			raw_spin_unlock_irq_rcu_node(rnp);
1767 			raw_spin_unlock(&rcu_state.ofl_lock);
1768 			continue;
1769 		}
1770 
1771 		/* Record old state, apply changes to ->qsmaskinit field. */
1772 		oldmask = rnp->qsmaskinit;
1773 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1774 
1775 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1776 		if (!oldmask != !rnp->qsmaskinit) {
1777 			if (!oldmask) { /* First online CPU for rcu_node. */
1778 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1779 					rcu_init_new_rnp(rnp);
1780 			} else if (rcu_preempt_has_tasks(rnp)) {
1781 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1782 			} else { /* Last offline CPU and can propagate. */
1783 				rcu_cleanup_dead_rnp(rnp);
1784 			}
1785 		}
1786 
1787 		/*
1788 		 * If all waited-on tasks from prior grace period are
1789 		 * done, and if all this rcu_node structure's CPUs are
1790 		 * still offline, propagate up the rcu_node tree and
1791 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1792 		 * rcu_node structure's CPUs has since come back online,
1793 		 * simply clear ->wait_blkd_tasks.
1794 		 */
1795 		if (rnp->wait_blkd_tasks &&
1796 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1797 			rnp->wait_blkd_tasks = false;
1798 			if (!rnp->qsmaskinit)
1799 				rcu_cleanup_dead_rnp(rnp);
1800 		}
1801 
1802 		raw_spin_unlock_irq_rcu_node(rnp);
1803 		raw_spin_unlock(&rcu_state.ofl_lock);
1804 	}
1805 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1806 
1807 	/*
1808 	 * Set the quiescent-state-needed bits in all the rcu_node
1809 	 * structures for all currently online CPUs in breadth-first
1810 	 * order, starting from the root rcu_node structure, relying on the
1811 	 * layout of the tree within the rcu_state.node[] array.  Note that
1812 	 * other CPUs will access only the leaves of the hierarchy, thus
1813 	 * seeing that no grace period is in progress, at least until the
1814 	 * corresponding leaf node has been initialized.
1815 	 *
1816 	 * The grace period cannot complete until the initialization
1817 	 * process finishes, because this kthread handles both.
1818 	 */
1819 	rcu_state.gp_state = RCU_GP_INIT;
1820 	rcu_for_each_node_breadth_first(rnp) {
1821 		rcu_gp_slow(gp_init_delay);
1822 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1823 		rdp = this_cpu_ptr(&rcu_data);
1824 		rcu_preempt_check_blocked_tasks(rnp);
1825 		rnp->qsmask = rnp->qsmaskinit;
1826 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1827 		if (rnp == rdp->mynode)
1828 			(void)__note_gp_changes(rnp, rdp);
1829 		rcu_preempt_boost_start_gp(rnp);
1830 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1831 					    rnp->level, rnp->grplo,
1832 					    rnp->grphi, rnp->qsmask);
1833 		/* Quiescent states for tasks on any now-offline CPUs. */
1834 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1835 		rnp->rcu_gp_init_mask = mask;
1836 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1837 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1838 		else
1839 			raw_spin_unlock_irq_rcu_node(rnp);
1840 		cond_resched_tasks_rcu_qs();
1841 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1842 	}
1843 
1844 	// If strict, make all CPUs aware of new grace period.
1845 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1846 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1847 
1848 	return true;
1849 }
1850 
1851 /*
1852  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1853  * time.
1854  */
1855 static bool rcu_gp_fqs_check_wake(int *gfp)
1856 {
1857 	struct rcu_node *rnp = rcu_get_root();
1858 
1859 	// If under overload conditions, force an immediate FQS scan.
1860 	if (*gfp & RCU_GP_FLAG_OVLD)
1861 		return true;
1862 
1863 	// Someone like call_rcu() requested a force-quiescent-state scan.
1864 	*gfp = READ_ONCE(rcu_state.gp_flags);
1865 	if (*gfp & RCU_GP_FLAG_FQS)
1866 		return true;
1867 
1868 	// The current grace period has completed.
1869 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1870 		return true;
1871 
1872 	return false;
1873 }
1874 
1875 /*
1876  * Do one round of quiescent-state forcing.
1877  */
1878 static void rcu_gp_fqs(bool first_time)
1879 {
1880 	struct rcu_node *rnp = rcu_get_root();
1881 
1882 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1883 	rcu_state.n_force_qs++;
1884 	if (first_time) {
1885 		/* Collect dyntick-idle snapshots. */
1886 		force_qs_rnp(dyntick_save_progress_counter);
1887 	} else {
1888 		/* Handle dyntick-idle and offline CPUs. */
1889 		force_qs_rnp(rcu_implicit_dynticks_qs);
1890 	}
1891 	/* Clear flag to prevent immediate re-entry. */
1892 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1893 		raw_spin_lock_irq_rcu_node(rnp);
1894 		WRITE_ONCE(rcu_state.gp_flags,
1895 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1896 		raw_spin_unlock_irq_rcu_node(rnp);
1897 	}
1898 }
1899 
1900 /*
1901  * Loop doing repeated quiescent-state forcing until the grace period ends.
1902  */
1903 static void rcu_gp_fqs_loop(void)
1904 {
1905 	bool first_gp_fqs;
1906 	int gf = 0;
1907 	unsigned long j;
1908 	int ret;
1909 	struct rcu_node *rnp = rcu_get_root();
1910 
1911 	first_gp_fqs = true;
1912 	j = READ_ONCE(jiffies_till_first_fqs);
1913 	if (rcu_state.cbovld)
1914 		gf = RCU_GP_FLAG_OVLD;
1915 	ret = 0;
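	/*
	 * In the loop below, a zero "ret" causes the force-quiescent-state
	 * deadline to be recomputed at the top of the next pass, while a
	 * nonzero "ret" (as after a spurious wakeup) keeps the old deadline.
	 */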
1916 	for (;;) {
1917 		if (!ret) {
1918 			rcu_state.jiffies_force_qs = jiffies + j;
1919 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1920 				   jiffies + (j ? 3 * j : 2));
1921 		}
1922 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1923 				       TPS("fqswait"));
1924 		rcu_state.gp_state = RCU_GP_WAIT_FQS;
1925 		ret = swait_event_idle_timeout_exclusive(
1926 				rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1927 		rcu_gp_torture_wait();
1928 		rcu_state.gp_state = RCU_GP_DOING_FQS;
1929 		/* Locking provides needed memory barriers. */
1930 		/* If grace period done, leave loop. */
1931 		if (!READ_ONCE(rnp->qsmask) &&
1932 		    !rcu_preempt_blocked_readers_cgp(rnp))
1933 			break;
1934 		/* If time for quiescent-state forcing, do it. */
1935 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1936 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1937 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1938 					       TPS("fqsstart"));
1939 			rcu_gp_fqs(first_gp_fqs);
1940 			gf = 0;
1941 			if (first_gp_fqs) {
1942 				first_gp_fqs = false;
1943 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1944 			}
1945 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1946 					       TPS("fqsend"));
1947 			cond_resched_tasks_rcu_qs();
1948 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1949 			ret = 0; /* Force full wait till next FQS. */
1950 			j = READ_ONCE(jiffies_till_next_fqs);
1951 		} else {
1952 			/* Deal with stray signal. */
1953 			cond_resched_tasks_rcu_qs();
1954 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1955 			WARN_ON(signal_pending(current));
1956 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1957 					       TPS("fqswaitsig"));
1958 			ret = 1; /* Keep old FQS timing. */
1959 			j = jiffies;
1960 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1961 				j = 1;
1962 			else
1963 				j = rcu_state.jiffies_force_qs - j;
1964 			gf = 0;
1965 		}
1966 	}
1967 }
1968 
1969 /*
1970  * Clean up after the old grace period.
1971  */
1972 static void rcu_gp_cleanup(void)
1973 {
1974 	int cpu;
1975 	bool needgp = false;
1976 	unsigned long gp_duration;
1977 	unsigned long new_gp_seq;
1978 	bool offloaded;
1979 	struct rcu_data *rdp;
1980 	struct rcu_node *rnp = rcu_get_root();
1981 	struct swait_queue_head *sq;
1982 
1983 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1984 	raw_spin_lock_irq_rcu_node(rnp);
1985 	rcu_state.gp_end = jiffies;
1986 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1987 	if (gp_duration > rcu_state.gp_max)
1988 		rcu_state.gp_max = gp_duration;
1989 
1990 	/*
1991 	 * We know the grace period is complete, but to everyone else
1992 	 * it appears to still be ongoing.  But it is also the case
1993 	 * that to everyone else it looks like there is nothing that
1994 	 * they can do to advance the grace period.  It is therefore
1995 	 * safe for us to drop the lock in order to mark the grace
1996 	 * period as completed in all of the rcu_node structures.
1997 	 */
1998 	raw_spin_unlock_irq_rcu_node(rnp);
1999 
2000 	/*
2001 	 * Propagate new ->gp_seq value to rcu_node structures so that
2002 	 * other CPUs don't have to wait until the start of the next grace
2003 	 * period to process their callbacks.  This also avoids some nasty
2004 	 * RCU grace-period initialization races by forcing the end of
2005 	 * the current grace period to be completely recorded in all of
2006 	 * the rcu_node structures before the beginning of the next grace
2007 	 * period is recorded in any of the rcu_node structures.
2008 	 */
2009 	new_gp_seq = rcu_state.gp_seq;
2010 	rcu_seq_end(&new_gp_seq);
2011 	rcu_for_each_node_breadth_first(rnp) {
2012 		raw_spin_lock_irq_rcu_node(rnp);
2013 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2014 			dump_blkd_tasks(rnp, 10);
2015 		WARN_ON_ONCE(rnp->qsmask);
2016 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2017 		rdp = this_cpu_ptr(&rcu_data);
2018 		if (rnp == rdp->mynode)
2019 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2020 		/* smp_mb() provided by prior unlock-lock pair. */
2021 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2022 		// Reset overload indication for CPUs no longer overloaded
2023 		if (rcu_is_leaf_node(rnp))
2024 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2025 				rdp = per_cpu_ptr(&rcu_data, cpu);
2026 				check_cb_ovld_locked(rdp, rnp);
2027 			}
2028 		sq = rcu_nocb_gp_get(rnp);
2029 		raw_spin_unlock_irq_rcu_node(rnp);
2030 		rcu_nocb_gp_cleanup(sq);
2031 		cond_resched_tasks_rcu_qs();
2032 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2033 		rcu_gp_slow(gp_cleanup_delay);
2034 	}
2035 	rnp = rcu_get_root();
2036 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2037 
2038 	/* Declare grace period done, trace first to use old GP number. */
2039 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2040 	rcu_seq_end(&rcu_state.gp_seq);
2041 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2042 	rcu_state.gp_state = RCU_GP_IDLE;
2043 	/* Check for GP requests since above loop. */
2044 	rdp = this_cpu_ptr(&rcu_data);
2045 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2046 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2047 				  TPS("CleanupMore"));
2048 		needgp = true;
2049 	}
2050 	/* Advance CBs to reduce false positives below. */
2051 	offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2052 		    rcu_segcblist_is_offloaded(&rdp->cblist);
2053 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2054 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2055 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2056 		trace_rcu_grace_period(rcu_state.name,
2057 				       rcu_state.gp_seq,
2058 				       TPS("newreq"));
2059 	} else {
2060 		WRITE_ONCE(rcu_state.gp_flags,
2061 			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2062 	}
2063 	raw_spin_unlock_irq_rcu_node(rnp);
2064 
2065 	// If strict, make all CPUs aware of the end of the old grace period.
2066 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2067 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2068 }
2069 
2070 /*
2071  * Body of kthread that handles grace periods.
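 *
 * Roughly speaking, each pass through the outer loop below advances
 * rcu_state.gp_state through: RCU_GP_WAIT_GPS (waiting for a grace-period
 * request), RCU_GP_DONE_GPS, grace-period initialization in rcu_gp_init(),
 * the RCU_GP_WAIT_FQS/RCU_GP_DOING_FQS forcing loop in rcu_gp_fqs_loop(),
 * and finally RCU_GP_CLEANUP/RCU_GP_CLEANED in rcu_gp_cleanup().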
2072  */
2073 static int __noreturn rcu_gp_kthread(void *unused)
2074 {
2075 	rcu_bind_gp_kthread();
2076 	for (;;) {
2077 
2078 		/* Handle grace-period start. */
2079 		for (;;) {
2080 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2081 					       TPS("reqwait"));
2082 			rcu_state.gp_state = RCU_GP_WAIT_GPS;
2083 			swait_event_idle_exclusive(rcu_state.gp_wq,
2084 					 READ_ONCE(rcu_state.gp_flags) &
2085 					 RCU_GP_FLAG_INIT);
2086 			rcu_gp_torture_wait();
2087 			rcu_state.gp_state = RCU_GP_DONE_GPS;
2088 			/* Locking provides needed memory barrier. */
2089 			if (rcu_gp_init())
2090 				break;
2091 			cond_resched_tasks_rcu_qs();
2092 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2093 			WARN_ON(signal_pending(current));
2094 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2095 					       TPS("reqwaitsig"));
2096 		}
2097 
2098 		/* Handle quiescent-state forcing. */
2099 		rcu_gp_fqs_loop();
2100 
2101 		/* Handle grace-period end. */
2102 		rcu_state.gp_state = RCU_GP_CLEANUP;
2103 		rcu_gp_cleanup();
2104 		rcu_state.gp_state = RCU_GP_CLEANED;
2105 	}
2106 }
2107 
2108 /*
2109  * Report a full set of quiescent states to the rcu_state data structure.
2110  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2111  * another grace period is required.  Whether we wake the grace-period
2112  * kthread or it awakens itself for the next round of quiescent-state
2113  * forcing, that kthread will clean up after the just-completed grace
2114  * period.  Note that the caller must hold rnp->lock, which is released
2115  * before return.
2116  */
2117 static void rcu_report_qs_rsp(unsigned long flags)
2118 	__releases(rcu_get_root()->lock)
2119 {
2120 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
2121 	WARN_ON_ONCE(!rcu_gp_in_progress());
2122 	WRITE_ONCE(rcu_state.gp_flags,
2123 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2124 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2125 	rcu_gp_kthread_wake();
2126 }
2127 
2128 /*
2129  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2130  * Allows quiescent states for a group of CPUs to be reported at one go
2131  * to the specified rcu_node structure, though all the CPUs in the group
2132  * must be represented by the same rcu_node structure (which need not be a
2133  * leaf rcu_node structure, though it often will be).  The gps parameter
2134  * is the grace-period snapshot, which means that the quiescent states
2135  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2136  * must be held upon entry, and it is released before return.
2137  *
2138  * As a special case, if mask is zero, the bit-already-cleared check is
2139  * disabled.  This allows propagating quiescent state due to resumed tasks
2140  * during grace-period initialization.
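 *
 * The walk proceeds leafward-to-rootward: once all of an rcu_node
 * structure's ->qsmask bits are clear (and no readers are blocking the
 * current grace period), that structure's ->grpmask bit is cleared in its
 * parent, and so on up the tree, until rcu_report_qs_rsp() is invoked at
 * the root.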
2141  */
2142 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2143 			      unsigned long gps, unsigned long flags)
2144 	__releases(rnp->lock)
2145 {
2146 	unsigned long oldmask = 0;
2147 	struct rcu_node *rnp_c;
2148 
2149 	raw_lockdep_assert_held_rcu_node(rnp);
2150 
2151 	/* Walk up the rcu_node hierarchy. */
2152 	for (;;) {
2153 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2154 
2155 			/*
2156 			 * Our bit has already been cleared, or the
2157 			 * relevant grace period is already over, so done.
2158 			 */
2159 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2160 			return;
2161 		}
2162 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2163 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2164 			     rcu_preempt_blocked_readers_cgp(rnp));
2165 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2166 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2167 						 mask, rnp->qsmask, rnp->level,
2168 						 rnp->grplo, rnp->grphi,
2169 						 !!rnp->gp_tasks);
2170 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2171 
2172 			/* Other bits still set at this level, so done. */
2173 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2174 			return;
2175 		}
2176 		rnp->completedqs = rnp->gp_seq;
2177 		mask = rnp->grpmask;
2178 		if (rnp->parent == NULL) {
2179 
2180 			/* No more levels.  Exit loop holding root lock. */
2181 
2182 			break;
2183 		}
2184 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2185 		rnp_c = rnp;
2186 		rnp = rnp->parent;
2187 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2188 		oldmask = READ_ONCE(rnp_c->qsmask);
2189 	}
2190 
2191 	/*
2192 	 * Get here if we are the last CPU to pass through a quiescent
2193 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2194 	 * to clean up and start the next grace period if one is needed.
2195 	 */
2196 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2197 }
2198 
2199 /*
2200  * Record a quiescent state for all tasks that were previously queued
2201  * on the specified rcu_node structure and that were blocking the current
2202  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2203  * irqs disabled, and this lock is released upon return, but irqs remain
2204  * disabled.
2205  */
2206 static void __maybe_unused
2207 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2208 	__releases(rnp->lock)
2209 {
2210 	unsigned long gps;
2211 	unsigned long mask;
2212 	struct rcu_node *rnp_p;
2213 
2214 	raw_lockdep_assert_held_rcu_node(rnp);
2215 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2216 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2217 	    rnp->qsmask != 0) {
2218 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2219 		return;  /* Still need more quiescent states! */
2220 	}
2221 
2222 	rnp->completedqs = rnp->gp_seq;
2223 	rnp_p = rnp->parent;
2224 	if (rnp_p == NULL) {
2225 		/*
2226 		 * Only one rcu_node structure in the tree, so don't
2227 		 * try to report up to its nonexistent parent!
2228 		 */
2229 		rcu_report_qs_rsp(flags);
2230 		return;
2231 	}
2232 
2233 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2234 	gps = rnp->gp_seq;
2235 	mask = rnp->grpmask;
2236 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2237 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2238 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2239 }
2240 
2241 /*
2242  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2243  * structure.  This must be called from the specified CPU.
2244  */
2245 static void
2246 rcu_report_qs_rdp(struct rcu_data *rdp)
2247 {
2248 	unsigned long flags;
2249 	unsigned long mask;
2250 	bool needwake = false;
2251 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2252 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2253 	struct rcu_node *rnp;
2254 
2255 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2256 	rnp = rdp->mynode;
2257 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2258 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2259 	    rdp->gpwrap) {
2260 
2261 		/*
2262 		 * The grace period in which this quiescent state was
2263 		 * recorded has ended, so don't report it upwards.
2264 		 * We will instead need a new quiescent state that lies
2265 		 * within the current grace period.
2266 		 */
2267 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2268 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2269 		return;
2270 	}
2271 	mask = rdp->grpmask;
2272 	rdp->core_needs_qs = false;
2273 	if ((rnp->qsmask & mask) == 0) {
2274 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2275 	} else {
2276 		/*
2277 		 * This GP can't end until cpu checks in, so all of our
2278 		 * callbacks can be processed during the next GP.
2279 		 */
2280 		if (!offloaded)
2281 			needwake = rcu_accelerate_cbs(rnp, rdp);
2282 
2283 		rcu_disable_urgency_upon_qs(rdp);
2284 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2285 		/* ^^^ Released rnp->lock */
2286 		if (needwake)
2287 			rcu_gp_kthread_wake();
2288 	}
2289 }
2290 
2291 /*
2292  * Check to see if there is a new grace period of which this CPU
2293  * is not yet aware, and if so, set up local rcu_data state for it.
2294  * Otherwise, see if this CPU has just passed through its first
2295  * quiescent state for this grace period, and record that fact if so.
2296  */
2297 static void
2298 rcu_check_quiescent_state(struct rcu_data *rdp)
2299 {
2300 	/* Check for grace-period ends and beginnings. */
2301 	note_gp_changes(rdp);
2302 
2303 	/*
2304 	 * Does this CPU still need to do its part for current grace period?
2305 	 * If no, return and let the other CPUs do their part as well.
2306 	 */
2307 	if (!rdp->core_needs_qs)
2308 		return;
2309 
2310 	/*
2311 	 * Was there a quiescent state since the beginning of the grace
2312 	 * period? If no, then exit and wait for the next call.
2313 	 */
2314 	if (rdp->cpu_no_qs.b.norm)
2315 		return;
2316 
2317 	/*
2318 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2319 	 * judge of that).
2320 	 */
2321 	rcu_report_qs_rdp(rdp);
2322 }
2323 
2324 /*
2325  * Near the end of the offline process.  Trace the fact that this CPU
2326  * is going offline.
2327  */
2328 int rcutree_dying_cpu(unsigned int cpu)
2329 {
2330 	bool blkd;
2331 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2332 	struct rcu_node *rnp = rdp->mynode;
2333 
2334 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2335 		return 0;
2336 
2337 	blkd = !!(rnp->qsmask & rdp->grpmask);
2338 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2339 			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2340 	return 0;
2341 }
2342 
2343 /*
2344  * All CPUs for the specified rcu_node structure have gone offline,
2345  * and all tasks that were preempted within an RCU read-side critical
2346  * section while running on one of those CPUs have since exited their RCU
2347  * read-side critical section.  Some other CPU is reporting this fact with
2348  * the specified rcu_node structure's ->lock held and interrupts disabled.
2349  * This function therefore goes up the tree of rcu_node structures,
2350  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2351  * the leaf rcu_node structure's ->qsmaskinit field has already been
2352  * updated.
2353  *
2354  * This function does check that the specified rcu_node structure has
2355  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2356  * prematurely.  That said, invoking it after the fact will cost you
2357  * a needless lock acquisition.  So once it has done its work, don't
2358  * invoke it again.
2359  */
2360 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2361 {
2362 	long mask;
2363 	struct rcu_node *rnp = rnp_leaf;
2364 
2365 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2366 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2367 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2368 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2369 		return;
2370 	for (;;) {
2371 		mask = rnp->grpmask;
2372 		rnp = rnp->parent;
2373 		if (!rnp)
2374 			break;
2375 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2376 		rnp->qsmaskinit &= ~mask;
2377 		/* Between grace periods, so better already be zero! */
2378 		WARN_ON_ONCE(rnp->qsmask);
2379 		if (rnp->qsmaskinit) {
2380 			raw_spin_unlock_rcu_node(rnp);
2381 			/* irqs remain disabled. */
2382 			return;
2383 		}
2384 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2385 	}
2386 }
2387 
2388 /*
2389  * The CPU has been completely removed, and some other CPU is reporting
2390  * this fact from process context.  Do the remainder of the cleanup.
2391  * There can only be one CPU hotplug operation at a time, so no need for
2392  * explicit locking.
2393  */
2394 int rcutree_dead_cpu(unsigned int cpu)
2395 {
2396 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2397 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2398 
2399 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2400 		return 0;
2401 
2402 	/* Adjust any no-longer-needed kthreads. */
2403 	rcu_boost_kthread_setaffinity(rnp, -1);
2404 	/* Do any needed no-CB deferred wakeups from this CPU. */
2405 	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2406 
2407 	// Stop-machine done, so allow nohz_full to disable tick.
2408 	tick_dep_clear(TICK_DEP_BIT_RCU);
2409 	return 0;
2410 }
2411 
2412 /*
2413  * Invoke any RCU callbacks that have made it to the end of their grace
2414  * period.  Throttle as specified by rdp->blimit.
2415  */
2416 static void rcu_do_batch(struct rcu_data *rdp)
2417 {
2418 	int div;
2419 	unsigned long flags;
2420 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2421 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2422 	struct rcu_head *rhp;
2423 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2424 	long bl, count;
2425 	long pending, tlimit = 0;
2426 
2427 	/* If no callbacks are ready, just return. */
2428 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2429 		trace_rcu_batch_start(rcu_state.name,
2430 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2431 		trace_rcu_batch_end(rcu_state.name, 0,
2432 				    !rcu_segcblist_empty(&rdp->cblist),
2433 				    need_resched(), is_idle_task(current),
2434 				    rcu_is_callbacks_kthread());
2435 		return;
2436 	}
2437 
2438 	/*
2439 	 * Extract the list of ready callbacks, disabling to prevent
2440 	 * races with call_rcu() from interrupt handlers.  Leave the
2441 	 * callback counts, as rcu_barrier() needs to be conservative.
2442 	 */
2443 	local_irq_save(flags);
2444 	rcu_nocb_lock(rdp);
2445 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2446 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
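	/*
	 * Compute the batch limit: clamp rcu_divisor to a sane shift count,
	 * then take the larger of the per-CPU blimit and pending >> div so
	 * that large backlogs get proportionally larger batches.
	 */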
2447 	div = READ_ONCE(rcu_divisor);
2448 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2449 	bl = max(rdp->blimit, pending >> div);
2450 	if (unlikely(bl > 100)) {
2451 		long rrn = READ_ONCE(rcu_resched_ns);
2452 
2453 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2454 		tlimit = local_clock() + rrn;
2455 	}
2456 	trace_rcu_batch_start(rcu_state.name,
2457 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2458 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2459 	if (offloaded)
2460 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2461 	rcu_nocb_unlock_irqrestore(rdp, flags);
2462 
2463 	/* Invoke callbacks. */
2464 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2465 	rhp = rcu_cblist_dequeue(&rcl);
2466 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2467 		rcu_callback_t f;
2468 
2469 		debug_rcu_head_unqueue(rhp);
2470 
2471 		rcu_lock_acquire(&rcu_callback_map);
2472 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2473 
2474 		f = rhp->func;
2475 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2476 		f(rhp);
2477 
2478 		rcu_lock_release(&rcu_callback_map);
2479 
2480 		/*
2481 		 * Stop only if limit reached and CPU has something to do.
2482 		 * Note: The rcl structure counts down from zero.
2483 		 */
2484 		if (-rcl.len >= bl && !offloaded &&
2485 		    (need_resched() ||
2486 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2487 			break;
2488 		if (unlikely(tlimit)) {
2489 			/* only call local_clock() every 32 callbacks */
2490 			if (likely((-rcl.len & 31) || local_clock() < tlimit))
2491 				continue;
2492 			/* Exceeded the time limit, so leave. */
2493 			break;
2494 		}
2495 		if (offloaded) {
2496 			WARN_ON_ONCE(in_serving_softirq());
2497 			local_bh_enable();
2498 			lockdep_assert_irqs_enabled();
2499 			cond_resched_tasks_rcu_qs();
2500 			lockdep_assert_irqs_enabled();
2501 			local_bh_disable();
2502 		}
2503 	}
2504 
2505 	local_irq_save(flags);
2506 	rcu_nocb_lock(rdp);
2507 	count = -rcl.len;
2508 	rdp->n_cbs_invoked += count;
2509 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2510 			    is_idle_task(current), rcu_is_callbacks_kthread());
2511 
2512 	/* Update counts and requeue any remaining callbacks. */
2513 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2514 	smp_mb(); /* List handling before counting for rcu_barrier(). */
2515 	rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2516 
2517 	/* Reinstate batch limit if we have worked down the excess. */
2518 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2519 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2520 		rdp->blimit = blimit;
2521 
2522 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2523 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2524 		rdp->qlen_last_fqs_check = 0;
2525 		rdp->n_force_qs_snap = rcu_state.n_force_qs;
2526 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2527 		rdp->qlen_last_fqs_check = count;
2528 
2529 	/*
2530 	 * The following usually indicates a double call_rcu().  To track
2531 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2532 	 */
2533 	WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2534 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2535 		     count != 0 && rcu_segcblist_empty(&rdp->cblist));
2536 
2537 	rcu_nocb_unlock_irqrestore(rdp, flags);
2538 
2539 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2540 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2541 		invoke_rcu_core();
2542 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2543 }
2544 
2545 /*
2546  * This function is invoked from each scheduling-clock interrupt,
2547  * and checks to see if this CPU is in a non-context-switch quiescent
2548  * state, for example, user mode or idle loop.  It also schedules RCU
2549  * core processing.  If the current grace period has gone on too long,
2550  * it will ask the scheduler to manufacture a context switch for the sole
2551  * purpose of providing the needed quiescent state.
2552  */
2553 void rcu_sched_clock_irq(int user)
2554 {
2555 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2556 	raw_cpu_inc(rcu_data.ticks_this_gp);
2557 	/* The load-acquire pairs with the store-release setting to true. */
2558 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2559 		/* Idle and userspace execution already are quiescent states. */
2560 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2561 			set_tsk_need_resched(current);
2562 			set_preempt_need_resched();
2563 		}
2564 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2565 	}
2566 	rcu_flavor_sched_clock_irq(user);
2567 	if (rcu_pending(user))
2568 		invoke_rcu_core();
2569 
2570 	trace_rcu_utilization(TPS("End scheduler-tick"));
2571 }
2572 
2573 /*
2574  * Scan the leaf rcu_node structures.  For each structure on which all
2575  * CPUs have reported a quiescent state and on which there are tasks
2576  * blocking the current grace period, initiate RCU priority boosting.
2577  * Otherwise, invoke the specified function to check dyntick state for
2578  * each CPU that has not yet reported a quiescent state.
2579  */
2580 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2581 {
2582 	int cpu;
2583 	unsigned long flags;
2584 	unsigned long mask;
2585 	struct rcu_data *rdp;
2586 	struct rcu_node *rnp;
2587 
2588 	rcu_state.cbovld = rcu_state.cbovldnext;
2589 	rcu_state.cbovldnext = false;
2590 	rcu_for_each_leaf_node(rnp) {
2591 		cond_resched_tasks_rcu_qs();
2592 		mask = 0;
2593 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2594 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2595 		if (rnp->qsmask == 0) {
2596 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2597 				/*
2598 				 * No point in scanning bits because they
2599 				 * are all zero.  But we might need to
2600 				 * priority-boost blocked readers.
2601 				 */
2602 				rcu_initiate_boost(rnp, flags);
2603 				/* rcu_initiate_boost() releases rnp->lock */
2604 				continue;
2605 			}
2606 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2607 			continue;
2608 		}
2609 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2610 			rdp = per_cpu_ptr(&rcu_data, cpu);
2611 			if (f(rdp)) {
2612 				mask |= rdp->grpmask;
2613 				rcu_disable_urgency_upon_qs(rdp);
2614 			}
2615 		}
2616 		if (mask != 0) {
2617 			/* Idle/offline CPUs, report (releases rnp->lock). */
2618 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2619 		} else {
2620 			/* Nothing to do here, so just drop the lock. */
2621 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2622 		}
2623 	}
2624 }
2625 
2626 /*
2627  * Force quiescent states on reluctant CPUs, and also detect which
2628  * CPUs are in dyntick-idle mode.
2629  */
2630 void rcu_force_quiescent_state(void)
2631 {
2632 	unsigned long flags;
2633 	bool ret;
2634 	struct rcu_node *rnp;
2635 	struct rcu_node *rnp_old = NULL;
2636 
2637 	/* Funnel through hierarchy to reduce memory contention. */
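	/*
	 * Each level's ->fqslock is trylocked on the way up while holding
	 * the previous level's, which limits how many CPUs can contend at
	 * the root.  If the trylock fails, or if a force-quiescent-state
	 * request is already pending, some other CPU is already doing (or
	 * has already done) the work, so give up.
	 */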
2638 	rnp = __this_cpu_read(rcu_data.mynode);
2639 	for (; rnp != NULL; rnp = rnp->parent) {
2640 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2641 		       !raw_spin_trylock(&rnp->fqslock);
2642 		if (rnp_old != NULL)
2643 			raw_spin_unlock(&rnp_old->fqslock);
2644 		if (ret)
2645 			return;
2646 		rnp_old = rnp;
2647 	}
2648 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2649 
2650 	/* Reached the root of the rcu_node tree, acquire lock. */
2651 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2652 	raw_spin_unlock(&rnp_old->fqslock);
2653 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2654 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2655 		return;  /* Someone beat us to it. */
2656 	}
2657 	WRITE_ONCE(rcu_state.gp_flags,
2658 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2659 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2660 	rcu_gp_kthread_wake();
2661 }
2662 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2663 
2664 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2665 // grace periods.
2666 static void strict_work_handler(struct work_struct *work)
2667 {
2668 	rcu_read_lock();
2669 	rcu_read_unlock();
2670 }
2671 
2672 /* Perform RCU core processing work for the current CPU.  */
2673 static __latent_entropy void rcu_core(void)
2674 {
2675 	unsigned long flags;
2676 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2677 	struct rcu_node *rnp = rdp->mynode;
2678 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2679 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2680 
2681 	if (cpu_is_offline(smp_processor_id()))
2682 		return;
2683 	trace_rcu_utilization(TPS("Start RCU core"));
2684 	WARN_ON_ONCE(!rdp->beenonline);
2685 
2686 	/* Report any deferred quiescent states if preemption enabled. */
2687 	if (!(preempt_count() & PREEMPT_MASK)) {
2688 		rcu_preempt_deferred_qs(current);
2689 	} else if (rcu_preempt_need_deferred_qs(current)) {
2690 		set_tsk_need_resched(current);
2691 		set_preempt_need_resched();
2692 	}
2693 
2694 	/* Update RCU state based on any recent quiescent states. */
2695 	rcu_check_quiescent_state(rdp);
2696 
2697 	/* No grace period and unregistered callbacks? */
2698 	if (!rcu_gp_in_progress() &&
2699 	    rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
2700 		local_irq_save(flags);
2701 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2702 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2703 		local_irq_restore(flags);
2704 	}
2705 
2706 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2707 
2708 	/* If there are callbacks ready, invoke them. */
2709 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2710 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
2711 		rcu_do_batch(rdp);
2712 
2713 	/* Do any needed deferred wakeups of rcuo kthreads. */
2714 	do_nocb_deferred_wakeup(rdp);
2715 	trace_rcu_utilization(TPS("End RCU core"));
2716 
2717 	// If strict GPs, schedule an RCU reader in a clean environment.
2718 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2719 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2720 }
2721 
2722 static void rcu_core_si(struct softirq_action *h)
2723 {
2724 	rcu_core();
2725 }
2726 
2727 static void rcu_wake_cond(struct task_struct *t, int status)
2728 {
2729 	/*
2730 	 * If the thread is yielding, only wake it when this
2731 	 * is invoked from idle.
2732 	 */
2733 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2734 		wake_up_process(t);
2735 }
2736 
2737 static void invoke_rcu_core_kthread(void)
2738 {
2739 	struct task_struct *t;
2740 	unsigned long flags;
2741 
2742 	local_irq_save(flags);
2743 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2744 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2745 	if (t != NULL && t != current)
2746 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2747 	local_irq_restore(flags);
2748 }
2749 
2750 /*
2751  * Wake up this CPU's rcuc kthread to do RCU core processing.
2752  */
2753 static void invoke_rcu_core(void)
2754 {
2755 	if (!cpu_online(smp_processor_id()))
2756 		return;
2757 	if (use_softirq)
2758 		raise_softirq(RCU_SOFTIRQ);
2759 	else
2760 		invoke_rcu_core_kthread();
2761 }
2762 
2763 static void rcu_cpu_kthread_park(unsigned int cpu)
2764 {
2765 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2766 }
2767 
2768 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2769 {
2770 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2771 }
2772 
2773 /*
2774  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2775  * the RCU softirq used in configurations of RCU that do not support RCU
2776  * priority boosting.
2777  */
2778 static void rcu_cpu_kthread(unsigned int cpu)
2779 {
2780 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2781 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2782 	int spincnt;
2783 
2784 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2785 	for (spincnt = 0; spincnt < 10; spincnt++) {
2786 		local_bh_disable();
2787 		*statusp = RCU_KTHREAD_RUNNING;
2788 		local_irq_disable();
2789 		work = *workp;
2790 		*workp = 0;
2791 		local_irq_enable();
2792 		if (work)
2793 			rcu_core();
2794 		local_bh_enable();
2795 		if (*workp == 0) {
2796 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2797 			*statusp = RCU_KTHREAD_WAITING;
2798 			return;
2799 		}
2800 	}
2801 	*statusp = RCU_KTHREAD_YIELDING;
2802 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2803 	schedule_timeout_idle(2);
2804 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2805 	*statusp = RCU_KTHREAD_WAITING;
2806 }
2807 
2808 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2809 	.store			= &rcu_data.rcu_cpu_kthread_task,
2810 	.thread_should_run	= rcu_cpu_kthread_should_run,
2811 	.thread_fn		= rcu_cpu_kthread,
2812 	.thread_comm		= "rcuc/%u",
2813 	.setup			= rcu_cpu_kthread_setup,
2814 	.park			= rcu_cpu_kthread_park,
2815 };
2816 
2817 /*
2818  * Spawn per-CPU RCU core processing kthreads.
2819  */
2820 static int __init rcu_spawn_core_kthreads(void)
2821 {
2822 	int cpu;
2823 
2824 	for_each_possible_cpu(cpu)
2825 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2826 	if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2827 		return 0;
2828 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2829 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2830 	return 0;
2831 }
2832 early_initcall(rcu_spawn_core_kthreads);
2833 
2834 /*
2835  * Handle any core-RCU processing required by a call_rcu() invocation.
2836  */
2837 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2838 			    unsigned long flags)
2839 {
2840 	/*
2841 	 * If called from an extended quiescent state, invoke the RCU
2842 	 * core in order to force a re-evaluation of RCU's idleness.
2843 	 */
2844 	if (!rcu_is_watching())
2845 		invoke_rcu_core();
2846 
2847 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2848 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2849 		return;
2850 
2851 	/*
2852 	 * Force the grace period if too many callbacks or too long waiting.
2853 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2854 	 * if some other CPU has recently done so.  Also, don't bother
2855 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2856 	 * is the only one waiting for a grace period to complete.
2857 	 */
2858 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2859 		     rdp->qlen_last_fqs_check + qhimark)) {
2860 
2861 		/* Are we ignoring a completed grace period? */
2862 		note_gp_changes(rdp);
2863 
2864 		/* Start a new grace period if one not already started. */
2865 		if (!rcu_gp_in_progress()) {
2866 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2867 		} else {
2868 			/* Give the grace period a kick. */
2869 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2870 			if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
2871 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2872 				rcu_force_quiescent_state();
2873 			rdp->n_force_qs_snap = rcu_state.n_force_qs;
2874 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2875 		}
2876 	}
2877 }
2878 
2879 /*
2880  * RCU callback function to leak a callback.
2881  */
2882 static void rcu_leak_callback(struct rcu_head *rhp)
2883 {
2884 }
2885 
2886 /*
2887  * Check and if necessary update the leaf rcu_node structure's
2888  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2889  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2890  * structure's ->lock.
2891  */
2892 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2893 {
2894 	raw_lockdep_assert_held_rcu_node(rnp);
2895 	if (qovld_calc <= 0)
2896 		return; // Early boot and wildcard value set.
2897 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2898 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2899 	else
2900 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2901 }
2902 
2903 /*
2904  * Check and if necessary update the leaf rcu_node structure's
2905  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2906  * number of queued RCU callbacks.  No locks need be held, but the
2907  * caller must have disabled interrupts.
2908  *
2909  * Note that this function ignores the possibility that there are a lot
2910  * of callbacks all of which have already seen the end of their respective
2911  * grace periods.  This omission is due to the need for no-CBs CPUs to
2912  * be holding ->nocb_lock to do this check, which is too heavy for a
2913  * common-case operation.
2914  */
2915 static void check_cb_ovld(struct rcu_data *rdp)
2916 {
2917 	struct rcu_node *const rnp = rdp->mynode;
2918 
2919 	if (qovld_calc <= 0 ||
2920 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2921 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2922 		return; // Early boot wildcard value or already set correctly.
2923 	raw_spin_lock_rcu_node(rnp);
2924 	check_cb_ovld_locked(rdp, rnp);
2925 	raw_spin_unlock_rcu_node(rnp);
2926 }
2927 
2928 /* Helper function for call_rcu() and friends.  */
2929 static void
2930 __call_rcu(struct rcu_head *head, rcu_callback_t func)
2931 {
2932 	unsigned long flags;
2933 	struct rcu_data *rdp;
2934 	bool was_alldone;
2935 
2936 	/* Misaligned rcu_head! */
2937 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2938 
2939 	if (debug_rcu_head_queue(head)) {
2940 		/*
2941 		 * Probable double call_rcu(), so leak the callback.
2942 		 * Use rcu:rcu_callback trace event to find the previous
2943 		 * time callback was passed to __call_rcu().
2944 		 */
2945 		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2946 			  head, head->func);
2947 		WRITE_ONCE(head->func, rcu_leak_callback);
2948 		return;
2949 	}
2950 	head->func = func;
2951 	head->next = NULL;
2952 	local_irq_save(flags);
2953 	kasan_record_aux_stack(head);
2954 	rdp = this_cpu_ptr(&rcu_data);
2955 
2956 	/* Add the callback to our list. */
2957 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2958 		// This can trigger due to call_rcu() from offline CPU:
2959 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2960 		WARN_ON_ONCE(!rcu_is_watching());
2961 		// Very early boot, before rcu_init().  Initialize if needed
2962 		// and then drop through to queue the callback.
2963 		if (rcu_segcblist_empty(&rdp->cblist))
2964 			rcu_segcblist_init(&rdp->cblist);
2965 	}
2966 
2967 	check_cb_ovld(rdp);
2968 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2969 		return; // Enqueued onto ->nocb_bypass, so just leave.
2970 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2971 	rcu_segcblist_enqueue(&rdp->cblist, head);
2972 	if (__is_kvfree_rcu_offset((unsigned long)func))
2973 		trace_rcu_kvfree_callback(rcu_state.name, head,
2974 					 (unsigned long)func,
2975 					 rcu_segcblist_n_cbs(&rdp->cblist));
2976 	else
2977 		trace_rcu_callback(rcu_state.name, head,
2978 				   rcu_segcblist_n_cbs(&rdp->cblist));
2979 
2980 	/* Go handle any RCU core processing required. */
2981 	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2982 	    unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
2983 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2984 	} else {
2985 		__call_rcu_core(rdp, head, flags);
2986 		local_irq_restore(flags);
2987 	}
2988 }
2989 
2990 /**
2991  * call_rcu() - Queue an RCU callback for invocation after a grace period.
2992  * @head: structure to be used for queueing the RCU updates.
2993  * @func: actual callback function to be invoked after the grace period
2994  *
2995  * The callback function will be invoked some time after a full grace
2996  * period elapses, in other words after all pre-existing RCU read-side
2997  * critical sections have completed.  However, the callback function
2998  * might well execute concurrently with RCU read-side critical sections
2999  * that started after call_rcu() was invoked.  RCU read-side critical
3000  * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
3001  * may be nested.  In addition, regions of code across which interrupts,
3002  * preemption, or softirqs have been disabled also serve as RCU read-side
3003  * critical sections.  This includes hardware interrupt handlers, softirq
3004  * handlers, and NMI handlers.
3005  *
3006  * Note that all CPUs must agree that the grace period extended beyond
3007  * all pre-existing RCU read-side critical sections.  On systems with more
3008  * than one CPU, this means that when "func()" is invoked, each CPU is
3009  * guaranteed to have executed a full memory barrier since the end of its
3010  * last RCU read-side critical section whose beginning preceded the call
3011  * to call_rcu().  It also means that each CPU executing an RCU read-side
3012  * critical section that continues beyond the start of "func()" must have
3013  * executed a memory barrier after the call_rcu() but before the beginning
3014  * of that RCU read-side critical section.  Note that these guarantees
3015  * include CPUs that are offline, idle, or executing in user mode, as
3016  * well as CPUs that are executing in the kernel.
3017  *
3018  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3019  * resulting RCU callback function "func()", then both CPU A and CPU B are
3020  * guaranteed to execute a full memory barrier during the time interval
3021  * between the call to call_rcu() and the invocation of "func()" -- even
3022  * if CPU A and CPU B are the same CPU (but again only if the system has
3023  * more than one CPU).
3024  */
3025 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3026 {
3027 	__call_rcu(head, func);
3028 }
3029 EXPORT_SYMBOL_GPL(call_rcu);
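
/*
 * Illustrative sketch (not part of RCU itself): a typical call_rcu() user
 * embeds an rcu_head in its own structure, unpublishes the structure from
 * all readers, and then passes the rcu_head to call_rcu() along with a
 * callback that frees the enclosing object.  The "struct foo" type and the
 * foo_* functions below are hypothetical and are compiled out.
 */
#if 0
struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);
}

static void foo_remove(struct foo *fp)
{
	/* Unpublish fp from all readers (e.g., list_del_rcu()) first. */
	call_rcu(&fp->rh, foo_reclaim);
}
#endif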
3030 
3031 
3032 /* Maximum number of jiffies to wait before draining a batch. */
3033 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3034 #define KFREE_N_BATCHES 2
3035 #define FREE_N_CHANNELS 2
3036 
3037 /**
3038  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3039  * @nr_records: Number of active pointers in the array
3040  * @next: Next bulk object in the block chain
3041  * @records: Array of the kvfree_rcu() pointers
3042  */
3043 struct kvfree_rcu_bulk_data {
3044 	unsigned long nr_records;
3045 	struct kvfree_rcu_bulk_data *next;
3046 	void *records[];
3047 };
3048 
3049 /*
3050  * This macro defines how many entries the "records" array
3051  * will contain. It is sized so that a struct
3052  * kvfree_rcu_bulk_data fills exactly one page.
3053  */
3054 #define KVFREE_BULK_MAX_ENTR \
3055 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
3056 
3057 /**
3058  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3059  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3060  * @head_free: List of kfree_rcu() objects waiting for a grace period
3061  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3062  * @krcp: Pointer to @kfree_rcu_cpu structure
3063  */
3064 
3065 struct kfree_rcu_cpu_work {
3066 	struct rcu_work rcu_work;
3067 	struct rcu_head *head_free;
3068 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3069 	struct kfree_rcu_cpu *krcp;
3070 };
3071 
3072 /**
3073  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3074  * @head: List of kfree_rcu() objects not yet waiting for a grace period
3075  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3076  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3077  * @lock: Synchronize access to this structure
3078  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3079  * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3080  * @initialized: The @rcu_work fields have been initialized
3081  * @count: Number of objects for which GP not started
3082  * @bkvcache:
3083  *	A simple cache list that contains objects for reuse.
3084  *	In order to save some per-cpu space the list is singular.
3085  *	Even though the list itself is lockless, accesses to it must
3086  *	be protected by the per-cpu lock.
3087  * @nr_bkv_objs: number of allocated objects at @bkvcache.
3088  *
3089  * This is a per-CPU structure.  The reason that it is not included in
3090  * the rcu_data structure is to permit this code to be extracted from
3091  * the RCU files.  Such extraction could allow further optimization of
3092  * the interactions with the slab allocators.
3093  */
3094 struct kfree_rcu_cpu {
3095 	struct rcu_head *head;
3096 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3097 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3098 	raw_spinlock_t lock;
3099 	struct delayed_work monitor_work;
3100 	bool monitor_todo;
3101 	bool initialized;
3102 	int count;
3103 	struct llist_head bkvcache;
3104 	int nr_bkv_objs;
3105 };
3106 
3107 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3108 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3109 };
3110 
3111 static __always_inline void
3112 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3113 {
3114 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3115 	int i;
3116 
3117 	for (i = 0; i < bhead->nr_records; i++)
3118 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3119 #endif
3120 }
3121 
3122 static inline struct kfree_rcu_cpu *
3123 krc_this_cpu_lock(unsigned long *flags)
3124 {
3125 	struct kfree_rcu_cpu *krcp;
3126 
3127 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
3128 	krcp = this_cpu_ptr(&krc);
3129 	raw_spin_lock(&krcp->lock);
3130 
3131 	return krcp;
3132 }
3133 
3134 static inline void
3135 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3136 {
3137 	raw_spin_unlock(&krcp->lock);
3138 	local_irq_restore(flags);
3139 }
3140 
3141 static inline struct kvfree_rcu_bulk_data *
3142 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3143 {
3144 	if (!krcp->nr_bkv_objs)
3145 		return NULL;
3146 
3147 	krcp->nr_bkv_objs--;
3148 	return (struct kvfree_rcu_bulk_data *)
3149 		llist_del_first(&krcp->bkvcache);
3150 }
3151 
3152 static inline bool
3153 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3154 	struct kvfree_rcu_bulk_data *bnode)
3155 {
3156 	// Check the limit.
3157 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3158 		return false;
3159 
3160 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3161 	krcp->nr_bkv_objs++;
3162 	return true;
3163 
3164 }
3165 
3166 /*
3167  * This function is invoked in workqueue context after a grace period.
3168  * It frees all the objects queued on ->bkvhead_free or ->head_free.
3169  */
3170 static void kfree_rcu_work(struct work_struct *work)
3171 {
3172 	unsigned long flags;
3173 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3174 	struct rcu_head *head, *next;
3175 	struct kfree_rcu_cpu *krcp;
3176 	struct kfree_rcu_cpu_work *krwp;
3177 	int i, j;
3178 
3179 	krwp = container_of(to_rcu_work(work),
3180 			    struct kfree_rcu_cpu_work, rcu_work);
3181 	krcp = krwp->krcp;
3182 
3183 	raw_spin_lock_irqsave(&krcp->lock, flags);
3184 	// Channels 1 and 2.
3185 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3186 		bkvhead[i] = krwp->bkvhead_free[i];
3187 		krwp->bkvhead_free[i] = NULL;
3188 	}
3189 
3190 	// Channel 3.
3191 	head = krwp->head_free;
3192 	krwp->head_free = NULL;
3193 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3194 
3195 	// Handle the first two channels.
3196 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3197 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3198 			bnext = bkvhead[i]->next;
3199 			debug_rcu_bhead_unqueue(bkvhead[i]);
3200 
3201 			rcu_lock_acquire(&rcu_callback_map);
3202 			if (i == 0) { // kmalloc() / kfree().
3203 				trace_rcu_invoke_kfree_bulk_callback(
3204 					rcu_state.name, bkvhead[i]->nr_records,
3205 					bkvhead[i]->records);
3206 
3207 				kfree_bulk(bkvhead[i]->nr_records,
3208 					bkvhead[i]->records);
3209 			} else { // vmalloc() / vfree().
3210 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3211 					trace_rcu_invoke_kvfree_callback(
3212 						rcu_state.name,
3213 						bkvhead[i]->records[j], 0);
3214 
3215 					vfree(bkvhead[i]->records[j]);
3216 				}
3217 			}
3218 			rcu_lock_release(&rcu_callback_map);
3219 
3220 			krcp = krc_this_cpu_lock(&flags);
3221 			if (put_cached_bnode(krcp, bkvhead[i]))
3222 				bkvhead[i] = NULL;
3223 			krc_this_cpu_unlock(krcp, flags);
3224 
3225 			if (bkvhead[i])
3226 				free_page((unsigned long) bkvhead[i]);
3227 
3228 			cond_resched_tasks_rcu_qs();
3229 		}
3230 	}
3231 
3232 	/*
3233 	 * Emergency case only. It can happen under low-memory
3234 	 * conditions, when an allocation fails and the "bulk"
3235 	 * path therefore cannot be used.
3236 	 */
3237 	for (; head; head = next) {
3238 		unsigned long offset = (unsigned long)head->func;
3239 		void *ptr = (void *)head - offset;
3240 
3241 		next = head->next;
3242 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3243 		rcu_lock_acquire(&rcu_callback_map);
3244 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3245 
3246 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3247 			kvfree(ptr);
3248 
3249 		rcu_lock_release(&rcu_callback_map);
3250 		cond_resched_tasks_rcu_qs();
3251 	}
3252 }
3253 
3254 /*
3255  * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3256  *
3257  * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3258  * timeout has been reached.
3259  */
3260 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3261 {
3262 	struct kfree_rcu_cpu_work *krwp;
3263 	bool repeat = false;
3264 	int i, j;
3265 
3266 	lockdep_assert_held(&krcp->lock);
3267 
3268 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3269 		krwp = &(krcp->krw_arr[i]);
3270 
3271 		/*
3272 		 * Try to detach bkvhead or head and attach it to the
3273 		 * corresponding free channel, if one is available.  If a
3274 		 * previous RCU batch is still in progress, another one
3275 		 * cannot be queued immediately, so return false to tell
3276 		 * the caller to retry later.
3277 		 */
3278 		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3279 			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3280 				(krcp->head && !krwp->head_free)) {
3281 			// Channel 1 corresponds to SLAB ptrs.
3282 			// Channel 2 corresponds to vmalloc ptrs.
3283 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3284 				if (!krwp->bkvhead_free[j]) {
3285 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3286 					krcp->bkvhead[j] = NULL;
3287 				}
3288 			}
3289 
3290 			// Channel 3 corresponds to emergency path.
3291 			if (!krwp->head_free) {
3292 				krwp->head_free = krcp->head;
3293 				krcp->head = NULL;
3294 			}
3295 
3296 			WRITE_ONCE(krcp->count, 0);
3297 
3298 			/*
3299 			 * There is one work item per batch, and each batch
3300 			 * handles all three "free channels".  The work may
3301 			 * already be pending if the channels were detached
3302 			 * one after another.
3304 			 */
3305 			queue_rcu_work(system_wq, &krwp->rcu_work);
3306 		}
3307 
3308 		// Repeat if any corresponding "free" channel is still busy.
3309 		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
3310 			repeat = true;
3311 	}
3312 
3313 	return !repeat;
3314 }
3315 
3316 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3317 					  unsigned long flags)
3318 {
3319 	// Attempt to start a new batch.
3320 	krcp->monitor_todo = false;
3321 	if (queue_kfree_rcu_work(krcp)) {
3322 		// Success! Our job is done here.
3323 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3324 		return;
3325 	}
3326 
3327 	// Previous RCU batch still in progress, try again later.
3328 	krcp->monitor_todo = true;
3329 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3330 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3331 }
3332 
3333 /*
3334  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3335  * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3336  */
3337 static void kfree_rcu_monitor(struct work_struct *work)
3338 {
3339 	unsigned long flags;
3340 	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3341 						 monitor_work.work);
3342 
3343 	raw_spin_lock_irqsave(&krcp->lock, flags);
3344 	if (krcp->monitor_todo)
3345 		kfree_rcu_drain_unlock(krcp, flags);
3346 	else
3347 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3348 }
3349 
3350 static inline bool
3351 kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
3352 {
3353 	struct kvfree_rcu_bulk_data *bnode;
3354 	int idx;
3355 
3356 	if (unlikely(!krcp->initialized))
3357 		return false;
3358 
3359 	lockdep_assert_held(&krcp->lock);
3360 	idx = !!is_vmalloc_addr(ptr);
3361 
3362 	/* Check if a new block is required. */
3363 	if (!krcp->bkvhead[idx] ||
3364 			krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3365 		bnode = get_cached_bnode(krcp);
3366 		if (!bnode) {
3367 			/*
3368 			 * To keep this path working on raw non-preemptible
3369 			 * sections, prevent the optional entry into the
3370 			 * allocator as it uses sleeping locks. In fact, even
3371 			 * if the caller of kfree_rcu() is preemptible, this
3372 			 * path still is not, as krcp->lock is a raw spinlock.
3373 			 * With additional page pre-allocation in the works,
3374 			 * hitting this return is going to be much less likely.
3375 			 */
3376 			if (IS_ENABLED(CONFIG_PREEMPT_RT))
3377 				return false;
3378 
3379 			/*
3380 			 * NOTE: For the single-argument form of kvfree_rcu()
3381 			 * we could drop the lock and allocate the page in a
3382 			 * sleepable context.  That would allow the array to
3383 			 * be maintained under CONFIG_PREEMPT_RT as well when
3384 			 * no cached pages are available.
3385 			 */
3386 			bnode = (struct kvfree_rcu_bulk_data *)
3387 				__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
3388 		}
3389 
3390 		/* Switch to emergency path. */
3391 		if (unlikely(!bnode))
3392 			return false;
3393 
3394 		/* Initialize the new block. */
3395 		bnode->nr_records = 0;
3396 		bnode->next = krcp->bkvhead[idx];
3397 
3398 		/* Attach it to the head. */
3399 		krcp->bkvhead[idx] = bnode;
3400 	}
3401 
3402 	/* Finally insert. */
3403 	krcp->bkvhead[idx]->records
3404 		[krcp->bkvhead[idx]->nr_records++] = ptr;
3405 
3406 	return true;
3407 }
3408 
3409 /*
3410  * Queue a request for lazy invocation of the appropriate free routine after a
3411  * grace period.  Please note that three paths are maintained: the two main ones
3412  * use the array-of-pointers interface, and the third is an emergency path that
3413  * is used only when the main paths temporarily cannot be maintained due to
3414  * memory pressure.
3415  *
3416  * Each kvfree_call_rcu() request is added to a batch.  The batch will be drained
3417  * every KFREE_DRAIN_JIFFIES jiffies.  All the objects in the batch will be
3418  * freed in workqueue context.  This allows requests to be batched together to
3419  * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3420  */
3421 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3422 {
3423 	unsigned long flags;
3424 	struct kfree_rcu_cpu *krcp;
3425 	bool success;
3426 	void *ptr;
3427 
3428 	if (head) {
3429 		ptr = (void *) head - (unsigned long) func;
3430 	} else {
3431 		/*
3432 		 * Please note that the head-less variant has a limitation,
3433 		 * hence the clear rule for such objects: it can be used
3434 		 * only from contexts that can sleep (might_sleep()).  For
3435 		 * other places, please embed an rcu_head in your data.
3437 		 */
3438 		might_sleep();
3439 		ptr = (unsigned long *) func;
3440 	}
3441 
3442 	krcp = krc_this_cpu_lock(&flags);
3443 
3444 	// Queue the object but don't yet schedule the batch.
3445 	if (debug_rcu_head_queue(ptr)) {
3446 		// Probable double kfree_rcu(), just leak.
3447 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3448 			  __func__, head);
3449 
3450 		// Mark as success and leave.
3451 		success = true;
3452 		goto unlock_return;
3453 	}
3454 
3455 	/*
3456 	 * Under high memory pressure the GFP_NOWAIT allocation can
3457 	 * fail, in which case the emergency path is used instead.
3458 	 */
3459 	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
3460 	if (!success) {
3461 		if (head == NULL)
3462 			// Inline if kvfree_rcu(one_arg) call.
3463 			goto unlock_return;
3464 
3465 		head->func = func;
3466 		head->next = krcp->head;
3467 		krcp->head = head;
3468 		success = true;
3469 	}
3470 
3471 	WRITE_ONCE(krcp->count, krcp->count + 1);
3472 
3473 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3474 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3475 	    !krcp->monitor_todo) {
3476 		krcp->monitor_todo = true;
3477 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3478 	}
3479 
3480 unlock_return:
3481 	krc_this_cpu_unlock(krcp, flags);
3482 
3483 	/*
3484 	 * Inline kvfree() after synchronize_rcu().  This is possible
3485 	 * only from a might_sleep() context, so the current CPU can
3486 	 * pass through a quiescent state.
3487 	 */
3488 	if (!success) {
3489 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3490 		synchronize_rcu();
3491 		kvfree(ptr);
3492 	}
3493 }
3494 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
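
/*
 * Illustrative sketch (not part of RCU itself): kvfree_call_rcu() is
 * normally reached through the kfree_rcu()/kvfree_rcu() macros.  The
 * two-argument form names an rcu_head field and may be used from atomic
 * context; the head-less single-argument form may block and so is limited
 * to sleepable context, as noted above.  "struct foo" and foo_release()
 * are hypothetical and the code is compiled out.
 */
#if 0
struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_release(struct foo *fp, void *buf)
{
	kfree_rcu(fp, rh);	/* Two-argument form, safe in atomic context. */
	kvfree_rcu(buf);	/* Head-less form, may sleep. */
}
#endif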
3495 
3496 static unsigned long
3497 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3498 {
3499 	int cpu;
3500 	unsigned long count = 0;
3501 
3502 	/* Snapshot count of all CPUs */
3503 	for_each_possible_cpu(cpu) {
3504 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3505 
3506 		count += READ_ONCE(krcp->count);
3507 	}
3508 
3509 	return count;
3510 }
3511 
3512 static unsigned long
3513 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3514 {
3515 	int cpu, freed = 0;
3516 	unsigned long flags;
3517 
3518 	for_each_possible_cpu(cpu) {
3519 		int count;
3520 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3521 
3522 		count = krcp->count;
3523 		raw_spin_lock_irqsave(&krcp->lock, flags);
3524 		if (krcp->monitor_todo)
3525 			kfree_rcu_drain_unlock(krcp, flags);
3526 		else
3527 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3528 
3529 		sc->nr_to_scan -= count;
3530 		freed += count;
3531 
3532 		if (sc->nr_to_scan <= 0)
3533 			break;
3534 	}
3535 
3536 	return freed == 0 ? SHRINK_STOP : freed;
3537 }
3538 
3539 static struct shrinker kfree_rcu_shrinker = {
3540 	.count_objects = kfree_rcu_shrink_count,
3541 	.scan_objects = kfree_rcu_shrink_scan,
3542 	.batch = 0,
3543 	.seeks = DEFAULT_SEEKS,
3544 };
3545 
3546 void __init kfree_rcu_scheduler_running(void)
3547 {
3548 	int cpu;
3549 	unsigned long flags;
3550 
3551 	for_each_possible_cpu(cpu) {
3552 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3553 
3554 		raw_spin_lock_irqsave(&krcp->lock, flags);
3555 		if (!krcp->head || krcp->monitor_todo) {
3556 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3557 			continue;
3558 		}
3559 		krcp->monitor_todo = true;
3560 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
3561 					 KFREE_DRAIN_JIFFIES);
3562 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3563 	}
3564 }
3565 
3566 /*
3567  * During early boot, any blocking grace-period wait automatically
3568  * implies a grace period.  Later on, this is never the case for PREEMPTION.
3569  *
3570  * However, because a context switch is a grace period for !PREEMPTION, any
3571  * blocking grace-period wait automatically implies a grace period if
3572  * there is only one CPU online at any point in time during execution of
3573  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3574  * occasionally incorrectly indicate that there are multiple CPUs online
3575  * when there was in fact only one the whole time, as this just adds some
3576  * overhead: RCU still operates correctly.
3577  */
3578 static int rcu_blocking_is_gp(void)
3579 {
3580 	int ret;
3581 
3582 	if (IS_ENABLED(CONFIG_PREEMPTION))
3583 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3584 	might_sleep();  /* Check for RCU read-side critical section. */
3585 	preempt_disable();
3586 	ret = num_online_cpus() <= 1;
3587 	preempt_enable();
3588 	return ret;
3589 }
3590 
3591 /**
3592  * synchronize_rcu - wait until a grace period has elapsed.
3593  *
3594  * Control will return to the caller some time after a full grace
3595  * period has elapsed, in other words after all currently executing RCU
3596  * read-side critical sections have completed.  Note, however, that
3597  * upon return from synchronize_rcu(), the caller might well be executing
3598  * concurrently with new RCU read-side critical sections that began while
3599  * synchronize_rcu() was waiting.  RCU read-side critical sections are
3600  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3601  * In addition, regions of code across which interrupts, preemption, or
3602  * softirqs have been disabled also serve as RCU read-side critical
3603  * sections.  This includes hardware interrupt handlers, softirq handlers,
3604  * and NMI handlers.
3605  *
3606  * Note that this guarantee implies further memory-ordering guarantees.
3607  * On systems with more than one CPU, when synchronize_rcu() returns,
3608  * each CPU is guaranteed to have executed a full memory barrier since
3609  * the end of its last RCU read-side critical section whose beginning
3610  * preceded the call to synchronize_rcu().  In addition, each CPU having
3611  * an RCU read-side critical section that extends beyond the return from
3612  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3613  * after the beginning of synchronize_rcu() and before the beginning of
3614  * that RCU read-side critical section.  Note that these guarantees include
3615  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3616  * that are executing in the kernel.
3617  *
3618  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3619  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3620  * to have executed a full memory barrier during the execution of
3621  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3622  * again only if the system has more than one CPU).
3623  */
3624 void synchronize_rcu(void)
3625 {
3626 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3627 			 lock_is_held(&rcu_lock_map) ||
3628 			 lock_is_held(&rcu_sched_lock_map),
3629 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3630 	if (rcu_blocking_is_gp())
3631 		return;
3632 	if (rcu_gp_is_expedited())
3633 		synchronize_rcu_expedited();
3634 	else
3635 		wait_rcu_gp(call_rcu);
3636 }
3637 EXPORT_SYMBOL_GPL(synchronize_rcu);
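
/*
 * Illustrative sketch (not part of RCU itself): the classic updater pattern
 * removes an element from an RCU-protected list, waits for a grace period
 * with synchronize_rcu(), and only then frees the element.  The names below
 * (struct foo, foo_list, foo_lock) are hypothetical and the code is
 * compiled out.
 */
#if 0
static void foo_del_and_free(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);	/* Unpublish from new readers. */
	spin_unlock(&foo_lock);
	synchronize_rcu();		/* Wait for pre-existing readers. */
	kfree(fp);			/* No reader can still hold a reference. */
}
#endif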
3638 
3639 /**
3640  * get_state_synchronize_rcu - Snapshot current RCU state
3641  *
3642  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3643  * to determine whether or not a full grace period has elapsed in the
3644  * meantime.
3645  */
3646 unsigned long get_state_synchronize_rcu(void)
3647 {
3648 	/*
3649 	 * Any prior manipulation of RCU-protected data must happen
3650 	 * before the load from ->gp_seq.
3651 	 */
3652 	smp_mb();  /* ^^^ */
3653 	return rcu_seq_snap(&rcu_state.gp_seq);
3654 }
3655 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3656 
3657 /**
3658  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3659  *
3660  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3661  *
3662  * If a full RCU grace period has elapsed since the earlier call to
3663  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
3664  * synchronize_rcu() to wait for a full grace period.
3665  *
3666  * Yes, this function does not take counter wrap into account.  But
3667  * counter wrap is harmless.  If the counter wraps, we have waited for
3668  * more than 2 billion grace periods (and way more on a 64-bit system!),
3669  * so waiting for one additional grace period should be just fine.
3670  */
3671 void cond_synchronize_rcu(unsigned long oldstate)
3672 {
3673 	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
3674 		synchronize_rcu();
3675 	else
3676 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3677 }
3678 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
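
/*
 * Illustrative sketch (not part of RCU itself): a caller that can overlap
 * other work with the grace period takes a cookie from
 * get_state_synchronize_rcu(), does that work, and then invokes
 * cond_synchronize_rcu(), which blocks only if a full grace period has not
 * already elapsed.  do_other_work() is hypothetical and the code is
 * compiled out.
 */
#if 0
static void foo_deferred_cleanup(void)
{
	unsigned long cookie = get_state_synchronize_rcu();

	do_other_work();		/* Overlaps with the grace period. */
	cond_synchronize_rcu(cookie);	/* Waits only if still needed. */
}
#endif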
3679 
3680 /*
3681  * Check to see if there is any immediate RCU-related work to be done by
3682  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3683  * in order of increasing expense: checks that can be carried out against
3684  * CPU-local state are performed first.  However, we must check for CPU
3685  * stalls first, else we might not get a chance.
3686  */
3687 static int rcu_pending(int user)
3688 {
3689 	bool gp_in_progress;
3690 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3691 	struct rcu_node *rnp = rdp->mynode;
3692 
3693 	/* Check for CPU stalls, if enabled. */
3694 	check_cpu_stall(rdp);
3695 
3696 	/* Does this CPU need a deferred NOCB wakeup? */
3697 	if (rcu_nocb_need_deferred_wakeup(rdp))
3698 		return 1;
3699 
3700 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3701 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3702 		return 0;
3703 
3704 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3705 	gp_in_progress = rcu_gp_in_progress();
3706 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3707 		return 1;
3708 
3709 	/* Does this CPU have callbacks ready to invoke? */
3710 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
3711 		return 1;
3712 
3713 	/* Has RCU gone idle with this CPU needing another grace period? */
3714 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3715 	    (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
3716 	     !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
3717 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3718 		return 1;
3719 
3720 	/* Have RCU grace period completed or started?  */
3721 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3722 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3723 		return 1;
3724 
3725 	/* nothing to do */
3726 	return 0;
3727 }
3728 
3729 /*
3730  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3731  * the compiler is expected to optimize this away.
3732  */
3733 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3734 {
3735 	trace_rcu_barrier(rcu_state.name, s, cpu,
3736 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3737 }
3738 
3739 /*
3740  * RCU callback function for rcu_barrier().  If we are last, wake
3741  * up the task executing rcu_barrier().
3742  *
3743  * Note that the value of rcu_state.barrier_sequence must be captured
3744  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3745  * other CPUs might count the value down to zero before this CPU gets
3746  * around to invoking rcu_barrier_trace(), which might result in bogus
3747  * data from the next instance of rcu_barrier().
3748  */
3749 static void rcu_barrier_callback(struct rcu_head *rhp)
3750 {
3751 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3752 
3753 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3754 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3755 		complete(&rcu_state.barrier_completion);
3756 	} else {
3757 		rcu_barrier_trace(TPS("CB"), -1, s);
3758 	}
3759 }
3760 
3761 /*
3762  * Called with preemption disabled, and from cross-cpu IRQ context.
3763  */
3764 static void rcu_barrier_func(void *cpu_in)
3765 {
3766 	uintptr_t cpu = (uintptr_t)cpu_in;
3767 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3768 
3769 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3770 	rdp->barrier_head.func = rcu_barrier_callback;
3771 	debug_rcu_head_queue(&rdp->barrier_head);
3772 	rcu_nocb_lock(rdp);
3773 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3774 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3775 		atomic_inc(&rcu_state.barrier_cpu_count);
3776 	} else {
3777 		debug_rcu_head_unqueue(&rdp->barrier_head);
3778 		rcu_barrier_trace(TPS("IRQNQ"), -1,
3779 				  rcu_state.barrier_sequence);
3780 	}
3781 	rcu_nocb_unlock(rdp);
3782 }
3783 
3784 /**
3785  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3786  *
3787  * Note that this primitive does not necessarily wait for an RCU grace period
3788  * to complete.  For example, if there are no RCU callbacks queued anywhere
3789  * in the system, then rcu_barrier() is within its rights to return
3790  * immediately, without waiting for anything, much less an RCU grace period.
3791  */
3792 void rcu_barrier(void)
3793 {
3794 	uintptr_t cpu;
3795 	struct rcu_data *rdp;
3796 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3797 
3798 	rcu_barrier_trace(TPS("Begin"), -1, s);
3799 
3800 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3801 	mutex_lock(&rcu_state.barrier_mutex);
3802 
3803 	/* Did someone else do our work for us? */
3804 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3805 		rcu_barrier_trace(TPS("EarlyExit"), -1,
3806 				  rcu_state.barrier_sequence);
3807 		smp_mb(); /* caller's subsequent code after above check. */
3808 		mutex_unlock(&rcu_state.barrier_mutex);
3809 		return;
3810 	}
3811 
3812 	/* Mark the start of the barrier operation. */
3813 	rcu_seq_start(&rcu_state.barrier_sequence);
3814 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3815 
3816 	/*
3817 	 * Initialize the count to two rather than to zero in order
3818 	 * to avoid a too-soon return to zero in case of an immediate
3819 	 * invocation of the just-enqueued callback (or preemption of
3820 	 * this task).  Exclude CPU-hotplug operations to ensure that no
3821 	 * offline non-offloaded CPU has callbacks queued.
3822 	 */
3823 	init_completion(&rcu_state.barrier_completion);
3824 	atomic_set(&rcu_state.barrier_cpu_count, 2);
3825 	get_online_cpus();
3826 
3827 	/*
3828 	 * Force each CPU with callbacks to register a new callback.
3829 	 * When that callback is invoked, we will know that all of the
3830 	 * corresponding CPU's preceding callbacks have been invoked.
3831 	 */
3832 	for_each_possible_cpu(cpu) {
3833 		rdp = per_cpu_ptr(&rcu_data, cpu);
3834 		if (cpu_is_offline(cpu) &&
3835 		    !rcu_segcblist_is_offloaded(&rdp->cblist))
3836 			continue;
3837 		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
3838 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
3839 					  rcu_state.barrier_sequence);
3840 			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3841 		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3842 			   cpu_is_offline(cpu)) {
3843 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3844 					  rcu_state.barrier_sequence);
3845 			local_irq_disable();
3846 			rcu_barrier_func((void *)cpu);
3847 			local_irq_enable();
3848 		} else if (cpu_is_offline(cpu)) {
3849 			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3850 					  rcu_state.barrier_sequence);
3851 		} else {
3852 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
3853 					  rcu_state.barrier_sequence);
3854 		}
3855 	}
3856 	put_online_cpus();
3857 
3858 	/*
3859 	 * Now that we have an rcu_barrier_callback() callback on each
3860 	 * CPU, and thus each counted, remove the initial count.
3861 	 */
3862 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3863 		complete(&rcu_state.barrier_completion);
3864 
3865 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3866 	wait_for_completion(&rcu_state.barrier_completion);
3867 
3868 	/* Mark the end of the barrier operation. */
3869 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3870 	rcu_seq_end(&rcu_state.barrier_sequence);
3871 
3872 	/* Other rcu_barrier() invocations can now safely proceed. */
3873 	mutex_unlock(&rcu_state.barrier_mutex);
3874 }
3875 EXPORT_SYMBOL_GPL(rcu_barrier);
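
/*
 * Illustrative sketch (not part of RCU itself): a module that posts RCU
 * callbacks must wait for all of them to be invoked before its code and
 * data (for example, a kmem_cache) go away.  The foo_* names below are
 * hypothetical and the code is compiled out.
 */
#if 0
static void __exit foo_exit(void)
{
	/* First ensure that no new callbacks will be posted. */
	rcu_barrier();			/* Wait for in-flight callbacks. */
	kmem_cache_destroy(foo_cache);	/* Now safe to tear down. */
}
#endif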
3876 
3877 /*
3878  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
3879  * first CPU in a given leaf rcu_node structure coming online.  The caller
3880  * must hold the corresponding leaf rcu_node ->lock with interrupts
3881  * disabled.
3882  */
3883 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3884 {
3885 	long mask;
3886 	long oldmask;
3887 	struct rcu_node *rnp = rnp_leaf;
3888 
3889 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
3890 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
3891 	for (;;) {
3892 		mask = rnp->grpmask;
3893 		rnp = rnp->parent;
3894 		if (rnp == NULL)
3895 			return;
3896 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3897 		oldmask = rnp->qsmaskinit;
3898 		rnp->qsmaskinit |= mask;
3899 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3900 		if (oldmask)
3901 			return;
3902 	}
3903 }
3904 
3905 /*
3906  * Do boot-time initialization of a CPU's per-CPU RCU data.
3907  */
3908 static void __init
3909 rcu_boot_init_percpu_data(int cpu)
3910 {
3911 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3912 
3913 	/* Set up local state, ensuring consistent view of global state. */
3914 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3915 	INIT_WORK(&rdp->strict_work, strict_work_handler);
3916 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
3917 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
3918 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
3919 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
3920 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
3921 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
3922 	rdp->cpu = cpu;
3923 	rcu_boot_init_nocb_percpu_data(rdp);
3924 }
3925 
3926 /*
3927  * Invoked early in the CPU-online process, when pretty much all services
3928  * are available.  The incoming CPU is not present.
3929  *
3930  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
3931  * offline event can be happening at a given time.  Note also that we can
3932  * accept some slop in the rsp->gp_seq access due to the fact that this
3933  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
3934  * And any offloaded callbacks are being numbered elsewhere.
3935  */
3936 int rcutree_prepare_cpu(unsigned int cpu)
3937 {
3938 	unsigned long flags;
3939 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3940 	struct rcu_node *rnp = rcu_get_root();
3941 
3942 	/* Set up local state, ensuring consistent view of global state. */
3943 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
3944 	rdp->qlen_last_fqs_check = 0;
3945 	rdp->n_force_qs_snap = rcu_state.n_force_qs;
3946 	rdp->blimit = blimit;
3947 	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
3948 	    !rcu_segcblist_is_offloaded(&rdp->cblist))
3949 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
3950 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
3951 	rcu_dynticks_eqs_online();
3952 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
3953 
3954 	/*
3955 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
3956 	 * propagation up the rcu_node tree will happen at the beginning
3957 	 * of the next grace period.
3958 	 */
3959 	rnp = rdp->mynode;
3960 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
3961 	rdp->beenonline = true;	 /* We have now been online. */
3962 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
3963 	rdp->gp_seq_needed = rdp->gp_seq;
3964 	rdp->cpu_no_qs.b.norm = true;
3965 	rdp->core_needs_qs = false;
3966 	rdp->rcu_iw_pending = false;
3967 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
3968 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
3969 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3970 	rcu_prepare_kthreads(cpu);
3971 	rcu_spawn_cpu_nocb_kthread(cpu);
3972 
3973 	return 0;
3974 }
3975 
3976 /*
3977  * Update RCU priority boost kthread affinity for CPU-hotplug changes.
3978  */
3979 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3980 {
3981 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3982 
3983 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3984 }
3985 
3986 /*
3987  * Near the end of the CPU-online process.  Pretty much all services
3988  * enabled, and the CPU is now very much alive.
3989  */
3990 int rcutree_online_cpu(unsigned int cpu)
3991 {
3992 	unsigned long flags;
3993 	struct rcu_data *rdp;
3994 	struct rcu_node *rnp;
3995 
3996 	rdp = per_cpu_ptr(&rcu_data, cpu);
3997 	rnp = rdp->mynode;
3998 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
3999 	rnp->ffmask |= rdp->grpmask;
4000 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4001 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4002 		return 0; /* Too early in boot for scheduler work. */
4003 	sync_sched_exp_online_cleanup(cpu);
4004 	rcutree_affinity_setting(cpu, -1);
4005 
4006 	// Stop-machine done, so allow nohz_full to disable tick.
4007 	tick_dep_clear(TICK_DEP_BIT_RCU);
4008 	return 0;
4009 }
4010 
4011 /*
4012  * Near the beginning of the CPU-offline process.  The CPU is still very much alive
4013  * with pretty much all services enabled.
4014  */
4015 int rcutree_offline_cpu(unsigned int cpu)
4016 {
4017 	unsigned long flags;
4018 	struct rcu_data *rdp;
4019 	struct rcu_node *rnp;
4020 
4021 	rdp = per_cpu_ptr(&rcu_data, cpu);
4022 	rnp = rdp->mynode;
4023 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4024 	rnp->ffmask &= ~rdp->grpmask;
4025 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4026 
4027 	rcutree_affinity_setting(cpu, cpu);
4028 
4029 	// nohz_full CPUs need the tick for stop-machine to work quickly
4030 	tick_dep_set(TICK_DEP_BIT_RCU);
4031 	return 0;
4032 }
4033 
4034 /*
4035  * Mark the specified CPU as being online so that subsequent grace periods
4036  * (both expedited and normal) will wait on it.  Note that this means that
4037  * incoming CPUs are not allowed to use RCU read-side critical sections
4038  * until this function is called.  Failing to observe this restriction
4039  * will result in lockdep splats.
4040  *
4041  * Note that this function is special in that it is invoked directly
4042  * from the incoming CPU rather than from the cpuhp_step mechanism.
4043  * This is because this function must be invoked at a precise location.
4044  */
4045 void rcu_cpu_starting(unsigned int cpu)
4046 {
4047 	unsigned long flags;
4048 	unsigned long mask;
4049 	struct rcu_data *rdp;
4050 	struct rcu_node *rnp;
4051 	bool newcpu;
4052 
4053 	rdp = per_cpu_ptr(&rcu_data, cpu);
4054 	if (rdp->cpu_started)
4055 		return;
4056 	rdp->cpu_started = true;
4057 
4058 	rnp = rdp->mynode;
4059 	mask = rdp->grpmask;
4060 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4061 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4062 	newcpu = !(rnp->expmaskinitnext & mask);
4063 	rnp->expmaskinitnext |= mask;
4064 	/* Allow lockless access for expedited grace periods. */
4065 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4066 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4067 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4068 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4069 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4070 	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
4071 		rcu_disable_urgency_upon_qs(rdp);
4072 		/* Report QS -after- changing ->qsmaskinitnext! */
4073 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4074 	} else {
4075 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4076 	}
4077 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4078 }
4079 
4080 /*
4081  * The outgoing CPU has no further need of RCU, so remove it from
4082  * the rcu_node tree's ->qsmaskinitnext bit masks.
4083  *
4084  * Note that this function is special in that it is invoked directly
4085  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4086  * This is because this function must be invoked at a precise location.
4087  */
4088 void rcu_report_dead(unsigned int cpu)
4089 {
4090 	unsigned long flags;
4091 	unsigned long mask;
4092 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4093 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4094 
4095 	/* QS for any half-done expedited grace period. */
4096 	preempt_disable();
4097 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4098 	preempt_enable();
4099 	rcu_preempt_deferred_qs(current);
4100 
4101 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4102 	mask = rdp->grpmask;
4103 	raw_spin_lock(&rcu_state.ofl_lock);
4104 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4105 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4106 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4107 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4108 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4109 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4110 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4111 	}
4112 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4113 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4114 	raw_spin_unlock(&rcu_state.ofl_lock);
4115 
4116 	rdp->cpu_started = false;
4117 }
4118 
4119 #ifdef CONFIG_HOTPLUG_CPU
4120 /*
4121  * The outgoing CPU has just passed through the dying-idle state, and we
4122  * are being invoked from the CPU that was IPIed to continue the offline
4123  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4124  */
4125 void rcutree_migrate_callbacks(int cpu)
4126 {
4127 	unsigned long flags;
4128 	struct rcu_data *my_rdp;
4129 	struct rcu_node *my_rnp;
4130 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4131 	bool needwake;
4132 
4133 	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
4134 	    rcu_segcblist_empty(&rdp->cblist))
4135 		return;  /* No callbacks to migrate. */
4136 
4137 	local_irq_save(flags);
4138 	my_rdp = this_cpu_ptr(&rcu_data);
4139 	my_rnp = my_rdp->mynode;
4140 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4141 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4142 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4143 	/* Leverage recent GPs and set GP for new callbacks. */
4144 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4145 		   rcu_advance_cbs(my_rnp, my_rdp);
4146 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4147 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4148 	rcu_segcblist_disable(&rdp->cblist);
4149 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4150 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
4151 	if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
4152 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4153 		__call_rcu_nocb_wake(my_rdp, true, flags);
4154 	} else {
4155 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4156 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4157 	}
4158 	if (needwake)
4159 		rcu_gp_kthread_wake();
4160 	lockdep_assert_irqs_enabled();
4161 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4162 		  !rcu_segcblist_empty(&rdp->cblist),
4163 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4164 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4165 		  rcu_segcblist_first_cb(&rdp->cblist));
4166 }
4167 #endif
4168 
4169 /*
4170  * On non-huge systems, use expedited RCU grace periods to make suspend
4171  * and hibernation run faster.
4172  */
4173 static int rcu_pm_notify(struct notifier_block *self,
4174 			 unsigned long action, void *hcpu)
4175 {
4176 	switch (action) {
4177 	case PM_HIBERNATION_PREPARE:
4178 	case PM_SUSPEND_PREPARE:
4179 		rcu_expedite_gp();
4180 		break;
4181 	case PM_POST_HIBERNATION:
4182 	case PM_POST_SUSPEND:
4183 		rcu_unexpedite_gp();
4184 		break;
4185 	default:
4186 		break;
4187 	}
4188 	return NOTIFY_OK;
4189 }
4190 
4191 /*
4192  * Spawn the kthreads that handle RCU's grace periods.
4193  */
4194 static int __init rcu_spawn_gp_kthread(void)
4195 {
4196 	unsigned long flags;
4197 	int kthread_prio_in = kthread_prio;
4198 	struct rcu_node *rnp;
4199 	struct sched_param sp;
4200 	struct task_struct *t;
4201 
4202 	/* Force priority into range. */
4203 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4204 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4205 		kthread_prio = 2;
4206 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4207 		kthread_prio = 1;
4208 	else if (kthread_prio < 0)
4209 		kthread_prio = 0;
4210 	else if (kthread_prio > 99)
4211 		kthread_prio = 99;
4212 
4213 	if (kthread_prio != kthread_prio_in)
4214 		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4215 			 kthread_prio, kthread_prio_in);
4216 
4217 	rcu_scheduler_fully_active = 1;
4218 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4219 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4220 		return 0;
4221 	if (kthread_prio) {
4222 		sp.sched_priority = kthread_prio;
4223 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4224 	}
4225 	rnp = rcu_get_root();
4226 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4227 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4228 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4229 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4230 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4231 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4232 	wake_up_process(t);
4233 	rcu_spawn_nocb_kthreads();
4234 	rcu_spawn_boost_kthreads();
4235 	return 0;
4236 }
4237 early_initcall(rcu_spawn_gp_kthread);
4238 
4239 /*
4240  * This function is invoked towards the end of the scheduler's
4241  * initialization process.  Before this is called, the idle task might
4242  * contain synchronous grace-period primitives (during which time, this idle
4243  * task is booting the system, and such primitives are no-ops).  After this
4244  * function is called, any synchronous grace-period primitives are run as
4245  * expedited, with the requesting task driving the grace period forward.
4246  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4247  * runtime RCU functionality.
4248  */
4249 void rcu_scheduler_starting(void)
4250 {
4251 	WARN_ON(num_online_cpus() != 1);
4252 	WARN_ON(nr_context_switches() > 0);
4253 	rcu_test_sync_prims();
4254 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4255 	rcu_test_sync_prims();
4256 }
4257 
4258 /*
4259  * Helper function for rcu_init() that initializes the rcu_state structure.
4260  */
4261 static void __init rcu_init_one(void)
4262 {
4263 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4264 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4265 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4266 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4267 
4268 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4269 	int cpustride = 1;
4270 	int i;
4271 	int j;
4272 	struct rcu_node *rnp;
4273 
4274 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4275 
4276 	/* Silence gcc 4.8 false positive about array index out of range. */
4277 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4278 		panic("rcu_init_one: rcu_num_lvls out of range");
4279 
4280 	/* Initialize the level-tracking arrays. */
4281 
4282 	for (i = 1; i < rcu_num_lvls; i++)
4283 		rcu_state.level[i] =
4284 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4285 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4286 
4287 	/* Initialize the elements themselves, starting from the leaves. */
4288 
4289 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4290 		cpustride *= levelspread[i];
4291 		rnp = rcu_state.level[i];
4292 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4293 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4294 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4295 						   &rcu_node_class[i], buf[i]);
4296 			raw_spin_lock_init(&rnp->fqslock);
4297 			lockdep_set_class_and_name(&rnp->fqslock,
4298 						   &rcu_fqs_class[i], fqs[i]);
4299 			rnp->gp_seq = rcu_state.gp_seq;
4300 			rnp->gp_seq_needed = rcu_state.gp_seq;
4301 			rnp->completedqs = rcu_state.gp_seq;
4302 			rnp->qsmask = 0;
4303 			rnp->qsmaskinit = 0;
4304 			rnp->grplo = j * cpustride;
4305 			rnp->grphi = (j + 1) * cpustride - 1;
4306 			if (rnp->grphi >= nr_cpu_ids)
4307 				rnp->grphi = nr_cpu_ids - 1;
4308 			if (i == 0) {
4309 				rnp->grpnum = 0;
4310 				rnp->grpmask = 0;
4311 				rnp->parent = NULL;
4312 			} else {
4313 				rnp->grpnum = j % levelspread[i - 1];
4314 				rnp->grpmask = BIT(rnp->grpnum);
4315 				rnp->parent = rcu_state.level[i - 1] +
4316 					      j / levelspread[i - 1];
4317 			}
4318 			rnp->level = i;
4319 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4320 			rcu_init_one_nocb(rnp);
4321 			init_waitqueue_head(&rnp->exp_wq[0]);
4322 			init_waitqueue_head(&rnp->exp_wq[1]);
4323 			init_waitqueue_head(&rnp->exp_wq[2]);
4324 			init_waitqueue_head(&rnp->exp_wq[3]);
4325 			spin_lock_init(&rnp->exp_lock);
4326 		}
4327 	}
4328 
4329 	init_swait_queue_head(&rcu_state.gp_wq);
4330 	init_swait_queue_head(&rcu_state.expedited_wq);
4331 	rnp = rcu_first_leaf_node();
4332 	for_each_possible_cpu(i) {
4333 		while (i > rnp->grphi)
4334 			rnp++;
4335 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4336 		rcu_boot_init_percpu_data(i);
4337 	}
4338 }
4339 
4340 /*
4341  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4342  * replace the definitions in tree.h because those are needed to size
4343  * the ->node array in the rcu_state structure.
4344  */
4345 static void __init rcu_init_geometry(void)
4346 {
4347 	ulong d;
4348 	int i;
4349 	int rcu_capacity[RCU_NUM_LVLS];
4350 
4351 	/*
4352 	 * Initialize any unspecified boot parameters.
4353 	 * The default values of jiffies_till_first_fqs and
4354 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4355 	 * value, which is a function of HZ, plus one for each
4356 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4357 	 */
4358 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4359 	if (jiffies_till_first_fqs == ULONG_MAX)
4360 		jiffies_till_first_fqs = d;
4361 	if (jiffies_till_next_fqs == ULONG_MAX)
4362 		jiffies_till_next_fqs = d;
4363 	adjust_jiffies_till_sched_qs();
4364 
4365 	/* If the compile-time values are accurate, just leave. */
4366 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4367 	    nr_cpu_ids == NR_CPUS)
4368 		return;
4369 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4370 		rcu_fanout_leaf, nr_cpu_ids);
4371 
4372 	/*
4373 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4374 	 * and cannot exceed the number of bits in the rcu_node masks.
4375 	 * Complain and fall back to the compile-time values if this
4376 	 * limit is exceeded.
4377 	 */
4378 	if (rcu_fanout_leaf < 2 ||
4379 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4380 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4381 		WARN_ON(1);
4382 		return;
4383 	}
4384 
4385 	/*
4386 	 * Compute the number of nodes that can be handled by an rcu_node
4387 	 * tree with the given number of levels.
4388 	 */
4389 	rcu_capacity[0] = rcu_fanout_leaf;
4390 	for (i = 1; i < RCU_NUM_LVLS; i++)
4391 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4392 
4393 	/*
4394 	 * The tree must be able to accommodate the configured number of CPUs.
4395 	 * If this limit is exceeded, fall back to the compile-time values.
4396 	 */
4397 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4398 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4399 		WARN_ON(1);
4400 		return;
4401 	}
4402 
4403 	/* Calculate the number of levels in the tree. */
4404 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4405 	}
4406 	rcu_num_lvls = i + 1;
4407 
4408 	/* Calculate the number of rcu_nodes at each level of the tree. */
4409 	for (i = 0; i < rcu_num_lvls; i++) {
4410 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4411 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4412 	}
4413 
4414 	/* Calculate the total number of rcu_node structures. */
4415 	rcu_num_nodes = 0;
4416 	for (i = 0; i < rcu_num_lvls; i++)
4417 		rcu_num_nodes += num_rcu_lvl[i];
4418 }
4419 
4420 /*
4421  * Dump out the structure of the rcu_node combining tree associated
4422  * with the rcu_state structure.
4423  */
4424 static void __init rcu_dump_rcu_node_tree(void)
4425 {
4426 	int level = 0;
4427 	struct rcu_node *rnp;
4428 
4429 	pr_info("rcu_node tree layout dump\n");
4430 	pr_info(" ");
4431 	rcu_for_each_node_breadth_first(rnp) {
4432 		if (rnp->level != level) {
4433 			pr_cont("\n");
4434 			pr_info(" ");
4435 			level = rnp->level;
4436 		}
4437 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4438 	}
4439 	pr_cont("\n");
4440 }
4441 
4442 struct workqueue_struct *rcu_gp_wq;
4443 struct workqueue_struct *rcu_par_gp_wq;
4444 
4445 static void __init kfree_rcu_batch_init(void)
4446 {
4447 	int cpu;
4448 	int i;
4449 
4450 	for_each_possible_cpu(cpu) {
4451 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4452 		struct kvfree_rcu_bulk_data *bnode;
4453 
4454 		for (i = 0; i < KFREE_N_BATCHES; i++) {
4455 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4456 			krcp->krw_arr[i].krcp = krcp;
4457 		}
4458 
4459 		for (i = 0; i < rcu_min_cached_objs; i++) {
4460 			bnode = (struct kvfree_rcu_bulk_data *)
4461 				__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
4462 
4463 			if (bnode)
4464 				put_cached_bnode(krcp, bnode);
4465 			else
4466 				pr_err("Failed to preallocate for %d CPU!\n", cpu);
4467 		}
4468 
4469 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4470 		krcp->initialized = true;
4471 	}
4472 	if (register_shrinker(&kfree_rcu_shrinker))
4473 		pr_err("Failed to register kfree_rcu() shrinker!\n");
4474 }
4475 
4476 void __init rcu_init(void)
4477 {
4478 	int cpu;
4479 
4480 	rcu_early_boot_tests();
4481 
4482 	kfree_rcu_batch_init();
4483 	rcu_bootup_announce();
4484 	rcu_init_geometry();
4485 	rcu_init_one();
4486 	if (dump_tree)
4487 		rcu_dump_rcu_node_tree();
4488 	if (use_softirq)
4489 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
4490 
4491 	/*
4492 	 * We don't need protection against CPU-hotplug here because
4493 	 * this is called early in boot, before either interrupts
4494 	 * or the scheduler are operational.
4495 	 */
4496 	pm_notifier(rcu_pm_notify, 0);
4497 	for_each_online_cpu(cpu) {
4498 		rcutree_prepare_cpu(cpu);
4499 		rcu_cpu_starting(cpu);
4500 		rcutree_online_cpu(cpu);
4501 	}
4502 
4503 	/* Create workqueue for expedited GPs and for Tree SRCU. */
4504 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4505 	WARN_ON(!rcu_gp_wq);
4506 	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4507 	WARN_ON(!rcu_par_gp_wq);
4508 	srcu_init();
4509 
4510 	/* Fill in default value for rcutree.qovld boot parameter. */
4511 	/* -After- the rcu_node ->lock fields are initialized! */
4512 	if (qovld < 0)
4513 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4514 	else
4515 		qovld_calc = qovld;
4516 }
4517 
4518 #include "tree_stall.h"
4519 #include "tree_exp.h"
4520 #include "tree_plugin.h"
4521