// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 *	0: Don't convert at all.
 *	1: Convert at init_srcu_struct() time.
 *	2: Convert when rcutorture invokes srcu_torture_stats_print().
 *	3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention encountered.
 */
#define SRCU_SIZING_NONE	0
#define SRCU_SIZING_INIT	1
#define SRCU_SIZING_TORTURE	2
#define SRCU_SIZING_AUTO	3
#define SRCU_SIZING_CONTEND	0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);
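
/*
 * Usage sketch for the sizing parameters above (illustrative only; these
 * module_param() declarations are exposed under the "srcutree." prefix
 * on the kernel command line):
 *
 *	srcutree.convert_to_big=1	   // Convert every srcu_struct at init.
 *	srcutree.convert_to_big=0x11	   // Init-time, plus convert on contention.
 *	srcutree.small_contention_lim=50   // Halve the contention threshold.
 *
 * All three are 0444 (read-only at runtime), so they take effect only at
 * boot or module-load time.
 */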

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)				\
({									\
	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)	\

/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ	0x2

/*
 * Check whether the sequence number corresponding to an snp node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return s == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns true if
 * allocation succeeded and false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();
	ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags);
	if (!ssp->srcu_sup->node)
		return false;

	/* Work out the overall tree geometry. */
	ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->srcu_sup->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->srcu_sup->level[level + 1])
			level++;
		snp->srcu_parent = ssp->srcu_sup->level[level - 1] +
			(snp - ssp->srcu_sup->level[level]) /
			levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->srcu_sup->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	if (!is_static)
		ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL);
	if (!ssp->srcu_sup)
		return -ENOMEM;
	if (!is_static)
		spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
	ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->srcu_sup->node = NULL;
	mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
	mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_sup->srcu_gp_seq = 0;
	ssp->srcu_sup->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_sup->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu);
	ssp->srcu_sup->sda_is_static = is_static;
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda) {
		if (!is_static)
			kfree(ssp->srcu_sup);
		return -ENOMEM;
	}
	init_srcu_struct_data(ssp);
	ssp->srcu_sup->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
			if (!ssp->srcu_sup->sda_is_static) {
				free_percpu(ssp->sda);
				ssp->sda = NULL;
				kfree(ssp->srcu_sup);
				return -ENOMEM;
			}
		} else {
			WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
		}
	}
	ssp->srcu_sup->srcu_ssp = ssp;
	smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
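
/*
 * Initialization sketch (illustrative only; "my_srcu" is a hypothetical
 * caller-side name):
 *
 *	// Dynamic: must be paired with cleanup_srcu_struct() at teardown.
 *	struct srcu_struct my_srcu;
 *	int ret = init_srcu_struct(&my_srcu);
 *
 *	// Static: compile-time initialization, no explicit init call needed.
 *	DEFINE_SRCU(my_static_srcu);
 *
 * Each srcu_struct is an independent SRCU domain: readers and grace
 * periods in one domain are invisible to all other domains.
 */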

/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{
	lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
	smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC);
}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* Double-checked locking on ->srcu_size_state. */
	if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL)
		return;
	spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
	if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) {
		spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
		return;
	}
	__srcu_transition_to_big(ssp);
	spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{
	unsigned long j;

	if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state)
		return;
	j = jiffies;
	if (ssp->srcu_sup->srcu_size_jiffies != j) {
		ssp->srcu_sup->srcu_size_jiffies = j;
		ssp->srcu_sup->srcu_n_lock_retries = 0;
	}
	if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim)
		return;
	__srcu_transition_to_big(ssp);
}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{
	struct srcu_struct *ssp = sdp->ssp;

	if (spin_trylock_irqsave_rcu_node(sdp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
	spin_lock_irqsave_check_contention(ssp);
	spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags);
	spin_lock_irqsave_rcu_node(sdp, *flags);
}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{
	if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
	spin_lock_irqsave_check_contention(ssp);
}

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
	if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long mask = 0;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);
		if (IS_ENABLED(CONFIG_PROVE_RCU))
			mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);
	}
	WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask >> 1)),
		  "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some point in this function.
	 * But there might be more readers, as a task might have read
	 * the current ->srcu_idx but not yet have incremented its CPU's
	 * ->srcu_lock_count[idx] counter.  In fact, it is possible
	 * that most of the tasks have been preempted between fetching
	 * ->srcu_idx and incrementing ->srcu_lock_count[idx].  And there
	 * could be almost (ULONG_MAX / sizeof(struct task_struct)) tasks
	 * in a system whose address space was fully populated with memory.
	 * Call this quantity Nt.
	 *
	 * So suppose that the updater is preempted at this point in the
	 * code for a long time.  That now-preempted updater has already
	 * flipped ->srcu_idx (possibly during the preceding grace period),
	 * done an smp_mb() (again, possibly during the preceding grace
	 * period), and summed up the ->srcu_unlock_count[idx] counters.
	 * How many times can a given one of the aforementioned Nt tasks
	 * increment the old ->srcu_idx value's ->srcu_lock_count[idx]
	 * counter, in the absence of nesting?
	 *
	 * It can clearly do so once, given that it has already fetched
	 * the old value of ->srcu_idx and is just about to use that value
	 * to index its increment of ->srcu_lock_count[idx].  But as soon as
	 * it leaves that SRCU read-side critical section, it will increment
	 * ->srcu_unlock_count[idx], which must follow the updater's above
	 * read from that same value.  Thus, as soon as the reading task does
	 * an smp_mb() and a later fetch from ->srcu_idx, that task will be
	 * guaranteed to get the new index.  Except that the increment of
	 * ->srcu_unlock_count[idx] in __srcu_read_unlock() is after the
	 * smp_mb(), and the fetch from ->srcu_idx in __srcu_read_lock()
	 * is before the smp_mb().  Thus, that task might not see the new
	 * value of ->srcu_idx until the -second- __srcu_read_lock(),
	 * which in turn means that this task might well increment
	 * ->srcu_lock_count[idx] for the old value of ->srcu_idx twice,
	 * not just once.
	 *
	 * However, it is important to note that a given smp_mb() takes
	 * effect not just for the task executing it, but also for any
	 * later task running on that same CPU.
	 *
	 * That is, there can be almost Nt + Nc further increments of
	 * ->srcu_lock_count[idx] for the old index, where Nc is the number
	 * of CPUs.  But this is OK because the size of the task_struct
	 * structure limits the value of Nt and current systems limit Nc
	 * to a few thousand.
	 *
	 * OK, but what about nesting?  This does impose a limit on
	 * nesting of half of the size of the task_struct structure
	 * (measured in bytes), which should be sufficient.  A late 2022
	 * TREE01 rcutorture run reported this size to be no less than
	 * 9408 bytes, allowing up to 4704 levels of nesting, which is
	 * comfortably beyond excessive.  Especially on 64-bit systems,
	 * which are unlikely to be configured with an address space fully
	 * populated with memory, at least not anytime soon.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_lock_count[0]);
		sum += atomic_long_read(&cpuc->srcu_lock_count[1]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for one-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY	5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL		1	// Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL	10	// Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO	3UL	// Lowmark on default per-GP-phase
							// no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI	1000UL	// Highmark on default per-GP-phase
							// no-delay instances.

#define SRCU_UL_CLAMP_LO(val, low)	((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high)	((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)	SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
// Per-GP-phase no-delay instances adjusted to allow non-sleeping polling
// for up to one jiffy.  The multiplication by 2 factors in the extra
// srcu_get_delay() invocation from process_srcu().
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED	\
	(2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Maximum per-GP-phase consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE	\
	SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
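
// Worked example (not from the original source): with HZ=1000 and the
// default 5-microsecond retry delay, the adjusted value is
// 2 * 1000000 / 1000 / 5 = 400, which lies inside the [3, 1000] clamp
// window, so SRCU_DEFAULT_MAX_NODELAY_PHASE is 400.  With HZ=100 the raw
// value would be 4000, which the clamp caps at 1000.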

static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY	(SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ?	\
					 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	unsigned long gpstart;
	unsigned long j;
	unsigned long jbase = SRCU_INTERVAL;
	struct srcu_usage *sup = ssp->srcu_sup;

	if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
		jbase = 0;
	if (rcu_seq_state(READ_ONCE(sup->srcu_gp_seq))) {
		j = jiffies - 1;
		gpstart = READ_ONCE(sup->srcu_gp_start);
		if (time_after(j, gpstart))
			jbase += j - gpstart;
		if (!jbase) {
			WRITE_ONCE(sup->srcu_n_exp_nodelay, READ_ONCE(sup->srcu_n_exp_nodelay) + 1);
			if (READ_ONCE(sup->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
				jbase = 1;
		}
	}
	return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_usage *sup = ssp->srcu_sup;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&sup->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&sup->srcu_gp_seq) != sup->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
			rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	kfree(sup->node);
	sup->node = NULL;
	sup->srcu_size_state = SRCU_SIZE_SMALL;
	if (!sup->sda_is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
		kfree(sup);
		ssp->srcu_sup = NULL;
	}
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
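
/*
 * Teardown-ordering sketch (illustrative only): callbacks queued via
 * call_srcu() must be waited for before cleanup, else the WARN_ON()s
 * above fire and the structure is deliberately leaked:
 *
 *	srcu_barrier(&my_srcu);		// Wait for all pending SRCU callbacks.
 *	cleanup_srcu_struct(&my_srcu);	// Only now is deconstruction safe.
 */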

#ifdef CONFIG_PROVE_RCU
/*
 * Check for consistent NMI safety.
 */
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
{
	int nmi_safe_mask = 1 << nmi_safe;
	int old_nmi_safe_mask;
	struct srcu_data *sdp;

	/* NMI-unsafe use in NMI is a bad sign */
	WARN_ON_ONCE(!nmi_safe && in_nmi());
	sdp = raw_cpu_ptr(ssp->sda);
	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
	if (!old_nmi_safe_mask) {
		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
		return;
	}
	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
}
EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
#endif /* CONFIG_PROVE_RCU */

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
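
/*
 * Reader-side usage sketch (illustrative only; callers normally use the
 * srcu_read_lock()/srcu_read_unlock() wrappers from <linux/srcu.h>, which
 * funnel into the two functions above and add lockdep checking; "gp" is
 * a hypothetical SRCU-protected pointer):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// May sleep, unlike vanilla RCU.
 *	srcu_read_unlock(&my_srcu, idx);
 */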

#ifdef CONFIG_NEED_SRCU_NMI_SAFE

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, but in an NMI-safe manner using RMW atomics.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	int idx;
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	atomic_long_inc(&sdp->srcu_lock_count[idx]);
	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);

#endif // CONFIG_NEED_SRCU_NMI_SAFE

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp;
	int state;

	if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
	else
		sdp = this_cpu_ptr(ssp->sda);
	lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
	WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay = 1;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	unsigned long sgsne;
	struct srcu_node *snp;
	int ss_state;
	struct srcu_usage *sup = ssp->srcu_sup;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sup->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(sup);
	idx = rcu_seq_state(sup->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
		cbdelay = 0;

	WRITE_ONCE(sup->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&sup->srcu_gp_seq);
	gpseq = rcu_seq_current(&sup->srcu_gp_seq);
	if (ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(sup->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(sup);
	mutex_unlock(&sup->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	ss_state = smp_load_acquire(&sup->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
				      cbdelay);
	} else {
		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
		srcu_for_each_node_breadth_first(ssp, snp) {
			spin_lock_irq_rcu_node(snp);
			cbs = false;
			last_lvl = snp >= sup->level[rcu_num_lvls - 1];
			if (last_lvl)
				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
			snp->srcu_have_cbs[idx] = gpseq;
			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
			if (ss_state < SRCU_SIZE_BIG)
				mask = ~0;
			else
				mask = snp->srcu_data_have_cbs[idx];
			snp->srcu_data_have_cbs[idx] = 0;
			spin_unlock_irq_rcu_node(snp);
			if (cbs)
				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
		}
	}

	/* Occasionally prevent srcu_data counter wrap. */
	if (!(gpseq & counter_wrap_check))
		for_each_possible_cpu(cpu) {
			sdp = per_cpu_ptr(ssp->sda, cpu);
			spin_lock_irqsave_rcu_node(sdp, flags);
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
				sdp->srcu_gp_seq_needed = gpseq;
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
				sdp->srcu_gp_seq_needed_exp = gpseq;
			spin_unlock_irqrestore_rcu_node(sdp, flags);
		}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sup->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(sup);
	gpseq = rcu_seq_current(&sup->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sup->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(sup);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(sup);
	}

	/* Transition to big if needed. */
	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
		if (ss_state == SRCU_SIZE_ALLOC)
			init_srcu_struct_nodes(ssp, GFP_KERNEL);
		else
			smp_store_release(&sup->srcu_size_state, ss_state + 1);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;
	unsigned long sgsne;

	if (snp)
		for (; snp != NULL; snp = snp->srcu_parent) {
			sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
			if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) ||
			    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
				return;
			spin_lock_irqsave_rcu_node(snp, flags);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
				spin_unlock_irqrestore_rcu_node(snp, flags);
				return;
			}
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 *
 * The SRCU read lock should be held around this function, and s is a
 * sequence snapshot taken after that lock is acquired.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	unsigned long sgsne;
	struct srcu_node *snp;
	struct srcu_node *snp_leaf;
	unsigned long snp_seq;
	struct srcu_usage *sup = ssp->srcu_sup;

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (smp_load_acquire(&sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		snp_leaf = NULL;
	else
		snp_leaf = sdp->mynode;

	if (snp_leaf)
		/* Each pass through the loop does one level of the srcu_node tree. */
		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
			if (WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && snp != snp_leaf)
				return; /* GP already done and CBs recorded. */
			spin_lock_irqsave_rcu_node(snp, flags);
			snp_seq = snp->srcu_have_cbs[idx];
			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
				if (snp == snp_leaf && snp_seq == s)
					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
				spin_unlock_irqrestore_rcu_node(snp, flags);
				if (snp == snp_leaf && snp_seq != s) {
					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
					return;
				}
				if (!do_norm)
					srcu_funnel_exp_start(ssp, snp, s);
				return;
			}
			snp->srcu_have_cbs[idx] = s;
			if (snp == snp_leaf)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(sup->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sup->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(sup->srcu_gp_seq_needed_exp, s);

	/* If grace period not already in progress, start it. */
	if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) &&
	    rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sup->srcu_gp_seq, sup->srcu_gp_seq_needed));
		srcu_gp_start(ssp);

		// And how can that list_add() in the "else" clause
		// possibly be safe for concurrent execution?  Well,
		// it isn't.  And it does not have to be.  After all, it
		// can only be executed during early boot when there is only
		// the one boot CPU running with interrupts still disabled.
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &sup->work,
					   !!srcu_get_delay(ssp));
		else if (list_empty(&sup->work.work.entry))
			list_add(&sup->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(sup, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	unsigned long curdelay;

	curdelay = !srcu_get_delay(ssp);

	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if ((--trycount + curdelay) <= 0)
			return false;
		udelay(srcu_retry_check_delay);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Because the flip of ->srcu_idx is executed only if the
	 * preceding call to srcu_readers_active_idx_check() found that
	 * the ->srcu_unlock_count[] and ->srcu_lock_count[] sums matched
	 * and because that summing uses atomic_long_read(), there is
	 * ordering due to a control dependency between that summing and
	 * the WRITE_ONCE() in this call to srcu_flip().  This ordering
	 * ensures that if this updater saw a given reader's increment from
	 * __srcu_read_lock(), that reader was using a value of ->srcu_idx
	 * from before the previous call to srcu_flip(), which should be
	 * quite rare.  This ordering thus helps forward progress because
	 * the grace period could otherwise be delayed by additional
	 * calls to __srcu_read_lock() using that old (soon to be new)
	 * value of ->srcu_idx.
	 *
	 * This sum-equality check and ordering also ensures that if
	 * a given call to __srcu_read_lock() uses the new value of
	 * ->srcu_idx, this updater's earlier scans cannot have seen
	 * that reader's increments, which is all to the good, because
	 * this grace period need not wait on that reader.  After all,
	 * if those earlier scans had seen that reader, there would have
	 * been a sum mismatch and this code would not be reached.
	 *
	 * This means that the following smp_mb() is redundant, but
	 * it stays until either (1) Compilers learn about this sort of
	 * control dependency or (2) Some production workload running on
	 * a production system is unduly delayed by this slowpath smp_mb().
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); // Flip the counter.

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's __srcu_read_lock() following its next
	 * __srcu_read_lock() or __srcu_read_unlock() will see the above
	 * counter update.  Note that both this memory barrier and the
	 * one in srcu_readers_active_idx_check() provide the guarantee
	 * for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;
	struct srcu_node *sdp_mynode;
	int ss_state;

	check_init_srcu_struct(ssp);
	/*
	 * While starting a new grace period, make sure we are in an
	 * SRCU read-side critical section so that the grace-period
	 * sequence number cannot wrap around in the meantime.
	 */
	idx = __srcu_read_lock_nmisafe(ssp);
	ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_CALL)
		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
	else
		sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_sdp_contention(sdp, &flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
		sdp_mynode = NULL;
	else
		sdp_mynode = sdp->mynode;

	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp_mynode, s);
	__srcu_read_unlock_nmisafe(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
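
/*
 * Usage sketch (illustrative only; "struct foo", "foo_reclaim", and the
 * embedded rcu_head field are hypothetical caller-side names):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);	// Runs only after all pre-existing readers.
 *	}
 *
 *	...
 *	call_srcu(&my_srcu, &fp->rh, foo_reclaim);
 */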

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	srcu_lock_sync(&ssp->dep_map);

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the count
 * of the index=((->srcu_idx & 1) ^ 1) to drain to zero, and then flips
 * ->srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
1444
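/*
 * Illustrative sketch, not part of this file: the reader/updater pairing
 * that the guarantees above describe.  The names my_srcu, gp, newp,
 * update_lock, consume(), and struct foo are hypothetical.  Readers may
 * block inside the critical section; the updater publishes a new version,
 * waits out pre-existing readers, then frees the old one.
 *
 *	DEFINE_SRCU(my_srcu);
 *	struct foo __rcu *gp;
 *
 *	// Reader.
 *	int idx = srcu_read_lock(&my_srcu);
 *	struct foo *p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		consume(p);		// May sleep; SRCU permits this.
 *	srcu_read_unlock(&my_srcu, idx);
 *
 *	// Updater, holding update_lock.
 *	struct foo *old = rcu_replace_pointer(gp, newp,
 *					      lockdep_is_held(&update_lock));
 *	synchronize_srcu(&my_srcu);	// Wait for all pre-existing readers.
 *	kfree(old);
 */
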
/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that a grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

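/*
 * Illustrative sketch, not part of this file: snapshot a cookie, then
 * drive a grace period as the comment above requires, so that the cookie
 * will eventually be seen as done.  The names my_srcu, obj, and
 * my_free_cb are hypothetical.
 *
 *	unsigned long cookie = get_state_synchronize_srcu(&my_srcu);
 *
 *	call_srcu(&my_srcu, &obj->rh, my_free_cb);  // Starts a GP if needed.
 *	...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		...;	// A full grace period has elapsed since the snapshot.
 */
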
/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems, where cookies are 32 bits
 * and where in theory wrapping could happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // ^^^
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);

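/*
 * Illustrative sketch of the non-blocking polling pattern, not part of
 * this file: start a grace period up front, stash the cookie, and check
 * it from a later cleanup pass instead of blocking.  The names my_srcu,
 * obj->gp_cookie, and requeue_for_later() are hypothetical.
 *
 *	obj->gp_cookie = start_poll_synchronize_srcu(&my_srcu);
 *	...
 *	// Later, for example from periodic cleanup work:
 *	if (poll_state_synchronize_srcu(&my_srcu, obj->gp_cookie))
 *		kfree(obj);		// No reader can still reference obj.
 *	else
 *		requeue_for_later(obj);	// Check again on the next pass.
 */
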
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_sup->srcu_barrier_completion);
}

/*
 * Enqueue an srcu_barrier() callback on the specified srcu_data
 * structure's ->cblist, but only if that ->cblist already has at least one
 * callback enqueued.  Note that if a CPU already has callbacks enqueued,
 * it must have already registered the need for a future grace period,
 * so all we need do is enqueue a callback that will use the same grace
 * period as the last callback already in the queue.
 */
static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
{
	spin_lock_irq_rcu_node(sdp);
	atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
	sdp->srcu_barrier_head.func = srcu_barrier_cb;
	debug_rcu_head_queue(&sdp->srcu_barrier_head);
	if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
				   &sdp->srcu_barrier_head)) {
		debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
		atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
	}
	spin_unlock_irq_rcu_node(sdp);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	int idx;
	unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq);
	init_completion(&ssp->srcu_sup->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1);

	idx = __srcu_read_lock_nmisafe(ssp);
	if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
	else
		for_each_possible_cpu(cpu)
			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
	__srcu_read_unlock_nmisafe(ssp, idx);

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_sup->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

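/*
 * Illustrative teardown ordering, not part of this file: srcu_barrier()
 * waits only for callbacks already posted, so stop posting first, then
 * wait, and only then clean up.  The names my_srcu (assumed initialized
 * via init_srcu_struct()) and stop_posting_new_srcu_callbacks() are
 * hypothetical.
 *
 *	stop_posting_new_srcu_callbacks();  // No more call_srcu() after this.
 *	srcu_barrier(&my_srcu);		    // All posted callbacks have run.
 *	cleanup_srcu_struct(&my_srcu);	    // Safe: no callbacks remain.
 */
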
/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_sup->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp->srcu_sup);
		if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp->srcu_sup);
			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp->srcu_sup);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		spin_lock_irq_rcu_node(ssp->srcu_sup);
		rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2);
		ssp->srcu_sup->srcu_n_exp_nodelay = 0;
		spin_unlock_irq_rcu_node(ssp->srcu_sup);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		ssp->srcu_sup->srcu_n_exp_nodelay = 0;
		srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	long len;
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);

	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return; /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	len = ready_cbs.len;
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}
	WARN_ON_ONCE(ready_cbs.len);

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp->srcu_sup);
	if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp->srcu_sup);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	unsigned long curdelay;
	unsigned long j;
	struct srcu_struct *ssp;
	struct srcu_usage *sup;

	sup = container_of(work, struct srcu_usage, work.work);
	ssp = sup->srcu_ssp;

	srcu_advance_state(ssp);
	curdelay = srcu_get_delay(ssp);
	if (curdelay) {
		WRITE_ONCE(sup->reschedule_count, 0);
	} else {
		j = jiffies;
		if (READ_ONCE(sup->reschedule_jiffies) == j) {
			WRITE_ONCE(sup->reschedule_count, READ_ONCE(sup->reschedule_count) + 1);
			if (READ_ONCE(sup->reschedule_count) > srcu_max_nodelay)
				curdelay = 1;
		} else {
			WRITE_ONCE(sup->reschedule_count, 1);
			WRITE_ONCE(sup->reschedule_jiffies, j);
		}
	}
	srcu_reschedule(ssp, curdelay);
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

static const char * const srcu_size_state_name[] = {
	"SRCU_SIZE_SMALL",
	"SRCU_SIZE_ALLOC",
	"SRCU_SIZE_WAIT_BARRIER",
	"SRCU_SIZE_WAIT_CALL",
	"SRCU_SIZE_WAIT_CBS1",
	"SRCU_SIZE_WAIT_CBS2",
	"SRCU_SIZE_WAIT_CBS3",
	"SRCU_SIZE_WAIT_CBS4",
	"SRCU_SIZE_BIG",
	"SRCU_SIZE_???",
};

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;
	int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state);
	int ss_state_idx = ss_state;

	idx = ssp->srcu_idx & 0x1;
	if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
		ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
	pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
		 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state,
		 srcu_size_state_name[ss_state_idx]);
	if (!ssp->sda) {
		// Called after cleanup_srcu_struct(), perhaps.
		pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
	} else {
		pr_cont(" per-CPU(idx=%d):", idx);
		for_each_possible_cpu(cpu) {
			unsigned long l0, l1;
			unsigned long u0, u1;
			long c0, c1;
			struct srcu_data *sdp;

			sdp = per_cpu_ptr(ssp->sda, cpu);
			u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));
			u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));

			/*
			 * Make sure that a lock is always counted if the corresponding
			 * unlock is counted.
			 */
			smp_rmb();

			l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));
			l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));

			c0 = l0 - u0;
			c1 = l1 - u1;
			pr_cont(" %d(%ld,%ld %c)",
				cpu, c0, c1,
				"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
			s0 += c0;
			s1 += c1;
		}
		pr_cont(" T(%ld,%ld)\n", s0, s1);
	}
	if (SRCU_SIZING_IS_TORTURE())
		srcu_transition_to_big(ssp);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
		pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
	if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
		pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
	pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_usage *sup;

	/* Decide on srcu_struct-size strategy. */
	if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
		if (nr_cpu_ids >= big_cpu_lim) {
			convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
			pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
		} else {
			convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
			pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
		}
	}

	/*
	 * Once that is set, call_srcu() can follow the normal path and
	 * queue delayed work.  This must follow the creation of the RCU
	 * workqueues and the initialization of timers.
	 */
	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		sup = list_first_entry(&srcu_boot_list, struct srcu_usage,
				       work.work.entry);
		list_del_init(&sup->work.work.entry);
		if (SRCU_SIZING_IS(SRCU_SIZING_INIT) &&
		    sup->srcu_size_state == SRCU_SIZE_SMALL)
			sup->srcu_size_state = SRCU_SIZE_ALLOC;
		queue_work(rcu_gp_wq, &sup->work.work);
	}
}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
	int i;
	struct srcu_struct *ssp;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ssp = *(sspp++);
		ssp->sda = alloc_percpu(struct srcu_data);
		if (WARN_ON_ONCE(!ssp->sda))
			return -ENOMEM;
	}
	return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
	int i;
	struct srcu_struct *ssp;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ssp = *(sspp++);
		if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) &&
		    !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static))
			cleanup_srcu_struct(ssp);
		if (!WARN_ON(srcu_readers_active(ssp)))
			free_percpu(ssp->sda);
	}
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = srcu_module_coming(mod);
		break;
	case MODULE_STATE_GOING:
		srcu_module_going(mod);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */