// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny version for non-preemptible single-CPU use.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;

static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
	ssp->srcu_lock_nesting[0] = 0;
	ssp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&ssp->srcu_wq);
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	ssp->srcu_gp_running = false;
	ssp->srcu_gp_waiting = false;
	ssp->srcu_idx = 0;
	ssp->srcu_idx_max = 0;
	INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
	INIT_LIST_HEAD(&ssp->srcu_work.entry);
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
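
/*
 * Illustrative usage sketch, not part of the original file: a dynamically
 * allocated srcu_struct must be passed to init_srcu_struct() before any
 * other SRCU primitive touches it.  The my_data and my_data_setup names
 * below are hypothetical.
 *
 *	struct my_data {
 *		struct srcu_struct srcu;
 *	};
 *
 *	static int my_data_setup(struct my_data *d)
 *	{
 *		return init_srcu_struct(&d->srcu);
 *	}
 *
 * Statically allocated domains can instead be declared with DEFINE_SRCU()
 * or DEFINE_STATIC_SRCU(), which require no runtime initialization.
 */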

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
	flush_work(&ssp->srcu_work);
	WARN_ON(ssp->srcu_gp_running);
	WARN_ON(ssp->srcu_gp_waiting);
	WARN_ON(ssp->srcu_cb_head);
	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
	WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
	WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
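
/*
 * Illustrative teardown sketch, not part of the original file and using the
 * hypothetical my_data structure from the sketch above: ensure that no
 * readers remain and that no further call_srcu() invocations can arrive
 * before cleaning up, otherwise the WARN_ON()s above will fire.
 *
 *	static void my_data_teardown(struct my_data *d)
 *	{
 *		synchronize_srcu(&d->srcu);
 *		cleanup_srcu_struct(&d->srcu);
 *	}
 */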

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;

	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
		swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
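
/*
 * Illustrative read-side sketch, not part of the original file: the index
 * returned by srcu_read_lock() tells the matching srcu_read_unlock() which
 * ->srcu_lock_nesting[] counter to decrement.  The my_srcu, my_ptr, and
 * do_something_with() names are hypothetical.
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_ptr, &my_srcu);
 *	do_something_with(p);		(may sleep inside an SRCU reader)
 *	srcu_read_unlock(&my_srcu, idx);
 */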

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPTION operation
 * means that we get away with murder on synchronization. ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *ssp;

	ssp = container_of(wp, struct srcu_struct, srcu_work);
	if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(ssp->srcu_gp_running, true);
	local_irq_disable();
	lh = ssp->srcu_cb_head;
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	local_irq_enable();
	idx = (ssp->srcu_idx & 0x2) / 2;
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(ssp->srcu_gp_running, false);
	if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);

static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
	unsigned short cookie;

	cookie = get_state_synchronize_srcu(ssp);
	if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
		return;
	WRITE_ONCE(ssp->srcu_idx_max, cookie);
	if (!READ_ONCE(ssp->srcu_gp_running)) {
		if (likely(srcu_init_done))
			schedule_work(&ssp->srcu_work);
		else if (list_empty(&ssp->srcu_work.entry))
			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
	}
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	local_irq_save(flags);
	*ssp->srcu_cb_tail = rhp;
	ssp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	srcu_gp_start_if_needed(ssp);
}
EXPORT_SYMBOL_GPL(call_srcu);
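
/*
 * Illustrative call_srcu() sketch, not part of the original file; the
 * my_obj, my_srcu, and my_free_cb names are hypothetical.  The callback is
 * invoked only after a full SRCU grace period for this domain has elapsed:
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_obj *obj = container_of(rhp, struct my_obj, rh);
 *
 *		kfree(obj);
 *	}
 *
 *	call_srcu(&my_srcu, &obj->rh, my_free_cb);
 */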

/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(ssp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
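
/*
 * Illustrative update-side sketch, not part of the original file; the
 * my_ptr, my_srcu, and my_lock names are hypothetical.  Unpublish the old
 * version, wait for pre-existing readers, then free:
 *
 *	old = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_ptr, new);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */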

/*
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	barrier();
	ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
	barrier();
	return ret & USHRT_MAX;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/*
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 *
 * The difference between this and get_state_synchronize_srcu() is that
 * this function ensures that the poll_state_synchronize_srcu() will
 * eventually return the value true.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret = get_state_synchronize_srcu(ssp);

	srcu_gp_start_if_needed(ssp);
	return ret;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/*
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);

	barrier();
	return ret;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
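
/*
 * Illustrative polled grace-period sketch, not part of the original file;
 * the my_srcu name is hypothetical.  Grab a cookie, start a grace period,
 * and check for completion later without blocking:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu);
 *	(do other work)
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		(the grace period has ended, old data may be freed)
 *	else
 *		(not yet, retry later or fall back to synchronize_srcu())
 */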

/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}

/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list,
				       struct srcu_struct, srcu_work.entry);
		list_del_init(&ssp->srcu_work.entry);
		schedule_work(&ssp->srcu_work);
	}
}