// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

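/*
 * RCU callback to free an icq.  The icq_cache pointer is read from the
 * icq itself because the request_queue (and thus the elevator type) may
 * already be gone by the time this runs; see ioc_destroy_icq().
 */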
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to @icq and clearing it are done
	 * under queue_lock.  If the hint isn't pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
				   &ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
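
/*
 * A minimal usage sketch (illustrative, not part of this file): every
 * get_io_context() must eventually be balanced by a put_io_context()
 * once the holder is done with the context.
 *
 *	get_io_context(ioc);
 *	...
 *	put_io_context(ioc);
 */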

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irq(&ioc->lock);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

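/*
 * Destroy the icqs on a list that was spliced off a queue's ->icq_list.
 * Called with no locks held.  RCU protection keeps each icq from being
 * freed between looking it up and taking its ioc lock; an icq destroyed
 * in the meantime is recognized by ICQ_DESTROYED and skipped.
 */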
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

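/*
 * Allocate and install a new io_context for @task.  Returns 0 if @task
 * ends up with an io_context (either ours or one installed by a racing
 * allocator), -EBUSY if @task isn't %current and is already exiting,
 * and -ENOMEM if allocation fails.
 */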
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
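
/*
 * A sketch of the %current fast path recommended above (illustrative,
 * not part of this file): %current->io_context can be read without
 * task_lock() because, once set, it is only cleared by the exiting task
 * itself in exit_io_context().
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc)
 *		get_io_context(ioc);
 *	else
 *		ioc = get_task_io_context(current, GFP_KERNEL, NUMA_NO_NODE);
 */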

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
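
/*
 * Typical lookup-or-create pattern from a caller's perspective (a
 * sketch, assuming the caller already holds references on @ioc and @q):
 *
 *	spin_lock_irq(&q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(&q->queue_lock);
 *
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 */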

/* Set up the slab cache used for io_context allocations at boot. */
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);