// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned int n_sectors;
	unsigned int completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned int dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned int dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned int reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned int dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}

static unsigned int dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned int dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

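/*
 * Unquiesce the queue and kick the requeue list so that requests
 * requeued while the queue was stopped are dispatched again
 * (typically called on device resume).
 */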
void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

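/*
 * Quiesce the queue so no new requests are dispatched until
 * dm_start_queue() is called (typically on device suspend).
 */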
void dm_stop_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the data completion.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);

		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, take an extra reference with dm_get() before calling
 * this function and release it with dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

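/*
 * Requeue the original request on the blk-mq requeue list, optionally
 * after a delay.  Any existing clone is torn down first, and the md
 * reference taken in dm_start_request() is dropped.
 */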
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

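/*
 * Decide the fate of the completed clone based on the target's
 * rq_end_io() return value.  A BLK_STS_TARGET error may also indicate
 * that the underlying device stopped supporting discard or write
 * zeroes, in which case the corresponding operation is disabled.
 */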
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMCRIT("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

/*
 * Complete the never-mapped clone and the original request with the
 * error status through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

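/*
 * .end_io handler of the clone: completion is propagated to the
 * original request via dm_complete_request().
 */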
static enum rq_end_io_ret end_clone_request(struct request *clone,
					    blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
	return RQ_END_IO_NONE;
}

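/*
 * bio_ctr callback of blk_rq_prep_clone(): wire each cloned bio to
 * end_clone_bio() so partial completions can be tracked.
 */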
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

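/*
 * Prepare the clone by duplicating the original request's bios from
 * the md's bioset and registering the completion hooks.
 */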
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

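/*
 * Reset the per-request tio state at dispatch time; the pdu itself is
 * allocated once per tag and reused across requests.
 */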
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = blk_insert_cloned_request(clone);
		switch (ret) {
		case BLK_STS_OK:
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		default:
			/* must complete clone in terms of original request */
			dm_complete_request(rq, ret);
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMCRIT("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

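/*
 * Start the request at the block layer, account it in dm-stats if
 * enabled, and take an md reference for the duration of the I/O.
 */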
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);

		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold an md reference here for the in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed during request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

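/*
 * .init_request is called once per tag when the tag set is allocated,
 * so only setup that is stable across requests belongs here.
 */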
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

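/*
 * .queue_rq: map and dispatch a single request.  Returning
 * BLK_STS_RESOURCE asks blk-mq to retry the request later.
 */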
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	/*
	 * blk-mq may unquiesce the queue due to outside events such as an
	 * elevator switch or an nr_requests update, so a request may arrive
	 * during suspend; simply ask blk-mq to requeue it.
	 */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
		return BLK_STS_RESOURCE;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map;

		map = dm_get_live_table(md, &srcu_idx);
		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return BLK_STS_RESOURCE;
		}
		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

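/*
 * Allocate the blk-mq tag set and bind it to md->queue.  cmd_size
 * reserves per-request space for the tio, plus any per-io data an
 * immutable target requested.
 */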
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (err)
		goto out_tag_set;
	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}

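/*
 * The parameters below are tunable at module load time or via sysfs,
 * e.g. (illustrative; the exact module name depends on how the
 * device-mapper core is built):
 *
 *   echo 128 > /sys/module/dm_mod/parameters/reserved_rq_based_ios
 */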
module_param(reserved_rq_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, 0644);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, 0644);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");