Lines matching full:md
(Whole-word hits for the identifier "md"; the matches below appear to come from the Linux kernel's request-based device-mapper core, drivers/md/dm-rq.c. Each entry shows the source line number, the matching code, and the enclosing function or declaration context.)
18 struct mapped_device *md; member
58 int dm_request_based(struct mapped_device *md) in dm_request_based() argument
60 return queue_is_mq(md->queue); in dm_request_based()
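The hits at 58-60 show the whole request-based test: dm_request_based() is nothing but queue_is_mq() on md->queue, i.e. a mapped device is request-based exactly when its queue is managed by blk-mq. A minimal sketch of the same predicate (queue_is_mq() is the real helper from <linux/blkdev.h>; the wrapper name here is illustrative):

#include <linux/blkdev.h>

/* Sketch: dm_request_based() reduces to this one test. */
static bool example_request_based(struct request_queue *q)
{
	return queue_is_mq(q);	/* true iff q->mq_ops is set */
}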
126 static void rq_end_stats(struct mapped_device *md, struct request *orig) in rq_end_stats() argument
128 if (unlikely(dm_stats_used(&md->stats))) { in rq_end_stats()
131 dm_stats_account_io(&md->stats, rq_data_dir(orig), in rq_end_stats()
138 * Don't touch any member of the md after calling this function because
139 * the md may be freed in dm_put() at the end of this function.
142 static void rq_completed(struct mapped_device *md) in rq_completed() argument
147 dm_put(md); in rq_completed()
158 struct mapped_device *md = tio->md; in dm_end_request() local
164 rq_end_stats(md, rq); in dm_end_request()
166 rq_completed(md); in dm_end_request()
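The hits at 126-166 are the completion path: rq_end_stats() accounts the original request in dm-stats only when stats are enabled (hence the unlikely() guard), dm_end_request() completes the original request, and rq_completed() drops the mapped_device reference last of all. The comment at 138-139 is load-bearing: dm_put() may free the md, so nothing may touch md afterwards. A hedged paraphrase of the reference pairing (real function names per <linux/device-mapper.h>, condensed bodies):

#include <linux/device-mapper.h>

/* dm_start_request() takes the reference for the in-flight I/O ... */
static void example_take_ref(struct mapped_device *md)
{
	dm_get(md);	/* one reference per in-flight request */
}

/* ... and rq_completed() drops it when the request finishes. */
static void example_drop_ref(struct mapped_device *md)
{
	dm_put(md);	/* may free md: do not dereference md after this */
}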
174 void dm_mq_kick_requeue_list(struct mapped_device *md) in dm_mq_kick_requeue_list() argument
176 __dm_mq_kick_requeue_list(md->queue, 0); in dm_mq_kick_requeue_list()
188 struct mapped_device *md = tio->md; in dm_requeue_original_request() local
192 rq_end_stats(md, rq); in dm_requeue_original_request()
199 rq_completed(md); in dm_requeue_original_request()
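The requeue hits: dm_mq_kick_requeue_list() (174-176) kicks md->queue's requeue list with zero delay, and dm_requeue_original_request() (188-199) unwinds the stats and the md reference exactly as the completion path does before handing the request back to blk-mq. A sketch of the wrapper shape implied by __dm_mq_kick_requeue_list() above (blk_mq_delay_kick_requeue_list() is the real blk-mq API; the body is assumed, not verbatim):

#include <linux/blk-mq.h>

/* Plausible body of __dm_mq_kick_requeue_list(). */
static void example_kick_requeue(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);	/* msecs == 0: kick now */
}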
218 disable_discard(tio->md); in dm_done()
221 disable_write_zeroes(tio->md); in dm_done()
256 struct mapped_device *md = tio->md; in dm_softirq_done() local
258 rq_end_stats(md, rq); in dm_softirq_done()
260 rq_completed(md); in dm_softirq_done()
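In dm_done() (218-221), a clone whose discard or write-zeroes fails as unsupported gets that feature switched off for the whole md, so the block layer stops issuing the op type; dm_softirq_done() (256-260) then runs the usual end-of-request unwinding. Conceptually, "disabling" clamps the matching queue limit to zero. A sketch under that assumption (max_discard_sectors is a real queue_limits field; the helper name is mine, and the kernel's disable_discard() may do more):

#include <linux/blkdev.h>

/* Sketch: what disabling discard amounts to at the queue-limits level. */
static void example_disable_discard(struct queue_limits *lim)
{
	lim->max_discard_sectors = 0;	/* 0 == discard not supported */
}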
323 r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask, in setup_clone()
337 struct mapped_device *md) in init_tio() argument
339 tio->md = md; in init_tio()
350 if (!md->init_tio_pdu) in init_tio()
364 struct mapped_device *md = tio->md; in map_request() local
382 trace_block_rq_remap(clone, disk_devt(dm_disk(md)), in map_request()
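setup_clone() (323) builds the clone's bios out of the md's bioset via blk_rq_prep_clone(); init_tio() (337-350) stashes md in the per-request tio and zeroes the target PDU only when .init_request has not already done so; map_request() (364-382) then hands the clone to the target and emits a block remap trace event against the dm disk. A sketch of the clone call (blk_rq_prep_clone() is the real API; the bio constructor callback, elided in the hit at 323, may legally be NULL, though dm passes its own):

#include <linux/blk-mq.h>

/* Sketch of setup_clone()'s core: copy rq's bios into clone from bs. */
static int example_setup_clone(struct request *clone, struct request *rq,
			       struct bio_set *bs, gfp_t gfp_mask)
{
	/* NULL bio_ctr/data skips per-bio constructor work */
	return blk_rq_prep_clone(clone, rq, bs, gfp_mask, NULL, NULL);
}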
420 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) in dm_attr_rq_based_seq_io_merge_deadline_show() argument
425 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, in dm_attr_rq_based_seq_io_merge_deadline_store() argument
431 static void dm_start_request(struct mapped_device *md, struct request *orig) in dm_start_request() argument
435 if (unlikely(dm_stats_used(&md->stats))) { in dm_start_request()
439 dm_stats_account_io(&md->stats, rq_data_dir(orig), in dm_start_request()
445 * Hold the md reference here for the in-flight I/O. in dm_start_request()
451 dm_get(md); in dm_start_request()
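After the two sysfs handlers for the rq_based_seq_io_merge_deadline attribute (420-425), dm_start_request() (431-451) is the entry-side mirror of the completion path: it starts the request in blk-mq, records dm-stats under the same unlikely(dm_stats_used()) guard as rq_end_stats(), and takes the md reference for the in-flight I/O, per the comment at 445. Condensed sketch (blk_mq_start_request() is the real blk-mq call that starts accounting; dm-stats arguments omitted):

#include <linux/blk-mq.h>
#include <linux/device-mapper.h>

/* Condensed dm_start_request(): blk-mq accounting on, reference taken. */
static void example_start_request(struct mapped_device *md,
				  struct request *orig)
{
	blk_mq_start_request(orig);	/* begin blk-mq timing/accounting */
	dm_get(md);			/* dropped later in rq_completed() */
}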
457 struct mapped_device *md = set->driver_data; in dm_mq_init_request() local
461 * Must initialize md member of tio, otherwise it won't in dm_mq_init_request()
464 tio->md = md; in dm_mq_init_request()
466 if (md->init_tio_pdu) { in dm_mq_init_request()
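dm_mq_init_request() (457-466) is the tag set's .init_request callback: it runs once per preallocated request when the tag set is created, not once per I/O, which is why tio->md must be wired up here (the comment fragment at 461 makes the same point); when an immutable target reserved per-I/O data in cmd_size, the PDU pointer is also set up on this path. Sketch of the callback shape (the prototype is the real struct blk_mq_ops .init_request signature; dm_rq_target_io is dm's internal per-request type, and the PDU handling is condensed away):

#include <linux/blk-mq.h>

/* tio lives in the PDU blk-mq preallocated for each request (cmd_size). */
static int example_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	tio->md = set->driver_data;	/* md was stored in the set at init */
	return 0;
}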
479 struct mapped_device *md = tio->md; in dm_mq_queue_rq() local
480 struct dm_target *ti = md->immutable_target; in dm_mq_queue_rq()
487 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) in dm_mq_queue_rq()
494 map = dm_get_live_table(md, &srcu_idx); in dm_mq_queue_rq()
496 dm_put_live_table(md, srcu_idx); in dm_mq_queue_rq()
500 dm_put_live_table(md, srcu_idx); in dm_mq_queue_rq()
506 dm_start_request(md, rq); in dm_mq_queue_rq()
508 /* Init tio using md established in .init_request */ in dm_mq_queue_rq()
509 init_tio(tio, rq, md); in dm_mq_queue_rq()
519 rq_end_stats(md, rq); in dm_mq_queue_rq()
520 rq_completed(md); in dm_mq_queue_rq()
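dm_mq_queue_rq() (479-520) fast-paths the common single-immutable-target case: ti comes straight from md->immutable_target, and only when that is NULL is the SRCU-protected live table taken, consulted, and dropped again (494-500). A suspended device (DMF_BLOCK_IO_FOR_SUSPEND, 487) returns BLK_STS_RESOURCE so blk-mq retries later; on a mapping failure, the stats and md reference taken in dm_start_request() are unwound (519-520). A condensed skeleton (md's flags and immutable_target are dm-internal fields, so this only compiles inside drivers/md; error paths abbreviated to comments):

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;		/* from .init_request */
	struct dm_target *ti = md->immutable_target;	/* common fast path */

	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
		return BLK_STS_RESOURCE;	/* suspended: retry later */

	if (!ti) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		/* look ti up in map; fail the request if there is none */
		dm_put_live_table(md, srcu_idx);
	}

	dm_start_request(md, rq);
	init_tio(tio, rq, md);	/* per the comment at 508 */

	/* map_request(); on error: rq_end_stats(md, rq); rq_completed(md); */
	return BLK_STS_OK;
}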
533 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) in dm_mq_init_request_queue() argument
538 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); in dm_mq_init_request_queue()
539 if (!md->tag_set) in dm_mq_init_request_queue()
542 md->tag_set->ops = &dm_mq_ops; in dm_mq_init_request_queue()
543 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); in dm_mq_init_request_queue()
544 md->tag_set->numa_node = md->numa_node_id; in dm_mq_init_request_queue()
545 md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING; in dm_mq_init_request_queue()
546 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); in dm_mq_init_request_queue()
547 md->tag_set->driver_data = md; in dm_mq_init_request_queue()
549 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); in dm_mq_init_request_queue()
553 md->tag_set->cmd_size += immutable_tgt->per_io_data_size; in dm_mq_init_request_queue()
554 md->init_tio_pdu = true; in dm_mq_init_request_queue()
557 err = blk_mq_alloc_tag_set(md->tag_set); in dm_mq_init_request_queue()
561 err = blk_mq_init_allocated_queue(md->tag_set, md->queue); in dm_mq_init_request_queue()
567 blk_mq_free_tag_set(md->tag_set); in dm_mq_init_request_queue()
569 kfree(md->tag_set); in dm_mq_init_request_queue()
570 md->tag_set = NULL; in dm_mq_init_request_queue()
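dm_mq_init_request_queue() (533-570) is the standard blk-mq bring-up for a stacking driver: allocate the tag set NUMA-locally, set BLK_MQ_F_STACKING (dm sits on top of other queues), size cmd_size to hold the dm_rq_target_io plus any immutable target's per_io_data_size (549-554), then blk_mq_alloc_tag_set() followed by blk_mq_init_allocated_queue() against the pre-existing md->queue; the tail (567-570) unwinds in reverse on failure. A generic restatement for a hypothetical driver (all my_* names are invented; the blk-mq calls and flags are the real ones from the hits above):

#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Hypothetical ops table: a real driver must at least supply .queue_rq. */
static const struct blk_mq_ops my_mq_ops;

struct my_pdu { void *per_io; };	/* stand-in for dm_rq_target_io */

static int my_init_queue(struct request_queue *q, int numa_node,
			 struct blk_mq_tag_set **out_set)
{
	struct blk_mq_tag_set *set;
	int err;

	set = kzalloc_node(sizeof(*set), GFP_KERNEL, numa_node);
	if (!set)
		return -ENOMEM;

	set->ops = &my_mq_ops;
	set->queue_depth = 64;			/* dm reads a module param */
	set->numa_node = numa_node;
	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	set->nr_hw_queues = 1;			/* dm reads a module param */
	set->driver_data = NULL;		/* dm stores md here */
	set->cmd_size = sizeof(struct my_pdu);	/* + per_io_data_size in dm */

	err = blk_mq_alloc_tag_set(set);
	if (err)
		goto out_kfree;
	err = blk_mq_init_allocated_queue(set, q);
	if (err)
		goto out_tag_set;

	*out_set = set;
	return 0;

out_tag_set:
	blk_mq_free_tag_set(set);
out_kfree:
	kfree(set);
	return err;
}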
575 void dm_mq_cleanup_mapped_device(struct mapped_device *md) in dm_mq_cleanup_mapped_device() argument
577 if (md->tag_set) { in dm_mq_cleanup_mapped_device()
578 blk_mq_free_tag_set(md->tag_set); in dm_mq_cleanup_mapped_device()
579 kfree(md->tag_set); in dm_mq_cleanup_mapped_device()
580 md->tag_set = NULL; in dm_mq_cleanup_mapped_device()
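dm_mq_cleanup_mapped_device() (575-580) is the teardown mirror: free the tag set, free the allocation, and NULL the pointer so a second call (or cleanup after a partially failed init) is harmless. Restated generically, continuing the hypothetical my_* sketch above:

/* Teardown mirror of my_init_queue(); safe to call more than once. */
static void my_cleanup_queue(struct blk_mq_tag_set **setp)
{
	if (*setp) {
		blk_mq_free_tag_set(*setp);
		kfree(*setp);
		*setp = NULL;
	}
}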