/Linux-v4.19/drivers/nvme/target/ |
D | loop.c |
     43  struct blk_mq_tag_set tag_set;  member
     98  return queue->ctrl->tag_set.tags[queue_idx - 1];  in nvme_loop_tagset()
    232  (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);  in nvme_loop_init_request()
    296  blk_mq_free_tag_set(&ctrl->tag_set);  in nvme_loop_free_ctrl()
    432  blk_mq_tagset_busy_iter(&ctrl->tag_set,  in nvme_loop_shutdown_ctrl()
    492  blk_mq_update_nr_hw_queues(&ctrl->tag_set,  in nvme_loop_reset_ctrl_work()
    533  memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));  in nvme_loop_create_io_queues()
    534  ctrl->tag_set.ops = &nvme_loop_mq_ops;  in nvme_loop_create_io_queues()
    535  ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;  in nvme_loop_create_io_queues()
    536  ctrl->tag_set.reserved_tags = 1; /* fabric connect */  in nvme_loop_create_io_queues()
    [all …]
|
/Linux-v4.19/drivers/mmc/core/ |
D | queue.c |
    386  memset(&mq->tag_set, 0, sizeof(mq->tag_set));  in mmc_mq_init_queue()
    387  mq->tag_set.ops = mq_ops;  in mmc_mq_init_queue()
    388  mq->tag_set.queue_depth = q_depth;  in mmc_mq_init_queue()
    389  mq->tag_set.numa_node = NUMA_NO_NODE;  in mmc_mq_init_queue()
    390  mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |  in mmc_mq_init_queue()
    392  mq->tag_set.nr_hw_queues = 1;  in mmc_mq_init_queue()
    393  mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);  in mmc_mq_init_queue()
    394  mq->tag_set.driver_data = mq;  in mmc_mq_init_queue()
    396  ret = blk_mq_alloc_tag_set(&mq->tag_set);  in mmc_mq_init_queue()
    400  mq->queue = blk_mq_init_queue(&mq->tag_set);  in mmc_mq_init_queue()
    [all …]
|
D | queue.h | 77 struct blk_mq_tag_set tag_set; member
|
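The mmc/core/queue.c matches above show the setup sequence that most drivers in this index repeat: zero the embedded struct blk_mq_tag_set, fill in ops, queue depth, NUMA node, flags, per-request cmd_size, driver_data and the number of hardware queues, then call blk_mq_alloc_tag_set() and hand the set to blk_mq_init_queue(). Below is a minimal sketch of that pattern against the v4.19 blk-mq API; my_dev, my_cmd and my_mq_ops are hypothetical names, not taken from any file listed here.

```c
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <linux/string.h>

struct my_cmd {				/* hypothetical per-request driver data */
	blk_status_t status;
};

struct my_dev {				/* hypothetical device; tag set embedded */
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
};

extern const struct blk_mq_ops my_mq_ops;	/* would provide .queue_rq etc. */

static int my_init_queue(struct my_dev *dev)
{
	int ret;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops = &my_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct my_cmd);	/* allocated per request */
	dev->tag_set.driver_data = dev;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);	/* allocates tags and requests */
	if (ret)
		return ret;

	dev->queue = blk_mq_init_queue(&dev->tag_set);	/* creates the request_queue */
	if (IS_ERR(dev->queue)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(dev->queue);
	}
	return 0;
}
```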
/Linux-v4.19/drivers/md/ |
D | dm-rq.c |
    798  md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);  in dm_mq_init_request_queue()
    799  if (!md->tag_set)  in dm_mq_init_request_queue()
    802  md->tag_set->ops = &dm_mq_ops;  in dm_mq_init_request_queue()
    803  md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();  in dm_mq_init_request_queue()
    804  md->tag_set->numa_node = md->numa_node_id;  in dm_mq_init_request_queue()
    805  md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;  in dm_mq_init_request_queue()
    806  md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();  in dm_mq_init_request_queue()
    807  md->tag_set->driver_data = md;  in dm_mq_init_request_queue()
    809  md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);  in dm_mq_init_request_queue()
    813  md->tag_set->cmd_size += immutable_tgt->per_io_data_size;  in dm_mq_init_request_queue()
    [all …]
|
D | dm-core.h | 125 struct blk_mq_tag_set *tag_set; member
|
/Linux-v4.19/drivers/mtd/ubi/ |
D | block.c |
     97  struct blk_mq_tag_set tag_set;  member
    406  dev->tag_set.ops = &ubiblock_mq_ops;  in ubiblock_create()
    407  dev->tag_set.queue_depth = 64;  in ubiblock_create()
    408  dev->tag_set.numa_node = NUMA_NO_NODE;  in ubiblock_create()
    409  dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in ubiblock_create()
    410  dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);  in ubiblock_create()
    411  dev->tag_set.driver_data = dev;  in ubiblock_create()
    412  dev->tag_set.nr_hw_queues = 1;  in ubiblock_create()
    414  ret = blk_mq_alloc_tag_set(&dev->tag_set);  in ubiblock_create()
    420  dev->rq = blk_mq_init_queue(&dev->tag_set);  in ubiblock_create()
    [all …]
|
/Linux-v4.19/drivers/s390/block/ |
D | scm_blk.c |
    453  bdev->tag_set.ops = &scm_mq_ops;  in scm_blk_dev_setup()
    454  bdev->tag_set.cmd_size = sizeof(blk_status_t);  in scm_blk_dev_setup()
    455  bdev->tag_set.nr_hw_queues = nr_requests;  in scm_blk_dev_setup()
    456  bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;  in scm_blk_dev_setup()
    457  bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in scm_blk_dev_setup()
    458  bdev->tag_set.numa_node = NUMA_NO_NODE;  in scm_blk_dev_setup()
    460  ret = blk_mq_alloc_tag_set(&bdev->tag_set);  in scm_blk_dev_setup()
    464  rq = blk_mq_init_queue(&bdev->tag_set);  in scm_blk_dev_setup()
    509  blk_mq_free_tag_set(&bdev->tag_set);  in scm_blk_dev_setup()
    519  blk_mq_free_tag_set(&bdev->tag_set);  in scm_blk_dev_cleanup()
|
D | scm_blk.h | 21 struct blk_mq_tag_set tag_set; member
|
/Linux-v4.19/include/scsi/ |
D | scsi_tcq.h |
     33  if (hwq < shost->tag_set.nr_hw_queues) {  in scsi_host_find_tag()
     34  req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],  in scsi_host_find_tag()
|
D | scsi_host.h | 556 struct blk_mq_tag_set tag_set; member
|
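The scsi_tcq.h matches rely on the per-hardware-queue tags arrays kept inside the tag set: a unique tag encodes the hardware-queue index plus the per-queue tag, and blk_mq_tag_to_rq() maps that pair back to the struct request. A hedged sketch of the same lookup against a generic tag set follows; find_request_from_tag() is a hypothetical helper, not a kernel function.

```c
#include <linux/blk-mq.h>

/*
 * Hypothetical helper mirroring scsi_host_find_tag(): decode a unique tag
 * (as produced by blk_mq_unique_tag()) and look up its request in the
 * tag set's per-hw-queue tags array.
 */
static struct request *find_request_from_tag(struct blk_mq_tag_set *set,
					     u32 unique_tag)
{
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);

	if (hwq < set->nr_hw_queues)
		return blk_mq_tag_to_rq(set->tags[hwq],
					blk_mq_unique_tag_to_tag(unique_tag));
	return NULL;
}
```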
/Linux-v4.19/drivers/block/ |
D | virtio_blk.c |
     40  struct blk_mq_tag_set tag_set;  member
    690  memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));  in virtblk_probe()
    691  vblk->tag_set.ops = &virtio_mq_ops;  in virtblk_probe()
    692  vblk->tag_set.queue_depth = virtblk_queue_depth;  in virtblk_probe()
    693  vblk->tag_set.numa_node = NUMA_NO_NODE;  in virtblk_probe()
    694  vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in virtblk_probe()
    695  vblk->tag_set.cmd_size =  in virtblk_probe()
    698  vblk->tag_set.driver_data = vblk;  in virtblk_probe()
    699  vblk->tag_set.nr_hw_queues = vblk->num_vqs;  in virtblk_probe()
    701  err = blk_mq_alloc_tag_set(&vblk->tag_set);  in virtblk_probe()
    [all …]
|
D | nbd.c |
    101  struct blk_mq_tag_set tag_set;  member
    217  blk_mq_free_tag_set(&nbd->tag_set);  in nbd_dev_remove()
    645  if (hwq < nbd->tag_set.nr_hw_queues)  in nbd_read_stat()
    646  req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],  in nbd_read_stat()
    750  blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);  in nbd_clear_que()
   1005  if (nbd->tag_set.timeout)  in nbd_reconnect_socket()
   1006  sock->sk->sk_sndtimeo = nbd->tag_set.timeout;  in nbd_reconnect_socket()
   1126  nbd->tag_set.timeout = 0;  in nbd_config_put()
   1154  blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);  in nbd_start_device()
   1176  if (nbd->tag_set.timeout)  in nbd_start_device()
    [all …]
|
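Several of the nbd.c hits (and skd_main.c further down) use blk_mq_tagset_busy_iter() to walk every in-flight request of a tag set during teardown or error recovery. A minimal sketch of that idiom, with hypothetical my_fail_request()/my_clear_queue() names, might look like the block below; note that real drivers such as nbd usually complete requests through their own ->complete path rather than ending them directly.

```c
#include <linux/blk-mq.h>

/* Hypothetical iterator callback: fail one started request. */
static void my_fail_request(struct request *rq, void *data, bool reserved)
{
	blk_mq_end_request(rq, BLK_STS_IOERR);
}

/* Hypothetical teardown helper: abort everything still in flight. */
static void my_clear_queue(struct blk_mq_tag_set *set)
{
	/* Invokes the callback for every started request on every hw queue. */
	blk_mq_tagset_busy_iter(set, my_fail_request, NULL);
}
```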
D | null_blk_main.c |
     71  static struct blk_mq_tag_set tag_set;  variable
   1498  nullb->tag_set == &nullb->__tag_set)  in null_del_dev()
   1499  blk_mq_free_tag_set(nullb->tag_set);  in null_del_dev()
   1738  nullb->tag_set = &tag_set;  in null_add_dev()
   1741  nullb->tag_set = &nullb->__tag_set;  in null_add_dev()
   1742  rv = null_init_tag_set(nullb, nullb->tag_set);  in null_add_dev()
   1751  nullb->tag_set->timeout = 5 * HZ;  in null_add_dev()
   1752  nullb->q = blk_mq_init_queue(nullb->tag_set);  in null_add_dev()
   1840  if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)  in null_add_dev()
   1841  blk_mq_free_tag_set(nullb->tag_set);  in null_add_dev()
    [all …]
|
D | xen-blkfront.c |
    216  struct blk_mq_tag_set tag_set;  member
    966  memset(&info->tag_set, 0, sizeof(info->tag_set));  in xlvbd_init_blk_queue()
    967  info->tag_set.ops = &blkfront_mq_ops;  in xlvbd_init_blk_queue()
    968  info->tag_set.nr_hw_queues = info->nr_rings;  in xlvbd_init_blk_queue()
    976  info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;  in xlvbd_init_blk_queue()
    978  info->tag_set.queue_depth = BLK_RING_SIZE(info);  in xlvbd_init_blk_queue()
    979  info->tag_set.numa_node = NUMA_NO_NODE;  in xlvbd_init_blk_queue()
    980  info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;  in xlvbd_init_blk_queue()
    981  info->tag_set.cmd_size = sizeof(struct blkif_req);  in xlvbd_init_blk_queue()
    982  info->tag_set.driver_data = info;  in xlvbd_init_blk_queue()
    [all …]
|
D | skd_main.c |
    218  struct blk_mq_tag_set tag_set;  member
    396  blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);  in skd_in_flight()
   1514  WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],  in skd_isr_completion_posted()
   1520  rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);  in skd_isr_completion_posted()
   1910  blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);  in skd_recover_requests()
   2829  memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));  in skd_cons_disk()
   2830  skdev->tag_set.ops = &skd_mq_ops;  in skd_cons_disk()
   2831  skdev->tag_set.nr_hw_queues = 1;  in skd_cons_disk()
   2832  skdev->tag_set.queue_depth = skd_max_queue_depth;  in skd_cons_disk()
   2833  skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +  in skd_cons_disk()
    [all …]
|
D | loop.h | 64 struct blk_mq_tag_set tag_set; member
|
D | loop.c |
   1844  lo->tag_set.ops = &loop_mq_ops;  in loop_add()
   1845  lo->tag_set.nr_hw_queues = 1;  in loop_add()
   1846  lo->tag_set.queue_depth = 128;  in loop_add()
   1847  lo->tag_set.numa_node = NUMA_NO_NODE;  in loop_add()
   1848  lo->tag_set.cmd_size = sizeof(struct loop_cmd);  in loop_add()
   1849  lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;  in loop_add()
   1850  lo->tag_set.driver_data = lo;  in loop_add()
   1852  err = blk_mq_alloc_tag_set(&lo->tag_set);  in loop_add()
   1856  lo->lo_queue = blk_mq_init_queue(&lo->tag_set);  in loop_add()
   1916  blk_mq_free_tag_set(&lo->tag_set);  in loop_add()
    [all …]
|
D | null_blk.h | 74 struct blk_mq_tag_set *tag_set; member
|
D | rbd.c |
    380  struct blk_mq_tag_set tag_set;  member
   3762  blk_mq_free_tag_set(&rbd_dev->tag_set);  in rbd_free_disk()
   3983  memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));  in rbd_init_disk()
   3984  rbd_dev->tag_set.ops = &rbd_mq_ops;  in rbd_init_disk()
   3985  rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;  in rbd_init_disk()
   3986  rbd_dev->tag_set.numa_node = NUMA_NO_NODE;  in rbd_init_disk()
   3987  rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;  in rbd_init_disk()
   3988  rbd_dev->tag_set.nr_hw_queues = 1;  in rbd_init_disk()
   3989  rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);  in rbd_init_disk()
   3991  err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);  in rbd_init_disk()
    [all …]
|
/Linux-v4.19/drivers/nvme/host/ |
D | fc.c |
    152  struct blk_mq_tag_set tag_set;  member
   1737  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_fc_init_request()
   1987  blk_mq_free_tag_set(&ctrl->tag_set);  in nvme_fc_ctrl_free()
   2298  return queue->ctrl->tag_set.tags[queue->qnum - 1];  in nvme_fc_tagset()
   2419  memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));  in nvme_fc_create_io_queues()
   2420  ctrl->tag_set.ops = &nvme_fc_mq_ops;  in nvme_fc_create_io_queues()
   2421  ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;  in nvme_fc_create_io_queues()
   2422  ctrl->tag_set.reserved_tags = 1; /* fabric connect */  in nvme_fc_create_io_queues()
   2423  ctrl->tag_set.numa_node = NUMA_NO_NODE;  in nvme_fc_create_io_queues()
   2424  ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in nvme_fc_create_io_queues()
    [all …]
|
D | rdma.c |
    103  struct blk_mq_tag_set tag_set;  member
    271  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_rdma_exit_request()
    285  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_rdma_init_request()
    697  set = &ctrl->tag_set;  in nvme_rdma_alloc_tagset()
    849  ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);  in nvme_rdma_configure_io_queues()
    855  blk_mq_update_nr_hw_queues(&ctrl->tag_set,  in nvme_rdma_configure_io_queues()
    893  blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,  in nvme_rdma_teardown_io_queues()
   1369  return queue->ctrl->tag_set.tags[queue_idx - 1];  in nvme_rdma_tagset()
|
/Linux-v4.19/block/ |
D | blk-mq-sched.c |
    440  struct blk_mq_tag_set *set = q->tag_set;  in blk_mq_sched_alloc_tags()
    457  struct blk_mq_tag_set *set = q->tag_set;  in blk_mq_sched_tags_teardown()
    474  q->nr_requests = q->tag_set->queue_depth;  in blk_mq_init_sched()
    483  q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,  in blk_mq_init_sched()
|
D | blk-mq.c |
   2301  struct blk_mq_tag_set *set = q->tag_set;  in blk_mq_map_swqueue()
   2411  struct blk_mq_tag_set *set = q->tag_set;  in blk_mq_del_queue_tag_set()
   2428  q->tag_set = set;  in blk_mq_add_queue_tag_set()
   2495  static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)  in blk_mq_hw_ctx_size()  argument
   2503  if (tag_set->flags & BLK_MQ_F_BLOCKING)  in blk_mq_hw_ctx_size()
   2656  struct blk_mq_tag_set *set = q->tag_set;  in blk_mq_free_queue()
   2854  struct blk_mq_tag_set *set = q->tag_set;  in blk_mq_update_nr_requests()
|
/Linux-v4.19/drivers/scsi/ |
D | scsi_lib.c |
   2199  struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);  in scsi_map_queues()
   2334  sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);  in scsi_mq_alloc_queue()
   2353  memset(&shost->tag_set, 0, sizeof(shost->tag_set));  in scsi_mq_setup_tags()
   2354  shost->tag_set.ops = &scsi_mq_ops;  in scsi_mq_setup_tags()
   2355  shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;  in scsi_mq_setup_tags()
   2356  shost->tag_set.queue_depth = shost->can_queue;  in scsi_mq_setup_tags()
   2357  shost->tag_set.cmd_size = cmd_size;  in scsi_mq_setup_tags()
   2358  shost->tag_set.numa_node = NUMA_NO_NODE;  in scsi_mq_setup_tags()
   2359  shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;  in scsi_mq_setup_tags()
   2360  shost->tag_set.flags |=  in scsi_mq_setup_tags()
    [all …]
|
/Linux-v4.19/lib/ |
D | radix-tree.c |
    152  static inline void tag_set(struct radix_tree_node *node, unsigned int tag,  in tag_set()  function
    652  tag_set(node, tag, 0);  in radix_tree_extend()
    944  tag_set(node, tag, offset);  in insert_entries()
   1323  tag_set(parent, tag, end);  in radix_tree_split()
   1354  tag_set(node, tag, offset);  in radix_tree_split()
   1368  tag_set(node, tag, offset);  in radix_tree_split()
   1400  tag_set(node, tag, offset);  in node_tag_set()
   1439  tag_set(parent, tag, offset);  in radix_tree_tag_set()
|
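The lib/radix-tree.c hits are a different symbol altogether: there tag_set() is the internal helper that sets a tag bit for one slot of a radix tree node, and it is reached through the public radix_tree_tag_set() API rather than through blk-mq. A short, hedged usage sketch of that public API is below; the tree, index and tag here are purely illustrative.

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>

#define MY_TAG_DIRTY 0	/* illustrative tag number (tags 0..2 are available) */

static RADIX_TREE(my_tree, GFP_KERNEL);	/* declare and init a radix tree root */

static int mark_item_dirty(unsigned long index, void *item)
{
	int err = radix_tree_insert(&my_tree, index, item);

	if (err)
		return err;

	/* Sets the tag on the slot and propagates it upward via tag_set(). */
	radix_tree_tag_set(&my_tree, index, MY_TAG_DIRTY);

	return radix_tree_tag_get(&my_tree, index, MY_TAG_DIRTY) ? 0 : -EINVAL;
}
```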