/Linux-v6.1/drivers/s390/block/

dasd_genhd.c
     51  block->tag_set.ops = &dasd_mq_ops;  in dasd_gendisk_alloc()
     52  block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);  in dasd_gendisk_alloc()
     53  block->tag_set.nr_hw_queues = nr_hw_queues;  in dasd_gendisk_alloc()
     54  block->tag_set.queue_depth = queue_depth;  in dasd_gendisk_alloc()
     55  block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in dasd_gendisk_alloc()
     56  block->tag_set.numa_node = NUMA_NO_NODE;  in dasd_gendisk_alloc()
     57  rc = blk_mq_alloc_tag_set(&block->tag_set);  in dasd_gendisk_alloc()
     61  gdp = blk_mq_alloc_disk(&block->tag_set, block);  in dasd_gendisk_alloc()
     63  blk_mq_free_tag_set(&block->tag_set);  in dasd_gendisk_alloc()
    121  blk_mq_free_tag_set(&block->tag_set);  in dasd_gendisk_free()

scm_blk.c
    453  bdev->tag_set.ops = &scm_mq_ops;  in scm_blk_dev_setup()
    454  bdev->tag_set.cmd_size = sizeof(blk_status_t);  in scm_blk_dev_setup()
    455  bdev->tag_set.nr_hw_queues = nr_requests;  in scm_blk_dev_setup()
    456  bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;  in scm_blk_dev_setup()
    457  bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in scm_blk_dev_setup()
    458  bdev->tag_set.numa_node = NUMA_NO_NODE;  in scm_blk_dev_setup()
    460  ret = blk_mq_alloc_tag_set(&bdev->tag_set);  in scm_blk_dev_setup()
    464  bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev);  in scm_blk_dev_setup()
    506  blk_mq_free_tag_set(&bdev->tag_set);  in scm_blk_dev_setup()
    516  blk_mq_free_tag_set(&bdev->tag_set);  in scm_blk_dev_cleanup()
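
Both s390 drivers above follow the same life cycle for an embedded tag set: fill in ops, queue depth, cmd_size, NUMA node and flags, call blk_mq_alloc_tag_set(), hand the set to blk_mq_alloc_disk(), and free the set again on the error path and at teardown. The following is a minimal sketch of that sequence, not code from the tree; struct mydrv_dev, mydrv_mq_ops and the numeric values are placeholders.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/string.h>

struct mydrv_dev {                              /* hypothetical driver state */
	struct blk_mq_tag_set tag_set;          /* embedded, as in dasd/scm */
	struct gendisk *gdisk;
};

static const struct blk_mq_ops mydrv_mq_ops = {
	/* .queue_rq = ...   (mandatory in a real driver) */
};

static int mydrv_gendisk_alloc(struct mydrv_dev *dev)
{
	int rc;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops = &mydrv_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 64;          /* illustrative value */
	dev->tag_set.cmd_size = 0;              /* per-request driver payload, if any */
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	rc = blk_mq_alloc_tag_set(&dev->tag_set);
	if (rc)
		return rc;

	dev->gdisk = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(dev->gdisk)) {
		rc = PTR_ERR(dev->gdisk);
		blk_mq_free_tag_set(&dev->tag_set);  /* unwind, as dasd/scm do on failure */
	}
	return rc;
}

static void mydrv_gendisk_free(struct mydrv_dev *dev)
{
	/* After del_gendisk(), drop the disk reference, then release the tags. */
	put_disk(dev->gdisk);
	blk_mq_free_tag_set(&dev->tag_set);
}

Note that the tag set always outlives the disk: it is allocated first and freed last, which is why both drivers free it on the blk_mq_alloc_disk() error path as well as in their cleanup routines.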

/Linux-v6.1/drivers/md/

dm-rq.c
    538  md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);  in dm_mq_init_request_queue()
    539  if (!md->tag_set)  in dm_mq_init_request_queue()
    542  md->tag_set->ops = &dm_mq_ops;  in dm_mq_init_request_queue()
    543  md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();  in dm_mq_init_request_queue()
    544  md->tag_set->numa_node = md->numa_node_id;  in dm_mq_init_request_queue()
    545  md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;  in dm_mq_init_request_queue()
    546  md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();  in dm_mq_init_request_queue()
    547  md->tag_set->driver_data = md;  in dm_mq_init_request_queue()
    549  md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);  in dm_mq_init_request_queue()
    553  md->tag_set->cmd_size += immutable_tgt->per_io_data_size;  in dm_mq_init_request_queue()
    [all …]
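
dm-rq departs from the embedded pattern in two ways visible above: the tag set itself is allocated with kzalloc_node() on the device's NUMA node, and cmd_size is enlarged so each request carries the target's per-I/O data behind the core payload; BLK_MQ_F_STACKING marks the queue as stacked on other block devices. A hedged sketch of the same shape follows; struct mydev, struct mydev_io and mydev_mq_ops are illustrative names, not kernel symbols.

#include <linux/blk-mq.h>
#include <linux/slab.h>

struct mydev_io {                       /* stands in for struct dm_rq_target_io */
	struct request *orig;
};

struct mydev {
	struct blk_mq_tag_set *tag_set; /* heap-allocated, unlike the sets above */
	int numa_node;
};

static const struct blk_mq_ops mydev_mq_ops;    /* .queue_rq etc. defined elsewhere */

static int mydev_init_mq(struct mydev *md, size_t per_io_extra)
{
	int err;

	md->tag_set = kzalloc_node(sizeof(*md->tag_set), GFP_KERNEL, md->numa_node);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &mydev_mq_ops;
	md->tag_set->queue_depth = 128;                 /* illustrative */
	md->tag_set->numa_node = md->numa_node;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = 1;
	md->tag_set->driver_data = md;
	/* Core payload plus whatever the stacked target needs per request. */
	md->tag_set->cmd_size = sizeof(struct mydev_io) + per_io_extra;

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err) {
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
	return err;
}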

/Linux-v6.1/drivers/block/

z2ram.c
    312  static struct blk_mq_tag_set tag_set;  (variable)
    323  disk = blk_mq_alloc_disk(&tag_set, NULL);  in z2ram_register_disk()
    354  tag_set.ops = &z2_mq_ops;  in z2_init()
    355  tag_set.nr_hw_queues = 1;  in z2_init()
    356  tag_set.nr_maps = 1;  in z2_init()
    357  tag_set.queue_depth = 16;  in z2_init()
    358  tag_set.numa_node = NUMA_NO_NODE;  in z2_init()
    359  tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in z2_init()
    360  ret = blk_mq_alloc_tag_set(&tag_set);  in z2_init()
    373  blk_mq_free_tag_set(&tag_set);  in z2_init()
    [all …]

virtio_blk.c
     71  struct blk_mq_tag_set tag_set;  (member)
    949  memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));  in virtblk_probe()
    950  vblk->tag_set.ops = &virtio_mq_ops;  in virtblk_probe()
    951  vblk->tag_set.queue_depth = queue_depth;  in virtblk_probe()
    952  vblk->tag_set.numa_node = NUMA_NO_NODE;  in virtblk_probe()
    953  vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in virtblk_probe()
    954  vblk->tag_set.cmd_size =  in virtblk_probe()
    957  vblk->tag_set.driver_data = vblk;  in virtblk_probe()
    958  vblk->tag_set.nr_hw_queues = vblk->num_vqs;  in virtblk_probe()
    959  vblk->tag_set.nr_maps = 1;  in virtblk_probe()
    [all …]
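
The cmd_size assignment in virtblk_probe() (hit 954, whose right-hand side is truncated in this listing) illustrates the general rule: cmd_size reserves a per-request driver payload ("pdu") directly behind every struct request, which ->queue_rq() later reaches via blk_mq_rq_to_pdu(). A rough sketch of that mechanism, with struct myblk_req and sg_elems as assumed stand-ins:

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>

struct myblk_req {                      /* illustrative per-request payload */
	int status;
	struct scatterlist sg[];        /* trailing, variable-sized part */
};

/* Reserve cmd_size bytes behind every struct request at tag-set setup time. */
static void myblk_size_cmd(struct blk_mq_tag_set *set, unsigned int sg_elems)
{
	set->cmd_size = sizeof(struct myblk_req) +
			sizeof(struct scatterlist) * sg_elems;
}

/* Later, in ->queue_rq(), the payload is simply the request's pdu. */
static blk_status_t myblk_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct myblk_req *mr = blk_mq_rq_to_pdu(bd->rq);

	mr->status = 0;
	/* ...map the bio into mr->sg, kick the device, and complete the
	 * request later with blk_mq_end_request()... */
	return BLK_STS_OK;
}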

nbd.c
    115  struct blk_mq_tag_set tag_set;  (member)
    254  blk_mq_free_tag_set(&nbd->tag_set);  in nbd_dev_remove()
    419  (config->num_connections == 1 && nbd->tag_set.timeout)) {  in nbd_xmit_timeout()
    452  if (!nbd->tag_set.timeout) {  in nbd_xmit_timeout()
    737  if (hwq < nbd->tag_set.nr_hw_queues)  in nbd_handle_reply()
    738  req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],  in nbd_handle_reply()
    905  blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);  in nbd_clear_que()
   1201  if (nbd->tag_set.timeout)  in nbd_reconnect_socket()
   1202  sock->sk->sk_sndtimeo = nbd->tag_set.timeout;  in nbd_reconnect_socket()
   1329  nbd->tag_set.timeout = 0;  in nbd_config_put()
    [all …]
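
Beyond setup, nbd uses its tag set at runtime: replies coming back over the socket are mapped to requests with blk_mq_tag_to_rq() on tags[hwq] (after the bounds check at hit 737), and teardown walks all in-flight requests with blk_mq_tagset_busy_iter(). The sketch below shows both helpers in a neutral form; struct mydev and the counting callback are assumptions rather than nbd code, and the callback signature matches v6.1's busy_tag_iter_fn.

#include <linux/blk-mq.h>

struct mydev {                                  /* hypothetical driver state */
	struct blk_mq_tag_set tag_set;
};

/* Map a (hw queue, tag) pair from the wire back to its request, as
 * nbd_handle_reply() and scsi_host_find_tag() do. */
static struct request *mydev_find_rq(struct mydev *dev, u16 hwq, u16 tag)
{
	if (hwq >= dev->tag_set.nr_hw_queues)
		return NULL;
	return blk_mq_tag_to_rq(dev->tag_set.tags[hwq], tag);
}

/* Visit every request the driver currently owns; here we only count them
 * (nbd_clear_que() instead completes them with an error). */
static bool mydev_count_rq(struct request *rq, void *data)
{
	(*(unsigned int *)data)++;
	return true;                            /* keep iterating */
}

static unsigned int mydev_inflight(struct mydev *dev)
{
	unsigned int count = 0;

	blk_mq_tagset_busy_iter(&dev->tag_set, mydev_count_rq, &count);
	return count;
}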

ataflop.c
    305  struct blk_mq_tag_set tag_set;  (member)
   1996  disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);  in ataflop_alloc_disk()
   2050  blk_mq_free_tag_set(&unit[i].tag_set);  in atari_floppy_cleanup()
   2068  blk_mq_free_tag_set(&fs->tag_set);  in atari_cleanup_floppy_disk()
   2081  memset(&unit[i].tag_set, 0, sizeof(unit[i].tag_set));  in atari_floppy_init()
   2082  unit[i].tag_set.ops = &ataflop_mq_ops;  in atari_floppy_init()
   2083  unit[i].tag_set.nr_hw_queues = 1;  in atari_floppy_init()
   2084  unit[i].tag_set.nr_maps = 1;  in atari_floppy_init()
   2085  unit[i].tag_set.queue_depth = 2;  in atari_floppy_init()
   2086  unit[i].tag_set.numa_node = NUMA_NO_NODE;  in atari_floppy_init()
    [all …]

ublk_drv.c
    143  struct blk_mq_tag_set tag_set;  (member)
    971  req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);  in ublk_commit_completion()
    999  rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);  in ublk_abort_queue()
   1089  blk_mq_tagset_busy_iter(&ub->tag_set,  in ublk_wait_tagset_rqs_idle()
   1192  struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);  in ublk_handle_need_get_data()
   1409  blk_mq_free_tag_set(&ub->tag_set);  in ublk_cdev_rel()
   1461  ub->tag_set.ops = &ublk_mq_ops;  in ublk_add_tag_set()
   1462  ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;  in ublk_add_tag_set()
   1463  ub->tag_set.queue_depth = ub->dev_info.queue_depth;  in ublk_add_tag_set()
   1464  ub->tag_set.numa_node = NUMA_NO_NODE;  in ublk_add_tag_set()
    [all …]

ps3disk.c
     32  struct blk_mq_tag_set tag_set;  (member)
    429  error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,  in ps3disk_probe()
    434  gendisk = blk_mq_alloc_disk(&priv->tag_set, dev);  in ps3disk_probe()
    478  blk_mq_free_tag_set(&priv->tag_set);  in ps3disk_probe()
    504  blk_mq_free_tag_set(&priv->tag_set);  in ps3disk_remove()
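
ps3disk (like mtd_blkdevs and gdrom further down) skips the field-by-field setup: blk_mq_alloc_sq_tag_set() initializes a single-hw-queue tag set from just the ops, a queue depth and the flags. A short sketch under assumed names (struct mydrv, mydrv_mq_ops):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

struct mydrv {
	struct blk_mq_tag_set tag_set;
	struct gendisk *gdisk;
};

static const struct blk_mq_ops mydrv_mq_ops;    /* .queue_rq etc. defined elsewhere */

static int mydrv_setup_queue(struct mydrv *dev)
{
	int err;

	/* One hw queue, depth 2, merging enabled; everything else defaulted. */
	err = blk_mq_alloc_sq_tag_set(&dev->tag_set, &mydrv_mq_ops, 2,
				      BLK_MQ_F_SHOULD_MERGE);
	if (err)
		return err;

	dev->gdisk = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(dev->gdisk)) {
		err = PTR_ERR(dev->gdisk);
		blk_mq_free_tag_set(&dev->tag_set);
	}
	return err;
}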

amiflop.c
    205  struct blk_mq_tag_set tag_set;  (member)
   1785  disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);  in fd_alloc_disk()
   1815  memset(&unit[drive].tag_set, 0, sizeof(unit[drive].tag_set));  in fd_alloc_drive()
   1816  unit[drive].tag_set.ops = &amiflop_mq_ops;  in fd_alloc_drive()
   1817  unit[drive].tag_set.nr_hw_queues = 1;  in fd_alloc_drive()
   1818  unit[drive].tag_set.nr_maps = 1;  in fd_alloc_drive()
   1819  unit[drive].tag_set.queue_depth = 2;  in fd_alloc_drive()
   1820  unit[drive].tag_set.numa_node = NUMA_NO_NODE;  in fd_alloc_drive()
   1821  unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in fd_alloc_drive()
   1822  if (blk_mq_alloc_tag_set(&unit[drive].tag_set))  in fd_alloc_drive()
    [all …]

xen-blkfront.c
    226  struct blk_mq_tag_set tag_set;  (member)
   1116  memset(&info->tag_set, 0, sizeof(info->tag_set));  in xlvbd_alloc_gendisk()
   1117  info->tag_set.ops = &blkfront_mq_ops;  in xlvbd_alloc_gendisk()
   1118  info->tag_set.nr_hw_queues = info->nr_rings;  in xlvbd_alloc_gendisk()
   1126  info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;  in xlvbd_alloc_gendisk()
   1128  info->tag_set.queue_depth = BLK_RING_SIZE(info);  in xlvbd_alloc_gendisk()
   1129  info->tag_set.numa_node = NUMA_NO_NODE;  in xlvbd_alloc_gendisk()
   1130  info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in xlvbd_alloc_gendisk()
   1131  info->tag_set.cmd_size = sizeof(struct blkif_req);  in xlvbd_alloc_gendisk()
   1132  info->tag_set.driver_data = info;  in xlvbd_alloc_gendisk()
    [all …]

/Linux-v6.1/drivers/mmc/core/

queue.c
    420  memset(&mq->tag_set, 0, sizeof(mq->tag_set));  in mmc_init_queue()
    421  mq->tag_set.ops = &mmc_mq_ops;  in mmc_init_queue()
    427  mq->tag_set.queue_depth =  in mmc_init_queue()
    430  mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;  in mmc_init_queue()
    431  mq->tag_set.numa_node = NUMA_NO_NODE;  in mmc_init_queue()
    432  mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;  in mmc_init_queue()
    433  mq->tag_set.nr_hw_queues = 1;  in mmc_init_queue()
    434  mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);  in mmc_init_queue()
    435  mq->tag_set.driver_data = mq;  in mmc_init_queue()
    449  ret = blk_mq_alloc_tag_set(&mq->tag_set);  in mmc_init_queue()
    [all …]
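
The mmc queue adds BLK_MQ_F_BLOCKING to its flags (hit 432), which tells blk-mq to invoke ->queue_rq() from a context that is allowed to sleep. A sketch of what that permits inside the handler; mydrv_do_request() is an assumed helper that performs the I/O synchronously:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static int mydrv_do_request(struct request *rq);        /* assumed helper, may sleep */

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	int err;

	blk_mq_start_request(rq);
	/* Sleeping here is only legal because the tag set was created with
	 * BLK_MQ_F_BLOCKING; otherwise ->queue_rq() may run in atomic context. */
	err = mydrv_do_request(rq);
	blk_mq_end_request(rq, errno_to_blk_status(err));
	return BLK_STS_OK;
}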

/Linux-v6.1/drivers/mtd/ubi/

block.c
     89  struct blk_mq_tag_set tag_set;  (member)
    401  dev->tag_set.ops = &ubiblock_mq_ops;  in ubiblock_create()
    402  dev->tag_set.queue_depth = 64;  in ubiblock_create()
    403  dev->tag_set.numa_node = NUMA_NO_NODE;  in ubiblock_create()
    404  dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in ubiblock_create()
    405  dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);  in ubiblock_create()
    406  dev->tag_set.driver_data = dev;  in ubiblock_create()
    407  dev->tag_set.nr_hw_queues = 1;  in ubiblock_create()
    409  ret = blk_mq_alloc_tag_set(&dev->tag_set);  in ubiblock_create()
    417  gd = blk_mq_alloc_disk(&dev->tag_set, dev);  in ubiblock_create()
    [all …]

/Linux-v6.1/block/

bsg-lib.c
     22  struct blk_mq_tag_set tag_set;  (member)
    279  container_of(q->tag_set, struct bsg_set, tag_set);  in bsg_queue_rq()
    324  container_of(q->tag_set, struct bsg_set, tag_set);  in bsg_remove_queue()
    328  blk_mq_free_tag_set(&bset->tag_set);  in bsg_remove_queue()
    337  container_of(rq->q->tag_set, struct bsg_set, tag_set);  in bsg_timeout()
    375  set = &bset->tag_set;  in bsg_setup_queue()
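
bsg-lib keeps no back-pointer of its own: since every queue created from a tag set records that set in q->tag_set, the embedding structure can be recovered with container_of(), as bsg_queue_rq() and bsg_timeout() do above. A generic sketch of the same trick, with struct my_set as an illustrative stand-in for struct bsg_set:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/container_of.h>

struct my_set {                         /* mirrors struct bsg_set */
	struct blk_mq_tag_set tag_set;
	/* ...driver-wide state... */
};

static struct my_set *my_set_from_rq(struct request *rq)
{
	/* rq->q->tag_set points at the embedded member, so container_of()
	 * yields the surrounding structure. */
	return container_of(rq->q->tag_set, struct my_set, tag_set);
}

scsi_map_queues() (hit 1869 further down) applies the same idea, starting from the tag set pointer itself rather than from a request.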

blk-mq-sched.c
    502  if (blk_mq_is_shared_tags(q->tag_set->flags)) {  in blk_mq_sched_alloc_map_and_rqs()
    507  hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,  in blk_mq_sched_alloc_map_and_rqs()
    541  struct blk_mq_tag_set *set = queue->tag_set;  in blk_mq_init_sched_shared_tags()
    560  unsigned int flags = q->tag_set->flags;  in blk_mq_init_sched()
    569  q->nr_requests = q->tag_set->queue_depth;  in blk_mq_init_sched()
    578  q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,  in blk_mq_init_sched()
    636  if (blk_mq_is_shared_tags(q->tag_set->flags)) {  in blk_mq_sched_free_rqs()
    637  blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,  in blk_mq_sched_free_rqs()
    642  blk_mq_free_rqs(q->tag_set,  in blk_mq_sched_free_rqs()

blk-mq-tag.c
    264  struct blk_mq_tag_set *set = q->tag_set;  in bt_iter()
    500  if (blk_mq_is_shared_tags(q->tag_set->flags)) {  in blk_mq_queue_tag_busy_iter()
    501  struct blk_mq_tags *tags = q->tag_set->shared_tags;  in blk_mq_queue_tag_busy_iter()
    608  struct blk_mq_tag_set *set = hctx->queue->tag_set;  in blk_mq_tag_update_depth()
    656  q->nr_requests - q->tag_set->reserved_tags);  in blk_mq_tag_update_sched_shared_tags()

/Linux-v6.1/drivers/block/paride/

pd.c
    239  struct blk_mq_tag_set tag_set;  (member)
    902  memset(&disk->tag_set, 0, sizeof(disk->tag_set));  in pd_probe_drive()
    903  disk->tag_set.ops = &pd_mq_ops;  in pd_probe_drive()
    904  disk->tag_set.cmd_size = sizeof(struct pd_req);  in pd_probe_drive()
    905  disk->tag_set.nr_hw_queues = 1;  in pd_probe_drive()
    906  disk->tag_set.nr_maps = 1;  in pd_probe_drive()
    907  disk->tag_set.queue_depth = 2;  in pd_probe_drive()
    908  disk->tag_set.numa_node = NUMA_NO_NODE;  in pd_probe_drive()
    909  disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;  in pd_probe_drive()
    910  ret = blk_mq_alloc_tag_set(&disk->tag_set);  in pd_probe_drive()
    [all …]

/Linux-v6.1/drivers/mtd/

mtd_blkdevs.c
     33  blk_mq_free_tag_set(dev->tag_set);  in blktrans_dev_release()
     34  kfree(dev->tag_set);  in blktrans_dev_release()
    326  new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);  in add_mtd_blktrans_dev()
    327  if (!new->tag_set)  in add_mtd_blktrans_dev()
    330  ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,  in add_mtd_blktrans_dev()
    336  gd = blk_mq_alloc_disk(new->tag_set, new);  in add_mtd_blktrans_dev()
    403  blk_mq_free_tag_set(new->tag_set);  in add_mtd_blktrans_dev()
    405  kfree(new->tag_set);  in add_mtd_blktrans_dev()

/Linux-v6.1/include/scsi/

scsi_tcq.h
     32  if (hwq < shost->tag_set.nr_hw_queues) {  in scsi_host_find_tag()
     33  req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],  in scsi_host_find_tag()

/Linux-v6.1/drivers/block/rnbd/

rnbd-clt.c
    737  if (sess->tag_set.tags)  in destroy_mq_tags()
    738  blk_mq_free_tag_set(&sess->tag_set);  in destroy_mq_tags()
   1206  struct blk_mq_tag_set *tag_set = &sess->tag_set;  in setup_mq_tags()  (local)
   1208  memset(tag_set, 0, sizeof(*tag_set));  in setup_mq_tags()
   1209  tag_set->ops = &rnbd_mq_ops;  in setup_mq_tags()
   1210  tag_set->queue_depth = sess->queue_depth;  in setup_mq_tags()
   1211  tag_set->numa_node = NUMA_NO_NODE;  in setup_mq_tags()
   1212  tag_set->flags = BLK_MQ_F_SHOULD_MERGE |  in setup_mq_tags()
   1214  tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;  in setup_mq_tags()
   1217  tag_set->nr_maps = sess->nr_poll_queues ? HCTX_MAX_TYPES : 2;  in setup_mq_tags()
    [all …]

/Linux-v6.1/drivers/scsi/

scsi_lib.c
   1869  struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);  in scsi_map_queues()
   1965  struct blk_mq_tag_set *tag_set = &shost->tag_set;  in scsi_mq_setup_tags()  (local)
   1974  memset(tag_set, 0, sizeof(*tag_set));  in scsi_mq_setup_tags()
   1976  tag_set->ops = &scsi_mq_ops;  in scsi_mq_setup_tags()
   1978  tag_set->ops = &scsi_mq_ops_no_commit;  in scsi_mq_setup_tags()
   1979  tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;  in scsi_mq_setup_tags()
   1980  tag_set->nr_maps = shost->nr_maps ? : 1;  in scsi_mq_setup_tags()
   1981  tag_set->queue_depth = shost->can_queue;  in scsi_mq_setup_tags()
   1982  tag_set->cmd_size = cmd_size;  in scsi_mq_setup_tags()
   1983  tag_set->numa_node = dev_to_node(shost->dma_dev);  in scsi_mq_setup_tags()
    [all …]
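
scsi_mq_setup_tags() sizes one tag set for the whole host: the hw queue and map counts fall back to 1 via the ?: shorthand, queue_depth comes from shost->can_queue, and the NUMA node is taken from the DMA device rather than NUMA_NO_NODE. A hedged sketch of that sizing, with illustrative function and parameter names:

#include <linux/blk-mq.h>
#include <linux/device.h>

static void mydrv_size_tag_set(struct blk_mq_tag_set *set,
			       struct device *dma_dev,
			       unsigned int can_queue,
			       unsigned int nr_hw_queues,
			       unsigned int cmd_size)
{
	set->nr_hw_queues = nr_hw_queues ?: 1;  /* same fallback scsi_mq_setup_tags() uses */
	set->queue_depth = can_queue;
	set->cmd_size = cmd_size;
	/* Keep the tag/request memory close to the device that will DMA into it. */
	set->numa_node = dev_to_node(dma_dev);
}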

/Linux-v6.1/arch/um/drivers/

ubd_kern.c
    169  struct blk_mq_tag_set tag_set;  (member)
    816  blk_mq_free_tag_set(&ubd_dev->tag_set);  in ubd_device_release()
    899  ubd_dev->tag_set.ops = &ubd_mq_ops;  in ubd_add()
    900  ubd_dev->tag_set.queue_depth = 64;  in ubd_add()
    901  ubd_dev->tag_set.numa_node = NUMA_NO_NODE;  in ubd_add()
    902  ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in ubd_add()
    903  ubd_dev->tag_set.driver_data = ubd_dev;  in ubd_add()
    904  ubd_dev->tag_set.nr_hw_queues = 1;  in ubd_add()
    906  err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);  in ubd_add()
    910  disk = blk_mq_alloc_disk(&ubd_dev->tag_set, ubd_dev);  in ubd_add()
    [all …]

/Linux-v6.1/drivers/nvme/target/

loop.c
     35  struct blk_mq_tag_set tag_set;  (member)
     88  return queue->ctrl->tag_set.tags[queue_idx - 1];  in nvme_loop_tagset()
    213  (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);  in nvme_loop_init_request()
    456  blk_mq_update_nr_hw_queues(&ctrl->tag_set,  in nvme_loop_reset_ctrl_work()
    496  ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,  in nvme_loop_create_io_queues()

/Linux-v6.1/drivers/cdrom/

gdrom.c
    104  struct blk_mq_tag_set tag_set;  (member)
    776  err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,  in probe_gdrom()
    781  gd.disk = blk_mq_alloc_disk(&gd.tag_set, NULL);  in probe_gdrom()
    822  blk_mq_free_tag_set(&gd.tag_set);  in probe_gdrom()
    834  blk_mq_free_tag_set(&gd.tag_set);  in remove_gdrom()

/Linux-v6.1/drivers/block/null_blk/

main.c
     72  static struct blk_mq_tag_set tag_set;  (variable)
    378  set = dev->nullb->tag_set;  in nullb_update_nr_hw_queues()
   1773  nullb->tag_set == &nullb->__tag_set)  in null_del_dev()
   1774  blk_mq_free_tag_set(nullb->tag_set);  in null_del_dev()
   2040  nullb->tag_set = &tag_set;  in null_add_dev()
   2043  nullb->tag_set = &nullb->__tag_set;  in null_add_dev()
   2044  rv = null_init_tag_set(nullb, nullb->tag_set);  in null_add_dev()
   2053  nullb->tag_set->timeout = 5 * HZ;  in null_add_dev()
   2054  nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);  in null_add_dev()
   2142  if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)  in null_add_dev()
    [all …]