/Linux-v5.15/block/ |
D | blk-mq.c |
    438   unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)  in blk_mq_alloc_request_hctx() argument
    463   if (hctx_idx >= q->nr_hw_queues)  in blk_mq_alloc_request_hctx()
    475   data.hctx = q->queue_hw_ctx[hctx_idx];  in blk_mq_alloc_request_hctx()
    2318  struct blk_mq_tags *tags, unsigned int hctx_idx)  in blk_mq_clear_rq_mapping() argument
    2320  struct blk_mq_tags *drv_tags = set->tags[hctx_idx];  in blk_mq_clear_rq_mapping()
    2351  unsigned int hctx_idx)  in blk_mq_free_rqs() argument
    2363  set->ops->exit_request(set, rq, hctx_idx);  in blk_mq_free_rqs()
    2368  blk_mq_clear_rq_mapping(set, tags, hctx_idx);  in blk_mq_free_rqs()
    2393  unsigned int hctx_idx,  in blk_mq_alloc_rq_map() argument
    2401  node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);  in blk_mq_alloc_rq_map()
    [all …]
|
D | blk-mq.h |
    56    unsigned int hctx_idx);
    59    unsigned int hctx_idx,
    64    unsigned int hctx_idx, unsigned int depth);
|
D | blk-mq-sched.c |
    520   unsigned int hctx_idx)  in blk_mq_sched_alloc_tags() argument
    525   hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,  in blk_mq_sched_alloc_tags()
    530   ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);  in blk_mq_sched_alloc_tags()
|
D | bsg-lib.c |
    295   unsigned int hctx_idx, unsigned int numa_node)  in bsg_init_rq() argument
    318   unsigned int hctx_idx)  in bsg_exit_rq() argument
|
D | kyber-iosched.c |
    463   static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_init_hctx() argument
    518   static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_exit_hctx() argument
|
D | mq-deadline.c |
    526   static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in dd_init_hctx() argument
|
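The block/ hits above show the core handing hctx_idx straight through to driver and scheduler hooks: blk_mq_free_rqs() calls set->ops->exit_request(set, rq, hctx_idx), blk_mq_alloc_request_hctx() bounds-checks the index against q->nr_hw_queues before dereferencing q->queue_hw_ctx[hctx_idx], and the driver tag map is looked up as set->tags[hctx_idx]. Below is a minimal, hedged sketch of the driver-side hctx hooks that receive this index; the mydrv_* names and structures are illustrative assumptions, not code from the tree, and .queue_rq is omitted.

#include <linux/blk-mq.h>

/* Hypothetical per-hardware-queue state; the driver allocates
 * tag_set.nr_hw_queues of these, one per hctx_idx. */
struct mydrv_queue {
	unsigned int index;
};

struct mydrv_dev {
	struct blk_mq_tag_set tag_set;
	struct mydrv_queue *queues;
};

/* hctx_idx selects this hardware queue's slot in the driver's own array,
 * the same lookup null_init_hctx() does with nullb->queues[hctx_idx] below. */
static int mydrv_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			   unsigned int hctx_idx)
{
	struct mydrv_dev *dev = driver_data;
	struct mydrv_queue *q = &dev->queues[hctx_idx];

	q->index = hctx_idx;
	hctx->driver_data = q;
	return 0;
}

static void mydrv_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	hctx->driver_data = NULL;
}

static const struct blk_mq_ops mydrv_mq_ops = {
	.init_hctx	= mydrv_init_hctx,
	.exit_hctx	= mydrv_exit_hctx,
	/* .queue_rq and the request init/exit hooks omitted for brevity */
};

The request-level hooks (init_request/exit_request) and the admin-vs-I/O queue mapping are sketched separately after the NVMe host and SCSI/MMC sections below.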
/Linux-v5.15/drivers/nvme/target/ |
D | loop.c |
    204   struct request *req, unsigned int hctx_idx,  in nvme_loop_init_request() argument
    213   (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);  in nvme_loop_init_request()
    219   unsigned int hctx_idx)  in nvme_loop_init_hctx() argument
    222   struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_loop_init_hctx()
    224   BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);  in nvme_loop_init_hctx()
    239   unsigned int hctx_idx)  in nvme_loop_init_admin_hctx() argument
    244   BUG_ON(hctx_idx != 0);  in nvme_loop_init_admin_hctx()
|
/Linux-v5.15/drivers/mmc/core/ |
D | queue.c |
    203   unsigned int hctx_idx, unsigned int numa_node)  in mmc_mq_init_request() argument
    218   unsigned int hctx_idx)  in mmc_mq_exit_request() argument
|
/Linux-v5.15/drivers/nvme/host/ |
D | rdma.c |
    289   struct request *rq, unsigned int hctx_idx)  in nvme_rdma_exit_request() argument
    297   struct request *rq, unsigned int hctx_idx,  in nvme_rdma_init_request() argument
    302   int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_rdma_init_request()
    323   unsigned int hctx_idx)  in nvme_rdma_init_hctx() argument
    326   struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_rdma_init_hctx()
    328   BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);  in nvme_rdma_init_hctx()
    335   unsigned int hctx_idx)  in nvme_rdma_init_admin_hctx() argument
    340   BUG_ON(hctx_idx != 0);  in nvme_rdma_init_admin_hctx()
|
D | tcp.c |
    415   struct request *rq, unsigned int hctx_idx)  in nvme_tcp_exit_request() argument
    423   struct request *rq, unsigned int hctx_idx,  in nvme_tcp_init_request() argument
    429   int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_tcp_init_request()
    448   unsigned int hctx_idx)  in nvme_tcp_init_hctx() argument
    451   struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_tcp_init_hctx()
    458   unsigned int hctx_idx)  in nvme_tcp_init_admin_hctx() argument
|
D | pci.c |
    396   unsigned int hctx_idx)  in nvme_admin_init_hctx() argument
    401   WARN_ON(hctx_idx != 0);  in nvme_admin_init_hctx()
    409   unsigned int hctx_idx)  in nvme_init_hctx() argument
    412   struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];  in nvme_init_hctx()
    414   WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);  in nvme_init_hctx()
    420   unsigned int hctx_idx, unsigned int numa_node)  in nvme_init_request() argument
    424   int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;  in nvme_init_request()
|
D | fc.c |
    1827  unsigned int hctx_idx)  in nvme_fc_exit_request() argument
    2117  unsigned int hctx_idx, unsigned int numa_node)  in nvme_fc_init_request() argument
    2121  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_fc_init_request()
    2202  unsigned int hctx_idx)  in nvme_fc_init_hctx() argument
    2206  __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);  in nvme_fc_init_hctx()
    2213  unsigned int hctx_idx)  in nvme_fc_init_admin_hctx() argument
    2217  __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);  in nvme_fc_init_admin_hctx()
|
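The NVMe host hits above (rdma.c, tcp.c, pci.c, fc.c) and the target loop.c hits share one convention: controller queue 0 is the admin queue, an I/O hardware queue with index hctx_idx maps to controller queue hctx_idx + 1, and init_request recovers the queue index by checking which tag set the request was allocated from. A condensed sketch of that mapping follows, assuming simplified myctrl_* structures in place of the real per-transport types, and assuming tag_set.cmd_size is sizeof(struct myctrl_cmd) so blk_mq_rq_to_pdu() is valid.

#include <linux/blk-mq.h>
#include <linux/errno.h>

/* Simplified stand-ins for the driver's controller, queue and per-request
 * (PDU) structures; the real drivers use nvme_rdma_queue, nvme_tcp_queue, ... */
struct myctrl_queue {
	int id;
};

struct myctrl_cmd {
	struct myctrl_queue *queue;	/* resolved once at init_request time */
};

struct myctrl {
	struct blk_mq_tag_set admin_tag_set;	/* hardware queue 0 only */
	struct blk_mq_tag_set tag_set;		/* I/O queues 1..queue_count-1 */
	unsigned int queue_count;		/* admin queue included */
	struct myctrl_queue *queues;
};

/* An I/O hardware queue with index hctx_idx is controller queue hctx_idx + 1,
 * because controller queue 0 is reserved for the admin queue. */
static int myctrl_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			    unsigned int hctx_idx)
{
	struct myctrl *ctrl = data;

	if (WARN_ON(hctx_idx + 1 >= ctrl->queue_count))
		return -EINVAL;
	hctx->driver_data = &ctrl->queues[hctx_idx + 1];
	return 0;
}

/* The admin tag set registers a single hardware queue, so hctx_idx is 0. */
static int myctrl_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				  unsigned int hctx_idx)
{
	struct myctrl *ctrl = data;

	WARN_ON(hctx_idx != 0);
	hctx->driver_data = &ctrl->queues[0];
	return 0;
}

/* init_request is shared between both tag sets, so the queue index is
 * recovered by checking which set the request belongs to, as above. */
static int myctrl_init_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	struct myctrl *ctrl = set->driver_data;
	struct myctrl_cmd *cmd = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;

	cmd->queue = &ctrl->queues[queue_idx];
	return 0;
}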
/Linux-v5.15/include/linux/ |
D | blk-mq.h |
    473   unsigned int hctx_idx);
|
/Linux-v5.15/drivers/mtd/ubi/ |
D | block.c |
    332   struct request *req, unsigned int hctx_idx,  in ubiblock_init_request() argument
|
/Linux-v5.15/drivers/md/ |
D | dm-rq.c |
    466   unsigned int hctx_idx, unsigned int numa_node)  in dm_mq_init_request() argument
|
/Linux-v5.15/drivers/block/null_blk/ |
D | main.c |
    1533  static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in null_exit_hctx() argument
    1549  unsigned int hctx_idx)  in null_init_hctx() argument
    1559  nq = &nullb->queues[hctx_idx];  in null_init_hctx()
|
/Linux-v5.15/drivers/scsi/ |
D | scsi_lib.c |
    1746  unsigned int hctx_idx, unsigned int numa_node)  in scsi_mq_init_request() argument
    1775  unsigned int hctx_idx)  in scsi_mq_exit_request() argument
    1797  unsigned int hctx_idx)  in scsi_init_hctx() argument
|
/Linux-v5.15/drivers/block/ |
D | virtio_blk.c |
    664   unsigned int hctx_idx, unsigned int numa_node)  in virtblk_init_request() argument
|
D | nbd.c |
    1664  unsigned int hctx_idx, unsigned int numa_node)  in nbd_init_request() argument
|
/Linux-v5.15/drivers/block/mtip32xx/ |
D | mtip32xx.c |
    3467  unsigned int hctx_idx)  in mtip_free_cmd() argument
    3480  unsigned int hctx_idx, unsigned int numa_node)  in mtip_init_cmd() argument
|
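Finally, the init_request hooks listed for scsi_lib.c, queue.c, nbd.c, virtio_blk.c and mtip32xx.c all take a numa_node argument next to hctx_idx; the usual idiom is to place per-command allocations on that node so they sit local to the CPUs served by the hardware queue. A hedged sketch of that pattern follows, with a hypothetical mydrv_cmd payload and bounce buffer standing in for the drivers' real per-command data (sense buffers, SG tables, command headers), and assuming tag_set.cmd_size is sizeof(struct mydrv_cmd).

#include <linux/blk-mq.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical per-request driver payload (PDU), sized via tag_set->cmd_size. */
struct mydrv_cmd {
	void *bounce;			/* example per-command buffer */
};

#define MYDRV_BOUNCE_SIZE 4096

/* Preallocation hook: hctx_idx names the hardware queue this request will be
 * issued on, and numa_node is that queue's home node, so buffers allocated
 * here end up local to the CPUs that will touch them. */
static int mydrv_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->bounce = kzalloc_node(MYDRV_BOUNCE_SIZE, GFP_KERNEL, numa_node);
	return cmd->bounce ? 0 : -ENOMEM;
}

/* Teardown mirrors init; hctx_idx is passed again but often goes unused here. */
static void mydrv_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->bounce);
	cmd->bounce = NULL;
}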