Lines Matching full:queue (NVMe over RDMA host driver, drivers/nvme/host/rdma.c)
74 struct nvme_rdma_queue *queue; member
160 static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) in nvme_rdma_queue_idx() argument
162 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
165 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) in nvme_rdma_poll_queue() argument
167 return nvme_rdma_queue_idx(queue) > in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
169 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
172 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) in nvme_rdma_inline_data_size() argument
174 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_rdma_inline_data_size()
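The three helpers matched just above are small but load-bearing: the queue index is plain pointer arithmetic against the controller's queue array, a queue is treated as a poll queue when its index falls past the default and read ranges, and the inline data budget is whatever remains of the command capsule after the 64-byte SQE (the capsule length itself comes from ioccsz * 16, as the nvme_rdma_alloc_queue() match further down shows). The user-space C sketch below illustrates that arithmetic; all demo_* names, the struct layout, and the numbers in main() are illustrative assumptions, not kernel definitions.

```c
/*
 * Minimal user-space sketch (not kernel code) of the three helpers above:
 * queue index by pointer subtraction, poll-queue classification by index
 * range, and inline capsule space as "capsule length minus SQE size".
 * The struct layout and the numbers in main() are illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

#define DEMO_SQE_SIZE	64	/* stands in for sizeof(struct nvme_command) */

struct demo_ctrl;

struct demo_queue {
	struct demo_ctrl *ctrl;
	size_t cmnd_capsule_len;
};

struct demo_ctrl {
	unsigned int io_queues[HCTX_MAX_TYPES];
	struct demo_queue queues[8];		/* queues[0] is the admin queue */
};

static int demo_queue_idx(struct demo_queue *queue)
{
	return queue - queue->ctrl->queues;	/* index == pointer difference */
}

static bool demo_poll_queue(struct demo_queue *queue)
{
	/* poll queues are the ones past the default + read ranges */
	return demo_queue_idx(queue) >
	       queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
	       queue->ctrl->io_queues[HCTX_TYPE_READ];
}

static size_t demo_inline_data_size(struct demo_queue *queue)
{
	/* whatever capsule space is left after the 64-byte SQE */
	return queue->cmnd_capsule_len - DEMO_SQE_SIZE;
}

int main(void)
{
	struct demo_ctrl ctrl = { .io_queues = { 2, 2, 3 } };	/* 2+2+3 I/O queues */

	for (int i = 0; i < 8; i++) {
		ctrl.queues[i].ctrl = &ctrl;
		ctrl.queues[i].cmnd_capsule_len = 260 * 16;	/* e.g. ioccsz = 260 -> 4K inline */
		printf("queue %d: poll=%d inline=%zu\n", i,
		       demo_poll_queue(&ctrl.queues[i]),
		       demo_inline_data_size(&ctrl.queues[i]));
	}
	return 0;	/* with this split, queues 5..7 report poll=1 */
}
```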
224 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue in nvme_rdma_alloc_ring()
226 * will issue error recovery and queue re-creation. in nvme_rdma_alloc_ring()
247 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) in nvme_rdma_wait_for_cm() argument
251 ret = wait_for_completion_interruptible(&queue->cm_done); in nvme_rdma_wait_for_cm()
254 WARN_ON_ONCE(queue->cm_error > 0); in nvme_rdma_wait_for_cm()
255 return queue->cm_error; in nvme_rdma_wait_for_cm()
258 static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) in nvme_rdma_create_qp() argument
260 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_create_qp()
267 init_attr.cap.max_send_wr = factor * queue->queue_size + 1; in nvme_rdma_create_qp()
269 init_attr.cap.max_recv_wr = queue->queue_size + 1; in nvme_rdma_create_qp()
274 init_attr.send_cq = queue->ib_cq; in nvme_rdma_create_qp()
275 init_attr.recv_cq = queue->ib_cq; in nvme_rdma_create_qp()
276 if (queue->pi_support) in nvme_rdma_create_qp()
278 init_attr.qp_context = queue; in nvme_rdma_create_qp()
280 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); in nvme_rdma_create_qp()
282 queue->qp = queue->cm_id->qp; in nvme_rdma_create_qp()
301 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request() local
309 if (queue->pi_support) in nvme_rdma_init_request()
314 req->queue = queue; in nvme_rdma_init_request()
324 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx() local
328 hctx->driver_data = queue; in nvme_rdma_init_hctx()
336 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx() local
340 hctx->driver_data = queue; in nvme_rdma_init_admin_hctx()
414 static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue) in nvme_rdma_free_cq() argument
416 if (nvme_rdma_poll_queue(queue)) in nvme_rdma_free_cq()
417 ib_free_cq(queue->ib_cq); in nvme_rdma_free_cq()
419 ib_cq_pool_put(queue->ib_cq, queue->cq_size); in nvme_rdma_free_cq()
422 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) in nvme_rdma_destroy_queue_ib() argument
427 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags)) in nvme_rdma_destroy_queue_ib()
430 dev = queue->device; in nvme_rdma_destroy_queue_ib()
433 if (queue->pi_support) in nvme_rdma_destroy_queue_ib()
434 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_destroy_queue_ib()
435 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_destroy_queue_ib()
442 ib_destroy_qp(queue->qp); in nvme_rdma_destroy_queue_ib()
443 nvme_rdma_free_cq(queue); in nvme_rdma_destroy_queue_ib()
445 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_destroy_queue_ib()
464 struct nvme_rdma_queue *queue) in nvme_rdma_create_cq() argument
466 int ret, comp_vector, idx = nvme_rdma_queue_idx(queue); in nvme_rdma_create_cq()
470 * Spread I/O queues completion vectors according their queue index. in nvme_rdma_create_cq()
476 if (nvme_rdma_poll_queue(queue)) { in nvme_rdma_create_cq()
478 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, in nvme_rdma_create_cq()
482 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, in nvme_rdma_create_cq()
486 if (IS_ERR(queue->ib_cq)) { in nvme_rdma_create_cq()
487 ret = PTR_ERR(queue->ib_cq); in nvme_rdma_create_cq()
494 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) in nvme_rdma_create_queue_ib() argument
501 queue->device = nvme_rdma_find_get_device(queue->cm_id); in nvme_rdma_create_queue_ib()
502 if (!queue->device) { in nvme_rdma_create_queue_ib()
503 dev_err(queue->cm_id->device->dev.parent, in nvme_rdma_create_queue_ib()
507 ibdev = queue->device->dev; in nvme_rdma_create_queue_ib()
510 queue->cq_size = cq_factor * queue->queue_size + 1; in nvme_rdma_create_queue_ib()
512 ret = nvme_rdma_create_cq(ibdev, queue); in nvme_rdma_create_queue_ib()
516 ret = nvme_rdma_create_qp(queue, send_wr_factor); in nvme_rdma_create_queue_ib()
520 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, in nvme_rdma_create_queue_ib()
522 if (!queue->rsp_ring) { in nvme_rdma_create_queue_ib()
532 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; in nvme_rdma_create_queue_ib()
533 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, in nvme_rdma_create_queue_ib()
534 queue->queue_size, in nvme_rdma_create_queue_ib()
538 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
540 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
544 if (queue->pi_support) { in nvme_rdma_create_queue_ib()
545 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, in nvme_rdma_create_queue_ib()
546 queue->queue_size, IB_MR_TYPE_INTEGRITY, in nvme_rdma_create_queue_ib()
549 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
551 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
556 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); in nvme_rdma_create_queue_ib()
561 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_create_queue_ib()
563 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_create_queue_ib()
566 rdma_destroy_qp(queue->cm_id); in nvme_rdma_create_queue_ib()
568 nvme_rdma_free_cq(queue); in nvme_rdma_create_queue_ib()
570 nvme_rdma_dev_put(queue->device); in nvme_rdma_create_queue_ib()
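The nvme_rdma_create_qp() and nvme_rdma_create_queue_ib() matches above also expose how per-queue verbs resources are sized: send work requests as factor * queue_size + 1, receive work requests as queue_size + 1, the completion queue as cq_factor * queue_size + 1, and an MR pool with one registration per command slot sized for max_fr_pages + 1 pages. The sketch below just collects that arithmetic; the concrete values of send_wr_factor and cq_factor are not part of this listing, so they stay as parameters, and all demo_* names and numbers are illustrative.

```c
/*
 * Sketch of the per-queue resource budget implied by the matches above.
 * send_wr_factor and cq_factor are not shown in this listing, so they
 * are left as plain parameters here.
 */
#include <stdio.h>

struct demo_queue_budget {
	unsigned int max_send_wr;	/* factor * queue_size + 1 */
	unsigned int max_recv_wr;	/* queue_size + 1 */
	unsigned int cq_entries;	/* cq_factor * queue_size + 1 */
	unsigned int mr_pool_depth;	/* one MR per command slot */
	unsigned int pages_per_mr;	/* max_fr_pages + 1 */
};

static struct demo_queue_budget
demo_budget(unsigned int queue_size, unsigned int send_wr_factor,
	    unsigned int cq_factor, unsigned int max_fr_pages)
{
	struct demo_queue_budget b = {
		.max_send_wr	= send_wr_factor * queue_size + 1,
		.max_recv_wr	= queue_size + 1,
		.cq_entries	= cq_factor * queue_size + 1,
		.mr_pool_depth	= queue_size,
		.pages_per_mr	= max_fr_pages + 1,
	};
	return b;
}

int main(void)
{
	/* example: 128-deep queue, illustrative factors and page count */
	struct demo_queue_budget b = demo_budget(128, 3, 4, 256);

	printf("send_wr=%u recv_wr=%u cq=%u mrs=%u pages/mr=%u\n",
	       b.max_send_wr, b.max_recv_wr, b.cq_entries,
	       b.mr_pool_depth, b.pages_per_mr);
	return 0;
}
```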
577 struct nvme_rdma_queue *queue; in nvme_rdma_alloc_queue() local
581 queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
582 mutex_init(&queue->queue_lock); in nvme_rdma_alloc_queue()
583 queue->ctrl = ctrl; in nvme_rdma_alloc_queue()
585 queue->pi_support = true; in nvme_rdma_alloc_queue()
587 queue->pi_support = false; in nvme_rdma_alloc_queue()
588 init_completion(&queue->cm_done); in nvme_rdma_alloc_queue()
591 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_rdma_alloc_queue()
593 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_rdma_alloc_queue()
595 queue->queue_size = queue_size; in nvme_rdma_alloc_queue()
597 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, in nvme_rdma_alloc_queue()
599 if (IS_ERR(queue->cm_id)) { in nvme_rdma_alloc_queue()
601 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); in nvme_rdma_alloc_queue()
602 ret = PTR_ERR(queue->cm_id); in nvme_rdma_alloc_queue()
609 queue->cm_error = -ETIMEDOUT; in nvme_rdma_alloc_queue()
610 ret = rdma_resolve_addr(queue->cm_id, src_addr, in nvme_rdma_alloc_queue()
619 ret = nvme_rdma_wait_for_cm(queue); in nvme_rdma_alloc_queue()
626 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); in nvme_rdma_alloc_queue()
631 rdma_destroy_id(queue->cm_id); in nvme_rdma_alloc_queue()
632 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_alloc_queue()
634 mutex_destroy(&queue->queue_lock); in nvme_rdma_alloc_queue()
638 static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) in __nvme_rdma_stop_queue() argument
640 rdma_disconnect(queue->cm_id); in __nvme_rdma_stop_queue()
641 ib_drain_qp(queue->qp); in __nvme_rdma_stop_queue()
644 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) in nvme_rdma_stop_queue() argument
646 mutex_lock(&queue->queue_lock); in nvme_rdma_stop_queue()
647 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) in nvme_rdma_stop_queue()
648 __nvme_rdma_stop_queue(queue); in nvme_rdma_stop_queue()
649 mutex_unlock(&queue->queue_lock); in nvme_rdma_stop_queue()
652 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) in nvme_rdma_free_queue() argument
654 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_free_queue()
657 rdma_destroy_id(queue->cm_id); in nvme_rdma_free_queue()
658 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_free_queue()
659 mutex_destroy(&queue->queue_lock); in nvme_rdma_free_queue()
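The stop/free matches outline the queue lifecycle flags: NVME_RDMA_Q_ALLOCATED is set once the CM ID is created and address resolution completes, NVME_RDMA_Q_LIVE is set when the queue connects, stopping test-and-clears LIVE before disconnecting and draining the QP, and freeing test-and-clears ALLOCATED before destroying the CM ID, the IB resources, and the queue lock. The single-threaded sketch below mimics that flag discipline with plain bools standing in for the kernel's atomic bitops and queue_lock; it is an illustration, not the driver's locking model.

```c
/*
 * Simplified, single-threaded sketch of the ALLOCATED/LIVE flag discipline
 * visible in the stop/free matches above. Plain bools stand in for the
 * kernel's atomic test_and_clear_bit() and the per-queue mutex.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
	bool allocated;		/* NVME_RDMA_Q_ALLOCATED */
	bool live;		/* NVME_RDMA_Q_LIVE */
};

static bool demo_test_and_clear(bool *flag)
{
	bool was_set = *flag;

	*flag = false;
	return was_set;
}

static void demo_stop_queue(struct demo_queue *q)
{
	/* only a live queue gets disconnected and drained */
	if (demo_test_and_clear(&q->live))
		printf("disconnect + drain\n");
}

static void demo_free_queue(struct demo_queue *q)
{
	/* only an allocated queue owns resources worth tearing down */
	if (!demo_test_and_clear(&q->allocated))
		return;
	printf("destroy CM ID, IB resources, lock\n");
}

int main(void)
{
	struct demo_queue q = { .allocated = true, .live = true };

	demo_stop_queue(&q);	/* prints once */
	demo_stop_queue(&q);	/* second stop is a no-op */
	demo_free_queue(&q);	/* prints once */
	demo_free_queue(&q);	/* second free is a no-op */
	return 0;
}
```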
680 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; in nvme_rdma_start_queue() local
689 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_start_queue()
691 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_start_queue()
692 __nvme_rdma_stop_queue(queue); in nvme_rdma_start_queue()
694 "failed to connect queue: %d ret=%d\n", idx, ret); in nvme_rdma_start_queue()
762 * sufficient queue count to have dedicated default queues. in nvme_rdma_alloc_io_queues()
837 * Bind the async event SQE DMA mapping to the admin queue lifetime. in nvme_rdma_configure_admin_queue()
839 * error recovery and queue re-creation. in nvme_rdma_configure_admin_queue()
917 * queue number might have changed. in nvme_rdma_configure_io_queues()
1069 "ctrl sqsize %u > max queue size %u, clamping down\n", in nvme_rdma_setup_ctrl()
1196 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_wr_error() local
1197 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_wr_error()
1224 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, in nvme_rdma_inv_rkey() argument
1238 return ib_post_send(queue->qp, &wr, NULL); in nvme_rdma_inv_rkey()
1257 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, in nvme_rdma_unmap_data() argument
1261 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_unmap_data()
1263 struct list_head *pool = &queue->qp->rdma_mrs; in nvme_rdma_unmap_data()
1269 pool = &queue->qp->sig_mrs; in nvme_rdma_unmap_data()
1272 ib_mr_pool_put(queue->qp, pool, req->mr); in nvme_rdma_unmap_data()
1290 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_inline() argument
1303 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_map_sg_inline()
1308 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_rdma_map_sg_inline()
1316 static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_single() argument
1323 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); in nvme_rdma_map_sg_single()
1328 static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_fr() argument
1335 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_map_sg_fr()
1346 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); in nvme_rdma_map_sg_fr()
1432 static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_pi() argument
1444 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_map_sg_pi()
1480 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); in nvme_rdma_map_sg_pi()
1547 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, in nvme_rdma_map_data() argument
1551 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_map_data()
1569 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count); in nvme_rdma_map_data()
1574 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && in nvme_rdma_map_data()
1575 queue->ctrl->use_inline_data && in nvme_rdma_map_data()
1577 nvme_rdma_inline_data_size(queue)) { in nvme_rdma_map_data()
1578 ret = nvme_rdma_map_sg_inline(queue, req, c, count); in nvme_rdma_map_data()
1583 ret = nvme_rdma_map_sg_single(queue, req, c); in nvme_rdma_map_data()
1588 ret = nvme_rdma_map_sg_fr(queue, req, c, count); in nvme_rdma_map_data()
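Read together, the map_sg_* and nvme_rdma_map_data() matches show a three-way (plus PI) choice for request data: in-capsule inline data for small writes on I/O queues when the controller supports it, a single SGE using the PD's global rkey for a one-segment transfer, fast-register memory registration otherwise, and a separate signature-MR path when protection information is in play. The guards are only partly visible in this listing, so the sketch below hedges them behind a simplified context struct; all demo_* names and thresholds are assumptions.

```c
/*
 * Hedged sketch of the mapping choice suggested by the map_sg_* matches
 * above. The guard conditions are simplified; the real driver's checks
 * (segment limits, PI setup, rkey availability) are only partly visible
 * in this listing.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum demo_mapping { DEMO_MAP_INLINE, DEMO_MAP_SINGLE, DEMO_MAP_FR, DEMO_MAP_PI };

struct demo_map_ctx {
	bool pi_enabled;	/* request carries integrity metadata */
	bool is_write;
	bool io_queue;		/* queue index != 0 (not the admin queue) */
	bool use_inline_data;	/* controller advertised in-capsule data */
	bool global_rkey;	/* PD exposes an all-physical rkey */
	int nr_segments;
	size_t data_len;
	size_t inline_size;	/* capsule space left after the SQE */
};

static enum demo_mapping demo_pick_mapping(const struct demo_map_ctx *c)
{
	if (c->pi_enabled)
		return DEMO_MAP_PI;		/* signature MR pool */
	if (c->is_write && c->io_queue && c->use_inline_data &&
	    c->data_len <= c->inline_size)
		return DEMO_MAP_INLINE;		/* data rides in the capsule */
	if (c->nr_segments == 1 && c->global_rkey)
		return DEMO_MAP_SINGLE;		/* one SGE, global rkey */
	return DEMO_MAP_FR;			/* fast-register an MR */
}

int main(void)
{
	struct demo_map_ctx small_write = {
		.is_write = true, .io_queue = true, .use_inline_data = true,
		.nr_segments = 1, .data_len = 512, .inline_size = 4096,
	};

	printf("small write -> %d (expect %d)\n",
	       demo_pick_mapping(&small_write), DEMO_MAP_INLINE);
	return 0;
}
```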
1613 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, in nvme_rdma_post_send() argument
1622 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_send()
1636 ret = ib_post_send(queue->qp, first, NULL); in nvme_rdma_post_send()
1638 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_send()
1644 static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue, in nvme_rdma_post_recv() argument
1653 list.lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_recv()
1662 ret = ib_post_recv(queue->qp, &wr, NULL); in nvme_rdma_post_recv()
1664 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_recv()
1670 static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) in nvme_rdma_tagset() argument
1672 u32 queue_idx = nvme_rdma_queue_idx(queue); in nvme_rdma_tagset()
1675 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_rdma_tagset()
1676 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
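nvme_rdma_tagset() encodes the tag-set split: queue index 0 resolves to the admin tag set, and I/O queue N maps to entry N - 1 of the I/O tag set. A tiny sketch of that indexing, with illustrative stand-in arrays:

```c
/*
 * Sketch of the tag-set indexing in nvme_rdma_tagset(): the admin queue
 * (index 0) has its own tag set; I/O queue N maps to I/O tags[N - 1].
 * The arrays here are illustrative stand-ins.
 */
#include <stdio.h>

struct demo_tags { const char *name; };

static struct demo_tags admin_tags[1] = { { "admin" } };
static struct demo_tags io_tags[4]    = { { "io0" }, { "io1" }, { "io2" }, { "io3" } };

static struct demo_tags *demo_tagset(unsigned int queue_idx)
{
	if (queue_idx == 0)
		return &admin_tags[queue_idx];
	return &io_tags[queue_idx - 1];
}

int main(void)
{
	printf("queue 0 -> %s, queue 3 -> %s\n",
	       demo_tagset(0)->name, demo_tagset(3)->name);
	return 0;
}
```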
1688 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_submit_async_event() local
1689 struct ib_device *dev = queue->device->dev; in nvme_rdma_submit_async_event()
1708 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); in nvme_rdma_submit_async_event()
1712 static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, in nvme_rdma_process_nvme_rsp() argument
1718 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id); in nvme_rdma_process_nvme_rsp()
1720 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1722 cqe->command_id, queue->qp->qp_num); in nvme_rdma_process_nvme_rsp()
1723 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1734 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1737 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1742 ret = nvme_rdma_inv_rkey(queue, req); in nvme_rdma_process_nvme_rsp()
1744 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1747 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1760 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_recv_done() local
1761 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_recv_done()
1772 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_recv_done()
1774 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_recv_done()
1781 * survive any kind of queue freeze and often don't respond to in nvme_rdma_recv_done()
1785 if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue), in nvme_rdma_recv_done()
1787 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_rdma_recv_done()
1790 nvme_rdma_process_nvme_rsp(queue, cqe, wc); in nvme_rdma_recv_done()
1793 nvme_rdma_post_recv(queue, qe); in nvme_rdma_recv_done()
1796 static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) in nvme_rdma_conn_established() argument
1800 for (i = 0; i < queue->queue_size; i++) { in nvme_rdma_conn_established()
1801 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); in nvme_rdma_conn_established()
1809 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, in nvme_rdma_conn_rejected() argument
1812 struct rdma_cm_id *cm_id = queue->cm_id; in nvme_rdma_conn_rejected()
1824 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1828 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1835 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) in nvme_rdma_addr_resolved() argument
1837 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; in nvme_rdma_addr_resolved()
1840 ret = nvme_rdma_create_queue_ib(queue); in nvme_rdma_addr_resolved()
1845 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); in nvme_rdma_addr_resolved()
1846 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS); in nvme_rdma_addr_resolved()
1849 queue->cm_error); in nvme_rdma_addr_resolved()
1856 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_addr_resolved()
1860 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) in nvme_rdma_route_resolved() argument
1862 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_route_resolved()
1867 param.qp_num = queue->qp->qp_num; in nvme_rdma_route_resolved()
1870 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; in nvme_rdma_route_resolved()
1878 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); in nvme_rdma_route_resolved()
1880 * set the admin queue depth to the minimum size in nvme_rdma_route_resolved()
1892 priv.hrqsize = cpu_to_le16(queue->queue_size); in nvme_rdma_route_resolved()
1893 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
1896 ret = rdma_connect_locked(queue->cm_id, &param); in nvme_rdma_route_resolved()
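nvme_rdma_route_resolved() fills the CM connect request's private data with the queue id and the host receive/send queue sizes (hrqsize/hsqsize), taking the queue depth and the controller's sqsize for I/O queues and, per the comment in the matches, a minimum depth for the admin queue; the fields are converted with cpu_to_le16(). The sketch below shapes equivalent values in a stand-in struct; the struct itself, the demo_* names, and the admin minimum are illustrative assumptions, not the wire format.

```c
/*
 * Hedged sketch of the CM connect private data visible above: 16-bit
 * qid, hrqsize, and hsqsize. The struct is an illustrative stand-in,
 * and the admin queue depth appears only as "some fixed minimum" per
 * the comment in the listing.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ADMIN_QUEUE_MIN_DEPTH 32	/* placeholder "minimum size" */

struct demo_cm_req {
	uint16_t qid;		/* nvme_rdma_queue_idx(queue) */
	uint16_t hrqsize;	/* host receive queue size */
	uint16_t hsqsize;	/* host send queue size */
};

static struct demo_cm_req demo_fill_cm_req(uint16_t qid, uint16_t queue_size,
					   uint16_t ctrl_sqsize)
{
	struct demo_cm_req req = { .qid = qid };

	if (qid == 0) {
		/* admin queue: clamp to a fixed minimum depth */
		req.hrqsize = DEMO_ADMIN_QUEUE_MIN_DEPTH;
		req.hsqsize = DEMO_ADMIN_QUEUE_MIN_DEPTH;
	} else {
		/* I/O queue: advertise the queue depth and controller sqsize */
		req.hrqsize = queue_size;
		req.hsqsize = ctrl_sqsize;
	}
	return req;
}

int main(void)
{
	struct demo_cm_req io = demo_fill_cm_req(3, 128, 127);

	printf("qid=%d hrqsize=%d hsqsize=%d\n", io.qid, io.hrqsize, io.hsqsize);
	return 0;
}
```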
1909 struct nvme_rdma_queue *queue = cm_id->context; in nvme_rdma_cm_handler() local
1912 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", in nvme_rdma_cm_handler()
1918 cm_error = nvme_rdma_addr_resolved(queue); in nvme_rdma_cm_handler()
1921 cm_error = nvme_rdma_route_resolved(queue); in nvme_rdma_cm_handler()
1924 queue->cm_error = nvme_rdma_conn_established(queue); in nvme_rdma_cm_handler()
1926 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1929 cm_error = nvme_rdma_conn_rejected(queue, ev); in nvme_rdma_cm_handler()
1935 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1942 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1944 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1950 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1952 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1957 queue->cm_error = cm_error; in nvme_rdma_cm_handler()
1958 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1967 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_timed_out() local
1969 nvme_rdma_stop_queue(queue); in nvme_rdma_complete_timed_out()
1976 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_timeout() local
1977 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_timeout()
1980 rq->tag, nvme_rdma_queue_idx(queue)); in nvme_rdma_timeout()
2011 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_rdma_queue_rq()
2012 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_queue_rq() local
2018 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_queue_rq()
2024 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_rdma_queue_rq()
2025 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_rdma_queue_rq()
2027 dev = queue->device->dev; in nvme_rdma_queue_rq()
2046 queue->pi_support && in nvme_rdma_queue_rq()
2054 err = nvme_rdma_map_data(queue, rq, c); in nvme_rdma_queue_rq()
2056 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_queue_rq()
2066 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, in nvme_rdma_queue_rq()
2074 nvme_rdma_unmap_data(queue, rq); in nvme_rdma_queue_rq()
2091 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_poll() local
2093 return ib_process_cq_direct(queue->ib_cq, -1); in nvme_rdma_poll()
2130 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_rq() local
2131 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_complete_rq()
2136 nvme_rdma_unmap_data(queue, rq); in nvme_rdma_complete_rq()