Lines Matching full:queue

73 	struct nvme_rdma_queue  *queue;  member
159 static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) in nvme_rdma_queue_idx() argument
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
164 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) in nvme_rdma_poll_queue() argument
166 return nvme_rdma_queue_idx(queue) > in nvme_rdma_poll_queue()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
171 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) in nvme_rdma_inline_data_size() argument
173 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_rdma_inline_data_size()
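
The helpers matched above reduce to simple arithmetic: the queue index is recovered by pointer subtraction from ctrl->queues, a queue counts as a poll queue when its index falls past the default and read I/O queue sets, and the inline data budget is whatever capsule space remains after the command itself. Below is a standalone userspace sketch of that arithmetic only; the demo_* names and the literal 64 are stand-ins for the kernel's nvme_rdma_queue/nvme_rdma_ctrl types and sizeof(struct nvme_command), not the driver code.

```c
/* Illustrative userspace sketch; demo_* structs and the literal 64 are
 * stand-ins for the kernel types, not the real driver definitions. */
#include <stdio.h>
#include <stddef.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

struct demo_queue;

struct demo_ctrl {
	int io_queues[HCTX_MAX_TYPES];
	struct demo_queue *queues;	/* entry 0 is the admin queue */
};

struct demo_queue {
	struct demo_ctrl *ctrl;
	size_t cmnd_capsule_len;
};

/* Index recovered by pointer subtraction from the start of the array. */
static int demo_queue_idx(struct demo_queue *queue)
{
	return queue - queue->ctrl->queues;
}

/* Poll queues are the ones allocated after the default and read sets. */
static int demo_poll_queue(struct demo_queue *queue)
{
	return demo_queue_idx(queue) >
	       queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
	       queue->ctrl->io_queues[HCTX_TYPE_READ];
}

/* Inline data is the capsule space left after the 64-byte command. */
static size_t demo_inline_data_size(struct demo_queue *queue)
{
	return queue->cmnd_capsule_len - 64;	/* 64 stands in for sizeof(struct nvme_command) */
}

int main(void)
{
	struct demo_queue queues[8];
	struct demo_ctrl ctrl = {
		.io_queues = {
			[HCTX_TYPE_DEFAULT] = 4,
			[HCTX_TYPE_READ] = 2,
			[HCTX_TYPE_POLL] = 1,
		},
		.queues = queues,
	};
	int i;

	for (i = 0; i < 8; i++) {
		queues[i].ctrl = &ctrl;
		queues[i].cmnd_capsule_len = 256 * 16;	/* ioccsz * 16 with ioccsz == 256 */
	}

	/* Indices 1-4 are default queues, 5-6 read, 7 poll (index 0 is admin). */
	printf("idx=%d poll=%d inline=%zu\n",
	       demo_queue_idx(&queues[7]),
	       demo_poll_queue(&queues[7]),
	       demo_inline_data_size(&queues[7]));
	return 0;
}
```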
223 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue in nvme_rdma_alloc_ring()
225 * will issue error recovery and queue re-creation. in nvme_rdma_alloc_ring()
246 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) in nvme_rdma_wait_for_cm() argument
250 ret = wait_for_completion_interruptible_timeout(&queue->cm_done, in nvme_rdma_wait_for_cm()
256 WARN_ON_ONCE(queue->cm_error > 0); in nvme_rdma_wait_for_cm()
257 return queue->cm_error; in nvme_rdma_wait_for_cm()
260 static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) in nvme_rdma_create_qp() argument
262 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_create_qp()
269 init_attr.cap.max_send_wr = factor * queue->queue_size + 1; in nvme_rdma_create_qp()
271 init_attr.cap.max_recv_wr = queue->queue_size + 1; in nvme_rdma_create_qp()
276 init_attr.send_cq = queue->ib_cq; in nvme_rdma_create_qp()
277 init_attr.recv_cq = queue->ib_cq; in nvme_rdma_create_qp()
278 if (queue->pi_support) in nvme_rdma_create_qp()
280 init_attr.qp_context = queue; in nvme_rdma_create_qp()
282 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); in nvme_rdma_create_qp()
284 queue->qp = queue->cm_id->qp; in nvme_rdma_create_qp()
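
The QP setup lines above size the work-request capacities from the queue depth: several send work requests per command slot (the factor), one receive work request per slot, and one extra entry in each direction, with send and receive completions sharing a single CQ (sized further down in this listing as cq_factor * queue_size + 1). A toy helper reproducing just that arithmetic follows; the factor values passed in main() are illustrative, since the listing does not show what send_wr_factor or cq_factor evaluate to.

```c
/* Toy sizing helper mirroring the "+ 1" arithmetic in the lines above;
 * the factor values used in main() are illustrative only. */
#include <stdio.h>

struct demo_qp_sizes {
	int max_send_wr;	/* factor * queue_size + 1 */
	int max_recv_wr;	/* queue_size + 1 */
	int cq_size;		/* cq_factor * queue_size + 1, shared by send and recv */
};

static struct demo_qp_sizes demo_size_queue(int queue_size, int send_wr_factor,
					    int cq_factor)
{
	struct demo_qp_sizes s = {
		.max_send_wr = send_wr_factor * queue_size + 1,
		.max_recv_wr = queue_size + 1,
		.cq_size     = cq_factor * queue_size + 1,
	};
	return s;
}

int main(void)
{
	/* 128-deep queue with illustrative factors of 3 (send) and 4 (CQ). */
	struct demo_qp_sizes s = demo_size_queue(128, 3, 4);

	printf("max_send_wr=%d max_recv_wr=%d cq_size=%d\n",
	       s.max_send_wr, s.max_recv_wr, s.cq_size);
	return 0;
}
```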
303 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request() local
311 if (queue->pi_support) in nvme_rdma_init_request()
316 req->queue = queue; in nvme_rdma_init_request()
326 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx() local
330 hctx->driver_data = queue; in nvme_rdma_init_hctx()
338 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx() local
342 hctx->driver_data = queue; in nvme_rdma_init_admin_hctx()
416 static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue) in nvme_rdma_free_cq() argument
418 if (nvme_rdma_poll_queue(queue)) in nvme_rdma_free_cq()
419 ib_free_cq(queue->ib_cq); in nvme_rdma_free_cq()
421 ib_cq_pool_put(queue->ib_cq, queue->cq_size); in nvme_rdma_free_cq()
424 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) in nvme_rdma_destroy_queue_ib() argument
429 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags)) in nvme_rdma_destroy_queue_ib()
432 dev = queue->device; in nvme_rdma_destroy_queue_ib()
435 if (queue->pi_support) in nvme_rdma_destroy_queue_ib()
436 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_destroy_queue_ib()
437 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_destroy_queue_ib()
444 ib_destroy_qp(queue->qp); in nvme_rdma_destroy_queue_ib()
445 nvme_rdma_free_cq(queue); in nvme_rdma_destroy_queue_ib()
447 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_destroy_queue_ib()
466 struct nvme_rdma_queue *queue) in nvme_rdma_create_cq() argument
468 int ret, comp_vector, idx = nvme_rdma_queue_idx(queue); in nvme_rdma_create_cq()
472 * Spread I/O queues completion vectors according to their queue index. in nvme_rdma_create_cq()
478 if (nvme_rdma_poll_queue(queue)) { in nvme_rdma_create_cq()
480 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, in nvme_rdma_create_cq()
484 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, in nvme_rdma_create_cq()
488 if (IS_ERR(queue->ib_cq)) { in nvme_rdma_create_cq()
489 ret = PTR_ERR(queue->ib_cq); in nvme_rdma_create_cq()
496 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) in nvme_rdma_create_queue_ib() argument
503 queue->device = nvme_rdma_find_get_device(queue->cm_id); in nvme_rdma_create_queue_ib()
504 if (!queue->device) { in nvme_rdma_create_queue_ib()
505 dev_err(queue->cm_id->device->dev.parent, in nvme_rdma_create_queue_ib()
509 ibdev = queue->device->dev; in nvme_rdma_create_queue_ib()
512 queue->cq_size = cq_factor * queue->queue_size + 1; in nvme_rdma_create_queue_ib()
514 ret = nvme_rdma_create_cq(ibdev, queue); in nvme_rdma_create_queue_ib()
518 ret = nvme_rdma_create_qp(queue, send_wr_factor); in nvme_rdma_create_queue_ib()
522 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, in nvme_rdma_create_queue_ib()
524 if (!queue->rsp_ring) { in nvme_rdma_create_queue_ib()
534 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; in nvme_rdma_create_queue_ib()
535 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, in nvme_rdma_create_queue_ib()
536 queue->queue_size, in nvme_rdma_create_queue_ib()
540 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
542 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
546 if (queue->pi_support) { in nvme_rdma_create_queue_ib()
547 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, in nvme_rdma_create_queue_ib()
548 queue->queue_size, IB_MR_TYPE_INTEGRITY, in nvme_rdma_create_queue_ib()
551 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
553 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
558 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); in nvme_rdma_create_queue_ib()
563 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_create_queue_ib()
565 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_create_queue_ib()
568 rdma_destroy_qp(queue->cm_id); in nvme_rdma_create_queue_ib()
570 nvme_rdma_free_cq(queue); in nvme_rdma_create_queue_ib()
572 nvme_rdma_dev_put(queue->device); in nvme_rdma_create_queue_ib()
579 struct nvme_rdma_queue *queue; in nvme_rdma_alloc_queue() local
583 queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
584 mutex_init(&queue->queue_lock); in nvme_rdma_alloc_queue()
585 queue->ctrl = ctrl; in nvme_rdma_alloc_queue()
587 queue->pi_support = true; in nvme_rdma_alloc_queue()
589 queue->pi_support = false; in nvme_rdma_alloc_queue()
590 init_completion(&queue->cm_done); in nvme_rdma_alloc_queue()
593 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_rdma_alloc_queue()
595 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_rdma_alloc_queue()
597 queue->queue_size = queue_size; in nvme_rdma_alloc_queue()
599 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, in nvme_rdma_alloc_queue()
601 if (IS_ERR(queue->cm_id)) { in nvme_rdma_alloc_queue()
603 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); in nvme_rdma_alloc_queue()
604 ret = PTR_ERR(queue->cm_id); in nvme_rdma_alloc_queue()
611 queue->cm_error = -ETIMEDOUT; in nvme_rdma_alloc_queue()
612 ret = rdma_resolve_addr(queue->cm_id, src_addr, in nvme_rdma_alloc_queue()
621 ret = nvme_rdma_wait_for_cm(queue); in nvme_rdma_alloc_queue()
628 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); in nvme_rdma_alloc_queue()
633 rdma_destroy_id(queue->cm_id); in nvme_rdma_alloc_queue()
634 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_alloc_queue()
636 mutex_destroy(&queue->queue_lock); in nvme_rdma_alloc_queue()
640 static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) in __nvme_rdma_stop_queue() argument
642 rdma_disconnect(queue->cm_id); in __nvme_rdma_stop_queue()
643 ib_drain_qp(queue->qp); in __nvme_rdma_stop_queue()
646 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) in nvme_rdma_stop_queue() argument
648 mutex_lock(&queue->queue_lock); in nvme_rdma_stop_queue()
649 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) in nvme_rdma_stop_queue()
650 __nvme_rdma_stop_queue(queue); in nvme_rdma_stop_queue()
651 mutex_unlock(&queue->queue_lock); in nvme_rdma_stop_queue()
654 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) in nvme_rdma_free_queue() argument
656 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_free_queue()
659 rdma_destroy_id(queue->cm_id); in nvme_rdma_free_queue()
660 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_free_queue()
661 mutex_destroy(&queue->queue_lock); in nvme_rdma_free_queue()
682 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; in nvme_rdma_start_queue() local
691 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_start_queue()
693 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_start_queue()
694 __nvme_rdma_stop_queue(queue); in nvme_rdma_start_queue()
696 "failed to connect queue: %d ret=%d\n", idx, ret); in nvme_rdma_start_queue()
763 * sufficient queue count to have dedicated default queues. in nvme_rdma_alloc_io_queues()
877 * Bind the async event SQE DMA mapping to the admin queue lifetime. in nvme_rdma_configure_admin_queue()
879 * error recovery and queue re-creation. in nvme_rdma_configure_admin_queue()
1230 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_wr_error() local
1231 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_wr_error()
1258 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, in nvme_rdma_inv_rkey() argument
1272 return ib_post_send(queue->qp, &wr, NULL); in nvme_rdma_inv_rkey()
1275 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, in nvme_rdma_unmap_data() argument
1279 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_unmap_data()
1281 struct list_head *pool = &queue->qp->rdma_mrs; in nvme_rdma_unmap_data()
1294 pool = &queue->qp->sig_mrs; in nvme_rdma_unmap_data()
1297 ib_mr_pool_put(queue->qp, pool, req->mr); in nvme_rdma_unmap_data()
1317 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_inline() argument
1330 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_map_sg_inline()
1335 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_rdma_map_sg_inline()
1343 static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_single() argument
1350 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); in nvme_rdma_map_sg_single()
1355 static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_fr() argument
1362 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_map_sg_fr()
1373 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); in nvme_rdma_map_sg_fr()
1459 static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_pi() argument
1471 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_map_sg_pi()
1507 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); in nvme_rdma_map_sg_pi()
1514 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, in nvme_rdma_map_data() argument
1518 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_map_data()
1573 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count); in nvme_rdma_map_data()
1578 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && in nvme_rdma_map_data()
1579 queue->ctrl->use_inline_data && in nvme_rdma_map_data()
1581 nvme_rdma_inline_data_size(queue)) { in nvme_rdma_map_data()
1582 ret = nvme_rdma_map_sg_inline(queue, req, c, count); in nvme_rdma_map_data()
1587 ret = nvme_rdma_map_sg_single(queue, req, c); in nvme_rdma_map_data()
1592 ret = nvme_rdma_map_sg_fr(queue, req, c, count); in nvme_rdma_map_data()
1628 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, in nvme_rdma_post_send() argument
1637 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_send()
1651 ret = ib_post_send(queue->qp, first, NULL); in nvme_rdma_post_send()
1653 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_send()
1659 static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue, in nvme_rdma_post_recv() argument
1668 list.lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_recv()
1677 ret = ib_post_recv(queue->qp, &wr, NULL); in nvme_rdma_post_recv()
1679 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_recv()
1685 static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) in nvme_rdma_tagset() argument
1687 u32 queue_idx = nvme_rdma_queue_idx(queue); in nvme_rdma_tagset()
1690 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_rdma_tagset()
1691 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
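
The tag-set lookup matched above maps queue index 0 to the admin tag set and shifts every I/O queue down by one, because the I/O tag set has no slot for the admin queue. Below is a minimal standalone sketch of that mapping; the string arrays are stand-ins for the blk-mq tag structures, not the driver's types.

```c
/* Standalone sketch of the tag-set lookup: index 0 is the admin queue,
 * I/O queue n uses slot n - 1 of the I/O tag set. String arrays stand in
 * for the blk-mq tag structures. */
#include <stdio.h>

#define DEMO_NR_IO_QUEUES 4

static const char *admin_tags[1] = { "admin" };
static const char *io_tags[DEMO_NR_IO_QUEUES] = { "io0", "io1", "io2", "io3" };

static const char *demo_tagset(unsigned int queue_idx)
{
	if (queue_idx == 0)
		return admin_tags[queue_idx];
	return io_tags[queue_idx - 1];
}

int main(void)
{
	unsigned int i;

	for (i = 0; i <= DEMO_NR_IO_QUEUES; i++)
		printf("queue %u -> %s\n", i, demo_tagset(i));
	return 0;
}
```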
1703 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_submit_async_event() local
1704 struct ib_device *dev = queue->device->dev; in nvme_rdma_submit_async_event()
1723 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); in nvme_rdma_submit_async_event()
1727 static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, in nvme_rdma_process_nvme_rsp() argument
1733 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id); in nvme_rdma_process_nvme_rsp()
1735 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1737 cqe->command_id, queue->qp->qp_num); in nvme_rdma_process_nvme_rsp()
1738 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1749 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1752 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1757 ret = nvme_rdma_inv_rkey(queue, req); in nvme_rdma_process_nvme_rsp()
1759 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1762 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1775 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_recv_done() local
1776 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_recv_done()
1787 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_recv_done()
1789 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_recv_done()
1796 * survive any kind of queue freeze and often don't respond to in nvme_rdma_recv_done()
1800 if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue), in nvme_rdma_recv_done()
1802 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_rdma_recv_done()
1805 nvme_rdma_process_nvme_rsp(queue, cqe, wc); in nvme_rdma_recv_done()
1808 nvme_rdma_post_recv(queue, qe); in nvme_rdma_recv_done()
1811 static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) in nvme_rdma_conn_established() argument
1815 for (i = 0; i < queue->queue_size; i++) { in nvme_rdma_conn_established()
1816 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); in nvme_rdma_conn_established()
1824 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, in nvme_rdma_conn_rejected() argument
1827 struct rdma_cm_id *cm_id = queue->cm_id; in nvme_rdma_conn_rejected()
1839 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1843 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1850 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) in nvme_rdma_addr_resolved() argument
1852 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; in nvme_rdma_addr_resolved()
1855 ret = nvme_rdma_create_queue_ib(queue); in nvme_rdma_addr_resolved()
1860 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); in nvme_rdma_addr_resolved()
1861 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS); in nvme_rdma_addr_resolved()
1864 queue->cm_error); in nvme_rdma_addr_resolved()
1871 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_addr_resolved()
1875 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) in nvme_rdma_route_resolved() argument
1877 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_route_resolved()
1882 param.qp_num = queue->qp->qp_num; in nvme_rdma_route_resolved()
1885 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; in nvme_rdma_route_resolved()
1893 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); in nvme_rdma_route_resolved()
1895 * set the admin queue depth to the minimum size in nvme_rdma_route_resolved()
1907 priv.hrqsize = cpu_to_le16(queue->queue_size); in nvme_rdma_route_resolved()
1908 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
1911 ret = rdma_connect_locked(queue->cm_id, &param); in nvme_rdma_route_resolved()
1924 struct nvme_rdma_queue *queue = cm_id->context; in nvme_rdma_cm_handler() local
1927 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", in nvme_rdma_cm_handler()
1933 cm_error = nvme_rdma_addr_resolved(queue); in nvme_rdma_cm_handler()
1936 cm_error = nvme_rdma_route_resolved(queue); in nvme_rdma_cm_handler()
1939 queue->cm_error = nvme_rdma_conn_established(queue); in nvme_rdma_cm_handler()
1941 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1944 cm_error = nvme_rdma_conn_rejected(queue, ev); in nvme_rdma_cm_handler()
1950 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1957 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1959 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1965 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1967 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1972 queue->cm_error = cm_error; in nvme_rdma_cm_handler()
1973 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1982 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_timed_out() local
1984 nvme_rdma_stop_queue(queue); in nvme_rdma_complete_timed_out()
1995 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_timeout() local
1996 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_timeout()
1999 rq->tag, nvme_rdma_queue_idx(queue)); in nvme_rdma_timeout()
2030 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_rdma_queue_rq()
2031 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_queue_rq() local
2037 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_queue_rq()
2043 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_rdma_queue_rq()
2044 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_rdma_queue_rq()
2046 dev = queue->device->dev; in nvme_rdma_queue_rq()
2065 queue->pi_support && in nvme_rdma_queue_rq()
2073 err = nvme_rdma_map_data(queue, rq, c); in nvme_rdma_queue_rq()
2075 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_queue_rq()
2085 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, in nvme_rdma_queue_rq()
2093 nvme_rdma_unmap_data(queue, rq); in nvme_rdma_queue_rq()
2110 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_poll() local
2112 return ib_process_cq_direct(queue->ib_cq, -1); in nvme_rdma_poll()
2149 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_rq() local
2150 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_complete_rq()
2155 nvme_rdma_unmap_data(queue, rq); in nvme_rdma_complete_rq()