Lines matching full:queue (identifier search results from the NVMe-oF target RDMA transport, nvmet_rdma)
47 struct nvmet_rdma_queue *queue; member
62 struct nvmet_rdma_queue *queue; member
144 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
154 MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
169 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
209 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
214 spin_lock_irqsave(&queue->rsps_lock, flags); in nvmet_rdma_get_rsp()
215 rsp = list_first_entry_or_null(&queue->free_rsps, in nvmet_rdma_get_rsp()
219 spin_unlock_irqrestore(&queue->rsps_lock, flags); in nvmet_rdma_get_rsp()
227 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); in nvmet_rdma_get_rsp()
245 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
250 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
251 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); in nvmet_rdma_put_rsp()
252 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
450 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_alloc_rsps() argument
452 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_alloc_rsps()
453 int nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_alloc_rsps()
456 queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), in nvmet_rdma_alloc_rsps()
458 if (!queue->rsps) in nvmet_rdma_alloc_rsps()
462 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
468 list_add_tail(&rsp->free_list, &queue->free_rsps); in nvmet_rdma_alloc_rsps()
475 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
480 kfree(queue->rsps); in nvmet_rdma_alloc_rsps()
485 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_rsps() argument
487 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_free_rsps()
488 int i, nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_free_rsps()
491 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_free_rsps()
496 kfree(queue->rsps); in nvmet_rdma_free_rsps()
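
Taken together, the nvmet_rdma_alloc_rsps(), nvmet_rdma_free_rsps(), nvmet_rdma_get_rsp() and nvmet_rdma_put_rsp() matches above describe one mechanism: each queue pre-allocates 2 * recv_queue_size response descriptors, chains them onto free_rsps, hands them out under rsps_lock, and falls back to a one-off allocation (freed again on put) if the pool ever runs dry. Below is a minimal userspace sketch of that pattern, not the driver code itself: a pthread mutex stands in for the spinlock, calloc()/free() stand in for nvmet_rdma_alloc_rsp()/nvmet_rdma_free_rsp(), and the names rsp_pool, get_rsp and put_rsp are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rsp {
    struct rsp *next;   /* free-list linkage (free_list in the driver) */
    bool allocated;     /* set when the descriptor came from the fallback path */
};

struct rsp_pool {
    pthread_mutex_t lock;   /* role of queue->rsps_lock */
    struct rsp *free;       /* role of queue->free_rsps */
    struct rsp *rsps;       /* role of queue->rsps (the preallocated array) */
    int nr_rsps;
};

/* Pre-allocate the pool, mirroring nr_rsps = recv_queue_size * 2. */
static int pool_init(struct rsp_pool *p, int recv_queue_size)
{
    p->nr_rsps = recv_queue_size * 2;
    p->rsps = calloc(p->nr_rsps, sizeof(*p->rsps));
    if (!p->rsps)
        return -1;
    pthread_mutex_init(&p->lock, NULL);
    p->free = NULL;
    for (int i = 0; i < p->nr_rsps; i++) {
        p->rsps[i].next = p->free;
        p->free = &p->rsps[i];
    }
    return 0;
}

/* Take a descriptor; fall back to the heap if the pool is empty. */
static struct rsp *get_rsp(struct rsp_pool *p)
{
    struct rsp *r;

    pthread_mutex_lock(&p->lock);
    r = p->free;
    if (r)
        p->free = r->next;
    pthread_mutex_unlock(&p->lock);

    if (!r) {                       /* pool exhausted: one-off allocation */
        r = calloc(1, sizeof(*r));
        if (r)
            r->allocated = true;
    }
    return r;
}

/* Return a descriptor: free one-offs, re-link pooled ones. */
static void put_rsp(struct rsp_pool *p, struct rsp *r)
{
    if (r->allocated) {
        free(r);
        return;
    }
    pthread_mutex_lock(&p->lock);
    r->next = p->free;
    p->free = r;
    pthread_mutex_unlock(&p->lock);
}

int main(void)
{
    struct rsp_pool pool;

    if (pool_init(&pool, 128))
        return 1;
    struct rsp *r = get_rsp(&pool);
    printf("got %p (fallback: %d)\n", (void *)r, r->allocated);
    put_rsp(&pool, r);
    free(pool.rsps);
    return 0;
}
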
511 ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); in nvmet_rdma_post_recv()
519 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) in nvmet_rdma_process_wr_wait_list() argument
521 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
522 while (!list_empty(&queue->rsp_wr_wait_list)) { in nvmet_rdma_process_wr_wait_list()
526 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
530 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
532 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
535 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
539 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
634 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init()
653 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy()
668 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp() local
670 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
678 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) in nvmet_rdma_release_rsp()
679 nvmet_rdma_process_wr_wait_list(queue); in nvmet_rdma_release_rsp()
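
The nvmet_rdma_process_wr_wait_list() and nvmet_rdma_release_rsp() matches show the back-pressure path: a completing response returns its 1 + n_rdma send work-request credits, and any commands parked on rsp_wr_wait_list are then re-driven, with rsp_wr_wait_lock dropped around each attempted post and the entry put back at the head on failure. A condensed userspace sketch of that drain loop follows; try_execute() is a hypothetical stand-in for nvmet_rdma_execute_command() and the locking is simplified to a plain mutex.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct waiter {
    struct waiter *next;        /* role of rsp->wait_list */
};

struct wait_queue {
    pthread_mutex_t lock;       /* role of queue->rsp_wr_wait_lock */
    struct waiter *head;        /* role of queue->rsp_wr_wait_list */
};

/* Hypothetical stand-in for nvmet_rdma_execute_command(): returns false
 * while there are still not enough send-WR credits for this command. */
static bool try_execute(struct waiter *w)
{
    (void)w;
    return true;
}

/* Re-drive parked commands after credits were returned: pop one entry,
 * drop the lock around the post attempt, and on failure put it back at
 * the head of the list and stop. */
static void process_wait_list(struct wait_queue *q)
{
    pthread_mutex_lock(&q->lock);
    while (q->head) {
        struct waiter *w = q->head;
        bool posted;

        q->head = w->next;
        pthread_mutex_unlock(&q->lock);
        posted = try_execute(w);
        pthread_mutex_lock(&q->lock);

        if (!posted) {
            w->next = q->head;  /* still short on credits: park it again */
            q->head = w;
            break;
        }
    }
    pthread_mutex_unlock(&q->lock);
}

int main(void)
{
    struct wait_queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };
    struct waiter w = { NULL };

    q.head = &w;
    process_wait_list(&q);
    printf("wait list empty: %d\n", q.head == NULL);
    return 0;
}
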
684 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue) in nvmet_rdma_error_comp() argument
686 if (queue->nvme_sq.ctrl) { in nvmet_rdma_error_comp()
687 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_rdma_error_comp()
692 * cleanup the queue in nvmet_rdma_error_comp()
694 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_error_comp()
702 struct nvmet_rdma_queue *queue = cq->cq_context; in nvmet_rdma_send_done() local
710 nvmet_rdma_error_comp(queue); in nvmet_rdma_send_done()
718 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
739 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
741 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
755 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_read_data_done() local
759 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
769 nvmet_rdma_error_comp(queue); in nvmet_rdma_read_data_done()
788 struct nvmet_rdma_queue *queue = cq->cq_context; in nvmet_rdma_write_data_done() local
789 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_write_data_done()
796 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_write_data_done()
807 nvmet_rdma_error_comp(queue); in nvmet_rdma_write_data_done()
864 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
953 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command() local
956 &queue->sq_wr_avail) < 0)) { in nvmet_rdma_execute_command()
957 pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n", in nvmet_rdma_execute_command()
958 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
959 queue->nvme_sq.ctrl->cntlid); in nvmet_rdma_execute_command()
960 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
965 if (rdma_rw_ctx_post(&rsp->rw, queue->qp, in nvmet_rdma_execute_command()
966 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
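
The nvmet_rdma_execute_command() matches, together with the atomic_add() calls in the completion handlers above, show the send-queue credit scheme: each command needs 1 + n_rdma work requests, that count is subtracted from sq_wr_avail up front, and a negative result means the command must return the credits and back off onto the wait list. A small C11 sketch of that accounting (atomic_fetch_sub() in place of the kernel's atomic_sub_return(); the function names and budget value are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sq_wr_avail;   /* role of queue->sq_wr_avail */

/* Try to reserve 1 + n_rdma send work requests; roll back on shortage. */
static bool reserve_send_wrs(int n_rdma)
{
    int need = 1 + n_rdma;

    /* atomic_fetch_sub() returns the old value, so old - need is the new value */
    if (atomic_fetch_sub(&sq_wr_avail, need) - need < 0) {
        atomic_fetch_add(&sq_wr_avail, need);   /* give the credits back */
        return false;                           /* caller parks on the wait list */
    }
    return true;
}

/* Completion path: return the credits, as nvmet_rdma_release_rsp() does. */
static void release_send_wrs(int n_rdma)
{
    atomic_fetch_add(&sq_wr_avail, 1 + n_rdma);
}

int main(void)
{
    atomic_store(&sq_wr_avail, 4);          /* e.g. qp_attr.cap.max_send_wr */
    printf("reserve 3 rdma ctxs: %d\n", reserve_send_wrs(3));  /* needs 4 -> ok    */
    printf("reserve 1 more:      %d\n", reserve_send_wrs(0));  /* needs 1 -> fails */
    release_send_wrs(3);
    printf("after release:       %d\n", reserve_send_wrs(0));  /* ok again         */
    return 0;
}
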
975 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, in nvmet_rdma_handle_command() argument
980 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
983 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
987 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, in nvmet_rdma_handle_command()
988 &queue->nvme_sq, &nvmet_rdma_ops)) in nvmet_rdma_handle_command()
996 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
997 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_handle_command()
998 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
1011 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_recv_done() local
1019 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
1026 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
1030 cmd->queue = queue; in nvmet_rdma_recv_done()
1031 rsp = nvmet_rdma_get_rsp(queue); in nvmet_rdma_recv_done()
1038 nvmet_rdma_post_recv(queue->dev, cmd); in nvmet_rdma_recv_done()
1041 rsp->queue = queue; in nvmet_rdma_recv_done()
1045 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
1048 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { in nvmet_rdma_recv_done()
1051 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_recv_done()
1052 if (queue->state == NVMET_RDMA_Q_CONNECTING) in nvmet_rdma_recv_done()
1053 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_done()
1056 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_recv_done()
1060 nvmet_rdma_handle_command(queue, rsp); in nvmet_rdma_recv_done()
1250 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_create_queue_ib() argument
1253 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_create_queue_ib()
1259 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; in nvmet_rdma_create_queue_ib()
1261 queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, in nvmet_rdma_create_queue_ib()
1262 queue->comp_vector, IB_POLL_WORKQUEUE); in nvmet_rdma_create_queue_ib()
1263 if (IS_ERR(queue->cq)) { in nvmet_rdma_create_queue_ib()
1264 ret = PTR_ERR(queue->cq); in nvmet_rdma_create_queue_ib()
1271 qp_attr.qp_context = queue; in nvmet_rdma_create_queue_ib()
1273 qp_attr.send_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1274 qp_attr.recv_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1278 qp_attr.cap.max_send_wr = queue->send_queue_size + 1; in nvmet_rdma_create_queue_ib()
1279 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num, in nvmet_rdma_create_queue_ib()
1281 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; in nvmet_rdma_create_queue_ib()
1285 if (queue->nsrq) { in nvmet_rdma_create_queue_ib()
1286 qp_attr.srq = queue->nsrq->srq; in nvmet_rdma_create_queue_ib()
1289 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; in nvmet_rdma_create_queue_ib()
1293 if (queue->port->pi_enable && queue->host_qid) in nvmet_rdma_create_queue_ib()
1296 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); in nvmet_rdma_create_queue_ib()
1301 queue->qp = queue->cm_id->qp; in nvmet_rdma_create_queue_ib()
1303 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); in nvmet_rdma_create_queue_ib()
1306 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, in nvmet_rdma_create_queue_ib()
1307 qp_attr.cap.max_send_wr, queue->cm_id); in nvmet_rdma_create_queue_ib()
1309 if (!queue->nsrq) { in nvmet_rdma_create_queue_ib()
1310 for (i = 0; i < queue->recv_queue_size; i++) { in nvmet_rdma_create_queue_ib()
1311 queue->cmds[i].queue = queue; in nvmet_rdma_create_queue_ib()
1312 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); in nvmet_rdma_create_queue_ib()
1322 rdma_destroy_qp(queue->cm_id); in nvmet_rdma_create_queue_ib()
1324 ib_cq_pool_put(queue->cq, nr_cqe + 1); in nvmet_rdma_create_queue_ib()
1328 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_destroy_queue_ib() argument
1330 ib_drain_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1331 if (queue->cm_id) in nvmet_rdma_destroy_queue_ib()
1332 rdma_destroy_id(queue->cm_id); in nvmet_rdma_destroy_queue_ib()
1333 ib_destroy_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1334 ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 * in nvmet_rdma_destroy_queue_ib()
1335 queue->send_queue_size + 1); in nvmet_rdma_destroy_queue_ib()
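
The nvmet_rdma_create_queue_ib() and nvmet_rdma_destroy_queue_ib() matches encode the per-queue sizing rules: the shared send/recv CQ covers recv_queue_size + 2 * send_queue_size completions (allocated and freed as nr_cqe + 1), the send queue gets send_queue_size + 1 WRs, RDMA R/W contexts scale with an MR factor, and the receive side is either the shared SRQ or 1 + recv_queue_size private recv WRs. The helper below merely restates that arithmetic so the numbers can be checked at a glance; the factor value is a placeholder, since in the driver it comes from rdma_rw_mr_factor().

#include <stdio.h>

struct queue_sizes {
    int nr_cqe;          /* recv_queue_size + 2 * send_queue_size               */
    int cq_entries;      /* nr_cqe + 1, matching the ib_cq_pool_get()/put() use */
    int max_send_wr;     /* send_queue_size + 1                                 */
    int max_rdma_ctxs;   /* send_queue_size * factor                            */
    int max_recv_wr;     /* 1 + recv_queue_size, or 0 when an SRQ is used       */
};

static struct queue_sizes compute_sizes(int recv_queue_size, int send_queue_size,
                                        int factor, int use_srq)
{
    struct queue_sizes s;

    s.nr_cqe = recv_queue_size + 2 * send_queue_size;
    s.cq_entries = s.nr_cqe + 1;
    s.max_send_wr = send_queue_size + 1;
    s.max_rdma_ctxs = send_queue_size * factor;
    s.max_recv_wr = use_srq ? 0 : 1 + recv_queue_size;
    return s;
}

int main(void)
{
    /* Example: a 128-deep I/O queue, factor of 1, no SRQ (values illustrative). */
    struct queue_sizes s = compute_sizes(128, 128, 1, 0);

    printf("cq=%d send_wr=%d rdma_ctxs=%d recv_wr=%d\n",
           s.cq_entries, s.max_send_wr, s.max_rdma_ctxs, s.max_recv_wr);
    return 0;
}
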
1338 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_queue() argument
1340 pr_debug("freeing queue %d\n", queue->idx); in nvmet_rdma_free_queue()
1342 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_free_queue()
1344 nvmet_rdma_destroy_queue_ib(queue); in nvmet_rdma_free_queue()
1345 if (!queue->nsrq) { in nvmet_rdma_free_queue()
1346 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_free_queue()
1347 queue->recv_queue_size, in nvmet_rdma_free_queue()
1348 !queue->host_qid); in nvmet_rdma_free_queue()
1350 nvmet_rdma_free_rsps(queue); in nvmet_rdma_free_queue()
1351 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_free_queue()
1352 kfree(queue); in nvmet_rdma_free_queue()
1357 struct nvmet_rdma_queue *queue = in nvmet_rdma_release_queue_work() local
1359 struct nvmet_rdma_device *dev = queue->dev; in nvmet_rdma_release_queue_work()
1361 nvmet_rdma_free_queue(queue); in nvmet_rdma_release_queue_work()
1368 struct nvmet_rdma_queue *queue) in nvmet_rdma_parse_cm_connect_req() argument
1379 queue->host_qid = le16_to_cpu(req->qid); in nvmet_rdma_parse_cm_connect_req()
1382 * req->hsqsize corresponds to our recv queue size plus 1 in nvmet_rdma_parse_cm_connect_req()
1383 * req->hrqsize corresponds to our send queue size in nvmet_rdma_parse_cm_connect_req()
1385 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; in nvmet_rdma_parse_cm_connect_req()
1386 queue->send_queue_size = le16_to_cpu(req->hrqsize); in nvmet_rdma_parse_cm_connect_req()
1388 if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) in nvmet_rdma_parse_cm_connect_req()
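
nvmet_rdma_parse_cm_connect_req() reads the NVMe-oF private data carried in the RDMA-CM connect request: hsqsize is zero-based, so the target's receive queue is sized hsqsize + 1, hrqsize becomes the send queue size, and an admin queue (qid 0) deeper than NVME_AQ_DEPTH is rejected. Below is a stripped-down userspace parse of just those fields, using le16toh() for the little-endian conversion; the struct models only the three fields visible above, not the full nvme_rdma_cm_req layout.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_AQ_DEPTH 32   /* admin queue depth limit, as in the kernel's NVMe headers */

/* Only the fields used above; the real nvme_rdma_cm_req carries more. */
struct cm_connect_req {
    uint16_t qid;       /* little-endian on the wire */
    uint16_t hsqsize;   /* host submission queue size, zero-based */
    uint16_t hrqsize;   /* host receive queue size */
};

struct queue_params {
    int host_qid;
    int recv_queue_size;
    int send_queue_size;
};

static int parse_connect_req(const struct cm_connect_req *req,
                             struct queue_params *q)
{
    q->host_qid = le16toh(req->qid);
    q->recv_queue_size = le16toh(req->hsqsize) + 1;  /* zero-based -> actual depth */
    q->send_queue_size = le16toh(req->hrqsize);

    /* The admin queue may not be deeper than NVME_AQ_DEPTH. */
    if (!q->host_qid && q->recv_queue_size > NVME_AQ_DEPTH)
        return -1;
    return 0;
}

int main(void)
{
    struct cm_connect_req req = {
        .qid = htole16(0), .hsqsize = htole16(31), .hrqsize = htole16(32),
    };
    struct queue_params q;

    printf("accepted=%d recv=%d send=%d\n",
           parse_connect_req(&req, &q) == 0, q.recv_queue_size, q.send_queue_size);
    return 0;
}
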
1417 struct nvmet_rdma_queue *queue; in nvmet_rdma_alloc_queue() local
1420 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in nvmet_rdma_alloc_queue()
1421 if (!queue) { in nvmet_rdma_alloc_queue()
1426 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1432 ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); in nvmet_rdma_alloc_queue()
1440 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); in nvmet_rdma_alloc_queue()
1441 queue->dev = ndev; in nvmet_rdma_alloc_queue()
1442 queue->cm_id = cm_id; in nvmet_rdma_alloc_queue()
1443 queue->port = port->nport; in nvmet_rdma_alloc_queue()
1445 spin_lock_init(&queue->state_lock); in nvmet_rdma_alloc_queue()
1446 queue->state = NVMET_RDMA_Q_CONNECTING; in nvmet_rdma_alloc_queue()
1447 INIT_LIST_HEAD(&queue->rsp_wait_list); in nvmet_rdma_alloc_queue()
1448 INIT_LIST_HEAD(&queue->rsp_wr_wait_list); in nvmet_rdma_alloc_queue()
1449 spin_lock_init(&queue->rsp_wr_wait_lock); in nvmet_rdma_alloc_queue()
1450 INIT_LIST_HEAD(&queue->free_rsps); in nvmet_rdma_alloc_queue()
1451 spin_lock_init(&queue->rsps_lock); in nvmet_rdma_alloc_queue()
1452 INIT_LIST_HEAD(&queue->queue_list); in nvmet_rdma_alloc_queue()
1454 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); in nvmet_rdma_alloc_queue()
1455 if (queue->idx < 0) { in nvmet_rdma_alloc_queue()
1464 queue->comp_vector = !queue->host_qid ? 0 : in nvmet_rdma_alloc_queue()
1465 queue->idx % ndev->device->num_comp_vectors; in nvmet_rdma_alloc_queue()
1468 ret = nvmet_rdma_alloc_rsps(queue); in nvmet_rdma_alloc_queue()
1475 queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count]; in nvmet_rdma_alloc_queue()
1477 queue->cmds = nvmet_rdma_alloc_cmds(ndev, in nvmet_rdma_alloc_queue()
1478 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1479 !queue->host_qid); in nvmet_rdma_alloc_queue()
1480 if (IS_ERR(queue->cmds)) { in nvmet_rdma_alloc_queue()
1486 ret = nvmet_rdma_create_queue_ib(queue); in nvmet_rdma_alloc_queue()
1488 pr_err("%s: creating RDMA queue failed (%d).\n", in nvmet_rdma_alloc_queue()
1494 return queue; in nvmet_rdma_alloc_queue()
1497 if (!queue->nsrq) { in nvmet_rdma_alloc_queue()
1498 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_alloc_queue()
1499 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1500 !queue->host_qid); in nvmet_rdma_alloc_queue()
1503 nvmet_rdma_free_rsps(queue); in nvmet_rdma_alloc_queue()
1505 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_alloc_queue()
1507 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1509 kfree(queue); in nvmet_rdma_alloc_queue()
1517 struct nvmet_rdma_queue *queue = priv; in nvmet_rdma_qp_event() local
1521 rdma_notify(queue->cm_id, event->event); in nvmet_rdma_qp_event()
1524 pr_debug("received last WQE reached event for queue=0x%p\n", in nvmet_rdma_qp_event()
1525 queue); in nvmet_rdma_qp_event()
1535 struct nvmet_rdma_queue *queue, in nvmet_rdma_cm_accept() argument
1545 queue->dev->device->attrs.max_qp_init_rd_atom); in nvmet_rdma_cm_accept()
1549 priv.crqsize = cpu_to_le16(queue->recv_queue_size); in nvmet_rdma_cm_accept()
1562 struct nvmet_rdma_queue *queue; in nvmet_rdma_queue_connect() local
1571 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); in nvmet_rdma_queue_connect()
1572 if (!queue) { in nvmet_rdma_queue_connect()
1577 if (queue->host_qid == 0) { in nvmet_rdma_queue_connect()
1582 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); in nvmet_rdma_queue_connect()
1588 queue->cm_id = NULL; in nvmet_rdma_queue_connect()
1593 list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); in nvmet_rdma_queue_connect()
1599 nvmet_rdma_free_queue(queue); in nvmet_rdma_queue_connect()
1606 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_established() argument
1610 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1611 if (queue->state != NVMET_RDMA_Q_CONNECTING) { in nvmet_rdma_queue_established()
1612 pr_warn("trying to establish a connected queue\n"); in nvmet_rdma_queue_established()
1615 queue->state = NVMET_RDMA_Q_LIVE; in nvmet_rdma_queue_established()
1617 while (!list_empty(&queue->rsp_wait_list)) { in nvmet_rdma_queue_established()
1620 cmd = list_first_entry(&queue->rsp_wait_list, in nvmet_rdma_queue_established()
1624 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1625 nvmet_rdma_handle_command(queue, cmd); in nvmet_rdma_queue_established()
1626 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1630 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1633 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in __nvmet_rdma_queue_disconnect() argument
1638 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); in __nvmet_rdma_queue_disconnect()
1640 spin_lock_irqsave(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1641 switch (queue->state) { in __nvmet_rdma_queue_disconnect()
1644 queue->state = NVMET_RDMA_Q_DISCONNECTING; in __nvmet_rdma_queue_disconnect()
1650 spin_unlock_irqrestore(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1653 rdma_disconnect(queue->cm_id); in __nvmet_rdma_queue_disconnect()
1654 schedule_work(&queue->release_work); in __nvmet_rdma_queue_disconnect()
1658 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_disconnect() argument
1663 if (!list_empty(&queue->queue_list)) { in nvmet_rdma_queue_disconnect()
1664 list_del_init(&queue->queue_list); in nvmet_rdma_queue_disconnect()
1670 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_queue_disconnect()
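
The matches in nvmet_rdma_recv_done(), nvmet_rdma_queue_established() and the two disconnect helpers trace the queue state machine: commands arriving while the queue is still CONNECTING are parked on rsp_wait_list, the CM "established" event flips the state to LIVE and drains that list, and a disconnect (from either side or an error completion) moves the queue to DISCONNECTING exactly once before rdma_disconnect() and the release work are scheduled. A compact sketch of just those transitions, with the locking and list handling elided:

#include <stdbool.h>
#include <stdio.h>

enum queue_state {           /* mirrors the NVMET_RDMA_Q_* states */
    Q_CONNECTING,
    Q_LIVE,
    Q_DISCONNECTING,
};

/* Established event: only a CONNECTING queue may go live. */
static bool queue_established(enum queue_state *state)
{
    if (*state != Q_CONNECTING)
        return false;        /* "trying to establish a connected queue" */
    *state = Q_LIVE;
    return true;             /* caller now drains the parked commands */
}

/* Disconnect: the transition to DISCONNECTING happens exactly once,
 * whether it comes from CONNECTING or LIVE; repeat calls do nothing. */
static bool queue_disconnect(enum queue_state *state)
{
    switch (*state) {
    case Q_CONNECTING:
    case Q_LIVE:
        *state = Q_DISCONNECTING;
        return true;         /* caller issues rdma_disconnect() + release work */
    default:
        return false;
    }
}

int main(void)
{
    enum queue_state st = Q_CONNECTING;

    printf("established: %d\n", queue_established(&st));  /* 1 */
    printf("disconnect:  %d\n", queue_disconnect(&st));   /* 1 */
    printf("disconnect:  %d\n", queue_disconnect(&st));   /* 0: already torn down */
    return 0;
}
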
1674 struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_connect_fail() argument
1676 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); in nvmet_rdma_queue_connect_fail()
1679 if (!list_empty(&queue->queue_list)) in nvmet_rdma_queue_connect_fail()
1680 list_del_init(&queue->queue_list); in nvmet_rdma_queue_connect_fail()
1683 pr_err("failed to connect queue %d\n", queue->idx); in nvmet_rdma_queue_connect_fail()
1684 schedule_work(&queue->release_work); in nvmet_rdma_queue_connect_fail()
1690 * @queue: nvmet rdma queue (cm id qp_context)
1694 * queue cm_id and/or a device bound listener cm_id (where in this
1695 * case queue will be null).
1703 struct nvmet_rdma_queue *queue) in nvmet_rdma_device_removal() argument
1707 if (queue) { in nvmet_rdma_device_removal()
1709 * This is a queue cm_id. we have registered in nvmet_rdma_device_removal()
1737 struct nvmet_rdma_queue *queue = NULL; in nvmet_rdma_cm_handler() local
1741 queue = cm_id->qp->qp_context; in nvmet_rdma_cm_handler()
1752 nvmet_rdma_queue_established(queue); in nvmet_rdma_cm_handler()
1755 if (!queue) { in nvmet_rdma_cm_handler()
1764 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_cm_handler()
1767 ret = nvmet_rdma_device_removal(cm_id, queue); in nvmet_rdma_cm_handler()
1775 nvmet_rdma_queue_connect_fail(cm_id, queue); in nvmet_rdma_cm_handler()
1788 struct nvmet_rdma_queue *queue; in nvmet_rdma_delete_ctrl() local
1792 list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { in nvmet_rdma_delete_ctrl()
1793 if (queue->nvme_sq.ctrl == ctrl) { in nvmet_rdma_delete_ctrl()
1794 list_del_init(&queue->queue_list); in nvmet_rdma_delete_ctrl()
1797 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_delete_ctrl()
1953 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()
1984 struct nvmet_rdma_queue *queue, *tmp; in nvmet_rdma_remove_one() local
2005 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, in nvmet_rdma_remove_one()
2007 if (queue->dev->device != ib_device) in nvmet_rdma_remove_one()
2010 pr_info("Removing queue %d\n", queue->idx); in nvmet_rdma_remove_one()
2011 list_del_init(&queue->queue_list); in nvmet_rdma_remove_one()
2012 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_remove_one()