Lines Matching refs:queue
48 struct nvmet_rdma_queue *queue; member
62 struct nvmet_rdma_queue *queue; member
141 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
172 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
177 spin_lock_irqsave(&queue->rsps_lock, flags); in nvmet_rdma_get_rsp()
178 rsp = list_first_entry_or_null(&queue->free_rsps, in nvmet_rdma_get_rsp()
182 spin_unlock_irqrestore(&queue->rsps_lock, flags); in nvmet_rdma_get_rsp()
204 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
205 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); in nvmet_rdma_put_rsp()
206 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
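
The get/put pair above (lines 172-206) implements a pre-allocated response pool: a spinlock-protected free list that nvmet_rdma_get_rsp() pops under irqsave and nvmet_rdma_put_rsp() pushes back. A minimal user-space sketch of the same pattern, with a pthread mutex standing in for spin_lock_irqsave() and all names hypothetical:

    #include <pthread.h>

    struct rsp {
        struct rsp *next;          /* stands in for the free_list list_head */
    };

    struct rsp_pool {
        pthread_mutex_t lock;      /* the kernel uses spin_lock_irqsave() */
        struct rsp *free;          /* head of the free list */
    };

    /* Pop one response, or NULL when the pool is exhausted
     * (cf. list_first_entry_or_null() at line 178). */
    static struct rsp *pool_get(struct rsp_pool *p)
    {
        struct rsp *r;

        pthread_mutex_lock(&p->lock);
        r = p->free;
        if (r)
            p->free = r->next;
        pthread_mutex_unlock(&p->lock);
        return r;
    }

    /* Return a response to the pool; the kernel appends with
     * list_add_tail() (line 205), a LIFO push suffices for the sketch. */
    static void pool_put(struct rsp_pool *p, struct rsp *r)
    {
        pthread_mutex_lock(&p->lock);
        r->next = p->free;
        p->free = r;
        pthread_mutex_unlock(&p->lock);
    }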
400 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_alloc_rsps() argument
402 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_alloc_rsps()
403 int nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_alloc_rsps()
406 queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), in nvmet_rdma_alloc_rsps()
408 if (!queue->rsps) in nvmet_rdma_alloc_rsps()
412 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
418 list_add_tail(&rsp->free_list, &queue->free_rsps); in nvmet_rdma_alloc_rsps()
425 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
430 kfree(queue->rsps); in nvmet_rdma_alloc_rsps()
435 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_rsps() argument
437 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_free_rsps()
438 int i, nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_free_rsps()
441 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_free_rsps()
446 kfree(queue->rsps); in nvmet_rdma_free_rsps()
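
nvmet_rdma_alloc_rsps() sizes the pool at twice recv_queue_size (lines 403 and 438), presumably so a response can still be in flight on the send side after its receive buffer has been re-posted. Its failure path unwinds only the entries that were initialized (lines 425-430), mirrored in this user-space sketch; calloc() stands in for kcalloc(), and the init/fini pair is a hypothetical placeholder for the per-response DMA setup:

    #include <stdlib.h>

    struct rsp { int dummy; };  /* placeholder payload */

    /* Hypothetical stand-ins for per-response setup/teardown. */
    static int rsp_init(struct rsp *r)  { r->dummy = 0; return 0; }
    static void rsp_fini(struct rsp *r) { (void)r; }

    static struct rsp *alloc_rsps(int recv_queue_size, int *out_nr)
    {
        int i, nr = recv_queue_size * 2;   /* same 2x sizing as line 403 */
        struct rsp *rsps = calloc(nr, sizeof(*rsps));

        if (!rsps)
            return NULL;
        for (i = 0; i < nr; i++)
            if (rsp_init(&rsps[i]))
                goto undo;                 /* unwind only what succeeded */
        *out_nr = nr;
        return rsps;

    undo:
        while (--i >= 0)
            rsp_fini(&rsps[i]);
        free(rsps);
        return NULL;
    }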
461 ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL); in nvmet_rdma_post_recv()
469 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) in nvmet_rdma_process_wr_wait_list() argument
471 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
472 while (!list_empty(&queue->rsp_wr_wait_list)) { in nvmet_rdma_process_wr_wait_list()
476 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
480 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
482 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
485 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
489 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
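
nvmet_rdma_process_wr_wait_list() (lines 471-489) shows a common kernel pattern: walk a wait list under its lock, drop the lock to do the actual work, re-take it, and if the entry still cannot proceed, put it back at the head and stop. A compilable sketch under user-space assumptions, with a pthread mutex for the spinlock and try_execute() as a hypothetical stand-in for nvmet_rdma_execute_command():

    #include <pthread.h>
    #include <stdbool.h>

    struct waiter { struct waiter *next; };

    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct waiter *wait_list;    /* rsp_wr_wait_list */

    /* Placeholder: returns false when send WRs are exhausted again. */
    static bool try_execute(struct waiter *w) { (void)w; return true; }

    static void process_wait_list(void)
    {
        pthread_mutex_lock(&wait_lock);
        while (wait_list) {
            struct waiter *w = wait_list;
            bool ok;

            wait_list = w->next;                  /* list_del() */
            pthread_mutex_unlock(&wait_lock);     /* work runs unlocked */
            ok = try_execute(w);
            pthread_mutex_lock(&wait_lock);
            if (!ok) {
                w->next = wait_list;              /* re-queue at the head */
                wait_list = w;
                break;                            /* still no room; stop */
            }
        }
        pthread_mutex_unlock(&wait_lock);
    }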
495 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp() local
497 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
500 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, in nvmet_rdma_release_rsp()
501 queue->cm_id->port_num, rsp->req.sg, in nvmet_rdma_release_rsp()
508 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) in nvmet_rdma_release_rsp()
509 nvmet_rdma_process_wr_wait_list(queue); in nvmet_rdma_release_rsp()
514 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue) in nvmet_rdma_error_comp() argument
516 if (queue->nvme_sq.ctrl) { in nvmet_rdma_error_comp()
517 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_rdma_error_comp()
524 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_error_comp()
539 nvmet_rdma_error_comp(rsp->queue); in nvmet_rdma_send_done()
547 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
563 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
565 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
579 struct nvmet_rdma_queue *queue = cq->cq_context; in nvmet_rdma_read_data_done() local
582 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
583 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, in nvmet_rdma_read_data_done()
584 queue->cm_id->port_num, rsp->req.sg, in nvmet_rdma_read_data_done()
594 nvmet_rdma_error_comp(queue); in nvmet_rdma_read_data_done()
635 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
653 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_map_sgl_keyed()
714 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command() local
717 &queue->sq_wr_avail) < 0)) { in nvmet_rdma_execute_command()
719 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
720 queue->nvme_sq.ctrl->cntlid); in nvmet_rdma_execute_command()
721 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
726 if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp, in nvmet_rdma_execute_command()
727 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
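
The guard at lines 714-721 reserves 1 + n_rdma send work requests up front (one SEND plus the RDMA READ WRs) and rolls the reservation back if the counter would go negative, at which point the request is parked on the wait list shown above. The same accounting expressed with C11 atomics, as a sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int sq_wr_avail;  /* set to max_send_wr at QP creation */

    static bool reserve_send_wrs(int n_rdma)
    {
        int need = 1 + n_rdma;

        /* atomic_fetch_sub() returns the old value, so old - need is the
         * new one, matching atomic_sub_return(...) < 0 at lines 716-717. */
        if (atomic_fetch_sub(&sq_wr_avail, need) - need < 0) {
            atomic_fetch_add(&sq_wr_avail, need);  /* roll back */
            return false;   /* caller queues onto rsp_wr_wait_list */
        }
        return true;
    }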
736 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, in nvmet_rdma_handle_command() argument
741 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
744 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
748 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, in nvmet_rdma_handle_command()
749 &queue->nvme_sq, &nvmet_rdma_ops)) in nvmet_rdma_handle_command()
757 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
758 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_handle_command()
759 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
772 struct nvmet_rdma_queue *queue = cq->cq_context; in nvmet_rdma_recv_done() local
780 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
787 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
791 cmd->queue = queue; in nvmet_rdma_recv_done()
792 rsp = nvmet_rdma_get_rsp(queue); in nvmet_rdma_recv_done()
799 nvmet_rdma_post_recv(queue->dev, cmd); in nvmet_rdma_recv_done()
802 rsp->queue = queue; in nvmet_rdma_recv_done()
806 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
809 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { in nvmet_rdma_recv_done()
812 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_recv_done()
813 if (queue->state == NVMET_RDMA_Q_CONNECTING) in nvmet_rdma_recv_done()
814 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_done()
817 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_recv_done()
821 nvmet_rdma_handle_command(queue, rsp); in nvmet_rdma_recv_done()
955 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_create_queue_ib() argument
958 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_create_queue_ib()
965 comp_vector = !queue->host_qid ? 0 : in nvmet_rdma_create_queue_ib()
966 queue->idx % ndev->device->num_comp_vectors; in nvmet_rdma_create_queue_ib()
971 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; in nvmet_rdma_create_queue_ib()
973 queue->cq = ib_alloc_cq(ndev->device, queue, in nvmet_rdma_create_queue_ib()
976 if (IS_ERR(queue->cq)) { in nvmet_rdma_create_queue_ib()
977 ret = PTR_ERR(queue->cq); in nvmet_rdma_create_queue_ib()
984 qp_attr.qp_context = queue; in nvmet_rdma_create_queue_ib()
986 qp_attr.send_cq = queue->cq; in nvmet_rdma_create_queue_ib()
987 qp_attr.recv_cq = queue->cq; in nvmet_rdma_create_queue_ib()
991 qp_attr.cap.max_send_wr = queue->send_queue_size + 1; in nvmet_rdma_create_queue_ib()
992 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size; in nvmet_rdma_create_queue_ib()
1000 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; in nvmet_rdma_create_queue_ib()
1004 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); in nvmet_rdma_create_queue_ib()
1010 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); in nvmet_rdma_create_queue_ib()
1013 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, in nvmet_rdma_create_queue_ib()
1014 qp_attr.cap.max_send_wr, queue->cm_id); in nvmet_rdma_create_queue_ib()
1017 for (i = 0; i < queue->recv_queue_size; i++) { in nvmet_rdma_create_queue_ib()
1018 queue->cmds[i].queue = queue; in nvmet_rdma_create_queue_ib()
1019 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); in nvmet_rdma_create_queue_ib()
1029 rdma_destroy_qp(queue->cm_id); in nvmet_rdma_create_queue_ib()
1031 ib_free_cq(queue->cq); in nvmet_rdma_create_queue_ib()
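
Two sizing decisions in nvmet_rdma_create_queue_ib() are worth calling out: admin queues (host_qid == 0) stay on completion vector 0 while I/O queues are spread round-robin across the device's vectors (lines 965-966), and the CQ reserves one entry per RECV plus two per send slot, covering the SEND completion and the RDMA READ/WRITE completion each request can generate (line 971). As standalone helpers:

    /* CQ depth: RECV + (SEND + RDMA READ/WRITE) per send slot, line 971. */
    static int nr_cqe(int recv_queue_size, int send_queue_size)
    {
        return recv_queue_size + 2 * send_queue_size;
    }

    /* Vector spread: admin queue pinned to vector 0, I/O queues
     * round-robin by queue index, as at lines 965-966. */
    static int comp_vector(int host_qid, int queue_idx, int num_comp_vectors)
    {
        return !host_qid ? 0 : queue_idx % num_comp_vectors;
    }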
1035 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_destroy_queue_ib() argument
1037 struct ib_qp *qp = queue->cm_id->qp; in nvmet_rdma_destroy_queue_ib()
1040 rdma_destroy_id(queue->cm_id); in nvmet_rdma_destroy_queue_ib()
1042 ib_free_cq(queue->cq); in nvmet_rdma_destroy_queue_ib()
1045 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_queue() argument
1047 pr_debug("freeing queue %d\n", queue->idx); in nvmet_rdma_free_queue()
1049 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_free_queue()
1051 nvmet_rdma_destroy_queue_ib(queue); in nvmet_rdma_free_queue()
1052 if (!queue->dev->srq) { in nvmet_rdma_free_queue()
1053 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_free_queue()
1054 queue->recv_queue_size, in nvmet_rdma_free_queue()
1055 !queue->host_qid); in nvmet_rdma_free_queue()
1057 nvmet_rdma_free_rsps(queue); in nvmet_rdma_free_queue()
1058 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_free_queue()
1059 kfree(queue); in nvmet_rdma_free_queue()
1064 struct nvmet_rdma_queue *queue = in nvmet_rdma_release_queue_work() local
1066 struct nvmet_rdma_device *dev = queue->dev; in nvmet_rdma_release_queue_work()
1068 nvmet_rdma_free_queue(queue); in nvmet_rdma_release_queue_work()
1075 struct nvmet_rdma_queue *queue) in nvmet_rdma_parse_cm_connect_req() argument
1086 queue->host_qid = le16_to_cpu(req->qid); in nvmet_rdma_parse_cm_connect_req()
1092 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; in nvmet_rdma_parse_cm_connect_req()
1093 queue->send_queue_size = le16_to_cpu(req->hrqsize); in nvmet_rdma_parse_cm_connect_req()
1095 if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) in nvmet_rdma_parse_cm_connect_req()
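
nvmet_rdma_parse_cm_connect_req() decodes the NVMe-oF connect private data: hsqsize is a 0's-based value, hence the +1 on the receive side (line 1092), and an admin queue (qid 0) deeper than NVME_AQ_DEPTH is rejected (line 1095). A host-byte-order decode sketch with a hypothetical parameter struct and a stand-in depth constant:

    #include <stdint.h>

    #define AQ_DEPTH 32    /* stand-in for NVME_AQ_DEPTH */

    struct connect_params {    /* hypothetical, already byte-swapped */
        uint16_t qid, hsqsize, hrqsize;
    };

    static int parse_connect(const struct connect_params *req,
                             int *host_qid, int *recv_qsize, int *send_qsize)
    {
        *host_qid = req->qid;
        *recv_qsize = req->hsqsize + 1;  /* hsqsize is 0's based */
        *send_qsize = req->hrqsize;

        if (*host_qid == 0 && *recv_qsize > AQ_DEPTH)
            return -1;                   /* admin queue too deep */
        return 0;
    }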
1122 struct nvmet_rdma_queue *queue; in nvmet_rdma_alloc_queue() local
1125 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in nvmet_rdma_alloc_queue()
1126 if (!queue) { in nvmet_rdma_alloc_queue()
1131 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1137 ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); in nvmet_rdma_alloc_queue()
1145 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); in nvmet_rdma_alloc_queue()
1146 queue->dev = ndev; in nvmet_rdma_alloc_queue()
1147 queue->cm_id = cm_id; in nvmet_rdma_alloc_queue()
1149 spin_lock_init(&queue->state_lock); in nvmet_rdma_alloc_queue()
1150 queue->state = NVMET_RDMA_Q_CONNECTING; in nvmet_rdma_alloc_queue()
1151 INIT_LIST_HEAD(&queue->rsp_wait_list); in nvmet_rdma_alloc_queue()
1152 INIT_LIST_HEAD(&queue->rsp_wr_wait_list); in nvmet_rdma_alloc_queue()
1153 spin_lock_init(&queue->rsp_wr_wait_lock); in nvmet_rdma_alloc_queue()
1154 INIT_LIST_HEAD(&queue->free_rsps); in nvmet_rdma_alloc_queue()
1155 spin_lock_init(&queue->rsps_lock); in nvmet_rdma_alloc_queue()
1156 INIT_LIST_HEAD(&queue->queue_list); in nvmet_rdma_alloc_queue()
1158 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); in nvmet_rdma_alloc_queue()
1159 if (queue->idx < 0) { in nvmet_rdma_alloc_queue()
1164 ret = nvmet_rdma_alloc_rsps(queue); in nvmet_rdma_alloc_queue()
1171 queue->cmds = nvmet_rdma_alloc_cmds(ndev, in nvmet_rdma_alloc_queue()
1172 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1173 !queue->host_qid); in nvmet_rdma_alloc_queue()
1174 if (IS_ERR(queue->cmds)) { in nvmet_rdma_alloc_queue()
1180 ret = nvmet_rdma_create_queue_ib(queue); in nvmet_rdma_alloc_queue()
1188 return queue; in nvmet_rdma_alloc_queue()
1192 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_alloc_queue()
1193 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1194 !queue->host_qid); in nvmet_rdma_alloc_queue()
1197 nvmet_rdma_free_rsps(queue); in nvmet_rdma_alloc_queue()
1199 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_alloc_queue()
1201 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1203 kfree(queue); in nvmet_rdma_alloc_queue()
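
nvmet_rdma_alloc_queue() uses the classic goto unwind ladder: each failure label releases exactly what was acquired before it, in reverse order (lines 1192-1203), and nvmet_rdma_free_queue() (lines 1045-1059) tears down the fully constructed queue in the same reverse order. Reduced to its shape, with hypothetical acquire/release pairs standing in for nvmet_sq_init(), ida_simple_get(), nvmet_rdma_alloc_rsps() and their counterparts:

    #include <stdbool.h>

    static bool get_a(void) { return true; }  /* e.g. nvmet_sq_init() */
    static void put_a(void) { }
    static bool get_b(void) { return true; }  /* e.g. ida_simple_get() */
    static void put_b(void) { }
    static bool get_c(void) { return true; }  /* e.g. alloc_rsps() */
    static void put_c(void) { }

    static int setup(void)
    {
        if (!get_a())
            goto out;
        if (!get_b())
            goto undo_a;
        if (!get_c())
            goto undo_b;
        return 0;          /* success: nothing is unwound */

        /* failure labels run in reverse acquisition order */
    undo_b:
        put_b();
    undo_a:
        put_a();
    out:
        return -1;
    }

    static void teardown(void)  /* mirrors nvmet_rdma_free_queue() */
    {
        put_c();
        put_b();
        put_a();
    }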
1211 struct nvmet_rdma_queue *queue = priv; in nvmet_rdma_qp_event() local
1215 rdma_notify(queue->cm_id, event->event); in nvmet_rdma_qp_event()
1225 struct nvmet_rdma_queue *queue, in nvmet_rdma_cm_accept() argument
1235 queue->dev->device->attrs.max_qp_init_rd_atom); in nvmet_rdma_cm_accept()
1239 priv.crqsize = cpu_to_le16(queue->recv_queue_size); in nvmet_rdma_cm_accept()
1252 struct nvmet_rdma_queue *queue; in nvmet_rdma_queue_connect() local
1261 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); in nvmet_rdma_queue_connect()
1262 if (!queue) { in nvmet_rdma_queue_connect()
1266 queue->port = cm_id->context; in nvmet_rdma_queue_connect()
1268 if (queue->host_qid == 0) { in nvmet_rdma_queue_connect()
1273 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); in nvmet_rdma_queue_connect()
1275 schedule_work(&queue->release_work); in nvmet_rdma_queue_connect()
1281 list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); in nvmet_rdma_queue_connect()
1292 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_established() argument
1296 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1297 if (queue->state != NVMET_RDMA_Q_CONNECTING) { in nvmet_rdma_queue_established()
1301 queue->state = NVMET_RDMA_Q_LIVE; in nvmet_rdma_queue_established()
1303 while (!list_empty(&queue->rsp_wait_list)) { in nvmet_rdma_queue_established()
1306 cmd = list_first_entry(&queue->rsp_wait_list, in nvmet_rdma_queue_established()
1310 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1311 nvmet_rdma_handle_command(queue, cmd); in nvmet_rdma_queue_established()
1312 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1316 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
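
Lines 809-817 and 1296-1316 are two halves of one state machine: commands that arrive while the queue is still CONNECTING are parked on rsp_wait_list, and nvmet_rdma_queue_established() replays them once the CM handshake promotes the queue to LIVE, dropping the state lock around each replayed command. A condensed sketch under user-space assumptions, with handle() as a hypothetical stand-in for nvmet_rdma_handle_command():

    #include <pthread.h>

    enum q_state { Q_CONNECTING, Q_LIVE, Q_DISCONNECTING };

    struct cmd { struct cmd *next; };

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static enum q_state state = Q_CONNECTING;
    static struct cmd *wait_head;              /* rsp_wait_list */

    static void handle(struct cmd *c) { (void)c; }  /* placeholder */

    /* Receive path: park while connecting, handle once live. */
    static void recv_cmd(struct cmd *c)
    {
        pthread_mutex_lock(&state_lock);
        if (state == Q_CONNECTING) {
            c->next = wait_head;   /* kernel appends with list_add_tail() */
            wait_head = c;
            pthread_mutex_unlock(&state_lock);
            return;
        }
        pthread_mutex_unlock(&state_lock);
        handle(c);
    }

    /* CM "established" event: go live and replay parked commands. */
    static void established(void)
    {
        pthread_mutex_lock(&state_lock);
        if (state == Q_CONNECTING) {
            state = Q_LIVE;
            while (wait_head) {
                struct cmd *c = wait_head;

                wait_head = c->next;
                pthread_mutex_unlock(&state_lock);  /* as at line 1310 */
                handle(c);
                pthread_mutex_lock(&state_lock);
            }
        }
        pthread_mutex_unlock(&state_lock);
    }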
1319 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in __nvmet_rdma_queue_disconnect() argument
1324 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); in __nvmet_rdma_queue_disconnect()
1326 spin_lock_irqsave(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1327 switch (queue->state) { in __nvmet_rdma_queue_disconnect()
1330 queue->state = NVMET_RDMA_Q_DISCONNECTING; in __nvmet_rdma_queue_disconnect()
1336 spin_unlock_irqrestore(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1339 rdma_disconnect(queue->cm_id); in __nvmet_rdma_queue_disconnect()
1340 schedule_work(&queue->release_work); in __nvmet_rdma_queue_disconnect()
1344 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_disconnect() argument
1349 if (!list_empty(&queue->queue_list)) { in nvmet_rdma_queue_disconnect()
1350 list_del_init(&queue->queue_list); in nvmet_rdma_queue_disconnect()
1356 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_queue_disconnect()
1360 struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_connect_fail() argument
1362 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); in nvmet_rdma_queue_connect_fail()
1365 if (!list_empty(&queue->queue_list)) in nvmet_rdma_queue_connect_fail()
1366 list_del_init(&queue->queue_list); in nvmet_rdma_queue_connect_fail()
1369 pr_err("failed to connect queue %d\n", queue->idx); in nvmet_rdma_queue_connect_fail()
1370 schedule_work(&queue->release_work); in nvmet_rdma_queue_connect_fail()
1389 struct nvmet_rdma_queue *queue) in nvmet_rdma_device_removal() argument
1393 if (queue) { in nvmet_rdma_device_removal()
1423 struct nvmet_rdma_queue *queue = NULL; in nvmet_rdma_cm_handler() local
1427 queue = cm_id->qp->qp_context; in nvmet_rdma_cm_handler()
1438 nvmet_rdma_queue_established(queue); in nvmet_rdma_cm_handler()
1443 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_cm_handler()
1446 ret = nvmet_rdma_device_removal(cm_id, queue); in nvmet_rdma_cm_handler()
1454 nvmet_rdma_queue_connect_fail(cm_id, queue); in nvmet_rdma_cm_handler()
1467 struct nvmet_rdma_queue *queue; in nvmet_rdma_delete_ctrl() local
1471 list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { in nvmet_rdma_delete_ctrl()
1472 if (queue->nvme_sq.ctrl == ctrl) { in nvmet_rdma_delete_ctrl()
1473 list_del_init(&queue->queue_list); in nvmet_rdma_delete_ctrl()
1476 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_delete_ctrl()
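
nvmet_rdma_delete_ctrl() cannot keep iterating the queue list after dropping its mutex, so the surrounding driver code deletes one match, unlocks, disconnects, and restarts the scan from the top (the restart itself falls outside the matched lines). The same restart idiom in a user-space sketch, with disconnect() as a hypothetical stand-in for __nvmet_rdma_queue_disconnect():

    #include <pthread.h>

    struct q { struct q *next; void *ctrl; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct q *queues;

    static void disconnect(struct q *victim) { (void)victim; }

    static void delete_ctrl(void *ctrl)
    {
    restart:
        pthread_mutex_lock(&list_lock);
        for (struct q **pp = &queues; *pp; pp = &(*pp)->next) {
            if ((*pp)->ctrl == ctrl) {
                struct q *victim = *pp;

                *pp = victim->next;                /* list_del_init() */
                pthread_mutex_unlock(&list_lock);
                disconnect(victim);     /* may sleep; lock is dropped */
                goto restart;   /* iterator is invalid after unlock */
            }
        }
        pthread_mutex_unlock(&list_lock);
    }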
1577 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()
1600 struct nvmet_rdma_queue *queue, *tmp; in nvmet_rdma_remove_one() local
1621 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, in nvmet_rdma_remove_one()
1623 if (queue->dev->device != ib_device) in nvmet_rdma_remove_one()
1626 pr_info("Removing queue %d\n", queue->idx); in nvmet_rdma_remove_one()
1627 list_del_init(&queue->queue_list); in nvmet_rdma_remove_one()
1628 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_remove_one()