Lines Matching refs:rsp
136 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
156 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_need_data_in() argument
158 return nvme_is_write(rsp->req.cmd) && in nvmet_rdma_need_data_in()
159 rsp->req.transfer_len && in nvmet_rdma_need_data_in()
160 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); in nvmet_rdma_need_data_in()
163 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_need_data_out() argument
165 return !nvme_is_write(rsp->req.cmd) && in nvmet_rdma_need_data_out()
166 rsp->req.transfer_len && in nvmet_rdma_need_data_out()
167 !rsp->req.rsp->status && in nvmet_rdma_need_data_out()
168 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); in nvmet_rdma_need_data_out()
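
The two helpers above gate the RDMA data path: a write command whose payload did not arrive inline needs an RDMA READ from the host ("data in"), and a successfully completed read command with a payload needs an RDMA WRITE back to the host ("data out"). A minimal userspace sketch of the same predicates, with stand-in types replacing the kernel structures (fake_req and REQ_INLINE_DATA are illustrative names, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define REQ_INLINE_DATA 0x1    /* stand-in for NVMET_RDMA_REQ_INLINE_DATA */

    struct fake_req {
        bool     is_write;         /* stands in for nvme_is_write(cmd) */
        uint32_t transfer_len;     /* bytes to move for this command */
        uint16_t status;           /* completion status, 0 on success */
        int      flags;
    };

    /* Host-to-target transfer: a write whose payload was not inline. */
    static bool need_data_in(const struct fake_req *r)
    {
        return r->is_write && r->transfer_len &&
               !(r->flags & REQ_INLINE_DATA);
    }

    /* Target-to-host transfer: a successful read with a payload. */
    static bool need_data_out(const struct fake_req *r)
    {
        return !r->is_write && r->transfer_len && !r->status &&
               !(r->flags & REQ_INLINE_DATA);
    }

    int main(void)
    {
        struct fake_req rd = { .is_write = false, .transfer_len = 4096 };

        printf("read needs RDMA WRITE back to host: %d\n", need_data_out(&rd));
        return 0;
    }

The extra status test on the data-out side mirrors line 167: a failed read sends only the status capsule, never a data transfer.
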
174 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_get_rsp() local
178 rsp = list_first_entry_or_null(&queue->free_rsps, in nvmet_rdma_get_rsp()
180 if (likely(rsp)) in nvmet_rdma_get_rsp()
181 list_del(&rsp->free_list); in nvmet_rdma_get_rsp()
184 if (unlikely(!rsp)) { in nvmet_rdma_get_rsp()
185 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); in nvmet_rdma_get_rsp()
186 if (unlikely(!rsp)) in nvmet_rdma_get_rsp()
188 rsp->allocated = true; in nvmet_rdma_get_rsp()
191 return rsp; in nvmet_rdma_get_rsp()
195 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_put_rsp() argument
199 if (rsp->allocated) { in nvmet_rdma_put_rsp()
200 kfree(rsp); in nvmet_rdma_put_rsp()
204 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
205 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); in nvmet_rdma_put_rsp()
206 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
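
nvmet_rdma_get_rsp()/nvmet_rdma_put_rsp() above implement a spinlock-protected per-queue free list with a heap fallback: when the pre-allocated pool is empty, a descriptor is kmalloc'ed and flagged via rsp->allocated (line 188) so the put side frees it instead of recycling it. A compact userspace sketch of the pattern, with a pthread mutex standing in for the spinlock (all names illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct rsp {
        struct rsp *next;          /* free-list linkage */
        bool allocated;            /* true for heap-fallback descriptors */
    };

    struct queue {
        pthread_mutex_t lock;      /* stands in for rsps_lock */
        struct rsp *free_head;     /* stands in for free_rsps */
    };

    static struct rsp *get_rsp(struct queue *q)
    {
        struct rsp *r;

        pthread_mutex_lock(&q->lock);
        r = q->free_head;
        if (r)
            q->free_head = r->next;
        pthread_mutex_unlock(&q->lock);

        if (!r) {                  /* pool empty: fall back to the heap */
            r = calloc(1, sizeof(*r));
            if (!r)
                return NULL;
            r->allocated = true;   /* remember to free, not recycle */
        }
        return r;
    }

    static void put_rsp(struct queue *q, struct rsp *r)
    {
        if (r->allocated) {        /* fallback descriptor: just free it */
            free(r);
            return;
        }
        pthread_mutex_lock(&q->lock);
        r->next = q->free_head;    /* recycle into the pool */
        q->free_head = r;
        pthread_mutex_unlock(&q->lock);
    }
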
362 r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL); in nvmet_rdma_alloc_rsp()
363 if (!r->req.rsp) in nvmet_rdma_alloc_rsp()
366 r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp, in nvmet_rdma_alloc_rsp()
367 sizeof(*r->req.rsp), DMA_TO_DEVICE); in nvmet_rdma_alloc_rsp()
371 r->send_sge.length = sizeof(*r->req.rsp); in nvmet_rdma_alloc_rsp()
386 kfree(r->req.rsp); in nvmet_rdma_alloc_rsp()
395 sizeof(*r->req.rsp), DMA_TO_DEVICE); in nvmet_rdma_free_rsp()
396 kfree(r->req.rsp); in nvmet_rdma_free_rsp()
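
nvmet_rdma_alloc_rsp() allocates the response capsule, DMA-maps it for the device, and points the send SGE at the mapping; the free path (lines 395-396) unmaps and frees in reverse order, and the kfree at line 386 unwinds a failed mapping. A sketch of that allocate/map/unwind shape, with placeholder map functions rather than the real ib_dma_* API:

    #include <stdint.h>
    #include <stdlib.h>

    struct cqe { unsigned char bytes[16]; };   /* stand-in completion entry */

    /* Placeholders for ib_dma_map_single()/unmap; 0 means failure. */
    static uintptr_t map_buf(void *p, size_t len) { (void)len; return (uintptr_t)p; }
    static void unmap_buf(uintptr_t h)            { (void)h; }

    struct rsp_buf {
        struct cqe *cqe;
        uintptr_t dma_addr;        /* what the send SGE would point at */
    };

    static int alloc_rsp_buf(struct rsp_buf *r)
    {
        r->cqe = malloc(sizeof(*r->cqe));
        if (!r->cqe)
            return -1;
        r->dma_addr = map_buf(r->cqe, sizeof(*r->cqe));
        if (!r->dma_addr)
            goto out_free;         /* unwind: mapping failed */
        return 0;
    out_free:
        free(r->cqe);
        return -1;
    }

    static void free_rsp_buf(struct rsp_buf *r)
    {
        unmap_buf(r->dma_addr);    /* reverse order of allocation */
        free(r->cqe);
    }
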
412 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps() local
414 ret = nvmet_rdma_alloc_rsp(ndev, rsp); in nvmet_rdma_alloc_rsps()
418 list_add_tail(&rsp->free_list, &queue->free_rsps); in nvmet_rdma_alloc_rsps()
425 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps() local
427 list_del(&rsp->free_list); in nvmet_rdma_alloc_rsps()
428 nvmet_rdma_free_rsp(ndev, rsp); in nvmet_rdma_alloc_rsps()
441 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_free_rsps() local
443 list_del(&rsp->free_list); in nvmet_rdma_free_rsps()
444 nvmet_rdma_free_rsp(ndev, rsp); in nvmet_rdma_free_rsps()
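
Both teardown loops above walk the rsps array, unlink each descriptor from the free list, and release it: the first (lines 425-428) is the error-unwind path of nvmet_rdma_alloc_rsps(), the second (lines 441-444) the normal nvmet_rdma_free_rsps(). A sketch of the init-loop-with-reverse-unwind idiom they follow (init_item/fini_item are illustrative stand-ins):

    struct item { int initialized; };

    static int init_item(struct item *it)  { it->initialized = 1; return 0; }
    static void fini_item(struct item *it) { it->initialized = 0; }

    /* Initialize n items; on failure, unwind the ones already done. */
    static int alloc_items(struct item *arr, int n)
    {
        int i, ret;

        for (i = 0; i < n; i++) {
            ret = init_item(&arr[i]);
            if (ret)
                goto out_unwind;
        }
        return 0;

    out_unwind:
        while (--i >= 0)           /* tear down in reverse order */
            fini_item(&arr[i]);
        return ret;
    }
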
473 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_process_wr_wait_list() local
476 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
478 list_del(&rsp->wait_list); in nvmet_rdma_process_wr_wait_list()
481 ret = nvmet_rdma_execute_command(rsp); in nvmet_rdma_process_wr_wait_list()
485 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
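
nvmet_rdma_process_wr_wait_list() drains commands that earlier failed to reserve send-queue slots: it pops each waiter, retries nvmet_rdma_execute_command(), and on failure puts the entry back at the head (list_add, line 485) and stops. A sketch of that drain-until-blocked loop on a singly linked list (illustrative names):

    #include <stdbool.h>
    #include <stddef.h>

    struct waiter { struct waiter *next; };

    static bool try_execute(struct waiter *w) { (void)w; return true; }

    /* Pop waiters and retry; stop (and push back) on the first failure. */
    static void process_wait_list(struct waiter **head)
    {
        while (*head) {
            struct waiter *w = *head;

            *head = w->next;       /* list_del() */
            if (!try_execute(w)) {
                w->next = *head;   /* re-queue at the head and stop */
                *head = w;
                break;
            }
        }
    }
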
493 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_release_rsp() argument
495 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp()
497 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
499 if (rsp->n_rdma) { in nvmet_rdma_release_rsp()
500 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, in nvmet_rdma_release_rsp()
501 queue->cm_id->port_num, rsp->req.sg, in nvmet_rdma_release_rsp()
502 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); in nvmet_rdma_release_rsp()
505 if (rsp->req.sg != rsp->cmd->inline_sg) in nvmet_rdma_release_rsp()
506 sgl_free(rsp->req.sg); in nvmet_rdma_release_rsp()
511 nvmet_rdma_put_rsp(rsp); in nvmet_rdma_release_rsp()
530 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_send_done() local
533 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_send_done()
539 nvmet_rdma_error_comp(rsp->queue); in nvmet_rdma_send_done()
545 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_queue_response() local
547 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
550 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) { in nvmet_rdma_queue_response()
551 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV; in nvmet_rdma_queue_response()
552 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey; in nvmet_rdma_queue_response()
554 rsp->send_wr.opcode = IB_WR_SEND; in nvmet_rdma_queue_response()
557 if (nvmet_rdma_need_data_out(rsp)) in nvmet_rdma_queue_response()
558 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, in nvmet_rdma_queue_response()
559 cm_id->port_num, NULL, &rsp->send_wr); in nvmet_rdma_queue_response()
561 first_wr = &rsp->send_wr; in nvmet_rdma_queue_response()
563 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
565 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
566 rsp->send_sge.addr, rsp->send_sge.length, in nvmet_rdma_queue_response()
571 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_queue_response()
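
nvmet_rdma_queue_response() selects IB_WR_SEND_WITH_INV when the host asked for remote invalidation of its rkey and plain IB_WR_SEND otherwise, then, if data must go back to the host, chains the RDMA WRITE work requests ahead of the send so a single post covers both. A reduced sketch of that opcode/chaining decision with stand-in types (not the real ib_* structures):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    enum wr_opcode { WR_SEND, WR_SEND_WITH_INV };

    struct send_wr {
        enum wr_opcode opcode;
        uint32_t invalidate_rkey;  /* only used with WR_SEND_WITH_INV */
        struct send_wr *next;      /* WR chain: data WRs first, send last */
    };

    struct rsp {
        bool invalidate;           /* host requested remote invalidation */
        bool data_out;             /* need RDMA WRITE back to the host */
        uint32_t rkey;
        struct send_wr send_wr;
        struct send_wr write_wr;   /* stands in for the rdma_rw WR chain */
    };

    /* Returns the first WR to post: write chain (if any), then the send. */
    static struct send_wr *build_response_chain(struct rsp *r)
    {
        if (r->invalidate) {
            r->send_wr.opcode = WR_SEND_WITH_INV;
            r->send_wr.invalidate_rkey = r->rkey;
        } else {
            r->send_wr.opcode = WR_SEND;
        }
        r->send_wr.next = NULL;

        if (r->data_out) {
            r->write_wr.next = &r->send_wr;  /* chain: WRITE then SEND */
            return &r->write_wr;
        }
        return &r->send_wr;
    }
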
577 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_read_data_done() local
581 WARN_ON(rsp->n_rdma <= 0); in nvmet_rdma_read_data_done()
582 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
583 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, in nvmet_rdma_read_data_done()
584 queue->cm_id->port_num, rsp->req.sg, in nvmet_rdma_read_data_done()
585 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); in nvmet_rdma_read_data_done()
586 rsp->n_rdma = 0; in nvmet_rdma_read_data_done()
589 nvmet_req_uninit(&rsp->req); in nvmet_rdma_read_data_done()
590 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_read_data_done()
599 nvmet_req_execute(&rsp->req); in nvmet_rdma_read_data_done()
602 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len, in nvmet_rdma_use_inline_sg() argument
609 sg = rsp->cmd->inline_sg; in nvmet_rdma_use_inline_sg()
622 rsp->req.sg = rsp->cmd->inline_sg; in nvmet_rdma_use_inline_sg()
623 rsp->req.sg_cnt = sg_count; in nvmet_rdma_use_inline_sg()
626 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_map_sgl_inline() argument
628 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl; in nvmet_rdma_map_sgl_inline()
632 if (!nvme_is_write(rsp->req.cmd)) in nvmet_rdma_map_sgl_inline()
635 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
644 nvmet_rdma_use_inline_sg(rsp, len, off); in nvmet_rdma_map_sgl_inline()
645 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA; in nvmet_rdma_map_sgl_inline()
646 rsp->req.transfer_len += len; in nvmet_rdma_map_sgl_inline()
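
For inline SGLs, nvmet_rdma_map_sgl_inline() rejects non-write commands (line 632), verifies that offset plus length fits inside the negotiated inline data area (line 635), and only then points req.sg at the pre-posted inline buffer. A sketch of the validation step, assuming a simplified descriptor (not the real nvme_sgl_desc layout, and stand-in status codes):

    #include <stdint.h>

    #define SC_INVALID_FIELD 0x2   /* stand-in NVMe status code */
    #define SC_SUCCESS       0x0

    struct inline_sgl {
        uint32_t off;              /* offset into the inline data area */
        uint32_t len;              /* payload length */
    };

    /* Validate an inline descriptor against the negotiated inline size. */
    static uint16_t map_sgl_inline(const struct inline_sgl *sgl,
                                   int cmd_is_write, uint32_t inline_data_size)
    {
        if (!cmd_is_write)         /* inline data implies a write command */
            return SC_INVALID_FIELD;
        if ((uint64_t)sgl->off + sgl->len > inline_data_size)
            return SC_INVALID_FIELD;  /* descriptor exceeds the area */
        /* ...the real code maps rsp->cmd->inline_sg here on success... */
        return SC_SUCCESS;
    }
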
650 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, in nvmet_rdma_map_sgl_keyed() argument
653 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_map_sgl_keyed()
663 rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt); in nvmet_rdma_map_sgl_keyed()
664 if (!rsp->req.sg) in nvmet_rdma_map_sgl_keyed()
667 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_map_sgl_keyed()
668 rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, in nvmet_rdma_map_sgl_keyed()
669 nvmet_data_dir(&rsp->req)); in nvmet_rdma_map_sgl_keyed()
672 rsp->req.transfer_len += len; in nvmet_rdma_map_sgl_keyed()
673 rsp->n_rdma += ret; in nvmet_rdma_map_sgl_keyed()
676 rsp->invalidate_rkey = key; in nvmet_rdma_map_sgl_keyed()
677 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY; in nvmet_rdma_map_sgl_keyed()
683 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_map_sgl() argument
685 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl; in nvmet_rdma_map_sgl()
691 return nvmet_rdma_map_sgl_inline(rsp); in nvmet_rdma_map_sgl()
699 return nvmet_rdma_map_sgl_keyed(rsp, sgl, true); in nvmet_rdma_map_sgl()
701 return nvmet_rdma_map_sgl_keyed(rsp, sgl, false); in nvmet_rdma_map_sgl()
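
nvmet_rdma_map_sgl() dispatches on the SGL descriptor type carried in the command: offset/inline descriptors go to the inline path (line 691), keyed descriptors to the RDMA path with remote invalidation (line 699) or without it (line 701). A sketch of the dispatch, with illustrative enum names in place of the NVMe descriptor constants:

    #include <stdint.h>

    enum sgl_type { SGL_INLINE_OFFSET, SGL_KEYED_INVALIDATE, SGL_KEYED };

    static uint16_t map_inline(void)          { return 0; }
    static uint16_t map_keyed(int invalidate) { (void)invalidate; return 0; }

    /* Dispatch on the SGL descriptor type, as nvmet_rdma_map_sgl() does. */
    static uint16_t map_sgl(enum sgl_type t)
    {
        switch (t) {
        case SGL_INLINE_OFFSET:        /* payload is inline in the capsule */
            return map_inline();
        case SGL_KEYED_INVALIDATE:     /* host wants its rkey invalidated */
            return map_keyed(1);
        case SGL_KEYED:
            return map_keyed(0);
        }
        return 0xffff;                 /* unknown type: reject (stand-in code) */
    }
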
712 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_execute_command() argument
714 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command()
716 if (unlikely(atomic_sub_return(1 + rsp->n_rdma, in nvmet_rdma_execute_command()
719 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
721 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
725 if (nvmet_rdma_need_data_in(rsp)) { in nvmet_rdma_execute_command()
726 if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp, in nvmet_rdma_execute_command()
727 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
728 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); in nvmet_rdma_execute_command()
730 nvmet_req_execute(&rsp->req); in nvmet_rdma_execute_command()
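
nvmet_rdma_execute_command() first reserves 1 + n_rdma send-queue slots (one for the response send plus one per RDMA work request) with atomic_sub_return(); if that would drive sq_wr_avail negative, the credits are returned (line 721) and the command is left for the wait list. A userspace sketch of the reserve/rollback step using C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Reserve one send slot plus n_rdma data-transfer slots, atomically.
     * On shortage, roll the reservation back and report failure so the
     * caller can park the command on a wait list. */
    static bool reserve_sq_slots(atomic_int *sq_wr_avail, int n_rdma)
    {
        int needed = 1 + n_rdma;

        if (atomic_fetch_sub(sq_wr_avail, needed) - needed < 0) {
            atomic_fetch_add(sq_wr_avail, needed);  /* rollback */
            return false;
        }
        return true;
    }
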
773 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_recv_done() local
792 rsp = nvmet_rdma_get_rsp(queue); in nvmet_rdma_recv_done()
793 if (unlikely(!rsp)) { in nvmet_rdma_recv_done()
802 rsp->queue = queue; in nvmet_rdma_recv_done()
803 rsp->cmd = cmd; in nvmet_rdma_recv_done()
804 rsp->flags = 0; in nvmet_rdma_recv_done()
805 rsp->req.cmd = cmd->nvme_cmd; in nvmet_rdma_recv_done()
806 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
807 rsp->n_rdma = 0; in nvmet_rdma_recv_done()
814 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_done()
816 nvmet_rdma_put_rsp(rsp); in nvmet_rdma_recv_done()
821 nvmet_rdma_handle_command(queue, rsp); in nvmet_rdma_recv_done()
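
The receive-completion path takes a descriptor from the pool, binds the just-received command buffer to it, and resets the per-command fields (lines 802-807); depending on queue state the command is then handled immediately, parked on rsp_wait_list (line 814), or returned via nvmet_rdma_put_rsp() (line 816). A sketch of that flow; the state names and the exact defer/drop split are illustrative, since only fragments of the real state handling are visible above:

    #include <stddef.h>

    enum q_state { Q_CONNECTING, Q_LIVE, Q_REMOVING };

    struct cmd { int dummy; };
    struct rsp { struct cmd *cmd; int flags; int n_rdma; };

    static void handle_command(struct rsp *r) { (void)r; }
    static void defer(struct rsp *r)          { (void)r; }
    static void discard(struct rsp *r)        { (void)r; }

    /* Per-receive-completion flow: bind cmd to rsp, reset per-command
     * state, then dispatch according to queue state. */
    static void recv_done(enum q_state state, struct rsp *r, struct cmd *c)
    {
        r->cmd = c;                /* pair the command buffer with this rsp */
        r->flags = 0;              /* reset per-command bookkeeping */
        r->n_rdma = 0;

        if (state != Q_LIVE) {
            if (state == Q_REMOVING)
                defer(r);          /* park on the wait list */
            else
                discard(r);        /* queue going away: drop it */
            return;
        }
        handle_command(r);
    }
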
1575 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_disc_port_addr() local
1577 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()