Lines Matching full:cmd

123 	struct nvmet_tcp_cmd	*cmd;  member
168 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
171 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_cmd_tag() argument
178 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
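
nvmet_tcp_cmd_tag() derives the command's transfer tag purely from its position in the queue's contiguous cmds array, so no separate tag allocator is needed; the same index is later written into the R2T PDU as ttag (line 468) and used to look the command back up when the matching H2C data PDU arrives (line 933). A minimal userspace sketch of that pointer-difference idiom, with illustrative demo_* names that are not part of the driver:

#include <stdio.h>
#include <stdint.h>

struct demo_cmd { int state; };

struct demo_queue {
	struct demo_cmd *cmds;   /* contiguous array of command slots */
	int nr_cmds;
};

static uint16_t demo_cmd_tag(struct demo_queue *q, struct demo_cmd *c)
{
	/* Same idea as nvmet_tcp_cmd_tag(): the tag is the slot's index. */
	return (uint16_t)(c - q->cmds);
}

int main(void)
{
	struct demo_cmd slots[4];
	struct demo_queue q = { .cmds = slots, .nr_cmds = 4 };

	printf("tag of slot 2 = %u\n", (unsigned)demo_cmd_tag(&q, &slots[2])); /* 2 */
	return 0;
}
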
181 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_data_in() argument
183 return nvme_is_write(cmd->req.cmd) && in nvmet_tcp_has_data_in()
184 cmd->rbytes_done < cmd->req.transfer_len; in nvmet_tcp_has_data_in()
187 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_in() argument
189 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; in nvmet_tcp_need_data_in()
192 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_out() argument
194 return !nvme_is_write(cmd->req.cmd) && in nvmet_tcp_need_data_out()
195 cmd->req.transfer_len > 0 && in nvmet_tcp_need_data_out()
196 !cmd->req.cqe->status; in nvmet_tcp_need_data_out()
199 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_inline_data() argument
201 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len && in nvmet_tcp_has_inline_data()
202 !cmd->rbytes_done; in nvmet_tcp_has_inline_data()
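
The direction helpers above are named from the controller's point of view: a host write is "data in" for the target (nvme_is_write() plus bytes still outstanding), a host read with a payload is "data out", and inline data is write payload carried in the same capsule, recognizable because pdu_len is set while rbytes_done is still zero. A standalone restatement of that logic, assuming simplified fields (is_write, transfer_len, rbytes_done, pdu_len, status) rather than the real nvmet_req:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the per-command bookkeeping (illustrative only). */
struct demo_cmd {
	bool     is_write;      /* host-to-controller transfer */
	uint32_t transfer_len;  /* total payload bytes */
	uint32_t rbytes_done;   /* bytes received so far */
	uint32_t pdu_len;       /* payload carried by the current PDU */
	uint16_t status;        /* non-zero once the request has failed */
};

static bool demo_has_data_in(const struct demo_cmd *c)
{
	return c->is_write && c->rbytes_done < c->transfer_len;
}

static bool demo_need_data_in(const struct demo_cmd *c)
{
	return demo_has_data_in(c) && !c->status;
}

static bool demo_need_data_out(const struct demo_cmd *c)
{
	return !c->is_write && c->transfer_len > 0 && !c->status;
}

static bool demo_has_inline_data(const struct demo_cmd *c)
{
	return c->is_write && c->pdu_len && !c->rbytes_done;
}

int main(void)
{
	struct demo_cmd wr = { .is_write = true, .transfer_len = 4096, .pdu_len = 4096 };
	struct demo_cmd rd = { .is_write = false, .transfer_len = 4096 };

	printf("write: need_data_in=%d has_inline=%d\n",
	       demo_need_data_in(&wr), demo_has_inline_data(&wr));
	printf("read:  need_data_out=%d\n", demo_need_data_out(&rd));
	return 0;
}
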
208 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_get_cmd() local
210 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
212 if (!cmd) in nvmet_tcp_get_cmd()
214 list_del_init(&cmd->entry); in nvmet_tcp_get_cmd()
216 cmd->rbytes_done = cmd->wbytes_done = 0; in nvmet_tcp_get_cmd()
217 cmd->pdu_len = 0; in nvmet_tcp_get_cmd()
218 cmd->pdu_recv = 0; in nvmet_tcp_get_cmd()
219 cmd->iov = NULL; in nvmet_tcp_get_cmd()
220 cmd->flags = 0; in nvmet_tcp_get_cmd()
221 return cmd; in nvmet_tcp_get_cmd()
224 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_put_cmd() argument
226 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
229 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
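
nvmet_tcp_get_cmd()/nvmet_tcp_put_cmd() implement a simple per-queue command pool: get pops the head of free_list and resets the per-I/O bookkeeping, put returns the slot unless it is the queue's dedicated connect command, which is never recycled through the list. A hedged userspace sketch of the same pattern; the driver keeps a FIFO list_head list (put to tail, get from head), while the sketch uses a LIFO singly linked stack for brevity:

#include <stdio.h>
#include <stddef.h>

struct demo_cmd {
	struct demo_cmd *next;   /* free-list linkage */
	size_t rbytes_done, wbytes_done, pdu_len;
};

struct demo_queue {
	struct demo_cmd *free_list;
	struct demo_cmd connect;     /* special slot, never pooled */
};

static struct demo_cmd *demo_get_cmd(struct demo_queue *q)
{
	struct demo_cmd *c = q->free_list;

	if (!c)
		return NULL;
	q->free_list = c->next;

	/* Reset per-I/O state, as nvmet_tcp_get_cmd() does. */
	c->rbytes_done = c->wbytes_done = c->pdu_len = 0;
	return c;
}

static void demo_put_cmd(struct demo_queue *q, struct demo_cmd *c)
{
	if (c == &q->connect)    /* the connect command is not recycled */
		return;
	c->next = q->free_list;
	q->free_list = c;
}

int main(void)
{
	struct demo_cmd slots[2] = { { .next = &slots[1] }, { .next = NULL } };
	struct demo_queue q = { .free_list = &slots[0] };

	struct demo_cmd *c = demo_get_cmd(&q);
	demo_put_cmd(&q, c);
	printf("slot 0 back at the head of the pool: %d\n",
	       demo_get_cmd(&q) == &slots[0]);
	return 0;
}
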
300 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_unmap_pdu_iovec() argument
305 sg = &cmd->req.sg[cmd->sg_idx]; in nvmet_tcp_unmap_pdu_iovec()
307 for (i = 0; i < cmd->nr_mapped; i++) in nvmet_tcp_unmap_pdu_iovec()
311 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_map_pdu_iovec() argument
313 struct kvec *iov = cmd->iov; in nvmet_tcp_map_pdu_iovec()
317 length = cmd->pdu_len; in nvmet_tcp_map_pdu_iovec()
318 cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); in nvmet_tcp_map_pdu_iovec()
319 offset = cmd->rbytes_done; in nvmet_tcp_map_pdu_iovec()
320 cmd->sg_idx = offset / PAGE_SIZE; in nvmet_tcp_map_pdu_iovec()
322 sg = &cmd->req.sg[cmd->sg_idx]; in nvmet_tcp_map_pdu_iovec()
336 iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, in nvmet_tcp_map_pdu_iovec()
337 cmd->nr_mapped, cmd->pdu_len); in nvmet_tcp_map_pdu_iovec()
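
nvmet_tcp_map_pdu_iovec() turns the PDU's payload window into a kvec array over the command's scatterlist pages: the starting scatterlist index comes from the bytes already received (rbytes_done / PAGE_SIZE), the number of kvec entries is DIV_ROUND_UP(pdu_len, PAGE_SIZE), and the result is handed to iov_iter_kvec() so sock_recvmsg() can fill the pages directly. A small sketch of just that index arithmetic (the intra-page offset handling is elided here), assuming 4 KiB pages:

#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Given how many bytes were already received and how many the current PDU
 * carries, compute the first page-sized chunk to map and how many chunks
 * are needed, the same arithmetic as nvmet_tcp_map_pdu_iovec(). */
static void demo_map_window(size_t rbytes_done, size_t pdu_len,
			    size_t *first_idx, size_t *nr_mapped)
{
	*first_idx = rbytes_done / DEMO_PAGE_SIZE;
	*nr_mapped = DIV_ROUND_UP(pdu_len, DEMO_PAGE_SIZE);
}

int main(void)
{
	size_t idx, nr;

	demo_map_window(8192, 6000, &idx, &nr);
	printf("start at sg[%zu], map %zu entries\n", idx, nr); /* sg[2], 2 entries */
	return 0;
}
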
357 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_map_data() argument
359 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_map_data()
367 if (!nvme_is_write(cmd->req.cmd)) in nvmet_tcp_map_data()
370 if (len > cmd->req.port->inline_data_size) in nvmet_tcp_map_data()
372 cmd->pdu_len = len; in nvmet_tcp_map_data()
374 cmd->req.transfer_len += len; in nvmet_tcp_map_data()
376 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); in nvmet_tcp_map_data()
377 if (!cmd->req.sg) in nvmet_tcp_map_data()
379 cmd->cur_sg = cmd->req.sg; in nvmet_tcp_map_data()
381 if (nvmet_tcp_has_data_in(cmd)) { in nvmet_tcp_map_data()
382 cmd->iov = kmalloc_array(cmd->req.sg_cnt, in nvmet_tcp_map_data()
383 sizeof(*cmd->iov), GFP_KERNEL); in nvmet_tcp_map_data()
384 if (!cmd->iov) in nvmet_tcp_map_data()
390 sgl_free(cmd->req.sg); in nvmet_tcp_map_data()
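
nvmet_tcp_map_data() reads the SGL descriptor out of the capsule; for a write, the payload may only be carried in the capsule if its length fits within the port's advertised inline_data_size (the check at line 370), after which the scatterlist is allocated with sgl_alloc() and, for data-in commands, a matching kvec array. A hedged sketch of that inline-data admission check, with an illustrative demo_can_inline() helper that is not part of the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: decide whether a write capsule's payload may be
 * carried inline, mirroring the len > inline_data_size check in
 * nvmet_tcp_map_data(). */
static bool demo_can_inline(bool is_write, uint32_t sgl_len,
			    uint32_t port_inline_data_size)
{
	return is_write && sgl_len > 0 && sgl_len <= port_inline_data_size;
}

int main(void)
{
	uint32_t inline_limit = 8192;   /* e.g. the port's inline_data_size */

	printf("4K write inline?  %d\n", demo_can_inline(true, 4096, inline_limit));
	printf("64K write inline? %d\n", demo_can_inline(true, 65536, inline_limit));
	return 0;
}
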
395 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_send_ddgst() argument
397 ahash_request_set_crypt(hash, cmd->req.sg, in nvmet_tcp_send_ddgst()
398 (void *)&cmd->exp_ddgst, cmd->req.transfer_len); in nvmet_tcp_send_ddgst()
403 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_recv_ddgst() argument
410 for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) { in nvmet_tcp_recv_ddgst()
415 ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0); in nvmet_tcp_recv_ddgst()
419 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_c2h_data_pdu() argument
421 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; in nvmet_setup_c2h_data_pdu()
422 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_c2h_data_pdu()
423 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
424 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
426 cmd->offset = 0; in nvmet_setup_c2h_data_pdu()
427 cmd->state = NVMET_TCP_SEND_DATA_PDU; in nvmet_setup_c2h_data_pdu()
436 cmd->req.transfer_len + ddgst); in nvmet_setup_c2h_data_pdu()
437 pdu->command_id = cmd->req.cqe->command_id; in nvmet_setup_c2h_data_pdu()
438 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); in nvmet_setup_c2h_data_pdu()
439 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); in nvmet_setup_c2h_data_pdu()
443 nvmet_tcp_send_ddgst(queue->snd_hash, cmd); in nvmet_setup_c2h_data_pdu()
446 if (cmd->queue->hdr_digest) { in nvmet_setup_c2h_data_pdu()
452 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_r2t_pdu() argument
454 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; in nvmet_setup_r2t_pdu()
455 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_r2t_pdu()
456 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_r2t_pdu()
458 cmd->offset = 0; in nvmet_setup_r2t_pdu()
459 cmd->state = NVMET_TCP_SEND_R2T; in nvmet_setup_r2t_pdu()
467 pdu->command_id = cmd->req.cmd->common.command_id; in nvmet_setup_r2t_pdu()
468 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
469 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); in nvmet_setup_r2t_pdu()
470 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); in nvmet_setup_r2t_pdu()
471 if (cmd->queue->hdr_digest) { in nvmet_setup_r2t_pdu()
477 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_response_pdu() argument
479 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; in nvmet_setup_response_pdu()
480 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_response_pdu()
481 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_response_pdu()
483 cmd->offset = 0; in nvmet_setup_response_pdu()
484 cmd->state = NVMET_TCP_SEND_RESPONSE; in nvmet_setup_response_pdu()
491 if (cmd->queue->hdr_digest) { in nvmet_setup_response_pdu()
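
The three setup helpers (C2H data, R2T, response) follow the same pattern: reset cmd->offset, switch the send state, fill in the wire header, and account for the optional header digest and, for data-bearing PDUs, the data digest in the advertised length (line 436). A short sketch of that length accounting, assuming NVMe/TCP's 4-byte CRC32C digests; the helper names are illustrative, not the driver's struct nvme_tcp_* layouts:

#include <stdio.h>
#include <stdint.h>

#define DEMO_DIGEST_LEN 4u   /* NVMe/TCP header/data digests are 4-byte CRC32C */

/* Total bytes on the wire for a header-only PDU (e.g. R2T, response). */
static uint32_t demo_plen_hdr_only(uint32_t hlen, int hdr_digest)
{
	return hlen + (hdr_digest ? DEMO_DIGEST_LEN : 0);
}

/* Total bytes on the wire for a C2H data PDU carrying transfer_len bytes. */
static uint32_t demo_plen_c2h_data(uint32_t hlen, uint32_t transfer_len,
				   int hdr_digest, int data_digest)
{
	return hlen + (hdr_digest ? DEMO_DIGEST_LEN : 0) +
	       transfer_len + (data_digest ? DEMO_DIGEST_LEN : 0);
}

int main(void)
{
	/* 24-byte C2H data header carrying a 4 KiB payload, both digests on. */
	printf("c2h plen = %u\n", demo_plen_c2h_data(24, 4096, 1, 1));
	printf("r2t plen = %u\n", demo_plen_hdr_only(24, 1));
	return 0;
}
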
500 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_process_resp_list() local
503 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); in nvmet_tcp_process_resp_list()
504 list_add(&cmd->entry, &queue->resp_send_list); in nvmet_tcp_process_resp_list()
537 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_queue_response() local
539 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_queue_response()
543 if (unlikely(cmd == queue->cmd)) { in nvmet_tcp_queue_response()
544 sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_queue_response()
553 len && len <= cmd->req.port->inline_data_size && in nvmet_tcp_queue_response()
554 nvme_is_write(cmd->req.cmd)) in nvmet_tcp_queue_response()
558 llist_add(&cmd->lentry, &queue->resp_list); in nvmet_tcp_queue_response()
559 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); in nvmet_tcp_queue_response()
562 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_execute_request() argument
564 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) in nvmet_tcp_execute_request()
565 nvmet_tcp_queue_response(&cmd->req); in nvmet_tcp_execute_request()
567 cmd->req.execute(&cmd->req); in nvmet_tcp_execute_request()
570 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_try_send_data_pdu() argument
572 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_data_pdu()
573 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; in nvmet_try_send_data_pdu()
576 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu), in nvmet_try_send_data_pdu()
577 offset_in_page(cmd->data_pdu) + cmd->offset, in nvmet_try_send_data_pdu()
582 cmd->offset += ret; in nvmet_try_send_data_pdu()
588 cmd->state = NVMET_TCP_SEND_DATA; in nvmet_try_send_data_pdu()
589 cmd->offset = 0; in nvmet_try_send_data_pdu()
593 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_data() argument
595 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_data()
598 while (cmd->cur_sg) { in nvmet_try_send_data()
599 struct page *page = sg_page(cmd->cur_sg); in nvmet_try_send_data()
600 u32 left = cmd->cur_sg->length - cmd->offset; in nvmet_try_send_data()
603 if ((!last_in_batch && cmd->queue->send_list_len) || in nvmet_try_send_data()
604 cmd->wbytes_done + left < cmd->req.transfer_len || in nvmet_try_send_data()
608 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset, in nvmet_try_send_data()
613 cmd->offset += ret; in nvmet_try_send_data()
614 cmd->wbytes_done += ret; in nvmet_try_send_data()
617 if (cmd->offset == cmd->cur_sg->length) { in nvmet_try_send_data()
618 cmd->cur_sg = sg_next(cmd->cur_sg); in nvmet_try_send_data()
619 cmd->offset = 0; in nvmet_try_send_data()
624 cmd->state = NVMET_TCP_SEND_DDGST; in nvmet_try_send_data()
625 cmd->offset = 0; in nvmet_try_send_data()
628 cmd->queue->snd_cmd = NULL; in nvmet_try_send_data()
629 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_data()
631 nvmet_setup_response_pdu(cmd); in nvmet_try_send_data()
636 kfree(cmd->iov); in nvmet_try_send_data()
637 sgl_free(cmd->req.sg); in nvmet_try_send_data()
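
nvmet_try_send_data() walks the command's scatterlist with kernel_sendpage(), tolerating partial sends: each successful call advances cmd->offset and cmd->wbytes_done, and only when offset reaches the current sg entry's length does cur_sg move on; once everything is out, the state becomes SEND_DDGST (data digest enabled) or the command completes, while the batching condition at line 603 keeps the socket corked when more payload or more queued commands are pending. A userspace sketch of the partial-send bookkeeping, using a fake transport that accepts only a few bytes per call; the driver returns and resumes later on a short send rather than looping, and all demo_* names are illustrative:

#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

/* Fake transport: accepts at most 5 bytes per call, like a short send. */
static ssize_t demo_send(const char *buf, size_t len)
{
	size_t n = len < 5 ? len : 5;
	(void)buf;
	return (ssize_t)n;
}

struct demo_seg { const char *data; size_t length; };

/* Send a list of segments, tracking per-segment offset and total bytes,
 * in the spirit of nvmet_try_send_data()'s cur_sg/offset/wbytes_done. */
static size_t demo_send_all(const struct demo_seg *segs, size_t nr_segs)
{
	size_t wbytes_done = 0;

	for (size_t i = 0; i < nr_segs; ) {
		size_t offset = 0;

		while (offset < segs[i].length) {
			ssize_t ret = demo_send(segs[i].data + offset,
						segs[i].length - offset);
			if (ret <= 0)
				return wbytes_done;   /* would block: retry later */
			offset += (size_t)ret;
			wbytes_done += (size_t)ret;
		}
		i++;                                  /* segment fully sent */
	}
	return wbytes_done;
}

int main(void)
{
	struct demo_seg segs[] = {
		{ "hello, ", 7 },
		{ "nvme/tcp", 8 },
	};

	printf("sent %zu bytes\n", demo_send_all(segs, 2));   /* 15 */
	return 0;
}
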
644 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, in nvmet_try_send_response() argument
647 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_response()
648 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; in nvmet_try_send_response()
652 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_response()
657 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu), in nvmet_try_send_response()
658 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags); in nvmet_try_send_response()
661 cmd->offset += ret; in nvmet_try_send_response()
667 kfree(cmd->iov); in nvmet_try_send_response()
668 sgl_free(cmd->req.sg); in nvmet_try_send_response()
669 cmd->queue->snd_cmd = NULL; in nvmet_try_send_response()
670 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_response()
674 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_r2t() argument
676 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_r2t()
677 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; in nvmet_try_send_r2t()
681 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_r2t()
686 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu), in nvmet_try_send_r2t()
687 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags); in nvmet_try_send_r2t()
690 cmd->offset += ret; in nvmet_try_send_r2t()
696 cmd->queue->snd_cmd = NULL; in nvmet_try_send_r2t()
700 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_ddgst() argument
702 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_ddgst()
705 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, in nvmet_try_send_ddgst()
706 .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset in nvmet_try_send_ddgst()
710 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_ddgst()
719 cmd->offset += ret; in nvmet_try_send_ddgst()
722 cmd->queue->snd_cmd = NULL; in nvmet_try_send_ddgst()
723 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_ddgst()
725 nvmet_setup_response_pdu(cmd); in nvmet_try_send_ddgst()
733 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; in nvmet_tcp_try_send_one() local
736 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_try_send_one()
737 cmd = nvmet_tcp_fetch_cmd(queue); in nvmet_tcp_try_send_one()
738 if (unlikely(!cmd)) in nvmet_tcp_try_send_one()
742 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { in nvmet_tcp_try_send_one()
743 ret = nvmet_try_send_data_pdu(cmd); in nvmet_tcp_try_send_one()
748 if (cmd->state == NVMET_TCP_SEND_DATA) { in nvmet_tcp_try_send_one()
749 ret = nvmet_try_send_data(cmd, last_in_batch); in nvmet_tcp_try_send_one()
754 if (cmd->state == NVMET_TCP_SEND_DDGST) { in nvmet_tcp_try_send_one()
755 ret = nvmet_try_send_ddgst(cmd, last_in_batch); in nvmet_tcp_try_send_one()
760 if (cmd->state == NVMET_TCP_SEND_R2T) { in nvmet_tcp_try_send_one()
761 ret = nvmet_try_send_r2t(cmd, last_in_batch); in nvmet_tcp_try_send_one()
766 if (cmd->state == NVMET_TCP_SEND_RESPONSE) in nvmet_tcp_try_send_one()
767 ret = nvmet_try_send_response(cmd, last_in_batch); in nvmet_tcp_try_send_one()
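
nvmet_tcp_try_send_one() is a flat state machine: whichever NVMET_TCP_SEND_* state the current command is in selects the helper that runs next, and each helper advances the state (DATA_PDU to DATA to DDGST, then RESPONSE or straight to releasing the command) before returning, so one call makes roughly one socket operation's worth of progress. A compact sketch of that dispatch pattern, with illustrative state names and the per-state work elided:

#include <stdio.h>

enum demo_send_state {            /* illustrative mirror of NVMET_TCP_SEND_* */
	DEMO_SEND_DATA_PDU,
	DEMO_SEND_DATA,
	DEMO_SEND_DDGST,
	DEMO_SEND_RESPONSE,
	DEMO_SEND_DONE,
};

struct demo_cmd { enum demo_send_state state; };

/* One step of progress: do the work for the current state (elided) and
 * advance, the way each nvmet_try_send_*() helper sets cmd->state. */
static int demo_try_send_one(struct demo_cmd *c)
{
	switch (c->state) {
	case DEMO_SEND_DATA_PDU: c->state = DEMO_SEND_DATA;     return 1;
	case DEMO_SEND_DATA:     c->state = DEMO_SEND_DDGST;    return 1;
	case DEMO_SEND_DDGST:    c->state = DEMO_SEND_RESPONSE; return 1;
	case DEMO_SEND_RESPONSE: c->state = DEMO_SEND_DONE;     return 1;
	case DEMO_SEND_DONE:     return 0;
	}
	return 0;
}

int main(void)
{
	struct demo_cmd c = { .state = DEMO_SEND_DATA_PDU };
	int steps = 0;

	while (demo_try_send_one(&c))
		steps++;
	printf("completed in %d steps\n", steps);   /* 4 */
	return 0;
}
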
802 queue->cmd = NULL; in nvmet_prepare_receive_pdu()
904 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) in nvmet_tcp_handle_req_failure() argument
906 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); in nvmet_tcp_handle_req_failure()
909 if (!nvme_is_write(cmd->req.cmd) || in nvmet_tcp_handle_req_failure()
910 data_len > cmd->req.port->inline_data_size) { in nvmet_tcp_handle_req_failure()
915 ret = nvmet_tcp_map_data(cmd); in nvmet_tcp_handle_req_failure()
923 nvmet_tcp_map_pdu_iovec(cmd); in nvmet_tcp_handle_req_failure()
924 cmd->flags |= NVMET_TCP_F_INIT_FAILED; in nvmet_tcp_handle_req_failure()
930 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_handle_h2c_data_pdu() local
933 cmd = &queue->cmds[data->ttag]; in nvmet_tcp_handle_h2c_data_pdu()
935 cmd = &queue->connect; in nvmet_tcp_handle_h2c_data_pdu()
937 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { in nvmet_tcp_handle_h2c_data_pdu()
940 cmd->rbytes_done); in nvmet_tcp_handle_h2c_data_pdu()
942 nvmet_req_complete(&cmd->req, in nvmet_tcp_handle_h2c_data_pdu()
947 cmd->pdu_len = le32_to_cpu(data->data_length); in nvmet_tcp_handle_h2c_data_pdu()
948 cmd->pdu_recv = 0; in nvmet_tcp_handle_h2c_data_pdu()
949 nvmet_tcp_map_pdu_iovec(cmd); in nvmet_tcp_handle_h2c_data_pdu()
950 queue->cmd = cmd; in nvmet_tcp_handle_h2c_data_pdu()
958 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
959 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
980 queue->cmd = nvmet_tcp_get_cmd(queue); in nvmet_tcp_done_recv_pdu()
981 if (unlikely(!queue->cmd)) { in nvmet_tcp_done_recv_pdu()
990 req = &queue->cmd->req; in nvmet_tcp_done_recv_pdu()
991 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); in nvmet_tcp_done_recv_pdu()
995 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n", in nvmet_tcp_done_recv_pdu()
996 req->cmd, req->cmd->common.command_id, in nvmet_tcp_done_recv_pdu()
997 req->cmd->common.opcode, in nvmet_tcp_done_recv_pdu()
998 le32_to_cpu(req->cmd->common.dptr.sgl.length)); in nvmet_tcp_done_recv_pdu()
1000 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); in nvmet_tcp_done_recv_pdu()
1004 ret = nvmet_tcp_map_data(queue->cmd); in nvmet_tcp_done_recv_pdu()
1007 if (nvmet_tcp_has_inline_data(queue->cmd)) in nvmet_tcp_done_recv_pdu()
1015 if (nvmet_tcp_need_data_in(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1016 if (nvmet_tcp_has_inline_data(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1018 nvmet_tcp_map_pdu_iovec(queue->cmd); in nvmet_tcp_done_recv_pdu()
1022 nvmet_tcp_queue_response(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1026 queue->cmd->req.execute(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
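
The tail of nvmet_tcp_done_recv_pdu() decides what happens once a command capsule has been parsed and its data mapped: a write that still needs data either starts receiving immediately when it carries inline data (lines 1016-1018) or is queued so the send side answers with an R2T (line 1022); everything else executes right away (line 1026). A hedged sketch of that three-way dispatch, reusing the direction predicates shown earlier and illustrative names:

#include <stdbool.h>
#include <stdio.h>

enum demo_action { DEMO_RECV_INLINE, DEMO_SEND_R2T, DEMO_EXECUTE };

/* Illustrative dispatch mirroring the end of nvmet_tcp_done_recv_pdu();
 * the two flags stand in for nvmet_tcp_need_data_in()/has_inline_data(). */
static enum demo_action demo_dispatch(bool need_data_in, bool has_inline_data)
{
	if (need_data_in)
		return has_inline_data ? DEMO_RECV_INLINE : DEMO_SEND_R2T;
	return DEMO_EXECUTE;
}

int main(void)
{
	printf("inline write -> %d (recv inline)\n", demo_dispatch(true, true));
	printf("large write  -> %d (send R2T)\n",    demo_dispatch(true, false));
	printf("read/flush   -> %d (execute)\n",     demo_dispatch(false, false));
	return 0;
}
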
1062 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1113 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_prep_recv_ddgst() argument
1115 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_prep_recv_ddgst()
1117 nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd); in nvmet_tcp_prep_recv_ddgst()
1125 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_data() local
1128 while (msg_data_left(&cmd->recv_msg)) { in nvmet_tcp_try_recv_data()
1129 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, in nvmet_tcp_try_recv_data()
1130 cmd->recv_msg.msg_flags); in nvmet_tcp_try_recv_data()
1134 cmd->pdu_recv += ret; in nvmet_tcp_try_recv_data()
1135 cmd->rbytes_done += ret; in nvmet_tcp_try_recv_data()
1138 nvmet_tcp_unmap_pdu_iovec(cmd); in nvmet_tcp_try_recv_data()
1140 nvmet_tcp_prep_recv_ddgst(cmd); in nvmet_tcp_try_recv_data()
1144 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_data()
1145 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_data()
1153 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_ddgst() local
1157 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, in nvmet_tcp_try_recv_ddgst()
1171 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { in nvmet_tcp_try_recv_ddgst()
1172 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", in nvmet_tcp_try_recv_ddgst()
1173 queue->idx, cmd->req.cmd->common.command_id, in nvmet_tcp_try_recv_ddgst()
1174 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1175 le32_to_cpu(cmd->exp_ddgst)); in nvmet_tcp_try_recv_ddgst()
1176 nvmet_tcp_finish_cmd(cmd); in nvmet_tcp_try_recv_ddgst()
1182 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_ddgst()
1183 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_ddgst()
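
nvmet_tcp_try_recv_ddgst() reads the 4-byte data digest that trails the H2C payload and compares it with exp_ddgst, which was accumulated over the received kvecs through the kernel crypto ahash API (the "crc32c" transform); a mismatch is logged and the command is torn down with nvmet_tcp_finish_cmd(). A standalone sketch of the check using a bitwise CRC32C (Castagnoli) implementation in place of the crypto API; treat it as illustrative, not the driver's code path:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Bitwise CRC32C (Castagnoli), the digest NVMe/TCP uses for HDGST/DDGST. */
static uint32_t demo_crc32c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = 0xFFFFFFFFu;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1u) ? 0x82F63B78u : 0u);
	}
	return ~crc;
}

int main(void)
{
	const char payload[] = "123456789";
	uint32_t exp_ddgst = demo_crc32c(payload, strlen(payload));
	uint32_t recv_ddgst = exp_ddgst;      /* pretend this came off the wire */

	/* Same decision as the queue->data_digest comparison in the driver. */
	if (exp_ddgst != recv_ddgst)
		printf("data digest error: recv %#x expected %#x\n",
		       recv_ddgst, exp_ddgst);
	else
		printf("digest ok: %#x\n", exp_ddgst); /* 0xe3069283 for "123456789" */
	return 0;
}
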
1315 c->req.cmd = &c->cmd_pdu->cmd; in nvmet_tcp_alloc_cmd()
1405 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_finish_cmd() argument
1407 nvmet_req_uninit(&cmd->req); in nvmet_tcp_finish_cmd()
1408 nvmet_tcp_unmap_pdu_iovec(cmd); in nvmet_tcp_finish_cmd()
1409 kfree(cmd->iov); in nvmet_tcp_finish_cmd()
1410 sgl_free(cmd->req.sg); in nvmet_tcp_finish_cmd()
1415 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_uninit_data_in_cmds() local
1418 for (i = 0; i < queue->nr_cmds; i++, cmd++) { in nvmet_tcp_uninit_data_in_cmds()
1419 if (nvmet_tcp_need_data_in(cmd)) in nvmet_tcp_uninit_data_in_cmds()
1420 nvmet_tcp_finish_cmd(cmd); in nvmet_tcp_uninit_data_in_cmds()
1790 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_disc_port_addr() local
1792 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_disc_port_addr()