Lines matching full:req (code-search output over the NVMe target core; each entry shows the source line number, the matching line, and the enclosing function)
44 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) in errno_to_nvme_status() argument
53 req->error_loc = offsetof(struct nvme_rw_command, length); in errno_to_nvme_status()
57 req->error_loc = offsetof(struct nvme_rw_command, slba); in errno_to_nvme_status()
61 req->error_loc = offsetof(struct nvme_common_command, opcode); in errno_to_nvme_status()
62 switch (req->cmd->common.opcode) { in errno_to_nvme_status()
72 req->error_loc = offsetof(struct nvme_rw_command, nsid); in errno_to_nvme_status()
78 req->error_loc = offsetof(struct nvme_common_command, opcode); in errno_to_nvme_status()
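The fragments above show only the error_loc assignments. Below is a reconstruction of the whole errno-to-NVMe-status mapping, consistent with those lines; the concrete errno cases and status codes follow the upstream nvmet code and can differ between kernel versions.

inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
        u16 status;

        switch (errno) {
        case 0:
                status = NVME_SC_SUCCESS;
                break;
        case -ENOSPC:
                req->error_loc = offsetof(struct nvme_rw_command, length);
                status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
                break;
        case -EREMOTEIO:
                req->error_loc = offsetof(struct nvme_rw_command, slba);
                status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
                break;
        case -EOPNOTSUPP:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                switch (req->cmd->common.opcode) {
                case nvme_cmd_dsm:
                case nvme_cmd_write_zeroes:
                        status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
                        break;
                default:
                        status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
                break;
        case -ENODATA:
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                status = NVME_SC_ACCESS_DENIED;
                break;
        case -EIO:
        default:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
        }

        return status;
}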
88 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf, in nvmet_copy_to_sgl() argument
91 if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { in nvmet_copy_to_sgl()
92 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_copy_to_sgl()
98 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len) in nvmet_copy_from_sgl() argument
100 if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { in nvmet_copy_from_sgl()
101 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_copy_from_sgl()
107 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len) in nvmet_zero_sgl() argument
109 if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) { in nvmet_zero_sgl()
110 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_zero_sgl()
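nvmet_copy_to_sgl(), nvmet_copy_from_sgl() and nvmet_zero_sgl() move data between a kernel buffer and the request's scatterlist (req->sg), setting error_loc to the DPTR field when the SGL is shorter than the requested transfer. A hypothetical handler sketch of the usage pattern; nvmet_execute_example_log is an invented name, not an in-tree function.

/* Hypothetical: return a small, zero-filled payload to the host. */
static void nvmet_execute_example_log(struct nvmet_req *req)
{
        u8 log[64] = { };
        u16 status;

        /* Copies into req->sg; yields an SGL error status on a short copy. */
        status = nvmet_copy_to_sgl(req, 0, log, sizeof(log));
        nvmet_req_complete(req, status);
}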
136 struct nvmet_req *req; in nvmet_async_events_failall() local
140 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_failall()
142 nvmet_req_complete(req, status); in nvmet_async_events_failall()
151 struct nvmet_req *req; in nvmet_async_events_process() local
157 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_process()
158 nvmet_set_result(req, nvmet_async_event_result(aen)); in nvmet_async_events_process()
164 trace_nvmet_async_event(ctrl, req->cqe->result.u32); in nvmet_async_events_process()
165 nvmet_req_complete(req, 0); in nvmet_async_events_process()
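The matched lines show a queued async event being paired with a spare AER command slot (nvmet_async_events_failall() instead completes every outstanding slot with an error status during teardown). A sketch of the processing loop suggested by these fragments; the list handling and the lock/unlock ordering around nvmet_req_complete() follow the upstream code and may vary by version.

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
        struct nvmet_req *req;

        mutex_lock(&ctrl->lock);
        while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
                struct nvmet_async_event *aen =
                        list_first_entry(&ctrl->async_events,
                                         struct nvmet_async_event, entry);

                /* Consume one posted AER command slot for this event. */
                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
                nvmet_set_result(req, nvmet_async_event_result(aen));

                list_del(&aen->entry);
                kfree(aen);

                /* Complete outside ctrl->lock, matching the fragment order above. */
                mutex_unlock(&ctrl->lock);
                trace_nvmet_async_event(ctrl, req->cqe->result.u32);
                nvmet_req_complete(req, 0);
                mutex_lock(&ctrl->lock);
        }
        mutex_unlock(&ctrl->lock);
}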
688 static void nvmet_update_sq_head(struct nvmet_req *req) in nvmet_update_sq_head() argument
690 if (req->sq->size) { in nvmet_update_sq_head()
694 old_sqhd = req->sq->sqhd; in nvmet_update_sq_head()
695 new_sqhd = (old_sqhd + 1) % req->sq->size; in nvmet_update_sq_head()
696 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) != in nvmet_update_sq_head()
699 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF); in nvmet_update_sq_head()
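nvmet_update_sq_head() advances the submission-queue head with a lock-free cmpxchg() loop and mirrors the result into the CQE. Reconstructed from the fragments above; the local variable declarations are inferred.

static void nvmet_update_sq_head(struct nvmet_req *req)
{
        if (req->sq->size) {
                u32 old_sqhd, new_sqhd;

                do {
                        old_sqhd = req->sq->sqhd;
                        new_sqhd = (old_sqhd + 1) % req->sq->size;
                } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
                                old_sqhd);
        }
        req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}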
702 static void nvmet_set_error(struct nvmet_req *req, u16 status) in nvmet_set_error() argument
704 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_error()
708 req->cqe->status = cpu_to_le16(status << 1); in nvmet_set_error()
710 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC) in nvmet_set_error()
719 new_error_slot->sqid = cpu_to_le16(req->sq->qid); in nvmet_set_error()
720 new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id); in nvmet_set_error()
722 new_error_slot->param_error_location = cpu_to_le16(req->error_loc); in nvmet_set_error()
723 new_error_slot->lba = cpu_to_le64(req->error_slba); in nvmet_set_error()
724 new_error_slot->nsid = req->cmd->common.nsid; in nvmet_set_error()
728 req->cqe->status |= cpu_to_le16(1 << 14); in nvmet_set_error()
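nvmet_set_error() stores the failure into the controller's error log slots and sets the MORE bit (bit 14) in the CQE status so the host knows an error log entry exists. A reconstruction; the error_lock/err_counter/slots bookkeeping on nvmet_ctrl is assumed from the upstream structure layout.

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_error_slot *new_error_slot;
        unsigned long flags;

        req->cqe->status = cpu_to_le16(status << 1);

        /* Nothing to log for connect-time errors or unknown locations. */
        if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
                return;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        ctrl->err_counter++;
        new_error_slot =
                &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

        new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
        new_error_slot->sqid = cpu_to_le16(req->sq->qid);
        new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
        new_error_slot->status_field = cpu_to_le16(status << 1);
        new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
        new_error_slot->lba = cpu_to_le64(req->error_slba);
        new_error_slot->nsid = req->cmd->common.nsid;
        spin_unlock_irqrestore(&ctrl->error_lock, flags);

        /* Set the MORE bit for this request. */
        req->cqe->status |= cpu_to_le16(1 << 14);
}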
731 static void __nvmet_req_complete(struct nvmet_req *req, u16 status) in __nvmet_req_complete() argument
733 if (!req->sq->sqhd_disabled) in __nvmet_req_complete()
734 nvmet_update_sq_head(req); in __nvmet_req_complete()
735 req->cqe->sq_id = cpu_to_le16(req->sq->qid); in __nvmet_req_complete()
736 req->cqe->command_id = req->cmd->common.command_id; in __nvmet_req_complete()
739 nvmet_set_error(req, status); in __nvmet_req_complete()
741 trace_nvmet_req_complete(req); in __nvmet_req_complete()
743 if (req->ns) in __nvmet_req_complete()
744 nvmet_put_namespace(req->ns); in __nvmet_req_complete()
745 req->ops->queue_response(req); in __nvmet_req_complete()
748 void nvmet_req_complete(struct nvmet_req *req, u16 status) in nvmet_req_complete() argument
750 __nvmet_req_complete(req, status); in nvmet_req_complete()
751 percpu_ref_put(&req->sq->ref); in nvmet_req_complete()
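Completion is split in two: __nvmet_req_complete() fills in the CQE, records any error, drops the namespace reference and hands the response to the transport, while the exported nvmet_req_complete() additionally drops the SQ percpu reference taken in nvmet_req_init(). Reconstructed from the fragments above.

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
        if (!req->sq->sqhd_disabled)
                nvmet_update_sq_head(req);
        req->cqe->sq_id = cpu_to_le16(req->sq->qid);
        req->cqe->command_id = req->cmd->common.command_id;

        if (unlikely(status))
                nvmet_set_error(req, status);

        trace_nvmet_req_complete(req);

        if (req->ns)
                nvmet_put_namespace(req->ns);
        req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
        __nvmet_req_complete(req, status);
        percpu_ref_put(&req->sq->ref);
}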
840 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req) in nvmet_io_cmd_check_access() argument
842 if (unlikely(req->ns->readonly)) { in nvmet_io_cmd_check_access()
843 switch (req->cmd->common.opcode) { in nvmet_io_cmd_check_access()
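nvmet_io_cmd_check_access() rejects writes to a read-only namespace; only opcodes that cannot modify media pass. The read/flush whitelist below follows the upstream code and is an assumption here.

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
        if (unlikely(req->ns->readonly)) {
                switch (req->cmd->common.opcode) {
                case nvme_cmd_read:
                case nvme_cmd_flush:
                        break;
                default:
                        return NVME_SC_NS_WRITE_PROTECTED;
                }
        }

        return 0;
}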
855 static u16 nvmet_parse_io_cmd(struct nvmet_req *req) in nvmet_parse_io_cmd() argument
857 struct nvme_command *cmd = req->cmd; in nvmet_parse_io_cmd()
860 ret = nvmet_check_ctrl_status(req, cmd); in nvmet_parse_io_cmd()
864 if (nvmet_req_passthru_ctrl(req)) in nvmet_parse_io_cmd()
865 return nvmet_parse_passthru_io_cmd(req); in nvmet_parse_io_cmd()
867 req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid); in nvmet_parse_io_cmd()
868 if (unlikely(!req->ns)) { in nvmet_parse_io_cmd()
869 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_parse_io_cmd()
872 ret = nvmet_check_ana_state(req->port, req->ns); in nvmet_parse_io_cmd()
874 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_parse_io_cmd()
877 ret = nvmet_io_cmd_check_access(req); in nvmet_parse_io_cmd()
879 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_parse_io_cmd()
883 if (req->ns->file) in nvmet_parse_io_cmd()
884 return nvmet_file_parse_io_cmd(req); in nvmet_parse_io_cmd()
886 return nvmet_bdev_parse_io_cmd(req); in nvmet_parse_io_cmd()
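nvmet_parse_io_cmd() is the I/O-queue dispatcher: it checks controller state, short-circuits to the passthru path, resolves the namespace, validates ANA state and write access, and finally defers to the file or block-device backend. Reconstruction consistent with the matched lines; the status returned for a failed namespace lookup is assumed from upstream.

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        if (nvmet_req_passthru_ctrl(req))
                return nvmet_parse_passthru_io_cmd(req);

        req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }
        ret = nvmet_check_ana_state(req->port, req->ns);
        if (unlikely(ret)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return ret;
        }
        ret = nvmet_io_cmd_check_access(req);
        if (unlikely(ret)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return ret;
        }

        if (req->ns->file)
                return nvmet_file_parse_io_cmd(req);

        return nvmet_bdev_parse_io_cmd(req);
}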
889 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, in nvmet_req_init() argument
892 u8 flags = req->cmd->common.flags; in nvmet_req_init()
895 req->cq = cq; in nvmet_req_init()
896 req->sq = sq; in nvmet_req_init()
897 req->ops = ops; in nvmet_req_init()
898 req->sg = NULL; in nvmet_req_init()
899 req->metadata_sg = NULL; in nvmet_req_init()
900 req->sg_cnt = 0; in nvmet_req_init()
901 req->metadata_sg_cnt = 0; in nvmet_req_init()
902 req->transfer_len = 0; in nvmet_req_init()
903 req->metadata_len = 0; in nvmet_req_init()
904 req->cqe->status = 0; in nvmet_req_init()
905 req->cqe->sq_head = 0; in nvmet_req_init()
906 req->ns = NULL; in nvmet_req_init()
907 req->error_loc = NVMET_NO_ERROR_LOC; in nvmet_req_init()
908 req->error_slba = 0; in nvmet_req_init()
912 req->error_loc = offsetof(struct nvme_common_command, flags); in nvmet_req_init()
923 req->error_loc = offsetof(struct nvme_common_command, flags); in nvmet_req_init()
928 if (unlikely(!req->sq->ctrl)) in nvmet_req_init()
930 status = nvmet_parse_connect_cmd(req); in nvmet_req_init()
931 else if (likely(req->sq->qid != 0)) in nvmet_req_init()
932 status = nvmet_parse_io_cmd(req); in nvmet_req_init()
934 status = nvmet_parse_admin_cmd(req); in nvmet_req_init()
939 trace_nvmet_req_init(req, req->cmd); in nvmet_req_init()
952 __nvmet_req_complete(req, status); in nvmet_req_init()
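nvmet_req_init() resets all per-request state, rejects fused commands and any SGL descriptor other than the fabrics METABUF form, parses the command according to queue type (connect before a controller exists, I/O on qid != 0, admin otherwise), and only then takes the SQ percpu reference; any failure is completed immediately through __nvmet_req_complete(). Reconstruction consistent with the fragments; the rejection status values are assumed from upstream.

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
        u8 flags = req->cmd->common.flags;
        u16 status;

        req->cq = cq;
        req->sq = sq;
        req->ops = ops;
        req->sg = NULL;
        req->metadata_sg = NULL;
        req->sg_cnt = 0;
        req->metadata_sg_cnt = 0;
        req->transfer_len = 0;
        req->metadata_len = 0;
        req->cqe->status = 0;
        req->cqe->sq_head = 0;
        req->ns = NULL;
        req->error_loc = NVMET_NO_ERROR_LOC;
        req->error_slba = 0;

        /* No support for fused commands yet. */
        if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
                req->error_loc = offsetof(struct nvme_common_command, flags);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        /* Fabrics commands must use the METABUF SGL descriptor type. */
        if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
                req->error_loc = offsetof(struct nvme_common_command, flags);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        if (unlikely(!req->sq->ctrl))
                /* Will return an error for any non-connect command. */
                status = nvmet_parse_connect_cmd(req);
        else if (likely(req->sq->qid != 0))
                status = nvmet_parse_io_cmd(req);
        else
                status = nvmet_parse_admin_cmd(req);

        if (status)
                goto fail;

        trace_nvmet_req_init(req, req->cmd);

        if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
                status = NVME_SC_INVALID_FIELD;
                goto fail;
        }

        if (sq->ctrl)
                sq->ctrl->cmd_seen = true;

        return true;

fail:
        __nvmet_req_complete(req, status);
        return false;
}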
957 void nvmet_req_uninit(struct nvmet_req *req) in nvmet_req_uninit() argument
959 percpu_ref_put(&req->sq->ref); in nvmet_req_uninit()
960 if (req->ns) in nvmet_req_uninit()
961 nvmet_put_namespace(req->ns); in nvmet_req_uninit()
965 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len) in nvmet_check_transfer_len() argument
967 if (unlikely(len != req->transfer_len)) { in nvmet_check_transfer_len()
968 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_check_transfer_len()
969 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); in nvmet_check_transfer_len()
977 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) in nvmet_check_data_len_lte() argument
979 if (unlikely(data_len > req->transfer_len)) { in nvmet_check_data_len_lte()
980 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_check_data_len_lte()
981 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); in nvmet_check_data_len_lte()
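Both checks complete the request themselves (SGL status, error_loc pointing at DPTR) and return false, so backend handlers simply bail out when they fail. A hypothetical sketch of the calling convention; nvmet_execute_example_write is an invented name, and nvmet_rw_data_len() is the in-tree helper that derives the expected byte count from the read/write command.

static void nvmet_execute_example_write(struct nvmet_req *req)
{
        /* Rejects a mismatched length and completes the request for us. */
        if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
                return;

        /* Backend-specific I/O submission would go here. */
        nvmet_req_complete(req, NVME_SC_SUCCESS);
}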
988 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req) in nvmet_data_transfer_len() argument
990 return req->transfer_len - req->metadata_len; in nvmet_data_transfer_len()
993 static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req) in nvmet_req_alloc_p2pmem_sgls() argument
995 req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt, in nvmet_req_alloc_p2pmem_sgls()
996 nvmet_data_transfer_len(req)); in nvmet_req_alloc_p2pmem_sgls()
997 if (!req->sg) in nvmet_req_alloc_p2pmem_sgls()
1000 if (req->metadata_len) { in nvmet_req_alloc_p2pmem_sgls()
1001 req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev, in nvmet_req_alloc_p2pmem_sgls()
1002 &req->metadata_sg_cnt, req->metadata_len); in nvmet_req_alloc_p2pmem_sgls()
1003 if (!req->metadata_sg) in nvmet_req_alloc_p2pmem_sgls()
1008 pci_p2pmem_free_sgl(req->p2p_dev, req->sg); in nvmet_req_alloc_p2pmem_sgls()
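For peer-to-peer capable requests, the data and metadata scatterlists are carved out of req->p2p_dev's P2P memory; nvmet_data_transfer_len() excludes the metadata bytes from the data allocation. Reconstructed from the fragments; the error-path labels are inferred.

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
        return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
{
        req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
                                       nvmet_data_transfer_len(req));
        if (!req->sg)
                goto out_err;

        if (req->metadata_len) {
                req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
                                &req->metadata_sg_cnt, req->metadata_len);
                if (!req->metadata_sg)
                        goto out_free_sg;
        }
        return 0;
out_free_sg:
        pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
        return -ENOMEM;
}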
1013 static bool nvmet_req_find_p2p_dev(struct nvmet_req *req) in nvmet_req_find_p2p_dev() argument
1018 if (req->sq->ctrl && req->sq->qid && req->ns) { in nvmet_req_find_p2p_dev()
1019 req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, in nvmet_req_find_p2p_dev()
1020 req->ns->nsid); in nvmet_req_find_p2p_dev()
1021 if (req->p2p_dev) in nvmet_req_find_p2p_dev()
1025 req->p2p_dev = NULL; in nvmet_req_find_p2p_dev()
1029 int nvmet_req_alloc_sgls(struct nvmet_req *req) in nvmet_req_alloc_sgls() argument
1031 if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req)) in nvmet_req_alloc_sgls()
1034 req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL, in nvmet_req_alloc_sgls()
1035 &req->sg_cnt); in nvmet_req_alloc_sgls()
1036 if (unlikely(!req->sg)) in nvmet_req_alloc_sgls()
1039 if (req->metadata_len) { in nvmet_req_alloc_sgls()
1040 req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL, in nvmet_req_alloc_sgls()
1041 &req->metadata_sg_cnt); in nvmet_req_alloc_sgls()
1042 if (unlikely(!req->metadata_sg)) in nvmet_req_alloc_sgls()
1048 sgl_free(req->sg); in nvmet_req_alloc_sgls()
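nvmet_req_find_p2p_dev() looks the namespace up in the controller's p2p_ns_map to decide whether P2P memory may be used for this request, and nvmet_req_alloc_sgls() tries that path first before falling back to ordinary sgl_alloc(). Reconstruction consistent with the fragments above.

static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
        if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
                return false;

        if (req->sq->ctrl && req->sq->qid && req->ns) {
                req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
                                                 req->ns->nsid);
                if (req->p2p_dev)
                        return true;
        }

        req->p2p_dev = NULL;
        return false;
}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
        /* Prefer P2P memory when a suitable device was found. */
        if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
                return 0;

        req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
                            &req->sg_cnt);
        if (unlikely(!req->sg))
                goto out;

        if (req->metadata_len) {
                req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
                                             &req->metadata_sg_cnt);
                if (unlikely(!req->metadata_sg))
                        goto out_free;
        }

        return 0;
out_free:
        sgl_free(req->sg);
out:
        return -ENOMEM;
}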
1054 void nvmet_req_free_sgls(struct nvmet_req *req) in nvmet_req_free_sgls() argument
1056 if (req->p2p_dev) { in nvmet_req_free_sgls()
1057 pci_p2pmem_free_sgl(req->p2p_dev, req->sg); in nvmet_req_free_sgls()
1058 if (req->metadata_sg) in nvmet_req_free_sgls()
1059 pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg); in nvmet_req_free_sgls()
1061 sgl_free(req->sg); in nvmet_req_free_sgls()
1062 if (req->metadata_sg) in nvmet_req_free_sgls()
1063 sgl_free(req->metadata_sg); in nvmet_req_free_sgls()
1066 req->sg = NULL; in nvmet_req_free_sgls()
1067 req->metadata_sg = NULL; in nvmet_req_free_sgls()
1068 req->sg_cnt = 0; in nvmet_req_free_sgls()
1069 req->metadata_sg_cnt = 0; in nvmet_req_free_sgls()
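The free path mirrors allocation: SGLs taken from P2P memory go back to req->p2p_dev, everything else through sgl_free(), and the request's SGL fields are cleared either way. Reconstructed from the fragments.

void nvmet_req_free_sgls(struct nvmet_req *req)
{
        if (req->p2p_dev) {
                pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
                if (req->metadata_sg)
                        pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
        } else {
                sgl_free(req->sg);
                if (req->metadata_sg)
                        sgl_free(req->metadata_sg);
        }

        req->sg = NULL;
        req->metadata_sg = NULL;
        req->sg_cnt = 0;
        req->metadata_sg_cnt = 0;
}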
1174 struct nvmet_req *req, struct nvmet_ctrl **ret) in nvmet_ctrl_find_get() argument
1180 subsys = nvmet_find_get_subsys(req->port, subsysnqn); in nvmet_ctrl_find_get()
1184 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); in nvmet_ctrl_find_get()
1205 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); in nvmet_ctrl_find_get()
1214 u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd) in nvmet_check_ctrl_status() argument
1216 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { in nvmet_check_ctrl_status()
1218 cmd->common.opcode, req->sq->qid); in nvmet_check_ctrl_status()
1222 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { in nvmet_check_ctrl_status()
1224 cmd->common.opcode, req->sq->qid); in nvmet_check_ctrl_status()
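nvmet_check_ctrl_status() refuses commands while the controller is not yet enabled (CC.EN) or not ready (CSTS.RDY). Reconstruction; the command-sequence-error status and the exact log messages are assumed from upstream.

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
        if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
                pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
                       cmd->common.opcode, req->sq->qid);
                return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
        }

        if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
                pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
                       cmd->common.opcode, req->sq->qid);
                return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
        }
        return 0;
}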
1254 struct nvmet_req *req) in nvmet_setup_p2p_ns_map() argument
1259 if (!req->p2p_client) in nvmet_setup_p2p_ns_map()
1262 ctrl->p2p_client = get_device(req->p2p_client); in nvmet_setup_p2p_ns_map()
1292 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp) in nvmet_alloc_ctrl() argument
1300 subsys = nvmet_find_get_subsys(req->port, subsysnqn); in nvmet_alloc_ctrl()
1304 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); in nvmet_alloc_ctrl()
1313 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); in nvmet_alloc_ctrl()
1328 ctrl->port = req->port; in nvmet_alloc_ctrl()
1371 ctrl->ops = req->ops; in nvmet_alloc_ctrl()
1390 nvmet_setup_p2p_ns_map(ctrl, req); in nvmet_alloc_ctrl()