Lines matching refs: sq (each hit shows the source line number, the matching code, and its enclosing function)
662 if (req->sq->size) { in nvmet_update_sq_head()
666 old_sqhd = req->sq->sqhd; in nvmet_update_sq_head()
667 new_sqhd = (old_sqhd + 1) % req->sq->size; in nvmet_update_sq_head()
668 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) != in nvmet_update_sq_head()
671 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF); in nvmet_update_sq_head()
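The matches at 662-671 trace a lock-free head update: the submission-queue head is advanced with a cmpxchg retry loop so that racing completion contexts need no lock, and the resulting value is reported back to the host in the CQE. A minimal sketch of the full loop, reassembled from the matched fragments (the u32 local types are an assumption):

    static void nvmet_update_sq_head(struct nvmet_req *req)
    {
    	if (req->sq->size) {
    		u32 old_sqhd, new_sqhd;

    		do {
    			/* read the current head and compute its successor, wrapping at size */
    			old_sqhd = req->sq->sqhd;
    			new_sqhd = (old_sqhd + 1) % req->sq->size;
    			/* retry if another completion advanced the head first */
    		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
    				old_sqhd);
    	}
    	/* report the head (possibly advanced further by a racer) in the CQE */
    	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
    }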
676 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_error()
691 new_error_slot->sqid = cpu_to_le16(req->sq->qid); in nvmet_set_error()
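The two nvmet_set_error() hits (676, 691) come from the per-controller error log: failed commands are recorded in a fixed-size ring of error slots, indexed by a running counter under a spinlock. A sketch of that ring update, assuming the slots/err_counter/error_lock members of struct nvmet_ctrl and the NVMET_ERROR_LOG_SLOTS ring size; field names beyond sqid are reconstructions:

    	struct nvmet_ctrl *ctrl = req->sq->ctrl;
    	struct nvme_error_slot *new_error_slot;
    	unsigned long flags;

    	spin_lock_irqsave(&ctrl->error_lock, flags);
    	ctrl->err_counter++;
    	/* the counter doubles as the ring index */
    	new_error_slot =
    		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

    	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
    	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
    	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
    	new_error_slot->status_field = cpu_to_le16(status << 1);
    	/* remaining slot fields (error location, LBA, nsid) are filled the same way */
    	spin_unlock_irqrestore(&ctrl->error_lock, flags);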
705 if (!req->sq->sqhd_disabled) in __nvmet_req_complete()
707 req->cqe->sq_id = cpu_to_le16(req->sq->qid); in __nvmet_req_complete()
723 percpu_ref_put(&req->sq->ref); in nvmet_req_complete()
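Lines 705-723 are the completion path: the internal helper fills in the CQE (skipping the head update when the transport disables it) and hands it to the transport, while the public wrapper drops the SQ reference taken at request admission. An abbreviated sketch; the queue_response hand-off and field order are reconstructions, and tracing/namespace bookkeeping is omitted:

    static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
    {
    	if (!req->sq->sqhd_disabled)
    		nvmet_update_sq_head(req);
    	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
    	req->cqe->command_id = req->cmd->common.command_id;

    	if (unlikely(status))
    		nvmet_set_error(req, status);

    	/* hand the finished CQE to the transport driver */
    	req->ops->queue_response(req);
    }

    void nvmet_req_complete(struct nvmet_req *req, u16 status)
    {
    	__nvmet_req_complete(req, status);
    	/* drop the SQ reference taken by nvmet_req_init() */
    	percpu_ref_put(&req->sq->ref);
    }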
736 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, in nvmet_sq_setup() argument
739 sq->sqhd = 0; in nvmet_sq_setup()
740 sq->qid = qid; in nvmet_sq_setup()
741 sq->size = size; in nvmet_sq_setup()
743 ctrl->sqs[qid] = sq; in nvmet_sq_setup()
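Reassembled from the fragments above, the setup helper is plain field initialization plus registering the queue in the controller's SQ table (the u16 parameter types are an assumption):

    void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
    		u16 qid, u16 size)
    {
    	sq->sqhd = 0;		/* head starts at slot 0 */
    	sq->qid = qid;
    	sq->size = size;

    	ctrl->sqs[qid] = sq;	/* make the queue visible to the controller */
    }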
748 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); in nvmet_confirm_sq() local
750 complete(&sq->confirm_done); in nvmet_confirm_sq()
753 void nvmet_sq_destroy(struct nvmet_sq *sq) in nvmet_sq_destroy() argument
759 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) in nvmet_sq_destroy()
760 nvmet_async_events_free(sq->ctrl); in nvmet_sq_destroy()
761 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); in nvmet_sq_destroy()
762 wait_for_completion(&sq->confirm_done); in nvmet_sq_destroy()
763 wait_for_completion(&sq->free_done); in nvmet_sq_destroy()
764 percpu_ref_exit(&sq->ref); in nvmet_sq_destroy()
766 if (sq->ctrl) { in nvmet_sq_destroy()
767 nvmet_ctrl_put(sq->ctrl); in nvmet_sq_destroy()
768 sq->ctrl = NULL; /* allows reusing the queue later */ in nvmet_sq_destroy()
775 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); in nvmet_sq_free() local
777 complete(&sq->free_done); in nvmet_sq_free()
780 int nvmet_sq_init(struct nvmet_sq *sq) in nvmet_sq_init() argument
784 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL); in nvmet_sq_init()
789 init_completion(&sq->free_done); in nvmet_sq_init()
790 init_completion(&sq->confirm_done); in nvmet_sq_init()
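Lines 748-790 together trace the percpu_ref lifecycle that guards every SQ. Setup registers nvmet_sq_free() as the release callback; teardown kills the ref with a confirmation callback, then waits on two completions in order: confirm_done fires once the ref has switched to atomic mode (so percpu_ref_tryget_live() can no longer admit new requests), and free_done fires when the last in-flight request drops its reference. A sketch of the pairing, reconstructed from the matches (the pr_err message is an assumption, and the async-event/ctrl-put steps shown in the listing are elided):

    int nvmet_sq_init(struct nvmet_sq *sq)
    {
    	int ret;

    	/* nvmet_sq_free() runs when the last reference is dropped */
    	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
    	if (ret) {
    		pr_err("percpu_ref init failed!\n");
    		return ret;
    	}
    	init_completion(&sq->free_done);
    	init_completion(&sq->confirm_done);
    	return 0;
    }

    void nvmet_sq_destroy(struct nvmet_sq *sq)
    {
    	/* after nvmet_confirm_sq() fires, no new request can enter the queue */
    	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
    	wait_for_completion(&sq->confirm_done);
    	/* in-flight requests still hold references; wait for the last put */
    	wait_for_completion(&sq->free_done);
    	percpu_ref_exit(&sq->ref);
    }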
834 req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid); in nvmet_parse_io_cmd()
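The lookup at 834 gates every I/O command on the nsid resolving to a live namespace. A sketch of that check; the error-location bookkeeping and exact status code are assumptions:

    	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
    	if (unlikely(!req->ns)) {
    		req->error_loc = offsetof(struct nvme_common_command, nsid);
    		return NVME_SC_INVALID_NS | NVME_SC_DNR;
    	}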
857 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops) in nvmet_req_init() argument
863 req->sq = sq; in nvmet_req_init()
894 if (unlikely(!req->sq->ctrl)) in nvmet_req_init()
897 else if (likely(req->sq->qid != 0)) in nvmet_req_init()
901 else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC) in nvmet_req_init()
909 if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { in nvmet_req_init()
914 if (sq->ctrl) in nvmet_req_init()
915 sq->ctrl->cmd_seen = true; in nvmet_req_init()
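Lines 894-915 show nvmet_req_init() routing the command by queue and then guarding admission with percpu_ref_tryget_live(), so a queue mid-teardown rejects new work. An excerpt-style sketch of that sequence, reconstructed from the matches; the rejection status code is an assumption, and the fail path (which completes the request with the error) is only noted in a comment:

    	/* route by queue: no controller yet means only connect is valid */
    	if (unlikely(!req->sq->ctrl))
    		status = nvmet_parse_connect_cmd(req);
    	else if (likely(req->sq->qid != 0))
    		status = nvmet_parse_io_cmd(req);
    	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
    		status = nvmet_parse_discovery_cmd(req);
    	else
    		status = nvmet_parse_admin_cmd(req);

    	if (status)
    		goto fail;	/* completes the request with the error status */

    	/* refuse the command if the queue is already being torn down */
    	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
    		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
    		goto fail;
    	}

    	if (sq->ctrl)
    		sq->ctrl->cmd_seen = true;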
927 percpu_ref_put(&req->sq->ref); in nvmet_req_uninit()
948 if (req->sq->ctrl && req->ns) in nvmet_req_alloc_sgl()
949 p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, in nvmet_req_alloc_sgl()
953 if (req->sq->qid && p2p_dev) { in nvmet_req_alloc_sgl()
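The nvmet_req_alloc_sgl() hits (948-953) are the peer-to-peer DMA fast path: the controller keeps a radix tree mapping nsid to a P2P memory provider, and P2P memory is only considered for I/O queues (qid != 0), never the admin queue. A sketch under those assumptions; the fallback to regular host memory is omitted:

    	struct pci_dev *p2p_dev = NULL;

    	if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
    		/* per-namespace P2P provider, keyed by nsid */
    		if (req->sq->ctrl && req->ns)
    			p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
    						    req->ns->nsid);

    		/* only I/O queues may use P2P memory */
    		if (req->sq->qid && p2p_dev) {
    			req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
    						       req->transfer_len);
    			if (req->sg) {
    				req->p2p_dev = p2p_dev;
    				return 0;
    			}
    		}
    		/* otherwise fall through to a normal host-memory SGL */
    	}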
1130 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { in nvmet_check_ctrl_status()
1132 cmd->common.opcode, req->sq->qid); in nvmet_check_ctrl_status()
1136 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { in nvmet_check_ctrl_status()
1138 cmd->common.opcode, req->sq->qid); in nvmet_check_ctrl_status()
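The final matches (1130-1138) enforce controller state before any command runs: the controller must be enabled (CC.EN) and ready (CSTS.RDY), otherwise the command is rejected as a sequence error. A sketch of the whole check; the log strings and status code are reconstructions:

    u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
    {
    	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
    		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
    		       cmd->common.opcode, req->sq->qid);
    		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
    	}

    	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
    		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
    		       cmd->common.opcode, req->sq->qid);
    		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
    	}
    	return 0;
    }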