/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"
17 
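/*
 * Fabrics Property Set: only a 32-bit write to the Controller Configuration
 * (CC) property is supported here; ATTRIB bit 0 set (a 64-bit access) or any
 * other property offset is rejected as an invalid field.
 */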
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
	u16 status = 0;

	if (!(req->cmd->prop_set.attrib & 1)) {
		u64 val = le64_to_cpu(req->cmd->prop_set.value);

		switch (le32_to_cpu(req->cmd->prop_set.offset)) {
		case NVME_REG_CC:
			nvmet_update_cc(req->sq->ctrl, val);
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	} else {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	nvmet_req_complete(req, status);
}

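/*
 * Fabrics Property Get: CAP is the only 64-bit property exposed (ATTRIB bit 0
 * set); VS, CC and CSTS are served as 32-bit reads.  Any other offset fails
 * with an invalid field status.
 */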
static void nvmet_execute_prop_get(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;
	u64 val = 0;

	if (req->cmd->prop_get.attrib & 1) {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_CAP:
			val = ctrl->cap;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	} else {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_VS:
			val = ctrl->subsys->ver;
			break;
		case NVME_REG_CC:
			val = ctrl->cc;
			break;
		case NVME_REG_CSTS:
			val = ctrl->csts;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	}

	req->rsp->result.u64 = cpu_to_le64(val);
	nvmet_req_complete(req, status);
}

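/*
 * Called for fabrics command capsules arriving on an already connected queue:
 * only Property Set and Property Get are accepted, and neither carries a data
 * transfer.
 */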
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->fabrics.fctype) {
	case nvme_fabrics_type_property_set:
		req->data_len = 0;
		req->execute = nvmet_execute_prop_set;
		break;
	case nvme_fabrics_type_property_get:
		req->data_len = 0;
		req->execute = nvmet_execute_prop_get;
		break;
	default:
		pr_err("received unknown capsule type 0x%x\n",
			cmd->fabrics.fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return 0;
}

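/*
 * Bind the queue pair carried by the connect request to @ctrl.  The cmpxchg
 * on sq->ctrl catches a second Connect on a queue that is already owned by a
 * controller.  SQSIZE is a 0's-based value on the wire, hence the +1 below.
 */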
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;

	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	}
	if (!sqsize) {
		pr_warn("queue size zero!\n");
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	/* note: convert queue size from 0's-based value to 1's-based value */
	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
	return 0;
}

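/*
 * Connect on the admin queue (qid 0): validate the connect data, allocate a
 * new controller for this host/subsystem pair and return its controller ID
 * in the completion result.
 */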
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 status = 0;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

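	/*
	 * Only the dynamic controller model is supported here, so the host
	 * must ask for a controller ID of 0xffff ("any available controller").
	 */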
	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
		pr_warn("connect attempt for invalid controller ID %#x\n",
			d->cntlid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		goto out;
	}

	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
				  le32_to_cpu(c->kato), &ctrl);
	if (status)
		goto out;
	uuid_copy(&ctrl->hostid, &d->hostid);

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		nvmet_ctrl_put(ctrl);
		goto out;
	}

	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
}

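/*
 * Connect on an I/O queue (qid > 0): look up the existing controller by the
 * controller ID returned from the admin connect and attach this queue to it.
 */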
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 qid = le16_to_cpu(c->qid);
	u16 status = 0;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

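	/* the connect data carries the cntlid of the already created controller */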
	status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
				     le16_to_cpu(d->cntlid),
				     req, &ctrl);
	if (status)
		goto out;

	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		/* pass back cntlid that had the issue of installing queue */
		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
		goto out_ctrl_put;
	}

	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

out_ctrl_put:
	nvmet_ctrl_put(ctrl);
	goto out;
}

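/*
 * Called for command capsules arriving on a queue that has no controller yet:
 * the only command accepted in this state is a Fabrics Connect, which always
 * carries struct nvmf_connect_data as its data transfer.
 */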
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (cmd->common.opcode != nvme_fabrics_command) {
		pr_err("invalid command 0x%x on unconnected queue.\n",
			cmd->fabrics.opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
		pr_err("invalid capsule type 0x%x on unconnected queue.\n",
			cmd->fabrics.fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	req->data_len = sizeof(struct nvmf_connect_data);
	if (cmd->connect.qid == 0)
		req->execute = nvmet_execute_admin_connect;
	else
		req->execute = nvmet_execute_io_connect;
	return 0;
}