// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

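/*
 * Queue a Discovery Log Page Change AEN to @ctrl if it is connected through
 * @port and has not masked that event via Asynchronous Event Configuration.
 */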
static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

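/*
 * The discovery log changed for @port: bump the generation counter and send
 * a Discovery Log Page Change AEN to every discovery controller that is
 * allowed to see @subsys (or to all of them if @subsys is NULL).
 */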
void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If the transport can signal the change to the host, notify it. */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

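/*
 * Send a Discovery Log Page Change AEN for @port to every discovery
 * controller, or only to the ones owned by @host if @host is non-NULL.
 */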
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

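/*
 * The discovery log changed for @subsys: bump the generation counter and
 * notify the discovery controllers on every port that exports @subsys,
 * optionally restricted to controllers owned by @host.
 */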
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

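/*
 * Add @port to @parent's referral list so it shows up as a referral entry
 * in @parent's discovery log page, and signal the log change.
 */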
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

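/*
 * Remove @port from @parent's referral list and signal the log change.
 * Both referral helpers take nvmet_config_sem for writing, so they are
 * serialized against concurrent log page reads, which take it for reading.
 */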
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

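/*
 * Fill in discovery log page entry @numrec from @port's transport address
 * attributes. @type distinguishes an NVM subsystem entry from a referral
 * to another discovery subsystem.
 */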
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" IPv4/IPv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The traddr reported in the discovery
 * log page must not contain that "any" IP address. If the transport
 * implements .disc_traddr, use it: the callback derives the discovery
 * traddr from the address the request arrived on when the port in
 * question listens on the "any" IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

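/*
 * Count the log page entries the requesting host may see: one per exported
 * subsystem it is allowed to access, plus one per referral. Called with
 * nvmet_config_sem held so the count stays valid while the page is built.
 */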
static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

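/*
 * Build and return the discovery log page: a header followed by one entry
 * per accessible subsystem and one per referral, then clear the pending
 * Discovery Log Change AEN bit since the host is now up to date.
 */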
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If the host provided a data length smaller than the header size,
	 * only the number of bytes requested by the host will be sent back.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}

	hdr = buffer;
	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME,
				r->disc_addr.traddr,
				NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

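/*
 * Identify Controller for the discovery subsystem: report the minimum a
 * discovery controller needs (serial, model, version, SGL support and the
 * optional Discovery Log Change AEN), with everything else left at zero.
 */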
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
		       strlen(ctrl->subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

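/*
 * Discovery controllers only implement the Keep Alive Timer and Async Event
 * Configuration features; any other Set Features FID is rejected.
 */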
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

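/*
 * Get Features mirrors Set Features above: only the Keep Alive Timer and
 * Async Event Configuration feature identifiers are supported.
 */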
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

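/*
 * Admin command parser for discovery controllers. Only the small command
 * set a discovery controller must support is accepted, and nothing is
 * accepted before the controller has been enabled (CSTS.RDY set).
 */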
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

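/*
 * Create the well-known discovery subsystem at module init; the discovery
 * controllers created later hang off this single subsystem.
 */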
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}