1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/parser.h>
20 #include <uapi/scsi/fc/fc_fs.h>
21 #include <uapi/scsi/fc/fc_els.h>
22 #include <linux/delay.h>
23 
24 #include "nvme.h"
25 #include "fabrics.h"
26 #include <linux/nvme-fc-driver.h>
27 #include <linux/nvme-fc.h>
28 
29 
30 /* *************************** Data Structures/Defines ****************** */
31 
32 
33 enum nvme_fc_queue_flags {
34 	NVME_FC_Q_CONNECTED = 0,
35 	NVME_FC_Q_LIVE,
36 };
37 
38 #define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
39 
40 struct nvme_fc_queue {
41 	struct nvme_fc_ctrl	*ctrl;
42 	struct device		*dev;
43 	struct blk_mq_hw_ctx	*hctx;
44 	void			*lldd_handle;
45 	size_t			cmnd_capsule_len;
46 	u32			qnum;
47 	u32			rqcnt;
48 	u32			seqno;
49 
50 	u64			connection_id;
51 	atomic_t		csn;
52 
53 	unsigned long		flags;
54 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
55 
56 enum nvme_fcop_flags {
57 	FCOP_FLAGS_TERMIO	= (1 << 0),
58 	FCOP_FLAGS_AEN		= (1 << 1),
59 };
60 
61 struct nvmefc_ls_req_op {
62 	struct nvmefc_ls_req	ls_req;
63 
64 	struct nvme_fc_rport	*rport;
65 	struct nvme_fc_queue	*queue;
66 	struct request		*rq;
67 	u32			flags;
68 
69 	int			ls_error;
70 	struct completion	ls_done;
71 	struct list_head	lsreq_list;	/* rport->ls_req_list */
72 	bool			req_queued;
73 };
74 
75 enum nvme_fcpop_state {
76 	FCPOP_STATE_UNINIT	= 0,
77 	FCPOP_STATE_IDLE	= 1,
78 	FCPOP_STATE_ACTIVE	= 2,
79 	FCPOP_STATE_ABORTED	= 3,
80 	FCPOP_STATE_COMPLETE	= 4,
81 };
82 
83 struct nvme_fc_fcp_op {
84 	struct nvme_request	nreq;		/*
85 						 * nvme/host/core.c
86 						 * requires this to be
87 						 * the 1st element in the
88 						 * private structure
89 						 * associated with the
90 						 * request.
91 						 */
92 	struct nvmefc_fcp_req	fcp_req;
93 
94 	struct nvme_fc_ctrl	*ctrl;
95 	struct nvme_fc_queue	*queue;
96 	struct request		*rq;
97 
98 	atomic_t		state;
99 	u32			flags;
100 	u32			rqno;
101 	u32			nents;
102 
103 	struct nvme_fc_cmd_iu	cmd_iu;
104 	struct nvme_fc_ersp_iu	rsp_iu;
105 };
106 
107 struct nvme_fc_lport {
108 	struct nvme_fc_local_port	localport;
109 
110 	struct ida			endp_cnt;
111 	struct list_head		port_list;	/* nvme_fc_port_list */
112 	struct list_head		endp_list;
113 	struct device			*dev;	/* physical device for dma */
114 	struct nvme_fc_port_template	*ops;
115 	struct kref			ref;
116 	atomic_t                        act_rport_cnt;
117 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
118 
119 struct nvme_fc_rport {
120 	struct nvme_fc_remote_port	remoteport;
121 
122 	struct list_head		endp_list; /* for lport->endp_list */
123 	struct list_head		ctrl_list;
124 	struct list_head		ls_req_list;
125 	struct device			*dev;	/* physical device for dma */
126 	struct nvme_fc_lport		*lport;
127 	spinlock_t			lock;
128 	struct kref			ref;
129 	atomic_t                        act_ctrl_cnt;
130 	unsigned long			dev_loss_end;
131 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
132 
133 enum nvme_fcctrl_flags {
134 	FCCTRL_TERMIO		= (1 << 0),
135 };
136 
137 struct nvme_fc_ctrl {
138 	spinlock_t		lock;
139 	struct nvme_fc_queue	*queues;
140 	struct device		*dev;
141 	struct nvme_fc_lport	*lport;
142 	struct nvme_fc_rport	*rport;
143 	u32			cnum;
144 
145 	bool			ioq_live;
146 	bool			assoc_active;
147 	u64			association_id;
148 
149 	struct list_head	ctrl_list;	/* rport->ctrl_list */
150 
151 	struct blk_mq_tag_set	admin_tag_set;
152 	struct blk_mq_tag_set	tag_set;
153 
154 	struct delayed_work	connect_work;
155 
156 	struct kref		ref;
157 	u32			flags;
158 	u32			iocnt;
159 	wait_queue_head_t	ioabort_wait;
160 
161 	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];
162 
163 	struct nvme_ctrl	ctrl;
164 };
165 
166 static inline struct nvme_fc_ctrl *
167 to_fc_ctrl(struct nvme_ctrl *ctrl)
168 {
169 	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
170 }
171 
172 static inline struct nvme_fc_lport *
173 localport_to_lport(struct nvme_fc_local_port *portptr)
174 {
175 	return container_of(portptr, struct nvme_fc_lport, localport);
176 }
177 
178 static inline struct nvme_fc_rport *
179 remoteport_to_rport(struct nvme_fc_remote_port *portptr)
180 {
181 	return container_of(portptr, struct nvme_fc_rport, remoteport);
182 }
183 
184 static inline struct nvmefc_ls_req_op *
185 ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
186 {
187 	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
188 }
189 
190 static inline struct nvme_fc_fcp_op *
191 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
192 {
193 	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
194 }
195 
196 
197 
198 /* *************************** Globals **************************** */
199 
200 
201 static DEFINE_SPINLOCK(nvme_fc_lock);
202 
203 static LIST_HEAD(nvme_fc_lport_list);
204 static DEFINE_IDA(nvme_fc_local_port_cnt);
205 static DEFINE_IDA(nvme_fc_ctrl_cnt);
206 
207 
208 
209 /*
210  * These items are short-term. They will eventually be moved into
211  * a generic FC class. See comments in module init.
212  */
213 static struct class *fc_class;
214 static struct device *fc_udev_device;
215 
216 
217 /* *********************** FC-NVME Port Management ************************ */
218 
219 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
220 			struct nvme_fc_queue *, unsigned int);
221 
222 static void
223 nvme_fc_free_lport(struct kref *ref)
224 {
225 	struct nvme_fc_lport *lport =
226 		container_of(ref, struct nvme_fc_lport, ref);
227 	unsigned long flags;
228 
229 	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
230 	WARN_ON(!list_empty(&lport->endp_list));
231 
232 	/* remove from transport list */
233 	spin_lock_irqsave(&nvme_fc_lock, flags);
234 	list_del(&lport->port_list);
235 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
236 
237 	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
238 	ida_destroy(&lport->endp_cnt);
239 
240 	put_device(lport->dev);
241 
242 	kfree(lport);
243 }
244 
245 static void
246 nvme_fc_lport_put(struct nvme_fc_lport *lport)
247 {
248 	kref_put(&lport->ref, nvme_fc_free_lport);
249 }
250 
251 static int
252 nvme_fc_lport_get(struct nvme_fc_lport *lport)
253 {
254 	return kref_get_unless_zero(&lport->ref);
255 }
256 
257 
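/*
 * Search the lport list for a previously deleted localport with the same
 * node/port name. If one is found on the same physical device and its
 * references have not yet expired, take a reference and transition it
 * back online so remoteports and controller reconnects can resume.
 */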
258 static struct nvme_fc_lport *
259 nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
260 			struct nvme_fc_port_template *ops,
261 			struct device *dev)
262 {
263 	struct nvme_fc_lport *lport;
264 	unsigned long flags;
265 
266 	spin_lock_irqsave(&nvme_fc_lock, flags);
267 
268 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
269 		if (lport->localport.node_name != pinfo->node_name ||
270 		    lport->localport.port_name != pinfo->port_name)
271 			continue;
272 
273 		if (lport->dev != dev) {
274 			lport = ERR_PTR(-EXDEV);
275 			goto out_done;
276 		}
277 
278 		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
279 			lport = ERR_PTR(-EEXIST);
280 			goto out_done;
281 		}
282 
283 		if (!nvme_fc_lport_get(lport)) {
284 			/*
285 			 * fails if ref cnt already 0. If so,
286 			 * act as if lport already deleted
287 			 */
288 			lport = NULL;
289 			goto out_done;
290 		}
291 
292 		/* resume the lport */
293 
294 		lport->ops = ops;
295 		lport->localport.port_role = pinfo->port_role;
296 		lport->localport.port_id = pinfo->port_id;
297 		lport->localport.port_state = FC_OBJSTATE_ONLINE;
298 
299 		spin_unlock_irqrestore(&nvme_fc_lock, flags);
300 
301 		return lport;
302 	}
303 
304 	lport = NULL;
305 
306 out_done:
307 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
308 
309 	return lport;
310 }
311 
312 /**
313  * nvme_fc_register_localport - transport entry point called by an
314  *                              LLDD to register the existence of a NVME
315  *                              host FC port.
316  * @pinfo:     pointer to information about the port to be registered
317  * @template:  LLDD entrypoints and operational parameters for the port
318  * @dev:       physical hardware device node port corresponds to. Will be
319  *             used for DMA mappings
320  * @lport_p:   pointer to a local port pointer. Upon success, the routine
321  *             will allocate a nvme_fc_local_port structure and place its
322  *             address in the local port pointer. Upon failure, local port
323  *             pointer will be set to 0.
324  *
325  * Returns:
326  * a completion status. Must be 0 upon success; a negative errno
327  * (ex: -ENXIO) upon failure.
328  */
329 int
330 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
331 			struct nvme_fc_port_template *template,
332 			struct device *dev,
333 			struct nvme_fc_local_port **portptr)
334 {
335 	struct nvme_fc_lport *newrec;
336 	unsigned long flags;
337 	int ret, idx;
338 
339 	if (!template->localport_delete || !template->remoteport_delete ||
340 	    !template->ls_req || !template->fcp_io ||
341 	    !template->ls_abort || !template->fcp_abort ||
342 	    !template->max_hw_queues || !template->max_sgl_segments ||
343 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
344 		ret = -EINVAL;
345 		goto out_reghost_failed;
346 	}
347 
348 	/*
349 	 * look to see if there is already a localport that had been
350 	 * deregistered and is in the process of waiting for all the
351 	 * references to be fully removed.  If the references haven't
352 	 * expired, we can simply re-enable the localport. Remoteports
353 	 * and controller reconnections should resume naturally.
354 	 */
355 	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
356 
357 	/* found an lport, but something about its state is bad */
358 	if (IS_ERR(newrec)) {
359 		ret = PTR_ERR(newrec);
360 		goto out_reghost_failed;
361 
362 	/* found existing lport, which was resumed */
363 	} else if (newrec) {
364 		*portptr = &newrec->localport;
365 		return 0;
366 	}
367 
368 	/* nothing found - allocate a new localport struct */
369 
370 	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
371 			 GFP_KERNEL);
372 	if (!newrec) {
373 		ret = -ENOMEM;
374 		goto out_reghost_failed;
375 	}
376 
377 	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
378 	if (idx < 0) {
379 		ret = -ENOSPC;
380 		goto out_fail_kfree;
381 	}
382 
383 	if (!get_device(dev) && dev) {
384 		ret = -ENODEV;
385 		goto out_ida_put;
386 	}
387 
388 	INIT_LIST_HEAD(&newrec->port_list);
389 	INIT_LIST_HEAD(&newrec->endp_list);
390 	kref_init(&newrec->ref);
391 	atomic_set(&newrec->act_rport_cnt, 0);
392 	newrec->ops = template;
393 	newrec->dev = dev;
394 	ida_init(&newrec->endp_cnt);
395 	newrec->localport.private = &newrec[1];
396 	newrec->localport.node_name = pinfo->node_name;
397 	newrec->localport.port_name = pinfo->port_name;
398 	newrec->localport.port_role = pinfo->port_role;
399 	newrec->localport.port_id = pinfo->port_id;
400 	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
401 	newrec->localport.port_num = idx;
402 
403 	spin_lock_irqsave(&nvme_fc_lock, flags);
404 	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
405 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
406 
407 	if (dev)
408 		dma_set_seg_boundary(dev, template->dma_boundary);
409 
410 	*portptr = &newrec->localport;
411 	return 0;
412 
413 out_ida_put:
414 	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
415 out_fail_kfree:
416 	kfree(newrec);
417 out_reghost_failed:
418 	*portptr = NULL;
419 
420 	return ret;
421 }
422 EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
423 
424 /**
425  * nvme_fc_unregister_localport - transport entry point called by an
426  *                              LLDD to deregister/remove a previously
427  *                              registered NVME host FC port.
428  * @localport: pointer to the (registered) local port that is to be
429  *             deregistered.
430  *
431  * Returns:
432  * a completion status. Must be 0 upon success; a negative errno
433  * (ex: -ENXIO) upon failure.
434  */
435 int
436 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
437 {
438 	struct nvme_fc_lport *lport = localport_to_lport(portptr);
439 	unsigned long flags;
440 
441 	if (!portptr)
442 		return -EINVAL;
443 
444 	spin_lock_irqsave(&nvme_fc_lock, flags);
445 
446 	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
447 		spin_unlock_irqrestore(&nvme_fc_lock, flags);
448 		return -EINVAL;
449 	}
450 	portptr->port_state = FC_OBJSTATE_DELETED;
451 
452 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
453 
454 	if (atomic_read(&lport->act_rport_cnt) == 0)
455 		lport->ops->localport_delete(&lport->localport);
456 
457 	nvme_fc_lport_put(lport);
458 
459 	return 0;
460 }
461 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
462 
463 /*
464  * TRADDR strings, per FC-NVME are fixed format:
465  *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
466  * udev event will only differ by prefix of what field is
467  * being specified:
468  *    "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
469  *  19 + 43 + null_fudge = 64 characters
470  */
471 #define FCNVME_TRADDR_LENGTH		64
472 
473 static void
474 nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
475 		struct nvme_fc_rport *rport)
476 {
477 	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
478 	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
479 	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
480 
481 	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
482 		return;
483 
484 	snprintf(hostaddr, sizeof(hostaddr),
485 		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
486 		lport->localport.node_name, lport->localport.port_name);
487 	snprintf(tgtaddr, sizeof(tgtaddr),
488 		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
489 		rport->remoteport.node_name, rport->remoteport.port_name);
490 	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
491 }
492 
493 static void
494 nvme_fc_free_rport(struct kref *ref)
495 {
496 	struct nvme_fc_rport *rport =
497 		container_of(ref, struct nvme_fc_rport, ref);
498 	struct nvme_fc_lport *lport =
499 			localport_to_lport(rport->remoteport.localport);
500 	unsigned long flags;
501 
502 	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
503 	WARN_ON(!list_empty(&rport->ctrl_list));
504 
505 	/* remove from lport list */
506 	spin_lock_irqsave(&nvme_fc_lock, flags);
507 	list_del(&rport->endp_list);
508 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
509 
510 	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
511 
512 	kfree(rport);
513 
514 	nvme_fc_lport_put(lport);
515 }
516 
517 static void
518 nvme_fc_rport_put(struct nvme_fc_rport *rport)
519 {
520 	kref_put(&rport->ref, nvme_fc_free_rport);
521 }
522 
523 static int
524 nvme_fc_rport_get(struct nvme_fc_rport *rport)
525 {
526 	return kref_get_unless_zero(&rport->ref);
527 }
528 
529 static void
530 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
531 {
532 	switch (ctrl->ctrl.state) {
533 	case NVME_CTRL_NEW:
534 	case NVME_CTRL_CONNECTING:
535 		/*
536 		 * As all reconnects were suppressed, schedule a
537 		 * connect.
538 		 */
539 		dev_info(ctrl->ctrl.device,
540 			"NVME-FC{%d}: connectivity re-established. "
541 			"Attempting reconnect\n", ctrl->cnum);
542 
543 		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
544 		break;
545 
546 	case NVME_CTRL_RESETTING:
547 		/*
548 		 * Controller is already in the process of terminating the
549 		 * association. No need to do anything further. The reconnect
550 		 * step will naturally occur after the reset completes.
551 		 */
552 		break;
553 
554 	default:
555 		/* no action to take - let it delete */
556 		break;
557 	}
558 }
559 
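/*
 * Search the lport's remoteport list for an rport with matching WWNs that
 * was previously unregistered and is still waiting out dev_loss_tmo. If
 * found, mark it online again and kick a reconnect on every controller
 * bound to it.
 */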
560 static struct nvme_fc_rport *
561 nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
562 				struct nvme_fc_port_info *pinfo)
563 {
564 	struct nvme_fc_rport *rport;
565 	struct nvme_fc_ctrl *ctrl;
566 	unsigned long flags;
567 
568 	spin_lock_irqsave(&nvme_fc_lock, flags);
569 
570 	list_for_each_entry(rport, &lport->endp_list, endp_list) {
571 		if (rport->remoteport.node_name != pinfo->node_name ||
572 		    rport->remoteport.port_name != pinfo->port_name)
573 			continue;
574 
575 		if (!nvme_fc_rport_get(rport)) {
576 			rport = ERR_PTR(-ENOLCK);
577 			goto out_done;
578 		}
579 
580 		spin_unlock_irqrestore(&nvme_fc_lock, flags);
581 
582 		spin_lock_irqsave(&rport->lock, flags);
583 
584 		/* has it been unregistered */
585 		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
586 			/* means lldd called us twice */
587 			spin_unlock_irqrestore(&rport->lock, flags);
588 			nvme_fc_rport_put(rport);
589 			return ERR_PTR(-ESTALE);
590 		}
591 
592 		rport->remoteport.port_role = pinfo->port_role;
593 		rport->remoteport.port_id = pinfo->port_id;
594 		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
595 		rport->dev_loss_end = 0;
596 
597 		/*
598 		 * kick off a reconnect attempt on all associations to the
599 		 * remote port. A successful reconnect will resume i/o.
600 		 */
601 		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
602 			nvme_fc_resume_controller(ctrl);
603 
604 		spin_unlock_irqrestore(&rport->lock, flags);
605 
606 		return rport;
607 	}
608 
609 	rport = NULL;
610 
611 out_done:
612 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
613 
614 	return rport;
615 }
616 
617 static inline void
618 __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
619 			struct nvme_fc_port_info *pinfo)
620 {
621 	if (pinfo->dev_loss_tmo)
622 		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
623 	else
624 		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
625 }
626 
627 /**
628  * nvme_fc_register_remoteport - transport entry point called by an
629  *                              LLDD to register the existence of a NVME
630  *                              subsystem FC port on its fabric.
631  * @localport: pointer to the (registered) local port that the remote
632  *             subsystem port is connected to.
633  * @pinfo:     pointer to information about the port to be registered
634  * @rport_p:   pointer to a remote port pointer. Upon success, the routine
635  *             will allocate a nvme_fc_remote_port structure and place its
636  *             address in the remote port pointer. Upon failure, remote port
637  *             pointer will be set to 0.
638  *
639  * Returns:
640  * a completion status. Must be 0 upon success; a negative errno
641  * (ex: -ENXIO) upon failure.
642  */
643 int
644 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
645 				struct nvme_fc_port_info *pinfo,
646 				struct nvme_fc_remote_port **portptr)
647 {
648 	struct nvme_fc_lport *lport = localport_to_lport(localport);
649 	struct nvme_fc_rport *newrec;
650 	unsigned long flags;
651 	int ret, idx;
652 
653 	if (!nvme_fc_lport_get(lport)) {
654 		ret = -ESHUTDOWN;
655 		goto out_reghost_failed;
656 	}
657 
658 	/*
659 	 * look to see if there is already a remoteport that is waiting
660 	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
661 	 * If so, transition to it and reconnect.
662 	 */
663 	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
664 
665 	/* found an rport, but something about its state is bad */
666 	if (IS_ERR(newrec)) {
667 		ret = PTR_ERR(newrec);
668 		goto out_lport_put;
669 
670 	/* found existing rport, which was resumed */
671 	} else if (newrec) {
672 		nvme_fc_lport_put(lport);
673 		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
674 		nvme_fc_signal_discovery_scan(lport, newrec);
675 		*portptr = &newrec->remoteport;
676 		return 0;
677 	}
678 
679 	/* nothing found - allocate a new remoteport struct */
680 
681 	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
682 			 GFP_KERNEL);
683 	if (!newrec) {
684 		ret = -ENOMEM;
685 		goto out_lport_put;
686 	}
687 
688 	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
689 	if (idx < 0) {
690 		ret = -ENOSPC;
691 		goto out_kfree_rport;
692 	}
693 
694 	INIT_LIST_HEAD(&newrec->endp_list);
695 	INIT_LIST_HEAD(&newrec->ctrl_list);
696 	INIT_LIST_HEAD(&newrec->ls_req_list);
697 	kref_init(&newrec->ref);
698 	atomic_set(&newrec->act_ctrl_cnt, 0);
699 	spin_lock_init(&newrec->lock);
700 	newrec->remoteport.localport = &lport->localport;
701 	newrec->dev = lport->dev;
702 	newrec->lport = lport;
703 	newrec->remoteport.private = &newrec[1];
704 	newrec->remoteport.port_role = pinfo->port_role;
705 	newrec->remoteport.node_name = pinfo->node_name;
706 	newrec->remoteport.port_name = pinfo->port_name;
707 	newrec->remoteport.port_id = pinfo->port_id;
708 	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
709 	newrec->remoteport.port_num = idx;
710 	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
711 
712 	spin_lock_irqsave(&nvme_fc_lock, flags);
713 	list_add_tail(&newrec->endp_list, &lport->endp_list);
714 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
715 
716 	nvme_fc_signal_discovery_scan(lport, newrec);
717 
718 	*portptr = &newrec->remoteport;
719 	return 0;
720 
721 out_kfree_rport:
722 	kfree(newrec);
723 out_lport_put:
724 	nvme_fc_lport_put(lport);
725 out_reghost_failed:
726 	*portptr = NULL;
727 	return ret;
728 }
729 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
730 
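/*
 * Walk the rport's outstanding LS request list and ask the LLDD to abort
 * each request not already marked for termination. The scan restarts from
 * the top after every abort because the rport lock is dropped around the
 * LLDD callback.
 */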
731 static int
732 nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
733 {
734 	struct nvmefc_ls_req_op *lsop;
735 	unsigned long flags;
736 
737 restart:
738 	spin_lock_irqsave(&rport->lock, flags);
739 
740 	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
741 		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
742 			lsop->flags |= FCOP_FLAGS_TERMIO;
743 			spin_unlock_irqrestore(&rport->lock, flags);
744 			rport->lport->ops->ls_abort(&rport->lport->localport,
745 						&rport->remoteport,
746 						&lsop->ls_req);
747 			goto restart;
748 		}
749 	}
750 	spin_unlock_irqrestore(&rport->lock, flags);
751 
752 	return 0;
753 }
754 
755 static void
756 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
757 {
758 	dev_info(ctrl->ctrl.device,
759 		"NVME-FC{%d}: controller connectivity lost. Awaiting "
760 		"Reconnect", ctrl->cnum);
761 
762 	switch (ctrl->ctrl.state) {
763 	case NVME_CTRL_NEW:
764 	case NVME_CTRL_LIVE:
765 		/*
766 		 * Schedule a controller reset. The reset will terminate the
767 		 * association and schedule the reconnect timer.  Reconnects
768 		 * will be attempted until either the ctlr_loss_tmo
769 		 * (max_retries * connect_delay) expires or the remoteport's
770 		 * dev_loss_tmo expires.
771 		 */
772 		if (nvme_reset_ctrl(&ctrl->ctrl)) {
773 			dev_warn(ctrl->ctrl.device,
774 				"NVME-FC{%d}: Couldn't schedule reset.\n",
775 				ctrl->cnum);
776 			nvme_delete_ctrl(&ctrl->ctrl);
777 		}
778 		break;
779 
780 	case NVME_CTRL_CONNECTING:
781 		/*
782 		 * The association has already been terminated and the
783 		 * controller is attempting reconnects.  No need to do anything
784 		 * further.  Reconnects will be attempted until either the
785 		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
786 		 * remoteport's dev_loss_tmo expires.
787 		 */
788 		break;
789 
790 	case NVME_CTRL_RESETTING:
791 		/*
792 		 * Controller is already in the process of terminating the
793 		 * association.  No need to do anything further. The reconnect
794 		 * step will kick in naturally after the association is
795 		 * terminated.
796 		 */
797 		break;
798 
799 	case NVME_CTRL_DELETING:
800 	default:
801 		/* no action to take - let it delete */
802 		break;
803 	}
804 }
805 
806 /**
807  * nvme_fc_unregister_remoteport - transport entry point called by an
808  *                              LLDD to deregister/remove a previously
809  *                              registered NVME subsystem FC port.
810  * @remoteport: pointer to the (registered) remote port that is to be
811  *              deregistered.
812  *
813  * Returns:
814  * a completion status. Must be 0 upon success; a negative errno
815  * (ex: -ENXIO) upon failure.
816  */
817 int
818 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
819 {
820 	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
821 	struct nvme_fc_ctrl *ctrl;
822 	unsigned long flags;
823 
824 	if (!portptr)
825 		return -EINVAL;
826 
827 	spin_lock_irqsave(&rport->lock, flags);
828 
829 	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
830 		spin_unlock_irqrestore(&rport->lock, flags);
831 		return -EINVAL;
832 	}
833 	portptr->port_state = FC_OBJSTATE_DELETED;
834 
835 	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
836 
837 	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
838 		/* if dev_loss_tmo==0, dev loss is immediate */
839 		if (!portptr->dev_loss_tmo) {
840 			dev_warn(ctrl->ctrl.device,
841 				"NVME-FC{%d}: controller connectivity lost.\n",
842 				ctrl->cnum);
843 			nvme_delete_ctrl(&ctrl->ctrl);
844 		} else
845 			nvme_fc_ctrl_connectivity_loss(ctrl);
846 	}
847 
848 	spin_unlock_irqrestore(&rport->lock, flags);
849 
850 	nvme_fc_abort_lsops(rport);
851 
852 	if (atomic_read(&rport->act_ctrl_cnt) == 0)
853 		rport->lport->ops->remoteport_delete(portptr);
854 
855 	/*
856 	 * release the reference, which will allow the rport to be torn
857 	 * down once all controllers go away (which should only occur
858 	 * after dev_loss_tmo expires).
859 	 */
860 	nvme_fc_rport_put(rport);
861 
862 	return 0;
863 }
864 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
865 
866 /**
867  * nvme_fc_rescan_remoteport - transport entry point called by an
868  *                              LLDD to request a nvme device rescan.
869  * @remoteport: pointer to the (registered) remote port that is to be
870  *              rescanned.
871  *
872  * Returns: N/A
873  */
874 void
875 nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
876 {
877 	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
878 
879 	nvme_fc_signal_discovery_scan(rport->lport, rport);
880 }
881 EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
882 
883 int
884 nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
885 			u32 dev_loss_tmo)
886 {
887 	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
888 	unsigned long flags;
889 
890 	spin_lock_irqsave(&rport->lock, flags);
891 
892 	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
893 		spin_unlock_irqrestore(&rport->lock, flags);
894 		return -EINVAL;
895 	}
896 
897 	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
898 	rport->remoteport.dev_loss_tmo = dev_loss_tmo;
899 
900 	spin_unlock_irqrestore(&rport->lock, flags);
901 
902 	return 0;
903 }
904 EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
905 
906 
907 /* *********************** FC-NVME DMA Handling **************************** */
908 
909 /*
910  * The fcloop device passes in a NULL device pointer. Real LLDDs will
911  * pass in a valid device pointer. If NULL is passed to the dma mapping
912  * routines, depending on the platform, it may or may not succeed, and
913  * may crash.
914  *
915  * As such:
916  * Wrapper all the dma routines and check the dev pointer.
917  *
918  * For simple mappings (those that return just a dma address), we'll noop them,
919  * returning a dma address of 0.
920  *
921  * On more complex mappings (dma_map_sg), a pseudo routine fills
922  * in the scatter list, setting all dma addresses to 0.
923  */
924 
925 static inline dma_addr_t
926 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
927 		enum dma_data_direction dir)
928 {
929 	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
930 }
931 
932 static inline int
933 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
934 {
935 	return dev ? dma_mapping_error(dev, dma_addr) : 0;
936 }
937 
938 static inline void
939 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
940 	enum dma_data_direction dir)
941 {
942 	if (dev)
943 		dma_unmap_single(dev, addr, size, dir);
944 }
945 
946 static inline void
947 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
948 		enum dma_data_direction dir)
949 {
950 	if (dev)
951 		dma_sync_single_for_cpu(dev, addr, size, dir);
952 }
953 
954 static inline void
955 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
956 		enum dma_data_direction dir)
957 {
958 	if (dev)
959 		dma_sync_single_for_device(dev, addr, size, dir);
960 }
961 
962 /* pseudo dma_map_sg call */
963 static int
964 fc_map_sg(struct scatterlist *sg, int nents)
965 {
966 	struct scatterlist *s;
967 	int i;
968 
969 	WARN_ON(nents == 0 || sg[0].length == 0);
970 
971 	for_each_sg(sg, s, nents, i) {
972 		s->dma_address = 0L;
973 #ifdef CONFIG_NEED_SG_DMA_LENGTH
974 		s->dma_length = s->length;
975 #endif
976 	}
977 	return nents;
978 }
979 
980 static inline int
981 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
982 		enum dma_data_direction dir)
983 {
984 	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
985 }
986 
987 static inline void
988 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
989 		enum dma_data_direction dir)
990 {
991 	if (dev)
992 		dma_unmap_sg(dev, sg, nents, dir);
993 }
994 
995 /* *********************** FC-NVME LS Handling **************************** */
996 
997 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
998 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
999 
1000 
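/*
 * Common completion path for an LS request: unlink it from the rport's
 * ls_req_list, unmap the request/response DMA buffer, and drop the rport
 * reference taken when the request was issued.
 */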
1001 static void
1002 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
1003 {
1004 	struct nvme_fc_rport *rport = lsop->rport;
1005 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1006 	unsigned long flags;
1007 
1008 	spin_lock_irqsave(&rport->lock, flags);
1009 
1010 	if (!lsop->req_queued) {
1011 		spin_unlock_irqrestore(&rport->lock, flags);
1012 		return;
1013 	}
1014 
1015 	list_del(&lsop->lsreq_list);
1016 
1017 	lsop->req_queued = false;
1018 
1019 	spin_unlock_irqrestore(&rport->lock, flags);
1020 
1021 	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1022 				  (lsreq->rqstlen + lsreq->rsplen),
1023 				  DMA_BIDIRECTIONAL);
1024 
1025 	nvme_fc_rport_put(rport);
1026 }
1027 
1028 static int
1029 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
1030 		struct nvmefc_ls_req_op *lsop,
1031 		void (*done)(struct nvmefc_ls_req *req, int status))
1032 {
1033 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1034 	unsigned long flags;
1035 	int ret = 0;
1036 
1037 	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1038 		return -ECONNREFUSED;
1039 
1040 	if (!nvme_fc_rport_get(rport))
1041 		return -ESHUTDOWN;
1042 
1043 	lsreq->done = done;
1044 	lsop->rport = rport;
1045 	lsop->req_queued = false;
1046 	INIT_LIST_HEAD(&lsop->lsreq_list);
1047 	init_completion(&lsop->ls_done);
1048 
1049 	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
1050 				  lsreq->rqstlen + lsreq->rsplen,
1051 				  DMA_BIDIRECTIONAL);
1052 	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
1053 		ret = -EFAULT;
1054 		goto out_putrport;
1055 	}
1056 	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
1057 
1058 	spin_lock_irqsave(&rport->lock, flags);
1059 
1060 	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
1061 
1062 	lsop->req_queued = true;
1063 
1064 	spin_unlock_irqrestore(&rport->lock, flags);
1065 
1066 	ret = rport->lport->ops->ls_req(&rport->lport->localport,
1067 					&rport->remoteport, lsreq);
1068 	if (ret)
1069 		goto out_unlink;
1070 
1071 	return 0;
1072 
1073 out_unlink:
1074 	lsop->ls_error = ret;
1075 	spin_lock_irqsave(&rport->lock, flags);
1076 	lsop->req_queued = false;
1077 	list_del(&lsop->lsreq_list);
1078 	spin_unlock_irqrestore(&rport->lock, flags);
1079 	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1080 				  (lsreq->rqstlen + lsreq->rsplen),
1081 				  DMA_BIDIRECTIONAL);
1082 out_putrport:
1083 	nvme_fc_rport_put(rport);
1084 
1085 	return ret;
1086 }
1087 
1088 static void
1089 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
1090 {
1091 	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1092 
1093 	lsop->ls_error = status;
1094 	complete(&lsop->ls_done);
1095 }
1096 
1097 static int
1098 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
1099 {
1100 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1101 	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
1102 	int ret;
1103 
1104 	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
1105 
1106 	if (!ret) {
1107 		/*
1108 		 * No timeout/not interruptible as we need the struct
1109 		 * to exist until the lldd calls us back. Thus mandate
1110 		 * wait until driver calls back. lldd responsible for
1111 		 * the timeout action
1112 		 */
1113 		wait_for_completion(&lsop->ls_done);
1114 
1115 		__nvme_fc_finish_ls_req(lsop);
1116 
1117 		ret = lsop->ls_error;
1118 	}
1119 
1120 	if (ret)
1121 		return ret;
1122 
1123 	/* ACC or RJT payload ? */
1124 	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
1125 		return -ENXIO;
1126 
1127 	return 0;
1128 }
1129 
1130 static int
1131 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
1132 		struct nvmefc_ls_req_op *lsop,
1133 		void (*done)(struct nvmefc_ls_req *req, int status))
1134 {
1135 	/* don't wait for completion */
1136 
1137 	return __nvme_fc_send_ls_req(rport, lsop, done);
1138 }
1139 
1140 /* Validation Error indexes into the string table below */
1141 enum {
1142 	VERR_NO_ERROR		= 0,
1143 	VERR_LSACC		= 1,
1144 	VERR_LSDESC_RQST	= 2,
1145 	VERR_LSDESC_RQST_LEN	= 3,
1146 	VERR_ASSOC_ID		= 4,
1147 	VERR_ASSOC_ID_LEN	= 5,
1148 	VERR_CONN_ID		= 6,
1149 	VERR_CONN_ID_LEN	= 7,
1150 	VERR_CR_ASSOC		= 8,
1151 	VERR_CR_ASSOC_ACC_LEN	= 9,
1152 	VERR_CR_CONN		= 10,
1153 	VERR_CR_CONN_ACC_LEN	= 11,
1154 	VERR_DISCONN		= 12,
1155 	VERR_DISCONN_ACC_LEN	= 13,
1156 };
1157 
1158 static char *validation_errors[] = {
1159 	"OK",
1160 	"Not LS_ACC",
1161 	"Not LSDESC_RQST",
1162 	"Bad LSDESC_RQST Length",
1163 	"Not Association ID",
1164 	"Bad Association ID Length",
1165 	"Not Connection ID",
1166 	"Bad Connection ID Length",
1167 	"Not CR_ASSOC Rqst",
1168 	"Bad CR_ASSOC ACC Length",
1169 	"Not CR_CONN Rqst",
1170 	"Bad CR_CONN ACC Length",
1171 	"Not Disconnect Rqst",
1172 	"Bad Disconnect ACC Length",
1173 };
1174 
1175 static int
1176 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1177 	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
1178 {
1179 	struct nvmefc_ls_req_op *lsop;
1180 	struct nvmefc_ls_req *lsreq;
1181 	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
1182 	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
1183 	int ret, fcret = 0;
1184 
1185 	lsop = kzalloc((sizeof(*lsop) +
1186 			 ctrl->lport->ops->lsrqst_priv_sz +
1187 			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
1188 	if (!lsop) {
1189 		ret = -ENOMEM;
1190 		goto out_no_memory;
1191 	}
1192 	lsreq = &lsop->ls_req;
1193 
1194 	lsreq->private = (void *)&lsop[1];
1195 	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
1196 			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1197 	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
1198 
1199 	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
1200 	assoc_rqst->desc_list_len =
1201 			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1202 
1203 	assoc_rqst->assoc_cmd.desc_tag =
1204 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
1205 	assoc_rqst->assoc_cmd.desc_len =
1206 			fcnvme_lsdesc_len(
1207 				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1208 
1209 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1210 	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
1211 	/* Linux supports only Dynamic controllers */
1212 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
1213 	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1214 	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1215 		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
1216 	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1217 		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
1218 
1219 	lsop->queue = queue;
1220 	lsreq->rqstaddr = assoc_rqst;
1221 	lsreq->rqstlen = sizeof(*assoc_rqst);
1222 	lsreq->rspaddr = assoc_acc;
1223 	lsreq->rsplen = sizeof(*assoc_acc);
1224 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1225 
1226 	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1227 	if (ret)
1228 		goto out_free_buffer;
1229 
1230 	/* process connect LS completion */
1231 
1232 	/* validate the ACC response */
1233 	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1234 		fcret = VERR_LSACC;
1235 	else if (assoc_acc->hdr.desc_list_len !=
1236 			fcnvme_lsdesc_len(
1237 				sizeof(struct fcnvme_ls_cr_assoc_acc)))
1238 		fcret = VERR_CR_ASSOC_ACC_LEN;
1239 	else if (assoc_acc->hdr.rqst.desc_tag !=
1240 			cpu_to_be32(FCNVME_LSDESC_RQST))
1241 		fcret = VERR_LSDESC_RQST;
1242 	else if (assoc_acc->hdr.rqst.desc_len !=
1243 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1244 		fcret = VERR_LSDESC_RQST_LEN;
1245 	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
1246 		fcret = VERR_CR_ASSOC;
1247 	else if (assoc_acc->associd.desc_tag !=
1248 			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1249 		fcret = VERR_ASSOC_ID;
1250 	else if (assoc_acc->associd.desc_len !=
1251 			fcnvme_lsdesc_len(
1252 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1253 		fcret = VERR_ASSOC_ID_LEN;
1254 	else if (assoc_acc->connectid.desc_tag !=
1255 			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1256 		fcret = VERR_CONN_ID;
1257 	else if (assoc_acc->connectid.desc_len !=
1258 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1259 		fcret = VERR_CONN_ID_LEN;
1260 
1261 	if (fcret) {
1262 		ret = -EBADF;
1263 		dev_err(ctrl->dev,
1264 			"q %d connect failed: %s\n",
1265 			queue->qnum, validation_errors[fcret]);
1266 	} else {
1267 		ctrl->association_id =
1268 			be64_to_cpu(assoc_acc->associd.association_id);
1269 		queue->connection_id =
1270 			be64_to_cpu(assoc_acc->connectid.connection_id);
1271 		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1272 	}
1273 
1274 out_free_buffer:
1275 	kfree(lsop);
1276 out_no_memory:
1277 	if (ret)
1278 		dev_err(ctrl->dev,
1279 			"queue %d connect admin queue failed (%d).\n",
1280 			queue->qnum, ret);
1281 	return ret;
1282 }
1283 
1284 static int
1285 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1286 			u16 qsize, u16 ersp_ratio)
1287 {
1288 	struct nvmefc_ls_req_op *lsop;
1289 	struct nvmefc_ls_req *lsreq;
1290 	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1291 	struct fcnvme_ls_cr_conn_acc *conn_acc;
1292 	int ret, fcret = 0;
1293 
1294 	lsop = kzalloc((sizeof(*lsop) +
1295 			 ctrl->lport->ops->lsrqst_priv_sz +
1296 			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
1297 	if (!lsop) {
1298 		ret = -ENOMEM;
1299 		goto out_no_memory;
1300 	}
1301 	lsreq = &lsop->ls_req;
1302 
1303 	lsreq->private = (void *)&lsop[1];
1304 	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
1305 			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1306 	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1307 
1308 	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1309 	conn_rqst->desc_list_len = cpu_to_be32(
1310 				sizeof(struct fcnvme_lsdesc_assoc_id) +
1311 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1312 
1313 	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1314 	conn_rqst->associd.desc_len =
1315 			fcnvme_lsdesc_len(
1316 				sizeof(struct fcnvme_lsdesc_assoc_id));
1317 	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1318 	conn_rqst->connect_cmd.desc_tag =
1319 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1320 	conn_rqst->connect_cmd.desc_len =
1321 			fcnvme_lsdesc_len(
1322 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1323 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1324 	conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
1325 	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
1326 
1327 	lsop->queue = queue;
1328 	lsreq->rqstaddr = conn_rqst;
1329 	lsreq->rqstlen = sizeof(*conn_rqst);
1330 	lsreq->rspaddr = conn_acc;
1331 	lsreq->rsplen = sizeof(*conn_acc);
1332 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1333 
1334 	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1335 	if (ret)
1336 		goto out_free_buffer;
1337 
1338 	/* process connect LS completion */
1339 
1340 	/* validate the ACC response */
1341 	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1342 		fcret = VERR_LSACC;
1343 	else if (conn_acc->hdr.desc_list_len !=
1344 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1345 		fcret = VERR_CR_CONN_ACC_LEN;
1346 	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1347 		fcret = VERR_LSDESC_RQST;
1348 	else if (conn_acc->hdr.rqst.desc_len !=
1349 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1350 		fcret = VERR_LSDESC_RQST_LEN;
1351 	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1352 		fcret = VERR_CR_CONN;
1353 	else if (conn_acc->connectid.desc_tag !=
1354 			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1355 		fcret = VERR_CONN_ID;
1356 	else if (conn_acc->connectid.desc_len !=
1357 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1358 		fcret = VERR_CONN_ID_LEN;
1359 
1360 	if (fcret) {
1361 		ret = -EBADF;
1362 		dev_err(ctrl->dev,
1363 			"q %d connect failed: %s\n",
1364 			queue->qnum, validation_errors[fcret]);
1365 	} else {
1366 		queue->connection_id =
1367 			be64_to_cpu(conn_acc->connectid.connection_id);
1368 		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1369 	}
1370 
1371 out_free_buffer:
1372 	kfree(lsop);
1373 out_no_memory:
1374 	if (ret)
1375 		dev_err(ctrl->dev,
1376 			"queue %d connect command failed (%d).\n",
1377 			queue->qnum, ret);
1378 	return ret;
1379 }
1380 
1381 static void
1382 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1383 {
1384 	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1385 
1386 	__nvme_fc_finish_ls_req(lsop);
1387 
1388 	/* fc-nvme initiator doesn't care about success or failure of cmd */
1389 
1390 	kfree(lsop);
1391 }
1392 
1393 /*
1394  * This routine sends a FC-NVME LS to disconnect (aka terminate)
1395  * the FC-NVME Association.  Terminating the association also
1396  * terminates the FC-NVME connections (per queue, both admin and io
1397  * queues) that are part of the association. E.g. things are torn
1398  * down, and the related FC-NVME Association ID and Connection IDs
1399  * become invalid.
1400  *
1401  * The behavior of the fc-nvme initiator is such that it's
1402  * The behavior of the fc-nvme initiator is such that its
1403  * be torn down. The action is implicit as it may be due to a loss of
1404  * connectivity with the fc-nvme target, so you may never get a
1405  * response even if you tried.  As such, the action of this routine
1406  * is to asynchronously send the LS, ignore any results of the LS, and
1407  * continue on with terminating the association. If the fc-nvme target
1408  * is present and receives the LS, it too can tear down.
1409  */
1410 static void
1411 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1412 {
1413 	struct fcnvme_ls_disconnect_rqst *discon_rqst;
1414 	struct fcnvme_ls_disconnect_acc *discon_acc;
1415 	struct nvmefc_ls_req_op *lsop;
1416 	struct nvmefc_ls_req *lsreq;
1417 	int ret;
1418 
1419 	lsop = kzalloc((sizeof(*lsop) +
1420 			 ctrl->lport->ops->lsrqst_priv_sz +
1421 			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1422 			GFP_KERNEL);
1423 	if (!lsop)
1424 		/* couldn't send it... too bad */
1425 		return;
1426 
1427 	lsreq = &lsop->ls_req;
1428 
1429 	lsreq->private = (void *)&lsop[1];
1430 	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1431 			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1432 	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1433 
1434 	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1435 	discon_rqst->desc_list_len = cpu_to_be32(
1436 				sizeof(struct fcnvme_lsdesc_assoc_id) +
1437 				sizeof(struct fcnvme_lsdesc_disconn_cmd));
1438 
1439 	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1440 	discon_rqst->associd.desc_len =
1441 			fcnvme_lsdesc_len(
1442 				sizeof(struct fcnvme_lsdesc_assoc_id));
1443 
1444 	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1445 
1446 	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1447 						FCNVME_LSDESC_DISCONN_CMD);
1448 	discon_rqst->discon_cmd.desc_len =
1449 			fcnvme_lsdesc_len(
1450 				sizeof(struct fcnvme_lsdesc_disconn_cmd));
1451 	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1452 	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1453 
1454 	lsreq->rqstaddr = discon_rqst;
1455 	lsreq->rqstlen = sizeof(*discon_rqst);
1456 	lsreq->rspaddr = discon_acc;
1457 	lsreq->rsplen = sizeof(*discon_acc);
1458 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1459 
1460 	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1461 				nvme_fc_disconnect_assoc_done);
1462 	if (ret)
1463 		kfree(lsop);
1464 
1465 	/* only meaningful part to terminating the association */
1466 	ctrl->association_id = 0;
1467 }
1468 
1469 
1470 /* *********************** NVME Ctrl Routines **************************** */
1471 
1472 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1473 
1474 static void
1475 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1476 		struct nvme_fc_fcp_op *op)
1477 {
1478 	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1479 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1480 	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1481 				sizeof(op->cmd_iu), DMA_TO_DEVICE);
1482 
1483 	atomic_set(&op->state, FCPOP_STATE_UNINIT);
1484 }
1485 
1486 static void
1487 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1488 		unsigned int hctx_idx)
1489 {
1490 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1491 
1492 	return __nvme_fc_exit_request(set->driver_data, op);
1493 }
1494 
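/*
 * Mark an FCP op as ABORTED. If the op was actively running on the wire,
 * account for it in ctrl->iocnt when a full TERMIO is in progress and ask
 * the LLDD to abort the exchange; otherwise restore the prior state and
 * return -ECANCELED.
 */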
1495 static int
1496 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1497 {
1498 	unsigned long flags;
1499 	int opstate;
1500 
1501 	spin_lock_irqsave(&ctrl->lock, flags);
1502 	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1503 	if (opstate != FCPOP_STATE_ACTIVE)
1504 		atomic_set(&op->state, opstate);
1505 	else if (ctrl->flags & FCCTRL_TERMIO)
1506 		ctrl->iocnt++;
1507 	spin_unlock_irqrestore(&ctrl->lock, flags);
1508 
1509 	if (opstate != FCPOP_STATE_ACTIVE)
1510 		return -ECANCELED;
1511 
1512 	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1513 					&ctrl->rport->remoteport,
1514 					op->queue->lldd_handle,
1515 					&op->fcp_req);
1516 
1517 	return 0;
1518 }
1519 
1520 static void
1521 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1522 {
1523 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1524 	int i;
1525 
1526 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1527 		__nvme_fc_abort_op(ctrl, aen_op);
1528 }
1529 
1530 static inline void
1531 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1532 		struct nvme_fc_fcp_op *op, int opstate)
1533 {
1534 	unsigned long flags;
1535 
1536 	if (opstate == FCPOP_STATE_ABORTED) {
1537 		spin_lock_irqsave(&ctrl->lock, flags);
1538 		if (ctrl->flags & FCCTRL_TERMIO) {
1539 			if (!--ctrl->iocnt)
1540 				wake_up(&ctrl->ioabort_wait);
1541 		}
1542 		spin_unlock_irqrestore(&ctrl->lock, flags);
1543 	}
1544 }
1545 
1546 static void
1547 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1548 {
1549 	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1550 	struct request *rq = op->rq;
1551 	struct nvmefc_fcp_req *freq = &op->fcp_req;
1552 	struct nvme_fc_ctrl *ctrl = op->ctrl;
1553 	struct nvme_fc_queue *queue = op->queue;
1554 	struct nvme_completion *cqe = &op->rsp_iu.cqe;
1555 	struct nvme_command *sqe = &op->cmd_iu.sqe;
1556 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1557 	union nvme_result result;
1558 	bool terminate_assoc = true;
1559 	int opstate;
1560 
1561 	/*
1562 	 * WARNING:
1563 	 * The current linux implementation of a nvme controller
1564 	 * allocates a single tag set for all io queues and sizes
1565 	 * the io queues to fully hold all possible tags. Thus, the
1566 	 * implementation does not reference or care about the sqhd
1567 	 * value as it never needs to use the sqhd/sqtail pointers
1568 	 * for submission pacing.
1569 	 *
1570 	 * This affects the FC-NVME implementation in two ways:
1571 	 * 1) As the value doesn't matter, we don't need to waste
1572 	 *    cycles extracting it from ERSPs and stamping it in the
1573 	 *    cases where the transport fabricates CQEs on successful
1574 	 *    completions.
1575 	 * 2) The FC-NVME implementation requires that delivery of
1576 	 *    ERSP completions are to go back to the nvme layer in order
1577 	 *    relative to the rsn, such that the sqhd value will always
1578 	 *    be "in order" for the nvme layer. As the nvme layer in
1579 	 *    linux doesn't care about sqhd, there's no need to return
1580 	 *    them in order.
1581 	 *
1582 	 * Additionally:
1583 	 * As the core nvme layer in linux currently does not look at
1584 	 * every field in the cqe - in cases where the FC transport must
1585 	 * fabricate a CQE, the following fields will not be set as they
1586 	 * are not referenced:
1587 	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
1588 	 *
1589 	 * Failure or error of an individual i/o, in a transport
1590 	 * detected fashion unrelated to the nvme completion status,
1591 	 * can potentially cause the initiator and target sides to get out
1592 	 * of sync on SQ head/tail (aka outstanding io count allowed).
1593 	 * Per FC-NVME spec, failure of an individual command requires
1594 	 * the connection to be terminated, which in turn requires the
1595 	 * association to be terminated.
1596 	 */
1597 
1598 	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1599 
1600 	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1601 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1602 
1603 	if (opstate == FCPOP_STATE_ABORTED)
1604 		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1605 	else if (freq->status)
1606 		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1607 
1608 	/*
1609 	 * For the linux implementation, if we have an unsuccessful
1610 	 * status, the blk-mq layer can typically be called with the
1611 	 * non-zero status and the content of the cqe isn't important.
1612 	 */
1613 	if (status)
1614 		goto done;
1615 
1616 	/*
1617 	 * command completed successfully relative to the wire
1618 	 * protocol. However, validate anything received and
1619 	 * extract the status and result from the cqe (create it
1620 	 * where necessary).
1621 	 */
1622 
1623 	switch (freq->rcv_rsplen) {
1624 
1625 	case 0:
1626 	case NVME_FC_SIZEOF_ZEROS_RSP:
1627 		/*
1628 		 * No response payload or 12 bytes of payload (which
1629 		 * should all be zeros) are considered successful, and
1630 		 * the transport places no payload in the CQE.
1631 		 */
1632 		if (freq->transferred_length !=
1633 			be32_to_cpu(op->cmd_iu.data_len)) {
1634 			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1635 			goto done;
1636 		}
1637 		result.u64 = 0;
1638 		break;
1639 
1640 	case sizeof(struct nvme_fc_ersp_iu):
1641 		/*
1642 		 * The ERSP IU contains a full completion with CQE.
1643 		 * Validate ERSP IU and look at cqe.
1644 		 */
1645 		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1646 					(freq->rcv_rsplen / 4) ||
1647 			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
1648 					freq->transferred_length ||
1649 			     op->rsp_iu.status_code ||
1650 			     sqe->common.command_id != cqe->command_id)) {
1651 			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1652 			goto done;
1653 		}
1654 		result = cqe->result;
1655 		status = cqe->status;
1656 		break;
1657 
1658 	default:
1659 		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1660 		goto done;
1661 	}
1662 
1663 	terminate_assoc = false;
1664 
1665 done:
1666 	if (op->flags & FCOP_FLAGS_AEN) {
1667 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1668 		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1669 		atomic_set(&op->state, FCPOP_STATE_IDLE);
1670 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
1671 		nvme_fc_ctrl_put(ctrl);
1672 		goto check_error;
1673 	}
1674 
1675 	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1676 	nvme_end_request(rq, status, result);
1677 
1678 check_error:
1679 	if (terminate_assoc)
1680 		nvme_fc_error_recovery(ctrl, "transport detected io error");
1681 }
1682 
1683 static int
1684 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1685 		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1686 		struct request *rq, u32 rqno)
1687 {
1688 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1689 	int ret = 0;
1690 
1691 	memset(op, 0, sizeof(*op));
1692 	op->fcp_req.cmdaddr = &op->cmd_iu;
1693 	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1694 	op->fcp_req.rspaddr = &op->rsp_iu;
1695 	op->fcp_req.rsplen = sizeof(op->rsp_iu);
1696 	op->fcp_req.done = nvme_fc_fcpio_done;
1697 	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1698 	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1699 	op->ctrl = ctrl;
1700 	op->queue = queue;
1701 	op->rq = rq;
1702 	op->rqno = rqno;
1703 
1704 	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1705 	cmdiu->fc_id = NVME_CMD_FC_ID;
1706 	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1707 
1708 	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1709 				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1710 	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1711 		dev_err(ctrl->dev,
1712 			"FCP Op failed - cmdiu dma mapping failed.\n");
1713 		ret = -EFAULT;
1714 		goto out_on_error;
1715 	}
1716 
1717 	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1718 				&op->rsp_iu, sizeof(op->rsp_iu),
1719 				DMA_FROM_DEVICE);
1720 	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1721 		dev_err(ctrl->dev,
1722 			"FCP Op failed - rspiu dma mapping failed.\n");
1723 		ret = -EFAULT;
1724 	}
1725 
1726 	atomic_set(&op->state, FCPOP_STATE_IDLE);
1727 out_on_error:
1728 	return ret;
1729 }
1730 
1731 static int
1732 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1733 		unsigned int hctx_idx, unsigned int numa_node)
1734 {
1735 	struct nvme_fc_ctrl *ctrl = set->driver_data;
1736 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1737 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
1738 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1739 
1740 	nvme_req(rq)->ctrl = &ctrl->ctrl;
1741 	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1742 }
1743 
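/*
 * Allocate and initialize the FCP ops used for Asynchronous Event
 * Requests. AEN ops have no struct request and carry LLDD private space
 * allocated here; their command ids sit just above the admin queue
 * blk-mq tag space.
 */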
1744 static int
1745 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1746 {
1747 	struct nvme_fc_fcp_op *aen_op;
1748 	struct nvme_fc_cmd_iu *cmdiu;
1749 	struct nvme_command *sqe;
1750 	void *private;
1751 	int i, ret;
1752 
1753 	aen_op = ctrl->aen_ops;
1754 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1755 		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1756 						GFP_KERNEL);
1757 		if (!private)
1758 			return -ENOMEM;
1759 
1760 		cmdiu = &aen_op->cmd_iu;
1761 		sqe = &cmdiu->sqe;
1762 		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1763 				aen_op, (struct request *)NULL,
1764 				(NVME_AQ_BLK_MQ_DEPTH + i));
1765 		if (ret) {
1766 			kfree(private);
1767 			return ret;
1768 		}
1769 
1770 		aen_op->flags = FCOP_FLAGS_AEN;
1771 		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
1772 		aen_op->fcp_req.private = private;
1773 
1774 		memset(sqe, 0, sizeof(*sqe));
1775 		sqe->common.opcode = nvme_admin_async_event;
1776 		/* Note: core layer may overwrite the sqe.command_id value */
1777 		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
1778 	}
1779 	return 0;
1780 }
1781 
1782 static void
1783 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1784 {
1785 	struct nvme_fc_fcp_op *aen_op;
1786 	int i;
1787 
1788 	aen_op = ctrl->aen_ops;
1789 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1790 		if (!aen_op->fcp_req.private)
1791 			continue;
1792 
1793 		__nvme_fc_exit_request(ctrl, aen_op);
1794 
1795 		kfree(aen_op->fcp_req.private);
1796 		aen_op->fcp_req.private = NULL;
1797 	}
1798 }
1799 
1800 static inline void
1801 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1802 		unsigned int qidx)
1803 {
1804 	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1805 
1806 	hctx->driver_data = queue;
1807 	queue->hctx = hctx;
1808 }
1809 
1810 static int
1811 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1812 		unsigned int hctx_idx)
1813 {
1814 	struct nvme_fc_ctrl *ctrl = data;
1815 
1816 	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1817 
1818 	return 0;
1819 }
1820 
1821 static int
1822 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1823 		unsigned int hctx_idx)
1824 {
1825 	struct nvme_fc_ctrl *ctrl = data;
1826 
1827 	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1828 
1829 	return 0;
1830 }
1831 
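/*
 * Initialize the software state of queue 'idx': zero it, link it to the
 * controller, reset the command sequence number, and set the command
 * capsule length (ioccsz-based for io queues, fixed for the admin queue).
 */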
1832 static void
1833 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
1834 {
1835 	struct nvme_fc_queue *queue;
1836 
1837 	queue = &ctrl->queues[idx];
1838 	memset(queue, 0, sizeof(*queue));
1839 	queue->ctrl = ctrl;
1840 	queue->qnum = idx;
1841 	atomic_set(&queue->csn, 1);
1842 	queue->dev = ctrl->dev;
1843 
1844 	if (idx > 0)
1845 		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1846 	else
1847 		queue->cmnd_capsule_len = sizeof(struct nvme_command);
1848 
1849 	/*
1850 	 * Considered whether we should allocate buffers for all SQEs
1851 	 * and CQEs and dma map them - mapping their respective entries
1852 	 * into the request structures (kernel vm addr and dma address)
1853 	 * thus the driver could use the buffers/mappings directly.
1854 	 * It only makes sense if the LLDD would use them for its
1855 	 * messaging api. It's very unlikely most adapter apis would use
1856 	 * a native NVME sqe/cqe. It would be more reasonable if FC-NVME
1857 	 * IU payload structures were used instead.
1858 	 */
1859 }
1860 
1861 /*
1862  * This routine terminates a queue at the transport level.
1863  * The transport has already ensured that all outstanding ios on
1864  * the queue have been terminated.
1865  * The transport will send a Disconnect LS request to terminate
1866  * the queue's connection. Termination of the admin queue will also
1867  * terminate the association at the target.
1868  */
1869 static void
1870 nvme_fc_free_queue(struct nvme_fc_queue *queue)
1871 {
1872 	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1873 		return;
1874 
1875 	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
1876 	/*
1877 	 * Current implementation never disconnects a single queue.
1878 	 * It always terminates a whole association. So there is never
1879 	 * a disconnect(queue) LS sent to the target.
1880 	 */
1881 
1882 	queue->connection_id = 0;
1883 	atomic_set(&queue->csn, 1);
1884 }
1885 
1886 static void
1887 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1888 	struct nvme_fc_queue *queue, unsigned int qidx)
1889 {
1890 	if (ctrl->lport->ops->delete_queue)
1891 		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1892 				queue->lldd_handle);
1893 	queue->lldd_handle = NULL;
1894 }
1895 
1896 static void
1897 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1898 {
1899 	int i;
1900 
1901 	for (i = 1; i < ctrl->ctrl.queue_count; i++)
1902 		nvme_fc_free_queue(&ctrl->queues[i]);
1903 }
1904 
1905 static int
1906 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1907 	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1908 {
1909 	int ret = 0;
1910 
1911 	queue->lldd_handle = NULL;
1912 	if (ctrl->lport->ops->create_queue)
1913 		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1914 				qidx, qsize, &queue->lldd_handle);
1915 
1916 	return ret;
1917 }
1918 
1919 static void
1920 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1921 {
1922 	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
1923 	int i;
1924 
1925 	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
1926 		__nvme_fc_delete_hw_queue(ctrl, queue, i);
1927 }
1928 
1929 static int
1930 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1931 {
1932 	struct nvme_fc_queue *queue = &ctrl->queues[1];
1933 	int i, ret;
1934 
1935 	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
1936 		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1937 		if (ret)
1938 			goto delete_queues;
1939 	}
1940 
1941 	return 0;
1942 
1943 delete_queues:
1944 	for (; i >= 0; i--)
1945 		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1946 	return ret;
1947 }
1948 
1949 static int
1950 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1951 {
1952 	int i, ret = 0;
1953 
1954 	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
1955 		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1956 					(qsize / 5));
1957 		if (ret)
1958 			break;
1959 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1960 		if (ret)
1961 			break;
1962 
1963 		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
1964 	}
1965 
1966 	return ret;
1967 }
1968 
1969 static void
1970 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1971 {
1972 	int i;
1973 
1974 	for (i = 1; i < ctrl->ctrl.queue_count; i++)
1975 		nvme_fc_init_queue(ctrl, i);
1976 }
1977 
1978 static void
1979 nvme_fc_ctrl_free(struct kref *ref)
1980 {
1981 	struct nvme_fc_ctrl *ctrl =
1982 		container_of(ref, struct nvme_fc_ctrl, ref);
1983 	unsigned long flags;
1984 
1985 	if (ctrl->ctrl.tagset) {
1986 		blk_cleanup_queue(ctrl->ctrl.connect_q);
1987 		blk_mq_free_tag_set(&ctrl->tag_set);
1988 	}
1989 
1990 	/* remove from rport list */
1991 	spin_lock_irqsave(&ctrl->rport->lock, flags);
1992 	list_del(&ctrl->ctrl_list);
1993 	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1994 
1995 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
1996 	blk_cleanup_queue(ctrl->ctrl.admin_q);
1997 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
1998 
1999 	kfree(ctrl->queues);
2000 
2001 	put_device(ctrl->dev);
2002 	nvme_fc_rport_put(ctrl->rport);
2003 
2004 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2005 	if (ctrl->ctrl.opts)
2006 		nvmf_free_options(ctrl->ctrl.opts);
2007 	kfree(ctrl);
2008 }
2009 
2010 static void
2011 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2012 {
2013 	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2014 }
2015 
2016 static int
2017 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2018 {
2019 	return kref_get_unless_zero(&ctrl->ref);
2020 }
2021 
2022 /*
2023  * All accesses from nvme core layer done - can now free the
2024  * controller. Called after last nvme_put_ctrl() call
2025  */
2026 static void
2027 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2028 {
2029 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2030 
2031 	WARN_ON(nctrl != &ctrl->ctrl);
2032 
2033 	nvme_fc_ctrl_put(ctrl);
2034 }
2035 
2036 static void
2037 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2038 {
2039 	/* only proceed if in LIVE state - e.g. on first error */
2040 	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2041 		return;
2042 
2043 	dev_warn(ctrl->ctrl.device,
2044 		"NVME-FC{%d}: transport association error detected: %s\n",
2045 		ctrl->cnum, errmsg);
2046 	dev_warn(ctrl->ctrl.device,
2047 		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2048 
2049 	nvme_reset_ctrl(&ctrl->ctrl);
2050 }
2051 
2052 static enum blk_eh_timer_return
2053 nvme_fc_timeout(struct request *rq, bool reserved)
2054 {
2055 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2056 	struct nvme_fc_ctrl *ctrl = op->ctrl;
2057 
2058 	/*
2059 	 * we can't individually ABTS an io without affecting the queue,
2060 	 * thus killing the queue, and thus the association.
2061 	 * So resolve by performing a controller reset, which will stop
2062 	 * the host/io stack, terminate the association on the link,
2063 	 * and recreate an association on the link.
2064 	 */
2065 	nvme_fc_error_recovery(ctrl, "io timeout error");
2066 
2067 	/*
2068 	 * the io abort has been initiated. Have the reset timer
2069 	 * restarted and the abort completion will complete the io
2070 	 * shortly. Avoids a synchronous wait while the abort finishes.
2071 	 */
2072 	return BLK_EH_RESET_TIMER;
2073 }
2074 
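/*
 * Map the request's data for DMA: build a (chained) scatterlist from the
 * block request and map it against the lport's physical device in the
 * direction implied by the request.
 */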
2075 static int
2076 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2077 		struct nvme_fc_fcp_op *op)
2078 {
2079 	struct nvmefc_fcp_req *freq = &op->fcp_req;
2080 	enum dma_data_direction dir;
2081 	int ret;
2082 
2083 	freq->sg_cnt = 0;
2084 
2085 	if (!blk_rq_payload_bytes(rq))
2086 		return 0;
2087 
2088 	freq->sg_table.sgl = freq->first_sgl;
2089 	ret = sg_alloc_table_chained(&freq->sg_table,
2090 			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
2091 	if (ret)
2092 		return -ENOMEM;
2093 
2094 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2095 	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2096 	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
2097 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2098 				op->nents, dir);
2099 	if (unlikely(freq->sg_cnt <= 0)) {
2100 		sg_free_table_chained(&freq->sg_table, true);
2101 		freq->sg_cnt = 0;
2102 		return -EFAULT;
2103 	}
2104 
2105 	/*
2106 	 * TODO: blk_integrity_rq(rq)  for DIF
2107 	 */
2108 	return 0;
2109 }
2110 
2111 static void
2112 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2113 		struct nvme_fc_fcp_op *op)
2114 {
2115 	struct nvmefc_fcp_req *freq = &op->fcp_req;
2116 
2117 	if (!freq->sg_cnt)
2118 		return;
2119 
2120 	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2121 				((rq_data_dir(rq) == WRITE) ?
2122 					DMA_TO_DEVICE : DMA_FROM_DEVICE));
2123 
2124 	nvme_cleanup_cmd(rq);
2125 
2126 	sg_free_table_chained(&freq->sg_table, true);
2127 
2128 	freq->sg_cnt = 0;
2129 }
2130 
2131 /*
2132  * In FC, the queue is a logical thing. At transport connect, the target
2133  * creates its "queue" and returns a handle that is to be given to the
2134  * target whenever it posts something to the corresponding SQ.  When an
2135  * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
2136  * command contained within the SQE, an io, and assigns a FC exchange
2137  * to it. The SQE and the associated SQ handle are sent in the initial
2138  * CMD IU sent on the exchange. All transfers relative to the io occur
2139  * as part of the exchange.  The CQE is the last thing for the io,
2140  * which is transferred (explicitly or implicitly) with the RSP IU
2141  * sent on the exchange. After the CQE is received, the FC exchange is
2142  * terminated and the Exchange may be used on a different io.
2143  *
2144  * The transport to LLDD api has the transport making a request for a
2145  * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
2146  * resource and transfers the command. The LLDD will then process all
2147  * steps to complete the io. Upon completion, the transport done routine
2148  * is called.
2149  *
2150  * So - while the operation is outstanding to the LLDD, there is a link
2151  * level FC exchange resource that is also outstanding. This must be
2152  * considered in all cleanup operations.
2153  */
2154 static blk_status_t
2155 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2156 	struct nvme_fc_fcp_op *op, u32 data_len,
2157 	enum nvmefc_fcp_datadir	io_dir)
2158 {
2159 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2160 	struct nvme_command *sqe = &cmdiu->sqe;
2161 	u32 csn;
2162 	int ret, opstate;
2163 
2164 	/*
2165 	 * before attempting to send the io, check to see if we believe
2166 	 * the target device is present
2167 	 */
2168 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2169 		return BLK_STS_RESOURCE;
2170 
2171 	if (!nvme_fc_ctrl_get(ctrl))
2172 		return BLK_STS_IOERR;
2173 
2174 	/* format the FC-NVME CMD IU and fcp_req */
2175 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2176 	csn = atomic_inc_return(&queue->csn);
2177 	cmdiu->csn = cpu_to_be32(csn);
2178 	cmdiu->data_len = cpu_to_be32(data_len);
2179 	switch (io_dir) {
2180 	case NVMEFC_FCP_WRITE:
2181 		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2182 		break;
2183 	case NVMEFC_FCP_READ:
2184 		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2185 		break;
2186 	case NVMEFC_FCP_NODATA:
2187 		cmdiu->flags = 0;
2188 		break;
2189 	}
2190 	op->fcp_req.payload_length = data_len;
2191 	op->fcp_req.io_dir = io_dir;
2192 	op->fcp_req.transferred_length = 0;
2193 	op->fcp_req.rcv_rsplen = 0;
2194 	op->fcp_req.status = NVME_SC_SUCCESS;
2195 	op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2196 
2197 	/*
2198 	 * validate per fabric rules, set fields mandated by fabric spec
2199 	 * as well as those by FC-NVME spec.
2200 	 */
2201 	WARN_ON_ONCE(sqe->common.metadata);
2202 	sqe->common.flags |= NVME_CMD_SGL_METABUF;
2203 
2204 	/*
2205 	 * format SQE DPTR field per FC-NVME rules:
2206 	 *    type=0x5     Transport SGL Data Block Descriptor
2207 	 *    subtype=0xA  Transport-specific value
2208 	 *    address=0
2209 	 *    length=length of the data series
2210 	 */
2211 	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2212 					NVME_SGL_FMT_TRANSPORT_A;
2213 	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2214 	sqe->rw.dptr.sgl.addr = 0;
2215 
2216 	if (!(op->flags & FCOP_FLAGS_AEN)) {
2217 		ret = nvme_fc_map_data(ctrl, op->rq, op);
2218 		if (ret < 0) {
2219 			nvme_cleanup_cmd(op->rq);
2220 			nvme_fc_ctrl_put(ctrl);
2221 			if (ret == -ENOMEM || ret == -EAGAIN)
2222 				return BLK_STS_RESOURCE;
2223 			return BLK_STS_IOERR;
2224 		}
2225 	}
2226 
2227 	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2228 				  sizeof(op->cmd_iu), DMA_TO_DEVICE);
2229 
2230 	atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2231 
2232 	if (!(op->flags & FCOP_FLAGS_AEN))
2233 		blk_mq_start_request(op->rq);
2234 
2235 	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2236 					&ctrl->rport->remoteport,
2237 					queue->lldd_handle, &op->fcp_req);
2238 
2239 	if (ret) {
2240 		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2241 		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2242 
2243 		if (!(op->flags & FCOP_FLAGS_AEN))
2244 			nvme_fc_unmap_data(ctrl, op->rq, op);
2245 
2246 		nvme_fc_ctrl_put(ctrl);
2247 
2248 		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2249 				ret != -EBUSY)
2250 			return BLK_STS_IOERR;
2251 
2252 		return BLK_STS_RESOURCE;
2253 	}
2254 
2255 	return BLK_STS_OK;
2256 }
2257 
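/*
 * blk-mq queue_rq entry point: fail fast if the remoteport or queue is
 * not ready, build the NVME SQE from the block request, derive the data
 * direction and length, and hand the op to nvme_fc_start_fcp_op().
 */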
2258 static blk_status_t
2259 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2260 			const struct blk_mq_queue_data *bd)
2261 {
2262 	struct nvme_ns *ns = hctx->queue->queuedata;
2263 	struct nvme_fc_queue *queue = hctx->driver_data;
2264 	struct nvme_fc_ctrl *ctrl = queue->ctrl;
2265 	struct request *rq = bd->rq;
2266 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2267 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2268 	struct nvme_command *sqe = &cmdiu->sqe;
2269 	enum nvmefc_fcp_datadir	io_dir;
2270 	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2271 	u32 data_len;
2272 	blk_status_t ret;
2273 
2274 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2275 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2276 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2277 
2278 	ret = nvme_setup_cmd(ns, rq, sqe);
2279 	if (ret)
2280 		return ret;
2281 
2282 	data_len = blk_rq_payload_bytes(rq);
2283 	if (data_len)
2284 		io_dir = ((rq_data_dir(rq) == WRITE) ?
2285 					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2286 	else
2287 		io_dir = NVMEFC_FCP_NODATA;
2288 
2289 	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2290 }
2291 
2292 static struct blk_mq_tags *
2293 nvme_fc_tagset(struct nvme_fc_queue *queue)
2294 {
2295 	if (queue->qnum == 0)
2296 		return queue->ctrl->admin_tag_set.tags[queue->qnum];
2297 
2298 	return queue->ctrl->tag_set.tags[queue->qnum - 1];
2299 }
2300 
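/*
 * blk-mq poll entry point: look up the request for 'tag' and, if its op
 * is still active and the LLDD provides poll_queue, poll the hardware
 * queue. Returns non-zero once the op is no longer active.
 */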
2301 static int
2302 nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
2303 
2304 {
2305 	struct nvme_fc_queue *queue = hctx->driver_data;
2306 	struct nvme_fc_ctrl *ctrl = queue->ctrl;
2307 	struct request *req;
2308 	struct nvme_fc_fcp_op *op;
2309 
2310 	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
2311 	if (!req)
2312 		return 0;
2313 
2314 	op = blk_mq_rq_to_pdu(req);
2315 
2316 	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
2317 		 (ctrl->lport->ops->poll_queue))
2318 		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
2319 						 queue->lldd_handle);
2320 
2321 	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
2322 }
2323 
2324 static void
2325 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2326 {
2327 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2328 	struct nvme_fc_fcp_op *aen_op;
2329 	unsigned long flags;
2330 	bool terminating = false;
2331 	blk_status_t ret;
2332 
2333 	spin_lock_irqsave(&ctrl->lock, flags);
2334 	if (ctrl->flags & FCCTRL_TERMIO)
2335 		terminating = true;
2336 	spin_unlock_irqrestore(&ctrl->lock, flags);
2337 
2338 	if (terminating)
2339 		return;
2340 
2341 	aen_op = &ctrl->aen_ops[0];
2342 
2343 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2344 					NVMEFC_FCP_NODATA);
2345 	if (ret)
2346 		dev_err(ctrl->ctrl.device,
2347 			"failed async event work\n");
2348 }
2349 
2350 static void
2351 nvme_fc_complete_rq(struct request *rq)
2352 {
2353 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2354 	struct nvme_fc_ctrl *ctrl = op->ctrl;
2355 
2356 	atomic_set(&op->state, FCPOP_STATE_IDLE);
2357 
2358 	nvme_fc_unmap_data(ctrl, rq, op);
2359 	nvme_complete_rq(rq);
2360 	nvme_fc_ctrl_put(ctrl);
2361 }
2362 
2363 /*
2364  * This routine is used by the transport when it needs to find active
2365  * io on a queue that is to be terminated. The transport uses
2366  * blk_mq_tagset_busy_iter() to find the busy requests, which then invokes
2367  * this routine to kill them on a 1 by 1 basis.
2368  *
2369  * As FC allocates FC exchange for each io, the transport must contact
2370  * the LLDD to terminate the exchange, thus releasing the FC exchange.
2371  * After terminating the exchange the LLDD will call the transport's
2372  * normal io done path for the request, but it will have an aborted
2373  * status. The done path will return the io request back to the block
2374  * layer with an error status.
2375  */
2376 static void
2377 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2378 {
2379 	struct nvme_ctrl *nctrl = data;
2380 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2381 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2382 
2383 	__nvme_fc_abort_op(ctrl, op);
2384 }
2385 
2386 
2387 static const struct blk_mq_ops nvme_fc_mq_ops = {
2388 	.queue_rq	= nvme_fc_queue_rq,
2389 	.complete	= nvme_fc_complete_rq,
2390 	.init_request	= nvme_fc_init_request,
2391 	.exit_request	= nvme_fc_exit_request,
2392 	.init_hctx	= nvme_fc_init_hctx,
2393 	.poll		= nvme_fc_poll,
2394 	.timeout	= nvme_fc_timeout,
2395 };
2396 
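/*
 * First-time creation of the io queues: negotiate the io queue count
 * with the controller, set up the io tag set and connect_q, then create
 * the LLDD hardware queues and connect each io queue (transport-level
 * connect followed by the fabrics Connect command).
 */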
2397 static int
2398 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2399 {
2400 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2401 	unsigned int nr_io_queues;
2402 	int ret;
2403 
2404 	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2405 				ctrl->lport->ops->max_hw_queues);
2406 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2407 	if (ret) {
2408 		dev_info(ctrl->ctrl.device,
2409 			"set_queue_count failed: %d\n", ret);
2410 		return ret;
2411 	}
2412 
2413 	ctrl->ctrl.queue_count = nr_io_queues + 1;
2414 	if (!nr_io_queues)
2415 		return 0;
2416 
2417 	nvme_fc_init_io_queues(ctrl);
2418 
2419 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2420 	ctrl->tag_set.ops = &nvme_fc_mq_ops;
2421 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2422 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2423 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
2424 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2425 	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2426 					(SG_CHUNK_SIZE *
2427 						sizeof(struct scatterlist)) +
2428 					ctrl->lport->ops->fcprqst_priv_sz;
2429 	ctrl->tag_set.driver_data = ctrl;
2430 	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2431 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2432 
2433 	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2434 	if (ret)
2435 		return ret;
2436 
2437 	ctrl->ctrl.tagset = &ctrl->tag_set;
2438 
2439 	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2440 	if (IS_ERR(ctrl->ctrl.connect_q)) {
2441 		ret = PTR_ERR(ctrl->ctrl.connect_q);
2442 		goto out_free_tag_set;
2443 	}
2444 
2445 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2446 	if (ret)
2447 		goto out_cleanup_blk_queue;
2448 
2449 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2450 	if (ret)
2451 		goto out_delete_hw_queues;
2452 
2453 	ctrl->ioq_live = true;
2454 
2455 	return 0;
2456 
2457 out_delete_hw_queues:
2458 	nvme_fc_delete_hw_io_queues(ctrl);
2459 out_cleanup_blk_queue:
2460 	blk_cleanup_queue(ctrl->ctrl.connect_q);
2461 out_free_tag_set:
2462 	blk_mq_free_tag_set(&ctrl->tag_set);
2463 	nvme_fc_free_io_queues(ctrl);
2464 
2465 	/* force put free routine to ignore io queues */
2466 	ctrl->ctrl.tagset = NULL;
2467 
2468 	return ret;
2469 }
2470 
2471 static int
2472 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2473 {
2474 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2475 	unsigned int nr_io_queues;
2476 	int ret;
2477 
2478 	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2479 				ctrl->lport->ops->max_hw_queues);
2480 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2481 	if (ret) {
2482 		dev_info(ctrl->ctrl.device,
2483 			"set_queue_count failed: %d\n", ret);
2484 		return ret;
2485 	}
2486 
2487 	ctrl->ctrl.queue_count = nr_io_queues + 1;
2488 	/* check for io queues existing */
2489 	if (ctrl->ctrl.queue_count == 1)
2490 		return 0;
2491 
2492 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2493 	if (ret)
2494 		goto out_free_io_queues;
2495 
2496 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2497 	if (ret)
2498 		goto out_delete_hw_queues;
2499 
2500 	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2501 
2502 	return 0;
2503 
2504 out_delete_hw_queues:
2505 	nvme_fc_delete_hw_io_queues(ctrl);
2506 out_free_io_queues:
2507 	nvme_fc_free_io_queues(ctrl);
2508 	return ret;
2509 }
2510 
2511 static void
2512 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2513 {
2514 	struct nvme_fc_lport *lport = rport->lport;
2515 
2516 	atomic_inc(&lport->act_rport_cnt);
2517 }
2518 
2519 static void
2520 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2521 {
2522 	struct nvme_fc_lport *lport = rport->lport;
2523 	u32 cnt;
2524 
2525 	cnt = atomic_dec_return(&lport->act_rport_cnt);
2526 	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2527 		lport->ops->localport_delete(&lport->localport);
2528 }
2529 
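/*
 * Track the number of active associations (controllers) on a remoteport.
 * The first controller to become active marks the rport active on its
 * lport; the last one to go inactive triggers the LLDD remoteport_delete
 * callback if the remoteport has already been deleted.
 */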
2530 static int
2531 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2532 {
2533 	struct nvme_fc_rport *rport = ctrl->rport;
2534 	u32 cnt;
2535 
2536 	if (ctrl->assoc_active)
2537 		return 1;
2538 
2539 	ctrl->assoc_active = true;
2540 	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2541 	if (cnt == 1)
2542 		nvme_fc_rport_active_on_lport(rport);
2543 
2544 	return 0;
2545 }
2546 
2547 static int
2548 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2549 {
2550 	struct nvme_fc_rport *rport = ctrl->rport;
2551 	struct nvme_fc_lport *lport = rport->lport;
2552 	u32 cnt;
2553 
2554 	/* ctrl->assoc_active=false will be set independently */
2555 
2556 	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2557 	if (cnt == 0) {
2558 		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2559 			lport->ops->remoteport_delete(&rport->remoteport);
2560 		nvme_fc_rport_inactive_on_lport(rport);
2561 	}
2562 
2563 	return 0;
2564 }
2565 
2566 /*
2567  * This routine restarts the controller on the host side, and
2568  * on the link side, recreates the controller association.
2569  */
2570 static int
2571 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2572 {
2573 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2574 	int ret;
2575 	bool changed;
2576 
2577 	++ctrl->ctrl.nr_reconnects;
2578 
2579 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2580 		return -ENODEV;
2581 
2582 	if (nvme_fc_ctlr_active_on_rport(ctrl))
2583 		return -ENOTUNIQ;
2584 
2585 	/*
2586 	 * Create the admin queue
2587 	 */
2588 
2589 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2590 				NVME_AQ_DEPTH);
2591 	if (ret)
2592 		goto out_free_queue;
2593 
2594 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2595 				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
2596 	if (ret)
2597 		goto out_delete_hw_queue;
2598 
2599 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2600 
2601 	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2602 	if (ret)
2603 		goto out_disconnect_admin_queue;
2604 
2605 	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2606 
2607 	/*
2608 	 * Check controller capabilities
2609 	 *
2610 	 * todo:- add code to check if ctrl attributes changed from
2611 	 * prior connection values
2612 	 */
2613 
2614 	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
2615 	if (ret) {
2616 		dev_err(ctrl->ctrl.device,
2617 			"prop_get NVME_REG_CAP failed\n");
2618 		goto out_disconnect_admin_queue;
2619 	}
2620 
2621 	ctrl->ctrl.sqsize =
2622 		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
2623 
2624 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
2625 	if (ret)
2626 		goto out_disconnect_admin_queue;
2627 
2628 	ctrl->ctrl.max_hw_sectors =
2629 		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
2630 
2631 	ret = nvme_init_identify(&ctrl->ctrl);
2632 	if (ret)
2633 		goto out_disconnect_admin_queue;
2634 
2635 	/* sanity checks */
2636 
2637 	/* FC-NVME does not have other data in the capsule */
2638 	if (ctrl->ctrl.icdoff) {
2639 		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2640 				ctrl->ctrl.icdoff);
2641 		goto out_disconnect_admin_queue;
2642 	}
2643 
2644 	/* FC-NVME supports normal SGL Data Block Descriptors */
2645 
2646 	if (opts->queue_size > ctrl->ctrl.maxcmd) {
2647 		/* warn if maxcmd is lower than queue_size */
2648 		dev_warn(ctrl->ctrl.device,
2649 			"queue_size %zu > ctrl maxcmd %u, reducing "
2650 			"to queue_size\n",
2651 			opts->queue_size, ctrl->ctrl.maxcmd);
2652 		opts->queue_size = ctrl->ctrl.maxcmd;
2653 	}
2654 
2655 	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
2656 		/* warn if sqsize is lower than queue_size */
2657 		dev_warn(ctrl->ctrl.device,
2658 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
2659 			opts->queue_size, ctrl->ctrl.sqsize + 1);
2660 		opts->queue_size = ctrl->ctrl.sqsize + 1;
2661 	}
2662 
2663 	ret = nvme_fc_init_aen_ops(ctrl);
2664 	if (ret)
2665 		goto out_term_aen_ops;
2666 
2667 	/*
2668 	 * Create the io queues
2669 	 */
2670 
2671 	if (ctrl->ctrl.queue_count > 1) {
2672 		if (!ctrl->ioq_live)
2673 			ret = nvme_fc_create_io_queues(ctrl);
2674 		else
2675 			ret = nvme_fc_recreate_io_queues(ctrl);
2676 		if (ret)
2677 			goto out_term_aen_ops;
2678 	}
2679 
2680 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2681 
2682 	ctrl->ctrl.nr_reconnects = 0;
2683 
2684 	if (changed)
2685 		nvme_start_ctrl(&ctrl->ctrl);
2686 
2687 	return 0;	/* Success */
2688 
2689 out_term_aen_ops:
2690 	nvme_fc_term_aen_ops(ctrl);
2691 out_disconnect_admin_queue:
2692 	/* send a Disconnect(association) LS to fc-nvme target */
2693 	nvme_fc_xmt_disconnect_assoc(ctrl);
2694 out_delete_hw_queue:
2695 	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2696 out_free_queue:
2697 	nvme_fc_free_queue(&ctrl->queues[0]);
2698 	ctrl->assoc_active = false;
2699 	nvme_fc_ctlr_inactive_on_rport(ctrl);
2700 
2701 	return ret;
2702 }
2703 
2704 /*
2705  * This routine stops operation of the controller on the host side.
2706  * On the host os stack side: Admin and IO queues are stopped,
2707  *   outstanding ios on them terminated via FC ABTS.
2708  * On the link side: the association is terminated.
2709  */
2710 static void
2711 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2712 {
2713 	unsigned long flags;
2714 
2715 	if (!ctrl->assoc_active)
2716 		return;
2717 	ctrl->assoc_active = false;
2718 
2719 	spin_lock_irqsave(&ctrl->lock, flags);
2720 	ctrl->flags |= FCCTRL_TERMIO;
2721 	ctrl->iocnt = 0;
2722 	spin_unlock_irqrestore(&ctrl->lock, flags);
2723 
2724 	/*
2725 	 * If io queues are present, stop them and terminate all outstanding
2726 	 * ios on them. As FC allocates FC exchange for each io, the
2727 	 * transport must contact the LLDD to terminate the exchange,
2728 	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2729 	 * to tell us what io's are busy and invoke a transport routine
2730 	 * to kill them with the LLDD.  After terminating the exchange
2731 	 * the LLDD will call the transport's normal io done path, but it
2732 	 * will have an aborted status. The done path will return the
2733 	 * io requests back to the block layer as part of normal completions
2734 	 * (but with error status).
2735 	 */
2736 	if (ctrl->ctrl.queue_count > 1) {
2737 		nvme_stop_queues(&ctrl->ctrl);
2738 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
2739 				nvme_fc_terminate_exchange, &ctrl->ctrl);
2740 	}
2741 
2742 	/*
2743 	 * Other transports, which don't have link-level contexts bound
2744 	 * to sqe's, would try to gracefully shutdown the controller by
2745 	 * writing the registers for shutdown and polling (call
2746 	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2747 	 * just aborted and we will wait on those contexts, and given
2748 	 * there was no indication of how live the controller is on the
2749 	 * link, don't send more io to create more contexts for the
2750 	 * shutdown. Let the controller fail via keepalive failure if
2751 	 * its still present.
2752 	 */
2753 
2754 	/*
2755 	 * clean up the admin queue. Same thing as above.
2756 	 * use blk_mq_tagset_busy_iter() and the transport routine to
2757 	 * terminate the exchanges.
2758 	 */
2759 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2760 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2761 				nvme_fc_terminate_exchange, &ctrl->ctrl);
2762 
2763 	/* kill the aens as they are a separate path */
2764 	nvme_fc_abort_aen_ops(ctrl);
2765 
2766 	/* wait for all io that had to be aborted */
2767 	spin_lock_irq(&ctrl->lock);
2768 	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
2769 	ctrl->flags &= ~FCCTRL_TERMIO;
2770 	spin_unlock_irq(&ctrl->lock);
2771 
2772 	nvme_fc_term_aen_ops(ctrl);
2773 
2774 	/*
2775 	 * send a Disconnect(association) LS to fc-nvme target
2776 	 * Note: could have been sent at top of process, but
2777 	 * cleaner on link traffic if after the aborts complete.
2778 	 * Note: if association doesn't exist, association_id will be 0
2779 	 */
2780 	if (ctrl->association_id)
2781 		nvme_fc_xmt_disconnect_assoc(ctrl);
2782 
2783 	if (ctrl->ctrl.tagset) {
2784 		nvme_fc_delete_hw_io_queues(ctrl);
2785 		nvme_fc_free_io_queues(ctrl);
2786 	}
2787 
2788 	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2789 	nvme_fc_free_queue(&ctrl->queues[0]);
2790 
2791 	/* re-enable the admin_q so anything new can fast fail */
2792 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2793 
2794 	/* resume the io queues so that things will fast fail */
2795 	nvme_start_queues(&ctrl->ctrl);
2796 
2797 	nvme_fc_ctlr_inactive_on_rport(ctrl);
2798 }
2799 
2800 static void
2801 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2802 {
2803 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2804 
2805 	cancel_delayed_work_sync(&ctrl->connect_work);
2806 	/*
2807 	 * kill the association on the link side.  this will block
2808 	 * waiting for io to terminate
2809 	 */
2810 	nvme_fc_delete_association(ctrl);
2811 }
2812 
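/*
 * Called when an association (re)connect attempt fails while the
 * controller is in CONNECTING state: either schedule another connect
 * attempt (bounded by dev_loss_tmo and the reconnect policy) or give up
 * and delete the controller.
 */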
2813 static void
2814 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2815 {
2816 	struct nvme_fc_rport *rport = ctrl->rport;
2817 	struct nvme_fc_remote_port *portptr = &rport->remoteport;
2818 	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
2819 	bool recon = true;
2820 
2821 	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
2822 		return;
2823 
2824 	if (portptr->port_state == FC_OBJSTATE_ONLINE)
2825 		dev_info(ctrl->ctrl.device,
2826 			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2827 			ctrl->cnum, status);
2828 	else if (time_after_eq(jiffies, rport->dev_loss_end))
2829 		recon = false;
2830 
2831 	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
2832 		if (portptr->port_state == FC_OBJSTATE_ONLINE)
2833 			dev_info(ctrl->ctrl.device,
2834 				"NVME-FC{%d}: Reconnect attempt in %ld "
2835 				"seconds\n",
2836 				ctrl->cnum, recon_delay / HZ);
2837 		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
2838 			recon_delay = rport->dev_loss_end - jiffies;
2839 
2840 		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
2841 	} else {
2842 		if (portptr->port_state == FC_OBJSTATE_ONLINE)
2843 			dev_warn(ctrl->ctrl.device,
2844 				"NVME-FC{%d}: Max reconnect attempts (%d) "
2845 				"reached.\n",
2846 				ctrl->cnum, ctrl->ctrl.nr_reconnects);
2847 		else
2848 			dev_warn(ctrl->ctrl.device,
2849 				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
2850 				"while waiting for remoteport connectivity.\n",
2851 				ctrl->cnum, portptr->dev_loss_tmo);
2852 		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
2853 	}
2854 }
2855 
2856 static void
2857 nvme_fc_reset_ctrl_work(struct work_struct *work)
2858 {
2859 	struct nvme_fc_ctrl *ctrl =
2860 		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2861 	int ret;
2862 
2863 	nvme_stop_ctrl(&ctrl->ctrl);
2864 
2865 	/* will block waiting for io to terminate */
2866 	nvme_fc_delete_association(ctrl);
2867 
2868 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2869 		dev_err(ctrl->ctrl.device,
2870 			"NVME-FC{%d}: error_recovery: Couldn't change state "
2871 			"to CONNECTING\n", ctrl->cnum);
2872 		return;
2873 	}
2874 
2875 	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
2876 		ret = nvme_fc_create_association(ctrl);
2877 	else
2878 		ret = -ENOTCONN;
2879 
2880 	if (ret)
2881 		nvme_fc_reconnect_or_delete(ctrl, ret);
2882 	else
2883 		dev_info(ctrl->ctrl.device,
2884 			"NVME-FC{%d}: controller reset complete\n",
2885 			ctrl->cnum);
2886 }
2887 
2888 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2889 	.name			= "fc",
2890 	.module			= THIS_MODULE,
2891 	.flags			= NVME_F_FABRICS,
2892 	.reg_read32		= nvmf_reg_read32,
2893 	.reg_read64		= nvmf_reg_read64,
2894 	.reg_write32		= nvmf_reg_write32,
2895 	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
2896 	.submit_async_event	= nvme_fc_submit_async_event,
2897 	.delete_ctrl		= nvme_fc_delete_ctrl,
2898 	.get_address		= nvmf_get_address,
2899 };
2900 
2901 static void
2902 nvme_fc_connect_ctrl_work(struct work_struct *work)
2903 {
2904 	int ret;
2905 
2906 	struct nvme_fc_ctrl *ctrl =
2907 			container_of(to_delayed_work(work),
2908 				struct nvme_fc_ctrl, connect_work);
2909 
2910 	ret = nvme_fc_create_association(ctrl);
2911 	if (ret)
2912 		nvme_fc_reconnect_or_delete(ctrl, ret);
2913 	else
2914 		dev_info(ctrl->ctrl.device,
2915 			"NVME-FC{%d}: controller connect complete\n",
2916 			ctrl->cnum);
2917 }
2918 
2919 
2920 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
2921 	.queue_rq	= nvme_fc_queue_rq,
2922 	.complete	= nvme_fc_complete_rq,
2923 	.init_request	= nvme_fc_init_request,
2924 	.exit_request	= nvme_fc_exit_request,
2925 	.init_hctx	= nvme_fc_init_admin_hctx,
2926 	.timeout	= nvme_fc_timeout,
2927 };
2928 
2929 
2930 /*
2931  * Fails a controller request if it matches an existing controller
2932  * (association) with the same tuple:
2933  * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
2934  *
2935  * The ports don't need to be compared as they are intrinsically
2936  * already matched by the port pointers supplied.
2937  */
2938 static bool
2939 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
2940 		struct nvmf_ctrl_options *opts)
2941 {
2942 	struct nvme_fc_ctrl *ctrl;
2943 	unsigned long flags;
2944 	bool found = false;
2945 
2946 	spin_lock_irqsave(&rport->lock, flags);
2947 	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
2948 		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
2949 		if (found)
2950 			break;
2951 	}
2952 	spin_unlock_irqrestore(&rport->lock, flags);
2953 
2954 	return found;
2955 }
2956 
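/*
 * Allocate and initialize an FC nvme controller for the given
 * lport/rport pair: set up the admin tag set and admin queue, register
 * with the nvme core, and kick off the initial association connect via
 * connect_work.
 */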
2957 static struct nvme_ctrl *
2958 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2959 	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2960 {
2961 	struct nvme_fc_ctrl *ctrl;
2962 	unsigned long flags;
2963 	int ret, idx;
2964 
2965 	if (!(rport->remoteport.port_role &
2966 	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
2967 		ret = -EBADR;
2968 		goto out_fail;
2969 	}
2970 
2971 	if (!opts->duplicate_connect &&
2972 	    nvme_fc_existing_controller(rport, opts)) {
2973 		ret = -EALREADY;
2974 		goto out_fail;
2975 	}
2976 
2977 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2978 	if (!ctrl) {
2979 		ret = -ENOMEM;
2980 		goto out_fail;
2981 	}
2982 
2983 	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2984 	if (idx < 0) {
2985 		ret = -ENOSPC;
2986 		goto out_free_ctrl;
2987 	}
2988 
2989 	ctrl->ctrl.opts = opts;
2990 	ctrl->ctrl.nr_reconnects = 0;
2991 	INIT_LIST_HEAD(&ctrl->ctrl_list);
2992 	ctrl->lport = lport;
2993 	ctrl->rport = rport;
2994 	ctrl->dev = lport->dev;
2995 	ctrl->cnum = idx;
2996 	ctrl->ioq_live = false;
2997 	ctrl->assoc_active = false;
2998 	init_waitqueue_head(&ctrl->ioabort_wait);
2999 
3000 	get_device(ctrl->dev);
3001 	kref_init(&ctrl->ref);
3002 
3003 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3004 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3005 	spin_lock_init(&ctrl->lock);
3006 
3007 	/* io queue count */
3008 	ctrl->ctrl.queue_count = min_t(unsigned int,
3009 				opts->nr_io_queues,
3010 				lport->ops->max_hw_queues);
3011 	ctrl->ctrl.queue_count++;	/* +1 for admin queue */
3012 
3013 	ctrl->ctrl.sqsize = opts->queue_size - 1;
3014 	ctrl->ctrl.kato = opts->kato;
3015 	ctrl->ctrl.cntlid = 0xffff;
3016 
3017 	ret = -ENOMEM;
3018 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3019 				sizeof(struct nvme_fc_queue), GFP_KERNEL);
3020 	if (!ctrl->queues)
3021 		goto out_free_ida;
3022 
3023 	nvme_fc_init_queue(ctrl, 0);
3024 
3025 	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3026 	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3027 	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3028 	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
3029 	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
3030 	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
3031 					(SG_CHUNK_SIZE *
3032 						sizeof(struct scatterlist)) +
3033 					ctrl->lport->ops->fcprqst_priv_sz;
3034 	ctrl->admin_tag_set.driver_data = ctrl;
3035 	ctrl->admin_tag_set.nr_hw_queues = 1;
3036 	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3037 	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3038 
3039 	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3040 	if (ret)
3041 		goto out_free_queues;
3042 	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3043 
3044 	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3045 	if (IS_ERR(ctrl->ctrl.admin_q)) {
3046 		ret = PTR_ERR(ctrl->ctrl.admin_q);
3047 		goto out_free_admin_tag_set;
3048 	}
3049 
3050 	/*
3051 	 * Would have been nice to init io queues tag set as well.
3052 	 * However, we require interaction from the controller
3053 	 * for max io queue count before we can do so.
3054 	 * Defer this to the connect path.
3055 	 */
3056 
3057 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3058 	if (ret)
3059 		goto out_cleanup_admin_q;
3060 
3061 	/* at this point, teardown path changes to ref counting on nvme ctrl */
3062 
3063 	spin_lock_irqsave(&rport->lock, flags);
3064 	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3065 	spin_unlock_irqrestore(&rport->lock, flags);
3066 
3067 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3068 	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3069 		dev_err(ctrl->ctrl.device,
3070 			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3071 		goto fail_ctrl;
3072 	}
3073 
3074 	nvme_get_ctrl(&ctrl->ctrl);
3075 
3076 	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3077 		nvme_put_ctrl(&ctrl->ctrl);
3078 		dev_err(ctrl->ctrl.device,
3079 			"NVME-FC{%d}: failed to schedule initial connect\n",
3080 			ctrl->cnum);
3081 		goto fail_ctrl;
3082 	}
3083 
3084 	flush_delayed_work(&ctrl->connect_work);
3085 
3086 	dev_info(ctrl->ctrl.device,
3087 		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3088 		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3089 
3090 	return &ctrl->ctrl;
3091 
3092 fail_ctrl:
3093 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3094 	cancel_work_sync(&ctrl->ctrl.reset_work);
3095 	cancel_delayed_work_sync(&ctrl->connect_work);
3096 
3097 	ctrl->ctrl.opts = NULL;
3098 
3099 	/* initiate nvme ctrl ref counting teardown */
3100 	nvme_uninit_ctrl(&ctrl->ctrl);
3101 
3102 	/* Remove core ctrl ref. */
3103 	nvme_put_ctrl(&ctrl->ctrl);
3104 
3105 	/* as we're past the point where we transition to the ref
3106 	 * counting teardown path, if we return a bad pointer here,
3107 	 * the calling routine, thinking it's prior to the
3108 	 * transition, will do an rport put. Since the teardown
3109 	 * path also does a rport put, we do an extra get here so
3110 	 * proper order/teardown happens.
3111 	 */
3112 	nvme_fc_rport_get(rport);
3113 
3114 	return ERR_PTR(-EIO);
3115 
3116 out_cleanup_admin_q:
3117 	blk_cleanup_queue(ctrl->ctrl.admin_q);
3118 out_free_admin_tag_set:
3119 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
3120 out_free_queues:
3121 	kfree(ctrl->queues);
3122 out_free_ida:
3123 	put_device(ctrl->dev);
3124 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3125 out_free_ctrl:
3126 	kfree(ctrl);
3127 out_fail:
3128 	/* exit via here doesn't follow ctlr ref points */
3129 	return ERR_PTR(ret);
3130 }
3131 
3132 
3133 struct nvmet_fc_traddr {
3134 	u64	nn;
3135 	u64	pn;
3136 };
3137 
3138 static int
3139 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3140 {
3141 	u64 token64;
3142 
3143 	if (match_u64(sstr, &token64))
3144 		return -EINVAL;
3145 	*val = token64;
3146 
3147 	return 0;
3148 }
3149 
3150 /*
3151  * This routine validates and extracts the WWNs from the TRADDR string.
3152  * As kernel parsers need the 0x to determine number base, universally
3153  * build string to parse with 0x prefix before parsing name strings.
3154  */
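/*
 * Example (hypothetical WWN values): a traddr of
 *   "nn-0x20000090fae0b5f5:pn-0x10000090fae0b5f5"
 * parses to traddr->nn = 0x20000090fae0b5f5 and
 * traddr->pn = 0x10000090fae0b5f5.
 */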
3155 static int
3156 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3157 {
3158 	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3159 	substring_t wwn = { name, &name[sizeof(name)-1] };
3160 	int nnoffset, pnoffset;
3161 
3162 	/* validate the string is in one of the 2 allowed formats */
3163 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3164 			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3165 			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3166 				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3167 		nnoffset = NVME_FC_TRADDR_OXNNLEN;
3168 		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3169 						NVME_FC_TRADDR_OXNNLEN;
3170 	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3171 			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3172 			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3173 				"pn-", NVME_FC_TRADDR_NNLEN))) {
3174 		nnoffset = NVME_FC_TRADDR_NNLEN;
3175 		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3176 	} else
3177 		goto out_einval;
3178 
3179 	name[0] = '0';
3180 	name[1] = 'x';
3181 	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3182 
3183 	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3184 	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3185 		goto out_einval;
3186 
3187 	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3188 	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3189 		goto out_einval;
3190 
3191 	return 0;
3192 
3193 out_einval:
3194 	pr_warn("%s: bad traddr string\n", __func__);
3195 	return -EINVAL;
3196 }
3197 
3198 static struct nvme_ctrl *
3199 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3200 {
3201 	struct nvme_fc_lport *lport;
3202 	struct nvme_fc_rport *rport;
3203 	struct nvme_ctrl *ctrl;
3204 	struct nvmet_fc_traddr laddr = { 0L, 0L };
3205 	struct nvmet_fc_traddr raddr = { 0L, 0L };
3206 	unsigned long flags;
3207 	int ret;
3208 
3209 	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3210 	if (ret || !raddr.nn || !raddr.pn)
3211 		return ERR_PTR(-EINVAL);
3212 
3213 	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3214 	if (ret || !laddr.nn || !laddr.pn)
3215 		return ERR_PTR(-EINVAL);
3216 
3217 	/* find the host and remote ports to connect together */
3218 	spin_lock_irqsave(&nvme_fc_lock, flags);
3219 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3220 		if (lport->localport.node_name != laddr.nn ||
3221 		    lport->localport.port_name != laddr.pn)
3222 			continue;
3223 
3224 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
3225 			if (rport->remoteport.node_name != raddr.nn ||
3226 			    rport->remoteport.port_name != raddr.pn)
3227 				continue;
3228 
3229 			/* if fail to get reference fall through. Will error */
3230 			if (!nvme_fc_rport_get(rport))
3231 				break;
3232 
3233 			spin_unlock_irqrestore(&nvme_fc_lock, flags);
3234 
3235 			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3236 			if (IS_ERR(ctrl))
3237 				nvme_fc_rport_put(rport);
3238 			return ctrl;
3239 		}
3240 	}
3241 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
3242 
3243 	pr_warn("%s: %s - %s combination not found\n",
3244 		__func__, opts->traddr, opts->host_traddr);
3245 	return ERR_PTR(-ENOENT);
3246 }
3247 
3248 
3249 static struct nvmf_transport_ops nvme_fc_transport = {
3250 	.name		= "fc",
3251 	.module		= THIS_MODULE,
3252 	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3253 	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3254 	.create_ctrl	= nvme_fc_create_ctrl,
3255 };
3256 
3257 static int __init nvme_fc_init_module(void)
3258 {
3259 	int ret;
3260 
3261 	/*
3262 	 * NOTE:
3263 	 * It is expected that in the future the kernel will combine
3264 	 * the FC-isms that are currently under scsi and now being
3265 	 * added to by NVME into a new standalone FC class. The SCSI
3266 	 * and NVME protocols and their devices would be under this
3267 	 * new FC class.
3268 	 *
3269 	 * As we need something to post FC-specific udev events to,
3270 	 * specifically for nvme probe events, start by creating the
3271 	 * new device class.  When the new standalone FC class is
3272 	 * put in place, this code will move to a more generic
3273 	 * location for the class.
3274 	 */
3275 	fc_class = class_create(THIS_MODULE, "fc");
3276 	if (IS_ERR(fc_class)) {
3277 		pr_err("couldn't register class fc\n");
3278 		return PTR_ERR(fc_class);
3279 	}
3280 
3281 	/*
3282 	 * Create a device for the FC-centric udev events
3283 	 */
3284 	fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
3285 				"fc_udev_device");
3286 	if (IS_ERR(fc_udev_device)) {
3287 		pr_err("couldn't create fc_udev device!\n");
3288 		ret = PTR_ERR(fc_udev_device);
3289 		goto out_destroy_class;
3290 	}
3291 
3292 	ret = nvmf_register_transport(&nvme_fc_transport);
3293 	if (ret)
3294 		goto out_destroy_device;
3295 
3296 	return 0;
3297 
3298 out_destroy_device:
3299 	device_destroy(fc_class, MKDEV(0, 0));
3300 out_destroy_class:
3301 	class_destroy(fc_class);
3302 	return ret;
3303 }
3304 
3305 static void __exit nvme_fc_exit_module(void)
3306 {
3307 	/* sanity check - all lports should be removed */
3308 	if (!list_empty(&nvme_fc_lport_list))
3309 		pr_warn("%s: localport list not empty\n", __func__);
3310 
3311 	nvmf_unregister_transport(&nvme_fc_transport);
3312 
3313 	ida_destroy(&nvme_fc_local_port_cnt);
3314 	ida_destroy(&nvme_fc_ctrl_cnt);
3315 
3316 	device_destroy(fc_class, MKDEV(0, 0));
3317 	class_destroy(fc_class);
3318 }
3319 
3320 module_init(nvme_fc_init_module);
3321 module_exit(nvme_fc_exit_module);
3322 
3323 MODULE_LICENSE("GPL v2");
3324