1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Fibre Channel transport specific attributes exported to sysfs.
4  *
5  *  Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
6  *  Copyright (C) 2004-2007   James Smart, Emulex Corporation
7  *    Rewrite for host, target, device, and remote port attributes,
8  *    statistics, and service functions...
9  *    Add vports, etc
10  */
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/delay.h>
15 #include <linux/kernel.h>
16 #include <linux/bsg-lib.h>
17 #include <scsi/scsi_device.h>
18 #include <scsi/scsi_host.h>
19 #include <scsi/scsi_transport.h>
20 #include <scsi/scsi_transport_fc.h>
21 #include <scsi/scsi_cmnd.h>
22 #include <net/netlink.h>
23 #include <scsi/scsi_netlink_fc.h>
24 #include <scsi/scsi_bsg_fc.h>
25 #include <uapi/scsi/fc/fc_els.h>
26 #include "scsi_priv.h"
27 
28 static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
29 static void fc_vport_sched_delete(struct work_struct *work);
30 static int fc_vport_setup(struct Scsi_Host *shost, int channel,
31 	struct device *pdev, struct fc_vport_identifiers  *ids,
32 	struct fc_vport **vport);
33 static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
34 static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
35 static void fc_bsg_remove(struct request_queue *);
36 static void fc_bsg_goose_queue(struct fc_rport *);
37 static void fc_li_stats_update(struct fc_fn_li_desc *li_desc,
38 			       struct fc_fpin_stats *stats);
39 static void fc_delivery_stats_update(u32 reason_code,
40 				     struct fc_fpin_stats *stats);
41 static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats);
42 
43 /*
44  * Module Parameters
45  */
46 
47 /*
48  * dev_loss_tmo: the default number of seconds that the FC transport
49  *   should insulate the loss of a remote port.
50  *   The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
51  */
52 static unsigned int fc_dev_loss_tmo = 60;		/* seconds */
53 
54 module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
55 MODULE_PARM_DESC(dev_loss_tmo,
56 		 "Maximum number of seconds that the FC transport should"
57 		 " insulate the loss of a remote port. Once this value is"
58 		 " exceeded, the scsi target is removed. Value should be"
59 		 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
60 		 " fast_io_fail_tmo is not set.");
61 
62 /*
63  * Redefine so that we can have same named attributes in the
64  * sdev/starget/host objects.
65  */
66 #define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store)		\
67 struct device_attribute device_attr_##_prefix##_##_name = 	\
68 	__ATTR(_name,_mode,_show,_store)
69 
70 #define fc_enum_name_search(title, table_type, table)			\
71 static const char *get_fc_##title##_name(enum table_type table_key)	\
72 {									\
73 	int i;								\
74 	char *name = NULL;						\
75 									\
76 	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
77 		if (table[i].value == table_key) {			\
78 			name = table[i].name;				\
79 			break;						\
80 		}							\
81 	}								\
82 	return name;							\
83 }
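/*
 * Illustrative sketch only: the instantiation
 * fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
 * further below generates, roughly:
 *
 *	static const char *get_fc_port_type_name(enum fc_port_type key)
 *	{
 *		// walk fc_port_type_names[] and return the matching
 *		// ->name, or NULL if key is not in the table
 *	}
 *
 * e.g. get_fc_port_type_name(FC_PORTTYPE_LPORT) returns
 * "LPort (private loop)", while an unlisted value yields NULL.
 */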
84 
85 #define fc_enum_name_match(title, table_type, table)			\
86 static int get_fc_##title##_match(const char *table_key,		\
87 		enum table_type *value)					\
88 {									\
89 	int i;								\
90 									\
91 	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
92 		if (strncmp(table_key, table[i].name,			\
93 				table[i].matchlen) == 0) {		\
94 			*value = table[i].value;			\
95 			return 0; /* success */				\
96 		}							\
97 	}								\
98 	return 1; /* failure */						\
99 }
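/*
 * Illustrative sketch only: fc_enum_name_match(port_state, fc_port_state,
 * fc_port_state_names) below yields get_fc_port_state_match(), which
 * compares only the first ->matchlen characters of each entry, so a
 * sysfs buffer with a trailing newline still matches:
 *
 *	enum fc_port_state st;
 *
 *	if (!get_fc_port_state_match("Marginal\n", &st))
 *		;	// st == FC_PORTSTATE_MARGINAL, 0 means success
 *
 * A string that matches no table entry makes it return 1.
 */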
100 
101 
102 /* Convert fc_port_type values to ascii string name */
103 static struct {
104 	enum fc_port_type	value;
105 	char			*name;
106 } fc_port_type_names[] = {
107 	{ FC_PORTTYPE_UNKNOWN,		"Unknown" },
108 	{ FC_PORTTYPE_OTHER,		"Other" },
109 	{ FC_PORTTYPE_NOTPRESENT,	"Not Present" },
110 	{ FC_PORTTYPE_NPORT,	"NPort (fabric via point-to-point)" },
111 	{ FC_PORTTYPE_NLPORT,	"NLPort (fabric via loop)" },
112 	{ FC_PORTTYPE_LPORT,	"LPort (private loop)" },
113 	{ FC_PORTTYPE_PTP,	"Point-To-Point (direct nport connection)" },
114 	{ FC_PORTTYPE_NPIV,		"NPIV VPORT" },
115 };
116 fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
117 #define FC_PORTTYPE_MAX_NAMELEN		50
118 
119 /* Reuse fc_port_type enum function for vport_type */
120 #define get_fc_vport_type_name get_fc_port_type_name
121 
122 
123 /* Convert fc_host_event_code values to ascii string name */
124 static const struct {
125 	enum fc_host_event_code		value;
126 	char				*name;
127 } fc_host_event_code_names[] = {
128 	{ FCH_EVT_LIP,			"lip" },
129 	{ FCH_EVT_LINKUP,		"link_up" },
130 	{ FCH_EVT_LINKDOWN,		"link_down" },
131 	{ FCH_EVT_LIPRESET,		"lip_reset" },
132 	{ FCH_EVT_RSCN,			"rscn" },
133 	{ FCH_EVT_ADAPTER_CHANGE,	"adapter_chg" },
134 	{ FCH_EVT_PORT_UNKNOWN,		"port_unknown" },
135 	{ FCH_EVT_PORT_ONLINE,		"port_online" },
136 	{ FCH_EVT_PORT_OFFLINE,		"port_offline" },
137 	{ FCH_EVT_PORT_FABRIC,		"port_fabric" },
138 	{ FCH_EVT_LINK_UNKNOWN,		"link_unknown" },
139 	{ FCH_EVT_LINK_FPIN,		"link_FPIN" },
140 	{ FCH_EVT_VENDOR_UNIQUE,	"vendor_unique" },
141 };
142 fc_enum_name_search(host_event_code, fc_host_event_code,
143 		fc_host_event_code_names)
144 #define FC_HOST_EVENT_CODE_MAX_NAMELEN	30
145 
146 
147 /* Convert fc_port_state values to ascii string name */
148 static struct {
149 	enum fc_port_state	value;
150 	char			*name;
151 	int			matchlen;
152 } fc_port_state_names[] = {
153 	{ FC_PORTSTATE_UNKNOWN,		"Unknown", 7},
154 	{ FC_PORTSTATE_NOTPRESENT,	"Not Present", 11 },
155 	{ FC_PORTSTATE_ONLINE,		"Online", 6 },
156 	{ FC_PORTSTATE_OFFLINE,		"Offline", 7 },
157 	{ FC_PORTSTATE_BLOCKED,		"Blocked", 7 },
158 	{ FC_PORTSTATE_BYPASSED,	"Bypassed", 8 },
159 	{ FC_PORTSTATE_DIAGNOSTICS,	"Diagnostics", 11 },
160 	{ FC_PORTSTATE_LINKDOWN,	"Linkdown", 8 },
161 	{ FC_PORTSTATE_ERROR,		"Error", 5 },
162 	{ FC_PORTSTATE_LOOPBACK,	"Loopback", 8 },
163 	{ FC_PORTSTATE_DELETED,		"Deleted", 7 },
164 	{ FC_PORTSTATE_MARGINAL,	"Marginal", 8 },
165 };
166 fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
167 fc_enum_name_match(port_state, fc_port_state, fc_port_state_names)
168 #define FC_PORTSTATE_MAX_NAMELEN	20
169 
170 
171 /* Convert fc_vport_state values to ascii string name */
172 static struct {
173 	enum fc_vport_state	value;
174 	char			*name;
175 } fc_vport_state_names[] = {
176 	{ FC_VPORT_UNKNOWN,		"Unknown" },
177 	{ FC_VPORT_ACTIVE,		"Active" },
178 	{ FC_VPORT_DISABLED,		"Disabled" },
179 	{ FC_VPORT_LINKDOWN,		"Linkdown" },
180 	{ FC_VPORT_INITIALIZING,	"Initializing" },
181 	{ FC_VPORT_NO_FABRIC_SUPP,	"No Fabric Support" },
182 	{ FC_VPORT_NO_FABRIC_RSCS,	"No Fabric Resources" },
183 	{ FC_VPORT_FABRIC_LOGOUT,	"Fabric Logout" },
184 	{ FC_VPORT_FABRIC_REJ_WWN,	"Fabric Rejected WWN" },
185 	{ FC_VPORT_FAILED,		"VPort Failed" },
186 };
187 fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
188 #define FC_VPORTSTATE_MAX_NAMELEN	24
189 
190 /* Reuse fc_vport_state enum function for vport_last_state */
191 #define get_fc_vport_last_state_name get_fc_vport_state_name
192 
193 
194 /* Convert fc_tgtid_binding_type values to ascii string name */
195 static const struct {
196 	enum fc_tgtid_binding_type	value;
197 	char				*name;
198 	int				matchlen;
199 } fc_tgtid_binding_type_names[] = {
200 	{ FC_TGTID_BIND_NONE, "none", 4 },
201 	{ FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
202 	{ FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
203 	{ FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
204 };
205 fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
206 		fc_tgtid_binding_type_names)
207 fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
208 		fc_tgtid_binding_type_names)
209 #define FC_BINDTYPE_MAX_NAMELEN	30
210 
211 
212 #define fc_bitfield_name_search(title, table)			\
213 static ssize_t							\
214 get_fc_##title##_names(u32 table_key, char *buf)		\
215 {								\
216 	char *prefix = "";					\
217 	ssize_t len = 0;					\
218 	int i;							\
219 								\
220 	for (i = 0; i < ARRAY_SIZE(table); i++) {		\
221 		if (table[i].value & table_key) {		\
222 			len += sprintf(buf + len, "%s%s",	\
223 				prefix, table[i].name);		\
224 			prefix = ", ";				\
225 		}						\
226 	}							\
227 	len += sprintf(buf + len, "\n");			\
228 	return len;						\
229 }
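/*
 * Illustrative sketch only: fc_bitfield_name_search(cos, fc_cos_names)
 * below creates get_fc_cos_names(), which renders the set bits as a
 * comma separated list:
 *
 *	len = get_fc_cos_names(FC_COS_CLASS2 | FC_COS_CLASS3, buf);
 *
 * writes "Class 2, Class 3\n" into buf and returns the number of
 * characters written.
 */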
230 
231 
232 /* Convert FC_COS bit values to ascii string name */
233 static const struct {
234 	u32 			value;
235 	char			*name;
236 } fc_cos_names[] = {
237 	{ FC_COS_CLASS1,	"Class 1" },
238 	{ FC_COS_CLASS2,	"Class 2" },
239 	{ FC_COS_CLASS3,	"Class 3" },
240 	{ FC_COS_CLASS4,	"Class 4" },
241 	{ FC_COS_CLASS6,	"Class 6" },
242 };
243 fc_bitfield_name_search(cos, fc_cos_names)
244 
245 
246 /* Convert FC_PORTSPEED bit values to ascii string name */
247 static const struct {
248 	u32 			value;
249 	char			*name;
250 } fc_port_speed_names[] = {
251 	{ FC_PORTSPEED_1GBIT,		"1 Gbit" },
252 	{ FC_PORTSPEED_2GBIT,		"2 Gbit" },
253 	{ FC_PORTSPEED_4GBIT,		"4 Gbit" },
254 	{ FC_PORTSPEED_10GBIT,		"10 Gbit" },
255 	{ FC_PORTSPEED_8GBIT,		"8 Gbit" },
256 	{ FC_PORTSPEED_16GBIT,		"16 Gbit" },
257 	{ FC_PORTSPEED_32GBIT,		"32 Gbit" },
258 	{ FC_PORTSPEED_20GBIT,		"20 Gbit" },
259 	{ FC_PORTSPEED_40GBIT,		"40 Gbit" },
260 	{ FC_PORTSPEED_50GBIT,		"50 Gbit" },
261 	{ FC_PORTSPEED_100GBIT,		"100 Gbit" },
262 	{ FC_PORTSPEED_25GBIT,		"25 Gbit" },
263 	{ FC_PORTSPEED_64GBIT,		"64 Gbit" },
264 	{ FC_PORTSPEED_128GBIT,		"128 Gbit" },
265 	{ FC_PORTSPEED_256GBIT,		"256 Gbit" },
266 	{ FC_PORTSPEED_NOT_NEGOTIATED,	"Not Negotiated" },
267 };
268 fc_bitfield_name_search(port_speed, fc_port_speed_names)
269 
270 
271 static int
272 show_fc_fc4s (char *buf, u8 *fc4_list)
273 {
274 	int i, len=0;
275 
276 	for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
277 		len += sprintf(buf + len , "0x%02x ", *fc4_list);
278 	len += sprintf(buf + len, "\n");
279 	return len;
280 }
281 
282 
283 /* Convert FC_PORT_ROLE bit values to ascii string name */
284 static const struct {
285 	u32 			value;
286 	char			*name;
287 } fc_port_role_names[] = {
288 	{ FC_PORT_ROLE_FCP_TARGET,		"FCP Target" },
289 	{ FC_PORT_ROLE_FCP_INITIATOR,		"FCP Initiator" },
290 	{ FC_PORT_ROLE_IP_PORT,			"IP Port" },
291 	{ FC_PORT_ROLE_FCP_DUMMY_INITIATOR,	"FCP Dummy Initiator" },
292 	{ FC_PORT_ROLE_NVME_INITIATOR,		"NVMe Initiator" },
293 	{ FC_PORT_ROLE_NVME_TARGET,		"NVMe Target" },
294 	{ FC_PORT_ROLE_NVME_DISCOVERY,		"NVMe Discovery" },
295 };
296 fc_bitfield_name_search(port_roles, fc_port_role_names)
297 
298 /*
299  * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
300  */
301 #define FC_WELLKNOWN_PORTID_MASK	0xfffff0
302 #define FC_WELLKNOWN_ROLE_MASK  	0x00000f
303 #define FC_FPORT_PORTID			0x00000e
304 #define FC_FABCTLR_PORTID		0x00000d
305 #define FC_DIRSRVR_PORTID		0x00000c
306 #define FC_TIMESRVR_PORTID		0x00000b
307 #define FC_MGMTSRVR_PORTID		0x00000a
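/*
 * Illustrative sketch only: show_fc_rport_roles() further below uses
 * these masks to decode well-known fabric addresses. For the fabric
 * F_Port at address 0xfffffe:
 *
 *	(0xfffffe & FC_WELLKNOWN_PORTID_MASK) == FC_WELLKNOWN_PORTID_MASK
 *	(0xfffffe & FC_WELLKNOWN_ROLE_MASK)   == FC_FPORT_PORTID
 *
 * so the role is reported as "Fabric Port".
 */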
308 
309 
310 static void fc_timeout_deleted_rport(struct work_struct *work);
311 static void fc_timeout_fail_rport_io(struct work_struct *work);
312 static void fc_scsi_scan_rport(struct work_struct *work);
313 
314 /*
315  * Attribute counts per object type...
316  * Increase these values if you add attributes
317  */
318 #define FC_STARGET_NUM_ATTRS 	3
319 #define FC_RPORT_NUM_ATTRS	10
320 #define FC_VPORT_NUM_ATTRS	9
321 #define FC_HOST_NUM_ATTRS	29
322 
323 struct fc_internal {
324 	struct scsi_transport_template t;
325 	struct fc_function_template *f;
326 
327 	/*
328 	 * For attributes : each object has :
329 	 *   An array of the actual attributes structures
330 	 *   An array of null-terminated pointers to the attribute
331 	 *     structures - used for mid-layer interaction.
332 	 *
333 	 * The attribute containers for the starget and host are
334 	 * part of the midlayer. As the remote port is specific to the
335 	 * fc transport, we must provide the attribute container.
336 	 */
337 	struct device_attribute private_starget_attrs[
338 							FC_STARGET_NUM_ATTRS];
339 	struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];
340 
341 	struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
342 	struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
343 
344 	struct transport_container rport_attr_cont;
345 	struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
346 	struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
347 
348 	struct transport_container vport_attr_cont;
349 	struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
350 	struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
351 };
352 
353 #define to_fc_internal(tmpl)	container_of(tmpl, struct fc_internal, t)
354 
355 static int fc_target_setup(struct transport_container *tc, struct device *dev,
356 			   struct device *cdev)
357 {
358 	struct scsi_target *starget = to_scsi_target(dev);
359 	struct fc_rport *rport = starget_to_rport(starget);
360 
361 	/*
362 	 * if parent is remote port, use values from remote port.
363 	 * Otherwise, this host uses the fc_transport, but not the
364 	 * remote port interface. As such, initialize to known non-values.
365 	 */
366 	if (rport) {
367 		fc_starget_node_name(starget) = rport->node_name;
368 		fc_starget_port_name(starget) = rport->port_name;
369 		fc_starget_port_id(starget) = rport->port_id;
370 	} else {
371 		fc_starget_node_name(starget) = -1;
372 		fc_starget_port_name(starget) = -1;
373 		fc_starget_port_id(starget) = -1;
374 	}
375 
376 	return 0;
377 }
378 
379 static DECLARE_TRANSPORT_CLASS(fc_transport_class,
380 			       "fc_transport",
381 			       fc_target_setup,
382 			       NULL,
383 			       NULL);
384 
385 static int fc_host_setup(struct transport_container *tc, struct device *dev,
386 			 struct device *cdev)
387 {
388 	struct Scsi_Host *shost = dev_to_shost(dev);
389 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
390 
391 	/*
392 	 * Set default values easily detected by the midlayer as
393 	 * failure cases.  The scsi lldd is responsible for initializing
394 	 * all transport attributes to valid values per host.
395 	 */
396 	fc_host->node_name = -1;
397 	fc_host->port_name = -1;
398 	fc_host->permanent_port_name = -1;
399 	fc_host->supported_classes = FC_COS_UNSPECIFIED;
400 	memset(fc_host->supported_fc4s, 0,
401 		sizeof(fc_host->supported_fc4s));
402 	fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
403 	fc_host->maxframe_size = -1;
404 	fc_host->max_npiv_vports = 0;
405 	memset(fc_host->serial_number, 0,
406 		sizeof(fc_host->serial_number));
407 	memset(fc_host->manufacturer, 0,
408 		sizeof(fc_host->manufacturer));
409 	memset(fc_host->model, 0,
410 		sizeof(fc_host->model));
411 	memset(fc_host->model_description, 0,
412 		sizeof(fc_host->model_description));
413 	memset(fc_host->hardware_version, 0,
414 		sizeof(fc_host->hardware_version));
415 	memset(fc_host->driver_version, 0,
416 		sizeof(fc_host->driver_version));
417 	memset(fc_host->firmware_version, 0,
418 		sizeof(fc_host->firmware_version));
419 	memset(fc_host->optionrom_version, 0,
420 		sizeof(fc_host->optionrom_version));
421 
422 	fc_host->port_id = -1;
423 	fc_host->port_type = FC_PORTTYPE_UNKNOWN;
424 	fc_host->port_state = FC_PORTSTATE_UNKNOWN;
425 	memset(fc_host->active_fc4s, 0,
426 		sizeof(fc_host->active_fc4s));
427 	fc_host->speed = FC_PORTSPEED_UNKNOWN;
428 	fc_host->fabric_name = -1;
429 	memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
430 	memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
431 	memset(&fc_host->fpin_stats, 0, sizeof(fc_host->fpin_stats));
432 
433 	fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
434 
435 	INIT_LIST_HEAD(&fc_host->rports);
436 	INIT_LIST_HEAD(&fc_host->rport_bindings);
437 	INIT_LIST_HEAD(&fc_host->vports);
438 	fc_host->next_rport_number = 0;
439 	fc_host->next_target_id = 0;
440 	fc_host->next_vport_number = 0;
441 	fc_host->npiv_vports_inuse = 0;
442 
443 	snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
444 		 "fc_wq_%d", shost->host_no);
445 	fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
446 	if (!fc_host->work_q)
447 		return -ENOMEM;
448 
449 	fc_host->dev_loss_tmo = fc_dev_loss_tmo;
450 	snprintf(fc_host->devloss_work_q_name,
451 		 sizeof(fc_host->devloss_work_q_name),
452 		 "fc_dl_%d", shost->host_no);
453 	fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
454 					fc_host->devloss_work_q_name);
455 	if (!fc_host->devloss_work_q) {
456 		destroy_workqueue(fc_host->work_q);
457 		fc_host->work_q = NULL;
458 		return -ENOMEM;
459 	}
460 
461 	fc_bsg_hostadd(shost, fc_host);
462 	/* ignore any bsg add error - we just can't do sgio */
463 
464 	return 0;
465 }
466 
467 static int fc_host_remove(struct transport_container *tc, struct device *dev,
468 			 struct device *cdev)
469 {
470 	struct Scsi_Host *shost = dev_to_shost(dev);
471 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
472 
473 	fc_bsg_remove(fc_host->rqst_q);
474 	return 0;
475 }
476 
477 static DECLARE_TRANSPORT_CLASS(fc_host_class,
478 			       "fc_host",
479 			       fc_host_setup,
480 			       fc_host_remove,
481 			       NULL);
482 
483 /*
484  * Setup and Remove actions for remote ports are handled
485  * in the service functions below.
486  */
487 static DECLARE_TRANSPORT_CLASS(fc_rport_class,
488 			       "fc_remote_ports",
489 			       NULL,
490 			       NULL,
491 			       NULL);
492 
493 /*
494  * Setup and Remove actions for virtual ports are handled
495  * in the service functions below.
496  */
497 static DECLARE_TRANSPORT_CLASS(fc_vport_class,
498 			       "fc_vports",
499 			       NULL,
500 			       NULL,
501 			       NULL);
502 
503 /*
504  * Netlink Infrastructure
505  */
506 
507 static atomic_t fc_event_seq;
508 
509 /**
510  * fc_get_event_number - Obtain the next sequential FC event number
511  *
512  * Notes:
513  *   We could have inlined this, but it would have required fc_event_seq to
514  *   be exposed. For now, live with the subroutine call.
515  *   Atomic used to avoid lock/unlock...
516  */
517 u32
518 fc_get_event_number(void)
519 {
520 	return atomic_add_return(1, &fc_event_seq);
521 }
522 EXPORT_SYMBOL(fc_get_event_number);
523 
524 /**
525  * fc_host_post_fc_event - routine to do the work of posting an event
526  *                      on an fc_host.
527  * @shost:		host the event occurred on
528  * @event_number:	fc event number obtained from fc_get_event_number()
529  * @event_code:		fc_host event being posted
530  * @data_len:		amount, in bytes, of event data
531  * @data_buf:		pointer to event data
532  * @vendor_id:          value for Vendor id
533  *
534  * Notes:
535  *	This routine assumes no locks are held on entry.
536  */
537 void
538 fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
539 		enum fc_host_event_code event_code,
540 		u32 data_len, char *data_buf, u64 vendor_id)
541 {
542 	struct sk_buff *skb;
543 	struct nlmsghdr	*nlh;
544 	struct fc_nl_event *event;
545 	const char *name;
546 	u32 len;
547 	int err;
548 
549 	if (!data_buf || data_len < 4)
550 		data_len = 0;
551 
552 	if (!scsi_nl_sock) {
553 		err = -ENOENT;
554 		goto send_fail;
555 	}
556 
557 	len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
558 
559 	skb = nlmsg_new(len, GFP_KERNEL);
560 	if (!skb) {
561 		err = -ENOBUFS;
562 		goto send_fail;
563 	}
564 
565 	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
566 	if (!nlh) {
567 		err = -ENOBUFS;
568 		goto send_fail_skb;
569 	}
570 	event = nlmsg_data(nlh);
571 
572 	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
573 				FC_NL_ASYNC_EVENT, len);
574 	event->seconds = ktime_get_real_seconds();
575 	event->vendor_id = vendor_id;
576 	event->host_no = shost->host_no;
577 	event->event_datalen = data_len;	/* bytes */
578 	event->event_num = event_number;
579 	event->event_code = event_code;
580 	if (data_len)
581 		memcpy(&event->event_data, data_buf, data_len);
582 
583 	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
584 			GFP_KERNEL);
585 	return;
586 
587 send_fail_skb:
588 	kfree_skb(skb);
589 send_fail:
590 	name = get_fc_host_event_code_name(event_code);
591 	printk(KERN_WARNING
592 		"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
593 		__func__, shost->host_no,
594 		(name) ? name : "<unknown>",
595 		(data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err);
596 	return;
597 }
598 EXPORT_SYMBOL(fc_host_post_fc_event);
599 
600 /**
601  * fc_host_post_event - called to post an event on an fc_host.
602  * @shost:		host the event occurred on
603  * @event_number:	fc event number obtained from fc_get_event_number()
604  * @event_code:		fc_host event being posted
605  * @event_data:		32bits of data for the event being posted
606  *
607  * Notes:
608  *	This routine assumes no locks are held on entry.
609  */
610 void
611 fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
612 		enum fc_host_event_code event_code, u32 event_data)
613 {
614 	fc_host_post_fc_event(shost, event_number, event_code,
615 		(u32)sizeof(u32), (char *)&event_data, 0);
616 }
617 EXPORT_SYMBOL(fc_host_post_event);
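/*
 * Illustrative sketch only (shost stands for the LLD's Scsi_Host): a
 * driver typically pairs this with fc_get_event_number(), e.g. when it
 * detects a LIP:
 *
 *	fc_host_post_event(shost, fc_get_event_number(),
 *			   FCH_EVT_LIP, 0);
 *
 * The last argument is the 32-bit event payload carried in the netlink
 * message.
 */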
618 
619 
620 /**
621  * fc_host_post_vendor_event - called to post a vendor unique event
622  *                      on an fc_host
623  * @shost:		host the event occurred on
624  * @event_number:	fc event number obtained from fc_get_event_number()
625  * @data_len:		amount, in bytes, of vendor unique data
626  * @data_buf:		pointer to vendor unique data
627  * @vendor_id:          Vendor id
628  *
629  * Notes:
630  *	This routine assumes no locks are held on entry.
631  */
632 void
633 fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
634 		u32 data_len, char * data_buf, u64 vendor_id)
635 {
636 	fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE,
637 		data_len, data_buf, vendor_id);
638 }
639 EXPORT_SYMBOL(fc_host_post_vendor_event);
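/*
 * Illustrative sketch only, with hypothetical names (my_vendor_msg and
 * MY_VENDOR_OUI are placeholders, not transport definitions):
 *
 *	struct my_vendor_msg msg;
 *
 *	fc_host_post_vendor_event(shost, fc_get_event_number(),
 *				  sizeof(msg), (char *)&msg,
 *				  MY_VENDOR_OUI);
 */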
640 
641 /**
642  * fc_find_rport_by_wwpn - find the fc_rport pointer for a given wwpn
643  * @shost:		host the fc_rport is associated with
644  * @wwpn:		wwpn of the fc_rport device
645  *
646  * Notes:
647  *	This routine assumes no locks are held on entry.
648  */
649 struct fc_rport *
650 fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn)
651 {
652 	struct fc_rport *rport;
653 	unsigned long flags;
654 
655 	spin_lock_irqsave(shost->host_lock, flags);
656 
657 	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
658 		if (rport->port_state != FC_PORTSTATE_ONLINE)
659 			continue;
660 
661 		if (rport->port_name == wwpn) {
662 			spin_unlock_irqrestore(shost->host_lock, flags);
663 			return rport;
664 		}
665 	}
666 
667 	spin_unlock_irqrestore(shost->host_lock, flags);
668 	return NULL;
669 }
670 EXPORT_SYMBOL(fc_find_rport_by_wwpn);
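/*
 * Illustrative sketch only: look up an online rport by WWPN and check
 * whether it is a target, as the FPIN statistics helpers below do:
 *
 *	struct fc_rport *rport;
 *
 *	rport = fc_find_rport_by_wwpn(shost, wwpn);
 *	if (rport && (rport->roles & FC_PORT_ROLE_FCP_TARGET))
 *		;	// rport is an online FCP target
 *
 * Only rports in FC_PORTSTATE_ONLINE are returned.
 */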
671 
672 static void
673 fc_li_stats_update(struct fc_fn_li_desc *li_desc,
674 		   struct fc_fpin_stats *stats)
675 {
676 	stats->li += be32_to_cpu(li_desc->event_count);
677 	switch (be16_to_cpu(li_desc->event_type)) {
678 	case FPIN_LI_UNKNOWN:
679 		stats->li_failure_unknown +=
680 		    be32_to_cpu(li_desc->event_count);
681 		break;
682 	case FPIN_LI_LINK_FAILURE:
683 		stats->li_link_failure_count +=
684 		    be32_to_cpu(li_desc->event_count);
685 		break;
686 	case FPIN_LI_LOSS_OF_SYNC:
687 		stats->li_loss_of_sync_count +=
688 		    be32_to_cpu(li_desc->event_count);
689 		break;
690 	case FPIN_LI_LOSS_OF_SIG:
691 		stats->li_loss_of_signals_count +=
692 		    be32_to_cpu(li_desc->event_count);
693 		break;
694 	case FPIN_LI_PRIM_SEQ_ERR:
695 		stats->li_prim_seq_err_count +=
696 		    be32_to_cpu(li_desc->event_count);
697 		break;
698 	case FPIN_LI_INVALID_TX_WD:
699 		stats->li_invalid_tx_word_count +=
700 		    be32_to_cpu(li_desc->event_count);
701 		break;
702 	case FPIN_LI_INVALID_CRC:
703 		stats->li_invalid_crc_count +=
704 		    be32_to_cpu(li_desc->event_count);
705 		break;
706 	case FPIN_LI_DEVICE_SPEC:
707 		stats->li_device_specific +=
708 		    be32_to_cpu(li_desc->event_count);
709 		break;
710 	}
711 }
712 
713 static void
714 fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats)
715 {
716 	stats->dn++;
717 	switch (reason_code) {
718 	case FPIN_DELI_UNKNOWN:
719 		stats->dn_unknown++;
720 		break;
721 	case FPIN_DELI_TIMEOUT:
722 		stats->dn_timeout++;
723 		break;
724 	case FPIN_DELI_UNABLE_TO_ROUTE:
725 		stats->dn_unable_to_route++;
726 		break;
727 	case FPIN_DELI_DEVICE_SPEC:
728 		stats->dn_device_specific++;
729 		break;
730 	}
731 }
732 
733 static void
734 fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats)
735 {
736 	stats->cn++;
737 	switch (event_type) {
738 	case FPIN_CONGN_CLEAR:
739 		stats->cn_clear++;
740 		break;
741 	case FPIN_CONGN_LOST_CREDIT:
742 		stats->cn_lost_credit++;
743 		break;
744 	case FPIN_CONGN_CREDIT_STALL:
745 		stats->cn_credit_stall++;
746 		break;
747 	case FPIN_CONGN_OVERSUBSCRIPTION:
748 		stats->cn_oversubscription++;
749 		break;
750 	case FPIN_CONGN_DEVICE_SPEC:
751 		stats->cn_device_specific++;
752 	}
753 }
754 
755 /*
756  * fc_fpin_li_stats_update - routine to update Link Integrity
757  * event statistics.
758  * @shost:		host the FPIN was received on
759  * @tlv:		pointer to link integrity descriptor
760  *
761  */
762 static void
763 fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
764 {
765 	u8 i;
766 	struct fc_rport *rport = NULL;
767 	struct fc_rport *attach_rport = NULL;
768 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
769 	struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
770 	u64 wwpn;
771 
772 	rport = fc_find_rport_by_wwpn(shost,
773 				      be64_to_cpu(li_desc->attached_wwpn));
774 	if (rport &&
775 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
776 	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
777 		attach_rport = rport;
778 		fc_li_stats_update(li_desc, &attach_rport->fpin_stats);
779 	}
780 
781 	if (be32_to_cpu(li_desc->pname_count) > 0) {
782 		for (i = 0;
783 		    i < be32_to_cpu(li_desc->pname_count);
784 		    i++) {
785 			wwpn = be64_to_cpu(li_desc->pname_list[i]);
786 			rport = fc_find_rport_by_wwpn(shost, wwpn);
787 			if (rport &&
788 			    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
789 			    rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
790 				if (rport == attach_rport)
791 					continue;
792 				fc_li_stats_update(li_desc,
793 						   &rport->fpin_stats);
794 			}
795 		}
796 	}
797 
798 	if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
799 		fc_li_stats_update(li_desc, &fc_host->fpin_stats);
800 }
801 
802 /*
803  * fc_fpin_delivery_stats_update - routine to update Delivery Notification
804  * event statistics.
805  * @shost:		host the FPIN was received on
806  * @tlv:		pointer to delivery descriptor
807  *
808  */
809 static void
810 fc_fpin_delivery_stats_update(struct Scsi_Host *shost,
811 			      struct fc_tlv_desc *tlv)
812 {
813 	struct fc_rport *rport = NULL;
814 	struct fc_rport *attach_rport = NULL;
815 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
816 	struct fc_fn_deli_desc *dn_desc = (struct fc_fn_deli_desc *)tlv;
817 	u32 reason_code = be32_to_cpu(dn_desc->deli_reason_code);
818 
819 	rport = fc_find_rport_by_wwpn(shost,
820 				      be64_to_cpu(dn_desc->attached_wwpn));
821 	if (rport &&
822 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
823 	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
824 		attach_rport = rport;
825 		fc_delivery_stats_update(reason_code,
826 					 &attach_rport->fpin_stats);
827 	}
828 
829 	if (fc_host->port_name == be64_to_cpu(dn_desc->attached_wwpn))
830 		fc_delivery_stats_update(reason_code, &fc_host->fpin_stats);
831 }
832 
833 /*
834  * fc_fpin_peer_congn_stats_update - routine to update Peer Congestion
835  * event statistics.
836  * @shost:		host the FPIN was received on
837  * @tlv:		pointer to peer congestion descriptor
838  *
839  */
840 static void
841 fc_fpin_peer_congn_stats_update(struct Scsi_Host *shost,
842 				struct fc_tlv_desc *tlv)
843 {
844 	u8 i;
845 	struct fc_rport *rport = NULL;
846 	struct fc_rport *attach_rport = NULL;
847 	struct fc_fn_peer_congn_desc *pc_desc =
848 	    (struct fc_fn_peer_congn_desc *)tlv;
849 	u16 event_type = be16_to_cpu(pc_desc->event_type);
850 	u64 wwpn;
851 
852 	rport = fc_find_rport_by_wwpn(shost,
853 				      be64_to_cpu(pc_desc->attached_wwpn));
854 	if (rport &&
855 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
856 	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
857 		attach_rport = rport;
858 		fc_cn_stats_update(event_type, &attach_rport->fpin_stats);
859 	}
860 
861 	if (be32_to_cpu(pc_desc->pname_count) > 0) {
862 		for (i = 0;
863 		    i < be32_to_cpu(pc_desc->pname_count);
864 		    i++) {
865 			wwpn = be64_to_cpu(pc_desc->pname_list[i]);
866 			rport = fc_find_rport_by_wwpn(shost, wwpn);
867 			if (rport &&
868 			    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
869 			     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
870 				if (rport == attach_rport)
871 					continue;
872 				fc_cn_stats_update(event_type,
873 						   &rport->fpin_stats);
874 			}
875 		}
876 	}
877 }
878 
879 /*
880  * fc_fpin_congn_stats_update - routine to update Congestion
881  * event statistics.
882  * @shost:		host the FPIN was received on
883  * @tlv:		pointer to congestion descriptor
884  *
885  */
886 static void
887 fc_fpin_congn_stats_update(struct Scsi_Host *shost,
888 			   struct fc_tlv_desc *tlv)
889 {
890 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
891 	struct fc_fn_congn_desc *congn = (struct fc_fn_congn_desc *)tlv;
892 
893 	fc_cn_stats_update(be16_to_cpu(congn->event_type),
894 			   &fc_host->fpin_stats);
895 }
896 
897 /**
898  * fc_host_fpin_rcv - routine to process a received FPIN.
899  * @shost:		host the FPIN was received on
900  * @fpin_len:		length of FPIN payload, in bytes
901  * @fpin_buf:		pointer to FPIN payload
902  *
903  * Notes:
904  *	This routine assumes no locks are held on entry.
905  */
906 void
907 fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
908 {
909 	struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf;
910 	struct fc_tlv_desc *tlv;
911 	u32 desc_cnt = 0, bytes_remain;
912 	u32 dtag;
913 
914 	/* Update Statistics */
915 	tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
916 	bytes_remain = fpin_len - offsetof(struct fc_els_fpin, fpin_desc);
917 	bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
918 
919 	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
920 	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
921 		dtag = be32_to_cpu(tlv->desc_tag);
922 		switch (dtag) {
923 		case ELS_DTAG_LNK_INTEGRITY:
924 			fc_fpin_li_stats_update(shost, tlv);
925 			break;
926 		case ELS_DTAG_DELIVERY:
927 			fc_fpin_delivery_stats_update(shost, tlv);
928 			break;
929 		case ELS_DTAG_PEER_CONGEST:
930 			fc_fpin_peer_congn_stats_update(shost, tlv);
931 			break;
932 		case ELS_DTAG_CONGESTION:
933 			fc_fpin_congn_stats_update(shost, tlv);
934 		}
935 
936 		desc_cnt++;
937 		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
938 		tlv = fc_tlv_next_desc(tlv);
939 	}
940 
941 	fc_host_post_fc_event(shost, fc_get_event_number(),
942 				FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
943 }
944 EXPORT_SYMBOL(fc_host_fpin_rcv);
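/*
 * Illustrative sketch only, with hypothetical names (els_payload and
 * els_len are whatever the LLD's ELS receive path provides): after
 * receiving an FPIN ELS, a driver hands the whole frame payload to the
 * transport:
 *
 *	fc_host_fpin_rcv(shost, els_len, els_payload);
 *
 * The transport then updates the per-rport and fc_host FPIN statistics
 * and posts an FCH_EVT_LINK_FPIN netlink event carrying the payload.
 */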
945 
946 
947 static __init int fc_transport_init(void)
948 {
949 	int error;
950 
951 	atomic_set(&fc_event_seq, 0);
952 
953 	error = transport_class_register(&fc_host_class);
954 	if (error)
955 		return error;
956 	error = transport_class_register(&fc_vport_class);
957 	if (error)
958 		goto unreg_host_class;
959 	error = transport_class_register(&fc_rport_class);
960 	if (error)
961 		goto unreg_vport_class;
962 	error = transport_class_register(&fc_transport_class);
963 	if (error)
964 		goto unreg_rport_class;
965 	return 0;
966 
967 unreg_rport_class:
968 	transport_class_unregister(&fc_rport_class);
969 unreg_vport_class:
970 	transport_class_unregister(&fc_vport_class);
971 unreg_host_class:
972 	transport_class_unregister(&fc_host_class);
973 	return error;
974 }
975 
976 static void __exit fc_transport_exit(void)
977 {
978 	transport_class_unregister(&fc_transport_class);
979 	transport_class_unregister(&fc_rport_class);
980 	transport_class_unregister(&fc_host_class);
981 	transport_class_unregister(&fc_vport_class);
982 }
983 
984 /*
985  * FC Remote Port Attribute Management
986  */
987 
988 #define fc_rport_show_function(field, format_string, sz, cast)		\
989 static ssize_t								\
990 show_fc_rport_##field (struct device *dev, 				\
991 		       struct device_attribute *attr, char *buf)	\
992 {									\
993 	struct fc_rport *rport = transport_class_to_rport(dev);		\
994 	struct Scsi_Host *shost = rport_to_shost(rport);		\
995 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
996 	if ((i->f->get_rport_##field) &&				\
997 	    !((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
998 	      (rport->port_state == FC_PORTSTATE_DELETED) ||		\
999 	      (rport->port_state == FC_PORTSTATE_NOTPRESENT)))		\
1000 		i->f->get_rport_##field(rport);				\
1001 	return snprintf(buf, sz, format_string, cast rport->field); 	\
1002 }
1003 
1004 #define fc_rport_store_function(field)					\
1005 static ssize_t								\
1006 store_fc_rport_##field(struct device *dev,				\
1007 		       struct device_attribute *attr,			\
1008 		       const char *buf,	size_t count)			\
1009 {									\
1010 	int val;							\
1011 	struct fc_rport *rport = transport_class_to_rport(dev);		\
1012 	struct Scsi_Host *shost = rport_to_shost(rport);		\
1013 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1014 	char *cp;							\
1015 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
1016 	    (rport->port_state == FC_PORTSTATE_DELETED) ||		\
1017 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))		\
1018 		return -EBUSY;						\
1019 	val = simple_strtoul(buf, &cp, 0);				\
1020 	if (*cp && (*cp != '\n'))					\
1021 		return -EINVAL;						\
1022 	i->f->set_rport_##field(rport, val);				\
1023 	return count;							\
1024 }
1025 
1026 #define fc_rport_rd_attr(field, format_string, sz)			\
1027 	fc_rport_show_function(field, format_string, sz, )		\
1028 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
1029 			 show_fc_rport_##field, NULL)
1030 
1031 #define fc_rport_rd_attr_cast(field, format_string, sz, cast)		\
1032 	fc_rport_show_function(field, format_string, sz, (cast))	\
1033 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
1034 			  show_fc_rport_##field, NULL)
1035 
1036 #define fc_rport_rw_attr(field, format_string, sz)			\
1037 	fc_rport_show_function(field, format_string, sz, )		\
1038 	fc_rport_store_function(field)					\
1039 static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR,		\
1040 			show_fc_rport_##field,				\
1041 			store_fc_rport_##field)
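/*
 * Illustrative sketch only: fc_rport_rd_attr(F, fmt, sz) expands to a
 * show_fc_rport_F() routine that refreshes rport->F through
 * i->f->get_rport_F() (unless the rport is blocked, deleted or not
 * present) plus a read-only device_attr_rport_F; the _rw_ variant also
 * emits store_fc_rport_F() wired to i->f->set_rport_F(). F is a
 * placeholder for a real attribute name.
 */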
1042 
1043 
1044 #define fc_private_rport_show_function(field, format_string, sz, cast)	\
1045 static ssize_t								\
1046 show_fc_rport_##field (struct device *dev, 				\
1047 		       struct device_attribute *attr, char *buf)	\
1048 {									\
1049 	struct fc_rport *rport = transport_class_to_rport(dev);		\
1050 	return snprintf(buf, sz, format_string, cast rport->field); 	\
1051 }
1052 
1053 #define fc_private_rport_rd_attr(field, format_string, sz)		\
1054 	fc_private_rport_show_function(field, format_string, sz, )	\
1055 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
1056 			 show_fc_rport_##field, NULL)
1057 
1058 #define fc_private_rport_rd_attr_cast(field, format_string, sz, cast)	\
1059 	fc_private_rport_show_function(field, format_string, sz, (cast)) \
1060 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
1061 			  show_fc_rport_##field, NULL)
1062 
1063 
1064 #define fc_private_rport_rd_enum_attr(title, maxlen)			\
1065 static ssize_t								\
1066 show_fc_rport_##title (struct device *dev,				\
1067 		       struct device_attribute *attr, char *buf)	\
1068 {									\
1069 	struct fc_rport *rport = transport_class_to_rport(dev);		\
1070 	const char *name;						\
1071 	name = get_fc_##title##_name(rport->title);			\
1072 	if (!name)							\
1073 		return -EINVAL;						\
1074 	return snprintf(buf, maxlen, "%s\n", name);			\
1075 }									\
1076 static FC_DEVICE_ATTR(rport, title, S_IRUGO,			\
1077 			show_fc_rport_##title, NULL)
1078 
1079 
1080 #define SETUP_RPORT_ATTRIBUTE_RD(field)					\
1081 	i->private_rport_attrs[count] = device_attr_rport_##field; \
1082 	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
1083 	i->private_rport_attrs[count].store = NULL;			\
1084 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
1085 	if (i->f->show_rport_##field)					\
1086 		count++
1087 
1088 #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field)				\
1089 	i->private_rport_attrs[count] = device_attr_rport_##field; \
1090 	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
1091 	i->private_rport_attrs[count].store = NULL;			\
1092 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
1093 	count++
1094 
1095 #define SETUP_RPORT_ATTRIBUTE_RW(field)					\
1096 	i->private_rport_attrs[count] = device_attr_rport_##field; \
1097 	if (!i->f->set_rport_##field) {					\
1098 		i->private_rport_attrs[count].attr.mode = S_IRUGO;	\
1099 		i->private_rport_attrs[count].store = NULL;		\
1100 	}								\
1101 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
1102 	if (i->f->show_rport_##field)					\
1103 		count++
1104 
1105 #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field)				\
1106 {									\
1107 	i->private_rport_attrs[count] = device_attr_rport_##field; \
1108 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
1109 	count++;							\
1110 }
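/*
 * Illustrative sketch only: these SETUP_* helpers are invoked with a
 * running index by fc_attach_transport() (outside this excerpt),
 * roughly:
 *
 *	count = 0;
 *	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(maxframe_size);
 *	SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
 *	// ... remaining rport attributes ...
 *	i->rport_attrs[count] = NULL;	// NULL terminate for sysfs
 *
 * The conditional variants publish an attribute only when the LLD
 * provides the corresponding show_rport_*/set_rport_* support.
 */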
1111 
1112 
1113 /* The FC Transport Remote Port Attributes: */
1114 
1115 /* Fixed Remote Port Attributes */
1116 
1117 fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);
1118 
1119 static ssize_t
1120 show_fc_rport_supported_classes (struct device *dev,
1121 				 struct device_attribute *attr, char *buf)
1122 {
1123 	struct fc_rport *rport = transport_class_to_rport(dev);
1124 	if (rport->supported_classes == FC_COS_UNSPECIFIED)
1125 		return snprintf(buf, 20, "unspecified\n");
1126 	return get_fc_cos_names(rport->supported_classes, buf);
1127 }
1128 static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
1129 		show_fc_rport_supported_classes, NULL);
1130 
1131 /* Dynamic Remote Port Attributes */
1132 
1133 /*
1134  * dev_loss_tmo attribute
1135  */
1136 static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
1137 {
1138 	char *cp;
1139 
1140 	*val = simple_strtoul(buf, &cp, 0);
1141 	if (*cp && (*cp != '\n'))
1142 		return -EINVAL;
1143 	/*
1144 	 * Check for overflow; dev_loss_tmo is u32
1145 	 */
1146 	if (*val > UINT_MAX)
1147 		return -EINVAL;
1148 
1149 	return 0;
1150 }
1151 
1152 static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
1153 				     unsigned long val)
1154 {
1155 	struct Scsi_Host *shost = rport_to_shost(rport);
1156 	struct fc_internal *i = to_fc_internal(shost->transportt);
1157 
1158 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
1159 	    (rport->port_state == FC_PORTSTATE_DELETED) ||
1160 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
1161 		return -EBUSY;
1162 	/*
1163 	 * Check for overflow; dev_loss_tmo is u32
1164 	 */
1165 	if (val > UINT_MAX)
1166 		return -EINVAL;
1167 
1168 	/*
1169 	 * If fast_io_fail is off we have to cap
1170 	 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
1171 	 */
1172 	if (rport->fast_io_fail_tmo == -1 &&
1173 	    val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
1174 		return -EINVAL;
1175 
1176 	i->f->set_rport_dev_loss_tmo(rport, val);
1177 	return 0;
1178 }
1179 
1180 fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
1181 static ssize_t
1182 store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
1183 			    const char *buf, size_t count)
1184 {
1185 	struct fc_rport *rport = transport_class_to_rport(dev);
1186 	unsigned long val;
1187 	int rc;
1188 
1189 	rc = fc_str_to_dev_loss(buf, &val);
1190 	if (rc)
1191 		return rc;
1192 
1193 	rc = fc_rport_set_dev_loss_tmo(rport, val);
1194 	if (rc)
1195 		return rc;
1196 	return count;
1197 }
1198 static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
1199 		show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
1200 
1201 
1202 /* Private Remote Port Attributes */
1203 
1204 fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1205 fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1206 fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);
1207 
1208 static ssize_t
1209 show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
1210 		     char *buf)
1211 {
1212 	struct fc_rport *rport = transport_class_to_rport(dev);
1213 
1214 	/* identify any roles that are port_id specific */
1215 	if ((rport->port_id != -1) &&
1216 	    (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
1217 					FC_WELLKNOWN_PORTID_MASK) {
1218 		switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
1219 		case FC_FPORT_PORTID:
1220 			return snprintf(buf, 30, "Fabric Port\n");
1221 		case FC_FABCTLR_PORTID:
1222 			return snprintf(buf, 30, "Fabric Controller\n");
1223 		case FC_DIRSRVR_PORTID:
1224 			return snprintf(buf, 30, "Directory Server\n");
1225 		case FC_TIMESRVR_PORTID:
1226 			return snprintf(buf, 30, "Time Server\n");
1227 		case FC_MGMTSRVR_PORTID:
1228 			return snprintf(buf, 30, "Management Server\n");
1229 		default:
1230 			return snprintf(buf, 30, "Unknown Fabric Entity\n");
1231 		}
1232 	} else {
1233 		if (rport->roles == FC_PORT_ROLE_UNKNOWN)
1234 			return snprintf(buf, 20, "unknown\n");
1235 		return get_fc_port_roles_names(rport->roles, buf);
1236 	}
1237 }
1238 static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
1239 		show_fc_rport_roles, NULL);
1240 
1241 static ssize_t fc_rport_set_marginal_state(struct device *dev,
1242 						struct device_attribute *attr,
1243 						const char *buf, size_t count)
1244 {
1245 	struct fc_rport *rport = transport_class_to_rport(dev);
1246 	enum fc_port_state port_state;
1247 	int ret = 0;
1248 
1249 	ret = get_fc_port_state_match(buf, &port_state);
1250 	if (ret)
1251 		return -EINVAL;
1252 	if (port_state == FC_PORTSTATE_MARGINAL) {
1253 		/*
1254 		 * Change the state to Marginal only if the
1255 		 * current rport state is Online
1256 		 * Allow only Online->Marginal
1257 		 */
1258 		if (rport->port_state == FC_PORTSTATE_ONLINE)
1259 			rport->port_state = port_state;
1260 		else
1261 			return -EINVAL;
1262 	} else if (port_state == FC_PORTSTATE_ONLINE) {
1263 		/*
1264 		 * Change the state to Online only if the
1265 		 * current rport state is Marginal
1266 		 * Allow only Marginal->Online
1267 		 */
1268 		if (rport->port_state == FC_PORTSTATE_MARGINAL)
1269 			rport->port_state = port_state;
1270 		else
1271 			return -EINVAL;
1272 	} else
1273 		return -EINVAL;
1274 	return count;
1275 }
1276 
1277 static ssize_t
1278 show_fc_rport_port_state(struct device *dev,
1279 				struct device_attribute *attr, char *buf)
1280 {
1281 	const char *name;
1282 	struct fc_rport *rport = transport_class_to_rport(dev);
1283 
1284 	name = get_fc_port_state_name(rport->port_state);
1285 	if (!name)
1286 		return -EINVAL;
1287 
1288 	return snprintf(buf, 20, "%s\n", name);
1289 }
1290 
1291 static FC_DEVICE_ATTR(rport, port_state, 0444 | 0200,
1292 			show_fc_rport_port_state, fc_rport_set_marginal_state);
1293 
1294 fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
1295 
1296 /*
1297  * fast_io_fail_tmo attribute
1298  */
1299 static ssize_t
1300 show_fc_rport_fast_io_fail_tmo (struct device *dev,
1301 				struct device_attribute *attr, char *buf)
1302 {
1303 	struct fc_rport *rport = transport_class_to_rport(dev);
1304 
1305 	if (rport->fast_io_fail_tmo == -1)
1306 		return snprintf(buf, 5, "off\n");
1307 	return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
1308 }
1309 
1310 static ssize_t
1311 store_fc_rport_fast_io_fail_tmo(struct device *dev,
1312 				struct device_attribute *attr, const char *buf,
1313 				size_t count)
1314 {
1315 	int val;
1316 	char *cp;
1317 	struct fc_rport *rport = transport_class_to_rport(dev);
1318 
1319 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
1320 	    (rport->port_state == FC_PORTSTATE_DELETED) ||
1321 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
1322 		return -EBUSY;
1323 	if (strncmp(buf, "off", 3) == 0)
1324 		rport->fast_io_fail_tmo = -1;
1325 	else {
1326 		val = simple_strtoul(buf, &cp, 0);
1327 		if ((*cp && (*cp != '\n')) || (val < 0))
1328 			return -EINVAL;
1329 		/*
1330 		 * Cap fast_io_fail by dev_loss_tmo or
1331 		 * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
1332 		 */
1333 		if ((val >= rport->dev_loss_tmo) ||
1334 		    (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
1335 			return -EINVAL;
1336 
1337 		rport->fast_io_fail_tmo = val;
1338 	}
1339 	return count;
1340 }
1341 static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
1342 	show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
1343 
1344 #define fc_rport_fpin_statistic(name)					\
1345 static ssize_t fc_rport_fpinstat_##name(struct device *cd,		\
1346 				  struct device_attribute *attr,	\
1347 				  char *buf)				\
1348 {									\
1349 	struct fc_rport *rport = transport_class_to_rport(cd);		\
1350 									\
1351 	return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.name);	\
1352 }									\
1353 static FC_DEVICE_ATTR(rport, fpin_##name, 0444, fc_rport_fpinstat_##name, NULL)
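/*
 * Illustrative sketch only: fc_rport_fpin_statistic(dn) below generates
 * fc_rport_fpinstat_dn(), which prints rport->fpin_stats.dn as
 * "0x%llx\n", plus the read-only attribute device_attr_rport_fpin_dn
 * that is collected into fc_rport_statistics_attrs[] further down.
 */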
1354 
1355 fc_rport_fpin_statistic(dn);
1356 fc_rport_fpin_statistic(dn_unknown);
1357 fc_rport_fpin_statistic(dn_timeout);
1358 fc_rport_fpin_statistic(dn_unable_to_route);
1359 fc_rport_fpin_statistic(dn_device_specific);
1360 fc_rport_fpin_statistic(cn);
1361 fc_rport_fpin_statistic(cn_clear);
1362 fc_rport_fpin_statistic(cn_lost_credit);
1363 fc_rport_fpin_statistic(cn_credit_stall);
1364 fc_rport_fpin_statistic(cn_oversubscription);
1365 fc_rport_fpin_statistic(cn_device_specific);
1366 fc_rport_fpin_statistic(li);
1367 fc_rport_fpin_statistic(li_failure_unknown);
1368 fc_rport_fpin_statistic(li_link_failure_count);
1369 fc_rport_fpin_statistic(li_loss_of_sync_count);
1370 fc_rport_fpin_statistic(li_loss_of_signals_count);
1371 fc_rport_fpin_statistic(li_prim_seq_err_count);
1372 fc_rport_fpin_statistic(li_invalid_tx_word_count);
1373 fc_rport_fpin_statistic(li_invalid_crc_count);
1374 fc_rport_fpin_statistic(li_device_specific);
1375 
1376 static struct attribute *fc_rport_statistics_attrs[] = {
1377 	&device_attr_rport_fpin_dn.attr,
1378 	&device_attr_rport_fpin_dn_unknown.attr,
1379 	&device_attr_rport_fpin_dn_timeout.attr,
1380 	&device_attr_rport_fpin_dn_unable_to_route.attr,
1381 	&device_attr_rport_fpin_dn_device_specific.attr,
1382 	&device_attr_rport_fpin_li.attr,
1383 	&device_attr_rport_fpin_li_failure_unknown.attr,
1384 	&device_attr_rport_fpin_li_link_failure_count.attr,
1385 	&device_attr_rport_fpin_li_loss_of_sync_count.attr,
1386 	&device_attr_rport_fpin_li_loss_of_signals_count.attr,
1387 	&device_attr_rport_fpin_li_prim_seq_err_count.attr,
1388 	&device_attr_rport_fpin_li_invalid_tx_word_count.attr,
1389 	&device_attr_rport_fpin_li_invalid_crc_count.attr,
1390 	&device_attr_rport_fpin_li_device_specific.attr,
1391 	&device_attr_rport_fpin_cn.attr,
1392 	&device_attr_rport_fpin_cn_clear.attr,
1393 	&device_attr_rport_fpin_cn_lost_credit.attr,
1394 	&device_attr_rport_fpin_cn_credit_stall.attr,
1395 	&device_attr_rport_fpin_cn_oversubscription.attr,
1396 	&device_attr_rport_fpin_cn_device_specific.attr,
1397 	NULL
1398 };
1399 
1400 static struct attribute_group fc_rport_statistics_group = {
1401 	.name = "statistics",
1402 	.attrs = fc_rport_statistics_attrs,
1403 };
1404 
1405 
1406 /*
1407  * FC SCSI Target Attribute Management
1408  */
1409 
1410 /*
1411  * Note: in the target show function we recognize when the remote
1412  *  port is in the hierarchy and do not allow the driver to get
1413  *  involved in sysfs functions. The driver only gets involved if
1414  *  it's the "old" style that doesn't use rports.
1415  */
1416 #define fc_starget_show_function(field, format_string, sz, cast)	\
1417 static ssize_t								\
1418 show_fc_starget_##field (struct device *dev, 				\
1419 			 struct device_attribute *attr, char *buf)	\
1420 {									\
1421 	struct scsi_target *starget = transport_class_to_starget(dev);	\
1422 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
1423 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1424 	struct fc_rport *rport = starget_to_rport(starget);		\
1425 	if (rport)							\
1426 		fc_starget_##field(starget) = rport->field;		\
1427 	else if (i->f->get_starget_##field)				\
1428 		i->f->get_starget_##field(starget);			\
1429 	return snprintf(buf, sz, format_string, 			\
1430 		cast fc_starget_##field(starget)); 			\
1431 }
1432 
1433 #define fc_starget_rd_attr(field, format_string, sz)			\
1434 	fc_starget_show_function(field, format_string, sz, )		\
1435 static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
1436 			 show_fc_starget_##field, NULL)
1437 
1438 #define fc_starget_rd_attr_cast(field, format_string, sz, cast)		\
1439 	fc_starget_show_function(field, format_string, sz, (cast))	\
1440 static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
1441 			  show_fc_starget_##field, NULL)
1442 
1443 #define SETUP_STARGET_ATTRIBUTE_RD(field)				\
1444 	i->private_starget_attrs[count] = device_attr_starget_##field; \
1445 	i->private_starget_attrs[count].attr.mode = S_IRUGO;		\
1446 	i->private_starget_attrs[count].store = NULL;			\
1447 	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
1448 	if (i->f->show_starget_##field)					\
1449 		count++
1450 
1451 #define SETUP_STARGET_ATTRIBUTE_RW(field)				\
1452 	i->private_starget_attrs[count] = device_attr_starget_##field; \
1453 	if (!i->f->set_starget_##field) {				\
1454 		i->private_starget_attrs[count].attr.mode = S_IRUGO;	\
1455 		i->private_starget_attrs[count].store = NULL;		\
1456 	}								\
1457 	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
1458 	if (i->f->show_starget_##field)					\
1459 		count++
1460 
1461 /* The FC Transport SCSI Target Attributes: */
1462 fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1463 fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1464 fc_starget_rd_attr(port_id, "0x%06x\n", 20);
1465 
1466 
1467 /*
1468  * FC Virtual Port Attribute Management
1469  */
1470 
1471 #define fc_vport_show_function(field, format_string, sz, cast)		\
1472 static ssize_t								\
1473 show_fc_vport_##field (struct device *dev, 				\
1474 		       struct device_attribute *attr, char *buf)	\
1475 {									\
1476 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1477 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1478 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1479 	if ((i->f->get_vport_##field) &&				\
1480 	    !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)))	\
1481 		i->f->get_vport_##field(vport);				\
1482 	return snprintf(buf, sz, format_string, cast vport->field); 	\
1483 }
1484 
1485 #define fc_vport_store_function(field)					\
1486 static ssize_t								\
1487 store_fc_vport_##field(struct device *dev,				\
1488 		       struct device_attribute *attr,			\
1489 		       const char *buf,	size_t count)			\
1490 {									\
1491 	int val;							\
1492 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1493 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1494 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1495 	char *cp;							\
1496 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))	\
1497 		return -EBUSY;						\
1498 	val = simple_strtoul(buf, &cp, 0);				\
1499 	if (*cp && (*cp != '\n'))					\
1500 		return -EINVAL;						\
1501 	i->f->set_vport_##field(vport, val);				\
1502 	return count;							\
1503 }
1504 
1505 #define fc_vport_store_str_function(field, slen)			\
1506 static ssize_t								\
1507 store_fc_vport_##field(struct device *dev,				\
1508 		       struct device_attribute *attr, 			\
1509 		       const char *buf,	size_t count)			\
1510 {									\
1511 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1512 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1513 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1514 	unsigned int cnt=count;						\
1515 									\
1516 	/* count may include a LF at end of string */			\
1517 	if (buf[cnt-1] == '\n')						\
1518 		cnt--;							\
1519 	if (cnt > ((slen) - 1))						\
1520 		return -EINVAL;						\
1521 	memcpy(vport->field, buf, cnt);					\
1522 	i->f->set_vport_##field(vport);					\
1523 	return count;							\
1524 }
1525 
1526 #define fc_vport_rd_attr(field, format_string, sz)			\
1527 	fc_vport_show_function(field, format_string, sz, )		\
1528 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1529 			 show_fc_vport_##field, NULL)
1530 
1531 #define fc_vport_rd_attr_cast(field, format_string, sz, cast)		\
1532 	fc_vport_show_function(field, format_string, sz, (cast))	\
1533 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1534 			  show_fc_vport_##field, NULL)
1535 
1536 #define fc_vport_rw_attr(field, format_string, sz)			\
1537 	fc_vport_show_function(field, format_string, sz, )		\
1538 	fc_vport_store_function(field)					\
1539 static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
1540 			show_fc_vport_##field,				\
1541 			store_fc_vport_##field)
1542 
1543 #define fc_private_vport_show_function(field, format_string, sz, cast)	\
1544 static ssize_t								\
1545 show_fc_vport_##field (struct device *dev,				\
1546 		       struct device_attribute *attr, char *buf)	\
1547 {									\
1548 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1549 	return snprintf(buf, sz, format_string, cast vport->field); 	\
1550 }
1551 
1552 #define fc_private_vport_store_u32_function(field)			\
1553 static ssize_t								\
1554 store_fc_vport_##field(struct device *dev,				\
1555 		       struct device_attribute *attr,			\
1556 		       const char *buf,	size_t count)			\
1557 {									\
1558 	u32 val;							\
1559 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1560 	char *cp;							\
1561 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))		\
1562 		return -EBUSY;						\
1563 	val = simple_strtoul(buf, &cp, 0);				\
1564 	if (*cp && (*cp != '\n'))					\
1565 		return -EINVAL;						\
1566 	vport->field = val;						\
1567 	return count;							\
1568 }
1569 
1570 
1571 #define fc_private_vport_rd_attr(field, format_string, sz)		\
1572 	fc_private_vport_show_function(field, format_string, sz, )	\
1573 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1574 			 show_fc_vport_##field, NULL)
1575 
1576 #define fc_private_vport_rd_attr_cast(field, format_string, sz, cast)	\
1577 	fc_private_vport_show_function(field, format_string, sz, (cast)) \
1578 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1579 			  show_fc_vport_##field, NULL)
1580 
1581 #define fc_private_vport_rw_u32_attr(field, format_string, sz)		\
1582 	fc_private_vport_show_function(field, format_string, sz, )	\
1583 	fc_private_vport_store_u32_function(field)			\
1584 static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
1585 			show_fc_vport_##field,				\
1586 			store_fc_vport_##field)
1587 
1588 
1589 #define fc_private_vport_rd_enum_attr(title, maxlen)			\
1590 static ssize_t								\
1591 show_fc_vport_##title (struct device *dev,				\
1592 		       struct device_attribute *attr,			\
1593 		       char *buf)					\
1594 {									\
1595 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1596 	const char *name;						\
1597 	name = get_fc_##title##_name(vport->title);			\
1598 	if (!name)							\
1599 		return -EINVAL;						\
1600 	return snprintf(buf, maxlen, "%s\n", name);			\
1601 }									\
1602 static FC_DEVICE_ATTR(vport, title, S_IRUGO,			\
1603 			show_fc_vport_##title, NULL)
1604 
1605 
1606 #define SETUP_VPORT_ATTRIBUTE_RD(field)					\
1607 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1608 	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
1609 	i->private_vport_attrs[count].store = NULL;			\
1610 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1611 	if (i->f->get_##field)						\
1612 		count++
1613 	/* NOTE: Above MACRO differs: checks function not show bit */
1614 
1615 #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field)				\
1616 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1617 	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
1618 	i->private_vport_attrs[count].store = NULL;			\
1619 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1620 	count++
1621 
1622 #define SETUP_VPORT_ATTRIBUTE_WR(field)					\
1623 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1624 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1625 	if (i->f->field)						\
1626 		count++
1627 	/* NOTE: Above MACRO differs: checks function */
1628 
1629 #define SETUP_VPORT_ATTRIBUTE_RW(field)					\
1630 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1631 	if (!i->f->set_vport_##field) {					\
1632 		i->private_vport_attrs[count].attr.mode = S_IRUGO;	\
1633 		i->private_vport_attrs[count].store = NULL;		\
1634 	}								\
1635 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1636 	count++
1637 	/* NOTE: Above MACRO differs: does not check show bit */
1638 
1639 #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field)				\
1640 {									\
1641 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1642 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1643 	count++;							\
1644 }
1645 
1646 
1647 /* The FC Transport Virtual Port Attributes: */
1648 
1649 /* Fixed Virtual Port Attributes */
1650 
1651 /* Dynamic Virtual Port Attributes */
1652 
1653 /* Private Virtual Port Attributes */
1654 
1655 fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
1656 fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
1657 fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1658 fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1659 
1660 static ssize_t
1661 show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
1662 		     char *buf)
1663 {
1664 	struct fc_vport *vport = transport_class_to_vport(dev);
1665 
1666 	if (vport->roles == FC_PORT_ROLE_UNKNOWN)
1667 		return snprintf(buf, 20, "unknown\n");
1668 	return get_fc_port_roles_names(vport->roles, buf);
1669 }
1670 static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
1671 
1672 fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
1673 
1674 fc_private_vport_show_function(symbolic_name, "%s\n",
1675 		FC_VPORT_SYMBOLIC_NAMELEN + 1, )
1676 fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
1677 static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
1678 		show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
1679 
1680 static ssize_t
1681 store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1682 		      const char *buf, size_t count)
1683 {
1684 	struct fc_vport *vport = transport_class_to_vport(dev);
1685 	struct Scsi_Host *shost = vport_to_shost(vport);
1686 	unsigned long flags;
1687 
1688 	spin_lock_irqsave(shost->host_lock, flags);
1689 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
1690 		spin_unlock_irqrestore(shost->host_lock, flags);
1691 		return -EBUSY;
1692 	}
1693 	vport->flags |= FC_VPORT_DELETING;
1694 	spin_unlock_irqrestore(shost->host_lock, flags);
1695 
1696 	fc_queue_work(shost, &vport->vport_delete_work);
1697 	return count;
1698 }
1699 static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
1700 			NULL, store_fc_vport_delete);
1701 
1702 
1703 /*
1704  * Enable/Disable vport
1705  *  Write "1" to disable, write "0" to enable
1706  */
1707 static ssize_t
1708 store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
1709 		       const char *buf,
1710 			   size_t count)
1711 {
1712 	struct fc_vport *vport = transport_class_to_vport(dev);
1713 	struct Scsi_Host *shost = vport_to_shost(vport);
1714 	struct fc_internal *i = to_fc_internal(shost->transportt);
1715 	int stat;
1716 
1717 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1718 		return -EBUSY;
1719 
1720 	if (*buf == '0') {
1721 		if (vport->vport_state != FC_VPORT_DISABLED)
1722 			return -EALREADY;
1723 	} else if (*buf == '1') {
1724 		if (vport->vport_state == FC_VPORT_DISABLED)
1725 			return -EALREADY;
1726 	} else
1727 		return -EINVAL;
1728 
1729 	stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
1730 	return stat ? stat : count;
1731 }
1732 static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
1733 			NULL, store_fc_vport_disable);
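/*
 * Illustrative usage only (device name is hypothetical): assuming a vport
 * named vport-3:0-0, the attribute above would typically be driven from
 * user space with:
 *
 *   echo 1 > /sys/class/fc_vports/vport-3:0-0/vport_disable    (disable)
 *   echo 0 > /sys/class/fc_vports/vport-3:0-0/vport_disable    (enable)
 *
 * The exact name depends on the host number, channel, and vport number.
 */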
1734 
1735 
1736 /*
1737  * Host Attribute Management
1738  */
1739 
1740 #define fc_host_show_function(field, format_string, sz, cast)		\
1741 static ssize_t								\
1742 show_fc_host_##field (struct device *dev,				\
1743 		      struct device_attribute *attr, char *buf)		\
1744 {									\
1745 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1746 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1747 	if (i->f->get_host_##field)					\
1748 		i->f->get_host_##field(shost);				\
1749 	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1750 }
1751 
1752 #define fc_host_store_function(field)					\
1753 static ssize_t								\
1754 store_fc_host_##field(struct device *dev, 				\
1755 		      struct device_attribute *attr,			\
1756 		      const char *buf,	size_t count)			\
1757 {									\
1758 	int val;							\
1759 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1760 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1761 	char *cp;							\
1762 									\
1763 	val = simple_strtoul(buf, &cp, 0);				\
1764 	if (*cp && (*cp != '\n'))					\
1765 		return -EINVAL;						\
1766 	i->f->set_host_##field(shost, val);				\
1767 	return count;							\
1768 }
1769 
1770 #define fc_host_store_str_function(field, slen)				\
1771 static ssize_t								\
1772 store_fc_host_##field(struct device *dev,				\
1773 		      struct device_attribute *attr,			\
1774 		      const char *buf, size_t count)			\
1775 {									\
1776 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1777 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1778 	unsigned int cnt=count;						\
1779 									\
1780 	/* count may include a LF at end of string */			\
1781 	if (buf[cnt-1] == '\n')						\
1782 		cnt--;							\
1783 	if (cnt > ((slen) - 1))						\
1784 		return -EINVAL;						\
1785 	memcpy(fc_host_##field(shost), buf, cnt);			\
1786 	i->f->set_host_##field(shost);					\
1787 	return count;							\
1788 }
1789 
1790 #define fc_host_rd_attr(field, format_string, sz)			\
1791 	fc_host_show_function(field, format_string, sz, )		\
1792 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1793 			 show_fc_host_##field, NULL)
1794 
1795 #define fc_host_rd_attr_cast(field, format_string, sz, cast)		\
1796 	fc_host_show_function(field, format_string, sz, (cast))		\
1797 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1798 			  show_fc_host_##field, NULL)
1799 
1800 #define fc_host_rw_attr(field, format_string, sz)			\
1801 	fc_host_show_function(field, format_string, sz, )		\
1802 	fc_host_store_function(field)					\
1803 static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR,		\
1804 			show_fc_host_##field,				\
1805 			store_fc_host_##field)
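/*
 * Sketch, for illustration only, of what fc_host_rd_attr() expands to,
 * using the port_id attribute (declared further below) as the example:
 *
 *   static ssize_t show_fc_host_port_id(struct device *dev,
 *                                       struct device_attribute *attr,
 *                                       char *buf)
 *   {
 *           struct Scsi_Host *shost = transport_class_to_shost(dev);
 *           struct fc_internal *i = to_fc_internal(shost->transportt);
 *
 *           if (i->f->get_host_port_id)
 *                   i->f->get_host_port_id(shost);
 *           return snprintf(buf, 20, "0x%06x\n", fc_host_port_id(shost));
 *   }
 *   static struct device_attribute device_attr_host_port_id =
 *           __ATTR(port_id, S_IRUGO, show_fc_host_port_id, NULL);
 */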
1806 
1807 #define fc_host_rd_enum_attr(title, maxlen)				\
1808 static ssize_t								\
1809 show_fc_host_##title (struct device *dev,				\
1810 		      struct device_attribute *attr, char *buf)		\
1811 {									\
1812 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1813 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1814 	const char *name;						\
1815 	if (i->f->get_host_##title)					\
1816 		i->f->get_host_##title(shost);				\
1817 	name = get_fc_##title##_name(fc_host_##title(shost));		\
1818 	if (!name)							\
1819 		return -EINVAL;						\
1820 	return snprintf(buf, maxlen, "%s\n", name);			\
1821 }									\
1822 static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
1823 
1824 #define SETUP_HOST_ATTRIBUTE_RD(field)					\
1825 	i->private_host_attrs[count] = device_attr_host_##field;	\
1826 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1827 	i->private_host_attrs[count].store = NULL;			\
1828 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1829 	if (i->f->show_host_##field)					\
1830 		count++
1831 
1832 #define SETUP_HOST_ATTRIBUTE_RD_NS(field)				\
1833 	i->private_host_attrs[count] = device_attr_host_##field;	\
1834 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1835 	i->private_host_attrs[count].store = NULL;			\
1836 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1837 	count++
1838 
1839 #define SETUP_HOST_ATTRIBUTE_RW(field)					\
1840 	i->private_host_attrs[count] = device_attr_host_##field;	\
1841 	if (!i->f->set_host_##field) {					\
1842 		i->private_host_attrs[count].attr.mode = S_IRUGO;	\
1843 		i->private_host_attrs[count].store = NULL;		\
1844 	}								\
1845 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1846 	if (i->f->show_host_##field)					\
1847 		count++
1848 
1849 
1850 #define fc_private_host_show_function(field, format_string, sz, cast)	\
1851 static ssize_t								\
1852 show_fc_host_##field (struct device *dev,				\
1853 		      struct device_attribute *attr, char *buf)		\
1854 {									\
1855 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1856 	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1857 }
1858 
1859 #define fc_private_host_rd_attr(field, format_string, sz)		\
1860 	fc_private_host_show_function(field, format_string, sz, )	\
1861 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1862 			 show_fc_host_##field, NULL)
1863 
1864 #define fc_private_host_rd_attr_cast(field, format_string, sz, cast)	\
1865 	fc_private_host_show_function(field, format_string, sz, (cast)) \
1866 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1867 			  show_fc_host_##field, NULL)
1868 
1869 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field)			\
1870 	i->private_host_attrs[count] = device_attr_host_##field;	\
1871 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1872 	i->private_host_attrs[count].store = NULL;			\
1873 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1874 	count++
1875 
1876 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field)			\
1877 {									\
1878 	i->private_host_attrs[count] = device_attr_host_##field;	\
1879 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1880 	count++;							\
1881 }
1882 
1883 
1884 /* Fixed Host Attributes */
1885 
1886 static ssize_t
1887 show_fc_host_supported_classes (struct device *dev,
1888 			        struct device_attribute *attr, char *buf)
1889 {
1890 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1891 
1892 	if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED)
1893 		return snprintf(buf, 20, "unspecified\n");
1894 
1895 	return get_fc_cos_names(fc_host_supported_classes(shost), buf);
1896 }
1897 static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO,
1898 		show_fc_host_supported_classes, NULL);
1899 
1900 static ssize_t
1901 show_fc_host_supported_fc4s (struct device *dev,
1902 			     struct device_attribute *attr, char *buf)
1903 {
1904 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1905 	return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost));
1906 }
1907 static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO,
1908 		show_fc_host_supported_fc4s, NULL);
1909 
1910 static ssize_t
1911 show_fc_host_supported_speeds (struct device *dev,
1912 			       struct device_attribute *attr, char *buf)
1913 {
1914 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1915 
1916 	if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN)
1917 		return snprintf(buf, 20, "unknown\n");
1918 
1919 	return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf);
1920 }
1921 static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO,
1922 		show_fc_host_supported_speeds, NULL);
1923 
1924 
1925 fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1926 fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1927 fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
1928 			     unsigned long long);
1929 fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
1930 fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
1931 fc_private_host_rd_attr(serial_number, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
1932 fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
1933 fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1934 fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1935 fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1936 fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1937 fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1938 fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1939 
1940 
1941 /* Dynamic Host Attributes */
1942 
1943 static ssize_t
1944 show_fc_host_active_fc4s (struct device *dev,
1945 			  struct device_attribute *attr, char *buf)
1946 {
1947 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1948 	struct fc_internal *i = to_fc_internal(shost->transportt);
1949 
1950 	if (i->f->get_host_active_fc4s)
1951 		i->f->get_host_active_fc4s(shost);
1952 
1953 	return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost));
1954 }
1955 static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO,
1956 		show_fc_host_active_fc4s, NULL);
1957 
1958 static ssize_t
1959 show_fc_host_speed (struct device *dev,
1960 		    struct device_attribute *attr, char *buf)
1961 {
1962 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1963 	struct fc_internal *i = to_fc_internal(shost->transportt);
1964 
1965 	if (i->f->get_host_speed)
1966 		i->f->get_host_speed(shost);
1967 
1968 	if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN)
1969 		return snprintf(buf, 20, "unknown\n");
1970 
1971 	return get_fc_port_speed_names(fc_host_speed(shost), buf);
1972 }
1973 static FC_DEVICE_ATTR(host, speed, S_IRUGO,
1974 		show_fc_host_speed, NULL);
1975 
1976 
1977 fc_host_rd_attr(port_id, "0x%06x\n", 20);
1978 fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
1979 fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
1980 fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1981 fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1982 
1983 fc_private_host_show_function(system_hostname, "%s\n",
1984 		FC_SYMBOLIC_NAME_SIZE + 1, )
1985 fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1986 static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1987 		show_fc_host_system_hostname, store_fc_host_system_hostname);
1988 
1989 
1990 /* Private Host Attributes */
1991 
1992 static ssize_t
1993 show_fc_private_host_tgtid_bind_type(struct device *dev,
1994 				     struct device_attribute *attr, char *buf)
1995 {
1996 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1997 	const char *name;
1998 
1999 	name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost));
2000 	if (!name)
2001 		return -EINVAL;
2002 	return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
2003 }
2004 
2005 #define get_list_head_entry(pos, head, member) 		\
2006 	pos = list_entry((head)->next, typeof(*pos), member)
2007 
2008 static ssize_t
2009 store_fc_private_host_tgtid_bind_type(struct device *dev,
2010 	struct device_attribute *attr, const char *buf, size_t count)
2011 {
2012 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2013 	struct fc_rport *rport;
2014 	enum fc_tgtid_binding_type val;
2015 	unsigned long flags;
2016 
2017 	if (get_fc_tgtid_bind_type_match(buf, &val))
2018 		return -EINVAL;
2019 
2020 	/* if changing bind type, purge all unused consistent bindings */
2021 	if (val != fc_host_tgtid_bind_type(shost)) {
2022 		spin_lock_irqsave(shost->host_lock, flags);
2023 		while (!list_empty(&fc_host_rport_bindings(shost))) {
2024 			get_list_head_entry(rport,
2025 				&fc_host_rport_bindings(shost), peers);
2026 			list_del(&rport->peers);
2027 			rport->port_state = FC_PORTSTATE_DELETED;
2028 			fc_queue_work(shost, &rport->rport_delete_work);
2029 		}
2030 		spin_unlock_irqrestore(shost->host_lock, flags);
2031 	}
2032 
2033 	fc_host_tgtid_bind_type(shost) = val;
2034 	return count;
2035 }
2036 
2037 static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR,
2038 			show_fc_private_host_tgtid_bind_type,
2039 			store_fc_private_host_tgtid_bind_type);
2040 
2041 static ssize_t
2042 store_fc_private_host_issue_lip(struct device *dev,
2043 	struct device_attribute *attr, const char *buf, size_t count)
2044 {
2045 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2046 	struct fc_internal *i = to_fc_internal(shost->transportt);
2047 	int ret;
2048 
2049 	/* ignore any data value written to the attribute */
2050 	if (i->f->issue_fc_host_lip) {
2051 		ret = i->f->issue_fc_host_lip(shost);
2052 		return ret ? ret : count;
2053 	}
2054 
2055 	return -ENOENT;
2056 }
2057 
2058 static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
2059 			store_fc_private_host_issue_lip);
2060 
2061 static ssize_t
2062 store_fc_private_host_dev_loss_tmo(struct device *dev,
2063 				   struct device_attribute *attr,
2064 				   const char *buf, size_t count)
2065 {
2066 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2067 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2068 	struct fc_rport *rport;
2069 	unsigned long val, flags;
2070 	int rc;
2071 
2072 	rc = fc_str_to_dev_loss(buf, &val);
2073 	if (rc)
2074 		return rc;
2075 
2076 	fc_host_dev_loss_tmo(shost) = val;
2077 	spin_lock_irqsave(shost->host_lock, flags);
2078 	list_for_each_entry(rport, &fc_host->rports, peers)
2079 		fc_rport_set_dev_loss_tmo(rport, val);
2080 	spin_unlock_irqrestore(shost->host_lock, flags);
2081 	return count;
2082 }
2083 
2084 fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, );
2085 static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
2086 		      show_fc_host_dev_loss_tmo,
2087 		      store_fc_private_host_dev_loss_tmo);
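/*
 * Illustrative note (host number is hypothetical): writing this attribute
 * updates the fc_host default and walks the rport list above, so
 *
 *   echo 30 > /sys/class/fc_host/host4/dev_loss_tmo
 *
 * would propagate the new timeout to every remote port on that host.
 */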
2088 
2089 fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
2090 
2091 /*
2092  * Host Statistics Management
2093  */
2094 
2095 /* Show a given attribute in the statistics group */
2096 static ssize_t
2097 fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
2098 {
2099 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2100 	struct fc_internal *i = to_fc_internal(shost->transportt);
2101 	struct fc_host_statistics *stats;
2102 	ssize_t ret = -ENOENT;
2103 
2104 	if (offset > sizeof(struct fc_host_statistics) ||
2105 	    offset % sizeof(u64) != 0)
2106 		WARN_ON(1);
2107 
2108 	if (i->f->get_fc_host_stats) {
2109 		stats = (i->f->get_fc_host_stats)(shost);
2110 		if (stats)
2111 			ret = snprintf(buf, 20, "0x%llx\n",
2112 			      (unsigned long long)*(u64 *)(((u8 *) stats) + offset));
2113 	}
2114 	return ret;
2115 }
2116 
2117 
2118 /* generate a read-only statistics attribute */
2119 #define fc_host_statistic(name)						\
2120 static ssize_t show_fcstat_##name(struct device *cd,			\
2121 				  struct device_attribute *attr,	\
2122 				  char *buf)				\
2123 {									\
2124 	return fc_stat_show(cd, buf, 					\
2125 			    offsetof(struct fc_host_statistics, name));	\
2126 }									\
2127 static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL)
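/*
 * Sketch of the resulting sysfs layout (host number and value are
 * hypothetical): each fc_host_statistic(name) below becomes a read-only
 * file in the "statistics" group registered further down, e.g.
 *
 *   cat /sys/class/fc_host/host4/statistics/tx_frames
 *   0x1a2b3c
 *
 * The value is produced by fc_stat_show() reading the u64 at the field's
 * offset within struct fc_host_statistics.
 */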
2128 
2129 fc_host_statistic(seconds_since_last_reset);
2130 fc_host_statistic(tx_frames);
2131 fc_host_statistic(tx_words);
2132 fc_host_statistic(rx_frames);
2133 fc_host_statistic(rx_words);
2134 fc_host_statistic(lip_count);
2135 fc_host_statistic(nos_count);
2136 fc_host_statistic(error_frames);
2137 fc_host_statistic(dumped_frames);
2138 fc_host_statistic(link_failure_count);
2139 fc_host_statistic(loss_of_sync_count);
2140 fc_host_statistic(loss_of_signal_count);
2141 fc_host_statistic(prim_seq_protocol_err_count);
2142 fc_host_statistic(invalid_tx_word_count);
2143 fc_host_statistic(invalid_crc_count);
2144 fc_host_statistic(fcp_input_requests);
2145 fc_host_statistic(fcp_output_requests);
2146 fc_host_statistic(fcp_control_requests);
2147 fc_host_statistic(fcp_input_megabytes);
2148 fc_host_statistic(fcp_output_megabytes);
2149 fc_host_statistic(fcp_packet_alloc_failures);
2150 fc_host_statistic(fcp_packet_aborts);
2151 fc_host_statistic(fcp_frame_alloc_failures);
2152 fc_host_statistic(fc_no_free_exch);
2153 fc_host_statistic(fc_no_free_exch_xid);
2154 fc_host_statistic(fc_xid_not_found);
2155 fc_host_statistic(fc_xid_busy);
2156 fc_host_statistic(fc_seq_not_found);
2157 fc_host_statistic(fc_non_bls_resp);
2158 fc_host_statistic(cn_sig_warn);
2159 fc_host_statistic(cn_sig_alarm);
2160 
2161 
2162 #define fc_host_fpin_statistic(name)					\
2163 static ssize_t fc_host_fpinstat_##name(struct device *cd,		\
2164 				  struct device_attribute *attr,	\
2165 				  char *buf)				\
2166 {									\
2167 	struct Scsi_Host *shost = transport_class_to_shost(cd);		\
2168 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);	\
2169 									\
2170 	return snprintf(buf, 20, "0x%llx\n", fc_host->fpin_stats.name);	\
2171 }									\
2172 static FC_DEVICE_ATTR(host, fpin_##name, 0444, fc_host_fpinstat_##name, NULL)
2173 
2174 fc_host_fpin_statistic(dn);
2175 fc_host_fpin_statistic(dn_unknown);
2176 fc_host_fpin_statistic(dn_timeout);
2177 fc_host_fpin_statistic(dn_unable_to_route);
2178 fc_host_fpin_statistic(dn_device_specific);
2179 fc_host_fpin_statistic(cn);
2180 fc_host_fpin_statistic(cn_clear);
2181 fc_host_fpin_statistic(cn_lost_credit);
2182 fc_host_fpin_statistic(cn_credit_stall);
2183 fc_host_fpin_statistic(cn_oversubscription);
2184 fc_host_fpin_statistic(cn_device_specific);
2185 fc_host_fpin_statistic(li);
2186 fc_host_fpin_statistic(li_failure_unknown);
2187 fc_host_fpin_statistic(li_link_failure_count);
2188 fc_host_fpin_statistic(li_loss_of_sync_count);
2189 fc_host_fpin_statistic(li_loss_of_signals_count);
2190 fc_host_fpin_statistic(li_prim_seq_err_count);
2191 fc_host_fpin_statistic(li_invalid_tx_word_count);
2192 fc_host_fpin_statistic(li_invalid_crc_count);
2193 fc_host_fpin_statistic(li_device_specific);
2194 
2195 static ssize_t
2196 fc_reset_statistics(struct device *dev, struct device_attribute *attr,
2197 		    const char *buf, size_t count)
2198 {
2199 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2200 	struct fc_internal *i = to_fc_internal(shost->transportt);
2201 
2202 	/* ignore any data value written to the attribute */
2203 	if (i->f->reset_fc_host_stats) {
2204 		i->f->reset_fc_host_stats(shost);
2205 		return count;
2206 	}
2207 
2208 	return -ENOENT;
2209 }
2210 static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
2211 				fc_reset_statistics);
2212 
2213 static struct attribute *fc_statistics_attrs[] = {
2214 	&device_attr_host_seconds_since_last_reset.attr,
2215 	&device_attr_host_tx_frames.attr,
2216 	&device_attr_host_tx_words.attr,
2217 	&device_attr_host_rx_frames.attr,
2218 	&device_attr_host_rx_words.attr,
2219 	&device_attr_host_lip_count.attr,
2220 	&device_attr_host_nos_count.attr,
2221 	&device_attr_host_error_frames.attr,
2222 	&device_attr_host_dumped_frames.attr,
2223 	&device_attr_host_link_failure_count.attr,
2224 	&device_attr_host_loss_of_sync_count.attr,
2225 	&device_attr_host_loss_of_signal_count.attr,
2226 	&device_attr_host_prim_seq_protocol_err_count.attr,
2227 	&device_attr_host_invalid_tx_word_count.attr,
2228 	&device_attr_host_invalid_crc_count.attr,
2229 	&device_attr_host_fcp_input_requests.attr,
2230 	&device_attr_host_fcp_output_requests.attr,
2231 	&device_attr_host_fcp_control_requests.attr,
2232 	&device_attr_host_fcp_input_megabytes.attr,
2233 	&device_attr_host_fcp_output_megabytes.attr,
2234 	&device_attr_host_fcp_packet_alloc_failures.attr,
2235 	&device_attr_host_fcp_packet_aborts.attr,
2236 	&device_attr_host_fcp_frame_alloc_failures.attr,
2237 	&device_attr_host_fc_no_free_exch.attr,
2238 	&device_attr_host_fc_no_free_exch_xid.attr,
2239 	&device_attr_host_fc_xid_not_found.attr,
2240 	&device_attr_host_fc_xid_busy.attr,
2241 	&device_attr_host_fc_seq_not_found.attr,
2242 	&device_attr_host_fc_non_bls_resp.attr,
2243 	&device_attr_host_cn_sig_warn.attr,
2244 	&device_attr_host_cn_sig_alarm.attr,
2245 	&device_attr_host_reset_statistics.attr,
2246 	&device_attr_host_fpin_dn.attr,
2247 	&device_attr_host_fpin_dn_unknown.attr,
2248 	&device_attr_host_fpin_dn_timeout.attr,
2249 	&device_attr_host_fpin_dn_unable_to_route.attr,
2250 	&device_attr_host_fpin_dn_device_specific.attr,
2251 	&device_attr_host_fpin_li.attr,
2252 	&device_attr_host_fpin_li_failure_unknown.attr,
2253 	&device_attr_host_fpin_li_link_failure_count.attr,
2254 	&device_attr_host_fpin_li_loss_of_sync_count.attr,
2255 	&device_attr_host_fpin_li_loss_of_signals_count.attr,
2256 	&device_attr_host_fpin_li_prim_seq_err_count.attr,
2257 	&device_attr_host_fpin_li_invalid_tx_word_count.attr,
2258 	&device_attr_host_fpin_li_invalid_crc_count.attr,
2259 	&device_attr_host_fpin_li_device_specific.attr,
2260 	&device_attr_host_fpin_cn.attr,
2261 	&device_attr_host_fpin_cn_clear.attr,
2262 	&device_attr_host_fpin_cn_lost_credit.attr,
2263 	&device_attr_host_fpin_cn_credit_stall.attr,
2264 	&device_attr_host_fpin_cn_oversubscription.attr,
2265 	&device_attr_host_fpin_cn_device_specific.attr,
2266 	NULL
2267 };
2268 
2269 static struct attribute_group fc_statistics_group = {
2270 	.name = "statistics",
2271 	.attrs = fc_statistics_attrs,
2272 };
2273 
2274 
2275 /* Host Vport Attributes */
2276 
2277 static int
2278 fc_parse_wwn(const char *ns, u64 *nm)
2279 {
2280 	unsigned int i, j;
2281 	u8 wwn[8];
2282 
2283 	memset(wwn, 0, sizeof(wwn));
2284 
2285 	/* Validate and store the new name */
2286 	for (i=0, j=0; i < 16; i++) {
2287 		int value;
2288 
2289 		value = hex_to_bin(*ns++);
2290 		if (value >= 0)
2291 			j = (j << 4) | value;
2292 		else
2293 			return -EINVAL;
2294 		if (i % 2) {
2295 			wwn[i/2] = j & 0xff;
2296 			j = 0;
2297 		}
2298 	}
2299 
2300 	*nm = wwn_to_u64(wwn);
2301 
2302 	return 0;
2303 }
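/*
 * Example with an illustrative value: fc_parse_wwn("20000000c9538d1e", &wwn)
 * fills wwn with 0x20000000c9538d1eULL and returns 0; any non-hex character
 * within the 16-character string yields -EINVAL.
 */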
2304 
2305 
2306 /*
2307  * "Short-cut" sysfs variable to create a new vport on a FC Host.
2308  * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
2309  * will default to an NPIV-based FCP_Initiator; the WWNs are specified
2310  * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
2311  */
2312 static ssize_t
2313 store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
2314 			   const char *buf, size_t count)
2315 {
2316 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2317 	struct fc_vport_identifiers vid;
2318 	struct fc_vport *vport;
2319 	unsigned int cnt=count;
2320 	int stat;
2321 
2322 	memset(&vid, 0, sizeof(vid));
2323 
2324 	/* count may include a LF at end of string */
2325 	if (buf[cnt-1] == '\n')
2326 		cnt--;
2327 
2328 	/* validate we have enough characters for WWPN */
2329 	if ((cnt != (16+1+16)) || (buf[16] != ':'))
2330 		return -EINVAL;
2331 
2332 	stat = fc_parse_wwn(&buf[0], &vid.port_name);
2333 	if (stat)
2334 		return stat;
2335 
2336 	stat = fc_parse_wwn(&buf[17], &vid.node_name);
2337 	if (stat)
2338 		return stat;
2339 
2340 	vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
2341 	vid.vport_type = FC_PORTTYPE_NPIV;
2342 	/* vid.symbolic_name is already zero/NULL's */
2343 	vid.disable = false;		/* always enabled */
2344 
2345 	/* we only allow support on Channel 0 !!! */
2346 	stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
2347 	return stat ? stat : count;
2348 }
2349 static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
2350 			store_fc_host_vport_create);
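/*
 * Illustrative usage (host number and WWNs are hypothetical):
 *
 *   echo "21000024ff45a1b2:20000024ff45a1b2" > \
 *        /sys/class/fc_host/host4/vport_create
 *
 * i.e. 16 hex digits of WWPN, a ':', then 16 hex digits of WWNN, with no
 * "0x" prefix on either name.
 */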
2351 
2352 
2353 /*
2354  * "Short-cut" sysfs variable to delete a vport on a FC Host.
2355  * Vport is identified by a string containing "<WWPN>:<WWNN>".
2356  * The WWNs are specified as hex characters, and may *not* contain
2357  * any prefixes (e.g. 0x, x, etc)
2358  */
2359 static ssize_t
2360 store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
2361 			   const char *buf, size_t count)
2362 {
2363 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2364 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2365 	struct fc_vport *vport;
2366 	u64 wwpn, wwnn;
2367 	unsigned long flags;
2368 	unsigned int cnt=count;
2369 	int stat, match;
2370 
2371 	/* count may include a LF at end of string */
2372 	if (buf[cnt-1] == '\n')
2373 		cnt--;
2374 
2375 	/* validate we have enough characters for WWPN */
2376 	if ((cnt != (16+1+16)) || (buf[16] != ':'))
2377 		return -EINVAL;
2378 
2379 	stat = fc_parse_wwn(&buf[0], &wwpn);
2380 	if (stat)
2381 		return stat;
2382 
2383 	stat = fc_parse_wwn(&buf[17], &wwnn);
2384 	if (stat)
2385 		return stat;
2386 
2387 	spin_lock_irqsave(shost->host_lock, flags);
2388 	match = 0;
2389 	/* we only allow support on Channel 0 !!! */
2390 	list_for_each_entry(vport, &fc_host->vports, peers) {
2391 		if ((vport->channel == 0) &&
2392 		    (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
2393 			if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
2394 				break;
2395 			vport->flags |= FC_VPORT_DELETING;
2396 			match = 1;
2397 			break;
2398 		}
2399 	}
2400 	spin_unlock_irqrestore(shost->host_lock, flags);
2401 
2402 	if (!match)
2403 		return -ENODEV;
2404 
2405 	stat = fc_vport_terminate(vport);
2406 	return stat ? stat : count;
2407 }
2408 static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
2409 			store_fc_host_vport_delete);
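/*
 * Illustrative usage, mirroring vport_create above (hypothetical values):
 *
 *   echo "21000024ff45a1b2:20000024ff45a1b2" > \
 *        /sys/class/fc_host/host4/vport_delete
 */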
2410 
2411 
2412 static int fc_host_match(struct attribute_container *cont,
2413 			  struct device *dev)
2414 {
2415 	struct Scsi_Host *shost;
2416 	struct fc_internal *i;
2417 
2418 	if (!scsi_is_host_device(dev))
2419 		return 0;
2420 
2421 	shost = dev_to_shost(dev);
2422 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
2423 	    != &fc_host_class.class)
2424 		return 0;
2425 
2426 	i = to_fc_internal(shost->transportt);
2427 
2428 	return &i->t.host_attrs.ac == cont;
2429 }
2430 
2431 static int fc_target_match(struct attribute_container *cont,
2432 			    struct device *dev)
2433 {
2434 	struct Scsi_Host *shost;
2435 	struct fc_internal *i;
2436 
2437 	if (!scsi_is_target_device(dev))
2438 		return 0;
2439 
2440 	shost = dev_to_shost(dev->parent);
2441 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
2442 	    != &fc_host_class.class)
2443 		return 0;
2444 
2445 	i = to_fc_internal(shost->transportt);
2446 
2447 	return &i->t.target_attrs.ac == cont;
2448 }
2449 
2450 static void fc_rport_dev_release(struct device *dev)
2451 {
2452 	struct fc_rport *rport = dev_to_rport(dev);
2453 	put_device(dev->parent);
2454 	kfree(rport);
2455 }
2456 
2457 int scsi_is_fc_rport(const struct device *dev)
2458 {
2459 	return dev->release == fc_rport_dev_release;
2460 }
2461 EXPORT_SYMBOL(scsi_is_fc_rport);
2462 
2463 static int fc_rport_match(struct attribute_container *cont,
2464 			    struct device *dev)
2465 {
2466 	struct Scsi_Host *shost;
2467 	struct fc_internal *i;
2468 
2469 	if (!scsi_is_fc_rport(dev))
2470 		return 0;
2471 
2472 	shost = dev_to_shost(dev->parent);
2473 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
2474 	    != &fc_host_class.class)
2475 		return 0;
2476 
2477 	i = to_fc_internal(shost->transportt);
2478 
2479 	return &i->rport_attr_cont.ac == cont;
2480 }
2481 
2482 
2483 static void fc_vport_dev_release(struct device *dev)
2484 {
2485 	struct fc_vport *vport = dev_to_vport(dev);
2486 	put_device(dev->parent);		/* release kobj parent */
2487 	kfree(vport);
2488 }
2489 
2490 static int scsi_is_fc_vport(const struct device *dev)
2491 {
2492 	return dev->release == fc_vport_dev_release;
2493 }
2494 
2495 static int fc_vport_match(struct attribute_container *cont,
2496 			    struct device *dev)
2497 {
2498 	struct fc_vport *vport;
2499 	struct Scsi_Host *shost;
2500 	struct fc_internal *i;
2501 
2502 	if (!scsi_is_fc_vport(dev))
2503 		return 0;
2504 	vport = dev_to_vport(dev);
2505 
2506 	shost = vport_to_shost(vport);
2507 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
2508 	    != &fc_host_class.class)
2509 		return 0;
2510 
2511 	i = to_fc_internal(shost->transportt);
2512 	return &i->vport_attr_cont.ac == cont;
2513 }
2514 
2515 
2516 /**
2517  * fc_eh_timed_out - FC Transport I/O timeout intercept handler
2518  * @scmd:	The SCSI command which timed out
2519  *
2520  * This routine protects against error handlers getting invoked while a
2521  * rport is in a blocked state, typically due to a temporary loss of
2522  * connectivity. If the error handlers are allowed to proceed, requests
2523  * to abort i/o, reset the target, etc. will likely fail as there is no way
2524  * to communicate with the device to perform the requested function. These
2525  * failures may result in the midlayer taking the device offline, requiring
2526  * manual intervention to restore operation.
2527  *
2528  * This routine, called whenever an i/o times out, validates the state of
2529  * the underlying rport. If the rport is blocked, it returns
2530  * EH_RESET_TIMER, which will continue to reschedule the timeout.
2531  * Eventually, either the device will return, or devloss_tmo will fire,
2532  * and when the timeout then fires, it will be handled normally.
2533  * If the rport is not blocked, normal error handling continues.
2534  *
2535  * Notes:
2536  *	This routine assumes no locks are held on entry.
2537  */
2538 enum blk_eh_timer_return
2539 fc_eh_timed_out(struct scsi_cmnd *scmd)
2540 {
2541 	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
2542 
2543 	if (rport->port_state == FC_PORTSTATE_BLOCKED)
2544 		return BLK_EH_RESET_TIMER;
2545 
2546 	return BLK_EH_DONE;
2547 }
2548 EXPORT_SYMBOL(fc_eh_timed_out);
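/*
 * Typical, illustrative LLDD usage: the handler is usually wired into the
 * driver's SCSI host template so the midlayer consults the rport state
 * before starting error recovery. "example_sht" is a hypothetical name:
 *
 *   static const struct scsi_host_template example_sht = {
 *           ...
 *           .eh_timed_out = fc_eh_timed_out,
 *   };
 */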
2549 
2550 /*
2551  * Called by fc_user_scan to locate an rport on the shost that
2552  * matches the channel and target id, and invoke scsi_scan_target()
2553  * on the rport.
2554  */
2555 static void
2556 fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
2557 {
2558 	struct fc_rport *rport;
2559 	unsigned long flags;
2560 
2561 	spin_lock_irqsave(shost->host_lock, flags);
2562 
2563 	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
2564 		if (rport->scsi_target_id == -1)
2565 			continue;
2566 
2567 		if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
2568 			(rport->port_state != FC_PORTSTATE_MARGINAL))
2569 			continue;
2570 
2571 		if ((channel == rport->channel) &&
2572 		    (id == rport->scsi_target_id)) {
2573 			spin_unlock_irqrestore(shost->host_lock, flags);
2574 			scsi_scan_target(&rport->dev, channel, id, lun,
2575 					 SCSI_SCAN_MANUAL);
2576 			return;
2577 		}
2578 	}
2579 
2580 	spin_unlock_irqrestore(shost->host_lock, flags);
2581 }
2582 
2583 /*
2584  * Called via sysfs scan routines. Necessary, as the FC transport
2585  * wants to place all target objects below the rport object. So this
2586  * routine must invoke the scsi_scan_target() routine with the rport
2587  * object as the parent.
2588  */
2589 static int
2590 fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
2591 {
2592 	uint chlo, chhi;
2593 	uint tgtlo, tgthi;
2594 
2595 	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
2596 	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
2597 	    ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
2598 		return -EINVAL;
2599 
2600 	if (channel == SCAN_WILD_CARD) {
2601 		chlo = 0;
2602 		chhi = shost->max_channel + 1;
2603 	} else {
2604 		chlo = channel;
2605 		chhi = channel + 1;
2606 	}
2607 
2608 	if (id == SCAN_WILD_CARD) {
2609 		tgtlo = 0;
2610 		tgthi = shost->max_id;
2611 	} else {
2612 		tgtlo = id;
2613 		tgthi = id + 1;
2614 	}
2615 
2616 	for ( ; chlo < chhi; chlo++)
2617 		for ( ; tgtlo < tgthi; tgtlo++)
2618 			fc_user_scan_tgt(shost, chlo, tgtlo, lun);
2619 
2620 	return 0;
2621 }
2622 
2623 struct scsi_transport_template *
2624 fc_attach_transport(struct fc_function_template *ft)
2625 {
2626 	int count;
2627 	struct fc_internal *i = kzalloc(sizeof(struct fc_internal),
2628 					GFP_KERNEL);
2629 
2630 	if (unlikely(!i))
2631 		return NULL;
2632 
2633 	i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
2634 	i->t.target_attrs.ac.class = &fc_transport_class.class;
2635 	i->t.target_attrs.ac.match = fc_target_match;
2636 	i->t.target_size = sizeof(struct fc_starget_attrs);
2637 	transport_container_register(&i->t.target_attrs);
2638 
2639 	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
2640 	i->t.host_attrs.ac.class = &fc_host_class.class;
2641 	i->t.host_attrs.ac.match = fc_host_match;
2642 	i->t.host_size = sizeof(struct fc_host_attrs);
2643 	if (ft->get_fc_host_stats)
2644 		i->t.host_attrs.statistics = &fc_statistics_group;
2645 	transport_container_register(&i->t.host_attrs);
2646 
2647 	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
2648 	i->rport_attr_cont.ac.class = &fc_rport_class.class;
2649 	i->rport_attr_cont.ac.match = fc_rport_match;
2650 	i->rport_attr_cont.statistics = &fc_rport_statistics_group;
2651 	transport_container_register(&i->rport_attr_cont);
2652 
2653 	i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
2654 	i->vport_attr_cont.ac.class = &fc_vport_class.class;
2655 	i->vport_attr_cont.ac.match = fc_vport_match;
2656 	transport_container_register(&i->vport_attr_cont);
2657 
2658 	i->f = ft;
2659 
2660 	/* Transport uses the shost workq for scsi scanning */
2661 	i->t.create_work_queue = 1;
2662 
2663 	i->t.user_scan = fc_user_scan;
2664 
2665 	/*
2666 	 * Setup SCSI Target Attributes.
2667 	 */
2668 	count = 0;
2669 	SETUP_STARGET_ATTRIBUTE_RD(node_name);
2670 	SETUP_STARGET_ATTRIBUTE_RD(port_name);
2671 	SETUP_STARGET_ATTRIBUTE_RD(port_id);
2672 
2673 	BUG_ON(count > FC_STARGET_NUM_ATTRS);
2674 
2675 	i->starget_attrs[count] = NULL;
2676 
2677 
2678 	/*
2679 	 * Setup SCSI Host Attributes.
2680 	 */
2681 	count=0;
2682 	SETUP_HOST_ATTRIBUTE_RD(node_name);
2683 	SETUP_HOST_ATTRIBUTE_RD(port_name);
2684 	SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
2685 	SETUP_HOST_ATTRIBUTE_RD(supported_classes);
2686 	SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
2687 	SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
2688 	SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
2689 	if (ft->vport_create) {
2690 		SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
2691 		SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
2692 	}
2693 	SETUP_HOST_ATTRIBUTE_RD(serial_number);
2694 	SETUP_HOST_ATTRIBUTE_RD(manufacturer);
2695 	SETUP_HOST_ATTRIBUTE_RD(model);
2696 	SETUP_HOST_ATTRIBUTE_RD(model_description);
2697 	SETUP_HOST_ATTRIBUTE_RD(hardware_version);
2698 	SETUP_HOST_ATTRIBUTE_RD(driver_version);
2699 	SETUP_HOST_ATTRIBUTE_RD(firmware_version);
2700 	SETUP_HOST_ATTRIBUTE_RD(optionrom_version);
2701 
2702 	SETUP_HOST_ATTRIBUTE_RD(port_id);
2703 	SETUP_HOST_ATTRIBUTE_RD(port_type);
2704 	SETUP_HOST_ATTRIBUTE_RD(port_state);
2705 	SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
2706 	SETUP_HOST_ATTRIBUTE_RD(speed);
2707 	SETUP_HOST_ATTRIBUTE_RD(fabric_name);
2708 	SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
2709 	SETUP_HOST_ATTRIBUTE_RW(system_hostname);
2710 
2711 	/* Transport-managed attributes */
2712 	SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
2713 	SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
2714 	if (ft->issue_fc_host_lip)
2715 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
2716 	if (ft->vport_create)
2717 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
2718 	if (ft->vport_delete)
2719 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
2720 
2721 	BUG_ON(count > FC_HOST_NUM_ATTRS);
2722 
2723 	i->host_attrs[count] = NULL;
2724 
2725 	/*
2726 	 * Setup Remote Port Attributes.
2727 	 */
2728 	count=0;
2729 	SETUP_RPORT_ATTRIBUTE_RD(maxframe_size);
2730 	SETUP_RPORT_ATTRIBUTE_RD(supported_classes);
2731 	SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
2732 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name);
2733 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
2734 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
2735 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
2736 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(port_state);
2737 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
2738 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
2739 
2740 	BUG_ON(count > FC_RPORT_NUM_ATTRS);
2741 
2742 	i->rport_attrs[count] = NULL;
2743 
2744 	/*
2745 	 * Setup Virtual Port Attributes.
2746 	 */
2747 	count=0;
2748 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
2749 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
2750 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
2751 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
2752 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
2753 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
2754 	SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
2755 	SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
2756 	SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
2757 
2758 	BUG_ON(count > FC_VPORT_NUM_ATTRS);
2759 
2760 	i->vport_attrs[count] = NULL;
2761 
2762 	return &i->t;
2763 }
2764 EXPORT_SYMBOL(fc_attach_transport);
2765 
2766 void fc_release_transport(struct scsi_transport_template *t)
2767 {
2768 	struct fc_internal *i = to_fc_internal(t);
2769 
2770 	transport_container_unregister(&i->t.target_attrs);
2771 	transport_container_unregister(&i->t.host_attrs);
2772 	transport_container_unregister(&i->rport_attr_cont);
2773 	transport_container_unregister(&i->vport_attr_cont);
2774 
2775 	kfree(i);
2776 }
2777 EXPORT_SYMBOL(fc_release_transport);
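/*
 * Typical, illustrative driver pattern with hypothetical names: an LLDD
 * fills a struct fc_function_template, attaches it once at module init,
 * points each shost->transportt at the result, and releases it at exit:
 *
 *   static struct scsi_transport_template *example_fc_tt;
 *
 *   static int __init example_init(void)
 *   {
 *           example_fc_tt = fc_attach_transport(&example_fc_functions);
 *           if (!example_fc_tt)
 *                   return -ENODEV;
 *           ...   // register the driver; per host, set
 *                 // shost->transportt = example_fc_tt before scsi_add_host()
 *           return 0;
 *   }
 *
 *   static void __exit example_exit(void)
 *   {
 *           ...   // unregister hosts first
 *           fc_release_transport(example_fc_tt);
 *   }
 */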
2778 
2779 /**
2780  * fc_queue_work - Queue work to the fc_host workqueue.
2781  * @shost:	Pointer to Scsi_Host bound to fc_host.
2782  * @work:	Work to queue for execution.
2783  *
2784  * Return value:
2785  * 	1 - work queued for execution
2786  *	0 - work is already queued
2787  *	-EINVAL - work queue doesn't exist
2788  */
2789 static int
2790 fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
2791 {
2792 	if (unlikely(!fc_host_work_q(shost))) {
2793 		printk(KERN_ERR
2794 			"ERROR: FC host '%s' attempted to queue work, "
2795 			"when no workqueue created.\n", shost->hostt->name);
2796 		dump_stack();
2797 
2798 		return -EINVAL;
2799 	}
2800 
2801 	return queue_work(fc_host_work_q(shost), work);
2802 }
2803 
2804 /**
2805  * fc_flush_work - Flush a fc_host's workqueue.
2806  * @shost:	Pointer to Scsi_Host bound to fc_host.
2807  */
2808 static void
2809 fc_flush_work(struct Scsi_Host *shost)
2810 {
2811 	if (!fc_host_work_q(shost)) {
2812 		printk(KERN_ERR
2813 			"ERROR: FC host '%s' attempted to flush work, "
2814 			"when no workqueue created.\n", shost->hostt->name);
2815 		dump_stack();
2816 		return;
2817 	}
2818 
2819 	flush_workqueue(fc_host_work_q(shost));
2820 }
2821 
2822 /**
2823  * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue.
2824  * @shost:	Pointer to Scsi_Host bound to fc_host.
2825  * @work:	Work to queue for execution.
2826  * @delay:	jiffies to delay the work queuing
2827  *
2828  * Return value:
2829  * 	1 on success / 0 already queued / < 0 for error
2830  */
2831 static int
2832 fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
2833 				unsigned long delay)
2834 {
2835 	if (unlikely(!fc_host_devloss_work_q(shost))) {
2836 		printk(KERN_ERR
2837 			"ERROR: FC host '%s' attempted to queue work, "
2838 			"when no workqueue created.\n", shost->hostt->name);
2839 		dump_stack();
2840 
2841 		return -EINVAL;
2842 	}
2843 
2844 	return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
2845 }
2846 
2847 /**
2848  * fc_flush_devloss - Flush a fc_host's devloss workqueue.
2849  * @shost:	Pointer to Scsi_Host bound to fc_host.
2850  */
2851 static void
2852 fc_flush_devloss(struct Scsi_Host *shost)
2853 {
2854 	if (!fc_host_devloss_work_q(shost)) {
2855 		printk(KERN_ERR
2856 			"ERROR: FC host '%s' attempted to flush work, "
2857 			"when no workqueue created.\n", shost->hostt->name);
2858 		dump_stack();
2859 		return;
2860 	}
2861 
2862 	flush_workqueue(fc_host_devloss_work_q(shost));
2863 }
2864 
2865 
2866 /**
2867  * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host.
2868  * @shost:	Which &Scsi_Host
2869  *
2870  * This routine is expected to be called immediately preceding a
2871  * driver's call to scsi_remove_host().
2872  *
2873  * WARNING: A driver utilizing the fc_transport, which fails to call
2874  *   this routine prior to scsi_remove_host(), will leave dangling
2875  *   objects in /sys/class/fc_remote_ports. Access to any of these
2876  *   objects can result in a system crash !!!
2877  *
2878  * Notes:
2879  *	This routine assumes no locks are held on entry.
2880  */
2881 void
2882 fc_remove_host(struct Scsi_Host *shost)
2883 {
2884 	struct fc_vport *vport = NULL, *next_vport = NULL;
2885 	struct fc_rport *rport = NULL, *next_rport = NULL;
2886 	struct workqueue_struct *work_q;
2887 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2888 	unsigned long flags;
2889 
2890 	spin_lock_irqsave(shost->host_lock, flags);
2891 
2892 	/* Remove any vports */
2893 	list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
2894 		vport->flags |= FC_VPORT_DELETING;
2895 		fc_queue_work(shost, &vport->vport_delete_work);
2896 	}
2897 
2898 	/* Remove any remote ports */
2899 	list_for_each_entry_safe(rport, next_rport,
2900 			&fc_host->rports, peers) {
2901 		list_del(&rport->peers);
2902 		rport->port_state = FC_PORTSTATE_DELETED;
2903 		fc_queue_work(shost, &rport->rport_delete_work);
2904 	}
2905 
2906 	list_for_each_entry_safe(rport, next_rport,
2907 			&fc_host->rport_bindings, peers) {
2908 		list_del(&rport->peers);
2909 		rport->port_state = FC_PORTSTATE_DELETED;
2910 		fc_queue_work(shost, &rport->rport_delete_work);
2911 	}
2912 
2913 	spin_unlock_irqrestore(shost->host_lock, flags);
2914 
2915 	/* flush all scan work items */
2916 	scsi_flush_work(shost);
2917 
2918 	/* flush all stgt delete, and rport delete work items, then kill it  */
2919 	if (fc_host->work_q) {
2920 		work_q = fc_host->work_q;
2921 		fc_host->work_q = NULL;
2922 		destroy_workqueue(work_q);
2923 	}
2924 
2925 	/* flush all devloss work items, then kill it  */
2926 	if (fc_host->devloss_work_q) {
2927 		work_q = fc_host->devloss_work_q;
2928 		fc_host->devloss_work_q = NULL;
2929 		destroy_workqueue(work_q);
2930 	}
2931 }
2932 EXPORT_SYMBOL(fc_remove_host);
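/*
 * Illustrative teardown ordering implied by the warning above; the
 * function name is hypothetical and cleanup details are driver-specific:
 *
 *   static void example_remove_one(struct Scsi_Host *shost)
 *   {
 *           fc_remove_host(shost);     // tear down rports/vports first
 *           scsi_remove_host(shost);   // then detach the SCSI host
 *           ...                        // LLDD-specific cleanup
 *           scsi_host_put(shost);
 *   }
 */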
2933 
2934 static void fc_terminate_rport_io(struct fc_rport *rport)
2935 {
2936 	struct Scsi_Host *shost = rport_to_shost(rport);
2937 	struct fc_internal *i = to_fc_internal(shost->transportt);
2938 
2939 	/* Involve the LLDD if possible to terminate all io on the rport. */
2940 	if (i->f->terminate_rport_io)
2941 		i->f->terminate_rport_io(rport);
2942 
2943 	/*
2944 	 * Must unblock to flush queued IO. scsi-ml will fail incoming reqs.
2945 	 */
2946 	scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
2947 }
2948 
2949 /**
2950  * fc_starget_delete - called to delete the scsi descendants of an rport
2951  * @work:	remote port to be operated on.
2952  *
2953  * Deletes target and all sdevs.
2954  */
2955 static void
2956 fc_starget_delete(struct work_struct *work)
2957 {
2958 	struct fc_rport *rport =
2959 		container_of(work, struct fc_rport, stgt_delete_work);
2960 
2961 	fc_terminate_rport_io(rport);
2962 	scsi_remove_target(&rport->dev);
2963 }
2964 
2965 
2966 /**
2967  * fc_rport_final_delete - finish rport termination and delete it.
2968  * @work:	remote port to be deleted.
2969  */
2970 static void
2971 fc_rport_final_delete(struct work_struct *work)
2972 {
2973 	struct fc_rport *rport =
2974 		container_of(work, struct fc_rport, rport_delete_work);
2975 	struct device *dev = &rport->dev;
2976 	struct Scsi_Host *shost = rport_to_shost(rport);
2977 	struct fc_internal *i = to_fc_internal(shost->transportt);
2978 	unsigned long flags;
2979 	int do_callback = 0;
2980 
2981 	fc_terminate_rport_io(rport);
2982 
2983 	/*
2984 	 * if a scan is pending, flush the SCSI Host work_q so that
2985 	 * we can reclaim the rport scan work element.
2986 	 */
2987 	if (rport->flags & FC_RPORT_SCAN_PENDING)
2988 		scsi_flush_work(shost);
2989 
2990 	/*
2991 	 * Cancel any outstanding timers. These should really exist
2992 	 * only when rmmod'ing the LLDD and we're asking for
2993 	 * immediate termination of the rports
2994 	 */
2995 	spin_lock_irqsave(shost->host_lock, flags);
2996 	if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
2997 		spin_unlock_irqrestore(shost->host_lock, flags);
2998 		if (!cancel_delayed_work(&rport->fail_io_work))
2999 			fc_flush_devloss(shost);
3000 		if (!cancel_delayed_work(&rport->dev_loss_work))
3001 			fc_flush_devloss(shost);
3002 		cancel_work_sync(&rport->scan_work);
3003 		spin_lock_irqsave(shost->host_lock, flags);
3004 		rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
3005 	}
3006 	spin_unlock_irqrestore(shost->host_lock, flags);
3007 
3008 	/* Delete SCSI target and sdevs */
3009 	if (rport->scsi_target_id != -1)
3010 		fc_starget_delete(&rport->stgt_delete_work);
3011 
3012 	/*
3013 	 * Notify the driver that the rport is now dead. The LLDD will
3014 	 * also guarantee that any communication to the rport is terminated
3015 	 *
3016 	 * Avoid this call if we already called it when we preserved the
3017 	 * rport for the binding.
3018 	 */
3019 	spin_lock_irqsave(shost->host_lock, flags);
3020 	if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
3021 	    (i->f->dev_loss_tmo_callbk)) {
3022 		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3023 		do_callback = 1;
3024 	}
3025 	spin_unlock_irqrestore(shost->host_lock, flags);
3026 
3027 	if (do_callback)
3028 		i->f->dev_loss_tmo_callbk(rport);
3029 
3030 	fc_bsg_remove(rport->rqst_q);
3031 
3032 	transport_remove_device(dev);
3033 	device_del(dev);
3034 	transport_destroy_device(dev);
3035 	scsi_host_put(shost);			/* for fc_host->rport list */
3036 	put_device(dev);			/* for self-reference */
3037 }
3038 
3039 
3040 /**
3041  * fc_remote_port_create - allocates and creates a remote FC port.
3042  * @shost:	scsi host the remote port is connected to.
3043  * @channel:	Channel on shost port connected to.
3044  * @ids:	The world wide names, fc address, and FC4 port
3045  *		roles for the remote port.
3046  *
3047  * Allocates and creates the remote port structure, including the
3048  * class and sysfs creation.
3049  *
3050  * Notes:
3051  *	This routine assumes no locks are held on entry.
3052  */
3053 static struct fc_rport *
3054 fc_remote_port_create(struct Scsi_Host *shost, int channel,
3055 		      struct fc_rport_identifiers  *ids)
3056 {
3057 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3058 	struct fc_internal *fci = to_fc_internal(shost->transportt);
3059 	struct fc_rport *rport;
3060 	struct device *dev;
3061 	unsigned long flags;
3062 	int error;
3063 	size_t size;
3064 
3065 	size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
3066 	rport = kzalloc(size, GFP_KERNEL);
3067 	if (unlikely(!rport)) {
3068 		printk(KERN_ERR "%s: allocation failure\n", __func__);
3069 		return NULL;
3070 	}
3071 
3072 	rport->maxframe_size = -1;
3073 	rport->supported_classes = FC_COS_UNSPECIFIED;
3074 	rport->dev_loss_tmo = fc_host->dev_loss_tmo;
3075 	memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
3076 	memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
3077 	rport->port_id = ids->port_id;
3078 	rport->roles = ids->roles;
3079 	rport->port_state = FC_PORTSTATE_ONLINE;
3080 	if (fci->f->dd_fcrport_size)
3081 		rport->dd_data = &rport[1];
3082 	rport->channel = channel;
3083 	rport->fast_io_fail_tmo = -1;
3084 
3085 	INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
3086 	INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
3087 	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
3088 	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
3089 	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
3090 
3091 	spin_lock_irqsave(shost->host_lock, flags);
3092 
3093 	rport->number = fc_host->next_rport_number++;
3094 	if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) ||
3095 	    (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR))
3096 		rport->scsi_target_id = fc_host->next_target_id++;
3097 	else
3098 		rport->scsi_target_id = -1;
3099 	list_add_tail(&rport->peers, &fc_host->rports);
3100 	scsi_host_get(shost);			/* for fc_host->rport list */
3101 
3102 	spin_unlock_irqrestore(shost->host_lock, flags);
3103 
3104 	dev = &rport->dev;
3105 	device_initialize(dev);			/* takes self reference */
3106 	dev->parent = get_device(&shost->shost_gendev); /* parent reference */
3107 	dev->release = fc_rport_dev_release;
3108 	dev_set_name(dev, "rport-%d:%d-%d",
3109 		     shost->host_no, channel, rport->number);
3110 	transport_setup_device(dev);
3111 
3112 	error = device_add(dev);
3113 	if (error) {
3114 		printk(KERN_ERR "FC Remote Port device_add failed\n");
3115 		goto delete_rport;
3116 	}
3117 	transport_add_device(dev);
3118 	transport_configure_device(dev);
3119 
3120 	fc_bsg_rportadd(shost, rport);
3121 	/* ignore any bsg add error - we just can't do sgio */
3122 
3123 	if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
3124 		/* initiate a scan of the target */
3125 		rport->flags |= FC_RPORT_SCAN_PENDING;
3126 		scsi_queue_work(shost, &rport->scan_work);
3127 	}
3128 
3129 	return rport;
3130 
3131 delete_rport:
3132 	transport_destroy_device(dev);
3133 	spin_lock_irqsave(shost->host_lock, flags);
3134 	list_del(&rport->peers);
3135 	scsi_host_put(shost);			/* for fc_host->rport list */
3136 	spin_unlock_irqrestore(shost->host_lock, flags);
3137 	put_device(dev->parent);
3138 	kfree(rport);
3139 	return NULL;
3140 }
3141 
3142 /**
3143  * fc_remote_port_add - notify fc transport of the existence of a remote FC port.
3144  * @shost:	scsi host the remote port is connected to.
3145  * @channel:	Channel on the shost that the remote port is connected to.
3146  * @ids:	The world wide names, fc address, and FC4 port
3147  *		roles for the remote port.
3148  *
3149  * The LLDD calls this routine to notify the transport of the existence
3150  * of a remote port. The LLDD provides the unique identifiers (wwpn, wwnn)
3151  * of the port, its FC address (port_id), and the FC4 roles that are
3152  * active for the port.
3153  *
3154  * For ports that are FCP targets (aka scsi targets), the FC transport
3155  * maintains consistent target id bindings on behalf of the LLDD.
3156  * A consistent target id binding is an assignment of a target id to
3157  * a remote port identifier, which persists while the scsi host is
3158  * attached. The remote port can disappear, then later reappear, and
3159  * its target id assignment remains the same. This allows for shifts
3160  * in FC addressing (if binding by wwpn or wwnn) with no apparent
3161  * changes to the scsi subsystem which is based on scsi host number and
3162  * target id values.  Bindings are only valid during the attachment of
3163  * the scsi host. If the host detaches, then later re-attaches, target
3164  * id bindings may change.
3165  *
3166  * This routine is responsible for returning a remote port structure.
3167  * The routine will search the list of remote ports it maintains
3168  * internally in support of consistent target id mappings. If found, the
3169  * remote port structure will be reused. Otherwise, a new remote port
3170  * structure will be allocated.
3171  *
3172  * Whenever a remote port is allocated, a new fc_remote_port class
3173  * device is created.
3174  *
3175  * Should not be called from interrupt context.
3176  *
3177  * Notes:
3178  *	This routine assumes no locks are held on entry.
3179  */
3180 struct fc_rport *
3181 fc_remote_port_add(struct Scsi_Host *shost, int channel,
3182 	struct fc_rport_identifiers  *ids)
3183 {
3184 	struct fc_internal *fci = to_fc_internal(shost->transportt);
3185 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3186 	struct fc_rport *rport;
3187 	unsigned long flags;
3188 	int match = 0;
3189 
3190 	/* ensure any stgt delete functions are done */
3191 	fc_flush_work(shost);
3192 
3193 	/*
3194 	 * Search the list of "active" rports for an rport that has been
3195 	 * deleted, but whose real delete we've held off while the target
3196 	 * is in a "blocked" state.
3197 	 */
3198 	spin_lock_irqsave(shost->host_lock, flags);
3199 
3200 	list_for_each_entry(rport, &fc_host->rports, peers) {
3201 
3202 		if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
3203 		     rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
3204 			(rport->channel == channel)) {
3205 
3206 			switch (fc_host->tgtid_bind_type) {
3207 			case FC_TGTID_BIND_BY_WWPN:
3208 			case FC_TGTID_BIND_NONE:
3209 				if (rport->port_name == ids->port_name)
3210 					match = 1;
3211 				break;
3212 			case FC_TGTID_BIND_BY_WWNN:
3213 				if (rport->node_name == ids->node_name)
3214 					match = 1;
3215 				break;
3216 			case FC_TGTID_BIND_BY_ID:
3217 				if (rport->port_id == ids->port_id)
3218 					match = 1;
3219 				break;
3220 			}
3221 
3222 			if (match) {
3223 
3224 				memcpy(&rport->node_name, &ids->node_name,
3225 					sizeof(rport->node_name));
3226 				memcpy(&rport->port_name, &ids->port_name,
3227 					sizeof(rport->port_name));
3228 				rport->port_id = ids->port_id;
3229 
3230 				rport->port_state = FC_PORTSTATE_ONLINE;
3231 				rport->roles = ids->roles;
3232 
3233 				spin_unlock_irqrestore(shost->host_lock, flags);
3234 
3235 				if (fci->f->dd_fcrport_size)
3236 					memset(rport->dd_data, 0,
3237 						fci->f->dd_fcrport_size);
3238 
3239 				/*
3240 				 * If we were not a target, cancel the
3241 				 * io terminate and rport timers, and
3242 				 * we're done.
3243 				 *
3244 				 * If we were a target, but our new role
3245 				 * doesn't indicate a target, leave the
3246 				 * timers running expecting the role to
3247 				 * change as the target fully logs in. If
3248 				 * it doesn't, the target will be torn down.
3249 				 *
3250 				 * If we were a target, and our role shows
3251 				 * we're still a target, cancel the timers
3252 				 * and kick off a scan.
3253 				 */
3254 
3255 				/* was a target, not in roles */
3256 				if ((rport->scsi_target_id != -1) &&
3257 				    (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
3258 					return rport;
3259 
3260 				/*
3261 				 * Stop the fail io and dev_loss timers.
3262 				 * If they flush, the port_state will
3263 				 * be checked and will NOOP the function.
3264 				 */
3265 				if (!cancel_delayed_work(&rport->fail_io_work))
3266 					fc_flush_devloss(shost);
3267 				if (!cancel_delayed_work(&rport->dev_loss_work))
3268 					fc_flush_devloss(shost);
3269 
3270 				spin_lock_irqsave(shost->host_lock, flags);
3271 
3272 				rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3273 						  FC_RPORT_DEVLOSS_PENDING |
3274 						  FC_RPORT_DEVLOSS_CALLBK_DONE);
3275 
3276 				spin_unlock_irqrestore(shost->host_lock, flags);
3277 
3278 				/* if target, initiate a scan */
3279 				if (rport->scsi_target_id != -1) {
3280 					scsi_target_unblock(&rport->dev,
3281 							    SDEV_RUNNING);
3282 					spin_lock_irqsave(shost->host_lock,
3283 							  flags);
3284 					rport->flags |= FC_RPORT_SCAN_PENDING;
3285 					scsi_queue_work(shost,
3286 							&rport->scan_work);
3287 					spin_unlock_irqrestore(shost->host_lock,
3288 							flags);
3289 				}
3290 
3291 				fc_bsg_goose_queue(rport);
3292 
3293 				return rport;
3294 			}
3295 		}
3296 	}
3297 
3298 	/*
3299 	 * Search the bindings array
3300 	 * Note: if never an FCP target, you won't be on this list
3301 	 */
3302 	if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
3303 
3304 		/* search for a matching consistent binding */
3305 
3306 		list_for_each_entry(rport, &fc_host->rport_bindings,
3307 					peers) {
3308 			if (rport->channel != channel)
3309 				continue;
3310 
3311 			switch (fc_host->tgtid_bind_type) {
3312 			case FC_TGTID_BIND_BY_WWPN:
3313 				if (rport->port_name == ids->port_name)
3314 					match = 1;
3315 				break;
3316 			case FC_TGTID_BIND_BY_WWNN:
3317 				if (rport->node_name == ids->node_name)
3318 					match = 1;
3319 				break;
3320 			case FC_TGTID_BIND_BY_ID:
3321 				if (rport->port_id == ids->port_id)
3322 					match = 1;
3323 				break;
3324 			case FC_TGTID_BIND_NONE: /* to keep compiler happy */
3325 				break;
3326 			}
3327 
3328 			if (match) {
3329 				list_move_tail(&rport->peers, &fc_host->rports);
3330 				break;
3331 			}
3332 		}
3333 
3334 		if (match) {
3335 			memcpy(&rport->node_name, &ids->node_name,
3336 				sizeof(rport->node_name));
3337 			memcpy(&rport->port_name, &ids->port_name,
3338 				sizeof(rport->port_name));
3339 			rport->port_id = ids->port_id;
3340 			rport->port_state = FC_PORTSTATE_ONLINE;
3341 			rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3342 
3343 			if (fci->f->dd_fcrport_size)
3344 				memset(rport->dd_data, 0,
3345 						fci->f->dd_fcrport_size);
3346 			spin_unlock_irqrestore(shost->host_lock, flags);
3347 
3348 			fc_remote_port_rolechg(rport, ids->roles);
3349 			return rport;
3350 		}
3351 	}
3352 
3353 	spin_unlock_irqrestore(shost->host_lock, flags);
3354 
3355 	/* No consistent binding found - create new remote port entry */
3356 	rport = fc_remote_port_create(shost, channel, ids);
3357 
3358 	return rport;
3359 }
3360 EXPORT_SYMBOL(fc_remote_port_add);
3361 
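
/*
 * Illustrative sketch (editor's example, not part of the transport): how a
 * hypothetical LLDD discovery path might report a newly discovered N_Port.
 * The helper name, WWNs and FC address below are invented; only
 * fc_rport_identifiers and fc_remote_port_add() are real transport
 * interfaces. Guarded by #if 0 so it never affects the build.
 */
#if 0
static struct fc_rport *
example_lldd_report_nport(struct Scsi_Host *shost, int channel)
{
	struct fc_rport_identifiers ids;

	memset(&ids, 0, sizeof(ids));
	ids.node_name = 0x20000000c9000001ULL;	/* hypothetical WWNN */
	ids.port_name = 0x10000000c9000001ULL;	/* hypothetical WWPN */
	ids.port_id = 0x010200;			/* hypothetical FC address */
	ids.roles = FC_PORT_ROLE_FCP_TARGET;

	/* may reuse a blocked rport or a consistent binding; NULL on failure */
	return fc_remote_port_add(shost, channel, &ids);
}
#endif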
3362 
3363 /**
3364  * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence.
3365  * @rport:	The remote port that no longer exists
3366  *
3367  * The LLDD calls this routine to notify the transport that a remote
3368  * port is no longer part of the topology. Note: Although a port
3369  * may no longer be part of the topology, it may persist in the remote
3370  * ports displayed by the fc_host. We do this under two conditions:
3371  *
3372  * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
3373  *    This allows the port to temporarily disappear, then reappear without
3374  *    disrupting the SCSI device tree attached to it. During the "blocked"
3375  *    period the port will still exist.
3376  *
3377  * 2) If the port was a scsi target and disappears for longer than we
3378  *    expect, we'll delete the port and tear down the SCSI device tree
3379  *    attached to it. However, we want to semi-persist the target id assigned
3380  *    to that port in case it eventually reappears. The port structure will
3381  *    remain (although with minimal information) so that the target id
3382  *    bindings also remain.
3383  *
3384  * If the remote port is not an FCP Target, it will be fully torn down
3385  * and deallocated, including the fc_remote_port class device.
3386  *
3387  * If the remote port is an FCP Target, the port will be placed in a
3388  * temporary blocked state. From the LLDD's perspective, the rport no
3389  * longer exists. From the SCSI midlayer's perspective, the SCSI target
3390  * exists, but all sdevs on it are blocked from further I/O. The following
3391  * is then expected.
3392  *
3393  *   If the remote port does not return (signaled by a LLDD call to
3394  *   fc_remote_port_add()) within the dev_loss_tmo timeout, then the
3395  *   scsi target is removed - killing all outstanding i/o and removing the
3396  *   scsi devices attached to it. The port structure will be marked Not
3397  *   Present and be partially cleared, leaving only enough information to
3398  *   recognize the remote port relative to the scsi target id binding if
3399  *   it later appears.  The port will remain as long as there is a valid
3400  *   binding (e.g. until the user changes the binding type or unloads the
3401  *   scsi host with the binding).
3402  *
3403  *   If the remote port returns within the dev_loss_tmo value (and matches
3404  *   according to the target id binding type), the port structure will be
3405  *   reused. If it is no longer a SCSI target, the target will be torn
3406  *   down. If it continues to be a SCSI target, then the target will be
3407  *   unblocked (allowing i/o to be resumed), and a scan will be activated
3408  *   to ensure that all luns are detected.
3409  *
3410  * Called from normal process context only - cannot be called from interrupt.
3411  *
3412  * Notes:
3413  *	This routine assumes no locks are held on entry.
3414  */
3415 void
3416 fc_remote_port_delete(struct fc_rport  *rport)
3417 {
3418 	struct Scsi_Host *shost = rport_to_shost(rport);
3419 	unsigned long timeout = rport->dev_loss_tmo;
3420 	unsigned long flags;
3421 
3422 	/*
3423 	 * No need to flush the fc_host work_q's, as all adds are synchronous.
3424 	 *
3425 	 * We do need to reclaim the rport scan work element, so eventually
3426 	 * (in fc_rport_final_delete()) we'll flush the scsi host work_q if
3427 	 * there's still a scan pending.
3428 	 */
3429 
3430 	spin_lock_irqsave(shost->host_lock, flags);
3431 
3432 	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
3433 		(rport->port_state != FC_PORTSTATE_MARGINAL)) {
3434 		spin_unlock_irqrestore(shost->host_lock, flags);
3435 		return;
3436 	}
3437 
3438 	/*
3439 	 * In the past, if this was not an FCP-Target, we would
3440 	 * unconditionally just jump to deleting the rport.
3441 	 * However, rports can be used as node containers by the LLDD,
3442 	 * and it's not appropriate to just terminate the rport at the
3443 	 * first sign of a loss in connectivity. The LLDD may want to
3444 	 * send ELS traffic to re-validate the login. If the rport is
3445 	 * deleted immediately, it becomes unsuitable as a node
3446 	 * container.
3447 	 * So... we now unconditionally wait dev_loss_tmo before
3448 	 * destroying an rport.
3449 	 */
3450 
3451 	rport->port_state = FC_PORTSTATE_BLOCKED;
3452 
3453 	rport->flags |= FC_RPORT_DEVLOSS_PENDING;
3454 
3455 	spin_unlock_irqrestore(shost->host_lock, flags);
3456 
3457 	scsi_target_block(&rport->dev);
3458 
3459 	/* see if we need to kill io faster than waiting for device loss */
3460 	if ((rport->fast_io_fail_tmo != -1) &&
3461 	    (rport->fast_io_fail_tmo < timeout))
3462 		fc_queue_devloss_work(shost, &rport->fail_io_work,
3463 					rport->fast_io_fail_tmo * HZ);
3464 
3465 	/* cap the length of time the devices can be blocked before deletion */
3466 	fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
3467 }
3468 EXPORT_SYMBOL(fc_remote_port_delete);
3469 
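
/*
 * Illustrative sketch (editor's example): a hypothetical LLDD reacting to a
 * port logout / loss of connectivity by handing the rport back to the
 * transport. Everything driver-side here is invented; fc_remote_port_delete()
 * then blocks the target and arms the fast_io_fail_tmo/dev_loss_tmo timers as
 * described above. Guarded by #if 0 so it is never compiled.
 */
#if 0
static void example_lldd_port_gone(struct fc_rport *rport)
{
	/*
	 * From this point the LLDD must treat the rport as gone; if the
	 * port returns within dev_loss_tmo, the LLDD re-registers it with
	 * fc_remote_port_add() and the same rport structure is reused.
	 */
	fc_remote_port_delete(rport);
}
#endif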
3470 /**
3471  * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed.
3472  * @rport:	The remote port that changed.
3473  * @roles:      New roles for this port.
3474  *
3475  * Description: The LLDD calls this routine to notify the transport that the
3476  * roles on a remote port may have changed. The largest effect is when
3477  * a port now becomes an FCP Target: it must be allocated a
3478  * scsi target id.  If the port is no longer an FCP target, any
3479  * scsi target id value assigned to it will persist in case the
3480  * role changes back to include FCP Target. No changes in the scsi
3481  * midlayer will be invoked if the role changes (in the expectation
3482  * that the role will be resumed; if it is not, normal error processing
3483  * will take place).
3484  *
3485  * Should not be called from interrupt context.
3486  *
3487  * Notes:
3488  *	This routine assumes no locks are held on entry.
3489  */
3490 void
3491 fc_remote_port_rolechg(struct fc_rport  *rport, u32 roles)
3492 {
3493 	struct Scsi_Host *shost = rport_to_shost(rport);
3494 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3495 	unsigned long flags;
3496 	int create = 0;
3497 
3498 	spin_lock_irqsave(shost->host_lock, flags);
3499 	if (roles & FC_PORT_ROLE_FCP_TARGET) {
3500 		if (rport->scsi_target_id == -1) {
3501 			rport->scsi_target_id = fc_host->next_target_id++;
3502 			create = 1;
3503 		} else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
3504 			create = 1;
3505 	}
3506 
3507 	rport->roles = roles;
3508 
3509 	spin_unlock_irqrestore(shost->host_lock, flags);
3510 
3511 	if (create) {
3512 		/*
3513 		 * There may have been a delete timer running on the
3514 		 * port. Ensure that it is cancelled as we now know
3515 		 * the port is an FCP Target.
3516 		 * Note: we know the rport exists and is in an online
3517 		 *  state as the LLDD would not have had an rport
3518 		 *  reference to pass us.
3519 		 *
3520 		 * Take no action if cancel_delayed_work() fails, as the
3521 		 * state machine state change will validate the
3522 		 * transaction.
3523 		 */
3524 		if (!cancel_delayed_work(&rport->fail_io_work))
3525 			fc_flush_devloss(shost);
3526 		if (!cancel_delayed_work(&rport->dev_loss_work))
3527 			fc_flush_devloss(shost);
3528 
3529 		spin_lock_irqsave(shost->host_lock, flags);
3530 		rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3531 				  FC_RPORT_DEVLOSS_PENDING |
3532 				  FC_RPORT_DEVLOSS_CALLBK_DONE);
3533 		spin_unlock_irqrestore(shost->host_lock, flags);
3534 
3535 		/* ensure any stgt delete functions are done */
3536 		fc_flush_work(shost);
3537 
3538 		scsi_target_unblock(&rport->dev, SDEV_RUNNING);
3539 		/* initiate a scan of the target */
3540 		spin_lock_irqsave(shost->host_lock, flags);
3541 		rport->flags |= FC_RPORT_SCAN_PENDING;
3542 		scsi_queue_work(shost, &rport->scan_work);
3543 		spin_unlock_irqrestore(shost->host_lock, flags);
3544 	}
3545 }
3546 EXPORT_SYMBOL(fc_remote_port_rolechg);
3547 
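/*
 * Illustrative sketch (editor's example): a hypothetical LLDD noticing, e.g.
 * after a PRLI completes, that a port previously registered with unknown
 * roles is now an FCP target. The transport then assigns (or revalidates)
 * the scsi target id and kicks off a scan. The helper name is invented;
 * guarded by #if 0.
 */
#if 0
static void example_lldd_prli_done(struct fc_rport *rport)
{
	fc_remote_port_rolechg(rport, FC_PORT_ROLE_FCP_TARGET);
}
#endif
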
3548 /**
3549  * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
3550  * @work:	rport target that failed to reappear in the allotted time.
3551  *
3552  * Description: An attempt to delete a remote port blocks, and if the port
3553  *              fails to reappear in the allotted time this handler is called.
3554  */
3555 static void
3556 fc_timeout_deleted_rport(struct work_struct *work)
3557 {
3558 	struct fc_rport *rport =
3559 		container_of(work, struct fc_rport, dev_loss_work.work);
3560 	struct Scsi_Host *shost = rport_to_shost(rport);
3561 	struct fc_internal *i = to_fc_internal(shost->transportt);
3562 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3563 	unsigned long flags;
3564 	int do_callback = 0;
3565 
3566 	spin_lock_irqsave(shost->host_lock, flags);
3567 
3568 	rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
3569 
3570 	/*
3571 	 * If the port is ONLINE, then it came back. If it was a SCSI
3572 	 * target, validate it still is. If not, tear down the
3573 	 * scsi_target on it.
3574 	 */
3575 	if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
3576 		(rport->port_state == FC_PORTSTATE_MARGINAL)) &&
3577 	    (rport->scsi_target_id != -1) &&
3578 	    !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
3579 		dev_printk(KERN_ERR, &rport->dev,
3580 			"blocked FC remote port time out: no longer"
3581 			" a FCP target, removing starget\n");
3582 		spin_unlock_irqrestore(shost->host_lock, flags);
3583 		scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
3584 		fc_queue_work(shost, &rport->stgt_delete_work);
3585 		return;
3586 	}
3587 
3588 	/* NOOP state - we're flushing workq's */
3589 	if (rport->port_state != FC_PORTSTATE_BLOCKED) {
3590 		spin_unlock_irqrestore(shost->host_lock, flags);
3591 		dev_printk(KERN_ERR, &rport->dev,
3592 			"blocked FC remote port time out: leaving"
3593 			" rport%s alone\n",
3594 			(rport->scsi_target_id != -1) ?  " and starget" : "");
3595 		return;
3596 	}
3597 
3598 	if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
3599 	    (rport->scsi_target_id == -1)) {
3600 		list_del(&rport->peers);
3601 		rport->port_state = FC_PORTSTATE_DELETED;
3602 		dev_printk(KERN_ERR, &rport->dev,
3603 			"blocked FC remote port time out: removing"
3604 			" rport%s\n",
3605 			(rport->scsi_target_id != -1) ?  " and starget" : "");
3606 		fc_queue_work(shost, &rport->rport_delete_work);
3607 		spin_unlock_irqrestore(shost->host_lock, flags);
3608 		return;
3609 	}
3610 
3611 	dev_printk(KERN_ERR, &rport->dev,
3612 		"blocked FC remote port time out: removing target and "
3613 		"saving binding\n");
3614 
3615 	list_move_tail(&rport->peers, &fc_host->rport_bindings);
3616 
3617 	/*
3618 	 * Note: We do not remove or clear the hostdata area. This allows
3619 	 *   host-specific target data to persist along with the
3620 	 *   scsi_target_id. It's up to the host to manage its hostdata area.
3621 	 */
3622 
3623 	/*
3624 	 * Reinitialize port attributes that may change if the port comes back.
3625 	 */
3626 	rport->maxframe_size = -1;
3627 	rport->supported_classes = FC_COS_UNSPECIFIED;
3628 	rport->roles = FC_PORT_ROLE_UNKNOWN;
3629 	rport->port_state = FC_PORTSTATE_NOTPRESENT;
3630 	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3631 
3632 	/*
3633 	 * Pre-emptively kill I/O rather than waiting for the work queue
3634 	 * item to tear down the starget. (FCoE libfc folks prefer this
3635 	 * and prefer to have the rport_port_id still set when it's done).
3636 	 */
3637 	spin_unlock_irqrestore(shost->host_lock, flags);
3638 	fc_terminate_rport_io(rport);
3639 
3640 	spin_lock_irqsave(shost->host_lock, flags);
3641 
3642 	if (rport->port_state == FC_PORTSTATE_NOTPRESENT) {	/* still missing */
3643 
3644 		/* remove the identifiers that aren't used in the consistent binding */
3645 		switch (fc_host->tgtid_bind_type) {
3646 		case FC_TGTID_BIND_BY_WWPN:
3647 			rport->node_name = -1;
3648 			rport->port_id = -1;
3649 			break;
3650 		case FC_TGTID_BIND_BY_WWNN:
3651 			rport->port_name = -1;
3652 			rport->port_id = -1;
3653 			break;
3654 		case FC_TGTID_BIND_BY_ID:
3655 			rport->node_name = -1;
3656 			rport->port_name = -1;
3657 			break;
3658 		case FC_TGTID_BIND_NONE:	/* to keep compiler happy */
3659 			break;
3660 		}
3661 
3662 		/*
3663 		 * As this only occurs if the remote port (scsi target)
3664 		 * went away and didn't come back - we'll remove
3665 		 * all attached scsi devices.
3666 		 */
3667 		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3668 		fc_queue_work(shost, &rport->stgt_delete_work);
3669 
3670 		do_callback = 1;
3671 	}
3672 
3673 	spin_unlock_irqrestore(shost->host_lock, flags);
3674 
3675 	/*
3676 	 * Notify the driver that the rport is now dead. The LLDD will
3677 	 * also guarantee that any communication to the rport is terminated.
3678 	 *
3679 	 * Note: we set the CALLBK_DONE flag above to correspond to this callback.
3680 	 */
3681 	if (do_callback && i->f->dev_loss_tmo_callbk)
3682 		i->f->dev_loss_tmo_callbk(rport);
3683 }
3684 
3685 
3686 /**
3687  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
3688  * @work:	rport to terminate io on.
3689  *
3690  * Notes: Only requests the failure of the io; it does not guarantee that
3691  *    all io has been flushed prior to returning.
3692  */
3693 static void
3694 fc_timeout_fail_rport_io(struct work_struct *work)
3695 {
3696 	struct fc_rport *rport =
3697 		container_of(work, struct fc_rport, fail_io_work.work);
3698 
3699 	if (rport->port_state != FC_PORTSTATE_BLOCKED)
3700 		return;
3701 
3702 	rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
3703 	fc_terminate_rport_io(rport);
3704 }
3705 
3706 /**
3707  * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
3708  * @work:	remote port to be scanned.
3709  */
3710 static void
3711 fc_scsi_scan_rport(struct work_struct *work)
3712 {
3713 	struct fc_rport *rport =
3714 		container_of(work, struct fc_rport, scan_work);
3715 	struct Scsi_Host *shost = rport_to_shost(rport);
3716 	struct fc_internal *i = to_fc_internal(shost->transportt);
3717 	unsigned long flags;
3718 
3719 	if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
3720 		(rport->port_state == FC_PORTSTATE_MARGINAL)) &&
3721 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
3722 	    !(i->f->disable_target_scan)) {
3723 		scsi_scan_target(&rport->dev, rport->channel,
3724 				 rport->scsi_target_id, SCAN_WILD_CARD,
3725 				 SCSI_SCAN_RESCAN);
3726 	}
3727 
3728 	spin_lock_irqsave(shost->host_lock, flags);
3729 	rport->flags &= ~FC_RPORT_SCAN_PENDING;
3730 	spin_unlock_irqrestore(shost->host_lock, flags);
3731 }
3732 
3733 /**
3734  * fc_block_rport() - Block SCSI eh thread for blocked fc_rport.
3735  * @rport: Remote port that scsi_eh is trying to recover.
3736  *
3737  * This routine can be called from a FC LLD scsi_eh callback. It
3738  * blocks the scsi_eh thread until the fc_rport leaves the
3739  * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
3740  * necessary to avoid the scsi_eh failing recovery actions for blocked
3741  * rports which would lead to offlined SCSI devices.
3742  *
3743  * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
3744  *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
3745  *	    passed back to scsi_eh.
3746  */
3747 int fc_block_rport(struct fc_rport *rport)
3748 {
3749 	struct Scsi_Host *shost = rport_to_shost(rport);
3750 	unsigned long flags;
3751 
3752 	spin_lock_irqsave(shost->host_lock, flags);
3753 	while (rport->port_state == FC_PORTSTATE_BLOCKED &&
3754 	       !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
3755 		spin_unlock_irqrestore(shost->host_lock, flags);
3756 		msleep(1000);
3757 		spin_lock_irqsave(shost->host_lock, flags);
3758 	}
3759 	spin_unlock_irqrestore(shost->host_lock, flags);
3760 
3761 	if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
3762 		return FAST_IO_FAIL;
3763 
3764 	return 0;
3765 }
3766 EXPORT_SYMBOL(fc_block_rport);
3767 
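
/*
 * Illustrative sketch (editor's example): a hypothetical LLDD error-handler
 * callback waiting out a blocked rport before issuing a reset. The handler
 * name and the "issue reset" step are invented; fc_block_rport() and the
 * FAST_IO_FAIL convention are the real interfaces (fc_block_scsi_eh() below
 * is the same pattern keyed off a scsi_cmnd). Guarded by #if 0.
 */
#if 0
static int example_lldd_eh_device_reset(struct scsi_cmnd *cmnd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int ret;

	ret = fc_block_rport(rport);
	if (ret)
		return ret;	/* FAST_IO_FAIL - hand straight back to scsi_eh */

	/* ... send the device reset to the (hypothetical) hardware ... */
	return SUCCESS;
}
#endif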
3768 /**
3769  * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
3770  * @cmnd: SCSI command that scsi_eh is trying to recover
3771  *
3772  * This routine can be called from a FC LLD scsi_eh callback. It
3773  * blocks the scsi_eh thread until the fc_rport leaves the
3774  * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
3775  * necessary to avoid the scsi_eh failing recovery actions for blocked
3776  * rports which would lead to offlined SCSI devices.
3777  *
3778  * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
3779  *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
3780  *	    passed back to scsi_eh.
3781  */
3782 int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3783 {
3784 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3785 
3786 	if (WARN_ON_ONCE(!rport))
3787 		return FAST_IO_FAIL;
3788 
3789 	return fc_block_rport(rport);
3790 }
3791 EXPORT_SYMBOL(fc_block_scsi_eh);
3792 
3793 /**
3794  * fc_eh_should_retry_cmd - Checks if the cmd should be retried or not
3795  * @scmd:        The SCSI command to be checked
3796  *
3797  * This checks the rport state to decide if a cmd is retryable: a cmd is
3798  * not retryable if the rport is not online and the cmd allows fast
3799  * transport failure (it is then completed with DID_TRANSPORT_MARGINAL).
3800  * Returns: true if the cmd may be retried, false otherwise.
3801  */
3802 bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd)
3803 {
3804 	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
3805 
3806 	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
3807 		(scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
3808 		set_host_byte(scmd, DID_TRANSPORT_MARGINAL);
3809 		return false;
3810 	}
3811 	return true;
3812 }
3813 EXPORT_SYMBOL_GPL(fc_eh_should_retry_cmd);
3814 
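/*
 * Illustrative sketch (editor's example): FC LLDDs typically plug the helper
 * above straight into their scsi_host_template so that scsi_eh consults the
 * rport state before retrying a command. The template name and the other
 * fields shown are placeholders, not a real driver. Guarded by #if 0.
 */
#if 0
static struct scsi_host_template example_lldd_template = {
	.module			= THIS_MODULE,
	.name			= "example_fc_lldd",
	/* ... queuecommand, eh handlers, can_queue, etc. ... */
	.eh_should_retry_cmd	= fc_eh_should_retry_cmd,
};
#endif
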
3815 /**
3816  * fc_vport_setup - allocates and creates a FC virtual port.
3817  * @shost:	scsi host the virtual port is connected to.
3818  * @channel:	Channel on the shost that the vport is connected to.
3819  * @pdev:	parent device for vport
3820  * @ids:	The world wide names, FC4 port roles, etc for
3821  *              the virtual port.
3822  * @ret_vport:	The pointer to the created vport.
3823  *
3824  * Allocates and creates the vport structure, then calls the parent host
3825  * to instantiate the vport; this completes with class and sysfs creation.
3826  *
3827  * Notes:
3828  *	This routine assumes no locks are held on entry.
3829  */
3830 static int
3831 fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3832 	struct fc_vport_identifiers  *ids, struct fc_vport **ret_vport)
3833 {
3834 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3835 	struct fc_internal *fci = to_fc_internal(shost->transportt);
3836 	struct fc_vport *vport;
3837 	struct device *dev;
3838 	unsigned long flags;
3839 	size_t size;
3840 	int error;
3841 
3842 	*ret_vport = NULL;
3843 
3844 	if (!fci->f->vport_create)
3845 		return -ENOENT;
3846 
3847 	size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3848 	vport = kzalloc(size, GFP_KERNEL);
3849 	if (unlikely(!vport)) {
3850 		printk(KERN_ERR "%s: allocation failure\n", __func__);
3851 		return -ENOMEM;
3852 	}
3853 
3854 	vport->vport_state = FC_VPORT_UNKNOWN;
3855 	vport->vport_last_state = FC_VPORT_UNKNOWN;
3856 	vport->node_name = ids->node_name;
3857 	vport->port_name = ids->port_name;
3858 	vport->roles = ids->roles;
3859 	vport->vport_type = ids->vport_type;
3860 	if (fci->f->dd_fcvport_size)
3861 		vport->dd_data = &vport[1];
3862 	vport->shost = shost;
3863 	vport->channel = channel;
3864 	vport->flags = FC_VPORT_CREATING;
3865 	INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
3866 
3867 	spin_lock_irqsave(shost->host_lock, flags);
3868 
3869 	if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
3870 		spin_unlock_irqrestore(shost->host_lock, flags);
3871 		kfree(vport);
3872 		return -ENOSPC;
3873 	}
3874 	fc_host->npiv_vports_inuse++;
3875 	vport->number = fc_host->next_vport_number++;
3876 	list_add_tail(&vport->peers, &fc_host->vports);
3877 	scsi_host_get(shost);			/* for fc_host->vport list */
3878 
3879 	spin_unlock_irqrestore(shost->host_lock, flags);
3880 
3881 	dev = &vport->dev;
3882 	device_initialize(dev);			/* takes self reference */
3883 	dev->parent = get_device(pdev);		/* takes parent reference */
3884 	dev->release = fc_vport_dev_release;
3885 	dev_set_name(dev, "vport-%d:%d-%d",
3886 		     shost->host_no, channel, vport->number);
3887 	transport_setup_device(dev);
3888 
3889 	error = device_add(dev);
3890 	if (error) {
3891 		printk(KERN_ERR "FC Virtual Port device_add failed\n");
3892 		goto delete_vport;
3893 	}
3894 	transport_add_device(dev);
3895 	transport_configure_device(dev);
3896 
3897 	error = fci->f->vport_create(vport, ids->disable);
3898 	if (error) {
3899 		printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
3900 		goto delete_vport_all;
3901 	}
3902 
3903 	/*
3904 	 * if the parent isn't the physical adapter's Scsi_Host, ensure
3905 	 * the Scsi_Host at least contains a symlink to the vport.
3906 	 */
3907 	if (pdev != &shost->shost_gendev) {
3908 		error = sysfs_create_link(&shost->shost_gendev.kobj,
3909 				 &dev->kobj, dev_name(dev));
3910 		if (error)
3911 			printk(KERN_ERR
3912 				"%s: Cannot create vport symlinks for "
3913 				"%s, err=%d\n",
3914 				__func__, dev_name(dev), error);
3915 	}
3916 	spin_lock_irqsave(shost->host_lock, flags);
3917 	vport->flags &= ~FC_VPORT_CREATING;
3918 	spin_unlock_irqrestore(shost->host_lock, flags);
3919 
3920 	dev_printk(KERN_NOTICE, pdev,
3921 			"%s created via shost%d channel %d\n", dev_name(dev),
3922 			shost->host_no, channel);
3923 
3924 	*ret_vport = vport;
3925 
3926 	return 0;
3927 
3928 delete_vport_all:
3929 	transport_remove_device(dev);
3930 	device_del(dev);
3931 delete_vport:
3932 	transport_destroy_device(dev);
3933 	spin_lock_irqsave(shost->host_lock, flags);
3934 	list_del(&vport->peers);
3935 	scsi_host_put(shost);			/* for fc_host->vport list */
3936 	fc_host->npiv_vports_inuse--;
3937 	spin_unlock_irqrestore(shost->host_lock, flags);
3938 	put_device(dev->parent);
3939 	kfree(vport);
3940 
3941 	return error;
3942 }
3943 
3944 /**
3945  * fc_vport_create - Admin App or LLDD requests creation of a vport
3946  * @shost:	scsi host the virtual port is connected to.
3947  * @channel:	channel on the shost that the vport is connected to.
3948  * @ids:	The world wide names, FC4 port roles, etc for
3949  *              the virtual port.
3950  *
3951  * Notes:
3952  *	This routine assumes no locks are held on entry.
3953  */
3954 struct fc_vport *
3955 fc_vport_create(struct Scsi_Host *shost, int channel,
3956 	struct fc_vport_identifiers *ids)
3957 {
3958 	int stat;
3959 	struct fc_vport *vport;
3960 
3961 	stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3962 		 ids, &vport);
3963 	return stat ? NULL : vport;
3964 }
3965 EXPORT_SYMBOL(fc_vport_create);
3966 
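
/*
 * Illustrative sketch (editor's example): creating an NPIV vport from a
 * hypothetical LLDD or management path, then tearing it down again with
 * fc_vport_terminate(). The WWNs are made up; fc_vport_identifiers,
 * FC_PORTTYPE_NPIV and the two calls are the real interfaces. Guarded by
 * #if 0.
 */
#if 0
static int example_create_and_destroy_npiv_port(struct Scsi_Host *shost)
{
	struct fc_vport_identifiers vid;
	struct fc_vport *vport;

	memset(&vid, 0, sizeof(vid));
	vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vid.vport_type = FC_PORTTYPE_NPIV;
	vid.node_name = 0x20000000c9000002ULL;	/* hypothetical WWNN */
	vid.port_name = 0x10000000c9000002ULL;	/* hypothetical WWPN */
	vid.disable = false;

	vport = fc_vport_create(shost, 0, &vid);
	if (!vport)
		return -ENODEV;	/* setup failed; this API does not report why */

	return fc_vport_terminate(vport);
}
#endif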
3967 /**
3968  * fc_vport_terminate - Admin App or LLDD requests termination of a vport
3969  * @vport:	fc_vport to be terminated
3970  *
3971  * Calls the LLDD vport_delete() function, then deallocates and removes
3972  * the vport from the shost and object tree.
3973  *
3974  * Notes:
3975  *	This routine assumes no locks are held on entry.
3976  */
3977 int
3978 fc_vport_terminate(struct fc_vport *vport)
3979 {
3980 	struct Scsi_Host *shost = vport_to_shost(vport);
3981 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3982 	struct fc_internal *i = to_fc_internal(shost->transportt);
3983 	struct device *dev = &vport->dev;
3984 	unsigned long flags;
3985 	int stat;
3986 
3987 	if (i->f->vport_delete)
3988 		stat = i->f->vport_delete(vport);
3989 	else
3990 		stat = -ENOENT;
3991 
3992 	spin_lock_irqsave(shost->host_lock, flags);
3993 	vport->flags &= ~FC_VPORT_DELETING;
3994 	if (!stat) {
3995 		vport->flags |= FC_VPORT_DELETED;
3996 		list_del(&vport->peers);
3997 		fc_host->npiv_vports_inuse--;
3998 		scsi_host_put(shost);		/* for fc_host->vport list */
3999 	}
4000 	spin_unlock_irqrestore(shost->host_lock, flags);
4001 
4002 	if (stat)
4003 		return stat;
4004 
4005 	if (dev->parent != &shost->shost_gendev)
4006 		sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
4007 	transport_remove_device(dev);
4008 	device_del(dev);
4009 	transport_destroy_device(dev);
4010 
4011 	/*
4012 	 * Removing our self-reference should mean our
4013 	 * release function gets called, which will drop the remaining
4014 	 * parent reference and free the data structure.
4015 	 */
4016 	put_device(dev);			/* for self-reference */
4017 
4018 	return 0; /* SUCCESS */
4019 }
4020 EXPORT_SYMBOL(fc_vport_terminate);
4021 
4022 /**
4023  * fc_vport_sched_delete - workq-based delete request for a vport
4024  * @work:	vport to be deleted.
4025  */
4026 static void
4027 fc_vport_sched_delete(struct work_struct *work)
4028 {
4029 	struct fc_vport *vport =
4030 		container_of(work, struct fc_vport, vport_delete_work);
4031 	int stat;
4032 
4033 	stat = fc_vport_terminate(vport);
4034 	if (stat)
4035 		dev_printk(KERN_ERR, vport->dev.parent,
4036 			"%s: %s could not be deleted created via "
4037 			"shost%d channel %d - error %d\n", __func__,
4038 			dev_name(&vport->dev), vport->shost->host_no,
4039 			vport->channel, stat);
4040 }
4041 
4042 
4043 /*
4044  * BSG support
4045  */
4046 
4047 /**
4048  * fc_bsg_job_timeout - handler for when a bsg request times out
4049  * @req:	request that timed out
4050  */
4051 static enum blk_eh_timer_return
4052 fc_bsg_job_timeout(struct request *req)
4053 {
4054 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
4055 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
4056 	struct fc_rport *rport = fc_bsg_to_rport(job);
4057 	struct fc_internal *i = to_fc_internal(shost->transportt);
4058 	int err = 0, inflight = 0;
4059 
4060 	if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
4061 		return BLK_EH_RESET_TIMER;
4062 
4063 	inflight = bsg_job_get(job);
4064 
4065 	if (inflight && i->f->bsg_timeout) {
4066 		/* call LLDD to abort the i/o as it has timed out */
4067 		err = i->f->bsg_timeout(job);
4068 		if (err == -EAGAIN) {
4069 			bsg_job_put(job);
4070 			return BLK_EH_RESET_TIMER;
4071 		} else if (err)
4072 			printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
4073 				"abort failed with status %d\n", err);
4074 	}
4075 
4076 	/* the blk_end_sync_io() doesn't check the error */
4077 	if (inflight)
4078 		blk_mq_end_request(req, BLK_STS_IOERR);
4079 	return BLK_EH_DONE;
4080 }
4081 
4082 /**
4083  * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
4084  * @shost:	scsi host rport attached to
4085  * @job:	bsg job to be processed
4086  */
4087 static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
4088 {
4089 	struct fc_internal *i = to_fc_internal(shost->transportt);
4090 	struct fc_bsg_request *bsg_request = job->request;
4091 	struct fc_bsg_reply *bsg_reply = job->reply;
4092 	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
4093 	int ret;
4094 
4095 	/* check if we really have all the request data needed */
4096 	if (job->request_len < cmdlen) {
4097 		ret = -ENOMSG;
4098 		goto fail_host_msg;
4099 	}
4100 
4101 	/* Validate the host command */
4102 	switch (bsg_request->msgcode) {
4103 	case FC_BSG_HST_ADD_RPORT:
4104 		cmdlen += sizeof(struct fc_bsg_host_add_rport);
4105 		break;
4106 
4107 	case FC_BSG_HST_DEL_RPORT:
4108 		cmdlen += sizeof(struct fc_bsg_host_del_rport);
4109 		break;
4110 
4111 	case FC_BSG_HST_ELS_NOLOGIN:
4112 		cmdlen += sizeof(struct fc_bsg_host_els);
4113 		/* there had better be xmt and rcv payloads */
4114 		if ((!job->request_payload.payload_len) ||
4115 		    (!job->reply_payload.payload_len)) {
4116 			ret = -EINVAL;
4117 			goto fail_host_msg;
4118 		}
4119 		break;
4120 
4121 	case FC_BSG_HST_CT:
4122 		cmdlen += sizeof(struct fc_bsg_host_ct);
4123 		/* there had better be xmt and rcv payloads */
4124 		if ((!job->request_payload.payload_len) ||
4125 		    (!job->reply_payload.payload_len)) {
4126 			ret = -EINVAL;
4127 			goto fail_host_msg;
4128 		}
4129 		break;
4130 
4131 	case FC_BSG_HST_VENDOR:
4132 		cmdlen += sizeof(struct fc_bsg_host_vendor);
4133 		if ((shost->hostt->vendor_id == 0L) ||
4134 		    (bsg_request->rqst_data.h_vendor.vendor_id !=
4135 			shost->hostt->vendor_id)) {
4136 			ret = -ESRCH;
4137 			goto fail_host_msg;
4138 		}
4139 		break;
4140 
4141 	default:
4142 		ret = -EBADR;
4143 		goto fail_host_msg;
4144 	}
4145 
4146 	ret = i->f->bsg_request(job);
4147 	if (!ret)
4148 		return 0;
4149 
4150 fail_host_msg:
4151 	/* return the errno failure code as the only status */
4152 	BUG_ON(job->reply_len < sizeof(uint32_t));
4153 	bsg_reply->reply_payload_rcv_len = 0;
4154 	bsg_reply->result = ret;
4155 	job->reply_len = sizeof(uint32_t);
4156 	bsg_job_done(job, bsg_reply->result,
4157 		       bsg_reply->reply_payload_rcv_len);
4158 	return 0;
4159 }
4160 
4161 
4162 /**
4163  * fc_bsg_goose_queue - restart rport queue in case it was stopped
4164  * @rport:	rport to be restarted
4165  */
4166 static void
4167 fc_bsg_goose_queue(struct fc_rport *rport)
4168 {
4169 	struct request_queue *q = rport->rqst_q;
4170 
4171 	if (q)
4172 		blk_mq_run_hw_queues(q, true);
4173 }
4174 
4175 /**
4176  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
4177  * @shost:	scsi host rport attached to
4178  * @job:	bsg job to be processed
4179  */
4180 static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
4181 {
4182 	struct fc_internal *i = to_fc_internal(shost->transportt);
4183 	struct fc_bsg_request *bsg_request = job->request;
4184 	struct fc_bsg_reply *bsg_reply = job->reply;
4185 	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
4186 	int ret;
4187 
4188 	/* check if we really have all the request data needed */
4189 	if (job->request_len < cmdlen) {
4190 		ret = -ENOMSG;
4191 		goto fail_rport_msg;
4192 	}
4193 
4194 	/* Validate the rport command */
4195 	switch (bsg_request->msgcode) {
4196 	case FC_BSG_RPT_ELS:
4197 		cmdlen += sizeof(struct fc_bsg_rport_els);
4198 		goto check_bidi;
4199 
4200 	case FC_BSG_RPT_CT:
4201 		cmdlen += sizeof(struct fc_bsg_rport_ct);
4202 check_bidi:
4203 		/* there had better be xmt and rcv payloads */
4204 		if ((!job->request_payload.payload_len) ||
4205 		    (!job->reply_payload.payload_len)) {
4206 			ret = -EINVAL;
4207 			goto fail_rport_msg;
4208 		}
4209 		break;
4210 	default:
4211 		ret = -EBADR;
4212 		goto fail_rport_msg;
4213 	}
4214 
4215 	ret = i->f->bsg_request(job);
4216 	if (!ret)
4217 		return 0;
4218 
4219 fail_rport_msg:
4220 	/* return the errno failure code as the only status */
4221 	BUG_ON(job->reply_len < sizeof(uint32_t));
4222 	bsg_reply->reply_payload_rcv_len = 0;
4223 	bsg_reply->result = ret;
4224 	job->reply_len = sizeof(uint32_t);
4225 	bsg_job_done(job, bsg_reply->result,
4226 		       bsg_reply->reply_payload_rcv_len);
4227 	return 0;
4228 }
4229 
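/*
 * Illustrative sketch (editor's example): the shape of a minimal LLDD
 * .bsg_request handler that the two dispatchers above hand jobs to. The
 * handler either queues the request to (hypothetical) hardware or completes
 * the job immediately with bsg_job_done(). Only bsg_job, fc_bsg_request,
 * fc_bsg_reply and bsg_job_done() are real interfaces; the handler name is
 * invented. Guarded by #if 0.
 */
#if 0
static int example_lldd_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_RPT_CT:
		/*
		 * ... map the payloads and queue the frame to the hardware;
		 * the job would be completed later, e.g. from the ISR, with
		 * bsg_job_done() ...
		 */
		return 0;
	default:
		bsg_reply->result = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return 0;
	}
}
#endif
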
4230 static int fc_bsg_dispatch(struct bsg_job *job)
4231 {
4232 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
4233 
4234 	if (scsi_is_fc_rport(job->dev))
4235 		return fc_bsg_rport_dispatch(shost, job);
4236 	else
4237 		return fc_bsg_host_dispatch(shost, job);
4238 }
4239 
4240 static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
4241 {
4242 	if (rport->port_state == FC_PORTSTATE_BLOCKED &&
4243 	    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
4244 		return BLK_STS_RESOURCE;
4245 
4246 	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
4247 		(rport->port_state != FC_PORTSTATE_MARGINAL))
4248 		return BLK_STS_IOERR;
4249 
4250 	return BLK_STS_OK;
4251 }
4252 
4253 
4254 static int fc_bsg_dispatch_prep(struct bsg_job *job)
4255 {
4256 	struct fc_rport *rport = fc_bsg_to_rport(job);
4257 	blk_status_t ret;
4258 
4259 	ret = fc_bsg_rport_prep(rport);
4260 	switch (ret) {
4261 	case BLK_STS_OK:
4262 		break;
4263 	case BLK_STS_RESOURCE:
4264 		return -EAGAIN;
4265 	default:
4266 		return -EIO;
4267 	}
4268 
4269 	return fc_bsg_dispatch(job);
4270 }
4271 
4272 /**
4273  * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
4274  * @shost:	shost for fc_host
4275  * @fc_host:	fc_host adding the structures to
4276  */
4277 static int
4278 fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
4279 {
4280 	struct device *dev = &shost->shost_gendev;
4281 	struct fc_internal *i = to_fc_internal(shost->transportt);
4282 	struct request_queue *q;
4283 	char bsg_name[20];
4284 
4285 	fc_host->rqst_q = NULL;
4286 
4287 	if (!i->f->bsg_request)
4288 		return -ENOTSUPP;
4289 
4290 	snprintf(bsg_name, sizeof(bsg_name),
4291 		 "fc_host%d", shost->host_no);
4292 
4293 	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout,
4294 				i->f->dd_bsg_size);
4295 	if (IS_ERR(q)) {
4296 		dev_err(dev,
4297 			"fc_host%d: bsg interface failed to initialize - setup queue\n",
4298 			shost->host_no);
4299 		return PTR_ERR(q);
4300 	}
4301 	__scsi_init_queue(shost, q);
4302 	blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
4303 	fc_host->rqst_q = q;
4304 	return 0;
4305 }
4306 
4307 /**
4308  * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
4309  * @shost:	shost that rport is attached to
4310  * @rport:	rport that the bsg hooks are being attached to
4311  */
4312 static int
4313 fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4314 {
4315 	struct device *dev = &rport->dev;
4316 	struct fc_internal *i = to_fc_internal(shost->transportt);
4317 	struct request_queue *q;
4318 
4319 	rport->rqst_q = NULL;
4320 
4321 	if (!i->f->bsg_request)
4322 		return -ENOTSUPP;
4323 
4324 	q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep,
4325 				fc_bsg_job_timeout, i->f->dd_bsg_size);
4326 	if (IS_ERR(q)) {
4327 		dev_err(dev, "failed to setup bsg queue\n");
4328 		return PTR_ERR(q);
4329 	}
4330 	__scsi_init_queue(shost, q);
4331 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
4332 	rport->rqst_q = q;
4333 	return 0;
4334 }
4335 
4336 
4337 /**
4338  * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
4339  * @q:	the request_queue that is to be torn down.
4340  *
4341  * Notes:
4342  *   Before unregistering the queue, empty any requests that are blocked.
4343  */
4346 static void
4347 fc_bsg_remove(struct request_queue *q)
4348 {
4349 	bsg_remove_queue(q);
4350 }
4351 
4352 
4353 /* Original Author:  Martin Hicks */
4354 MODULE_AUTHOR("James Smart");
4355 MODULE_DESCRIPTION("FC Transport Attributes");
4356 MODULE_LICENSE("GPL");
4357 
4358 module_init(fc_transport_init);
4359 module_exit(fc_transport_exit);
4360