1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <asm/unaligned.h>
58 
59 #include "mpt3sas_base.h"
60 
61 #define RAID_CHANNEL 1
62 
63 #define PCIE_CHANNEL 2
64 
65 /* forward prototypes */
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 	struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
69 
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 	struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 	u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 	struct _pcie_device *pcie_device);
77 static void
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80 
81 /* global parameters */
82 LIST_HEAD(mpt3sas_ioc_list);
83 /* global ioc lock for list operations */
84 DEFINE_SPINLOCK(gioc_lock);
85 
86 MODULE_AUTHOR(MPT3SAS_AUTHOR);
87 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
90 MODULE_ALIAS("mpt2sas");
91 
92 /* local parameters */
93 static u8 scsi_io_cb_idx = -1;
94 static u8 tm_cb_idx = -1;
95 static u8 ctl_cb_idx = -1;
96 static u8 base_cb_idx = -1;
97 static u8 port_enable_cb_idx = -1;
98 static u8 transport_cb_idx = -1;
99 static u8 scsih_cb_idx = -1;
100 static u8 config_cb_idx = -1;
101 static int mpt2_ids;
102 static int mpt3_ids;
103 
104 static u8 tm_tr_cb_idx = -1;
105 static u8 tm_tr_volume_cb_idx = -1;
106 static u8 tm_sas_control_cb_idx = -1;
107 
108 /* command line options */
109 static u32 logging_level;
110 MODULE_PARM_DESC(logging_level,
111 	" bits for enabling additional logging info (default=0)");
112 
113 
114 static ushort max_sectors = 0xFFFF;
115 module_param(max_sectors, ushort, 0444);
116 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
117 
118 
119 static int missing_delay[2] = {-1, -1};
120 module_param_array(missing_delay, int, NULL, 0444);
121 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
122 
123 /* scsi mid-layer global parameter is max_report_luns, which is 511 */
124 #define MPT3SAS_MAX_LUN (16895)
125 static u64 max_lun = MPT3SAS_MAX_LUN;
126 module_param(max_lun, ullong, 0444);
127 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
128 
129 static ushort hbas_to_enumerate;
130 module_param(hbas_to_enumerate, ushort, 0444);
131 MODULE_PARM_DESC(hbas_to_enumerate,
132 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
133 		  1 - enumerates only SAS 2.0 generation HBAs\n \
134 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
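/*
 * Illustrative only (not part of the original driver comments): module
 * parameters like the ones above are normally supplied at load time, e.g.
 *
 *	modprobe mpt3sas hbas_to_enumerate=2 logging_level=0x208
 *
 * or on the kernel command line as mpt3sas.hbas_to_enumerate=2; the values
 * shown are arbitrary examples.
 */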
135 
136 /* diag_buffer_enable is bitwise
137  * bit 0 set = TRACE
138  * bit 1 set = SNAPSHOT
139  * bit 2 set = EXTENDED
140  *
141  * Any combination of these bits can be set.
142  */
143 static int diag_buffer_enable = -1;
144 module_param(diag_buffer_enable, int, 0444);
145 MODULE_PARM_DESC(diag_buffer_enable,
146 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
147 static int disable_discovery = -1;
148 module_param(disable_discovery, int, 0444);
149 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
150 
151 
152 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
153 static int prot_mask = -1;
154 module_param(prot_mask, int, 0444);
155 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
156 
157 static bool enable_sdev_max_qd;
158 module_param(enable_sdev_max_qd, bool, 0444);
159 MODULE_PARM_DESC(enable_sdev_max_qd,
160 	"Enable sdev max qd as can_queue, def=disabled(0)");
161 
162 /* raid transport support */
163 static struct raid_template *mpt3sas_raid_template;
164 static struct raid_template *mpt2sas_raid_template;
165 
166 
167 /**
168  * struct sense_info - common structure for obtaining sense keys
169  * @skey: sense key
170  * @asc: additional sense code
171  * @ascq: additional sense code qualifier
172  */
173 struct sense_info {
174 	u8 skey;
175 	u8 asc;
176 	u8 ascq;
177 };
178 
179 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
180 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
181 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
182 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
183 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
184 /**
185  * struct fw_event_work - firmware event struct
186  * @list: linked list framework
187  * @work: work object (ioc->fault_reset_work_q)
188  * @ioc: per adapter object
189  * @device_handle: device handle
190  * @VF_ID: virtual function id
191  * @VP_ID: virtual port id
192  * @ignore: flag meaning this event has been marked to ignore
193  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
194  * @refcount: kref for this event
195  * @event_data: reply event data payload follows
196  *
197  * This object is stored on ioc->fw_event_list.
198  */
199 struct fw_event_work {
200 	struct list_head	list;
201 	struct work_struct	work;
202 
203 	struct MPT3SAS_ADAPTER *ioc;
204 	u16			device_handle;
205 	u8			VF_ID;
206 	u8			VP_ID;
207 	u8			ignore;
208 	u16			event;
209 	struct kref		refcount;
210 	char			event_data[] __aligned(4);
211 };
212 
213 static void fw_event_work_free(struct kref *r)
214 {
215 	kfree(container_of(r, struct fw_event_work, refcount));
216 }
217 
218 static void fw_event_work_get(struct fw_event_work *fw_work)
219 {
220 	kref_get(&fw_work->refcount);
221 }
222 
223 static void fw_event_work_put(struct fw_event_work *fw_work)
224 {
225 	kref_put(&fw_work->refcount, fw_event_work_free);
226 }
227 
228 static struct fw_event_work *alloc_fw_event_work(int len)
229 {
230 	struct fw_event_work *fw_event;
231 
232 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
233 	if (!fw_event)
234 		return NULL;
235 
236 	kref_init(&fw_event->refcount);
237 	return fw_event;
238 }
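/*
 * Sketch of the intended fw_event_work reference counting, based only on the
 * helpers above (not a call path copied from this file):
 *
 *	fw_event = alloc_fw_event_work(sz);	// kref_init(): refcount = 1
 *	if (!fw_event)
 *		return;
 *	fw_event_work_get(fw_event);	// extra reference for the list/work consumer
 *	// ... queue fw_event for processing ...
 *	fw_event_work_put(fw_event);	// consumer drops its reference
 *	fw_event_work_put(fw_event);	// last put calls fw_event_work_free()
 */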
239 
240 /**
241  * struct _scsi_io_transfer - scsi io transfer
242  * @handle: sas device handle (assigned by firmware)
243  * @is_raid: flag set for hidden raid components
244  * @dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
245  * @data_length: data transfer length
246  * @data_dma: dma pointer to data
247  * @sense: sense data
248  * @lun: lun number
249  * @cdb_length: cdb length
250  * @cdb: cdb contents
251  * @timeout: timeout for this command
252  * @VF_ID: virtual function id
253  * @VP_ID: virtual port id
254  * @valid_reply: flag set for reply message
255  * @sense_length: sense length
256  * @ioc_status: ioc status
257  * @scsi_state: scsi state
258  * @scsi_status: scsi status
259  * @log_info: log information
260  * @transfer_length: data length transfer when there is a reply message
261  *
262  * Used for sending internal scsi commands to devices within this module.
263  * Refer to _scsi_send_scsi_io().
264  */
265 struct _scsi_io_transfer {
266 	u16	handle;
267 	u8	is_raid;
268 	enum dma_data_direction dir;
269 	u32	data_length;
270 	dma_addr_t data_dma;
271 	u8	sense[SCSI_SENSE_BUFFERSIZE];
272 	u32	lun;
273 	u8	cdb_length;
274 	u8	cdb[32];
275 	u8	timeout;
276 	u8	VF_ID;
277 	u8	VP_ID;
278 	u8	valid_reply;
279   /* the following fields are only valid when 'valid_reply = 1' */
280 	u32	sense_length;
281 	u16	ioc_status;
282 	u8	scsi_state;
283 	u8	scsi_status;
284 	u32	log_info;
285 	u32	transfer_length;
286 };
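/*
 * Hypothetical example of filling this structure for an internal command
 * before handing it to _scsi_send_scsi_io(); the values are illustrative,
 * not taken from this driver:
 *
 *	struct _scsi_io_transfer xfer;
 *
 *	memset(&xfer, 0, sizeof(xfer));
 *	xfer.handle = handle;		// firmware-assigned device handle
 *	xfer.dir = DMA_FROM_DEVICE;
 *	xfer.data_length = 36;
 *	xfer.data_dma = data_dma;	// DMA address of a mapped 36-byte buffer
 *	xfer.cdb_length = 6;
 *	xfer.cdb[0] = INQUIRY;		// 0x12
 *	xfer.cdb[4] = 36;		// allocation length
 *	xfer.timeout = 10;
 *	// after completion, check xfer.valid_reply, then ioc_status/scsi_status
 */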
287 
288 /**
289  * _scsih_set_debug_level - global setting of ioc->logging_level.
290  * @val: ?
291  * @kp: ?
292  *
293  * Note: The logging levels are defined in mpt3sas_debug.h.
294  */
295 static int
296 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
297 {
298 	int ret = param_set_int(val, kp);
299 	struct MPT3SAS_ADAPTER *ioc;
300 
301 	if (ret)
302 		return ret;
303 
304 	pr_info("setting logging_level(0x%08x)\n", logging_level);
305 	spin_lock(&gioc_lock);
306 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
307 		ioc->logging_level = logging_level;
308 	spin_unlock(&gioc_lock);
309 	return 0;
310 }
311 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
312 	&logging_level, 0644);
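/*
 * Because logging_level is registered above with mode 0644, it can also be
 * changed at runtime through sysfs, which invokes _scsih_set_debug_level();
 * for example (the value is illustrative):
 *
 *	echo 0x3f8 > /sys/module/mpt3sas/parameters/logging_level
 */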
313 
314 /**
315  * _scsih_srch_boot_sas_address - search based on sas_address
316  * @sas_address: sas address
317  * @boot_device: boot device object from bios page 2
318  *
319  * Return: 1 when there's a match, 0 means no match.
320  */
321 static inline int
322 _scsih_srch_boot_sas_address(u64 sas_address,
323 	Mpi2BootDeviceSasWwid_t *boot_device)
324 {
325 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
326 }
327 
328 /**
329  * _scsih_srch_boot_device_name - search based on device name
330  * @device_name: device name specified in the IDENTIFY frame
331  * @boot_device: boot device object from bios page 2
332  *
333  * Return: 1 when there's a match, 0 means no match.
334  */
335 static inline int
336 _scsih_srch_boot_device_name(u64 device_name,
337 	Mpi2BootDeviceDeviceName_t *boot_device)
338 {
339 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
340 }
341 
342 /**
343  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
344  * @enclosure_logical_id: enclosure logical id
345  * @slot_number: slot number
346  * @boot_device: boot device object from bios page 2
347  *
348  * Return: 1 when there's a match, 0 means no match.
349  */
350 static inline int
351 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
352 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
353 {
354 	return (enclosure_logical_id == le64_to_cpu(boot_device->
355 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
356 	    SlotNumber)) ? 1 : 0;
357 }
358 
359 /**
360  * _scsih_is_boot_device - search for matching boot device.
361  * @sas_address: sas address
362  * @device_name: device name specified in the IDENTIFY frame
363  * @enclosure_logical_id: enclosure logical id
364  * @slot: slot number
365  * @form: specifies boot device form
366  * @boot_device: boot device object from bios page 2
367  *
368  * Return: 1 when there's a match, 0 means no match.
369  */
370 static int
371 _scsih_is_boot_device(u64 sas_address, u64 device_name,
372 	u64 enclosure_logical_id, u16 slot, u8 form,
373 	Mpi2BiosPage2BootDevice_t *boot_device)
374 {
375 	int rc = 0;
376 
377 	switch (form) {
378 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
379 		if (!sas_address)
380 			break;
381 		rc = _scsih_srch_boot_sas_address(
382 		    sas_address, &boot_device->SasWwid);
383 		break;
384 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
385 		if (!enclosure_logical_id)
386 			break;
387 		rc = _scsih_srch_boot_encl_slot(
388 		    enclosure_logical_id,
389 		    slot, &boot_device->EnclosureSlot);
390 		break;
391 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
392 		if (!device_name)
393 			break;
394 		rc = _scsih_srch_boot_device_name(
395 		    device_name, &boot_device->DeviceName);
396 		break;
397 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
398 		break;
399 	}
400 
401 	return rc;
402 }
403 
404 /**
405  * _scsih_get_sas_address - set the sas_address for given device handle
406  * @ioc: ?
407  * @handle: device handle
408  * @sas_address: sas address
409  *
410  * Return: 0 success, non-zero when failure
411  */
412 static int
413 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
414 	u64 *sas_address)
415 {
416 	Mpi2SasDevicePage0_t sas_device_pg0;
417 	Mpi2ConfigReply_t mpi_reply;
418 	u32 ioc_status;
419 
420 	*sas_address = 0;
421 
422 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
423 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
424 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
425 			__FILE__, __LINE__, __func__);
426 		return -ENXIO;
427 	}
428 
429 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
430 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
431 		/* For HBA, vSES doesn't return HBA SAS address. Instead return
432 		 * vSES's sas address.
433 		 */
434 		if ((handle <= ioc->sas_hba.num_phys) &&
435 		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
436 		   MPI2_SAS_DEVICE_INFO_SEP)))
437 			*sas_address = ioc->sas_hba.sas_address;
438 		else
439 			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
440 		return 0;
441 	}
442 
443 	/* we hit this because the given parent handle doesn't exist */
444 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
445 		return -ENXIO;
446 
447 	/* else error case */
448 	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
449 		handle, ioc_status, __FILE__, __LINE__, __func__);
450 	return -EIO;
451 }
452 
453 /**
454  * _scsih_determine_boot_device - determine boot device.
455  * @ioc: per adapter object
456  * @device: sas_device or pcie_device object
457  * @channel: SAS or PCIe channel
458  *
459  * Determines whether this device should be the first device reported to
460  * scsi-ml or the sas transport; this is used to select the persistent boot
461  * device. There are primary, alternate, and current entries in bios page 2,
462  * evaluated in that priority order (primary, alternate, then current).
463  * This routine saves the corresponding device object.
464  * The saved data is used later in _scsih_probe_boot_devices().
465  */
466 static void
467 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
468 	u32 channel)
469 {
470 	struct _sas_device *sas_device;
471 	struct _pcie_device *pcie_device;
472 	struct _raid_device *raid_device;
473 	u64 sas_address;
474 	u64 device_name;
475 	u64 enclosure_logical_id;
476 	u16 slot;
477 
478 	 /* only process this function when driver loads */
479 	if (!ioc->is_driver_loading)
480 		return;
481 
482 	 /* no Bios, return immediately */
483 	if (!ioc->bios_pg3.BiosVersion)
484 		return;
485 
486 	if (channel == RAID_CHANNEL) {
487 		raid_device = device;
488 		sas_address = raid_device->wwid;
489 		device_name = 0;
490 		enclosure_logical_id = 0;
491 		slot = 0;
492 	} else if (channel == PCIE_CHANNEL) {
493 		pcie_device = device;
494 		sas_address = pcie_device->wwid;
495 		device_name = 0;
496 		enclosure_logical_id = 0;
497 		slot = 0;
498 	} else {
499 		sas_device = device;
500 		sas_address = sas_device->sas_address;
501 		device_name = sas_device->device_name;
502 		enclosure_logical_id = sas_device->enclosure_logical_id;
503 		slot = sas_device->slot;
504 	}
505 
506 	if (!ioc->req_boot_device.device) {
507 		if (_scsih_is_boot_device(sas_address, device_name,
508 		    enclosure_logical_id, slot,
509 		    (ioc->bios_pg2.ReqBootDeviceForm &
510 		    MPI2_BIOSPAGE2_FORM_MASK),
511 		    &ioc->bios_pg2.RequestedBootDevice)) {
512 			dinitprintk(ioc,
513 				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
514 					     __func__, (u64)sas_address));
515 			ioc->req_boot_device.device = device;
516 			ioc->req_boot_device.channel = channel;
517 		}
518 	}
519 
520 	if (!ioc->req_alt_boot_device.device) {
521 		if (_scsih_is_boot_device(sas_address, device_name,
522 		    enclosure_logical_id, slot,
523 		    (ioc->bios_pg2.ReqAltBootDeviceForm &
524 		    MPI2_BIOSPAGE2_FORM_MASK),
525 		    &ioc->bios_pg2.RequestedAltBootDevice)) {
526 			dinitprintk(ioc,
527 				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
528 					     __func__, (u64)sas_address));
529 			ioc->req_alt_boot_device.device = device;
530 			ioc->req_alt_boot_device.channel = channel;
531 		}
532 	}
533 
534 	if (!ioc->current_boot_device.device) {
535 		if (_scsih_is_boot_device(sas_address, device_name,
536 		    enclosure_logical_id, slot,
537 		    (ioc->bios_pg2.CurrentBootDeviceForm &
538 		    MPI2_BIOSPAGE2_FORM_MASK),
539 		    &ioc->bios_pg2.CurrentBootDevice)) {
540 			dinitprintk(ioc,
541 				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
542 					     __func__, (u64)sas_address));
543 			ioc->current_boot_device.device = device;
544 			ioc->current_boot_device.channel = channel;
545 		}
546 	}
547 }
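/*
 * Note on the @channel argument above: RAID volumes are passed in on
 * RAID_CHANNEL and NVMe/PCIe devices on PCIE_CHANNEL; any other value
 * (the SAS path passes 0) is treated as a SAS/SATA end device.
 */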
548 
549 static struct _sas_device *
550 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
551 		struct MPT3SAS_TARGET *tgt_priv)
552 {
553 	struct _sas_device *ret;
554 
555 	assert_spin_locked(&ioc->sas_device_lock);
556 
557 	ret = tgt_priv->sas_dev;
558 	if (ret)
559 		sas_device_get(ret);
560 
561 	return ret;
562 }
563 
564 static struct _sas_device *
565 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
566 		struct MPT3SAS_TARGET *tgt_priv)
567 {
568 	struct _sas_device *ret;
569 	unsigned long flags;
570 
571 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
572 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
573 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
574 
575 	return ret;
576 }
577 
578 static struct _pcie_device *
579 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
580 	struct MPT3SAS_TARGET *tgt_priv)
581 {
582 	struct _pcie_device *ret;
583 
584 	assert_spin_locked(&ioc->pcie_device_lock);
585 
586 	ret = tgt_priv->pcie_dev;
587 	if (ret)
588 		pcie_device_get(ret);
589 
590 	return ret;
591 }
592 
593 /**
594  * mpt3sas_get_pdev_from_target - pcie device search
595  * @ioc: per adapter object
596  * @tgt_priv: starget private object
597  *
598  * Context: This function acquires ioc->pcie_device_lock and releases it
599  * before returning the pcie_device object.
600  *
601  * This searches for the pcie_device of the given target, then returns the pcie_device object.
602  */
603 static struct _pcie_device *
604 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
605 	struct MPT3SAS_TARGET *tgt_priv)
606 {
607 	struct _pcie_device *ret;
608 	unsigned long flags;
609 
610 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
611 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
612 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
613 
614 	return ret;
615 }
616 
617 struct _sas_device *
618 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
619 					u64 sas_address)
620 {
621 	struct _sas_device *sas_device;
622 
623 	assert_spin_locked(&ioc->sas_device_lock);
624 
625 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
626 		if (sas_device->sas_address == sas_address)
627 			goto found_device;
628 
629 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
630 		if (sas_device->sas_address == sas_address)
631 			goto found_device;
632 
633 	return NULL;
634 
635 found_device:
636 	sas_device_get(sas_device);
637 	return sas_device;
638 }
639 
640 /**
641  * mpt3sas_get_sdev_by_addr - sas device search
642  * @ioc: per adapter object
643  * @sas_address: sas address
644  * Context: This function acquires ioc->sas_device_lock and releases it.
645  *
646  * This searches for sas_device based on sas_address, then returns the
647  * sas_device object.
648  */
649 struct _sas_device *
650 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
651 	u64 sas_address)
652 {
653 	struct _sas_device *sas_device;
654 	unsigned long flags;
655 
656 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
657 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
658 			sas_address);
659 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
660 
661 	return sas_device;
662 }
663 
664 static struct _sas_device *
665 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
666 {
667 	struct _sas_device *sas_device;
668 
669 	assert_spin_locked(&ioc->sas_device_lock);
670 
671 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
672 		if (sas_device->handle == handle)
673 			goto found_device;
674 
675 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
676 		if (sas_device->handle == handle)
677 			goto found_device;
678 
679 	return NULL;
680 
681 found_device:
682 	sas_device_get(sas_device);
683 	return sas_device;
684 }
685 
686 /**
687  * mpt3sas_get_sdev_by_handle - sas device search
688  * @ioc: per adapter object
689  * @handle: sas device handle (assigned by firmware)
690  * Context: This function acquires ioc->sas_device_lock and releases it.
691  *
692  * This searches for sas_device based on handle, then returns the sas_device
693  * object.
694  */
695 struct _sas_device *
696 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
697 {
698 	struct _sas_device *sas_device;
699 	unsigned long flags;
700 
701 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
702 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
703 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
704 
705 	return sas_device;
706 }
707 
708 /**
709  * _scsih_display_enclosure_chassis_info - display device location info
710  * @ioc: per adapter object
711  * @sas_device: per sas device object
712  * @sdev: scsi device struct
713  * @starget: scsi target struct
714  */
715 static void
716 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
717 	struct _sas_device *sas_device, struct scsi_device *sdev,
718 	struct scsi_target *starget)
719 {
720 	if (sdev) {
721 		if (sas_device->enclosure_handle != 0)
722 			sdev_printk(KERN_INFO, sdev,
723 			    "enclosure logical id (0x%016llx), slot(%d) \n",
724 			    (unsigned long long)
725 			    sas_device->enclosure_logical_id,
726 			    sas_device->slot);
727 		if (sas_device->connector_name[0] != '\0')
728 			sdev_printk(KERN_INFO, sdev,
729 			    "enclosure level(0x%04x), connector name( %s)\n",
730 			    sas_device->enclosure_level,
731 			    sas_device->connector_name);
732 		if (sas_device->is_chassis_slot_valid)
733 			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
734 			    sas_device->chassis_slot);
735 	} else if (starget) {
736 		if (sas_device->enclosure_handle != 0)
737 			starget_printk(KERN_INFO, starget,
738 			    "enclosure logical id(0x%016llx), slot(%d) \n",
739 			    (unsigned long long)
740 			    sas_device->enclosure_logical_id,
741 			    sas_device->slot);
742 		if (sas_device->connector_name[0] != '\0')
743 			starget_printk(KERN_INFO, starget,
744 			    "enclosure level(0x%04x), connector name( %s)\n",
745 			    sas_device->enclosure_level,
746 			    sas_device->connector_name);
747 		if (sas_device->is_chassis_slot_valid)
748 			starget_printk(KERN_INFO, starget,
749 			    "chassis slot(0x%04x)\n",
750 			    sas_device->chassis_slot);
751 	} else {
752 		if (sas_device->enclosure_handle != 0)
753 			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
754 				 (u64)sas_device->enclosure_logical_id,
755 				 sas_device->slot);
756 		if (sas_device->connector_name[0] != '\0')
757 			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
758 				 sas_device->enclosure_level,
759 				 sas_device->connector_name);
760 		if (sas_device->is_chassis_slot_valid)
761 			ioc_info(ioc, "chassis slot(0x%04x)\n",
762 				 sas_device->chassis_slot);
763 	}
764 }
765 
766 /**
767  * _scsih_sas_device_remove - remove sas_device from list.
768  * @ioc: per adapter object
769  * @sas_device: the sas_device object
770  * Context: This function will acquire ioc->sas_device_lock.
771  *
772  * If sas_device is on the list, remove it and decrement its reference count.
773  */
774 static void
775 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
776 	struct _sas_device *sas_device)
777 {
778 	unsigned long flags;
779 
780 	if (!sas_device)
781 		return;
782 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
783 		 sas_device->handle, (u64)sas_device->sas_address);
784 
785 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
786 
787 	/*
788 	 * The lock serializes access to the list, but we still need to verify
789 	 * that nobody removed the entry while we were waiting on the lock.
790 	 */
791 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
792 	if (!list_empty(&sas_device->list)) {
793 		list_del_init(&sas_device->list);
794 		sas_device_put(sas_device);
795 	}
796 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
797 }
798 
799 /**
800  * _scsih_device_remove_by_handle - removing device object by handle
801  * @ioc: per adapter object
802  * @handle: device handle
803  */
804 static void
805 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
806 {
807 	struct _sas_device *sas_device;
808 	unsigned long flags;
809 
810 	if (ioc->shost_recovery)
811 		return;
812 
813 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
814 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
815 	if (sas_device) {
816 		list_del_init(&sas_device->list);
817 		sas_device_put(sas_device);
818 	}
819 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
820 	if (sas_device) {
821 		_scsih_remove_device(ioc, sas_device);
822 		sas_device_put(sas_device);
823 	}
824 }
825 
826 /**
827  * mpt3sas_device_remove_by_sas_address - removing device object by sas address
828  * @ioc: per adapter object
829  * @sas_address: device sas_address
830  */
831 void
832 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
833 	u64 sas_address)
834 {
835 	struct _sas_device *sas_device;
836 	unsigned long flags;
837 
838 	if (ioc->shost_recovery)
839 		return;
840 
841 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
842 	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
843 	if (sas_device) {
844 		list_del_init(&sas_device->list);
845 		sas_device_put(sas_device);
846 	}
847 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
848 	if (sas_device) {
849 		_scsih_remove_device(ioc, sas_device);
850 		sas_device_put(sas_device);
851 	}
852 }
853 
854 /**
855  * _scsih_sas_device_add - insert sas_device to the list.
856  * @ioc: per adapter object
857  * @sas_device: the sas_device object
858  * Context: This function will acquire ioc->sas_device_lock.
859  *
860  * Adding new object to the ioc->sas_device_list.
861  */
862 static void
863 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
864 	struct _sas_device *sas_device)
865 {
866 	unsigned long flags;
867 
868 	dewtprintk(ioc,
869 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
870 			    __func__, sas_device->handle,
871 			    (u64)sas_device->sas_address));
872 
873 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
874 	    NULL, NULL));
875 
876 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
877 	sas_device_get(sas_device);
878 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
879 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
880 
881 	if (ioc->hide_drives) {
882 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
883 		return;
884 	}
885 
886 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
887 	     sas_device->sas_address_parent)) {
888 		_scsih_sas_device_remove(ioc, sas_device);
889 	} else if (!sas_device->starget) {
890 		/*
891 		 * When async scanning is enabled, it's not possible to remove
892 		 * devices while scanning is turned on due to an oops in
893 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
894 		 */
895 		if (!ioc->is_driver_loading) {
896 			mpt3sas_transport_port_remove(ioc,
897 			    sas_device->sas_address,
898 			    sas_device->sas_address_parent);
899 			_scsih_sas_device_remove(ioc, sas_device);
900 		}
901 	} else
902 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
903 }
904 
905 /**
906  * _scsih_sas_device_init_add - insert sas_device to the list.
907  * @ioc: per adapter object
908  * @sas_device: the sas_device object
909  * Context: This function will acquire ioc->sas_device_lock.
910  *
911  * Adding new object at driver load time to the ioc->sas_device_init_list.
912  */
913 static void
914 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
915 	struct _sas_device *sas_device)
916 {
917 	unsigned long flags;
918 
919 	dewtprintk(ioc,
920 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
921 			    __func__, sas_device->handle,
922 			    (u64)sas_device->sas_address));
923 
924 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
925 	    NULL, NULL));
926 
927 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
928 	sas_device_get(sas_device);
929 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
930 	_scsih_determine_boot_device(ioc, sas_device, 0);
931 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
932 }
933 
934 
935 static struct _pcie_device *
936 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
937 {
938 	struct _pcie_device *pcie_device;
939 
940 	assert_spin_locked(&ioc->pcie_device_lock);
941 
942 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
943 		if (pcie_device->wwid == wwid)
944 			goto found_device;
945 
946 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
947 		if (pcie_device->wwid == wwid)
948 			goto found_device;
949 
950 	return NULL;
951 
952 found_device:
953 	pcie_device_get(pcie_device);
954 	return pcie_device;
955 }
956 
957 
958 /**
959  * mpt3sas_get_pdev_by_wwid - pcie device search
960  * @ioc: per adapter object
961  * @wwid: wwid
962  *
963  * Context: This function acquires ioc->pcie_device_lock and releases it
964  * before returning the pcie_device object.
965  *
966  * This searches for pcie_device based on wwid, then returns the pcie_device object.
967  */
968 static struct _pcie_device *
969 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
970 {
971 	struct _pcie_device *pcie_device;
972 	unsigned long flags;
973 
974 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
975 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
976 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
977 
978 	return pcie_device;
979 }
980 
981 
982 static struct _pcie_device *
983 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
984 	int channel)
985 {
986 	struct _pcie_device *pcie_device;
987 
988 	assert_spin_locked(&ioc->pcie_device_lock);
989 
990 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
991 		if (pcie_device->id == id && pcie_device->channel == channel)
992 			goto found_device;
993 
994 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
995 		if (pcie_device->id == id && pcie_device->channel == channel)
996 			goto found_device;
997 
998 	return NULL;
999 
1000 found_device:
1001 	pcie_device_get(pcie_device);
1002 	return pcie_device;
1003 }
1004 
1005 static struct _pcie_device *
1006 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1007 {
1008 	struct _pcie_device *pcie_device;
1009 
1010 	assert_spin_locked(&ioc->pcie_device_lock);
1011 
1012 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1013 		if (pcie_device->handle == handle)
1014 			goto found_device;
1015 
1016 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1017 		if (pcie_device->handle == handle)
1018 			goto found_device;
1019 
1020 	return NULL;
1021 
1022 found_device:
1023 	pcie_device_get(pcie_device);
1024 	return pcie_device;
1025 }
1026 
1027 
1028 /**
1029  * mpt3sas_get_pdev_by_handle - pcie device search
1030  * @ioc: per adapter object
1031  * @handle: Firmware device handle
1032  *
1033  * Context: This function acquires ioc->pcie_device_lock and releases it
1034  * before returning the pcie_device object.
1035  *
1036  * This searches for pcie_device based on handle, then returns the pcie_device
1037  * object.
1038  */
1039 struct _pcie_device *
1040 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1041 {
1042 	struct _pcie_device *pcie_device;
1043 	unsigned long flags;
1044 
1045 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1046 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1047 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1048 
1049 	return pcie_device;
1050 }
1051 
1052 /**
1053  * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1054  * @ioc: per adapter object
1055  * Context: This function will acquire ioc->pcie_device_lock
1056  *
1057  * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
1058  * reported among all available NVMe drives.
1059  * The minimum max_shutdown_latency is six seconds.
1060  */
1061 static void
1062 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1063 {
1064 	struct _pcie_device *pcie_device;
1065 	unsigned long flags;
1066 	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1067 
1068 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1069 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1070 		if (pcie_device->shutdown_latency) {
1071 			if (shutdown_latency < pcie_device->shutdown_latency)
1072 				shutdown_latency =
1073 					pcie_device->shutdown_latency;
1074 		}
1075 	}
1076 	ioc->max_shutdown_latency = shutdown_latency;
1077 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1078 }
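/*
 * Worked example for the routine above (assuming the six second
 * IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT floor): with drives reporting RTD3 Entry
 * Latencies of 2, 8 and 10 seconds, max_shutdown_latency becomes 10; if no
 * drive reports more than six seconds, it stays at six.
 */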
1079 
1080 /**
1081  * _scsih_pcie_device_remove - remove pcie_device from list.
1082  * @ioc: per adapter object
1083  * @pcie_device: the pcie_device object
1084  * Context: This function will acquire ioc->pcie_device_lock.
1085  *
1086  * If pcie_device is on the list, remove it and decrement its reference count.
1087  */
1088 static void
1089 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1090 	struct _pcie_device *pcie_device)
1091 {
1092 	unsigned long flags;
1093 	int was_on_pcie_device_list = 0;
1094 	u8 update_latency = 0;
1095 
1096 	if (!pcie_device)
1097 		return;
1098 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1099 		 pcie_device->handle, (u64)pcie_device->wwid);
1100 	if (pcie_device->enclosure_handle != 0)
1101 		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1102 			 (u64)pcie_device->enclosure_logical_id,
1103 			 pcie_device->slot);
1104 	if (pcie_device->connector_name[0] != '\0')
1105 		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1106 			 pcie_device->enclosure_level,
1107 			 pcie_device->connector_name);
1108 
1109 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1110 	if (!list_empty(&pcie_device->list)) {
1111 		list_del_init(&pcie_device->list);
1112 		was_on_pcie_device_list = 1;
1113 	}
1114 	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1115 		update_latency = 1;
1116 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1117 	if (was_on_pcie_device_list) {
1118 		kfree(pcie_device->serial_number);
1119 		pcie_device_put(pcie_device);
1120 	}
1121 
1122 	/*
1123 	 * This device's RTD3 Entry Latency matches IOC's
1124 	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1125 	 * from the available drives as current drive is getting removed.
1126 	 */
1127 	if (update_latency)
1128 		_scsih_set_nvme_max_shutdown_latency(ioc);
1129 }
1130 
1131 
1132 /**
1133  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1134  * @ioc: per adapter object
1135  * @handle: device handle
1136  */
1137 static void
1138 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1139 {
1140 	struct _pcie_device *pcie_device;
1141 	unsigned long flags;
1142 	int was_on_pcie_device_list = 0;
1143 	u8 update_latency = 0;
1144 
1145 	if (ioc->shost_recovery)
1146 		return;
1147 
1148 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1149 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1150 	if (pcie_device) {
1151 		if (!list_empty(&pcie_device->list)) {
1152 			list_del_init(&pcie_device->list);
1153 			was_on_pcie_device_list = 1;
1154 			pcie_device_put(pcie_device);
1155 		}
1156 		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1157 			update_latency = 1;
1158 	}
1159 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1160 	if (was_on_pcie_device_list) {
1161 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1162 		pcie_device_put(pcie_device);
1163 	}
1164 
1165 	/*
1166 	 * This device's RTD3 Entry Latency matches IOC's
1167 	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1168 	 * from the available drives as current drive is getting removed.
1169 	 */
1170 	if (update_latency)
1171 		_scsih_set_nvme_max_shutdown_latency(ioc);
1172 }
1173 
1174 /**
1175  * _scsih_pcie_device_add - add pcie_device object
1176  * @ioc: per adapter object
1177  * @pcie_device: pcie_device object
1178  *
1179  * This is added to the pcie_device_list linked list.
1180  */
1181 static void
1182 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1183 	struct _pcie_device *pcie_device)
1184 {
1185 	unsigned long flags;
1186 
1187 	dewtprintk(ioc,
1188 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1189 			    __func__,
1190 			    pcie_device->handle, (u64)pcie_device->wwid));
1191 	if (pcie_device->enclosure_handle != 0)
1192 		dewtprintk(ioc,
1193 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1194 				    __func__,
1195 				    (u64)pcie_device->enclosure_logical_id,
1196 				    pcie_device->slot));
1197 	if (pcie_device->connector_name[0] != '\0')
1198 		dewtprintk(ioc,
1199 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1200 				    __func__, pcie_device->enclosure_level,
1201 				    pcie_device->connector_name));
1202 
1203 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1204 	pcie_device_get(pcie_device);
1205 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1206 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1207 
1208 	if (pcie_device->access_status ==
1209 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1210 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1211 		return;
1212 	}
1213 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1214 		_scsih_pcie_device_remove(ioc, pcie_device);
1215 	} else if (!pcie_device->starget) {
1216 		if (!ioc->is_driver_loading) {
1217 /*TODO-- Need to find out whether this condition will occur or not*/
1218 			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1219 		}
1220 	} else
1221 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1222 }
1223 
1224 /*
1225  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1226  * @ioc: per adapter object
1227  * @pcie_device: the pcie_device object
1228  * Context: This function will acquire ioc->pcie_device_lock.
1229  *
1230  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1231  */
1232 static void
1233 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1234 				struct _pcie_device *pcie_device)
1235 {
1236 	unsigned long flags;
1237 
1238 	dewtprintk(ioc,
1239 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1240 			    __func__,
1241 			    pcie_device->handle, (u64)pcie_device->wwid));
1242 	if (pcie_device->enclosure_handle != 0)
1243 		dewtprintk(ioc,
1244 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1245 				    __func__,
1246 				    (u64)pcie_device->enclosure_logical_id,
1247 				    pcie_device->slot));
1248 	if (pcie_device->connector_name[0] != '\0')
1249 		dewtprintk(ioc,
1250 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1251 				    __func__, pcie_device->enclosure_level,
1252 				    pcie_device->connector_name));
1253 
1254 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1255 	pcie_device_get(pcie_device);
1256 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1257 	if (pcie_device->access_status !=
1258 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1259 		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1260 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1261 }
1262 /**
1263  * _scsih_raid_device_find_by_id - raid device search
1264  * @ioc: per adapter object
1265  * @id: sas device target id
1266  * @channel: sas device channel
1267  * Context: Calling function should acquire ioc->raid_device_lock
1268  *
1269  * This searches for raid_device based on target id, then returns the raid_device
1270  * object.
1271  */
1272 static struct _raid_device *
1273 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1274 {
1275 	struct _raid_device *raid_device, *r;
1276 
1277 	r = NULL;
1278 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1279 		if (raid_device->id == id && raid_device->channel == channel) {
1280 			r = raid_device;
1281 			goto out;
1282 		}
1283 	}
1284 
1285  out:
1286 	return r;
1287 }
1288 
1289 /**
1290  * mpt3sas_raid_device_find_by_handle - raid device search
1291  * @ioc: per adapter object
1292  * @handle: sas device handle (assigned by firmware)
1293  * Context: Calling function should acquire ioc->raid_device_lock
1294  *
1295  * This searches for raid_device based on handle, then returns the raid_device
1296  * object.
1297  */
1298 struct _raid_device *
1299 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1300 {
1301 	struct _raid_device *raid_device, *r;
1302 
1303 	r = NULL;
1304 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1305 		if (raid_device->handle != handle)
1306 			continue;
1307 		r = raid_device;
1308 		goto out;
1309 	}
1310 
1311  out:
1312 	return r;
1313 }
1314 
1315 /**
1316  * _scsih_raid_device_find_by_wwid - raid device search
1317  * @ioc: per adapter object
1318  * @wwid: ?
1319  * Context: Calling function should acquire ioc->raid_device_lock
1320  *
1321  * This searches for raid_device based on wwid, then returns the raid_device
1322  * object.
1323  */
1324 static struct _raid_device *
1325 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1326 {
1327 	struct _raid_device *raid_device, *r;
1328 
1329 	r = NULL;
1330 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1331 		if (raid_device->wwid != wwid)
1332 			continue;
1333 		r = raid_device;
1334 		goto out;
1335 	}
1336 
1337  out:
1338 	return r;
1339 }
1340 
1341 /**
1342  * _scsih_raid_device_add - add raid_device object
1343  * @ioc: per adapter object
1344  * @raid_device: raid_device object
1345  *
1346  * This is added to the raid_device_list linked list.
1347  */
1348 static void
1349 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1350 	struct _raid_device *raid_device)
1351 {
1352 	unsigned long flags;
1353 
1354 	dewtprintk(ioc,
1355 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1356 			    __func__,
1357 			    raid_device->handle, (u64)raid_device->wwid));
1358 
1359 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1360 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1361 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1362 }
1363 
1364 /**
1365  * _scsih_raid_device_remove - delete raid_device object
1366  * @ioc: per adapter object
1367  * @raid_device: raid_device object
1368  *
1369  */
1370 static void
1371 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1372 	struct _raid_device *raid_device)
1373 {
1374 	unsigned long flags;
1375 
1376 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1377 	list_del(&raid_device->list);
1378 	kfree(raid_device);
1379 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1380 }
1381 
1382 /**
1383  * mpt3sas_scsih_expander_find_by_handle - expander device search
1384  * @ioc: per adapter object
1385  * @handle: expander handle (assigned by firmware)
1386  * Context: Calling function should acquire ioc->sas_node_lock
1387  *
1388  * This searches for expander device based on handle, then returns the
1389  * sas_node object.
1390  */
1391 struct _sas_node *
1392 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1393 {
1394 	struct _sas_node *sas_expander, *r;
1395 
1396 	r = NULL;
1397 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1398 		if (sas_expander->handle != handle)
1399 			continue;
1400 		r = sas_expander;
1401 		goto out;
1402 	}
1403  out:
1404 	return r;
1405 }
1406 
1407 /**
1408  * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1409  * @ioc: per adapter object
1410  * @handle: enclosure handle (assigned by firmware)
1411  * Context: Calling function should acquire ioc->sas_device_lock
1412  *
1413  * This searches for enclosure device based on handle, then returns the
1414  * enclosure object.
1415  */
1416 static struct _enclosure_node *
1417 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1418 {
1419 	struct _enclosure_node *enclosure_dev, *r;
1420 
1421 	r = NULL;
1422 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1423 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1424 			continue;
1425 		r = enclosure_dev;
1426 		goto out;
1427 	}
1428 out:
1429 	return r;
1430 }
1431 /**
1432  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1433  * @ioc: per adapter object
1434  * @sas_address: sas address
1435  * Context: Calling function should acquire ioc->sas_node_lock.
1436  *
1437  * This searches for expander device based on sas_address, then returns the
1438  * sas_node object.
1439  */
1440 struct _sas_node *
1441 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1442 	u64 sas_address)
1443 {
1444 	struct _sas_node *sas_expander, *r;
1445 
1446 	r = NULL;
1447 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1448 		if (sas_expander->sas_address != sas_address)
1449 			continue;
1450 		r = sas_expander;
1451 		goto out;
1452 	}
1453  out:
1454 	return r;
1455 }
1456 
1457 /**
1458  * _scsih_expander_node_add - insert expander device to the list.
1459  * @ioc: per adapter object
1460  * @sas_expander: the sas_device object
1461  * Context: This function will acquire ioc->sas_node_lock.
1462  *
1463  * Adding new object to the ioc->sas_expander_list.
1464  */
1465 static void
1466 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1467 	struct _sas_node *sas_expander)
1468 {
1469 	unsigned long flags;
1470 
1471 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1472 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1473 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1474 }
1475 
1476 /**
1477  * _scsih_is_end_device - determines if device is an end device
1478  * @device_info: bitfield providing information about the device.
1479  * Context: none
1480  *
1481  * Return: 1 if end device.
1482  */
1483 static int
1484 _scsih_is_end_device(u32 device_info)
1485 {
1486 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1487 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1488 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1489 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1490 		return 1;
1491 	else
1492 		return 0;
1493 }
1494 
1495 /**
1496  * _scsih_is_nvme_pciescsi_device - determines if
1497  *			device is a pcie nvme/scsi device
1498  * @device_info: bitfield providing information about the device.
1499  * Context: none
1500  *
1501  * Return: 1 if the device is a pcie nvme or scsi device.
1502  */
1503 static int
1504 _scsih_is_nvme_pciescsi_device(u32 device_info)
1505 {
1506 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1507 	    == MPI26_PCIE_DEVINFO_NVME) ||
1508 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1509 	    == MPI26_PCIE_DEVINFO_SCSI))
1510 		return 1;
1511 	else
1512 		return 0;
1513 }
1514 
1515 /**
1516  * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1517  * @ioc: per adapter object
1518  * @id: target id
1519  * @channel: channel
1520  * Context: This function will acquire ioc->scsi_lookup_lock.
1521  *
1522  * This will search for a matching channel:id in the scsi_lookup array,
1523  * returning 1 if found.
1524  */
1525 static u8
1526 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1527 	int channel)
1528 {
1529 	int smid;
1530 	struct scsi_cmnd *scmd;
1531 
1532 	for (smid = 1;
1533 	     smid <= ioc->shost->can_queue; smid++) {
1534 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1535 		if (!scmd)
1536 			continue;
1537 		if (scmd->device->id == id &&
1538 		    scmd->device->channel == channel)
1539 			return 1;
1540 	}
1541 	return 0;
1542 }
1543 
1544 /**
1545  * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1546  * @ioc: per adapter object
1547  * @id: target id
1548  * @lun: lun number
1549  * @channel: channel
1550  * Context: This function will acquire ioc->scsi_lookup_lock.
1551  *
1552  * This will search for a matching channel:id:lun in the scsi_lookup array,
1553  * returning 1 if found.
1554  */
1555 static u8
1556 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1557 	unsigned int lun, int channel)
1558 {
1559 	int smid;
1560 	struct scsi_cmnd *scmd;
1561 
1562 	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1563 
1564 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1565 		if (!scmd)
1566 			continue;
1567 		if (scmd->device->id == id &&
1568 		    scmd->device->channel == channel &&
1569 		    scmd->device->lun == lun)
1570 			return 1;
1571 	}
1572 	return 0;
1573 }
1574 
1575 /**
1576  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1577  * @ioc: per adapter object
1578  * @smid: system request message index
1579  *
1580  * Return: the scmd pointer stored for this smid, or NULL when the smid is
1581  * not associated with an outstanding SCSI IO.
1582  */
1583 struct scsi_cmnd *
1584 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1585 {
1586 	struct scsi_cmnd *scmd = NULL;
1587 	struct scsiio_tracker *st;
1588 	Mpi25SCSIIORequest_t *mpi_request;
1589 
1590 	if (smid > 0  &&
1591 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1592 		u32 unique_tag = smid - 1;
1593 
1594 		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1595 
1596 		/*
1597 		 * If SCSI IO request is outstanding at driver level then
1598 		 * DevHandle field must be non-zero. If DevHandle is zero
1599 		 * then it means that this smid is free at driver level,
1600 		 * so return NULL.
1601 		 */
1602 		if (!mpi_request->DevHandle)
1603 			return scmd;
1604 
1605 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1606 		if (scmd) {
1607 			st = scsi_cmd_priv(scmd);
1608 			if (st->cb_idx == 0xFF || st->smid == 0)
1609 				scmd = NULL;
1610 		}
1611 	}
1612 	return scmd;
1613 }
1614 
1615 /**
1616  * scsih_change_queue_depth - setting device queue depth
1617  * @sdev: scsi device struct
1618  * @qdepth: requested queue depth
1619  *
1620  * Return: queue depth.
1621  */
1622 static int
1623 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1624 {
1625 	struct Scsi_Host *shost = sdev->host;
1626 	int max_depth;
1627 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1628 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1629 	struct MPT3SAS_TARGET *sas_target_priv_data;
1630 	struct _sas_device *sas_device;
1631 	unsigned long flags;
1632 
1633 	max_depth = shost->can_queue;
1634 
1635 	/*
1636 	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1637 	 * is disabled.
1638 	 */
1639 	if (ioc->enable_sdev_max_qd)
1640 		goto not_sata;
1641 
1642 	sas_device_priv_data = sdev->hostdata;
1643 	if (!sas_device_priv_data)
1644 		goto not_sata;
1645 	sas_target_priv_data = sas_device_priv_data->sas_target;
1646 	if (!sas_target_priv_data)
1647 		goto not_sata;
1648 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1649 		goto not_sata;
1650 
1651 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1652 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1653 	if (sas_device) {
1654 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1655 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1656 
1657 		sas_device_put(sas_device);
1658 	}
1659 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1660 
1661  not_sata:
1662 
1663 	if (!sdev->tagged_supported)
1664 		max_depth = 1;
1665 	if (qdepth > max_depth)
1666 		qdepth = max_depth;
1667 	scsi_change_queue_depth(sdev, qdepth);
1668 	sdev_printk(KERN_INFO, sdev,
1669 	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1670 	    sdev->queue_depth, sdev->tagged_supported,
1671 	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1672 	return sdev->queue_depth;
1673 }
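
/*
 * Illustrative example, not part of the driver: with enable_sdev_max_qd
 * disabled, a request such as
 *
 *	scsih_change_queue_depth(sdev, 254);
 *
 * on a bare SATA disk is clamped to MPT3SAS_SATA_QUEUE_DEPTH (32),
 * whereas the same call for an SSP device or a RAID volume may use up
 * to shost->can_queue; an untagged device is always limited to 1.
 */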
1674 
1675 /**
1676  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1677  * @sdev: scsi device struct
1678  * @qdepth: requested queue depth
1679  *
1680  * Returns nothing.
1681  */
1682 void
1683 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1684 {
1685 	struct Scsi_Host *shost = sdev->host;
1686 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1687 
1688 	if (ioc->enable_sdev_max_qd)
1689 		qdepth = shost->can_queue;
1690 
1691 	scsih_change_queue_depth(sdev, qdepth);
1692 }
1693 
1694 /**
1695  * scsih_target_alloc - target add routine
1696  * @starget: scsi target struct
1697  *
1698  * Return: 0 if ok. Any other return is assumed to be an error and
1699  * the device is ignored.
1700  */
1701 static int
1702 scsih_target_alloc(struct scsi_target *starget)
1703 {
1704 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1705 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1706 	struct MPT3SAS_TARGET *sas_target_priv_data;
1707 	struct _sas_device *sas_device;
1708 	struct _raid_device *raid_device;
1709 	struct _pcie_device *pcie_device;
1710 	unsigned long flags;
1711 	struct sas_rphy *rphy;
1712 
1713 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1714 				       GFP_KERNEL);
1715 	if (!sas_target_priv_data)
1716 		return -ENOMEM;
1717 
1718 	starget->hostdata = sas_target_priv_data;
1719 	sas_target_priv_data->starget = starget;
1720 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1721 
1722 	/* RAID volumes */
1723 	if (starget->channel == RAID_CHANNEL) {
1724 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1725 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1726 		    starget->channel);
1727 		if (raid_device) {
1728 			sas_target_priv_data->handle = raid_device->handle;
1729 			sas_target_priv_data->sas_address = raid_device->wwid;
1730 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1731 			if (ioc->is_warpdrive)
1732 				sas_target_priv_data->raid_device = raid_device;
1733 			raid_device->starget = starget;
1734 		}
1735 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1736 		return 0;
1737 	}
1738 
1739 	/* PCIe devices */
1740 	if (starget->channel == PCIE_CHANNEL) {
1741 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1742 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1743 			starget->channel);
1744 		if (pcie_device) {
1745 			sas_target_priv_data->handle = pcie_device->handle;
1746 			sas_target_priv_data->sas_address = pcie_device->wwid;
1747 			sas_target_priv_data->pcie_dev = pcie_device;
1748 			pcie_device->starget = starget;
1749 			pcie_device->id = starget->id;
1750 			pcie_device->channel = starget->channel;
1751 			sas_target_priv_data->flags |=
1752 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1753 			if (pcie_device->fast_path)
1754 				sas_target_priv_data->flags |=
1755 					MPT_TARGET_FASTPATH_IO;
1756 		}
1757 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1758 		return 0;
1759 	}
1760 
1761 	/* sas/sata devices */
1762 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1763 	rphy = dev_to_rphy(starget->dev.parent);
1764 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1765 	   rphy->identify.sas_address);
1766 
1767 	if (sas_device) {
1768 		sas_target_priv_data->handle = sas_device->handle;
1769 		sas_target_priv_data->sas_address = sas_device->sas_address;
1770 		sas_target_priv_data->sas_dev = sas_device;
1771 		sas_device->starget = starget;
1772 		sas_device->id = starget->id;
1773 		sas_device->channel = starget->channel;
1774 		if (test_bit(sas_device->handle, ioc->pd_handles))
1775 			sas_target_priv_data->flags |=
1776 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1777 		if (sas_device->fast_path)
1778 			sas_target_priv_data->flags |=
1779 					MPT_TARGET_FASTPATH_IO;
1780 	}
1781 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1782 
1783 	return 0;
1784 }
1785 
1786 /**
1787  * scsih_target_destroy - target destroy routine
1788  * @starget: scsi target struct
1789  */
1790 static void
1791 scsih_target_destroy(struct scsi_target *starget)
1792 {
1793 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1794 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1795 	struct MPT3SAS_TARGET *sas_target_priv_data;
1796 	struct _sas_device *sas_device;
1797 	struct _raid_device *raid_device;
1798 	struct _pcie_device *pcie_device;
1799 	unsigned long flags;
1800 
1801 	sas_target_priv_data = starget->hostdata;
1802 	if (!sas_target_priv_data)
1803 		return;
1804 
1805 	if (starget->channel == RAID_CHANNEL) {
1806 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1807 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1808 		    starget->channel);
1809 		if (raid_device) {
1810 			raid_device->starget = NULL;
1811 			raid_device->sdev = NULL;
1812 		}
1813 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1814 		goto out;
1815 	}
1816 
1817 	if (starget->channel == PCIE_CHANNEL) {
1818 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1819 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1820 							sas_target_priv_data);
1821 		if (pcie_device && (pcie_device->starget == starget) &&
1822 			(pcie_device->id == starget->id) &&
1823 			(pcie_device->channel == starget->channel))
1824 			pcie_device->starget = NULL;
1825 
1826 		if (pcie_device) {
1827 			/*
1828 			 * Corresponding get() is in _scsih_target_alloc()
1829 			 */
1830 			sas_target_priv_data->pcie_dev = NULL;
1831 			pcie_device_put(pcie_device);
1832 			pcie_device_put(pcie_device);
1833 		}
1834 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1835 		goto out;
1836 	}
1837 
1838 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1839 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1840 	if (sas_device && (sas_device->starget == starget) &&
1841 	    (sas_device->id == starget->id) &&
1842 	    (sas_device->channel == starget->channel))
1843 		sas_device->starget = NULL;
1844 
1845 	if (sas_device) {
1846 		/*
1847 		 * Corresponding get() is in _scsih_target_alloc()
1848 		 */
1849 		sas_target_priv_data->sas_dev = NULL;
1850 		sas_device_put(sas_device);
1851 
1852 		sas_device_put(sas_device);
1853 	}
1854 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1855 
1856  out:
1857 	kfree(sas_target_priv_data);
1858 	starget->hostdata = NULL;
1859 }
1860 
1861 /**
1862  * scsih_slave_alloc - device add routine
1863  * @sdev: scsi device struct
1864  *
1865  * Return: 0 if ok. Any other return is assumed to be an error and
1866  * the device is ignored.
1867  */
1868 static int
1869 scsih_slave_alloc(struct scsi_device *sdev)
1870 {
1871 	struct Scsi_Host *shost;
1872 	struct MPT3SAS_ADAPTER *ioc;
1873 	struct MPT3SAS_TARGET *sas_target_priv_data;
1874 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1875 	struct scsi_target *starget;
1876 	struct _raid_device *raid_device;
1877 	struct _sas_device *sas_device;
1878 	struct _pcie_device *pcie_device;
1879 	unsigned long flags;
1880 
1881 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1882 				       GFP_KERNEL);
1883 	if (!sas_device_priv_data)
1884 		return -ENOMEM;
1885 
1886 	sas_device_priv_data->lun = sdev->lun;
1887 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1888 
1889 	starget = scsi_target(sdev);
1890 	sas_target_priv_data = starget->hostdata;
1891 	sas_target_priv_data->num_luns++;
1892 	sas_device_priv_data->sas_target = sas_target_priv_data;
1893 	sdev->hostdata = sas_device_priv_data;
1894 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1895 		sdev->no_uld_attach = 1;
1896 
1897 	shost = dev_to_shost(&starget->dev);
1898 	ioc = shost_priv(shost);
1899 	if (starget->channel == RAID_CHANNEL) {
1900 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1901 		raid_device = _scsih_raid_device_find_by_id(ioc,
1902 		    starget->id, starget->channel);
1903 		if (raid_device)
1904 			raid_device->sdev = sdev; /* raid is single lun */
1905 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1906 	}
1907 	if (starget->channel == PCIE_CHANNEL) {
1908 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1909 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1910 				sas_target_priv_data->sas_address);
1911 		if (pcie_device && (pcie_device->starget == NULL)) {
1912 			sdev_printk(KERN_INFO, sdev,
1913 			    "%s : pcie_device->starget set to starget @ %d\n",
1914 			    __func__, __LINE__);
1915 			pcie_device->starget = starget;
1916 		}
1917 
1918 		if (pcie_device)
1919 			pcie_device_put(pcie_device);
1920 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1921 
1922 	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1923 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1924 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1925 					sas_target_priv_data->sas_address);
1926 		if (sas_device && (sas_device->starget == NULL)) {
1927 			sdev_printk(KERN_INFO, sdev,
1928 			"%s : sas_device->starget set to starget @ %d\n",
1929 			     __func__, __LINE__);
1930 			sas_device->starget = starget;
1931 		}
1932 
1933 		if (sas_device)
1934 			sas_device_put(sas_device);
1935 
1936 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1937 	}
1938 
1939 	return 0;
1940 }
1941 
1942 /**
1943  * scsih_slave_destroy - device destroy routine
1944  * @sdev: scsi device struct
1945  */
1946 static void
1947 scsih_slave_destroy(struct scsi_device *sdev)
1948 {
1949 	struct MPT3SAS_TARGET *sas_target_priv_data;
1950 	struct scsi_target *starget;
1951 	struct Scsi_Host *shost;
1952 	struct MPT3SAS_ADAPTER *ioc;
1953 	struct _sas_device *sas_device;
1954 	struct _pcie_device *pcie_device;
1955 	unsigned long flags;
1956 
1957 	if (!sdev->hostdata)
1958 		return;
1959 
1960 	starget = scsi_target(sdev);
1961 	sas_target_priv_data = starget->hostdata;
1962 	sas_target_priv_data->num_luns--;
1963 
1964 	shost = dev_to_shost(&starget->dev);
1965 	ioc = shost_priv(shost);
1966 
1967 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1968 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1969 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1970 				sas_target_priv_data);
1971 		if (pcie_device && !sas_target_priv_data->num_luns)
1972 			pcie_device->starget = NULL;
1973 
1974 		if (pcie_device)
1975 			pcie_device_put(pcie_device);
1976 
1977 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1978 
1979 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1980 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1981 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
1982 				sas_target_priv_data);
1983 		if (sas_device && !sas_target_priv_data->num_luns)
1984 			sas_device->starget = NULL;
1985 
1986 		if (sas_device)
1987 			sas_device_put(sas_device);
1988 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1989 	}
1990 
1991 	kfree(sdev->hostdata);
1992 	sdev->hostdata = NULL;
1993 }
1994 
1995 /**
1996  * _scsih_display_sata_capabilities - sata capabilities
1997  * @ioc: per adapter object
1998  * @handle: device handle
1999  * @sdev: scsi device struct
2000  */
2001 static void
2002 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2003 	u16 handle, struct scsi_device *sdev)
2004 {
2005 	Mpi2ConfigReply_t mpi_reply;
2006 	Mpi2SasDevicePage0_t sas_device_pg0;
2007 	u32 ioc_status;
2008 	u16 flags;
2009 	u32 device_info;
2010 
2011 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2012 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2013 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2014 			__FILE__, __LINE__, __func__);
2015 		return;
2016 	}
2017 
2018 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2019 	    MPI2_IOCSTATUS_MASK;
2020 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2021 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2022 			__FILE__, __LINE__, __func__);
2023 		return;
2024 	}
2025 
2026 	flags = le16_to_cpu(sas_device_pg0.Flags);
2027 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2028 
2029 	sdev_printk(KERN_INFO, sdev,
2030 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2031 	    "sw_preserve(%s)\n",
2032 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2033 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2034 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2035 	    "n",
2036 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2037 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2038 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2039 }
2040 
2041 /*
2042  * raid transport support -
2043  * Enabled for SLES11 and newer, in older kernels the driver will panic when
2044  * unloading the driver followed by a load - I believe that the subroutine
2045  * raid_class_release() is not cleaning up properly.
2046  */
2047 
2048 /**
2049  * scsih_is_raid - return boolean indicating device is raid volume
2050  * @dev: the device struct object
2051  */
2052 static int
2053 scsih_is_raid(struct device *dev)
2054 {
2055 	struct scsi_device *sdev = to_scsi_device(dev);
2056 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2057 
2058 	if (ioc->is_warpdrive)
2059 		return 0;
2060 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2061 }
2062 
2063 static int
2064 scsih_is_nvme(struct device *dev)
2065 {
2066 	struct scsi_device *sdev = to_scsi_device(dev);
2067 
2068 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2069 }
2070 
2071 /**
2072  * scsih_get_resync - get raid volume resync percent complete
2073  * @dev: the device struct object
2074  */
2075 static void
2076 scsih_get_resync(struct device *dev)
2077 {
2078 	struct scsi_device *sdev = to_scsi_device(dev);
2079 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2080 	struct _raid_device *raid_device;
2081 	unsigned long flags;
2082 	Mpi2RaidVolPage0_t vol_pg0;
2083 	Mpi2ConfigReply_t mpi_reply;
2084 	u32 volume_status_flags;
2085 	u8 percent_complete;
2086 	u16 handle;
2087 
2088 	percent_complete = 0;
2089 	handle = 0;
2090 	if (ioc->is_warpdrive)
2091 		goto out;
2092 
2093 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2094 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2095 	    sdev->channel);
2096 	if (raid_device) {
2097 		handle = raid_device->handle;
2098 		percent_complete = raid_device->percent_complete;
2099 	}
2100 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2101 
2102 	if (!handle)
2103 		goto out;
2104 
2105 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2106 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2107 	     sizeof(Mpi2RaidVolPage0_t))) {
2108 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2109 			__FILE__, __LINE__, __func__);
2110 		percent_complete = 0;
2111 		goto out;
2112 	}
2113 
2114 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2115 	if (!(volume_status_flags &
2116 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2117 		percent_complete = 0;
2118 
2119  out:
2120 
2121 	switch (ioc->hba_mpi_version_belonged) {
2122 	case MPI2_VERSION:
2123 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2124 		break;
2125 	case MPI25_VERSION:
2126 	case MPI26_VERSION:
2127 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2128 		break;
2129 	}
2130 }
2131 
2132 /**
2133  * scsih_get_state - get raid volume state
2134  * @dev: the device struct object
2135  */
2136 static void
2137 scsih_get_state(struct device *dev)
2138 {
2139 	struct scsi_device *sdev = to_scsi_device(dev);
2140 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2141 	struct _raid_device *raid_device;
2142 	unsigned long flags;
2143 	Mpi2RaidVolPage0_t vol_pg0;
2144 	Mpi2ConfigReply_t mpi_reply;
2145 	u32 volstate;
2146 	enum raid_state state = RAID_STATE_UNKNOWN;
2147 	u16 handle = 0;
2148 
2149 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2150 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2151 	    sdev->channel);
2152 	if (raid_device)
2153 		handle = raid_device->handle;
2154 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2155 
2156 	if (!raid_device)
2157 		goto out;
2158 
2159 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2160 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2161 	     sizeof(Mpi2RaidVolPage0_t))) {
2162 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2163 			__FILE__, __LINE__, __func__);
2164 		goto out;
2165 	}
2166 
2167 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2168 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2169 		state = RAID_STATE_RESYNCING;
2170 		goto out;
2171 	}
2172 
2173 	switch (vol_pg0.VolumeState) {
2174 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2175 	case MPI2_RAID_VOL_STATE_ONLINE:
2176 		state = RAID_STATE_ACTIVE;
2177 		break;
2178 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2179 		state = RAID_STATE_DEGRADED;
2180 		break;
2181 	case MPI2_RAID_VOL_STATE_FAILED:
2182 	case MPI2_RAID_VOL_STATE_MISSING:
2183 		state = RAID_STATE_OFFLINE;
2184 		break;
2185 	}
2186  out:
2187 	switch (ioc->hba_mpi_version_belonged) {
2188 	case MPI2_VERSION:
2189 		raid_set_state(mpt2sas_raid_template, dev, state);
2190 		break;
2191 	case MPI25_VERSION:
2192 	case MPI26_VERSION:
2193 		raid_set_state(mpt3sas_raid_template, dev, state);
2194 		break;
2195 	}
2196 }
2197 
2198 /**
2199  * _scsih_set_level - set raid level
2200  * @ioc: per adapter object
2201  * @sdev: scsi device struct
2202  * @volume_type: volume type
2203  */
2204 static void
2205 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2206 	struct scsi_device *sdev, u8 volume_type)
2207 {
2208 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2209 
2210 	switch (volume_type) {
2211 	case MPI2_RAID_VOL_TYPE_RAID0:
2212 		level = RAID_LEVEL_0;
2213 		break;
2214 	case MPI2_RAID_VOL_TYPE_RAID10:
2215 		level = RAID_LEVEL_10;
2216 		break;
2217 	case MPI2_RAID_VOL_TYPE_RAID1E:
2218 		level = RAID_LEVEL_1E;
2219 		break;
2220 	case MPI2_RAID_VOL_TYPE_RAID1:
2221 		level = RAID_LEVEL_1;
2222 		break;
2223 	}
2224 
2225 	switch (ioc->hba_mpi_version_belonged) {
2226 	case MPI2_VERSION:
2227 		raid_set_level(mpt2sas_raid_template,
2228 			&sdev->sdev_gendev, level);
2229 		break;
2230 	case MPI25_VERSION:
2231 	case MPI26_VERSION:
2232 		raid_set_level(mpt3sas_raid_template,
2233 			&sdev->sdev_gendev, level);
2234 		break;
2235 	}
2236 }
2237 
2238 
2239 /**
2240  * _scsih_get_volume_capabilities - volume capabilities
2241  * @ioc: per adapter object
2242  * @raid_device: the raid_device object
2243  *
2244  * Return: 0 for success, else 1
2245  */
2246 static int
2247 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2248 	struct _raid_device *raid_device)
2249 {
2250 	Mpi2RaidVolPage0_t *vol_pg0;
2251 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2252 	Mpi2SasDevicePage0_t sas_device_pg0;
2253 	Mpi2ConfigReply_t mpi_reply;
2254 	u16 sz;
2255 	u8 num_pds;
2256 
2257 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2258 	    &num_pds)) || !num_pds) {
2259 		dfailprintk(ioc,
2260 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2261 				     __FILE__, __LINE__, __func__));
2262 		return 1;
2263 	}
2264 
2265 	raid_device->num_pds = num_pds;
2266 	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2267 	    sizeof(Mpi2RaidVol0PhysDisk_t));
2268 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2269 	if (!vol_pg0) {
2270 		dfailprintk(ioc,
2271 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2272 				     __FILE__, __LINE__, __func__));
2273 		return 1;
2274 	}
2275 
2276 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2277 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2278 		dfailprintk(ioc,
2279 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2280 				     __FILE__, __LINE__, __func__));
2281 		kfree(vol_pg0);
2282 		return 1;
2283 	}
2284 
2285 	raid_device->volume_type = vol_pg0->VolumeType;
2286 
2287 	/* figure out what the underlying devices are by
2288 	 * obtaining the device_info bits for the 1st device
2289 	 */
2290 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2291 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2292 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2293 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2294 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2295 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2296 			raid_device->device_info =
2297 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2298 		}
2299 	}
2300 
2301 	kfree(vol_pg0);
2302 	return 0;
2303 }
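
/*
 * Worked example, illustrative only: for a volume with num_pds = 2 the
 * allocation above is sized as
 *
 *	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk)
 *	   + 2 * sizeof(Mpi2RaidVol0PhysDisk_t);
 *
 * i.e. the fixed RAID volume page 0 header plus one trailing
 * Mpi2RaidVol0PhysDisk_t element per physical disk, so the variable
 * length PhysDisk[] array returned by the firmware fits in vol_pg0.
 */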
2304 
2305 /**
2306  * _scsih_enable_tlr - setting TLR flags
2307  * @ioc: per adapter object
2308  * @sdev: scsi device struct
2309  *
2310  * Enabling Transaction Layer Retries for tape devices when
2311  * vpd page 0x90 is present
2312  *
2313  */
2314 static void
2315 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2316 {
2317 
2318 	/* only for TAPE */
2319 	if (sdev->type != TYPE_TAPE)
2320 		return;
2321 
2322 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2323 		return;
2324 
2325 	sas_enable_tlr(sdev);
2326 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2327 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2328 	return;
2329 
2330 }
2331 
2332 /**
2333  * scsih_slave_configure - device configure routine.
2334  * @sdev: scsi device struct
2335  *
2336  * Return: 0 if ok. Any other return is assumed to be an error and
2337  * the device is ignored.
2338  */
2339 static int
2340 scsih_slave_configure(struct scsi_device *sdev)
2341 {
2342 	struct Scsi_Host *shost = sdev->host;
2343 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2344 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2345 	struct MPT3SAS_TARGET *sas_target_priv_data;
2346 	struct _sas_device *sas_device;
2347 	struct _pcie_device *pcie_device;
2348 	struct _raid_device *raid_device;
2349 	unsigned long flags;
2350 	int qdepth;
2351 	u8 ssp_target = 0;
2352 	char *ds = "";
2353 	char *r_level = "";
2354 	u16 handle, volume_handle = 0;
2355 	u64 volume_wwid = 0;
2356 
2357 	qdepth = 1;
2358 	sas_device_priv_data = sdev->hostdata;
2359 	sas_device_priv_data->configured_lun = 1;
2360 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2361 	sas_target_priv_data = sas_device_priv_data->sas_target;
2362 	handle = sas_target_priv_data->handle;
2363 
2364 	/* raid volume handling */
2365 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2366 
2367 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2368 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2369 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2370 		if (!raid_device) {
2371 			dfailprintk(ioc,
2372 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2373 					     __FILE__, __LINE__, __func__));
2374 			return 1;
2375 		}
2376 
2377 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2378 			dfailprintk(ioc,
2379 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2380 					     __FILE__, __LINE__, __func__));
2381 			return 1;
2382 		}
2383 
2384 		/*
2385 		 * WARPDRIVE: Initialize the required data for Direct IO
2386 		 */
2387 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2388 
2389 		/* RAID Queue Depth Support
2390 		 * IS volume = underlying qdepth of drive type, either
2391 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2392 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2393 		 */
2394 		if (raid_device->device_info &
2395 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2396 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2397 			ds = "SSP";
2398 		} else {
2399 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2400 			if (raid_device->device_info &
2401 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2402 				ds = "SATA";
2403 			else
2404 				ds = "STP";
2405 		}
2406 
2407 		switch (raid_device->volume_type) {
2408 		case MPI2_RAID_VOL_TYPE_RAID0:
2409 			r_level = "RAID0";
2410 			break;
2411 		case MPI2_RAID_VOL_TYPE_RAID1E:
2412 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2413 			if (ioc->manu_pg10.OEMIdentifier &&
2414 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2415 			    MFG10_GF0_R10_DISPLAY) &&
2416 			    !(raid_device->num_pds % 2))
2417 				r_level = "RAID10";
2418 			else
2419 				r_level = "RAID1E";
2420 			break;
2421 		case MPI2_RAID_VOL_TYPE_RAID1:
2422 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2423 			r_level = "RAID1";
2424 			break;
2425 		case MPI2_RAID_VOL_TYPE_RAID10:
2426 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2427 			r_level = "RAID10";
2428 			break;
2429 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2430 		default:
2431 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2432 			r_level = "RAIDX";
2433 			break;
2434 		}
2435 
2436 		if (!ioc->hide_ir_msg)
2437 			sdev_printk(KERN_INFO, sdev,
2438 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2439 			    " pd_count(%d), type(%s)\n",
2440 			    r_level, raid_device->handle,
2441 			    (unsigned long long)raid_device->wwid,
2442 			    raid_device->num_pds, ds);
2443 
2444 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2445 			blk_queue_max_hw_sectors(sdev->request_queue,
2446 						MPT3SAS_RAID_MAX_SECTORS);
2447 			sdev_printk(KERN_INFO, sdev,
2448 					"Set queue's max_sector to: %u\n",
2449 						MPT3SAS_RAID_MAX_SECTORS);
2450 		}
2451 
2452 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2453 
2454 		/* raid transport support */
2455 		if (!ioc->is_warpdrive)
2456 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2457 		return 0;
2458 	}
2459 
2460 	/* non-raid handling */
2461 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2462 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2463 		    &volume_handle)) {
2464 			dfailprintk(ioc,
2465 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2466 					     __FILE__, __LINE__, __func__));
2467 			return 1;
2468 		}
2469 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2470 		    volume_handle, &volume_wwid)) {
2471 			dfailprintk(ioc,
2472 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2473 					     __FILE__, __LINE__, __func__));
2474 			return 1;
2475 		}
2476 	}
2477 
2478 	/* PCIe handling */
2479 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2480 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2481 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2482 				sas_device_priv_data->sas_target->sas_address);
2483 		if (!pcie_device) {
2484 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2485 			dfailprintk(ioc,
2486 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2487 					     __FILE__, __LINE__, __func__));
2488 			return 1;
2489 		}
2490 
2491 		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2492 		ds = "NVMe";
2493 		sdev_printk(KERN_INFO, sdev,
2494 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2495 			ds, handle, (unsigned long long)pcie_device->wwid,
2496 			pcie_device->port_num);
2497 		if (pcie_device->enclosure_handle != 0)
2498 			sdev_printk(KERN_INFO, sdev,
2499 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2500 			ds,
2501 			(unsigned long long)pcie_device->enclosure_logical_id,
2502 			pcie_device->slot);
2503 		if (pcie_device->connector_name[0] != '\0')
2504 			sdev_printk(KERN_INFO, sdev,
2505 				"%s: enclosure level(0x%04x),"
2506 				"connector name( %s)\n", ds,
2507 				pcie_device->enclosure_level,
2508 				pcie_device->connector_name);
2509 
2510 		if (pcie_device->nvme_mdts)
2511 			blk_queue_max_hw_sectors(sdev->request_queue,
2512 					pcie_device->nvme_mdts/512);
2513 
2514 		pcie_device_put(pcie_device);
2515 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2516 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2517 		/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2518 		 * merged and can eliminate holes created during merging
2519 		 * operation.
2520 		 */
2521 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2522 				sdev->request_queue);
2523 		blk_queue_virt_boundary(sdev->request_queue,
2524 				ioc->page_size - 1);
2525 		return 0;
2526 	}
2527 
2528 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2529 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2530 	   sas_device_priv_data->sas_target->sas_address);
2531 	if (!sas_device) {
2532 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2533 		dfailprintk(ioc,
2534 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2535 				     __FILE__, __LINE__, __func__));
2536 		return 1;
2537 	}
2538 
2539 	sas_device->volume_handle = volume_handle;
2540 	sas_device->volume_wwid = volume_wwid;
2541 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2542 		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2543 		ssp_target = 1;
2544 		if (sas_device->device_info &
2545 				MPI2_SAS_DEVICE_INFO_SEP) {
2546 			sdev_printk(KERN_WARNING, sdev,
2547 			"set ignore_delay_remove for handle(0x%04x)\n",
2548 			sas_device_priv_data->sas_target->handle);
2549 			sas_device_priv_data->ignore_delay_remove = 1;
2550 			ds = "SES";
2551 		} else
2552 			ds = "SSP";
2553 	} else {
2554 		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2555 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2556 			ds = "STP";
2557 		else if (sas_device->device_info &
2558 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2559 			ds = "SATA";
2560 	}
2561 
2562 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2563 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2564 	    ds, handle, (unsigned long long)sas_device->sas_address,
2565 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2566 
2567 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2568 
2569 	sas_device_put(sas_device);
2570 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2571 
2572 	if (!ssp_target)
2573 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2574 
2575 
2576 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2577 
2578 	if (ssp_target) {
2579 		sas_read_port_mode_page(sdev);
2580 		_scsih_enable_tlr(ioc, sdev);
2581 	}
2582 
2583 	return 0;
2584 }
2585 
2586 /**
2587  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2588  * @sdev: scsi device struct
2589  * @bdev: pointer to block device context
2590  * @capacity: device size (in 512 byte sectors)
2591  * @params: three element array to place output:
2592  *              params[0] number of heads (max 255)
2593  *              params[1] number of sectors (max 63)
2594  *              params[2] number of cylinders
2595  */
2596 static int
2597 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2598 	sector_t capacity, int params[])
2599 {
2600 	int		heads;
2601 	int		sectors;
2602 	sector_t	cylinders;
2603 	ulong		dummy;
2604 
2605 	heads = 64;
2606 	sectors = 32;
2607 
2608 	dummy = heads * sectors;
2609 	cylinders = capacity;
2610 	sector_div(cylinders, dummy);
2611 
2612 	/*
2613 	 * Handle extended translation size for logical drives
2614 	 * > 1Gb
2615 	 */
2616 	if ((ulong)capacity >= 0x200000) {
2617 		heads = 255;
2618 		sectors = 63;
2619 		dummy = heads * sectors;
2620 		cylinders = capacity;
2621 		sector_div(cylinders, dummy);
2622 	}
2623 
2624 	/* return result */
2625 	params[0] = heads;
2626 	params[1] = sectors;
2627 	params[2] = cylinders;
2628 
2629 	return 0;
2630 }
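
/*
 * Worked example, illustrative only: a disk with a capacity of
 * 1,000,000,000 sectors (512 GB) exceeds 0x200000, so the extended
 * translation is used:
 *
 *	heads     = 255
 *	sectors   = 63
 *	cylinders = 1000000000 / (255 * 63) = 62247 (integer division)
 *
 * and params[] is returned as { 255, 63, 62247 }.
 */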
2631 
2632 /**
2633  * _scsih_response_code - translation of device response code
2634  * @ioc: per adapter object
2635  * @response_code: response code returned by the device
2636  */
2637 static void
2638 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2639 {
2640 	char *desc;
2641 
2642 	switch (response_code) {
2643 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2644 		desc = "task management request completed";
2645 		break;
2646 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2647 		desc = "invalid frame";
2648 		break;
2649 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2650 		desc = "task management request not supported";
2651 		break;
2652 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2653 		desc = "task management request failed";
2654 		break;
2655 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2656 		desc = "task management request succeeded";
2657 		break;
2658 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2659 		desc = "invalid lun";
2660 		break;
2661 	case 0xA:
2662 		desc = "overlapped tag attempted";
2663 		break;
2664 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2665 		desc = "task queued, however not sent to target";
2666 		break;
2667 	default:
2668 		desc = "unknown";
2669 		break;
2670 	}
2671 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2672 }
2673 
2674 /**
2675  * _scsih_tm_done - tm completion routine
2676  * @ioc: per adapter object
2677  * @smid: system request message index
2678  * @msix_index: MSIX table index supplied by the OS
2679  * @reply: reply message frame(lower 32bit addr)
2680  * Context: none.
2681  *
2682  * The callback handler when using scsih_issue_tm.
2683  *
2684  * Return: 1 meaning mf should be freed from _base_interrupt
2685  *         0 means the mf is freed from this function.
2686  */
2687 static u8
2688 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2689 {
2690 	MPI2DefaultReply_t *mpi_reply;
2691 
2692 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2693 		return 1;
2694 	if (ioc->tm_cmds.smid != smid)
2695 		return 1;
2696 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2697 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2698 	if (mpi_reply) {
2699 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2700 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2701 	}
2702 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2703 	complete(&ioc->tm_cmds.done);
2704 	return 1;
2705 }
2706 
2707 /**
2708  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2709  * @ioc: per adapter object
2710  * @handle: device handle
2711  *
2712  * During a task management request, we need to freeze the device queue.
2713  */
2714 void
2715 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2716 {
2717 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2718 	struct scsi_device *sdev;
2719 	u8 skip = 0;
2720 
2721 	shost_for_each_device(sdev, ioc->shost) {
2722 		if (skip)
2723 			continue;
2724 		sas_device_priv_data = sdev->hostdata;
2725 		if (!sas_device_priv_data)
2726 			continue;
2727 		if (sas_device_priv_data->sas_target->handle == handle) {
2728 			sas_device_priv_data->sas_target->tm_busy = 1;
2729 			skip = 1;
2730 			ioc->ignore_loginfos = 1;
2731 		}
2732 	}
2733 }
2734 
2735 /**
2736  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2737  * @ioc: per adapter object
2738  * @handle: device handle
2739  *
2740  * During a task management request, we need to freeze the device queue.
2741  */
2742 void
2743 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2744 {
2745 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2746 	struct scsi_device *sdev;
2747 	u8 skip = 0;
2748 
2749 	shost_for_each_device(sdev, ioc->shost) {
2750 		if (skip)
2751 			continue;
2752 		sas_device_priv_data = sdev->hostdata;
2753 		if (!sas_device_priv_data)
2754 			continue;
2755 		if (sas_device_priv_data->sas_target->handle == handle) {
2756 			sas_device_priv_data->sas_target->tm_busy = 0;
2757 			skip = 1;
2758 			ioc->ignore_loginfos = 0;
2759 		}
2760 	}
2761 }
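
/*
 * Minimal usage sketch, illustrative only: the set/clear helpers above
 * are expected to bracket a task management request, as done in
 * mpt3sas_scsih_issue_tm() below:
 *
 *	mpt3sas_scsih_set_tm_flag(ioc, handle);
 *	init_completion(&ioc->tm_cmds.done);
 *	ioc->put_smid_hi_priority(ioc, smid, msix_task);
 *	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout * HZ);
 *	...
 *	mpt3sas_scsih_clear_tm_flag(ioc, handle);
 */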
2762 
2763 /**
2764  * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2765  * @ioc: per adapter object
2766  * @channel: the channel assigned by the OS
2767  * @id: the id assigned by the OS
2768  * @lun: lun number
2769  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2770  * @smid_task: smid assigned to the task
2771  *
2772  * Check whether the TM has aborted the timed out SCSI command; if the
2773  * TM has aborted the IO then return SUCCESS, else return FAILED.
2774  */
2775 static int
2776 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2777 	uint id, uint lun, u8 type, u16 smid_task)
2778 {
2779 
2780 	if (smid_task <= ioc->shost->can_queue) {
2781 		switch (type) {
2782 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2783 			if (!(_scsih_scsi_lookup_find_by_target(ioc,
2784 			    id, channel)))
2785 				return SUCCESS;
2786 			break;
2787 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2788 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2789 			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2790 			    lun, channel)))
2791 				return SUCCESS;
2792 			break;
2793 		default:
2794 			return SUCCESS;
2795 		}
2796 	} else if (smid_task == ioc->scsih_cmds.smid) {
2797 		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2798 		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2799 			return SUCCESS;
2800 	} else if (smid_task == ioc->ctl_cmds.smid) {
2801 		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2802 		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2803 			return SUCCESS;
2804 	}
2805 
2806 	return FAILED;
2807 }
2808 
2809 /**
2810  * scsih_tm_post_processing - post processing of target & LUN reset
2811  * @ioc: per adapter object
2812  * @handle: device handle
2813  * @channel: the channel assigned by the OS
2814  * @id: the id assigned by the OS
2815  * @lun: lun number
2816  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2817  * @smid_task: smid assigned to the task
2818  *
2819  * Post processing of target & LUN reset. Due to interrupt latency
2820  * issues it is possible that the interrupt for the aborted IO has not
2821  * been received yet. So before returning a failure status, poll the
2822  * reply descriptor pools for the reply of the timed out SCSI command.
2823  * Return FAILED if the reply for the timed out command is still not
2824  * received, otherwise return SUCCESS.
2825  */
2826 static int
2827 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2828 	uint channel, uint id, uint lun, u8 type, u16 smid_task)
2829 {
2830 	int rc;
2831 
2832 	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
2833 	if (rc == SUCCESS)
2834 		return rc;
2835 
2836 	ioc_info(ioc,
2837 	    "Poll ReplyDescriptor queues for completion of"
2838 	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
2839 	    smid_task, type, handle);
2840 
2841 	/*
2842 	 * Due to interrupt latency issues, driver may receive interrupt for
2843 	 * TM first and then for aborted SCSI IO command. So, poll all the
2844 	 * ReplyDescriptor pools before returning the FAILED status to SML.
2845 	 */
2846 	mpt3sas_base_mask_interrupts(ioc);
2847 	mpt3sas_base_sync_reply_irqs(ioc, 1);
2848 	mpt3sas_base_unmask_interrupts(ioc);
2849 
2850 	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
2851 }
2852 
2853 /**
2854  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2855  * @ioc: per adapter struct
2856  * @handle: device handle
2857  * @channel: the channel assigned by the OS
2858  * @id: the id assigned by the OS
2859  * @lun: lun number
2860  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2861  * @smid_task: smid assigned to the task
2862  * @msix_task: MSIX table index supplied by the OS
2863  * @timeout: timeout in seconds
2864  * @tr_method: Target Reset Method
2865  * Context: user
2866  *
2867  * A generic API for sending task management requests to firmware.
2868  *
2869  * The callback index is set inside `ioc->tm_cb_idx`.
2870  * The caller is responsible to check for outstanding commands.
2871  *
2872  * Return: SUCCESS or FAILED.
2873  */
2874 int
2875 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2876 	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
2877 	u8 timeout, u8 tr_method)
2878 {
2879 	Mpi2SCSITaskManagementRequest_t *mpi_request;
2880 	Mpi2SCSITaskManagementReply_t *mpi_reply;
2881 	Mpi25SCSIIORequest_t *request;
2882 	u16 smid = 0;
2883 	u32 ioc_state;
2884 	int rc;
2885 	u8 issue_reset = 0;
2886 
2887 	lockdep_assert_held(&ioc->tm_cmds.mutex);
2888 
2889 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2890 		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2891 		return FAILED;
2892 	}
2893 
2894 	if (ioc->shost_recovery || ioc->remove_host ||
2895 	    ioc->pci_error_recovery) {
2896 		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2897 		return FAILED;
2898 	}
2899 
2900 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2901 	if (ioc_state & MPI2_DOORBELL_USED) {
2902 		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2903 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2904 		return (!rc) ? SUCCESS : FAILED;
2905 	}
2906 
2907 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2908 		mpt3sas_print_fault_code(ioc, ioc_state &
2909 		    MPI2_DOORBELL_DATA_MASK);
2910 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2911 		return (!rc) ? SUCCESS : FAILED;
2912 	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
2913 	    MPI2_IOC_STATE_COREDUMP) {
2914 		mpt3sas_print_coredump_info(ioc, ioc_state &
2915 		    MPI2_DOORBELL_DATA_MASK);
2916 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2917 		return (!rc) ? SUCCESS : FAILED;
2918 	}
2919 
2920 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2921 	if (!smid) {
2922 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2923 		return FAILED;
2924 	}
2925 
2926 	dtmprintk(ioc,
2927 		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2928 			   handle, type, smid_task, timeout, tr_method));
2929 	ioc->tm_cmds.status = MPT3_CMD_PENDING;
2930 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2931 	ioc->tm_cmds.smid = smid;
2932 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2933 	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2934 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2935 	mpi_request->DevHandle = cpu_to_le16(handle);
2936 	mpi_request->TaskType = type;
2937 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
2938 	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
2939 		mpi_request->MsgFlags = tr_method;
2940 	mpi_request->TaskMID = cpu_to_le16(smid_task);
2941 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2942 	mpt3sas_scsih_set_tm_flag(ioc, handle);
2943 	init_completion(&ioc->tm_cmds.done);
2944 	ioc->put_smid_hi_priority(ioc, smid, msix_task);
2945 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2946 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2947 		mpt3sas_check_cmd_timeout(ioc,
2948 		    ioc->tm_cmds.status, mpi_request,
2949 		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
2950 		if (issue_reset) {
2951 			rc = mpt3sas_base_hard_reset_handler(ioc,
2952 					FORCE_BIG_HAMMER);
2953 			rc = (!rc) ? SUCCESS : FAILED;
2954 			goto out;
2955 		}
2956 	}
2957 
2958 	/* sync IRQs in case those were busy during flush. */
2959 	mpt3sas_base_sync_reply_irqs(ioc, 0);
2960 
2961 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2962 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2963 		mpi_reply = ioc->tm_cmds.reply;
2964 		dtmprintk(ioc,
2965 			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2966 				   le16_to_cpu(mpi_reply->IOCStatus),
2967 				   le32_to_cpu(mpi_reply->IOCLogInfo),
2968 				   le32_to_cpu(mpi_reply->TerminationCount)));
2969 		if (ioc->logging_level & MPT_DEBUG_TM) {
2970 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
2971 			if (mpi_reply->IOCStatus)
2972 				_debug_dump_mf(mpi_request,
2973 				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2974 		}
2975 	}
2976 
2977 	switch (type) {
2978 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2979 		rc = SUCCESS;
2980 		/*
2981 		 * If the DevHandle field in smid_task's entry of the request
2982 		 * pool doesn't match the device handle on which this task
2983 		 * abort TM was issued, then the TM has successfully aborted
2984 		 * the timed out command, since smid_task's entry in the
2985 		 * request pool is memset to zero once the timed out command
2986 		 * is returned to the SML. If the command was not aborted then
2987 		 * smid_task's entry won't be cleared; it will still have the
2988 		 * same DevHandle value on which this task abort TM was issued
2989 		 * and the driver will return the TM status as FAILED.
2990 		 */
2991 		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
2992 		if (le16_to_cpu(request->DevHandle) != handle)
2993 			break;
2994 
2995 		ioc_info(ioc, "Task abort tm failed: handle(0x%04x), "
2996 		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
2997 		    handle, timeout, tr_method, smid_task, msix_task);
2998 		rc = FAILED;
2999 		break;
3000 
3001 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3002 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3003 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3004 		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3005 		    type, smid_task);
3006 		break;
3007 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3008 		rc = SUCCESS;
3009 		break;
3010 	default:
3011 		rc = FAILED;
3012 		break;
3013 	}
3014 
3015 out:
3016 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
3017 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3018 	return rc;
3019 }
3020 
3021 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3022 		uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3023 		u16 msix_task, u8 timeout, u8 tr_method)
3024 {
3025 	int ret;
3026 
3027 	mutex_lock(&ioc->tm_cmds.mutex);
3028 	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3029 			smid_task, msix_task, timeout, tr_method);
3030 	mutex_unlock(&ioc->tm_cmds.mutex);
3031 
3032 	return ret;
3033 }
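
/*
 * Minimal usage sketch, illustrative only: issuing a LUN reset through
 * the locked wrapper, mirroring scsih_dev_reset() below. The handle,
 * channel, id and lun values are assumed to come from the device's
 * private data; the timeout (30) and tr_method shown are the defaults
 * used there for non-NVMe devices.
 *
 *	rc = mpt3sas_scsih_issue_locked_tm(ioc, handle,
 *		scmd->device->channel, scmd->device->id, scmd->device->lun,
 *		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
 *		30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
 *	if (rc != SUCCESS)
 *		r = FAILED;
 */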
3034 
3035 /**
3036  * _scsih_tm_display_info - displays info about the device
3037  * @ioc: per adapter struct
3038  * @scmd: pointer to scsi command object
3039  *
3040  * Called by task management callback handlers.
3041  */
3042 static void
3043 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3044 {
3045 	struct scsi_target *starget = scmd->device->sdev_target;
3046 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3047 	struct _sas_device *sas_device = NULL;
3048 	struct _pcie_device *pcie_device = NULL;
3049 	unsigned long flags;
3050 	char *device_str = NULL;
3051 
3052 	if (!priv_target)
3053 		return;
3054 	if (ioc->hide_ir_msg)
3055 		device_str = "WarpDrive";
3056 	else
3057 		device_str = "volume";
3058 
3059 	scsi_print_command(scmd);
3060 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3061 		starget_printk(KERN_INFO, starget,
3062 			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
3063 			device_str, priv_target->handle,
3064 		    device_str, (unsigned long long)priv_target->sas_address);
3065 
3066 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3067 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3068 		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3069 		if (pcie_device) {
3070 			starget_printk(KERN_INFO, starget,
3071 				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3072 				pcie_device->handle,
3073 				(unsigned long long)pcie_device->wwid,
3074 				pcie_device->port_num);
3075 			if (pcie_device->enclosure_handle != 0)
3076 				starget_printk(KERN_INFO, starget,
3077 					"enclosure logical id(0x%016llx), slot(%d)\n",
3078 					(unsigned long long)
3079 					pcie_device->enclosure_logical_id,
3080 					pcie_device->slot);
3081 			if (pcie_device->connector_name[0] != '\0')
3082 				starget_printk(KERN_INFO, starget,
3083 					"enclosure level(0x%04x), connector name( %s)\n",
3084 					pcie_device->enclosure_level,
3085 					pcie_device->connector_name);
3086 			pcie_device_put(pcie_device);
3087 		}
3088 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3089 
3090 	} else {
3091 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
3092 		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3093 		if (sas_device) {
3094 			if (priv_target->flags &
3095 			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3096 				starget_printk(KERN_INFO, starget,
3097 				    "volume handle(0x%04x), "
3098 				    "volume wwid(0x%016llx)\n",
3099 				    sas_device->volume_handle,
3100 				   (unsigned long long)sas_device->volume_wwid);
3101 			}
3102 			starget_printk(KERN_INFO, starget,
3103 			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3104 			    sas_device->handle,
3105 			    (unsigned long long)sas_device->sas_address,
3106 			    sas_device->phy);
3107 
3108 			_scsih_display_enclosure_chassis_info(NULL, sas_device,
3109 			    NULL, starget);
3110 
3111 			sas_device_put(sas_device);
3112 		}
3113 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3114 	}
3115 }
3116 
3117 /**
3118  * scsih_abort - eh threads main abort routine
3119  * @scmd: pointer to scsi command object
3120  *
3121  * Return: SUCCESS if command aborted else FAILED
3122  */
3123 static int
3124 scsih_abort(struct scsi_cmnd *scmd)
3125 {
3126 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3127 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3128 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3129 	u16 handle;
3130 	int r;
3131 
3132 	u8 timeout = 30;
3133 	struct _pcie_device *pcie_device = NULL;
3134 	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3135 	    " scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3136 	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3137 	    (scmd->request->timeout / HZ) * 1000);
3138 	_scsih_tm_display_info(ioc, scmd);
3139 
3140 	sas_device_priv_data = scmd->device->hostdata;
3141 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3142 	    ioc->remove_host) {
3143 		sdev_printk(KERN_INFO, scmd->device,
3144 		    "device been deleted! scmd(0x%p)\n", scmd);
3145 		scmd->result = DID_NO_CONNECT << 16;
3146 		scmd->scsi_done(scmd);
3147 		r = SUCCESS;
3148 		goto out;
3149 	}
3150 
3151 	/* check for completed command */
3152 	if (st == NULL || st->cb_idx == 0xFF) {
3153 		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3154 		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
3155 		scmd->result = DID_RESET << 16;
3156 		r = SUCCESS;
3157 		goto out;
3158 	}
3159 
3160 	/* for hidden raid components and volumes this is not supported */
3161 	if (sas_device_priv_data->sas_target->flags &
3162 	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
3163 	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3164 		scmd->result = DID_RESET << 16;
3165 		r = FAILED;
3166 		goto out;
3167 	}
3168 
3169 	mpt3sas_halt_firmware(ioc);
3170 
3171 	handle = sas_device_priv_data->sas_target->handle;
3172 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3173 	if (pcie_device && (!ioc->tm_custom_handling) &&
3174 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3175 		timeout = ioc->nvme_abort_timeout;
3176 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3177 		scmd->device->id, scmd->device->lun,
3178 		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3179 		st->smid, st->msix_io, timeout, 0);
3180 	/* Command must be cleared after abort */
3181 	if (r == SUCCESS && st->cb_idx != 0xFF)
3182 		r = FAILED;
3183  out:
3184 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3185 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3186 	if (pcie_device)
3187 		pcie_device_put(pcie_device);
3188 	return r;
3189 }
3190 
3191 /**
3192  * scsih_dev_reset - eh threads main device reset routine
3193  * @scmd: pointer to scsi command object
3194  *
3195  * Return: SUCCESS if command aborted else FAILED
3196  */
3197 static int
3198 scsih_dev_reset(struct scsi_cmnd *scmd)
3199 {
3200 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3201 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3202 	struct _sas_device *sas_device = NULL;
3203 	struct _pcie_device *pcie_device = NULL;
3204 	u16	handle;
3205 	u8	tr_method = 0;
3206 	u8	tr_timeout = 30;
3207 	int r;
3208 
3209 	struct scsi_target *starget = scmd->device->sdev_target;
3210 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3211 
3212 	sdev_printk(KERN_INFO, scmd->device,
3213 	    "attempting device reset! scmd(0x%p)\n", scmd);
3214 	_scsih_tm_display_info(ioc, scmd);
3215 
3216 	sas_device_priv_data = scmd->device->hostdata;
3217 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3218 	    ioc->remove_host) {
3219 		sdev_printk(KERN_INFO, scmd->device,
3220 		    "device been deleted! scmd(0x%p)\n", scmd);
3221 		scmd->result = DID_NO_CONNECT << 16;
3222 		scmd->scsi_done(scmd);
3223 		r = SUCCESS;
3224 		goto out;
3225 	}
3226 
3227 	/* for hidden raid components obtain the volume_handle */
3228 	handle = 0;
3229 	if (sas_device_priv_data->sas_target->flags &
3230 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3231 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3232 				target_priv_data);
3233 		if (sas_device)
3234 			handle = sas_device->volume_handle;
3235 	} else
3236 		handle = sas_device_priv_data->sas_target->handle;
3237 
3238 	if (!handle) {
3239 		scmd->result = DID_RESET << 16;
3240 		r = FAILED;
3241 		goto out;
3242 	}
3243 
3244 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3245 
3246 	if (pcie_device && (!ioc->tm_custom_handling) &&
3247 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3248 		tr_timeout = pcie_device->reset_timeout;
3249 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3250 	} else
3251 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3252 
3253 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3254 		scmd->device->id, scmd->device->lun,
3255 		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3256 		tr_timeout, tr_method);
3257 	/* Check for busy commands after reset */
3258 	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
3259 		r = FAILED;
3260  out:
3261 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3262 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3263 
3264 	if (sas_device)
3265 		sas_device_put(sas_device);
3266 	if (pcie_device)
3267 		pcie_device_put(pcie_device);
3268 
3269 	return r;
3270 }
3271 
3272 /**
3273  * scsih_target_reset - eh thread's main target reset routine
3274  * @scmd: pointer to scsi command object
3275  *
3276  * Return: SUCCESS if the target was reset else FAILED
3277  */
3278 static int
3279 scsih_target_reset(struct scsi_cmnd *scmd)
3280 {
3281 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3282 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3283 	struct _sas_device *sas_device = NULL;
3284 	struct _pcie_device *pcie_device = NULL;
3285 	u16	handle;
3286 	u8	tr_method = 0;
3287 	u8	tr_timeout = 30;
3288 	int r;
3289 	struct scsi_target *starget = scmd->device->sdev_target;
3290 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3291 
3292 	starget_printk(KERN_INFO, starget,
3293 	    "attempting target reset! scmd(0x%p)\n", scmd);
3294 	_scsih_tm_display_info(ioc, scmd);
3295 
3296 	sas_device_priv_data = scmd->device->hostdata;
3297 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3298 	    ioc->remove_host) {
3299 		starget_printk(KERN_INFO, starget,
3300 		    "target been deleted! scmd(0x%p)\n", scmd);
3301 		scmd->result = DID_NO_CONNECT << 16;
3302 		scmd->scsi_done(scmd);
3303 		r = SUCCESS;
3304 		goto out;
3305 	}
3306 
3307 	/* for hidden raid components obtain the volume_handle */
3308 	handle = 0;
3309 	if (sas_device_priv_data->sas_target->flags &
3310 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3311 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3312 				target_priv_data);
3313 		if (sas_device)
3314 			handle = sas_device->volume_handle;
3315 	} else
3316 		handle = sas_device_priv_data->sas_target->handle;
3317 
3318 	if (!handle) {
3319 		scmd->result = DID_RESET << 16;
3320 		r = FAILED;
3321 		goto out;
3322 	}
3323 
3324 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3325 
3326 	if (pcie_device && (!ioc->tm_custom_handling) &&
3327 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3328 		tr_timeout = pcie_device->reset_timeout;
3329 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3330 	} else
3331 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3332 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3333 		scmd->device->id, 0,
3334 		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3335 	    tr_timeout, tr_method);
3336 	/* Check for busy commands after reset */
3337 	if (r == SUCCESS && atomic_read(&starget->target_busy))
3338 		r = FAILED;
3339  out:
3340 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3341 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3342 
3343 	if (sas_device)
3344 		sas_device_put(sas_device);
3345 	if (pcie_device)
3346 		pcie_device_put(pcie_device);
3347 	return r;
3348 }
3349 
3350 
3351 /**
3352  * scsih_host_reset - eh thread's main host reset routine
3353  * @scmd: pointer to scsi command object
3354  *
3355  * Return: SUCCESS if the host was reset else FAILED
3356  */
3357 static int
3358 scsih_host_reset(struct scsi_cmnd *scmd)
3359 {
3360 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3361 	int r, retval;
3362 
3363 	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3364 	scsi_print_command(scmd);
3365 
3366 	if (ioc->is_driver_loading || ioc->remove_host) {
3367 		ioc_info(ioc, "Blocking the host reset\n");
3368 		r = FAILED;
3369 		goto out;
3370 	}
3371 
3372 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3373 	r = (retval < 0) ? FAILED : SUCCESS;
3374 out:
3375 	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3376 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3377 
3378 	return r;
3379 }
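
/*
 * Illustrative sketch, not part of the driver: the error-handling
 * routines above are the callbacks a SCSI LLD exposes to the midlayer
 * through its struct scsi_host_template.  The field names below come
 * from include/scsi/scsi_host.h; the abort handler name is an
 * assumption (only its tail is visible earlier in this excerpt), while
 * the three reset handlers are the functions defined above.
 */
#if 0
static struct scsi_host_template example_sht = {
	.module				= THIS_MODULE,
	.name				= "Example MPT SAS Host",
	.eh_abort_handler		= scsih_abort,		/* assumed name */
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
};
#endif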
3380 
3381 /**
3382  * _scsih_fw_event_add - insert and queue up fw_event
3383  * @ioc: per adapter object
3384  * @fw_event: object describing the event
3385  * Context: This function will acquire ioc->fw_event_lock.
3386  *
3387  * This adds the firmware event object to the linked list, then queues it up to
3388  * be processed from user context.
3389  */
3390 static void
3391 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3392 {
3393 	unsigned long flags;
3394 
3395 	if (ioc->firmware_event_thread == NULL)
3396 		return;
3397 
3398 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3399 	fw_event_work_get(fw_event);
3400 	INIT_LIST_HEAD(&fw_event->list);
3401 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
3402 	INIT_WORK(&fw_event->work, _firmware_event_work);
3403 	fw_event_work_get(fw_event);
3404 	queue_work(ioc->firmware_event_thread, &fw_event->work);
3405 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3406 }
3407 
3408 /**
3409  * _scsih_fw_event_del_from_list - delete fw_event from the list
3410  * @ioc: per adapter object
3411  * @fw_event: object describing the event
3412  * Context: This function will acquire ioc->fw_event_lock.
3413  *
3414  * If the fw_event is on the fw_event_list, remove it and do a put.
3415  */
3416 static void
3417 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3418 	*fw_event)
3419 {
3420 	unsigned long flags;
3421 
3422 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3423 	if (!list_empty(&fw_event->list)) {
3424 		list_del_init(&fw_event->list);
3425 		fw_event_work_put(fw_event);
3426 	}
3427 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3428 }
3429 
3430 
3431  /**
3432  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3433  * @ioc: per adapter object
3434  * @event_data: trigger event data
3435  */
3436 void
3437 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3438 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3439 {
3440 	struct fw_event_work *fw_event;
3441 	u16 sz;
3442 
3443 	if (ioc->is_driver_loading)
3444 		return;
3445 	sz = sizeof(*event_data);
3446 	fw_event = alloc_fw_event_work(sz);
3447 	if (!fw_event)
3448 		return;
3449 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3450 	fw_event->ioc = ioc;
3451 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3452 	_scsih_fw_event_add(ioc, fw_event);
3453 	fw_event_work_put(fw_event);
3454 }
3455 
3456 /**
3457  * _scsih_error_recovery_delete_devices - remove devices not responding
3458  * @ioc: per adapter object
3459  */
3460 static void
3461 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3462 {
3463 	struct fw_event_work *fw_event;
3464 
3465 	if (ioc->is_driver_loading)
3466 		return;
3467 	fw_event = alloc_fw_event_work(0);
3468 	if (!fw_event)
3469 		return;
3470 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3471 	fw_event->ioc = ioc;
3472 	_scsih_fw_event_add(ioc, fw_event);
3473 	fw_event_work_put(fw_event);
3474 }
3475 
3476 /**
3477  * mpt3sas_port_enable_complete - port enable completed (fake event)
3478  * @ioc: per adapter object
3479  */
3480 void
3481 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3482 {
3483 	struct fw_event_work *fw_event;
3484 
3485 	fw_event = alloc_fw_event_work(0);
3486 	if (!fw_event)
3487 		return;
3488 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3489 	fw_event->ioc = ioc;
3490 	_scsih_fw_event_add(ioc, fw_event);
3491 	fw_event_work_put(fw_event);
3492 }
3493 
3494 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3495 {
3496 	unsigned long flags;
3497 	struct fw_event_work *fw_event = NULL;
3498 
3499 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3500 	if (!list_empty(&ioc->fw_event_list)) {
3501 		fw_event = list_first_entry(&ioc->fw_event_list,
3502 				struct fw_event_work, list);
3503 		list_del_init(&fw_event->list);
3504 	}
3505 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3506 
3507 	return fw_event;
3508 }
3509 
3510 /**
3511  * _scsih_fw_event_cleanup_queue - cleanup event queue
3512  * @ioc: per adapter object
3513  *
3514  * Walk the firmware event queue, either killing timers, or waiting
3515  * for outstanding events to complete
3516  */
3517 static void
3518 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3519 {
3520 	struct fw_event_work *fw_event;
3521 
3522 	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3523 	     !ioc->firmware_event_thread || in_interrupt())
3524 		return;
3525 
3526 	ioc->fw_events_cleanup = 1;
3527 	while ((fw_event = dequeue_next_fw_event(ioc)) ||
3528 	     (fw_event = ioc->current_event)) {
3529 		/*
3530 		 * Wait on the fw_event to complete. If this returns 1, then
3531 		 * the event was never executed, and we need a put for the
3532 		 * reference the work had on the fw_event.
3533 		 *
3534 		 * If it did execute, we wait for it to finish, and the put will
3535 		 * happen from _firmware_event_work()
3536 		 */
3537 		if (cancel_work_sync(&fw_event->work))
3538 			fw_event_work_put(fw_event);
3539 
3540 		fw_event_work_put(fw_event);
3541 	}
3542 	ioc->fw_events_cleanup = 0;
3543 }
3544 
3545 /**
3546  * _scsih_internal_device_block - block the sdev device
3547  * @sdev: per device object
3548  * @sas_device_priv_data : per device driver private data
3549  *
3550  * Make sure the device is blocked without error; if not,
3551  * print an error.
3552  */
3553 static void
3554 _scsih_internal_device_block(struct scsi_device *sdev,
3555 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3556 {
3557 	int r = 0;
3558 
3559 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3560 	    sas_device_priv_data->sas_target->handle);
3561 	sas_device_priv_data->block = 1;
3562 
3563 	r = scsi_internal_device_block_nowait(sdev);
3564 	if (r == -EINVAL)
3565 		sdev_printk(KERN_WARNING, sdev,
3566 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3567 		    r, sas_device_priv_data->sas_target->handle);
3568 }
3569 
3570 /**
3571  * _scsih_internal_device_unblock - unblock the sdev device
3572  * @sdev: per device object
3573  * @sas_device_priv_data : per device driver private data
3574  * Make sure the device is unblocked without error; if not, retry
3575  * by blocking and then unblocking.
3576  */
3577 
3578 static void
3579 _scsih_internal_device_unblock(struct scsi_device *sdev,
3580 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3581 {
3582 	int r = 0;
3583 
3584 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3585 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3586 	sas_device_priv_data->block = 0;
3587 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3588 	if (r == -EINVAL) {
3589 		/* The device has been set to SDEV_RUNNING by SD layer during
3590 		 * device addition but the request queue is still stopped by
3591 		 * our earlier block call. We need to perform a block again
3592 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3593 
3594 		sdev_printk(KERN_WARNING, sdev,
3595 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3596 		    "performing a block followed by an unblock\n",
3597 		    r, sas_device_priv_data->sas_target->handle);
3598 		sas_device_priv_data->block = 1;
3599 		r = scsi_internal_device_block_nowait(sdev);
3600 		if (r)
3601 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3602 			    "failed with return(%d) for handle(0x%04x)\n",
3603 			    r, sas_device_priv_data->sas_target->handle);
3604 
3605 		sas_device_priv_data->block = 0;
3606 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3607 		if (r)
3608 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3609 			    " failed with return(%d) for handle(0x%04x)\n",
3610 			    r, sas_device_priv_data->sas_target->handle);
3611 	}
3612 }
3613 
3614 /**
3615  * _scsih_ublock_io_all_device - unblock every device
3616  * @ioc: per adapter object
3617  *
3618  * change the device state from block to running
3619  */
3620 static void
3621 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3622 {
3623 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3624 	struct scsi_device *sdev;
3625 
3626 	shost_for_each_device(sdev, ioc->shost) {
3627 		sas_device_priv_data = sdev->hostdata;
3628 		if (!sas_device_priv_data)
3629 			continue;
3630 		if (!sas_device_priv_data->block)
3631 			continue;
3632 
3633 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3634 			"device_running, handle(0x%04x)\n",
3635 		    sas_device_priv_data->sas_target->handle));
3636 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3637 	}
3638 }
3639 
3640 
3641 /**
3642  * _scsih_ublock_io_device - prepare device to be deleted
3643  * @ioc: per adapter object
3644  * @sas_address: sas address
3645  *
3646  * unblock then put device in offline state
3647  */
3648 static void
3649 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3650 {
3651 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3652 	struct scsi_device *sdev;
3653 
3654 	shost_for_each_device(sdev, ioc->shost) {
3655 		sas_device_priv_data = sdev->hostdata;
3656 		if (!sas_device_priv_data)
3657 			continue;
3658 		if (sas_device_priv_data->sas_target->sas_address
3659 		    != sas_address)
3660 			continue;
3661 		if (sas_device_priv_data->block)
3662 			_scsih_internal_device_unblock(sdev,
3663 				sas_device_priv_data);
3664 	}
3665 }
3666 
3667 /**
3668  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3669  * @ioc: per adapter object
3670  *
3671  * During device pull we need to appropriately set the sdev state.
3672  */
3673 static void
3674 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3675 {
3676 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3677 	struct scsi_device *sdev;
3678 
3679 	shost_for_each_device(sdev, ioc->shost) {
3680 		sas_device_priv_data = sdev->hostdata;
3681 		if (!sas_device_priv_data)
3682 			continue;
3683 		if (sas_device_priv_data->block)
3684 			continue;
3685 		if (sas_device_priv_data->ignore_delay_remove) {
3686 			sdev_printk(KERN_INFO, sdev,
3687 			"%s skip device_block for SES handle(0x%04x)\n",
3688 			__func__, sas_device_priv_data->sas_target->handle);
3689 			continue;
3690 		}
3691 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3692 	}
3693 }
3694 
3695 /**
3696  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3697  * @ioc: per adapter object
3698  * @handle: device handle
3699  *
3700  * During device pull we need to appropriately set the sdev state.
3701  */
3702 static void
3703 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3704 {
3705 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3706 	struct scsi_device *sdev;
3707 	struct _sas_device *sas_device;
3708 
3709 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3710 
3711 	shost_for_each_device(sdev, ioc->shost) {
3712 		sas_device_priv_data = sdev->hostdata;
3713 		if (!sas_device_priv_data)
3714 			continue;
3715 		if (sas_device_priv_data->sas_target->handle != handle)
3716 			continue;
3717 		if (sas_device_priv_data->block)
3718 			continue;
3719 		if (sas_device && sas_device->pend_sas_rphy_add)
3720 			continue;
3721 		if (sas_device_priv_data->ignore_delay_remove) {
3722 			sdev_printk(KERN_INFO, sdev,
3723 			"%s skip device_block for SES handle(0x%04x)\n",
3724 			__func__, sas_device_priv_data->sas_target->handle);
3725 			continue;
3726 		}
3727 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3728 	}
3729 
3730 	if (sas_device)
3731 		sas_device_put(sas_device);
3732 }
3733 
3734 /**
3735  * _scsih_block_io_to_children_attached_to_ex
3736  * @ioc: per adapter object
3737  * @sas_expander: the sas_device object
3738  *
3739  * This routine sets the sdev state to SDEV_BLOCK for all devices
3740  * attached to this expander. This function is called when the expander
3741  * is pulled.
3742  */
3743 static void
3744 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3745 	struct _sas_node *sas_expander)
3746 {
3747 	struct _sas_port *mpt3sas_port;
3748 	struct _sas_device *sas_device;
3749 	struct _sas_node *expander_sibling;
3750 	unsigned long flags;
3751 
3752 	if (!sas_expander)
3753 		return;
3754 
3755 	list_for_each_entry(mpt3sas_port,
3756 	   &sas_expander->sas_port_list, port_list) {
3757 		if (mpt3sas_port->remote_identify.device_type ==
3758 		    SAS_END_DEVICE) {
3759 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3760 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3761 			    mpt3sas_port->remote_identify.sas_address);
3762 			if (sas_device) {
3763 				set_bit(sas_device->handle,
3764 						ioc->blocking_handles);
3765 				sas_device_put(sas_device);
3766 			}
3767 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3768 		}
3769 	}
3770 
3771 	list_for_each_entry(mpt3sas_port,
3772 	   &sas_expander->sas_port_list, port_list) {
3773 
3774 		if (mpt3sas_port->remote_identify.device_type ==
3775 		    SAS_EDGE_EXPANDER_DEVICE ||
3776 		    mpt3sas_port->remote_identify.device_type ==
3777 		    SAS_FANOUT_EXPANDER_DEVICE) {
3778 			expander_sibling =
3779 			    mpt3sas_scsih_expander_find_by_sas_address(
3780 			    ioc, mpt3sas_port->remote_identify.sas_address);
3781 			_scsih_block_io_to_children_attached_to_ex(ioc,
3782 			    expander_sibling);
3783 		}
3784 	}
3785 }
3786 
3787 /**
3788  * _scsih_block_io_to_children_attached_directly
3789  * @ioc: per adapter object
3790  * @event_data: topology change event data
3791  *
3792  * This routine sets the sdev state to SDEV_BLOCK for all directly
3793  * attached devices during a device pull.
3794  */
3795 static void
3796 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3797 	Mpi2EventDataSasTopologyChangeList_t *event_data)
3798 {
3799 	int i;
3800 	u16 handle;
3801 	u16 reason_code;
3802 
3803 	for (i = 0; i < event_data->NumEntries; i++) {
3804 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3805 		if (!handle)
3806 			continue;
3807 		reason_code = event_data->PHY[i].PhyStatus &
3808 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
3809 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3810 			_scsih_block_io_device(ioc, handle);
3811 	}
3812 }
3813 
3814 /**
3815  * _scsih_block_io_to_pcie_children_attached_directly
3816  * @ioc: per adapter object
3817  * @event_data: topology change event data
3818  *
3819  * This routine sets the sdev state to SDEV_BLOCK for all directly
3820  * attached devices during a device pull/reconnect.
3821  */
3822 static void
3823 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3824 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3825 {
3826 	int i;
3827 	u16 handle;
3828 	u16 reason_code;
3829 
3830 	for (i = 0; i < event_data->NumEntries; i++) {
3831 		handle =
3832 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3833 		if (!handle)
3834 			continue;
3835 		reason_code = event_data->PortEntry[i].PortStatus;
3836 		if (reason_code ==
3837 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3838 			_scsih_block_io_device(ioc, handle);
3839 	}
3840 }
3841 /**
3842  * _scsih_tm_tr_send - send task management request
3843  * @ioc: per adapter object
3844  * @handle: device handle
3845  * Context: interrupt time.
3846  *
3847  * This code is to initiate the device removal handshake protocol
3848  * with controller firmware.  This function will issue target reset
3849  * using high priority request queue.  It will send a sas iounit
3850  * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
3851  *
3852  * This is designed to send multiple task management requests at the same
3853  * time to the fifo. If the fifo is full, we will append the request,
3854  * and process it in a future completion.
3855  */
3856 static void
3857 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3858 {
3859 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3860 	u16 smid;
3861 	struct _sas_device *sas_device = NULL;
3862 	struct _pcie_device *pcie_device = NULL;
3863 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3864 	u64 sas_address = 0;
3865 	unsigned long flags;
3866 	struct _tr_list *delayed_tr;
3867 	u32 ioc_state;
3868 	u8 tr_method = 0;
3869 
3870 	if (ioc->pci_error_recovery) {
3871 		dewtprintk(ioc,
3872 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3873 				    __func__, handle));
3874 		return;
3875 	}
3876 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3877 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3878 		dewtprintk(ioc,
3879 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3880 				    __func__, handle));
3881 		return;
3882 	}
3883 
3884 	/* if PD, then return */
3885 	if (test_bit(handle, ioc->pd_handles))
3886 		return;
3887 
3888 	clear_bit(handle, ioc->pend_os_device_add);
3889 
3890 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
3891 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3892 	if (sas_device && sas_device->starget &&
3893 	    sas_device->starget->hostdata) {
3894 		sas_target_priv_data = sas_device->starget->hostdata;
3895 		sas_target_priv_data->deleted = 1;
3896 		sas_address = sas_device->sas_address;
3897 	}
3898 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3899 	if (!sas_device) {
3900 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3901 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3902 		if (pcie_device && pcie_device->starget &&
3903 			pcie_device->starget->hostdata) {
3904 			sas_target_priv_data = pcie_device->starget->hostdata;
3905 			sas_target_priv_data->deleted = 1;
3906 			sas_address = pcie_device->wwid;
3907 		}
3908 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3909 		if (pcie_device && (!ioc->tm_custom_handling) &&
3910 		    (!(mpt3sas_scsih_is_pcie_scsi_device(
3911 		    pcie_device->device_info))))
3912 			tr_method =
3913 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3914 		else
3915 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3916 	}
3917 	if (sas_target_priv_data) {
3918 		dewtprintk(ioc,
3919 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3920 				    handle, (u64)sas_address));
3921 		if (sas_device) {
3922 			if (sas_device->enclosure_handle != 0)
3923 				dewtprintk(ioc,
3924 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3925 						    (u64)sas_device->enclosure_logical_id,
3926 						    sas_device->slot));
3927 			if (sas_device->connector_name[0] != '\0')
3928 				dewtprintk(ioc,
3929 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3930 						    sas_device->enclosure_level,
3931 						    sas_device->connector_name));
3932 		} else if (pcie_device) {
3933 			if (pcie_device->enclosure_handle != 0)
3934 				dewtprintk(ioc,
3935 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3936 						    (u64)pcie_device->enclosure_logical_id,
3937 						    pcie_device->slot));
3938 			if (pcie_device->connector_name[0] != '\0')
3939 				dewtprintk(ioc,
3940 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3941 						    pcie_device->enclosure_level,
3942 						    pcie_device->connector_name));
3943 		}
3944 		_scsih_ublock_io_device(ioc, sas_address);
3945 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3946 	}
3947 
3948 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3949 	if (!smid) {
3950 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3951 		if (!delayed_tr)
3952 			goto out;
3953 		INIT_LIST_HEAD(&delayed_tr->list);
3954 		delayed_tr->handle = handle;
3955 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3956 		dewtprintk(ioc,
3957 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3958 				    handle));
3959 		goto out;
3960 	}
3961 
3962 	dewtprintk(ioc,
3963 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3964 			    handle, smid, ioc->tm_tr_cb_idx));
3965 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3966 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3967 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3968 	mpi_request->DevHandle = cpu_to_le16(handle);
3969 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3970 	mpi_request->MsgFlags = tr_method;
3971 	set_bit(handle, ioc->device_remove_in_progress);
3972 	ioc->put_smid_hi_priority(ioc, smid, 0);
3973 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3974 
3975 out:
3976 	if (sas_device)
3977 		sas_device_put(sas_device);
3978 	if (pcie_device)
3979 		pcie_device_put(pcie_device);
3980 }
3981 
3982 /**
3983  * _scsih_tm_tr_complete - target reset completion routine
3984  * @ioc: per adapter object
3985  * @smid: system request message index
3986  * @msix_index: MSIX table index supplied by the OS
3987  * @reply: reply message frame(lower 32bit addr)
3988  * Context: interrupt time.
3989  *
3990  * This is the target reset completion routine.
3991  * This code is part of the code to initiate the device removal
3992  * handshake protocol with controller firmware.
3993  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3994  *
3995  * Return: 1 meaning mf should be freed from _base_interrupt
3996  *         0 means the mf is freed from this function.
3997  */
3998 static u8
3999 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4000 	u32 reply)
4001 {
4002 	u16 handle;
4003 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4004 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4005 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4006 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4007 	u16 smid_sas_ctrl;
4008 	u32 ioc_state;
4009 	struct _sc_list *delayed_sc;
4010 
4011 	if (ioc->pci_error_recovery) {
4012 		dewtprintk(ioc,
4013 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4014 				    __func__));
4015 		return 1;
4016 	}
4017 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4018 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4019 		dewtprintk(ioc,
4020 			   ioc_info(ioc, "%s: host is not operational\n",
4021 				    __func__));
4022 		return 1;
4023 	}
4024 	if (unlikely(!mpi_reply)) {
4025 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4026 			__FILE__, __LINE__, __func__);
4027 		return 1;
4028 	}
4029 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4030 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4031 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4032 		dewtprintk(ioc,
4033 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4034 				   handle,
4035 				   le16_to_cpu(mpi_reply->DevHandle), smid));
4036 		return 0;
4037 	}
4038 
4039 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4040 	dewtprintk(ioc,
4041 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4042 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4043 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4044 			    le32_to_cpu(mpi_reply->TerminationCount)));
4045 
4046 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4047 	if (!smid_sas_ctrl) {
4048 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4049 		if (!delayed_sc)
4050 			return _scsih_check_for_pending_tm(ioc, smid);
4051 		INIT_LIST_HEAD(&delayed_sc->list);
4052 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4053 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4054 		dewtprintk(ioc,
4055 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4056 				    handle));
4057 		return _scsih_check_for_pending_tm(ioc, smid);
4058 	}
4059 
4060 	dewtprintk(ioc,
4061 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4062 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4063 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4064 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4065 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4066 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4067 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
4068 	ioc->put_smid_default(ioc, smid_sas_ctrl);
4069 
4070 	return _scsih_check_for_pending_tm(ioc, smid);
4071 }
4072 
4073 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4074  *				 be issued to the IOC or not.
4075  * @ioc: per adapter object
4076  * @scmd: pointer to scsi command object
4077  *
4078  * Returns true if scmd can be issued to IOC otherwise returns false.
4079  */
4080 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4081 	struct scsi_cmnd *scmd)
4082 {
4083 
4084 	if (ioc->pci_error_recovery)
4085 		return false;
4086 
4087 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4088 		if (ioc->remove_host)
4089 			return false;
4090 
4091 		return true;
4092 	}
4093 
4094 	if (ioc->remove_host) {
4095 
4096 		switch (scmd->cmnd[0]) {
4097 		case SYNCHRONIZE_CACHE:
4098 		case START_STOP:
4099 			return true;
4100 		default:
4101 			return false;
4102 		}
4103 	}
4104 
4105 	return true;
4106 }
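
/*
 * Illustrative sketch, not part of this excerpt: a typical caller of
 * _scsih_allow_scmd_to_device() is the queuecommand path, which
 * completes disallowed commands immediately with DID_NO_CONNECT rather
 * than sending them to the IOC.  The fragment below is a hypothetical
 * caller-side check following that pattern.
 */
#if 0
	if (!_scsih_allow_scmd_to_device(ioc, scmd)) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}
#endif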
4107 
4108 /**
4109  * _scsih_sas_control_complete - completion routine
4110  * @ioc: per adapter object
4111  * @smid: system request message index
4112  * @msix_index: MSIX table index supplied by the OS
4113  * @reply: reply message frame(lower 32bit addr)
4114  * Context: interrupt time.
4115  *
4116  * This is the sas iounit control completion routine.
4117  * This code is part of the code to initiate the device removal
4118  * handshake protocol with controller firmware.
4119  *
4120  * Return: 1 meaning mf should be freed from _base_interrupt
4121  *         0 means the mf is freed from this function.
4122  */
4123 static u8
4124 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4125 	u8 msix_index, u32 reply)
4126 {
4127 	Mpi2SasIoUnitControlReply_t *mpi_reply =
4128 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4129 
4130 	if (likely(mpi_reply)) {
4131 		dewtprintk(ioc,
4132 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4133 				    le16_to_cpu(mpi_reply->DevHandle), smid,
4134 				    le16_to_cpu(mpi_reply->IOCStatus),
4135 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
4136 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
4137 		     MPI2_IOCSTATUS_SUCCESS) {
4138 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4139 			    ioc->device_remove_in_progress);
4140 		}
4141 	} else {
4142 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4143 			__FILE__, __LINE__, __func__);
4144 	}
4145 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4146 }
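
/*
 * Summary of the device-removal handshake implemented by the routines
 * above, pieced together from this file (no new behavior is implied):
 *
 *   topology event (device not responding)
 *     -> _scsih_tm_tr_send()           issues TARGET_RESET on the
 *                                      high-priority queue
 *     -> _scsih_tm_tr_complete()       on reply, sends a SAS IO unit
 *                                      control request with
 *                                      MPI2_SAS_OP_REMOVE_DEVICE
 *     -> _scsih_sas_control_complete() on success, clears the handle in
 *                                      ioc->device_remove_in_progress
 *
 * When no free smid is available at either step, the request is parked
 * on ioc->delayed_tr_list or ioc->delayed_sc_list and replayed later
 * from _scsih_check_for_pending_tm() or
 * mpt3sas_check_for_pending_internal_cmds().
 */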
4147 
4148 /**
4149  * _scsih_tm_tr_volume_send - send target reset request for volumes
4150  * @ioc: per adapter object
4151  * @handle: device handle
4152  * Context: interrupt time.
4153  *
4154  * This is designed to send multiple task management requests at the same
4155  * time to the fifo. If the fifo is full, we will append the request,
4156  * and process it in a future completion.
4157  */
4158 static void
4159 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4160 {
4161 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4162 	u16 smid;
4163 	struct _tr_list *delayed_tr;
4164 
4165 	if (ioc->pci_error_recovery) {
4166 		dewtprintk(ioc,
4167 			   ioc_info(ioc, "%s: host reset in progress!\n",
4168 				    __func__));
4169 		return;
4170 	}
4171 
4172 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4173 	if (!smid) {
4174 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4175 		if (!delayed_tr)
4176 			return;
4177 		INIT_LIST_HEAD(&delayed_tr->list);
4178 		delayed_tr->handle = handle;
4179 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4180 		dewtprintk(ioc,
4181 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4182 				    handle));
4183 		return;
4184 	}
4185 
4186 	dewtprintk(ioc,
4187 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4188 			    handle, smid, ioc->tm_tr_volume_cb_idx));
4189 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4190 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4191 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4192 	mpi_request->DevHandle = cpu_to_le16(handle);
4193 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4194 	ioc->put_smid_hi_priority(ioc, smid, 0);
4195 }
4196 
4197 /**
4198  * _scsih_tm_volume_tr_complete - target reset completion
4199  * @ioc: per adapter object
4200  * @smid: system request message index
4201  * @msix_index: MSIX table index supplied by the OS
4202  * @reply: reply message frame(lower 32bit addr)
4203  * Context: interrupt time.
4204  *
4205  * Return: 1 meaning mf should be freed from _base_interrupt
4206  *         0 means the mf is freed from this function.
4207  */
4208 static u8
4209 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4210 	u8 msix_index, u32 reply)
4211 {
4212 	u16 handle;
4213 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4214 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4215 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4216 
4217 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
4218 		dewtprintk(ioc,
4219 			   ioc_info(ioc, "%s: host reset in progress!\n",
4220 				    __func__));
4221 		return 1;
4222 	}
4223 	if (unlikely(!mpi_reply)) {
4224 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4225 			__FILE__, __LINE__, __func__);
4226 		return 1;
4227 	}
4228 
4229 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4230 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4231 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4232 		dewtprintk(ioc,
4233 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4234 				   handle, le16_to_cpu(mpi_reply->DevHandle),
4235 				   smid));
4236 		return 0;
4237 	}
4238 
4239 	dewtprintk(ioc,
4240 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4241 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4242 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4243 			    le32_to_cpu(mpi_reply->TerminationCount)));
4244 
4245 	return _scsih_check_for_pending_tm(ioc, smid);
4246 }
4247 
4248 /**
4249  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4250  * @ioc: per adapter object
4251  * @smid: system request message index
4252  * @event: Event ID
4253  * @event_context: used to track events uniquely
4254  *
4255  * Context - processed in interrupt context.
4256  */
4257 static void
4258 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4259 				U32 event_context)
4260 {
4261 	Mpi2EventAckRequest_t *ack_request;
4262 	int i = smid - ioc->internal_smid;
4263 	unsigned long flags;
4264 
4265 	/* Without releasing the smid just update the
4266 	 * call back index and reuse the same smid for
4267 	 * processing this delayed request
4268 	 */
4269 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4270 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4271 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4272 
4273 	dewtprintk(ioc,
4274 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4275 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
4276 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4277 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4278 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4279 	ack_request->Event = event;
4280 	ack_request->EventContext = event_context;
4281 	ack_request->VF_ID = 0;  /* TODO */
4282 	ack_request->VP_ID = 0;
4283 	ioc->put_smid_default(ioc, smid);
4284 }
4285 
4286 /**
4287  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4288  *				sas_io_unit_ctrl messages
4289  * @ioc: per adapter object
4290  * @smid: system request message index
4291  * @handle: device handle
4292  *
4293  * Context - processed in interrupt context.
4294  */
4295 static void
4296 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4297 					u16 smid, u16 handle)
4298 {
4299 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4300 	u32 ioc_state;
4301 	int i = smid - ioc->internal_smid;
4302 	unsigned long flags;
4303 
4304 	if (ioc->remove_host) {
4305 		dewtprintk(ioc,
4306 			   ioc_info(ioc, "%s: host has been removed\n",
4307 				    __func__));
4308 		return;
4309 	} else if (ioc->pci_error_recovery) {
4310 		dewtprintk(ioc,
4311 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4312 				    __func__));
4313 		return;
4314 	}
4315 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4316 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4317 		dewtprintk(ioc,
4318 			   ioc_info(ioc, "%s: host is not operational\n",
4319 				    __func__));
4320 		return;
4321 	}
4322 
4323 	/* Without releasing the smid just update the
4324 	 * call back index and reuse the same smid for
4325 	 * processing this delayed request
4326 	 */
4327 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4328 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4329 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4330 
4331 	dewtprintk(ioc,
4332 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4333 			    handle, smid, ioc->tm_sas_control_cb_idx));
4334 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4335 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4336 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4337 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4338 	mpi_request->DevHandle = cpu_to_le16(handle);
4339 	ioc->put_smid_default(ioc, smid);
4340 }
4341 
4342 /**
4343  * _scsih_check_for_pending_internal_cmds - check for pending internal messages
4344  * @ioc: per adapter object
4345  * @smid: system request message index
4346  *
4347  * Context: Executed in interrupt context
4348  *
4349  * This will check delayed internal messages list, and process the
4350  * next request.
4351  *
4352  * Return: 1 meaning mf should be freed from _base_interrupt
4353  *         0 means the mf is freed from this function.
4354  */
4355 u8
4356 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4357 {
4358 	struct _sc_list *delayed_sc;
4359 	struct _event_ack_list *delayed_event_ack;
4360 
4361 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4362 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4363 						struct _event_ack_list, list);
4364 		_scsih_issue_delayed_event_ack(ioc, smid,
4365 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4366 		list_del(&delayed_event_ack->list);
4367 		kfree(delayed_event_ack);
4368 		return 0;
4369 	}
4370 
4371 	if (!list_empty(&ioc->delayed_sc_list)) {
4372 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4373 						struct _sc_list, list);
4374 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4375 						 delayed_sc->handle);
4376 		list_del(&delayed_sc->list);
4377 		kfree(delayed_sc);
4378 		return 0;
4379 	}
4380 	return 1;
4381 }
4382 
4383 /**
4384  * _scsih_check_for_pending_tm - check for pending task management
4385  * @ioc: per adapter object
4386  * @smid: system request message index
4387  *
4388  * This will check delayed target reset list, and feed the
4389  * next request.
4390  *
4391  * Return: 1 meaning mf should be freed from _base_interrupt
4392  *         0 means the mf is freed from this function.
4393  */
4394 static u8
4395 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4396 {
4397 	struct _tr_list *delayed_tr;
4398 
4399 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4400 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4401 		    struct _tr_list, list);
4402 		mpt3sas_base_free_smid(ioc, smid);
4403 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4404 		list_del(&delayed_tr->list);
4405 		kfree(delayed_tr);
4406 		return 0;
4407 	}
4408 
4409 	if (!list_empty(&ioc->delayed_tr_list)) {
4410 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4411 		    struct _tr_list, list);
4412 		mpt3sas_base_free_smid(ioc, smid);
4413 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4414 		list_del(&delayed_tr->list);
4415 		kfree(delayed_tr);
4416 		return 0;
4417 	}
4418 
4419 	return 1;
4420 }
4421 
4422 /**
4423  * _scsih_check_topo_delete_events - sanity check on topo events
4424  * @ioc: per adapter object
4425  * @event_data: the event data payload
4426  *
4427  * This routine was added to better handle cable breakers.
4428  *
4429  * This handles the case where driver receives multiple expander
4430  * add and delete events in a single shot.  When there is a delete event
4431  * the routine will void any pending add events waiting in the event queue.
4432  */
4433 static void
4434 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4435 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4436 {
4437 	struct fw_event_work *fw_event;
4438 	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4439 	u16 expander_handle;
4440 	struct _sas_node *sas_expander;
4441 	unsigned long flags;
4442 	int i, reason_code;
4443 	u16 handle;
4444 
4445 	for (i = 0 ; i < event_data->NumEntries; i++) {
4446 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4447 		if (!handle)
4448 			continue;
4449 		reason_code = event_data->PHY[i].PhyStatus &
4450 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4451 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4452 			_scsih_tm_tr_send(ioc, handle);
4453 	}
4454 
4455 	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4456 	if (expander_handle < ioc->sas_hba.num_phys) {
4457 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4458 		return;
4459 	}
4460 	if (event_data->ExpStatus ==
4461 	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4462 		/* put expander attached devices into blocking state */
4463 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
4464 		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4465 		    expander_handle);
4466 		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4467 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4468 		do {
4469 			handle = find_first_bit(ioc->blocking_handles,
4470 			    ioc->facts.MaxDevHandle);
4471 			if (handle < ioc->facts.MaxDevHandle)
4472 				_scsih_block_io_device(ioc, handle);
4473 		} while (test_and_clear_bit(handle, ioc->blocking_handles));
4474 	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4475 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4476 
4477 	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4478 		return;
4479 
4480 	/* mark ignore flag for pending events */
4481 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4482 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4483 		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4484 		    fw_event->ignore)
4485 			continue;
4486 		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4487 				   fw_event->event_data;
4488 		if (local_event_data->ExpStatus ==
4489 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4490 		    local_event_data->ExpStatus ==
4491 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4492 			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4493 			    expander_handle) {
4494 				dewtprintk(ioc,
4495 					   ioc_info(ioc, "setting ignoring flag\n"));
4496 				fw_event->ignore = 1;
4497 			}
4498 		}
4499 	}
4500 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4501 }
4502 
4503 /**
4504  * _scsih_check_pcie_topo_remove_events - sanity check on topo
4505  * events
4506  * @ioc: per adapter object
4507  * @event_data: the event data payload
4508  *
4509  * This handles the case where driver receives multiple switch
4510  * or device add and delete events in a single shot.  When there
4511  * is a delete event the routine will void any pending add
4512  * events waiting in the event queue.
4513  */
4514 static void
4515 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4516 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4517 {
4518 	struct fw_event_work *fw_event;
4519 	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4520 	unsigned long flags;
4521 	int i, reason_code;
4522 	u16 handle, switch_handle;
4523 
4524 	for (i = 0; i < event_data->NumEntries; i++) {
4525 		handle =
4526 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4527 		if (!handle)
4528 			continue;
4529 		reason_code = event_data->PortEntry[i].PortStatus;
4530 		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4531 			_scsih_tm_tr_send(ioc, handle);
4532 	}
4533 
4534 	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4535 	if (!switch_handle) {
4536 		_scsih_block_io_to_pcie_children_attached_directly(
4537 							ioc, event_data);
4538 		return;
4539 	}
4540 	/* TODO: We are not supporting cascaded PCIe Switch removal yet */
4541 	if ((event_data->SwitchStatus
4542 		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4543 		(event_data->SwitchStatus ==
4544 					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4545 		_scsih_block_io_to_pcie_children_attached_directly(
4546 							ioc, event_data);
4547 
4548 	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4549 		return;
4550 
4551 	/* mark ignore flag for pending events */
4552 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4553 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4554 		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4555 			fw_event->ignore)
4556 			continue;
4557 		local_event_data =
4558 			(Mpi26EventDataPCIeTopologyChangeList_t *)
4559 			fw_event->event_data;
4560 		if (local_event_data->SwitchStatus ==
4561 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4562 		    local_event_data->SwitchStatus ==
4563 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4564 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4565 				switch_handle) {
4566 				dewtprintk(ioc,
4567 					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
4568 				fw_event->ignore = 1;
4569 			}
4570 		}
4571 	}
4572 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4573 }
4574 
4575 /**
4576  * _scsih_set_volume_delete_flag - setting volume delete flag
4577  * @ioc: per adapter object
4578  * @handle: device handle
4579  *
4580  * This returns nothing.
4581  */
4582 static void
4583 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4584 {
4585 	struct _raid_device *raid_device;
4586 	struct MPT3SAS_TARGET *sas_target_priv_data;
4587 	unsigned long flags;
4588 
4589 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4590 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4591 	if (raid_device && raid_device->starget &&
4592 	    raid_device->starget->hostdata) {
4593 		sas_target_priv_data =
4594 		    raid_device->starget->hostdata;
4595 		sas_target_priv_data->deleted = 1;
4596 		dewtprintk(ioc,
4597 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4598 				    handle, (u64)raid_device->wwid));
4599 	}
4600 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4601 }
4602 
4603 /**
4604  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4605  * @handle: input handle
4606  * @a: handle for volume a
4607  * @b: handle for volume b
4608  *
4609  * IR firmware only supports two raid volumes.  The purpose of this
4610  * routine is to set the volume handle in either a or b: the handle is
4611  * stored only when it is non-zero and has not already been recorded.
4612  */
4613 static void
4614 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4615 {
4616 	if (!handle || handle == *a || handle == *b)
4617 		return;
4618 	if (!*a)
4619 		*a = handle;
4620 	else if (!*b)
4621 		*b = handle;
4622 }
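
/*
 * Worked example of _scsih_set_volume_handle_for_tr(), matching the
 * logic above (the handle values are hypothetical):
 *
 *   u16 a = 0, b = 0;
 *   _scsih_set_volume_handle_for_tr(0x011a, &a, &b);  a = 0x011a, b = 0
 *   _scsih_set_volume_handle_for_tr(0x011b, &a, &b);  a = 0x011a, b = 0x011b
 *   _scsih_set_volume_handle_for_tr(0x011a, &a, &b);  duplicate, ignored
 *   _scsih_set_volume_handle_for_tr(0x0000, &a, &b);  zero handle, ignored
 */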
4623 
4624 /**
4625  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4626  * @ioc: per adapter object
4627  * @event_data: the event data payload
4628  * Context: interrupt time.
4629  *
4630  * This routine will send target reset to volume, followed by target
4631  * resets to the PDs. This is called when a PD has been removed, or
4632  * volume has been deleted or removed. When the target reset is sent
4633  * to volume, the PD target resets need to be queued to start upon
4634  * completion of the volume target reset.
4635  */
4636 static void
4637 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4638 	Mpi2EventDataIrConfigChangeList_t *event_data)
4639 {
4640 	Mpi2EventIrConfigElement_t *element;
4641 	int i;
4642 	u16 handle, volume_handle, a, b;
4643 	struct _tr_list *delayed_tr;
4644 
4645 	a = 0;
4646 	b = 0;
4647 
4648 	if (ioc->is_warpdrive)
4649 		return;
4650 
4651 	/* Volume Resets for Deleted or Removed */
4652 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4653 	for (i = 0; i < event_data->NumElements; i++, element++) {
4654 		if (le32_to_cpu(event_data->Flags) &
4655 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4656 			continue;
4657 		if (element->ReasonCode ==
4658 		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4659 		    element->ReasonCode ==
4660 		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4661 			volume_handle = le16_to_cpu(element->VolDevHandle);
4662 			_scsih_set_volume_delete_flag(ioc, volume_handle);
4663 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4664 		}
4665 	}
4666 
4667 	/* Volume Resets for UNHIDE events */
4668 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4669 	for (i = 0; i < event_data->NumElements; i++, element++) {
4670 		if (le32_to_cpu(event_data->Flags) &
4671 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4672 			continue;
4673 		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4674 			volume_handle = le16_to_cpu(element->VolDevHandle);
4675 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4676 		}
4677 	}
4678 
4679 	if (a)
4680 		_scsih_tm_tr_volume_send(ioc, a);
4681 	if (b)
4682 		_scsih_tm_tr_volume_send(ioc, b);
4683 
4684 	/* PD target resets */
4685 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4686 	for (i = 0; i < event_data->NumElements; i++, element++) {
4687 		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4688 			continue;
4689 		handle = le16_to_cpu(element->PhysDiskDevHandle);
4690 		volume_handle = le16_to_cpu(element->VolDevHandle);
4691 		clear_bit(handle, ioc->pd_handles);
4692 		if (!volume_handle)
4693 			_scsih_tm_tr_send(ioc, handle);
4694 		else if (volume_handle == a || volume_handle == b) {
4695 			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4696 			BUG_ON(!delayed_tr);
4697 			INIT_LIST_HEAD(&delayed_tr->list);
4698 			delayed_tr->handle = handle;
4699 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4700 			dewtprintk(ioc,
4701 				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4702 					    handle));
4703 		} else
4704 			_scsih_tm_tr_send(ioc, handle);
4705 	}
4706 }
4707 
4708 
4709 /**
4710  * _scsih_check_volume_delete_events - set delete flag for volumes
4711  * @ioc: per adapter object
4712  * @event_data: the event data payload
4713  * Context: interrupt time.
4714  *
4715  * This will handle the case when the cable connected to entire volume is
4716  * pulled. We will take care of setting the deleted flag so normal IO will
4717  * not be sent.
4718  */
4719 static void
4720 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4721 	Mpi2EventDataIrVolume_t *event_data)
4722 {
4723 	u32 state;
4724 
4725 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4726 		return;
4727 	state = le32_to_cpu(event_data->NewValue);
4728 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4729 	    MPI2_RAID_VOL_STATE_FAILED)
4730 		_scsih_set_volume_delete_flag(ioc,
4731 		    le16_to_cpu(event_data->VolDevHandle));
4732 }
4733 
4734 /**
4735  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4736  * @ioc: per adapter object
4737  * @event_data: the temp threshold event data
4738  * Context: interrupt time.
4739  */
4740 static void
4741 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4742 	Mpi2EventDataTemperature_t *event_data)
4743 {
4744 	u32 doorbell;
4745 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4746 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4747 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4748 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4749 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4750 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4751 			event_data->SensorNum);
4752 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4753 			event_data->CurrentTemperature);
4754 		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4755 			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4756 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
4757 			    MPI2_IOC_STATE_FAULT) {
4758 				mpt3sas_print_fault_code(ioc,
4759 				    doorbell & MPI2_DOORBELL_DATA_MASK);
4760 			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4761 			    MPI2_IOC_STATE_COREDUMP) {
4762 				mpt3sas_print_coredump_info(ioc,
4763 				    doorbell & MPI2_DOORBELL_DATA_MASK);
4764 			}
4765 		}
4766 	}
4767 }
4768 
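/**
 * _scsih_set_satl_pending - serialize ATA passthrough (SATL) commands
 * @scmd: pointer to scsi command object
 * @pending: true when marking a new ATA_12/ATA_16 command as outstanding
 *
 * Only one ATA passthrough command is allowed per device at a time (see the
 * firmware SATL workaround in scsih_qcmd); non-ATA commands are ignored.
 *
 * Return: non-zero if another ATA passthrough command is already pending on
 *         the device, otherwise 0.
 */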
4769 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4770 {
4771 	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4772 
4773 	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4774 		return 0;
4775 
4776 	if (pending)
4777 		return test_and_set_bit(0, &priv->ata_command_pending);
4778 
4779 	clear_bit(0, &priv->ata_command_pending);
4780 	return 0;
4781 }
4782 
4783 /**
4784  * _scsih_flush_running_cmds - completing outstanding commands.
4785  * @ioc: per adapter object
4786  *
4787  * Flushes out all pending scmds following a host reset, where all
4788  * outstanding IO is dropped to the floor.
4789  */
4790 static void
4791 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4792 {
4793 	struct scsi_cmnd *scmd;
4794 	struct scsiio_tracker *st;
4795 	u16 smid;
4796 	int count = 0;
4797 
4798 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4799 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4800 		if (!scmd)
4801 			continue;
4802 		count++;
4803 		_scsih_set_satl_pending(scmd, false);
4804 		st = scsi_cmd_priv(scmd);
4805 		mpt3sas_base_clear_st(ioc, st);
4806 		scsi_dma_unmap(scmd);
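		/*
		 * If the host is going away or in PCI error recovery, fail
		 * the command outright; otherwise DID_RESET lets the
		 * midlayer retry it once the reset completes.
		 */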
4807 		if (ioc->pci_error_recovery || ioc->remove_host)
4808 			scmd->result = DID_NO_CONNECT << 16;
4809 		else
4810 			scmd->result = DID_RESET << 16;
4811 		scmd->scsi_done(scmd);
4812 	}
4813 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4814 }
4815 
4816 /**
4817  * _scsih_setup_eedp - setup MPI request for EEDP transfer
4818  * @ioc: per adapter object
4819  * @scmd: pointer to scsi command object
4820  * @mpi_request: pointer to the SCSI_IO request message frame
4821  *
4822  * Supports protection types 1 and 3 (type 2 is handled the same as type 1).
4823  */
4824 static void
4825 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4826 	Mpi25SCSIIORequest_t *mpi_request)
4827 {
4828 	u16 eedp_flags;
4829 	unsigned char prot_op = scsi_get_prot_op(scmd);
4830 	unsigned char prot_type = scsi_get_prot_type(scmd);
4831 	Mpi25SCSIIORequest_t *mpi_request_3v =
4832 	   (Mpi25SCSIIORequest_t *)mpi_request;
4833 
4834 	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4835 		return;
4836 
4837 	if (prot_op ==  SCSI_PROT_READ_STRIP)
4838 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4839 	else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
4840 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4841 	else
4842 		return;
4843 
4844 	switch (prot_type) {
4845 	case SCSI_PROT_DIF_TYPE1:
4846 	case SCSI_PROT_DIF_TYPE2:
4847 
4848 		/*
4849 		* enable ref/guard checking
4850 		* auto increment ref tag
4851 		*/
4852 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4853 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4854 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4855 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4856 		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
4857 		break;
4858 
4859 	case SCSI_PROT_DIF_TYPE3:
4860 
4861 		/*
4862 		* enable guard checking
4863 		*/
4864 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4865 
4866 		break;
4867 	}
4868 
4869 	mpi_request_3v->EEDPBlockSize =
4870 	    cpu_to_le16(scmd->device->sector_size);
4871 
4872 	if (ioc->is_gen35_ioc)
4873 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4874 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4875 }
4876 
4877 /**
4878  * _scsih_eedp_error_handling - return sense code for EEDP errors
4879  * @scmd: pointer to scsi command object
4880  * @ioc_status: ioc status
4881  */
4882 static void
4883 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4884 {
4885 	u8 ascq;
4886 
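	/*
	 * Map EEDP failures to ILLEGAL REQUEST with ASC 0x10 and the T10
	 * ASCQs: 0x01 guard check failed, 0x02 application tag check failed,
	 * 0x03 reference tag check failed.
	 */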
4887 	switch (ioc_status) {
4888 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4889 		ascq = 0x01;
4890 		break;
4891 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4892 		ascq = 0x02;
4893 		break;
4894 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4895 		ascq = 0x03;
4896 		break;
4897 	default:
4898 		ascq = 0x00;
4899 		break;
4900 	}
4901 	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4902 	    ascq);
4903 	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4904 	    SAM_STAT_CHECK_CONDITION;
4905 }
4906 
4907 /**
4908  * scsih_qcmd - main scsi request entry point
4909  * @shost: SCSI host pointer
4910  * @scmd: pointer to scsi command object
4911  *
4912  * The callback index is set inside `ioc->scsi_io_cb_idx`.
4913  *
4914  * Return: 0 on success.  If there's a failure, return either:
4915  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4916  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4917  */
4918 static int
4919 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4920 {
4921 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4922 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4923 	struct MPT3SAS_TARGET *sas_target_priv_data;
4924 	struct _raid_device *raid_device;
4925 	struct request *rq = scmd->request;
4926 	int class;
4927 	Mpi25SCSIIORequest_t *mpi_request;
4928 	struct _pcie_device *pcie_device = NULL;
4929 	u32 mpi_control;
4930 	u16 smid;
4931 	u16 handle;
4932 
4933 	if (ioc->logging_level & MPT_DEBUG_SCSI)
4934 		scsi_print_command(scmd);
4935 
4936 	sas_device_priv_data = scmd->device->hostdata;
4937 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4938 		scmd->result = DID_NO_CONNECT << 16;
4939 		scmd->scsi_done(scmd);
4940 		return 0;
4941 	}
4942 
4943 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4944 		scmd->result = DID_NO_CONNECT << 16;
4945 		scmd->scsi_done(scmd);
4946 		return 0;
4947 	}
4948 
4949 	sas_target_priv_data = sas_device_priv_data->sas_target;
4950 
4951 	/* invalid device handle */
4952 	handle = sas_target_priv_data->handle;
4953 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4954 		scmd->result = DID_NO_CONNECT << 16;
4955 		scmd->scsi_done(scmd);
4956 		return 0;
4957 	}
4958 
4959 
4960 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4961 		/* host recovery or link resets sent via IOCTLs */
4962 		return SCSI_MLQUEUE_HOST_BUSY;
4963 	} else if (sas_target_priv_data->deleted) {
4964 		/* device has been deleted */
4965 		scmd->result = DID_NO_CONNECT << 16;
4966 		scmd->scsi_done(scmd);
4967 		return 0;
4968 	} else if (sas_target_priv_data->tm_busy ||
4969 		   sas_device_priv_data->block) {
4970 		/* device busy with task management */
4971 		return SCSI_MLQUEUE_DEVICE_BUSY;
4972 	}
4973 
4974 	/*
4975 	 * Bug work around for firmware SATL handling.  The loop
4976 	 * is based on atomic operations and ensures consistency
4977 	 * since we're lockless at this point
4978 	 */
4979 	do {
4980 		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
4981 			return SCSI_MLQUEUE_DEVICE_BUSY;
4982 	} while (_scsih_set_satl_pending(scmd, true));
4983 
4984 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4985 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
4986 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4987 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4988 	else
4989 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4990 
4991 	/* set tags */
4992 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
4993 	/* NCQ Prio supported, make sure control indicated high priority */
4994 	if (sas_device_priv_data->ncq_prio_enable) {
4995 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4996 		if (class == IOPRIO_CLASS_RT)
4997 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4998 	}
4999 	/* Make sure Device is not raid volume.
5000 	 * We do not expose raid functionality to upper layer for warpdrive.
5001 	 */
5002 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5003 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
5004 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5005 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5006 
5007 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5008 	if (!smid) {
5009 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5010 		_scsih_set_satl_pending(scmd, false);
5011 		goto out;
5012 	}
5013 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5014 	memset(mpi_request, 0, ioc->request_sz);
5015 	_scsih_setup_eedp(ioc, scmd, mpi_request);
5016 
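	/*
	 * 32-byte CDBs: the value 4 here covers the 16 CDB bytes beyond the
	 * standard 16-byte CDB field (additional CDB length in 4-byte units).
	 */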
5017 	if (scmd->cmd_len == 32)
5018 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5019 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5020 	if (sas_device_priv_data->sas_target->flags &
5021 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
5022 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5023 	else
5024 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5025 	mpi_request->DevHandle = cpu_to_le16(handle);
5026 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5027 	mpi_request->Control = cpu_to_le32(mpi_control);
5028 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5029 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5030 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5031 	mpi_request->SenseBufferLowAddress =
5032 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5033 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5034 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5035 	    mpi_request->LUN);
5036 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5037 
5038 	if (mpi_request->DataLength) {
5039 		pcie_device = sas_target_priv_data->pcie_dev;
5040 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5041 			mpt3sas_base_free_smid(ioc, smid);
5042 			_scsih_set_satl_pending(scmd, false);
5043 			goto out;
5044 		}
5045 	} else
5046 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5047 
5048 	raid_device = sas_target_priv_data->raid_device;
5049 	if (raid_device && raid_device->direct_io_enabled)
5050 		mpt3sas_setup_direct_io(ioc, scmd,
5051 			raid_device, mpi_request);
5052 
5053 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5054 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5055 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5056 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5057 			ioc->put_smid_fast_path(ioc, smid, handle);
5058 		} else
5059 			ioc->put_smid_scsi_io(ioc, smid,
5060 			    le16_to_cpu(mpi_request->DevHandle));
5061 	} else
5062 		ioc->put_smid_default(ioc, smid);
5063 	return 0;
5064 
5065  out:
5066 	return SCSI_MLQUEUE_HOST_BUSY;
5067 }
5068 
5069 /**
5070  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5071  * @sense_buffer: sense data returned by target
5072  * @data: normalized skey/asc/ascq
5073  */
5074 static void
5075 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5076 {
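	/* response codes 0x72/0x73 indicate descriptor format sense data */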
5077 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
5078 		/* descriptor format */
5079 		data->skey = sense_buffer[1] & 0x0F;
5080 		data->asc = sense_buffer[2];
5081 		data->ascq = sense_buffer[3];
5082 	} else {
5083 		/* fixed format */
5084 		data->skey = sense_buffer[2] & 0x0F;
5085 		data->asc = sense_buffer[12];
5086 		data->ascq = sense_buffer[13];
5087 	}
5088 }
5089 
5090 /**
5091  * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
5092  * @ioc: per adapter object
5093  * @scmd: pointer to scsi command object
5094  * @mpi_reply: reply mf payload returned from firmware
5095  * @smid: system request message index
5096  *
5097  * scsi_status - SCSI Status code returned from target device
5098  * scsi_state - state info associated with SCSI_IO determined by ioc
5099  * ioc_status - ioc supplied status info
5100  */
5101 static void
5102 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5103 	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5104 {
5105 	u32 response_info;
5106 	u8 *response_bytes;
5107 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5108 	    MPI2_IOCSTATUS_MASK;
5109 	u8 scsi_state = mpi_reply->SCSIState;
5110 	u8 scsi_status = mpi_reply->SCSIStatus;
5111 	char *desc_ioc_state = NULL;
5112 	char *desc_scsi_status = NULL;
5113 	char *desc_scsi_state = ioc->tmp_string;
5114 	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5115 	struct _sas_device *sas_device = NULL;
5116 	struct _pcie_device *pcie_device = NULL;
5117 	struct scsi_target *starget = scmd->device->sdev_target;
5118 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5119 	char *device_str = NULL;
5120 
5121 	if (!priv_target)
5122 		return;
5123 	if (ioc->hide_ir_msg)
5124 		device_str = "WarpDrive";
5125 	else
5126 		device_str = "volume";
5127 
5128 	if (log_info == 0x31170000)
5129 		return;
5130 
5131 	switch (ioc_status) {
5132 	case MPI2_IOCSTATUS_SUCCESS:
5133 		desc_ioc_state = "success";
5134 		break;
5135 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5136 		desc_ioc_state = "invalid function";
5137 		break;
5138 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5139 		desc_ioc_state = "scsi recovered error";
5140 		break;
5141 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5142 		desc_ioc_state = "scsi invalid dev handle";
5143 		break;
5144 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5145 		desc_ioc_state = "scsi device not there";
5146 		break;
5147 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5148 		desc_ioc_state = "scsi data overrun";
5149 		break;
5150 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5151 		desc_ioc_state = "scsi data underrun";
5152 		break;
5153 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5154 		desc_ioc_state = "scsi io data error";
5155 		break;
5156 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5157 		desc_ioc_state = "scsi protocol error";
5158 		break;
5159 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5160 		desc_ioc_state = "scsi task terminated";
5161 		break;
5162 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5163 		desc_ioc_state = "scsi residual mismatch";
5164 		break;
5165 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5166 		desc_ioc_state = "scsi task mgmt failed";
5167 		break;
5168 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5169 		desc_ioc_state = "scsi ioc terminated";
5170 		break;
5171 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5172 		desc_ioc_state = "scsi ext terminated";
5173 		break;
5174 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5175 		desc_ioc_state = "eedp guard error";
5176 		break;
5177 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5178 		desc_ioc_state = "eedp ref tag error";
5179 		break;
5180 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5181 		desc_ioc_state = "eedp app tag error";
5182 		break;
5183 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5184 		desc_ioc_state = "insufficient power";
5185 		break;
5186 	default:
5187 		desc_ioc_state = "unknown";
5188 		break;
5189 	}
5190 
5191 	switch (scsi_status) {
5192 	case MPI2_SCSI_STATUS_GOOD:
5193 		desc_scsi_status = "good";
5194 		break;
5195 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
5196 		desc_scsi_status = "check condition";
5197 		break;
5198 	case MPI2_SCSI_STATUS_CONDITION_MET:
5199 		desc_scsi_status = "condition met";
5200 		break;
5201 	case MPI2_SCSI_STATUS_BUSY:
5202 		desc_scsi_status = "busy";
5203 		break;
5204 	case MPI2_SCSI_STATUS_INTERMEDIATE:
5205 		desc_scsi_status = "intermediate";
5206 		break;
5207 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5208 		desc_scsi_status = "intermediate condmet";
5209 		break;
5210 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5211 		desc_scsi_status = "reservation conflict";
5212 		break;
5213 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5214 		desc_scsi_status = "command terminated";
5215 		break;
5216 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
5217 		desc_scsi_status = "task set full";
5218 		break;
5219 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
5220 		desc_scsi_status = "aca active";
5221 		break;
5222 	case MPI2_SCSI_STATUS_TASK_ABORTED:
5223 		desc_scsi_status = "task aborted";
5224 		break;
5225 	default:
5226 		desc_scsi_status = "unknown";
5227 		break;
5228 	}
5229 
5230 	desc_scsi_state[0] = '\0';
5231 	if (!scsi_state)
5232 		desc_scsi_state = " ";
5233 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5234 		strcat(desc_scsi_state, "response info ");
5235 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5236 		strcat(desc_scsi_state, "state terminated ");
5237 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5238 		strcat(desc_scsi_state, "no status ");
5239 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5240 		strcat(desc_scsi_state, "autosense failed ");
5241 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5242 		strcat(desc_scsi_state, "autosense valid ");
5243 
5244 	scsi_print_command(scmd);
5245 
5246 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5247 		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5248 			 device_str, (u64)priv_target->sas_address);
5249 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5250 		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5251 		if (pcie_device) {
5252 			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5253 				 (u64)pcie_device->wwid, pcie_device->port_num);
5254 			if (pcie_device->enclosure_handle != 0)
5255 				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5256 					 (u64)pcie_device->enclosure_logical_id,
5257 					 pcie_device->slot);
5258 			if (pcie_device->connector_name[0])
5259 				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5260 					 pcie_device->enclosure_level,
5261 					 pcie_device->connector_name);
5262 			pcie_device_put(pcie_device);
5263 		}
5264 	} else {
5265 		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5266 		if (sas_device) {
5267 			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5268 				 (u64)sas_device->sas_address, sas_device->phy);
5269 
5270 			_scsih_display_enclosure_chassis_info(ioc, sas_device,
5271 			    NULL, NULL);
5272 
5273 			sas_device_put(sas_device);
5274 		}
5275 	}
5276 
5277 	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5278 		 le16_to_cpu(mpi_reply->DevHandle),
5279 		 desc_ioc_state, ioc_status, smid);
5280 	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5281 		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5282 	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5283 		 le16_to_cpu(mpi_reply->TaskTag),
5284 		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5285 	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5286 		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5287 
5288 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5289 		struct sense_info data;
5290 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5291 		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5292 			 data.skey, data.asc, data.ascq,
5293 			 le32_to_cpu(mpi_reply->SenseCount));
5294 	}
5295 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5296 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5297 		response_bytes = (u8 *)&response_info;
5298 		_scsih_response_code(ioc, response_bytes[0]);
5299 	}
5300 }
5301 
5302 /**
5303  * _scsih_turn_on_pfa_led - illuminate PFA LED
5304  * @ioc: per adapter object
5305  * @handle: device handle
5306  * Context: process
5307  */
5308 static void
5309 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5310 {
5311 	Mpi2SepReply_t mpi_reply;
5312 	Mpi2SepRequest_t mpi_request;
5313 	struct _sas_device *sas_device;
5314 
5315 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5316 	if (!sas_device)
5317 		return;
5318 
5319 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5320 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5321 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5322 	mpi_request.SlotStatus =
5323 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5324 	mpi_request.DevHandle = cpu_to_le16(handle);
5325 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5326 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5327 	    &mpi_request)) != 0) {
5328 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5329 			__FILE__, __LINE__, __func__);
5330 		goto out;
5331 	}
5332 	sas_device->pfa_led_on = 1;
5333 
5334 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5335 		dewtprintk(ioc,
5336 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5337 				    le16_to_cpu(mpi_reply.IOCStatus),
5338 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5339 		goto out;
5340 	}
5341 out:
5342 	sas_device_put(sas_device);
5343 }
5344 
5345 /**
5346  * _scsih_turn_off_pfa_led - turn off Fault LED
5347  * @ioc: per adapter object
5348  * @sas_device: sas device whose PFA LED has to be turned off
5349  * Context: process
5350  */
5351 static void
5352 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5353 	struct _sas_device *sas_device)
5354 {
5355 	Mpi2SepReply_t mpi_reply;
5356 	Mpi2SepRequest_t mpi_request;
5357 
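	/*
	 * The device may already be removed at this point, so address the
	 * slot through its enclosure handle and slot number instead of the
	 * (possibly stale) device handle.
	 */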
5358 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5359 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5360 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5361 	mpi_request.SlotStatus = 0;
5362 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5363 	mpi_request.DevHandle = 0;
5364 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5365 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5366 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5367 		&mpi_request)) != 0) {
5368 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5369 			__FILE__, __LINE__, __func__);
5370 		return;
5371 	}
5372 
5373 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5374 		dewtprintk(ioc,
5375 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5376 				    le16_to_cpu(mpi_reply.IOCStatus),
5377 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5378 		return;
5379 	}
5380 }
5381 
5382 /**
5383  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5384  * @ioc: per adapter object
5385  * @handle: device handle
5386  * Context: interrupt.
5387  */
5388 static void
5389 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5390 {
5391 	struct fw_event_work *fw_event;
5392 
5393 	fw_event = alloc_fw_event_work(0);
5394 	if (!fw_event)
5395 		return;
5396 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5397 	fw_event->device_handle = handle;
5398 	fw_event->ioc = ioc;
5399 	_scsih_fw_event_add(ioc, fw_event);
5400 	fw_event_work_put(fw_event);
5401 }
5402 
5403 /**
5404  * _scsih_smart_predicted_fault - process smart errors
5405  * @ioc: per adapter object
5406  * @handle: device handle
5407  * Context: interrupt.
5408  */
5409 static void
5410 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5411 {
5412 	struct scsi_target *starget;
5413 	struct MPT3SAS_TARGET *sas_target_priv_data;
5414 	Mpi2EventNotificationReply_t *event_reply;
5415 	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5416 	struct _sas_device *sas_device;
5417 	ssize_t sz;
5418 	unsigned long flags;
5419 
5420 	/* only handle non-raid devices */
5421 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5422 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5423 	if (!sas_device)
5424 		goto out_unlock;
5425 
5426 	starget = sas_device->starget;
5427 	sas_target_priv_data = starget->hostdata;
5428 
5429 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5430 	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5431 		goto out_unlock;
5432 
5433 	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5434 
5435 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5436 
5437 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5438 		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5439 
5440 	/* insert into event log */
5441 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5442 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5443 	event_reply = kzalloc(sz, GFP_ATOMIC);
5444 	if (!event_reply) {
5445 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5446 			__FILE__, __LINE__, __func__);
5447 		goto out;
5448 	}
5449 
5450 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5451 	event_reply->Event =
5452 	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5453 	event_reply->MsgLength = sz/4;
5454 	event_reply->EventDataLength =
5455 	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5456 	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5457 	    event_reply->EventData;
5458 	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
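	/* ASC 0x5D: failure prediction threshold exceeded */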
5459 	event_data->ASC = 0x5D;
5460 	event_data->DevHandle = cpu_to_le16(handle);
5461 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5462 	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5463 	kfree(event_reply);
5464 out:
5465 	if (sas_device)
5466 		sas_device_put(sas_device);
5467 	return;
5468 
5469 out_unlock:
5470 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5471 	goto out;
5472 }
5473 
5474 /**
5475  * _scsih_io_done - scsi request callback
5476  * @ioc: per adapter object
5477  * @smid: system request message index
5478  * @msix_index: MSIX table index supplied by the OS
5479  * @reply: reply message frame(lower 32bit addr)
5480  *
5481  * Callback handler when using scsih_qcmd.
5482  *
5483  * Return: 1 meaning mf should be freed from _base_interrupt
5484  *         0 means the mf is freed from this function.
5485  */
5486 static u8
5487 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5488 {
5489 	Mpi25SCSIIORequest_t *mpi_request;
5490 	Mpi2SCSIIOReply_t *mpi_reply;
5491 	struct scsi_cmnd *scmd;
5492 	struct scsiio_tracker *st;
5493 	u16 ioc_status;
5494 	u32 xfer_cnt;
5495 	u8 scsi_state;
5496 	u8 scsi_status;
5497 	u32 log_info;
5498 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5499 	u32 response_code = 0;
5500 
5501 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5502 
5503 	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5504 	if (scmd == NULL)
5505 		return 1;
5506 
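	/* command is completing; allow the next ATA passthrough to this device */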
5507 	_scsih_set_satl_pending(scmd, false);
5508 
5509 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5510 
5511 	if (mpi_reply == NULL) {
5512 		scmd->result = DID_OK << 16;
5513 		goto out;
5514 	}
5515 
5516 	sas_device_priv_data = scmd->device->hostdata;
5517 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5518 	     sas_device_priv_data->sas_target->deleted) {
5519 		scmd->result = DID_NO_CONNECT << 16;
5520 		goto out;
5521 	}
5522 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5523 
5524 	/*
5525 	 * WARPDRIVE: If direct_io is set then it is directIO,
5526 	 * the failed direct I/O should be redirected to volume
5527 	 */
5528 	st = scsi_cmd_priv(scmd);
5529 	if (st->direct_io &&
5530 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5531 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5532 		st->direct_io = 0;
5533 		st->scmd = scmd;
5534 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5535 		mpi_request->DevHandle =
5536 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5537 		ioc->put_smid_scsi_io(ioc, smid,
5538 		    sas_device_priv_data->sas_target->handle);
5539 		return 0;
5540 	}
5541 	/* turning off TLR */
5542 	scsi_state = mpi_reply->SCSIState;
5543 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5544 		response_code =
5545 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5546 	if (!sas_device_priv_data->tlr_snoop_check) {
5547 		sas_device_priv_data->tlr_snoop_check++;
5548 		if ((!ioc->is_warpdrive &&
5549 		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5550 		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5551 		    && sas_is_tlr_enabled(scmd->device) &&
5552 		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5553 			sas_disable_tlr(scmd->device);
5554 			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5555 		}
5556 	}
5557 
5558 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5559 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5560 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5561 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5562 	else
5563 		log_info = 0;
5564 	ioc_status &= MPI2_IOCSTATUS_MASK;
5565 	scsi_status = mpi_reply->SCSIStatus;
5566 
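	/*
	 * An underrun that transferred no data but carries a meaningful SCSI
	 * status (busy, reservation conflict, task set full) is treated as
	 * success at the IOC level so the SCSI status itself is returned to
	 * the midlayer.
	 */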
5567 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5568 	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5569 	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5570 	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5571 		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5572 	}
5573 
5574 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5575 		struct sense_info data;
5576 		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5577 		    smid);
5578 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5579 		    le32_to_cpu(mpi_reply->SenseCount));
5580 		memcpy(scmd->sense_buffer, sense_data, sz);
5581 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5582 		/* failure prediction threshold exceeded */
5583 		if (data.asc == 0x5D)
5584 			_scsih_smart_predicted_fault(ioc,
5585 			    le16_to_cpu(mpi_reply->DevHandle));
5586 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5587 
5588 		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5589 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5590 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5591 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5592 			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5593 	}
5594 	switch (ioc_status) {
5595 	case MPI2_IOCSTATUS_BUSY:
5596 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5597 		scmd->result = SAM_STAT_BUSY;
5598 		break;
5599 
5600 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5601 		scmd->result = DID_NO_CONNECT << 16;
5602 		break;
5603 
5604 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5605 		if (sas_device_priv_data->block) {
5606 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5607 			goto out;
5608 		}
5609 		if (log_info == 0x31110630) {
5610 			if (scmd->retries > 2) {
5611 				scmd->result = DID_NO_CONNECT << 16;
5612 				scsi_device_set_state(scmd->device,
5613 				    SDEV_OFFLINE);
5614 			} else {
5615 				scmd->result = DID_SOFT_ERROR << 16;
5616 				scmd->device->expecting_cc_ua = 1;
5617 			}
5618 			break;
5619 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5620 			scmd->result = DID_RESET << 16;
5621 			break;
5622 		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5623 		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5624 		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5625 			scmd->result = DID_RESET << 16;
5626 			break;
5627 		}
5628 		scmd->result = DID_SOFT_ERROR << 16;
5629 		break;
5630 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5631 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5632 		scmd->result = DID_RESET << 16;
5633 		break;
5634 
5635 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5636 		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5637 			scmd->result = DID_SOFT_ERROR << 16;
5638 		else
5639 			scmd->result = (DID_OK << 16) | scsi_status;
5640 		break;
5641 
5642 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5643 		scmd->result = (DID_OK << 16) | scsi_status;
5644 
5645 		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5646 			break;
5647 
5648 		if (xfer_cnt < scmd->underflow) {
5649 			if (scsi_status == SAM_STAT_BUSY)
5650 				scmd->result = SAM_STAT_BUSY;
5651 			else
5652 				scmd->result = DID_SOFT_ERROR << 16;
5653 		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5654 		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5655 			scmd->result = DID_SOFT_ERROR << 16;
5656 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5657 			scmd->result = DID_RESET << 16;
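		/*
		 * A REPORT LUNS that transferred no data is turned into a
		 * CHECK CONDITION with fixed-format sense (0x70): ILLEGAL
		 * REQUEST, ASC 0x20 (invalid command operation code).
		 */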
5658 		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5659 			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5660 			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5661 			scmd->result = (DRIVER_SENSE << 24) |
5662 			    SAM_STAT_CHECK_CONDITION;
5663 			scmd->sense_buffer[0] = 0x70;
5664 			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5665 			scmd->sense_buffer[12] = 0x20;
5666 			scmd->sense_buffer[13] = 0;
5667 		}
5668 		break;
5669 
5670 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5671 		scsi_set_resid(scmd, 0);
5672 		fallthrough;
5673 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5674 	case MPI2_IOCSTATUS_SUCCESS:
5675 		scmd->result = (DID_OK << 16) | scsi_status;
5676 		if (response_code ==
5677 		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5678 		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5679 		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5680 			scmd->result = DID_SOFT_ERROR << 16;
5681 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5682 			scmd->result = DID_RESET << 16;
5683 		break;
5684 
5685 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5686 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5687 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5688 		_scsih_eedp_error_handling(scmd, ioc_status);
5689 		break;
5690 
5691 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5692 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5693 	case MPI2_IOCSTATUS_INVALID_SGL:
5694 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5695 	case MPI2_IOCSTATUS_INVALID_FIELD:
5696 	case MPI2_IOCSTATUS_INVALID_STATE:
5697 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5698 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5699 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5700 	default:
5701 		scmd->result = DID_SOFT_ERROR << 16;
5702 		break;
5703 
5704 	}
5705 
5706 	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5707 		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5708 
5709  out:
5710 
5711 	scsi_dma_unmap(scmd);
5712 	mpt3sas_base_free_smid(ioc, smid);
5713 	scmd->scsi_done(scmd);
5714 	return 0;
5715 }
5716 
5717 /**
5718  * _scsih_sas_host_refresh - refreshing sas host object contents
5719  * @ioc: per adapter object
5720  * Context: user
5721  *
5722  * During port enable, the firmware sends topology events for every device.
5723  * It is possible that the handles have changed from the previous setting,
5724  * so this code keeps the handles updated when they change.
5725  */
5726 static void
5727 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5728 {
5729 	u16 sz;
5730 	u16 ioc_status;
5731 	int i;
5732 	Mpi2ConfigReply_t mpi_reply;
5733 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5734 	u16 attached_handle;
5735 	u8 link_rate;
5736 
5737 	dtmprintk(ioc,
5738 		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5739 			   (u64)ioc->sas_hba.sas_address));
5740 
5741 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5742 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
5743 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5744 	if (!sas_iounit_pg0) {
5745 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5746 			__FILE__, __LINE__, __func__);
5747 		return;
5748 	}
5749 
5750 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5751 	    sas_iounit_pg0, sz)) != 0)
5752 		goto out;
5753 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5754 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5755 		goto out;
5756 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5757 		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5758 		if (i == 0)
5759 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5760 			    PhyData[0].ControllerDevHandle);
5761 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5762 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5763 		    AttachedDevHandle);
5764 		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5765 			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5766 		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5767 		    attached_handle, i, link_rate);
5768 	}
5769  out:
5770 	kfree(sas_iounit_pg0);
5771 }
5772 
5773 /**
5774  * _scsih_sas_host_add - create sas host object
5775  * @ioc: per adapter object
5776  *
5777  * Creating host side data object, stored in ioc->sas_hba
5778  */
5779 static void
5780 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5781 {
5782 	int i;
5783 	Mpi2ConfigReply_t mpi_reply;
5784 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5785 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5786 	Mpi2SasPhyPage0_t phy_pg0;
5787 	Mpi2SasDevicePage0_t sas_device_pg0;
5788 	Mpi2SasEnclosurePage0_t enclosure_pg0;
5789 	u16 ioc_status;
5790 	u16 sz;
5791 	u8 device_missing_delay;
5792 	u8 num_phys;
5793 
5794 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5795 	if (!num_phys) {
5796 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5797 			__FILE__, __LINE__, __func__);
5798 		return;
5799 	}
5800 	ioc->sas_hba.phy = kcalloc(num_phys,
5801 	    sizeof(struct _sas_phy), GFP_KERNEL);
5802 	if (!ioc->sas_hba.phy) {
5803 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5804 			__FILE__, __LINE__, __func__);
5805 		goto out;
5806 	}
5807 	ioc->sas_hba.num_phys = num_phys;
5808 
5809 	/* sas_iounit page 0 */
5810 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5811 	    sizeof(Mpi2SasIOUnit0PhyData_t));
5812 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5813 	if (!sas_iounit_pg0) {
5814 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5815 			__FILE__, __LINE__, __func__);
5816 		return;
5817 	}
5818 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5819 	    sas_iounit_pg0, sz))) {
5820 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5821 			__FILE__, __LINE__, __func__);
5822 		goto out;
5823 	}
5824 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5825 	    MPI2_IOCSTATUS_MASK;
5826 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5827 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5828 			__FILE__, __LINE__, __func__);
5829 		goto out;
5830 	}
5831 
5832 	/* sas_iounit page 1 */
5833 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5834 	    sizeof(Mpi2SasIOUnit1PhyData_t));
5835 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5836 	if (!sas_iounit_pg1) {
5837 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5838 			__FILE__, __LINE__, __func__);
5839 		goto out;
5840 	}
5841 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5842 	    sas_iounit_pg1, sz))) {
5843 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5844 			__FILE__, __LINE__, __func__);
5845 		goto out;
5846 	}
5847 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5848 	    MPI2_IOCSTATUS_MASK;
5849 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5850 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5851 			__FILE__, __LINE__, __func__);
5852 		goto out;
5853 	}
5854 
5855 	ioc->io_missing_delay =
5856 	    sas_iounit_pg1->IODeviceMissingDelay;
5857 	device_missing_delay =
5858 	    sas_iounit_pg1->ReportDeviceMissingDelay;
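	/*
	 * ReportDeviceMissingDelay is either in seconds or in 16-second
	 * units, selected by the UNIT_16 flag.
	 */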
5859 	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5860 		ioc->device_missing_delay = (device_missing_delay &
5861 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5862 	else
5863 		ioc->device_missing_delay = device_missing_delay &
5864 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5865 
5866 	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5867 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5868 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5869 		    i))) {
5870 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5871 				__FILE__, __LINE__, __func__);
5872 			goto out;
5873 		}
5874 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5875 		    MPI2_IOCSTATUS_MASK;
5876 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5877 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5878 				__FILE__, __LINE__, __func__);
5879 			goto out;
5880 		}
5881 
5882 		if (i == 0)
5883 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5884 			    PhyData[0].ControllerDevHandle);
5885 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5886 		ioc->sas_hba.phy[i].phy_id = i;
5887 		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5888 		    phy_pg0, ioc->sas_hba.parent_dev);
5889 	}
5890 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5891 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5892 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5893 			__FILE__, __LINE__, __func__);
5894 		goto out;
5895 	}
5896 	ioc->sas_hba.enclosure_handle =
5897 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
5898 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5899 	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5900 		 ioc->sas_hba.handle,
5901 		 (u64)ioc->sas_hba.sas_address,
5902 		 ioc->sas_hba.num_phys);
5903 
5904 	if (ioc->sas_hba.enclosure_handle) {
5905 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5906 		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5907 		   ioc->sas_hba.enclosure_handle)))
5908 			ioc->sas_hba.enclosure_logical_id =
5909 			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5910 	}
5911 
5912  out:
5913 	kfree(sas_iounit_pg1);
5914 	kfree(sas_iounit_pg0);
5915 }
5916 
5917 /**
5918  * _scsih_expander_add -  creating expander object
5919  * @ioc: per adapter object
5920  * @handle: expander handle
5921  *
5922  * Creating expander object, stored in ioc->sas_expander_list.
5923  *
5924  * Return: 0 for success, else error.
5925  */
5926 static int
5927 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5928 {
5929 	struct _sas_node *sas_expander;
5930 	struct _enclosure_node *enclosure_dev;
5931 	Mpi2ConfigReply_t mpi_reply;
5932 	Mpi2ExpanderPage0_t expander_pg0;
5933 	Mpi2ExpanderPage1_t expander_pg1;
5934 	u32 ioc_status;
5935 	u16 parent_handle;
5936 	u64 sas_address, sas_address_parent = 0;
5937 	int i;
5938 	unsigned long flags;
5939 	struct _sas_port *mpt3sas_port = NULL;
5940 
5941 	int rc = 0;
5942 
5943 	if (!handle)
5944 		return -1;
5945 
5946 	if (ioc->shost_recovery || ioc->pci_error_recovery)
5947 		return -1;
5948 
5949 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5950 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5951 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5952 			__FILE__, __LINE__, __func__);
5953 		return -1;
5954 	}
5955 
5956 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5957 	    MPI2_IOCSTATUS_MASK;
5958 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5959 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5960 			__FILE__, __LINE__, __func__);
5961 		return -1;
5962 	}
5963 
5964 	/* handle out of order topology events */
5965 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5966 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5967 	    != 0) {
5968 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5969 			__FILE__, __LINE__, __func__);
5970 		return -1;
5971 	}
5972 	if (sas_address_parent != ioc->sas_hba.sas_address) {
5973 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
5974 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5975 		    sas_address_parent);
5976 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5977 		if (!sas_expander) {
5978 			rc = _scsih_expander_add(ioc, parent_handle);
5979 			if (rc != 0)
5980 				return rc;
5981 		}
5982 	}
5983 
5984 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5985 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
5986 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5987 	    sas_address);
5988 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5989 
5990 	if (sas_expander)
5991 		return 0;
5992 
5993 	sas_expander = kzalloc(sizeof(struct _sas_node),
5994 	    GFP_KERNEL);
5995 	if (!sas_expander) {
5996 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5997 			__FILE__, __LINE__, __func__);
5998 		return -1;
5999 	}
6000 
6001 	sas_expander->handle = handle;
6002 	sas_expander->num_phys = expander_pg0.NumPhys;
6003 	sas_expander->sas_address_parent = sas_address_parent;
6004 	sas_expander->sas_address = sas_address;
6005 
6006 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6007 		 handle, parent_handle,
6008 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
6009 
6010 	if (!sas_expander->num_phys)
6011 		goto out_fail;
6012 	sas_expander->phy = kcalloc(sas_expander->num_phys,
6013 	    sizeof(struct _sas_phy), GFP_KERNEL);
6014 	if (!sas_expander->phy) {
6015 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6016 			__FILE__, __LINE__, __func__);
6017 		rc = -1;
6018 		goto out_fail;
6019 	}
6020 
6021 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
6022 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6023 	    sas_address_parent);
6024 	if (!mpt3sas_port) {
6025 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6026 			__FILE__, __LINE__, __func__);
6027 		rc = -1;
6028 		goto out_fail;
6029 	}
6030 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6031 
6032 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
6033 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6034 		    &expander_pg1, i, handle))) {
6035 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6036 				__FILE__, __LINE__, __func__);
6037 			rc = -1;
6038 			goto out_fail;
6039 		}
6040 		sas_expander->phy[i].handle = handle;
6041 		sas_expander->phy[i].phy_id = i;
6042 
6043 		if ((mpt3sas_transport_add_expander_phy(ioc,
6044 		    &sas_expander->phy[i], expander_pg1,
6045 		    sas_expander->parent_dev))) {
6046 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6047 				__FILE__, __LINE__, __func__);
6048 			rc = -1;
6049 			goto out_fail;
6050 		}
6051 	}
6052 
6053 	if (sas_expander->enclosure_handle) {
6054 		enclosure_dev =
6055 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6056 						sas_expander->enclosure_handle);
6057 		if (enclosure_dev)
6058 			sas_expander->enclosure_logical_id =
6059 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6060 	}
6061 
6062 	_scsih_expander_node_add(ioc, sas_expander);
6063 	return 0;
6064 
6065  out_fail:
6066 
6067 	if (mpt3sas_port)
6068 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6069 		    sas_address_parent);
6070 	kfree(sas_expander);
6071 	return rc;
6072 }
6073 
6074 /**
6075  * mpt3sas_expander_remove - removing expander object
6076  * @ioc: per adapter object
6077  * @sas_address: expander sas_address
6078  */
6079 void
6080 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
6081 {
6082 	struct _sas_node *sas_expander;
6083 	unsigned long flags;
6084 
6085 	if (ioc->shost_recovery)
6086 		return;
6087 
6088 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6089 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6090 	    sas_address);
6091 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6092 	if (sas_expander)
6093 		_scsih_expander_node_remove(ioc, sas_expander);
6094 }
6095 
6096 /**
6097  * _scsih_done -  internal SCSI_IO callback handler.
6098  * @ioc: per adapter object
6099  * @smid: system request message index
6100  * @msix_index: MSIX table index supplied by the OS
6101  * @reply: reply message frame(lower 32bit addr)
6102  *
6103  * Callback handler when sending internally generated SCSI_IO.
6104  * The callback index passed is `ioc->scsih_cb_idx`
6105  *
6106  * Return: 1 meaning mf should be freed from _base_interrupt
6107  *         0 means the mf is freed from this function.
6108  */
6109 static u8
6110 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6111 {
6112 	MPI2DefaultReply_t *mpi_reply;
6113 
6114 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
6115 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
6116 		return 1;
6117 	if (ioc->scsih_cmds.smid != smid)
6118 		return 1;
6119 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
6120 	if (mpi_reply) {
6121 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
6122 		    mpi_reply->MsgLength*4);
6123 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
6124 	}
6125 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
6126 	complete(&ioc->scsih_cmds.done);
6127 	return 1;
6128 }
6129 
6130 
6131 
6132 
6133 #define MPT3_MAX_LUNS (255)
6134 
6135 
6136 /**
6137  * _scsih_check_access_status - check access flags
6138  * @ioc: per adapter object
6139  * @sas_address: sas address
6140  * @handle: sas device handle
6141  * @access_status: errors returned during discovery of the device
6142  *
6143  * Return: 0 for success, else failure
6144  */
6145 static u8
6146 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6147 	u16 handle, u8 access_status)
6148 {
6149 	u8 rc = 1;
6150 	char *desc = NULL;
6151 
6152 	switch (access_status) {
6153 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
6154 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
6155 		rc = 0;
6156 		break;
6157 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
6158 		desc = "sata capability failed";
6159 		break;
6160 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
6161 		desc = "sata affiliation conflict";
6162 		break;
6163 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
6164 		desc = "route not addressable";
6165 		break;
6166 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
6167 		desc = "smp error not addressable";
6168 		break;
6169 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
6170 		desc = "device blocked";
6171 		break;
6172 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
6173 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
6174 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
6175 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
6176 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
6177 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
6178 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
6179 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
6180 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
6181 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
6182 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
6183 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
6184 		desc = "sata initialization failed";
6185 		break;
6186 	default:
6187 		desc = "unknown";
6188 		break;
6189 	}
6190 
6191 	if (!rc)
6192 		return 0;
6193 
6194 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
6195 		desc, (u64)sas_address, handle);
6196 	return rc;
6197 }
6198 
6199 /**
6200  * _scsih_check_device - checking device responsiveness
6201  * @ioc: per adapter object
6202  * @parent_sas_address: sas address of parent expander or sas host
6203  * @handle: attached device handle
6204  * @phy_number: phy number
6205  * @link_rate: new link rate
6206  */
6207 static void
6208 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
6209 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
6210 {
6211 	Mpi2ConfigReply_t mpi_reply;
6212 	Mpi2SasDevicePage0_t sas_device_pg0;
6213 	struct _sas_device *sas_device;
6214 	struct _enclosure_node *enclosure_dev = NULL;
6215 	u32 ioc_status;
6216 	unsigned long flags;
6217 	u64 sas_address;
6218 	struct scsi_target *starget;
6219 	struct MPT3SAS_TARGET *sas_target_priv_data;
6220 	u32 device_info;
6221 
6222 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6223 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
6224 		return;
6225 
6226 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6227 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6228 		return;
6229 
6230 	/* wide port handling ~ we only need to handle the device once, for the
6231 	 * phy that is matched in sas device page zero
6232 	 */
6233 	if (phy_number != sas_device_pg0.PhyNum)
6234 		return;
6235 
6236 	/* check if this is end device */
6237 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6238 	if (!(_scsih_is_end_device(device_info)))
6239 		return;
6240 
6241 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
6242 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6243 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6244 	    sas_address);
6245 
6246 	if (!sas_device)
6247 		goto out_unlock;
6248 
6249 	if (unlikely(sas_device->handle != handle)) {
6250 		starget = sas_device->starget;
6251 		sas_target_priv_data = starget->hostdata;
6252 		starget_printk(KERN_INFO, starget,
6253 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
6254 			sas_device->handle, handle);
6255 		sas_target_priv_data->handle = handle;
6256 		sas_device->handle = handle;
6257 		if (le16_to_cpu(sas_device_pg0.Flags) &
6258 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6259 			sas_device->enclosure_level =
6260 				sas_device_pg0.EnclosureLevel;
6261 			memcpy(sas_device->connector_name,
6262 				sas_device_pg0.ConnectorName, 4);
6263 			sas_device->connector_name[4] = '\0';
6264 		} else {
6265 			sas_device->enclosure_level = 0;
6266 			sas_device->connector_name[0] = '\0';
6267 		}
6268 
6269 		sas_device->enclosure_handle =
6270 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
6271 		sas_device->is_chassis_slot_valid = 0;
6272 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
6273 						sas_device->enclosure_handle);
6274 		if (enclosure_dev) {
6275 			sas_device->enclosure_logical_id =
6276 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6277 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6278 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6279 				sas_device->is_chassis_slot_valid = 1;
6280 				sas_device->chassis_slot =
6281 					enclosure_dev->pg0.ChassisSlot;
6282 			}
6283 		}
6284 	}
6285 
6286 	/* check if device is present */
6287 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6288 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6289 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
6290 			handle);
6291 		goto out_unlock;
6292 	}
6293 
6294 	/* check if there were any issues with discovery */
6295 	if (_scsih_check_access_status(ioc, sas_address, handle,
6296 	    sas_device_pg0.AccessStatus))
6297 		goto out_unlock;
6298 
6299 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6300 	_scsih_ublock_io_device(ioc, sas_address);
6301 
6302 	if (sas_device)
6303 		sas_device_put(sas_device);
6304 	return;
6305 
6306 out_unlock:
6307 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6308 	if (sas_device)
6309 		sas_device_put(sas_device);
6310 }
6311 
6312 /**
6313  * _scsih_add_device -  creating sas device object
6314  * @ioc: per adapter object
6315  * @handle: sas device handle
6316  * @phy_num: phy number end device attached to
6317  * @is_pd: is this hidden raid component
6318  *
6319  * Creating end device object, stored in ioc->sas_device_list.
6320  *
6321  * Return: 0 for success, non-zero for failure.
6322  */
6323 static int
6324 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6325 	u8 is_pd)
6326 {
6327 	Mpi2ConfigReply_t mpi_reply;
6328 	Mpi2SasDevicePage0_t sas_device_pg0;
6329 	struct _sas_device *sas_device;
6330 	struct _enclosure_node *enclosure_dev = NULL;
6331 	u32 ioc_status;
6332 	u64 sas_address;
6333 	u32 device_info;
6334 
6335 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6336 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6337 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6338 			__FILE__, __LINE__, __func__);
6339 		return -1;
6340 	}
6341 
6342 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6343 	    MPI2_IOCSTATUS_MASK;
6344 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6345 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6346 			__FILE__, __LINE__, __func__);
6347 		return -1;
6348 	}
6349 
6350 	/* check if this is end device */
6351 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6352 	if (!(_scsih_is_end_device(device_info)))
6353 		return -1;
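	/*
	 * Flag the handle as awaiting OS device addition; the bit is cleared
	 * when the device turns out to already exist (below) or once it has
	 * been exposed to the OS.
	 */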
6354 	set_bit(handle, ioc->pend_os_device_add);
6355 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6356 
6357 	/* check if device is present */
6358 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6359 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6360 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
6361 			handle);
6362 		return -1;
6363 	}
6364 
6365 	/* check if there were any issues with discovery */
6366 	if (_scsih_check_access_status(ioc, sas_address, handle,
6367 	    sas_device_pg0.AccessStatus))
6368 		return -1;
6369 
6370 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
6371 					sas_address);
6372 	if (sas_device) {
6373 		clear_bit(handle, ioc->pend_os_device_add);
6374 		sas_device_put(sas_device);
6375 		return -1;
6376 	}
6377 
6378 	if (sas_device_pg0.EnclosureHandle) {
6379 		enclosure_dev =
6380 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6381 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
6382 		if (enclosure_dev == NULL)
6383 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6384 				 sas_device_pg0.EnclosureHandle);
6385 	}
6386 
6387 	sas_device = kzalloc(sizeof(struct _sas_device),
6388 	    GFP_KERNEL);
6389 	if (!sas_device) {
6390 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6391 			__FILE__, __LINE__, __func__);
6392 		return 0;
6393 	}
6394 
6395 	kref_init(&sas_device->refcount);
6396 	sas_device->handle = handle;
6397 	if (_scsih_get_sas_address(ioc,
6398 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
6399 	    &sas_device->sas_address_parent) != 0)
6400 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6401 			__FILE__, __LINE__, __func__);
6402 	sas_device->enclosure_handle =
6403 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
6404 	if (sas_device->enclosure_handle != 0)
6405 		sas_device->slot =
6406 		    le16_to_cpu(sas_device_pg0.Slot);
6407 	sas_device->device_info = device_info;
6408 	sas_device->sas_address = sas_address;
6409 	sas_device->phy = sas_device_pg0.PhyNum;
6410 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6411 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6412 
6413 	if (le16_to_cpu(sas_device_pg0.Flags)
6414 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6415 		sas_device->enclosure_level =
6416 			sas_device_pg0.EnclosureLevel;
6417 		memcpy(sas_device->connector_name,
6418 			sas_device_pg0.ConnectorName, 4);
6419 		sas_device->connector_name[4] = '\0';
6420 	} else {
6421 		sas_device->enclosure_level = 0;
6422 		sas_device->connector_name[0] = '\0';
6423 	}
6424 	/* get enclosure_logical_id & chassis_slot*/
6425 	sas_device->is_chassis_slot_valid = 0;
6426 	if (enclosure_dev) {
6427 		sas_device->enclosure_logical_id =
6428 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6429 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6430 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6431 			sas_device->is_chassis_slot_valid = 1;
6432 			sas_device->chassis_slot =
6433 					enclosure_dev->pg0.ChassisSlot;
6434 		}
6435 	}
6436 
6437 	/* get device name */
6438 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6439 
6440 	if (ioc->wait_for_discovery_to_complete)
6441 		_scsih_sas_device_init_add(ioc, sas_device);
6442 	else
6443 		_scsih_sas_device_add(ioc, sas_device);
6444 
6445 	sas_device_put(sas_device);
6446 	return 0;
6447 }
6448 
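/*
 * Illustrative sketch only, not called by the driver: both
 * _scsih_check_device() and _scsih_add_device() above copy the 4-byte
 * ConnectorName field from SAS Device Page 0 only when ENCL_LEVEL_VALID is
 * set, and NUL-terminate it because the page field is not a C string.  The
 * helper name and the char[5] destination type are hypothetical.
 */
static inline void
_scsih_example_copy_connector_name(char dst[5], const u8 *src)
{
	memcpy(dst, src, 4);	/* page field is exactly 4 bytes, no NUL */
	dst[4] = '\0';
}
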
6449 /**
6450  * _scsih_remove_device -  removing sas device object
6451  * @ioc: per adapter object
6452  * @sas_device: the sas_device object
6453  */
6454 static void
6455 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6456 	struct _sas_device *sas_device)
6457 {
6458 	struct MPT3SAS_TARGET *sas_target_priv_data;
6459 
6460 	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6461 	     (sas_device->pfa_led_on)) {
6462 		_scsih_turn_off_pfa_led(ioc, sas_device);
6463 		sas_device->pfa_led_on = 0;
6464 	}
6465 
6466 	dewtprintk(ioc,
6467 		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6468 			    __func__,
6469 			    sas_device->handle, (u64)sas_device->sas_address));
6470 
6471 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6472 	    NULL, NULL));
6473 
6474 	if (sas_device->starget && sas_device->starget->hostdata) {
6475 		sas_target_priv_data = sas_device->starget->hostdata;
6476 		sas_target_priv_data->deleted = 1;
6477 		_scsih_ublock_io_device(ioc, sas_device->sas_address);
6478 		sas_target_priv_data->handle =
6479 		     MPT3SAS_INVALID_DEVICE_HANDLE;
6480 	}
6481 
6482 	if (!ioc->hide_drives)
6483 		mpt3sas_transport_port_remove(ioc,
6484 		    sas_device->sas_address,
6485 		    sas_device->sas_address_parent);
6486 
6487 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6488 		 sas_device->handle, (u64)sas_device->sas_address);
6489 
6490 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6491 
6492 	dewtprintk(ioc,
6493 		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6494 			    __func__,
6495 			    sas_device->handle, (u64)sas_device->sas_address));
6496 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6497 	    NULL, NULL));
6498 }
6499 
6500 /**
6501  * _scsih_sas_topology_change_event_debug - debug for topology event
6502  * @ioc: per adapter object
6503  * @event_data: event data payload
6504  * Context: user.
6505  */
6506 static void
6507 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6508 	Mpi2EventDataSasTopologyChangeList_t *event_data)
6509 {
6510 	int i;
6511 	u16 handle;
6512 	u16 reason_code;
6513 	u8 phy_number;
6514 	char *status_str = NULL;
6515 	u8 link_rate, prev_link_rate;
6516 
6517 	switch (event_data->ExpStatus) {
6518 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6519 		status_str = "add";
6520 		break;
6521 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6522 		status_str = "remove";
6523 		break;
6524 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6525 	case 0:
6526 		status_str =  "responding";
6527 		break;
6528 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6529 		status_str = "remove delay";
6530 		break;
6531 	default:
6532 		status_str = "unknown status";
6533 		break;
6534 	}
6535 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6536 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6537 	    "start_phy(%02d), count(%d)\n",
6538 	    le16_to_cpu(event_data->ExpanderDevHandle),
6539 	    le16_to_cpu(event_data->EnclosureHandle),
6540 	    event_data->StartPhyNum, event_data->NumEntries);
6541 	for (i = 0; i < event_data->NumEntries; i++) {
6542 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6543 		if (!handle)
6544 			continue;
6545 		phy_number = event_data->StartPhyNum + i;
6546 		reason_code = event_data->PHY[i].PhyStatus &
6547 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6548 		switch (reason_code) {
6549 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6550 			status_str = "target add";
6551 			break;
6552 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6553 			status_str = "target remove";
6554 			break;
6555 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6556 			status_str = "delay target remove";
6557 			break;
6558 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6559 			status_str = "link rate change";
6560 			break;
6561 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6562 			status_str = "target responding";
6563 			break;
6564 		default:
6565 			status_str = "unknown";
6566 			break;
6567 		}
6568 		link_rate = event_data->PHY[i].LinkRate >> 4;
6569 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6570 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6571 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6572 		    handle, status_str, link_rate, prev_link_rate);
6573 
6574 	}
6575 }
6576 
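/*
 * Illustrative sketch only, not called by the driver: the PHY[i].LinkRate
 * byte used in the debug routine above packs the current negotiated link
 * rate in its upper nibble and the previous link rate in its lower nibble.
 * The helper name is hypothetical.
 */
static inline void
_scsih_example_decode_link_rate(u8 link_rate_byte, u8 *new_rate, u8 *old_rate)
{
	*new_rate = link_rate_byte >> 4;	/* current rate, bits 7:4 */
	*old_rate = link_rate_byte & 0xF;	/* previous rate, bits 3:0 */
}
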
6577 /**
6578  * _scsih_sas_topology_change_event - handle topology changes
6579  * @ioc: per adapter object
6580  * @fw_event: The fw_event_work object
6581  * Context: user.
6582  *
6583  */
6584 static int
6585 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6586 	struct fw_event_work *fw_event)
6587 {
6588 	int i;
6589 	u16 parent_handle, handle;
6590 	u16 reason_code;
6591 	u8 phy_number, max_phys;
6592 	struct _sas_node *sas_expander;
6593 	u64 sas_address;
6594 	unsigned long flags;
6595 	u8 link_rate, prev_link_rate;
6596 	Mpi2EventDataSasTopologyChangeList_t *event_data =
6597 		(Mpi2EventDataSasTopologyChangeList_t *)
6598 		fw_event->event_data;
6599 
6600 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6601 		_scsih_sas_topology_change_event_debug(ioc, event_data);
6602 
6603 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6604 		return 0;
6605 
6606 	if (!ioc->sas_hba.num_phys)
6607 		_scsih_sas_host_add(ioc);
6608 	else
6609 		_scsih_sas_host_refresh(ioc);
6610 
6611 	if (fw_event->ignore) {
6612 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6613 		return 0;
6614 	}
6615 
6616 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6617 
6618 	/* handle expander add */
6619 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6620 		if (_scsih_expander_add(ioc, parent_handle) != 0)
6621 			return 0;
6622 
6623 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6624 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6625 	    parent_handle);
6626 	if (sas_expander) {
6627 		sas_address = sas_expander->sas_address;
6628 		max_phys = sas_expander->num_phys;
6629 	} else if (parent_handle < ioc->sas_hba.num_phys) {
6630 		sas_address = ioc->sas_hba.sas_address;
6631 		max_phys = ioc->sas_hba.num_phys;
6632 	} else {
6633 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6634 		return 0;
6635 	}
6636 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6637 
6638 	/* handle siblings events */
6639 	for (i = 0; i < event_data->NumEntries; i++) {
6640 		if (fw_event->ignore) {
6641 			dewtprintk(ioc,
6642 				   ioc_info(ioc, "ignoring expander event\n"));
6643 			return 0;
6644 		}
6645 		if (ioc->remove_host || ioc->pci_error_recovery)
6646 			return 0;
6647 		phy_number = event_data->StartPhyNum + i;
6648 		if (phy_number >= max_phys)
6649 			continue;
6650 		reason_code = event_data->PHY[i].PhyStatus &
6651 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6652 		if ((event_data->PHY[i].PhyStatus &
6653 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6654 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6655 				continue;
6656 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6657 		if (!handle)
6658 			continue;
6659 		link_rate = event_data->PHY[i].LinkRate >> 4;
6660 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6661 		switch (reason_code) {
6662 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6663 
6664 			if (ioc->shost_recovery)
6665 				break;
6666 
6667 			if (link_rate == prev_link_rate)
6668 				break;
6669 
6670 			mpt3sas_transport_update_links(ioc, sas_address,
6671 			    handle, phy_number, link_rate);
6672 
6673 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6674 				break;
6675 
6676 			_scsih_check_device(ioc, sas_address, handle,
6677 			    phy_number, link_rate);
6678 
6679 			if (!test_bit(handle, ioc->pend_os_device_add))
6680 				break;
6681 
6682 			fallthrough;
6683 
6684 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6685 
6686 			if (ioc->shost_recovery)
6687 				break;
6688 
6689 			mpt3sas_transport_update_links(ioc, sas_address,
6690 			    handle, phy_number, link_rate);
6691 
6692 			_scsih_add_device(ioc, handle, phy_number, 0);
6693 
6694 			break;
6695 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6696 
6697 			_scsih_device_remove_by_handle(ioc, handle);
6698 			break;
6699 		}
6700 	}
6701 
6702 	/* handle expander removal */
6703 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6704 	    sas_expander)
6705 		mpt3sas_expander_remove(ioc, sas_address);
6706 
6707 	return 0;
6708 }
6709 
6710 /**
6711  * _scsih_sas_device_status_change_event_debug - debug for device event
6712  * @ioc: per adapter object
6713  * @event_data: event data payload
6714  * Context: user.
6715  */
6716 static void
6717 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6718 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6719 {
6720 	char *reason_str = NULL;
6721 
6722 	switch (event_data->ReasonCode) {
6723 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6724 		reason_str = "smart data";
6725 		break;
6726 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6727 		reason_str = "unsupported device discovered";
6728 		break;
6729 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6730 		reason_str = "internal device reset";
6731 		break;
6732 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6733 		reason_str = "internal task abort";
6734 		break;
6735 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6736 		reason_str = "internal task abort set";
6737 		break;
6738 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6739 		reason_str = "internal clear task set";
6740 		break;
6741 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6742 		reason_str = "internal query task";
6743 		break;
6744 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6745 		reason_str = "sata init failure";
6746 		break;
6747 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6748 		reason_str = "internal device reset complete";
6749 		break;
6750 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6751 		reason_str = "internal task abort complete";
6752 		break;
6753 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6754 		reason_str = "internal async notification";
6755 		break;
6756 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6757 		reason_str = "expander reduced functionality";
6758 		break;
6759 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6760 		reason_str = "expander reduced functionality complete";
6761 		break;
6762 	default:
6763 		reason_str = "unknown reason";
6764 		break;
6765 	}
6766 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6767 		 reason_str, le16_to_cpu(event_data->DevHandle),
6768 		 (u64)le64_to_cpu(event_data->SASAddress),
6769 		 le16_to_cpu(event_data->TaskTag));
6770 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6771 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
6772 			event_data->ASC, event_data->ASCQ);
6773 	pr_cont("\n");
6774 }
6775 
6776 /**
6777  * _scsih_sas_device_status_change_event - handle device status change
6778  * @ioc: per adapter object
6779  * @event_data: event data payload
6780  * Context: user.
6781  */
6782 static void
6783 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6784 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6785 {
6786 	struct MPT3SAS_TARGET *target_priv_data;
6787 	struct _sas_device *sas_device;
6788 	u64 sas_address;
6789 	unsigned long flags;
6790 
6791 	/* In MPI Revision K (0xC), the internal device reset complete was
6792 	 * implemented, so avoid setting tm_busy flag for older firmware.
6793 	 */
6794 	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6795 		return;
6796 
6797 	if (event_data->ReasonCode !=
6798 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6799 	   event_data->ReasonCode !=
6800 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6801 		return;
6802 
6803 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
6804 	sas_address = le64_to_cpu(event_data->SASAddress);
6805 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6806 	    sas_address);
6807 
6808 	if (!sas_device || !sas_device->starget)
6809 		goto out;
6810 
6811 	target_priv_data = sas_device->starget->hostdata;
6812 	if (!target_priv_data)
6813 		goto out;
6814 
6815 	if (event_data->ReasonCode ==
6816 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6817 		target_priv_data->tm_busy = 1;
6818 	else
6819 		target_priv_data->tm_busy = 0;
6820 
6821 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6822 		ioc_info(ioc,
6823 		    "%s tm_busy flag for handle(0x%04x)\n",
6824 		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
6825 		    target_priv_data->handle);
6826 
6827 out:
6828 	if (sas_device)
6829 		sas_device_put(sas_device);
6830 
6831 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6832 }
6833 
6834 
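/*
 * Illustrative sketch only, not used by the driver: the IOCFacts
 * HeaderVersion field carries the MPI header major revision in its upper
 * byte, which is how the handler above skips the tm_busy bookkeeping on
 * firmware older than revision K (0xC).  The helper name is hypothetical.
 */
static inline int
_scsih_example_fw_supports_internal_reset_complete(u16 header_version)
{
	return (header_version >> 8) >= 0xC;	/* major revision in bits 15:8 */
}
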
6835 /**
6836  * _scsih_check_pcie_access_status - check access flags
6837  * @ioc: per adapter object
6838  * @wwid: wwid
6839  * @handle: sas device handle
6840  * @access_status: errors returned during discovery of the device
6841  *
6842  * Return: 0 for success, else failure
6843  */
6844 static u8
6845 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6846 	u16 handle, u8 access_status)
6847 {
6848 	u8 rc = 1;
6849 	char *desc = NULL;
6850 
6851 	switch (access_status) {
6852 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6853 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6854 		rc = 0;
6855 		break;
6856 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6857 		desc = "PCIe device capability failed";
6858 		break;
6859 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6860 		desc = "PCIe device blocked";
6861 		ioc_info(ioc,
6862 		    "Device with Access Status (%s): wwid(0x%016llx), "
6863 		    "handle(0x%04x)\n ll only be added to the internal list",
6864 		    desc, (u64)wwid, handle);
6865 		rc = 0;
6866 		break;
6867 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6868 		desc = "PCIe device mem space access failed";
6869 		break;
6870 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6871 		desc = "PCIe device unsupported";
6872 		break;
6873 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6874 		desc = "PCIe device MSIx Required";
6875 		break;
6876 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6877 		desc = "PCIe device init fail max";
6878 		break;
6879 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6880 		desc = "PCIe device status unknown";
6881 		break;
6882 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6883 		desc = "nvme ready timeout";
6884 		break;
6885 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6886 		desc = "nvme device configuration unsupported";
6887 		break;
6888 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6889 		desc = "nvme identify failed";
6890 		break;
6891 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6892 		desc = "nvme qconfig failed";
6893 		break;
6894 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6895 		desc = "nvme qcreation failed";
6896 		break;
6897 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6898 		desc = "nvme eventcfg failed";
6899 		break;
6900 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6901 		desc = "nvme get feature stat failed";
6902 		break;
6903 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6904 		desc = "nvme idle timeout";
6905 		break;
6906 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6907 		desc = "nvme failure status";
6908 		break;
6909 	default:
6910 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6911 			access_status, (u64)wwid, handle);
6912 		return rc;
6913 	}
6914 
6915 	if (!rc)
6916 		return rc;
6917 
6918 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6919 		 desc, (u64)wwid, handle);
6920 	return rc;
6921 }
6922 
6923 /**
6924  * _scsih_pcie_device_remove_from_sml -  removing pcie device
6925  * from SML and free up associated memory
6926  * @ioc: per adapter object
6927  * @pcie_device: the pcie_device object
6928  */
6929 static void
6930 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6931 	struct _pcie_device *pcie_device)
6932 {
6933 	struct MPT3SAS_TARGET *sas_target_priv_data;
6934 
6935 	dewtprintk(ioc,
6936 		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6937 			    __func__,
6938 			    pcie_device->handle, (u64)pcie_device->wwid));
6939 	if (pcie_device->enclosure_handle != 0)
6940 		dewtprintk(ioc,
6941 			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6942 				    __func__,
6943 				    (u64)pcie_device->enclosure_logical_id,
6944 				    pcie_device->slot));
6945 	if (pcie_device->connector_name[0] != '\0')
6946 		dewtprintk(ioc,
6947 			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6948 				    __func__,
6949 				    pcie_device->enclosure_level,
6950 				    pcie_device->connector_name));
6951 
6952 	if (pcie_device->starget && pcie_device->starget->hostdata) {
6953 		sas_target_priv_data = pcie_device->starget->hostdata;
6954 		sas_target_priv_data->deleted = 1;
6955 		_scsih_ublock_io_device(ioc, pcie_device->wwid);
6956 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6957 	}
6958 
6959 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6960 		 pcie_device->handle, (u64)pcie_device->wwid);
6961 	if (pcie_device->enclosure_handle != 0)
6962 		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6963 			 (u64)pcie_device->enclosure_logical_id,
6964 			 pcie_device->slot);
6965 	if (pcie_device->connector_name[0] != '\0')
6966 		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
6967 			 pcie_device->enclosure_level,
6968 			 pcie_device->connector_name);
6969 
6970 	if (pcie_device->starget && (pcie_device->access_status !=
6971 				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
6972 		scsi_remove_target(&pcie_device->starget->dev);
6973 	dewtprintk(ioc,
6974 		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6975 			    __func__,
6976 			    pcie_device->handle, (u64)pcie_device->wwid));
6977 	if (pcie_device->enclosure_handle != 0)
6978 		dewtprintk(ioc,
6979 			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6980 				    __func__,
6981 				    (u64)pcie_device->enclosure_logical_id,
6982 				    pcie_device->slot));
6983 	if (pcie_device->connector_name[0] != '\0')
6984 		dewtprintk(ioc,
6985 			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
6986 				    __func__,
6987 				    pcie_device->enclosure_level,
6988 				    pcie_device->connector_name));
6989 
6990 	kfree(pcie_device->serial_number);
6991 }
6992 
6993 
6994 /**
6995  * _scsih_pcie_check_device - checking device responsiveness
6996  * @ioc: per adapter object
6997  * @handle: attached device handle
6998  */
6999 static void
7000 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7001 {
7002 	Mpi2ConfigReply_t mpi_reply;
7003 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
7004 	u32 ioc_status;
7005 	struct _pcie_device *pcie_device;
7006 	u64 wwid;
7007 	unsigned long flags;
7008 	struct scsi_target *starget;
7009 	struct MPT3SAS_TARGET *sas_target_priv_data;
7010 	u32 device_info;
7011 
7012 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7013 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
7014 		return;
7015 
7016 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7017 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7018 		return;
7019 
7020 	/* check if this is end device */
7021 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7022 	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
7023 		return;
7024 
7025 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
7026 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7027 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7028 
7029 	if (!pcie_device) {
7030 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7031 		return;
7032 	}
7033 
7034 	if (unlikely(pcie_device->handle != handle)) {
7035 		starget = pcie_device->starget;
7036 		sas_target_priv_data = starget->hostdata;
7037 		pcie_device->access_status = pcie_device_pg0.AccessStatus;
7038 		starget_printk(KERN_INFO, starget,
7039 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
7040 		    pcie_device->handle, handle);
7041 		sas_target_priv_data->handle = handle;
7042 		pcie_device->handle = handle;
7043 
7044 		if (le32_to_cpu(pcie_device_pg0.Flags) &
7045 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7046 			pcie_device->enclosure_level =
7047 			    pcie_device_pg0.EnclosureLevel;
7048 			memcpy(&pcie_device->connector_name[0],
7049 			    &pcie_device_pg0.ConnectorName[0], 4);
7050 		} else {
7051 			pcie_device->enclosure_level = 0;
7052 			pcie_device->connector_name[0] = '\0';
7053 		}
7054 	}
7055 
7056 	/* check if device is present */
7057 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7058 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7059 		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
7060 			 handle);
7061 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7062 		pcie_device_put(pcie_device);
7063 		return;
7064 	}
7065 
7066 	/* check if there were any issues with discovery */
7067 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7068 	    pcie_device_pg0.AccessStatus)) {
7069 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7070 		pcie_device_put(pcie_device);
7071 		return;
7072 	}
7073 
7074 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7075 	pcie_device_put(pcie_device);
7076 
7077 	_scsih_ublock_io_device(ioc, wwid);
7078 
7079 	return;
7080 }
7081 
7082 /**
7083  * _scsih_pcie_add_device -  creating pcie device object
7084  * @ioc: per adapter object
7085  * @handle: pcie device handle
7086  *
7087  * Creating end device object, stored in ioc->pcie_device_list.
7088  *
7089  * Return: 1 means queue the event later, 0 means complete the event
7090  */
7091 static int
7092 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7093 {
7094 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
7095 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
7096 	Mpi2ConfigReply_t mpi_reply;
7097 	struct _pcie_device *pcie_device;
7098 	struct _enclosure_node *enclosure_dev;
7099 	u32 ioc_status;
7100 	u64 wwid;
7101 
7102 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7103 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
7104 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7105 			__FILE__, __LINE__, __func__);
7106 		return 0;
7107 	}
7108 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7109 	    MPI2_IOCSTATUS_MASK;
7110 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7111 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7112 			__FILE__, __LINE__, __func__);
7113 		return 0;
7114 	}
7115 
7116 	set_bit(handle, ioc->pend_os_device_add);
7117 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
7118 
7119 	/* check if device is present */
7120 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7121 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7122 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7123 			handle);
7124 		return 0;
7125 	}
7126 
7127 	/* check if there were any issues with discovery */
7128 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7129 	    pcie_device_pg0.AccessStatus))
7130 		return 0;
7131 
7132 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
7133 	    (pcie_device_pg0.DeviceInfo))))
7134 		return 0;
7135 
7136 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
7137 	if (pcie_device) {
7138 		clear_bit(handle, ioc->pend_os_device_add);
7139 		pcie_device_put(pcie_device);
7140 		return 0;
7141 	}
7142 
7143 	/* PCIe Device Page 2 contains read-only information about a
7144 	 * specific NVMe device; therefore, this page is only
7145 	 * valid for NVMe devices and is skipped for PCIe devices of type SCSI.
7146 	 */
7147 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
7148 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
7149 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
7150 		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
7151 		    handle)) {
7152 			ioc_err(ioc,
7153 			    "failure at %s:%d/%s()!\n", __FILE__,
7154 			    __LINE__, __func__);
7155 			return 0;
7156 		}
7157 
7158 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7159 					MPI2_IOCSTATUS_MASK;
7160 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7161 			ioc_err(ioc,
7162 			    "failure at %s:%d/%s()!\n", __FILE__,
7163 			    __LINE__, __func__);
7164 			return 0;
7165 		}
7166 	}
7167 
7168 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
7169 	if (!pcie_device) {
7170 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7171 			__FILE__, __LINE__, __func__);
7172 		return 0;
7173 	}
7174 
7175 	kref_init(&pcie_device->refcount);
7176 	pcie_device->id = ioc->pcie_target_id++;
7177 	pcie_device->channel = PCIE_CHANNEL;
7178 	pcie_device->handle = handle;
7179 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
7180 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7181 	pcie_device->wwid = wwid;
7182 	pcie_device->port_num = pcie_device_pg0.PortNum;
7183 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
7184 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7185 
7186 	pcie_device->enclosure_handle =
7187 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
7188 	if (pcie_device->enclosure_handle != 0)
7189 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
7190 
7191 	if (le32_to_cpu(pcie_device_pg0.Flags) &
7192 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7193 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
7194 		memcpy(&pcie_device->connector_name[0],
7195 		    &pcie_device_pg0.ConnectorName[0], 4);
7196 	} else {
7197 		pcie_device->enclosure_level = 0;
7198 		pcie_device->connector_name[0] = '\0';
7199 	}
7200 
7201 	/* get enclosure_logical_id */
7202 	if (pcie_device->enclosure_handle) {
7203 		enclosure_dev =
7204 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7205 						pcie_device->enclosure_handle);
7206 		if (enclosure_dev)
7207 			pcie_device->enclosure_logical_id =
7208 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7209 	}
7210 	/* TODO -- Add device name once FW supports it */
7211 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
7212 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
7213 		pcie_device->nvme_mdts =
7214 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
7215 		pcie_device->shutdown_latency =
7216 			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
7217 		/*
7218 		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
7219 		 * if drive's RTD3 Entry Latency is greater than IOC's
7220 		 * max_shutdown_latency.
7221 		 */
7222 		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
7223 			ioc->max_shutdown_latency =
7224 				pcie_device->shutdown_latency;
7225 		if (pcie_device_pg2.ControllerResetTO)
7226 			pcie_device->reset_timeout =
7227 			    pcie_device_pg2.ControllerResetTO;
7228 		else
7229 			pcie_device->reset_timeout = 30;
7230 	} else
7231 		pcie_device->reset_timeout = 30;
7232 
7233 	if (ioc->wait_for_discovery_to_complete)
7234 		_scsih_pcie_device_init_add(ioc, pcie_device);
7235 	else
7236 		_scsih_pcie_device_add(ioc, pcie_device);
7237 
7238 	pcie_device_put(pcie_device);
7239 	return 0;
7240 }
7241 
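/*
 * Illustrative sketch only, not called by the driver: mirrors the PCIe
 * Device Page 2 handling in _scsih_pcie_add_device() above, where the
 * IOC-wide max_shutdown_latency is raised to the drive's RTD3 Entry
 * Latency whenever the drive's value is larger.  The helper name is
 * hypothetical.
 */
static inline u16
_scsih_example_update_max_shutdown_latency(u16 ioc_max, u16 drive_rtd3_latency)
{
	/* keep the larger of the IOC-wide value and the drive's RTD3 latency */
	return drive_rtd3_latency > ioc_max ? drive_rtd3_latency : ioc_max;
}
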
7242 /**
7243  * _scsih_pcie_topology_change_event_debug - debug for topology
7244  * event
7245  * @ioc: per adapter object
7246  * @event_data: event data payload
7247  * Context: user.
7248  */
7249 static void
7250 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7251 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
7252 {
7253 	int i;
7254 	u16 handle;
7255 	u16 reason_code;
7256 	u8 port_number;
7257 	char *status_str = NULL;
7258 	u8 link_rate, prev_link_rate;
7259 
7260 	switch (event_data->SwitchStatus) {
7261 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
7262 		status_str = "add";
7263 		break;
7264 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
7265 		status_str = "remove";
7266 		break;
7267 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
7268 	case 0:
7269 		status_str =  "responding";
7270 		break;
7271 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
7272 		status_str = "remove delay";
7273 		break;
7274 	default:
7275 		status_str = "unknown status";
7276 		break;
7277 	}
7278 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
7279 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
7280 		"start_port(%02d), count(%d)\n",
7281 		le16_to_cpu(event_data->SwitchDevHandle),
7282 		le16_to_cpu(event_data->EnclosureHandle),
7283 		event_data->StartPortNum, event_data->NumEntries);
7284 	for (i = 0; i < event_data->NumEntries; i++) {
7285 		handle =
7286 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7287 		if (!handle)
7288 			continue;
7289 		port_number = event_data->StartPortNum + i;
7290 		reason_code = event_data->PortEntry[i].PortStatus;
7291 		switch (reason_code) {
7292 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7293 			status_str = "target add";
7294 			break;
7295 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7296 			status_str = "target remove";
7297 			break;
7298 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
7299 			status_str = "delay target remove";
7300 			break;
7301 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7302 			status_str = "link rate change";
7303 			break;
7304 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
7305 			status_str = "target responding";
7306 			break;
7307 		default:
7308 			status_str = "unknown";
7309 			break;
7310 		}
7311 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
7312 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7313 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
7314 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7315 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
7316 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
7317 			handle, status_str, link_rate, prev_link_rate);
7318 	}
7319 }
7320 
7321 /**
7322  * _scsih_pcie_topology_change_event - handle PCIe topology
7323  *  changes
7324  * @ioc: per adapter object
7325  * @fw_event: The fw_event_work object
7326  * Context: user.
7327  *
7328  */
7329 static void
7330 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7331 	struct fw_event_work *fw_event)
7332 {
7333 	int i;
7334 	u16 handle;
7335 	u16 reason_code;
7336 	u8 link_rate, prev_link_rate;
7337 	unsigned long flags;
7338 	int rc;
7339 	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7340 		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7341 	struct _pcie_device *pcie_device;
7342 
7343 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7344 		_scsih_pcie_topology_change_event_debug(ioc, event_data);
7345 
7346 	if (ioc->shost_recovery || ioc->remove_host ||
7347 		ioc->pci_error_recovery)
7348 		return;
7349 
7350 	if (fw_event->ignore) {
7351 		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
7352 		return;
7353 	}
7354 
7355 	/* handle siblings events */
7356 	for (i = 0; i < event_data->NumEntries; i++) {
7357 		if (fw_event->ignore) {
7358 			dewtprintk(ioc,
7359 				   ioc_info(ioc, "ignoring switch event\n"));
7360 			return;
7361 		}
7362 		if (ioc->remove_host || ioc->pci_error_recovery)
7363 			return;
7364 		reason_code = event_data->PortEntry[i].PortStatus;
7365 		handle =
7366 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7367 		if (!handle)
7368 			continue;
7369 
7370 		link_rate = event_data->PortEntry[i].CurrentPortInfo
7371 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7372 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7373 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7374 
7375 		switch (reason_code) {
7376 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7377 			if (ioc->shost_recovery)
7378 				break;
7379 			if (link_rate == prev_link_rate)
7380 				break;
7381 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7382 				break;
7383 
7384 			_scsih_pcie_check_device(ioc, handle);
7385 
7386 			/* The code after this point handles the case where a
7387 			 * device has been added, however it is returning
7388 			 * BUSY for some time.  Then, before the Device
7389 			 * Missing Delay expires and the device becomes
7390 			 * READY, the device is removed and added back.
7391 			 */
7392 			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7393 			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7394 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7395 
7396 			if (pcie_device) {
7397 				pcie_device_put(pcie_device);
7398 				break;
7399 			}
7400 
7401 			if (!test_bit(handle, ioc->pend_os_device_add))
7402 				break;
7403 
7404 			dewtprintk(ioc,
7405 				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7406 					    handle));
7407 			event_data->PortEntry[i].PortStatus &= 0xF0;
7408 			event_data->PortEntry[i].PortStatus |=
7409 				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7410 			fallthrough;
7411 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7412 			if (ioc->shost_recovery)
7413 				break;
7414 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7415 				break;
7416 
7417 			rc = _scsih_pcie_add_device(ioc, handle);
7418 			if (!rc) {
7419 				/* mark entry vacant */
7420 				/* TODO: This needs to be reviewed and fixed;
7421 				 * we don't have an entry to mark an event
7422 				 * void, like vacant.
7423 				 */
7424 				event_data->PortEntry[i].PortStatus |=
7425 					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7426 			}
7427 			break;
7428 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7429 			_scsih_pcie_device_remove_by_handle(ioc, handle);
7430 			break;
7431 		}
7432 	}
7433 }
7434 
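/*
 * Illustrative sketch only, not called by the driver: the handler above
 * rewrites a port entry's reason code in place by clearing the low nibble
 * of PortStatus (where the reason code lives) and OR-ing in DEV_ADDED, so
 * a link-rate change for a device the driver never saw is replayed as a
 * device add.  The helper name is hypothetical.
 */
static inline u8
_scsih_example_replay_as_dev_added(u8 port_status)
{
	return (port_status & 0xF0) | MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
}
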
7435 /**
7436  * _scsih_pcie_device_status_change_event_debug - debug for device event
7437  * @ioc: per adapter object
7438  * @event_data: event data payload
7439  * Context: user.
7440  */
7441 static void
7442 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7443 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7444 {
7445 	char *reason_str = NULL;
7446 
7447 	switch (event_data->ReasonCode) {
7448 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7449 		reason_str = "smart data";
7450 		break;
7451 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7452 		reason_str = "unsupported device discovered";
7453 		break;
7454 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7455 		reason_str = "internal device reset";
7456 		break;
7457 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7458 		reason_str = "internal task abort";
7459 		break;
7460 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7461 		reason_str = "internal task abort set";
7462 		break;
7463 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7464 		reason_str = "internal clear task set";
7465 		break;
7466 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7467 		reason_str = "internal query task";
7468 		break;
7469 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7470 		reason_str = "device init failure";
7471 		break;
7472 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7473 		reason_str = "internal device reset complete";
7474 		break;
7475 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7476 		reason_str = "internal task abort complete";
7477 		break;
7478 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7479 		reason_str = "internal async notification";
7480 		break;
7481 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7482 		reason_str = "pcie hot reset failed";
7483 		break;
7484 	default:
7485 		reason_str = "unknown reason";
7486 		break;
7487 	}
7488 
7489 	ioc_info(ioc, "PCIE device status change: (%s)\n"
7490 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7491 		 reason_str, le16_to_cpu(event_data->DevHandle),
7492 		 (u64)le64_to_cpu(event_data->WWID),
7493 		 le16_to_cpu(event_data->TaskTag));
7494 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7495 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7496 			event_data->ASC, event_data->ASCQ);
7497 	pr_cont("\n");
7498 }
7499 
7500 /**
7501  * _scsih_pcie_device_status_change_event - handle device status
7502  * change
7503  * @ioc: per adapter object
7504  * @fw_event: The fw_event_work object
7505  * Context: user.
7506  */
7507 static void
7508 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7509 	struct fw_event_work *fw_event)
7510 {
7511 	struct MPT3SAS_TARGET *target_priv_data;
7512 	struct _pcie_device *pcie_device;
7513 	u64 wwid;
7514 	unsigned long flags;
7515 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7516 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7517 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7518 		_scsih_pcie_device_status_change_event_debug(ioc,
7519 			event_data);
7520 
7521 	if (event_data->ReasonCode !=
7522 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7523 		event_data->ReasonCode !=
7524 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7525 		return;
7526 
7527 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7528 	wwid = le64_to_cpu(event_data->WWID);
7529 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7530 
7531 	if (!pcie_device || !pcie_device->starget)
7532 		goto out;
7533 
7534 	target_priv_data = pcie_device->starget->hostdata;
7535 	if (!target_priv_data)
7536 		goto out;
7537 
7538 	if (event_data->ReasonCode ==
7539 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7540 		target_priv_data->tm_busy = 1;
7541 	else
7542 		target_priv_data->tm_busy = 0;
7543 out:
7544 	if (pcie_device)
7545 		pcie_device_put(pcie_device);
7546 
7547 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7548 }
7549 
7550 /**
7551  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7552  * event
7553  * @ioc: per adapter object
7554  * @event_data: event data payload
7555  * Context: user.
7556  */
7557 static void
7558 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7559 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7560 {
7561 	char *reason_str = NULL;
7562 
7563 	switch (event_data->ReasonCode) {
7564 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7565 		reason_str = "enclosure add";
7566 		break;
7567 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7568 		reason_str = "enclosure remove";
7569 		break;
7570 	default:
7571 		reason_str = "unknown reason";
7572 		break;
7573 	}
7574 
7575 	ioc_info(ioc, "enclosure status change: (%s)\n"
7576 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
7577 		 reason_str,
7578 		 le16_to_cpu(event_data->EnclosureHandle),
7579 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7580 		 le16_to_cpu(event_data->StartSlot));
7581 }
7582 
7583 /**
7584  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7585  * @ioc: per adapter object
7586  * @fw_event: The fw_event_work object
7587  * Context: user.
7588  */
7589 static void
7590 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7591 	struct fw_event_work *fw_event)
7592 {
7593 	Mpi2ConfigReply_t mpi_reply;
7594 	struct _enclosure_node *enclosure_dev = NULL;
7595 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7596 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7597 	int rc;
7598 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7599 
7600 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7601 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7602 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
7603 		     fw_event->event_data);
7604 	if (ioc->shost_recovery)
7605 		return;
7606 
7607 	if (enclosure_handle)
7608 		enclosure_dev =
7609 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7610 						enclosure_handle);
7611 	switch (event_data->ReasonCode) {
7612 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7613 		if (!enclosure_dev) {
7614 			enclosure_dev =
7615 				kzalloc(sizeof(struct _enclosure_node),
7616 					GFP_KERNEL);
7617 			if (!enclosure_dev) {
7618 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
7619 					 __FILE__, __LINE__, __func__);
7620 				return;
7621 			}
7622 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7623 				&enclosure_dev->pg0,
7624 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7625 				enclosure_handle);
7626 
7627 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7628 						MPI2_IOCSTATUS_MASK)) {
7629 				kfree(enclosure_dev);
7630 				return;
7631 			}
7632 
7633 			list_add_tail(&enclosure_dev->list,
7634 							&ioc->enclosure_list);
7635 		}
7636 		break;
7637 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7638 		if (enclosure_dev) {
7639 			list_del(&enclosure_dev->list);
7640 			kfree(enclosure_dev);
7641 		}
7642 		break;
7643 	default:
7644 		break;
7645 	}
7646 }
7647 
7648 /**
7649  * _scsih_sas_broadcast_primitive_event - handle broadcast events
7650  * @ioc: per adapter object
7651  * @fw_event: The fw_event_work object
7652  * Context: user.
7653  */
7654 static void
7655 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7656 	struct fw_event_work *fw_event)
7657 {
7658 	struct scsi_cmnd *scmd;
7659 	struct scsi_device *sdev;
7660 	struct scsiio_tracker *st;
7661 	u16 smid, handle;
7662 	u32 lun;
7663 	struct MPT3SAS_DEVICE *sas_device_priv_data;
7664 	u32 termination_count;
7665 	u32 query_count;
7666 	Mpi2SCSITaskManagementReply_t *mpi_reply;
7667 	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7668 		(Mpi2EventDataSasBroadcastPrimitive_t *)
7669 		fw_event->event_data;
7670 	u16 ioc_status;
7671 	unsigned long flags;
7672 	int r;
7673 	u8 max_retries = 0;
7674 	u8 task_abort_retries;
7675 
7676 	mutex_lock(&ioc->tm_cmds.mutex);
7677 	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7678 		 __func__, event_data->PhyNum, event_data->PortWidth);
7679 
7680 	_scsih_block_io_all_device(ioc);
7681 
7682 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7683 	mpi_reply = ioc->tm_cmds.reply;
7684  broadcast_aen_retry:
7685 
7686 	/* sanity checks for retrying this loop */
7687 	if (max_retries++ == 5) {
7688 		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7689 		goto out;
7690 	} else if (max_retries > 1)
7691 		dewtprintk(ioc,
7692 			   ioc_info(ioc, "%s: %d retry\n",
7693 				    __func__, max_retries - 1));
7694 
7695 	termination_count = 0;
7696 	query_count = 0;
7697 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7698 		if (ioc->shost_recovery)
7699 			goto out;
7700 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7701 		if (!scmd)
7702 			continue;
7703 		st = scsi_cmd_priv(scmd);
7704 		sdev = scmd->device;
7705 		sas_device_priv_data = sdev->hostdata;
7706 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7707 			continue;
7708 		 /* skip hidden raid components */
7709 		if (sas_device_priv_data->sas_target->flags &
7710 		    MPT_TARGET_FLAGS_RAID_COMPONENT)
7711 			continue;
7712 		 /* skip volumes */
7713 		if (sas_device_priv_data->sas_target->flags &
7714 		    MPT_TARGET_FLAGS_VOLUME)
7715 			continue;
7716 		 /* skip PCIe devices */
7717 		if (sas_device_priv_data->sas_target->flags &
7718 		    MPT_TARGET_FLAGS_PCIE_DEVICE)
7719 			continue;
7720 
7721 		handle = sas_device_priv_data->sas_target->handle;
7722 		lun = sas_device_priv_data->lun;
7723 		query_count++;
7724 
7725 		if (ioc->shost_recovery)
7726 			goto out;
7727 
7728 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7729 		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
7730 			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7731 			st->msix_io, 30, 0);
7732 		if (r == FAILED) {
7733 			sdev_printk(KERN_WARNING, sdev,
7734 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
7735 			    "QUERY_TASK: scmd(%p)\n", scmd);
7736 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7737 			goto broadcast_aen_retry;
7738 		}
7739 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7740 		    & MPI2_IOCSTATUS_MASK;
7741 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7742 			sdev_printk(KERN_WARNING, sdev,
7743 				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7744 				ioc_status, scmd);
7745 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7746 			goto broadcast_aen_retry;
7747 		}
7748 
7749 		/* see if IO is still owned by IOC and target */
7750 		if (mpi_reply->ResponseCode ==
7751 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7752 		     mpi_reply->ResponseCode ==
7753 		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7754 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7755 			continue;
7756 		}
7757 		task_abort_retries = 0;
7758  tm_retry:
7759 		if (task_abort_retries++ == 60) {
7760 			dewtprintk(ioc,
7761 				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7762 					    __func__));
7763 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7764 			goto broadcast_aen_retry;
7765 		}
7766 
7767 		if (ioc->shost_recovery)
7768 			goto out_no_lock;
7769 
7770 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
7771 			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
7772 			st->smid, st->msix_io, 30, 0);
7773 		if (r == FAILED || st->cb_idx != 0xFF) {
7774 			sdev_printk(KERN_WARNING, sdev,
7775 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7776 			    "scmd(%p)\n", scmd);
7777 			goto tm_retry;
7778 		}
7779 
7780 		if (task_abort_retries > 1)
7781 			sdev_printk(KERN_WARNING, sdev,
7782 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7783 			    " scmd(%p)\n",
7784 			    task_abort_retries - 1, scmd);
7785 
7786 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7787 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7788 	}
7789 
7790 	if (ioc->broadcast_aen_pending) {
7791 		dewtprintk(ioc,
7792 			   ioc_info(ioc,
7793 				    "%s: loop back due to pending AEN\n",
7794 				    __func__));
7795 		 ioc->broadcast_aen_pending = 0;
7796 		 goto broadcast_aen_retry;
7797 	}
7798 
7799  out:
7800 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7801  out_no_lock:
7802 
7803 	dewtprintk(ioc,
7804 		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7805 			    __func__, query_count, termination_count));
7806 
7807 	ioc->broadcast_aen_busy = 0;
7808 	if (!ioc->shost_recovery)
7809 		_scsih_ublock_io_all_device(ioc);
7810 	mutex_unlock(&ioc->tm_cmds.mutex);
7811 }
7812 
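/*
 * Illustrative sketch only, not called by the driver: in the
 * broadcast-primitive handler above, a QUERY_TASK response of TM_SUCCEEDED
 * or IO_QUEUED_ON_IOC means the I/O is still owned by the IOC/target and
 * the loop moves on; any other response code sends the command down the
 * ABORT_TASK retry path.  The helper name is hypothetical.
 */
static inline int
_scsih_example_io_needs_abort(u8 tm_response_code)
{
	return tm_response_code != MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
	       tm_response_code != MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC;
}
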
7813 /**
7814  * _scsih_sas_discovery_event - handle discovery events
7815  * @ioc: per adapter object
7816  * @fw_event: The fw_event_work object
7817  * Context: user.
7818  */
7819 static void
7820 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7821 	struct fw_event_work *fw_event)
7822 {
7823 	Mpi2EventDataSasDiscovery_t *event_data =
7824 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7825 
7826 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7827 		ioc_info(ioc, "discovery event: (%s)",
7828 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7829 			 "start" : "stop");
7830 		if (event_data->DiscoveryStatus)
7831 			pr_cont("discovery_status(0x%08x)",
7832 				le32_to_cpu(event_data->DiscoveryStatus));
7833 		pr_cont("\n");
7834 	}
7835 
7836 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7837 	    !ioc->sas_hba.num_phys) {
7838 		if (disable_discovery > 0 && ioc->shost_recovery) {
7839 			/* Wait for the reset to complete */
7840 			while (ioc->shost_recovery)
7841 				ssleep(1);
7842 		}
7843 		_scsih_sas_host_add(ioc);
7844 	}
7845 }
7846 
7847 /**
7848  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7849  *						events
7850  * @ioc: per adapter object
7851  * @fw_event: The fw_event_work object
7852  * Context: user.
7853  */
7854 static void
7855 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7856 	struct fw_event_work *fw_event)
7857 {
7858 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7859 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7860 
7861 	switch (event_data->ReasonCode) {
7862 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7863 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7864 			 le16_to_cpu(event_data->DevHandle),
7865 			 (u64)le64_to_cpu(event_data->SASAddress),
7866 			 event_data->PhysicalPort);
7867 		break;
7868 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7869 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7870 			 le16_to_cpu(event_data->DevHandle),
7871 			 (u64)le64_to_cpu(event_data->SASAddress),
7872 			 event_data->PhysicalPort);
7873 		break;
7874 	default:
7875 		break;
7876 	}
7877 }
7878 
7879 /**
7880  * _scsih_pcie_enumeration_event - handle enumeration events
7881  * @ioc: per adapter object
7882  * @fw_event: The fw_event_work object
7883  * Context: user.
7884  */
7885 static void
7886 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7887 	struct fw_event_work *fw_event)
7888 {
7889 	Mpi26EventDataPCIeEnumeration_t *event_data =
7890 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7891 
7892 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7893 		return;
7894 
7895 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7896 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7897 		 "started" : "completed",
7898 		 event_data->Flags);
7899 	if (event_data->EnumerationStatus)
7900 		pr_cont("enumeration_status(0x%08x)",
7901 			le32_to_cpu(event_data->EnumerationStatus));
7902 	pr_cont("\n");
7903 }
7904 
7905 /**
7906  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7907  * @ioc: per adapter object
7908  * @handle: device handle for physical disk
7909  * @phys_disk_num: physical disk number
7910  *
7911  * Return: 0 for success, else failure.
7912  */
7913 static int
7914 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7915 {
7916 	Mpi2RaidActionRequest_t *mpi_request;
7917 	Mpi2RaidActionReply_t *mpi_reply;
7918 	u16 smid;
7919 	u8 issue_reset = 0;
7920 	int rc = 0;
7921 	u16 ioc_status;
7922 	u32 log_info;
7923 
7924 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7925 		return rc;
7926 
7927 	mutex_lock(&ioc->scsih_cmds.mutex);
7928 
7929 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7930 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7931 		rc = -EAGAIN;
7932 		goto out;
7933 	}
7934 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7935 
7936 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7937 	if (!smid) {
7938 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7939 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7940 		rc = -EAGAIN;
7941 		goto out;
7942 	}
7943 
7944 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7945 	ioc->scsih_cmds.smid = smid;
7946 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7947 
7948 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7949 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7950 	mpi_request->PhysDiskNum = phys_disk_num;
7951 
7952 	dewtprintk(ioc,
7953 		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7954 			    handle, phys_disk_num));
7955 
7956 	init_completion(&ioc->scsih_cmds.done);
7957 	ioc->put_smid_default(ioc, smid);
7958 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7959 
7960 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7961 		mpt3sas_check_cmd_timeout(ioc,
7962 		    ioc->scsih_cmds.status, mpi_request,
7963 		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
7964 		rc = -EFAULT;
7965 		goto out;
7966 	}
7967 
7968 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7969 
7970 		mpi_reply = ioc->scsih_cmds.reply;
7971 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7972 		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7973 			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
7974 		else
7975 			log_info = 0;
7976 		ioc_status &= MPI2_IOCSTATUS_MASK;
7977 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7978 			dewtprintk(ioc,
7979 				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7980 					    ioc_status, log_info));
7981 			rc = -EFAULT;
7982 		} else
7983 			dewtprintk(ioc,
7984 				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7985 	}
7986 
7987  out:
7988 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7989 	mutex_unlock(&ioc->scsih_cmds.mutex);
7990 
7991 	if (issue_reset)
7992 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7993 	return rc;
7994 }
7995 
7996 /**
7997  * _scsih_reprobe_lun - reprobe the lun to apply a new no_uld_attach setting
7998  * @sdev: scsi device struct
7999  * @no_uld_attach: sdev->no_uld_attach flag setting
8000  *
8001  */
8002 static void
8003 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8004 {
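	/*
	 * Toggling no_uld_attach and reprobing lets the upper-level drivers
	 * (e.g. sd) either bind to or release the device, so a disk that
	 * becomes a RAID component disappears from /dev/sdX without being
	 * removed from the SCSI midlayer.
	 */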
8005 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8006 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8007 	    sdev->no_uld_attach ? "hiding" : "exposing");
8008 	WARN_ON(scsi_device_reprobe(sdev));
8009 }
8010 
8011 /**
8012  * _scsih_sas_volume_add - add new volume
8013  * @ioc: per adapter object
8014  * @element: IR config element data
8015  * Context: user.
8016  */
8017 static void
8018 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8019 	Mpi2EventIrConfigElement_t *element)
8020 {
8021 	struct _raid_device *raid_device;
8022 	unsigned long flags;
8023 	u64 wwid;
8024 	u16 handle = le16_to_cpu(element->VolDevHandle);
8025 	int rc;
8026 
8027 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8028 	if (!wwid) {
8029 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8030 			__FILE__, __LINE__, __func__);
8031 		return;
8032 	}
8033 
8034 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8035 	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8036 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8037 
8038 	if (raid_device)
8039 		return;
8040 
8041 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8042 	if (!raid_device) {
8043 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8044 			__FILE__, __LINE__, __func__);
8045 		return;
8046 	}
8047 
8048 	raid_device->id = ioc->sas_id++;
8049 	raid_device->channel = RAID_CHANNEL;
8050 	raid_device->handle = handle;
8051 	raid_device->wwid = wwid;
8052 	_scsih_raid_device_add(ioc, raid_device);
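	/*
	 * During initial discovery the volume is only recorded and weighed
	 * as a potential boot device; scsi_add_device() is deferred until
	 * discovery completes.
	 */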
8053 	if (!ioc->wait_for_discovery_to_complete) {
8054 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8055 		    raid_device->id, 0);
8056 		if (rc)
8057 			_scsih_raid_device_remove(ioc, raid_device);
8058 	} else {
8059 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8060 		_scsih_determine_boot_device(ioc, raid_device, 1);
8061 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8062 	}
8063 }
8064 
8065 /**
8066  * _scsih_sas_volume_delete - delete volume
8067  * @ioc: per adapter object
8068  * @handle: volume device handle
8069  * Context: user.
8070  */
8071 static void
8072 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8073 {
8074 	struct _raid_device *raid_device;
8075 	unsigned long flags;
8076 	struct MPT3SAS_TARGET *sas_target_priv_data;
8077 	struct scsi_target *starget = NULL;
8078 
8079 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8080 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8081 	if (raid_device) {
8082 		if (raid_device->starget) {
8083 			starget = raid_device->starget;
8084 			sas_target_priv_data = starget->hostdata;
8085 			sas_target_priv_data->deleted = 1;
8086 		}
8087 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
8088 			 raid_device->handle, (u64)raid_device->wwid);
8089 		list_del(&raid_device->list);
8090 		kfree(raid_device);
8091 	}
8092 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8093 	if (starget)
8094 		scsi_remove_target(&starget->dev);
8095 }
8096 
8097 /**
8098  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
8099  * @ioc: per adapter object
8100  * @element: IR config element data
8101  * Context: user.
8102  */
8103 static void
8104 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
8105 	Mpi2EventIrConfigElement_t *element)
8106 {
8107 	struct _sas_device *sas_device;
8108 	struct scsi_target *starget = NULL;
8109 	struct MPT3SAS_TARGET *sas_target_priv_data;
8110 	unsigned long flags;
8111 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8112 
8113 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8114 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
8115 	if (sas_device) {
8116 		sas_device->volume_handle = 0;
8117 		sas_device->volume_wwid = 0;
8118 		clear_bit(handle, ioc->pd_handles);
8119 		if (sas_device->starget && sas_device->starget->hostdata) {
8120 			starget = sas_device->starget;
8121 			sas_target_priv_data = starget->hostdata;
8122 			sas_target_priv_data->flags &=
8123 			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
8124 		}
8125 	}
8126 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8127 	if (!sas_device)
8128 		return;
8129 
8130 	/* exposing raid component */
8131 	if (starget)
8132 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
8133 
8134 	sas_device_put(sas_device);
8135 }
8136 
8137 /**
8138  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
8139  * @ioc: per adapter object
8140  * @element: IR config element data
8141  * Context: user.
8142  */
8143 static void
8144 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
8145 	Mpi2EventIrConfigElement_t *element)
8146 {
8147 	struct _sas_device *sas_device;
8148 	struct scsi_target *starget = NULL;
8149 	struct MPT3SAS_TARGET *sas_target_priv_data;
8150 	unsigned long flags;
8151 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8152 	u16 volume_handle = 0;
8153 	u64 volume_wwid = 0;
8154 
8155 	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
8156 	if (volume_handle)
8157 		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
8158 		    &volume_wwid);
8159 
8160 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8161 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
8162 	if (sas_device) {
8163 		set_bit(handle, ioc->pd_handles);
8164 		if (sas_device->starget && sas_device->starget->hostdata) {
8165 			starget = sas_device->starget;
8166 			sas_target_priv_data = starget->hostdata;
8167 			sas_target_priv_data->flags |=
8168 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
8169 			sas_device->volume_handle = volume_handle;
8170 			sas_device->volume_wwid = volume_wwid;
8171 		}
8172 	}
8173 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8174 	if (!sas_device)
8175 		return;
8176 
8177 	/* hiding raid component */
8178 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8179 
8180 	if (starget)
8181 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
8182 
8183 	sas_device_put(sas_device);
8184 }
8185 
8186 /**
8187  * _scsih_sas_pd_delete - delete pd component
8188  * @ioc: per adapter object
8189  * @element: IR config element data
8190  * Context: user.
8191  */
8192 static void
8193 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
8194 	Mpi2EventIrConfigElement_t *element)
8195 {
8196 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8197 
8198 	_scsih_device_remove_by_handle(ioc, handle);
8199 }
8200 
8201 /**
8202  * _scsih_sas_pd_add - add pd component
8203  * @ioc: per adapter object
8204  * @element: IR config element data
8205  * Context: user.
8206  */
8207 static void
8208 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
8209 	Mpi2EventIrConfigElement_t *element)
8210 {
8211 	struct _sas_device *sas_device;
8212 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8213 	Mpi2ConfigReply_t mpi_reply;
8214 	Mpi2SasDevicePage0_t sas_device_pg0;
8215 	u32 ioc_status;
8216 	u64 sas_address;
8217 	u16 parent_handle;
8218 
8219 	set_bit(handle, ioc->pd_handles);
8220 
8221 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8222 	if (sas_device) {
8223 		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8224 		sas_device_put(sas_device);
8225 		return;
8226 	}
8227 
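	/*
	 * The physical disk is not yet known to the driver: read its SAS
	 * Device Page 0 to learn the parent and phy, report the link to the
	 * transport layer, then add it as a hidden RAID-component device.
	 */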
8228 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
8229 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
8230 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8231 			__FILE__, __LINE__, __func__);
8232 		return;
8233 	}
8234 
8235 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8236 	    MPI2_IOCSTATUS_MASK;
8237 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8238 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8239 			__FILE__, __LINE__, __func__);
8240 		return;
8241 	}
8242 
8243 	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8244 	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8245 		mpt3sas_transport_update_links(ioc, sas_address, handle,
8246 		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8247 
8248 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8249 	_scsih_add_device(ioc, handle, 0, 1);
8250 }
8251 
8252 /**
8253  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
8254  * @ioc: per adapter object
8255  * @event_data: event data payload
8256  * Context: user.
8257  */
8258 static void
8259 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8260 	Mpi2EventDataIrConfigChangeList_t *event_data)
8261 {
8262 	Mpi2EventIrConfigElement_t *element;
8263 	u8 element_type;
8264 	int i;
8265 	char *reason_str = NULL, *element_str = NULL;
8266 
8267 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8268 
8269 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
8270 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
8271 		 "foreign" : "native",
8272 		 event_data->NumElements);
8273 	for (i = 0; i < event_data->NumElements; i++, element++) {
8274 		switch (element->ReasonCode) {
8275 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8276 			reason_str = "add";
8277 			break;
8278 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8279 			reason_str = "remove";
8280 			break;
8281 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
8282 			reason_str = "no change";
8283 			break;
8284 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8285 			reason_str = "hide";
8286 			break;
8287 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8288 			reason_str = "unhide";
8289 			break;
8290 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8291 			reason_str = "volume_created";
8292 			break;
8293 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8294 			reason_str = "volume_deleted";
8295 			break;
8296 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8297 			reason_str = "pd_created";
8298 			break;
8299 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8300 			reason_str = "pd_deleted";
8301 			break;
8302 		default:
8303 			reason_str = "unknown reason";
8304 			break;
8305 		}
8306 		element_type = le16_to_cpu(element->ElementFlags) &
8307 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
8308 		switch (element_type) {
8309 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
8310 			element_str = "volume";
8311 			break;
8312 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
8313 			element_str = "phys disk";
8314 			break;
8315 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
8316 			element_str = "hot spare";
8317 			break;
8318 		default:
8319 			element_str = "unknown element";
8320 			break;
8321 		}
8322 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
8323 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
8324 		    reason_str, le16_to_cpu(element->VolDevHandle),
8325 		    le16_to_cpu(element->PhysDiskDevHandle),
8326 		    element->PhysDiskNum);
8327 	}
8328 }
8329 
8330 /**
8331  * _scsih_sas_ir_config_change_event - handle ir configuration change events
8332  * @ioc: per adapter object
8333  * @fw_event: The fw_event_work object
8334  * Context: user.
8335  */
8336 static void
8337 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
8338 	struct fw_event_work *fw_event)
8339 {
8340 	Mpi2EventIrConfigElement_t *element;
8341 	int i;
8342 	u8 foreign_config;
8343 	Mpi2EventDataIrConfigChangeList_t *event_data =
8344 		(Mpi2EventDataIrConfigChangeList_t *)
8345 		fw_event->event_data;
8346 
8347 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8348 	     (!ioc->hide_ir_msg))
8349 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
8350 
8351 	foreign_config = (le32_to_cpu(event_data->Flags) &
8352 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
8353 
8354 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8355 	if (ioc->shost_recovery &&
8356 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8357 		for (i = 0; i < event_data->NumElements; i++, element++) {
8358 			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8359 				_scsih_ir_fastpath(ioc,
8360 					le16_to_cpu(element->PhysDiskDevHandle),
8361 					element->PhysDiskNum);
8362 		}
8363 		return;
8364 	}
8365 
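	/*
	 * Volume add/delete elements that belong to a foreign configuration
	 * are ignored; physical-disk hide/unhide/add/delete elements are
	 * handled regardless, while WarpDrive controllers skip the PD
	 * hide/expose handling here.
	 */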
8366 	for (i = 0; i < event_data->NumElements; i++, element++) {
8367 
8368 		switch (element->ReasonCode) {
8369 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8370 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8371 			if (!foreign_config)
8372 				_scsih_sas_volume_add(ioc, element);
8373 			break;
8374 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8375 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8376 			if (!foreign_config)
8377 				_scsih_sas_volume_delete(ioc,
8378 				    le16_to_cpu(element->VolDevHandle));
8379 			break;
8380 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8381 			if (!ioc->is_warpdrive)
8382 				_scsih_sas_pd_hide(ioc, element);
8383 			break;
8384 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8385 			if (!ioc->is_warpdrive)
8386 				_scsih_sas_pd_expose(ioc, element);
8387 			break;
8388 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8389 			if (!ioc->is_warpdrive)
8390 				_scsih_sas_pd_add(ioc, element);
8391 			break;
8392 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8393 			if (!ioc->is_warpdrive)
8394 				_scsih_sas_pd_delete(ioc, element);
8395 			break;
8396 		}
8397 	}
8398 }
8399 
8400 /**
8401  * _scsih_sas_ir_volume_event - IR volume event
8402  * @ioc: per adapter object
8403  * @fw_event: The fw_event_work object
8404  * Context: user.
8405  */
8406 static void
8407 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8408 	struct fw_event_work *fw_event)
8409 {
8410 	u64 wwid;
8411 	unsigned long flags;
8412 	struct _raid_device *raid_device;
8413 	u16 handle;
8414 	u32 state;
8415 	int rc;
8416 	Mpi2EventDataIrVolume_t *event_data =
8417 		(Mpi2EventDataIrVolume_t *) fw_event->event_data;
8418 
8419 	if (ioc->shost_recovery)
8420 		return;
8421 
8422 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8423 		return;
8424 
8425 	handle = le16_to_cpu(event_data->VolDevHandle);
8426 	state = le32_to_cpu(event_data->NewValue);
8427 	if (!ioc->hide_ir_msg)
8428 		dewtprintk(ioc,
8429 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8430 				    __func__, handle,
8431 				    le32_to_cpu(event_data->PreviousValue),
8432 				    state));
8433 	switch (state) {
8434 	case MPI2_RAID_VOL_STATE_MISSING:
8435 	case MPI2_RAID_VOL_STATE_FAILED:
8436 		_scsih_sas_volume_delete(ioc, handle);
8437 		break;
8438 
8439 	case MPI2_RAID_VOL_STATE_ONLINE:
8440 	case MPI2_RAID_VOL_STATE_DEGRADED:
8441 	case MPI2_RAID_VOL_STATE_OPTIMAL:
8442 
8443 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8444 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8445 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8446 
8447 		if (raid_device)
8448 			break;
8449 
8450 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8451 		if (!wwid) {
8452 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8453 				__FILE__, __LINE__, __func__);
8454 			break;
8455 		}
8456 
8457 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8458 		if (!raid_device) {
8459 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8460 				__FILE__, __LINE__, __func__);
8461 			break;
8462 		}
8463 
8464 		raid_device->id = ioc->sas_id++;
8465 		raid_device->channel = RAID_CHANNEL;
8466 		raid_device->handle = handle;
8467 		raid_device->wwid = wwid;
8468 		_scsih_raid_device_add(ioc, raid_device);
8469 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8470 		    raid_device->id, 0);
8471 		if (rc)
8472 			_scsih_raid_device_remove(ioc, raid_device);
8473 		break;
8474 
8475 	case MPI2_RAID_VOL_STATE_INITIALIZING:
8476 	default:
8477 		break;
8478 	}
8479 }
8480 
8481 /**
8482  * _scsih_sas_ir_physical_disk_event - PD event
8483  * @ioc: per adapter object
8484  * @fw_event: The fw_event_work object
8485  * Context: user.
8486  */
8487 static void
8488 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8489 	struct fw_event_work *fw_event)
8490 {
8491 	u16 handle, parent_handle;
8492 	u32 state;
8493 	struct _sas_device *sas_device;
8494 	Mpi2ConfigReply_t mpi_reply;
8495 	Mpi2SasDevicePage0_t sas_device_pg0;
8496 	u32 ioc_status;
8497 	Mpi2EventDataIrPhysicalDisk_t *event_data =
8498 		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8499 	u64 sas_address;
8500 
8501 	if (ioc->shost_recovery)
8502 		return;
8503 
8504 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8505 		return;
8506 
8507 	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8508 	state = le32_to_cpu(event_data->NewValue);
8509 
8510 	if (!ioc->hide_ir_msg)
8511 		dewtprintk(ioc,
8512 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8513 				    __func__, handle,
8514 				    le32_to_cpu(event_data->PreviousValue),
8515 				    state));
8516 
8517 	switch (state) {
8518 	case MPI2_RAID_PD_STATE_ONLINE:
8519 	case MPI2_RAID_PD_STATE_DEGRADED:
8520 	case MPI2_RAID_PD_STATE_REBUILDING:
8521 	case MPI2_RAID_PD_STATE_OPTIMAL:
8522 	case MPI2_RAID_PD_STATE_HOT_SPARE:
8523 
8524 		if (!ioc->is_warpdrive)
8525 			set_bit(handle, ioc->pd_handles);
8526 
8527 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8528 		if (sas_device) {
8529 			sas_device_put(sas_device);
8530 			return;
8531 		}
8532 
8533 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8534 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8535 		    handle))) {
8536 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8537 				__FILE__, __LINE__, __func__);
8538 			return;
8539 		}
8540 
8541 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8542 		    MPI2_IOCSTATUS_MASK;
8543 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8544 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8545 				__FILE__, __LINE__, __func__);
8546 			return;
8547 		}
8548 
8549 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8550 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8551 			mpt3sas_transport_update_links(ioc, sas_address, handle,
8552 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8553 
8554 		_scsih_add_device(ioc, handle, 0, 1);
8555 
8556 		break;
8557 
8558 	case MPI2_RAID_PD_STATE_OFFLINE:
8559 	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8560 	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8561 	default:
8562 		break;
8563 	}
8564 }
8565 
8566 /**
8567  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8568  * @ioc: per adapter object
8569  * @event_data: event data payload
8570  * Context: user.
8571  */
8572 static void
8573 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8574 	Mpi2EventDataIrOperationStatus_t *event_data)
8575 {
8576 	char *reason_str = NULL;
8577 
8578 	switch (event_data->RAIDOperation) {
8579 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
8580 		reason_str = "resync";
8581 		break;
8582 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8583 		reason_str = "online capacity expansion";
8584 		break;
8585 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8586 		reason_str = "consistency check";
8587 		break;
8588 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8589 		reason_str = "background init";
8590 		break;
8591 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8592 		reason_str = "make data consistent";
8593 		break;
8594 	}
8595 
8596 	if (!reason_str)
8597 		return;
8598 
8599 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8600 		 reason_str,
8601 		 le16_to_cpu(event_data->VolDevHandle),
8602 		 event_data->PercentComplete);
8603 }
8604 
8605 /**
8606  * _scsih_sas_ir_operation_status_event - handle RAID operation events
8607  * @ioc: per adapter object
8608  * @fw_event: The fw_event_work object
8609  * Context: user.
8610  */
8611 static void
8612 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8613 	struct fw_event_work *fw_event)
8614 {
8615 	Mpi2EventDataIrOperationStatus_t *event_data =
8616 		(Mpi2EventDataIrOperationStatus_t *)
8617 		fw_event->event_data;
8618 	static struct _raid_device *raid_device;
8619 	unsigned long flags;
8620 	u16 handle;
8621 
8622 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8623 	    (!ioc->hide_ir_msg))
8624 		_scsih_sas_ir_operation_status_event_debug(ioc,
8625 		     event_data);
8626 
8627 	/* code added for raid transport support */
8628 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8629 
8630 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8631 		handle = le16_to_cpu(event_data->VolDevHandle);
8632 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8633 		if (raid_device)
8634 			raid_device->percent_complete =
8635 			    event_data->PercentComplete;
8636 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8637 	}
8638 }
8639 
8640 /**
8641  * _scsih_prep_device_scan - initialize parameters prior to device scan
8642  * @ioc: per adapter object
8643  *
8644  * Set the deleted flag prior to device scan.  If the device is found during
8645  * the scan, then we clear the deleted flag.
8646  */
8647 static void
8648 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8649 {
8650 	struct MPT3SAS_DEVICE *sas_device_priv_data;
8651 	struct scsi_device *sdev;
8652 
8653 	shost_for_each_device(sdev, ioc->shost) {
8654 		sas_device_priv_data = sdev->hostdata;
8655 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
8656 			sas_device_priv_data->sas_target->deleted = 1;
8657 	}
8658 }
8659 
8660 /**
8661  * _scsih_mark_responding_sas_device - mark a sas_device as responding
8662  * @ioc: per adapter object
8663  * @sas_device_pg0: SAS Device page 0
8664  *
8665  * After host reset, find out whether devices are still responding.
8666  * Used in _scsih_remove_unresponding_devices.
8667  */
8668 static void
8669 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8670 Mpi2SasDevicePage0_t *sas_device_pg0)
8671 {
8672 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8673 	struct scsi_target *starget;
8674 	struct _sas_device *sas_device = NULL;
8675 	struct _enclosure_node *enclosure_dev = NULL;
8676 	unsigned long flags;
8677 
8678 	if (sas_device_pg0->EnclosureHandle) {
8679 		enclosure_dev =
8680 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8681 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
8682 		if (enclosure_dev == NULL)
8683 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
8684 				 sas_device_pg0->EnclosureHandle);
8685 	}
8686 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
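	/*
	 * Device handles can be re-assigned by the firmware across a host
	 * reset, so match on the (SAS address, slot) pair and refresh the
	 * cached handle if it has changed.
	 */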
8687 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8688 		if ((sas_device->sas_address == le64_to_cpu(
8689 		    sas_device_pg0->SASAddress)) && (sas_device->slot ==
8690 		    le16_to_cpu(sas_device_pg0->Slot))) {
8691 			sas_device->responding = 1;
8692 			starget = sas_device->starget;
8693 			if (starget && starget->hostdata) {
8694 				sas_target_priv_data = starget->hostdata;
8695 				sas_target_priv_data->tm_busy = 0;
8696 				sas_target_priv_data->deleted = 0;
8697 			} else
8698 				sas_target_priv_data = NULL;
8699 			if (starget) {
8700 				starget_printk(KERN_INFO, starget,
8701 				    "handle(0x%04x), sas_addr(0x%016llx)\n",
8702 				    le16_to_cpu(sas_device_pg0->DevHandle),
8703 				    (unsigned long long)
8704 				    sas_device->sas_address);
8705 
8706 				if (sas_device->enclosure_handle != 0)
8707 					starget_printk(KERN_INFO, starget,
8708 					 "enclosure logical id(0x%016llx),"
8709 					 " slot(%d)\n",
8710 					 (unsigned long long)
8711 					 sas_device->enclosure_logical_id,
8712 					 sas_device->slot);
8713 			}
8714 			if (le16_to_cpu(sas_device_pg0->Flags) &
8715 			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8716 				sas_device->enclosure_level =
8717 				   sas_device_pg0->EnclosureLevel;
8718 				memcpy(&sas_device->connector_name[0],
8719 					&sas_device_pg0->ConnectorName[0], 4);
8720 			} else {
8721 				sas_device->enclosure_level = 0;
8722 				sas_device->connector_name[0] = '\0';
8723 			}
8724 
8725 			sas_device->enclosure_handle =
8726 				le16_to_cpu(sas_device_pg0->EnclosureHandle);
8727 			sas_device->is_chassis_slot_valid = 0;
8728 			if (enclosure_dev) {
8729 				sas_device->enclosure_logical_id = le64_to_cpu(
8730 					enclosure_dev->pg0.EnclosureLogicalID);
8731 				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8732 				    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8733 					sas_device->is_chassis_slot_valid = 1;
8734 					sas_device->chassis_slot =
8735 						enclosure_dev->pg0.ChassisSlot;
8736 				}
8737 			}
8738 
8739 			if (sas_device->handle == le16_to_cpu(
8740 			    sas_device_pg0->DevHandle))
8741 				goto out;
8742 			pr_info("\thandle changed from(0x%04x)!!!\n",
8743 			    sas_device->handle);
8744 			sas_device->handle = le16_to_cpu(
8745 			    sas_device_pg0->DevHandle);
8746 			if (sas_target_priv_data)
8747 				sas_target_priv_data->handle =
8748 				    le16_to_cpu(sas_device_pg0->DevHandle);
8749 			goto out;
8750 		}
8751 	}
8752  out:
8753 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8754 }
8755 
8756 /**
8757  * _scsih_create_enclosure_list_after_reset - free the existing enclosure
8758  *	list and rebuild it by scanning all Enclosure Page(0)s
8759  * @ioc: per adapter object
8760  */
8761 static void
8762 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8763 {
8764 	struct _enclosure_node *enclosure_dev;
8765 	Mpi2ConfigReply_t mpi_reply;
8766 	u16 enclosure_handle;
8767 	int rc;
8768 
8769 	/* Free existing enclosure list */
8770 	mpt3sas_free_enclosure_list(ioc);
8771 
8772 	/* Reconstruct the enclosure list after reset */
8773 	enclosure_handle = 0xFFFF;
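	/*
	 * With the GET_NEXT_HANDLE form a starting handle of 0xFFFF yields
	 * the first enclosure; each page read feeds its handle back in to
	 * fetch the next one until the firmware reports no more entries.
	 */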
8774 	do {
8775 		enclosure_dev =
8776 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8777 		if (!enclosure_dev) {
8778 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8779 				__FILE__, __LINE__, __func__);
8780 			return;
8781 		}
8782 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8783 				&enclosure_dev->pg0,
8784 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8785 				enclosure_handle);
8786 
8787 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8788 						MPI2_IOCSTATUS_MASK)) {
8789 			kfree(enclosure_dev);
8790 			return;
8791 		}
8792 		list_add_tail(&enclosure_dev->list,
8793 						&ioc->enclosure_list);
8794 		enclosure_handle =
8795 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8796 	} while (1);
8797 }
8798 
8799 /**
8800  * _scsih_search_responding_sas_devices - search for responding SAS end devices
8801  * @ioc: per adapter object
8802  *
8803  * After host reset, find out whether devices are still responding.
8804  * If not remove.
8805  */
8806 static void
8807 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8808 {
8809 	Mpi2SasDevicePage0_t sas_device_pg0;
8810 	Mpi2ConfigReply_t mpi_reply;
8811 	u16 ioc_status;
8812 	u16 handle;
8813 	u32 device_info;
8814 
8815 	ioc_info(ioc, "search for end-devices: start\n");
8816 
8817 	if (list_empty(&ioc->sas_device_list))
8818 		goto out;
8819 
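	/*
	 * Walk every SAS Device Page 0 the firmware currently reports and
	 * mark the matching cached sas_device as responding; anything left
	 * unmarked is pruned later by _scsih_remove_unresponding_devices().
	 */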
8820 	handle = 0xFFFF;
8821 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8822 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8823 	    handle))) {
8824 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8825 		    MPI2_IOCSTATUS_MASK;
8826 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8827 			break;
8828 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
8829 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8830 		if (!(_scsih_is_end_device(device_info)))
8831 			continue;
8832 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8833 	}
8834 
8835  out:
8836 	ioc_info(ioc, "search for end-devices: complete\n");
8837 }
8838 
8839 /**
8840  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8841  * @ioc: per adapter object
8842  * @pcie_device_pg0: PCIe Device page 0
8843  *
8844  * After host reset, find out whether devices are still responding.
8845  * Used in _scsih_remove_unresponding_devices.
8846  */
8847 static void
8848 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8849 	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8850 {
8851 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8852 	struct scsi_target *starget;
8853 	struct _pcie_device *pcie_device;
8854 	unsigned long flags;
8855 
8856 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8857 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8858 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8859 		    && (pcie_device->slot == le16_to_cpu(
8860 		    pcie_device_pg0->Slot))) {
8861 			pcie_device->access_status =
8862 					pcie_device_pg0->AccessStatus;
8863 			pcie_device->responding = 1;
8864 			starget = pcie_device->starget;
8865 			if (starget && starget->hostdata) {
8866 				sas_target_priv_data = starget->hostdata;
8867 				sas_target_priv_data->tm_busy = 0;
8868 				sas_target_priv_data->deleted = 0;
8869 			} else
8870 				sas_target_priv_data = NULL;
8871 			if (starget) {
8872 				starget_printk(KERN_INFO, starget,
8873 				    "handle(0x%04x), wwid(0x%016llx) ",
8874 				    pcie_device->handle,
8875 				    (unsigned long long)pcie_device->wwid);
8876 				if (pcie_device->enclosure_handle != 0)
8877 					starget_printk(KERN_INFO, starget,
8878 					    "enclosure logical id(0x%016llx), "
8879 					    "slot(%d)\n",
8880 					    (unsigned long long)
8881 					    pcie_device->enclosure_logical_id,
8882 					    pcie_device->slot);
8883 			}
8884 
8885 			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8886 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8887 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8888 				pcie_device->enclosure_level =
8889 				    pcie_device_pg0->EnclosureLevel;
8890 				memcpy(&pcie_device->connector_name[0],
8891 				    &pcie_device_pg0->ConnectorName[0], 4);
8892 			} else {
8893 				pcie_device->enclosure_level = 0;
8894 				pcie_device->connector_name[0] = '\0';
8895 			}
8896 
8897 			if (pcie_device->handle == le16_to_cpu(
8898 			    pcie_device_pg0->DevHandle))
8899 				goto out;
8900 			pr_info("\thandle changed from(0x%04x)!!!\n",
8901 			    pcie_device->handle);
8902 			pcie_device->handle = le16_to_cpu(
8903 			    pcie_device_pg0->DevHandle);
8904 			if (sas_target_priv_data)
8905 				sas_target_priv_data->handle =
8906 				    le16_to_cpu(pcie_device_pg0->DevHandle);
8907 			goto out;
8908 		}
8909 	}
8910 
8911  out:
8912 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8913 }
8914 
8915 /**
8916  * _scsih_search_responding_pcie_devices - search for responding PCIe end devices
8917  * @ioc: per adapter object
8918  *
8919  * After host reset, find out whether devices are still responding.
8920  * If not remove.
8921  */
8922 static void
8923 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8924 {
8925 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8926 	Mpi2ConfigReply_t mpi_reply;
8927 	u16 ioc_status;
8928 	u16 handle;
8929 	u32 device_info;
8930 
8931 	ioc_info(ioc, "search for PCIe end-devices: start\n");
8932 
8933 	if (list_empty(&ioc->pcie_device_list))
8934 		goto out;
8935 
8936 	handle = 0xFFFF;
8937 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8938 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8939 		handle))) {
8940 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8941 		    MPI2_IOCSTATUS_MASK;
8942 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8943 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8944 				 __func__, ioc_status,
8945 				 le32_to_cpu(mpi_reply.IOCLogInfo));
8946 			break;
8947 		}
8948 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8949 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8950 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8951 			continue;
8952 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8953 	}
8954 out:
8955 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
8956 }
8957 
8958 /**
8959  * _scsih_mark_responding_raid_device - mark a raid_device as responding
8960  * @ioc: per adapter object
8961  * @wwid: world wide identifier for raid volume
8962  * @handle: device handle
8963  *
8964  * After host reset, find out whether devices are still responding.
8965  * Used in _scsih_remove_unresponding_devices.
8966  */
8967 static void
8968 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8969 	u16 handle)
8970 {
8971 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8972 	struct scsi_target *starget;
8973 	struct _raid_device *raid_device;
8974 	unsigned long flags;
8975 
8976 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8977 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8978 		if (raid_device->wwid == wwid && raid_device->starget) {
8979 			starget = raid_device->starget;
8980 			if (starget && starget->hostdata) {
8981 				sas_target_priv_data = starget->hostdata;
8982 				sas_target_priv_data->deleted = 0;
8983 			} else
8984 				sas_target_priv_data = NULL;
8985 			raid_device->responding = 1;
8986 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8987 			starget_printk(KERN_INFO, raid_device->starget,
8988 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
8989 			    (unsigned long long)raid_device->wwid);
8990 
8991 			/*
8992 			 * WARPDRIVE: The handles of the PDs might have changed
8993 			 * across the host reset so re-initialize the
8994 			 * required data for Direct IO
8995 			 */
8996 			mpt3sas_init_warpdrive_properties(ioc, raid_device);
8997 			spin_lock_irqsave(&ioc->raid_device_lock, flags);
8998 			if (raid_device->handle == handle) {
8999 				spin_unlock_irqrestore(&ioc->raid_device_lock,
9000 				    flags);
9001 				return;
9002 			}
9003 			pr_info("\thandle changed from(0x%04x)!!!\n",
9004 			    raid_device->handle);
9005 			raid_device->handle = handle;
9006 			if (sas_target_priv_data)
9007 				sas_target_priv_data->handle = handle;
9008 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9009 			return;
9010 		}
9011 	}
9012 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9013 }
9014 
9015 /**
9016  * _scsih_search_responding_raid_devices - search for responding RAID volumes
9017  * @ioc: per adapter object
9018  *
9019  * After host reset, find out whether devices are still responding.
9020  * If not remove.
9021  */
9022 static void
9023 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9024 {
9025 	Mpi2RaidVolPage1_t volume_pg1;
9026 	Mpi2RaidVolPage0_t volume_pg0;
9027 	Mpi2RaidPhysDiskPage0_t pd_pg0;
9028 	Mpi2ConfigReply_t mpi_reply;
9029 	u16 ioc_status;
9030 	u16 handle;
9031 	u8 phys_disk_num;
9032 
9033 	if (!ioc->ir_firmware)
9034 		return;
9035 
9036 	ioc_info(ioc, "search for raid volumes: start\n");
9037 
9038 	if (list_empty(&ioc->raid_device_list))
9039 		goto out;
9040 
9041 	handle = 0xFFFF;
9042 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9043 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9044 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9045 		    MPI2_IOCSTATUS_MASK;
9046 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9047 			break;
9048 		handle = le16_to_cpu(volume_pg1.DevHandle);
9049 
9050 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9051 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9052 		     sizeof(Mpi2RaidVolPage0_t)))
9053 			continue;
9054 
9055 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9056 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9057 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
9058 			_scsih_mark_responding_raid_device(ioc,
9059 			    le64_to_cpu(volume_pg1.WWID), handle);
9060 	}
9061 
9062 	/* refresh the pd_handles */
9063 	if (!ioc->is_warpdrive) {
9064 		phys_disk_num = 0xFF;
9065 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
9066 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9067 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9068 		    phys_disk_num))) {
9069 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9070 			    MPI2_IOCSTATUS_MASK;
9071 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9072 				break;
9073 			phys_disk_num = pd_pg0.PhysDiskNum;
9074 			handle = le16_to_cpu(pd_pg0.DevHandle);
9075 			set_bit(handle, ioc->pd_handles);
9076 		}
9077 	}
9078  out:
9079 	ioc_info(ioc, "search for raid volumes: complete\n");
9080 }
9081 
9082 /**
9083  * _scsih_mark_responding_expander - mark an expander as responding
9084  * @ioc: per adapter object
9085  * @expander_pg0:SAS Expander Config Page0
9086  *
9087  * After host reset, find out whether devices are still responding.
9088  * Used in _scsih_remove_unresponding_devices.
9089  */
9090 static void
9091 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
9092 	Mpi2ExpanderPage0_t *expander_pg0)
9093 {
9094 	struct _sas_node *sas_expander = NULL;
9095 	unsigned long flags;
9096 	int i;
9097 	struct _enclosure_node *enclosure_dev = NULL;
9098 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
9099 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
9100 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
9101 
9102 	if (enclosure_handle)
9103 		enclosure_dev =
9104 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
9105 							enclosure_handle);
9106 
9107 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9108 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
9109 		if (sas_expander->sas_address != sas_address)
9110 			continue;
9111 		sas_expander->responding = 1;
9112 
9113 		if (enclosure_dev) {
9114 			sas_expander->enclosure_logical_id =
9115 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
9116 			sas_expander->enclosure_handle =
9117 			    le16_to_cpu(expander_pg0->EnclosureHandle);
9118 		}
9119 
9120 		if (sas_expander->handle == handle)
9121 			goto out;
9122 		pr_info("\texpander(0x%016llx): handle changed" \
9123 		    " from(0x%04x) to (0x%04x)!!!\n",
9124 		    (unsigned long long)sas_expander->sas_address,
9125 		    sas_expander->handle, handle);
9126 		sas_expander->handle = handle;
9127 		for (i = 0 ; i < sas_expander->num_phys ; i++)
9128 			sas_expander->phy[i].handle = handle;
9129 		goto out;
9130 	}
9131  out:
9132 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9133 }
9134 
9135 /**
9136  * _scsih_search_responding_expanders - search for responding expanders
9137  * @ioc: per adapter object
9138  *
9139  * After host reset, find out whether devices are still responding.
9140  * If not remove.
9141  */
9142 static void
9143 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
9144 {
9145 	Mpi2ExpanderPage0_t expander_pg0;
9146 	Mpi2ConfigReply_t mpi_reply;
9147 	u16 ioc_status;
9148 	u64 sas_address;
9149 	u16 handle;
9150 
9151 	ioc_info(ioc, "search for expanders: start\n");
9152 
9153 	if (list_empty(&ioc->sas_expander_list))
9154 		goto out;
9155 
9156 	handle = 0xFFFF;
9157 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9158 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9159 
9160 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9161 		    MPI2_IOCSTATUS_MASK;
9162 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9163 			break;
9164 
9165 		handle = le16_to_cpu(expander_pg0.DevHandle);
9166 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
9167 		pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
9168 			handle,
9169 		    (unsigned long long)sas_address);
9170 		_scsih_mark_responding_expander(ioc, &expander_pg0);
9171 	}
9172 
9173  out:
9174 	ioc_info(ioc, "search for expanders: complete\n");
9175 }
9176 
9177 /**
9178  * _scsih_remove_unresponding_devices - removing unresponding devices
9179  * @ioc: per adapter object
9180  */
9181 static void
9182 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
9183 {
9184 	struct _sas_device *sas_device, *sas_device_next;
9185 	struct _sas_node *sas_expander, *sas_expander_next;
9186 	struct _raid_device *raid_device, *raid_device_next;
9187 	struct _pcie_device *pcie_device, *pcie_device_next;
9188 	struct list_head tmp_list;
9189 	unsigned long flags;
9190 	LIST_HEAD(head);
9191 
9192 	ioc_info(ioc, "removing unresponding devices: start\n");
9193 
9194 	/* removing unresponding end devices */
9195 	ioc_info(ioc, "removing unresponding devices: end-devices\n");
9196 	/*
9197 	 * Iterate, pulling off devices marked as non-responding. We become the
9198 	 * owner for the reference the list had on any object we prune.
9199 	 */
9200 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9201 	list_for_each_entry_safe(sas_device, sas_device_next,
9202 	    &ioc->sas_device_list, list) {
9203 		if (!sas_device->responding)
9204 			list_move_tail(&sas_device->list, &head);
9205 		else
9206 			sas_device->responding = 0;
9207 	}
9208 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9209 
9210 	/*
9211 	 * Now, uninitialize and remove the unresponding devices we pruned.
9212 	 */
9213 	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
9214 		_scsih_remove_device(ioc, sas_device);
9215 		list_del_init(&sas_device->list);
9216 		sas_device_put(sas_device);
9217 	}
9218 
9219 	ioc_info(ioc, "removing unresponding devices: pcie end-devices\n");
9220 	INIT_LIST_HEAD(&head);
9221 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9222 	list_for_each_entry_safe(pcie_device, pcie_device_next,
9223 	    &ioc->pcie_device_list, list) {
9224 		if (!pcie_device->responding)
9225 			list_move_tail(&pcie_device->list, &head);
9226 		else
9227 			pcie_device->responding = 0;
9228 	}
9229 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9230 
9231 	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
9232 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9233 		list_del_init(&pcie_device->list);
9234 		pcie_device_put(pcie_device);
9235 	}
9236 
9237 	/* removing unresponding volumes */
9238 	if (ioc->ir_firmware) {
9239 		ioc_info(ioc, "removing unresponding devices: volumes\n");
9240 		list_for_each_entry_safe(raid_device, raid_device_next,
9241 		    &ioc->raid_device_list, list) {
9242 			if (!raid_device->responding)
9243 				_scsih_sas_volume_delete(ioc,
9244 				    raid_device->handle);
9245 			else
9246 				raid_device->responding = 0;
9247 		}
9248 	}
9249 
9250 	/* removing unresponding expanders */
9251 	ioc_info(ioc, "removing unresponding devices: expanders\n");
9252 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9253 	INIT_LIST_HEAD(&tmp_list);
9254 	list_for_each_entry_safe(sas_expander, sas_expander_next,
9255 	    &ioc->sas_expander_list, list) {
9256 		if (!sas_expander->responding)
9257 			list_move_tail(&sas_expander->list, &tmp_list);
9258 		else
9259 			sas_expander->responding = 0;
9260 	}
9261 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9262 	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
9263 	    list) {
9264 		_scsih_expander_node_remove(ioc, sas_expander);
9265 	}
9266 
9267 	ioc_info(ioc, "removing unresponding devices: complete\n");
9268 
9269 	/* unblock devices */
9270 	_scsih_ublock_io_all_device(ioc);
9271 }
9272 
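/**
 * _scsih_refresh_expander_links - refresh the phy links of an expander
 * @ioc: per adapter object
 * @sas_expander: the sas_node object for the expander
 * @handle: expander device handle
 *
 * Re-read Expander Page 1 for every phy and report the attached device
 * handle and negotiated link rate to the SAS transport layer.
 */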
9273 static void
9274 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
9275 	struct _sas_node *sas_expander, u16 handle)
9276 {
9277 	Mpi2ExpanderPage1_t expander_pg1;
9278 	Mpi2ConfigReply_t mpi_reply;
9279 	int i;
9280 
9281 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
9282 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
9283 		    &expander_pg1, i, handle))) {
9284 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9285 				__FILE__, __LINE__, __func__);
9286 			return;
9287 		}
9288 
9289 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
9290 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
9291 		    expander_pg1.NegotiatedLinkRate >> 4);
9292 	}
9293 }
9294 
9295 /**
9296  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
9297  * @ioc: per adapter object
9298  */
9299 static void
9300 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9301 {
9302 	Mpi2ExpanderPage0_t expander_pg0;
9303 	Mpi2SasDevicePage0_t sas_device_pg0;
9304 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9305 	Mpi2RaidVolPage1_t volume_pg1;
9306 	Mpi2RaidVolPage0_t volume_pg0;
9307 	Mpi2RaidPhysDiskPage0_t pd_pg0;
9308 	Mpi2EventIrConfigElement_t element;
9309 	Mpi2ConfigReply_t mpi_reply;
9310 	u8 phys_disk_num;
9311 	u16 ioc_status;
9312 	u16 handle, parent_handle;
9313 	u64 sas_address;
9314 	struct _sas_device *sas_device;
9315 	struct _pcie_device *pcie_device;
9316 	struct _sas_node *expander_device;
9317 	static struct _raid_device *raid_device;
9318 	u8 retry_count;
9319 	unsigned long flags;
9320 
9321 	ioc_info(ioc, "scan devices: start\n");
9322 
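	/*
	 * Rediscovery order: expanders first so parent SAS addresses can be
	 * resolved, then IR physical disks and volumes, then plain SAS end
	 * devices, and finally PCIe/NVMe end devices.
	 */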
9323 	_scsih_sas_host_refresh(ioc);
9324 
9325 	ioc_info(ioc, "\tscan devices: expanders start\n");
9326 
9327 	/* expanders */
9328 	handle = 0xFFFF;
9329 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9330 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9331 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9332 		    MPI2_IOCSTATUS_MASK;
9333 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9334 			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9335 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9336 			break;
9337 		}
9338 		handle = le16_to_cpu(expander_pg0.DevHandle);
9339 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
9340 		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
9341 		    ioc, le64_to_cpu(expander_pg0.SASAddress));
9342 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9343 		if (expander_device)
9344 			_scsih_refresh_expander_links(ioc, expander_device,
9345 			    handle);
9346 		else {
9347 			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9348 				 handle,
9349 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9350 			_scsih_expander_add(ioc, handle);
9351 			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9352 				 handle,
9353 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9354 		}
9355 	}
9356 
9357 	ioc_info(ioc, "\tscan devices: expanders complete\n");
9358 
9359 	if (!ioc->ir_firmware)
9360 		goto skip_to_sas;
9361 
9362 	ioc_info(ioc, "\tscan devices: phys disk start\n");
9363 
9364 	/* phys disk */
9365 	phys_disk_num = 0xFF;
9366 	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9367 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9368 	    phys_disk_num))) {
9369 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9370 		    MPI2_IOCSTATUS_MASK;
9371 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9372 			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9373 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9374 			break;
9375 		}
9376 		phys_disk_num = pd_pg0.PhysDiskNum;
9377 		handle = le16_to_cpu(pd_pg0.DevHandle);
9378 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9379 		if (sas_device) {
9380 			sas_device_put(sas_device);
9381 			continue;
9382 		}
9383 		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9384 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9385 		    handle) != 0)
9386 			continue;
9387 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9388 		    MPI2_IOCSTATUS_MASK;
9389 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9390 			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9391 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9392 			break;
9393 		}
9394 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9395 		if (!_scsih_get_sas_address(ioc, parent_handle,
9396 		    &sas_address)) {
9397 			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9398 				 handle,
9399 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9400 			mpt3sas_transport_update_links(ioc, sas_address,
9401 			    handle, sas_device_pg0.PhyNum,
9402 			    MPI2_SAS_NEG_LINK_RATE_1_5);
9403 			set_bit(handle, ioc->pd_handles);
9404 			retry_count = 0;
9405 			/* This will retry adding the end device.
9406 			 * _scsih_add_device() will decide on retries and
9407 			 * return "1" when it should be retried
9408 			 */
9409 			while (_scsih_add_device(ioc, handle, retry_count++,
9410 			    1)) {
9411 				ssleep(1);
9412 			}
9413 			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9414 				 handle,
9415 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9416 		}
9417 	}
9418 
9419 	ioc_info(ioc, "\tscan devices: phys disk complete\n");
9420 
9421 	ioc_info(ioc, "\tscan devices: volumes start\n");
9422 
9423 	/* volumes */
9424 	handle = 0xFFFF;
9425 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9426 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9427 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9428 		    MPI2_IOCSTATUS_MASK;
9429 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9430 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9431 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9432 			break;
9433 		}
9434 		handle = le16_to_cpu(volume_pg1.DevHandle);
9435 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9436 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
9437 		    le64_to_cpu(volume_pg1.WWID));
9438 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9439 		if (raid_device)
9440 			continue;
9441 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9442 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9443 		     sizeof(Mpi2RaidVolPage0_t)))
9444 			continue;
9445 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9446 		    MPI2_IOCSTATUS_MASK;
9447 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9448 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9449 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9450 			break;
9451 		}
9452 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9453 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9454 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9455 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9456 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9457 			element.VolDevHandle = volume_pg1.DevHandle;
9458 			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9459 				 volume_pg1.DevHandle);
9460 			_scsih_sas_volume_add(ioc, &element);
9461 			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9462 				 volume_pg1.DevHandle);
9463 		}
9464 	}
9465 
9466 	ioc_info(ioc, "\tscan devices: volumes complete\n");
9467 
9468  skip_to_sas:
9469 
9470 	ioc_info(ioc, "\tscan devices: end devices start\n");
9471 
9472 	/* sas devices */
9473 	handle = 0xFFFF;
9474 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9475 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9476 	    handle))) {
9477 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9478 		    MPI2_IOCSTATUS_MASK;
9479 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9480 			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9481 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9482 			break;
9483 		}
9484 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9485 		if (!(_scsih_is_end_device(
9486 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
9487 			continue;
9488 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
9489 		    le64_to_cpu(sas_device_pg0.SASAddress));
9490 		if (sas_device) {
9491 			sas_device_put(sas_device);
9492 			continue;
9493 		}
9494 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9495 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9496 			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9497 				 handle,
9498 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9499 			mpt3sas_transport_update_links(ioc, sas_address, handle,
9500 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9501 			retry_count = 0;
9502 			/* This will retry adding the end device.
9503 			 * _scsih_add_device() will decide on retries and
9504 			 * return "1" when it should be retried
9505 			 */
9506 			while (_scsih_add_device(ioc, handle, retry_count++,
9507 			    0)) {
9508 				ssleep(1);
9509 			}
9510 			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9511 				 handle,
9512 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9513 		}
9514 	}
9515 	ioc_info(ioc, "\tscan devices: end devices complete\n");
9516 	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9517 
9518 	/* pcie devices */
9519 	handle = 0xFFFF;
9520 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9521 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9522 		handle))) {
9523 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9524 				& MPI2_IOCSTATUS_MASK;
9525 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9526 			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9527 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9528 			break;
9529 		}
9530 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9531 		if (!(_scsih_is_nvme_pciescsi_device(
9532 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9533 			continue;
9534 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9535 				le64_to_cpu(pcie_device_pg0.WWID));
9536 		if (pcie_device) {
9537 			pcie_device_put(pcie_device);
9538 			continue;
9539 		}
9540 		retry_count = 0;
9541 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9542 		_scsih_pcie_add_device(ioc, handle);
9543 
9544 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9545 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9546 	}
9547 	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
9548 	ioc_info(ioc, "scan devices: complete\n");
9549 }
9550 
9551 /**
9552  * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
9553  * @ioc: per adapter object
9554  *
9555  * The handler for doing any required cleanup or initialization.
9556  */
9557 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9558 {
9559 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9560 }
9561 
9562 /**
9563  * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
9564  *							scsi & tm cmds.
9565  * @ioc: per adapter object
9566  *
9567  * The handler for doing any required cleanup or initialization.
9568  */
9569 void
9570 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
9571 {
9572 	dtmprintk(ioc,
9573 	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
9574 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9575 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9576 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9577 		complete(&ioc->scsih_cmds.done);
9578 	}
9579 	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9580 		ioc->tm_cmds.status |= MPT3_CMD_RESET;
9581 		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9582 		complete(&ioc->tm_cmds.done);
9583 	}
9584 
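	/*
	 * Drop any in-flight device add/remove bookkeeping; it is rebuilt by
	 * the device scan that runs once the reset completes.
	 */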
9585 	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9586 	memset(ioc->device_remove_in_progress, 0,
9587 	       ioc->device_remove_in_progress_sz);
9588 	_scsih_fw_event_cleanup_queue(ioc);
9589 	_scsih_flush_running_cmds(ioc);
9590 }
9591 
9592 /**
9593  * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
9594  * @ioc: per adapter object
9595  *
9596  * The handler for doing any required cleanup or initialization.
9597  */
9598 void
9599 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9600 {
9601 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
9602 	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9603 					   !ioc->sas_hba.num_phys)) {
9604 		_scsih_prep_device_scan(ioc);
9605 		_scsih_create_enclosure_list_after_reset(ioc);
9606 		_scsih_search_responding_sas_devices(ioc);
9607 		_scsih_search_responding_pcie_devices(ioc);
9608 		_scsih_search_responding_raid_devices(ioc);
9609 		_scsih_search_responding_expanders(ioc);
9610 		_scsih_error_recovery_delete_devices(ioc);
9611 	}
9612 }
9613 
9614 /**
9615  * _mpt3sas_fw_work - delayed task for processing firmware events
9616  * @ioc: per adapter object
9617  * @fw_event: The fw_event_work object
9618  * Context: user.
9619  */
9620 static void
9621 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9622 {
9623 	ioc->current_event = fw_event;
9624 	_scsih_fw_event_del_from_list(ioc, fw_event);
9625 
9626 	/* the queue is being flushed so ignore this event */
9627 	if (ioc->remove_host || ioc->pci_error_recovery) {
9628 		fw_event_work_put(fw_event);
9629 		ioc->current_event = NULL;
9630 		return;
9631 	}
9632 
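	/* Dispatch the firmware event to its handler; runs in user context. */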
9633 	switch (fw_event->event) {
9634 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
9635 		mpt3sas_process_trigger_data(ioc,
9636 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9637 			fw_event->event_data);
9638 		break;
9639 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9640 		while (scsi_host_in_recovery(ioc->shost) ||
9641 					 ioc->shost_recovery) {
9642 			/*
9643 			 * If we're unloading or cancelling the work, bail.
9644 			 * Otherwise, this can become an infinite loop.
9645 			 */
9646 			if (ioc->remove_host || ioc->fw_events_cleanup)
9647 				goto out;
9648 			ssleep(1);
9649 		}
9650 		_scsih_remove_unresponding_devices(ioc);
9651 		_scsih_scan_for_devices_after_reset(ioc);
9652 		_scsih_set_nvme_max_shutdown_latency(ioc);
9653 		break;
9654 	case MPT3SAS_PORT_ENABLE_COMPLETE:
9655 		ioc->start_scan = 0;
9656 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
9657 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9658 			    missing_delay[1]);
9659 		dewtprintk(ioc,
9660 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
9661 		break;
9662 	case MPT3SAS_TURN_ON_PFA_LED:
9663 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9664 		break;
9665 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9666 		_scsih_sas_topology_change_event(ioc, fw_event);
9667 		break;
9668 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9669 		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9670 			_scsih_sas_device_status_change_event_debug(ioc,
9671 			    (Mpi2EventDataSasDeviceStatusChange_t *)
9672 			    fw_event->event_data);
9673 		break;
9674 	case MPI2_EVENT_SAS_DISCOVERY:
9675 		_scsih_sas_discovery_event(ioc, fw_event);
9676 		break;
9677 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9678 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
9679 		break;
9680 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9681 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
9682 		break;
9683 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9684 		_scsih_sas_enclosure_dev_status_change_event(ioc,
9685 		    fw_event);
9686 		break;
9687 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9688 		_scsih_sas_ir_config_change_event(ioc, fw_event);
9689 		break;
9690 	case MPI2_EVENT_IR_VOLUME:
9691 		_scsih_sas_ir_volume_event(ioc, fw_event);
9692 		break;
9693 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9694 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
9695 		break;
9696 	case MPI2_EVENT_IR_OPERATION_STATUS:
9697 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
9698 		break;
9699 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9700 		_scsih_pcie_device_status_change_event(ioc, fw_event);
9701 		break;
9702 	case MPI2_EVENT_PCIE_ENUMERATION:
9703 		_scsih_pcie_enumeration_event(ioc, fw_event);
9704 		break;
9705 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9706 		_scsih_pcie_topology_change_event(ioc, fw_event);
9707 		ioc->current_event = NULL;
9708 		return;
9710 	}
9711 out:
9712 	fw_event_work_put(fw_event);
9713 	ioc->current_event = NULL;
9714 }
9715 
9716 /**
9717  * _firmware_event_work - wrapper for the firmware event work thread
9718  * @work: The fw_event_work object
9719  * Context: user.
9720  *
9721  * Wrapper that hands the queued fw_event_work to _mpt3sas_fw_work().
9722  */
9723 
9724 static void
9725 _firmware_event_work(struct work_struct *work)
9726 {
9727 	struct fw_event_work *fw_event = container_of(work,
9728 	    struct fw_event_work, work);
9729 
9730 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
9731 }
9732 
9733 /**
9734  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9735  * @ioc: per adapter object
9736  * @msix_index: MSIX table index supplied by the OS
9737  * @reply: reply message frame(lower 32bit addr)
9738  * Context: interrupt.
9739  *
9740  * This function merely adds a new work task into ioc->firmware_event_thread.
9741  * The tasks are worked from _firmware_event_work in user context.
9742  *
9743  * Return: 1 meaning mf should be freed from _base_interrupt
9744  *         0 means the mf is freed from this function.
9745  */
9746 u8
9747 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9748 	u32 reply)
9749 {
9750 	struct fw_event_work *fw_event;
9751 	Mpi2EventNotificationReply_t *mpi_reply;
9752 	u16 event;
9753 	u16 sz;
9754 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9755 
9756 	/* events turned off due to host reset */
9757 	if (ioc->pci_error_recovery)
9758 		return 1;
9759 
9760 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9761 
9762 	if (unlikely(!mpi_reply)) {
9763 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9764 			__FILE__, __LINE__, __func__);
9765 		return 1;
9766 	}
9767 
9768 	event = le16_to_cpu(mpi_reply->Event);
9769 
9770 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9771 		mpt3sas_trigger_event(ioc, event, 0);
9772 
9773 	switch (event) {
9774 	/* handle these */
9775 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9776 	{
9777 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9778 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
9779 		    mpi_reply->EventData;
9780 
9781 		if (baen_data->Primitive !=
9782 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9783 			return 1;
9784 
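		/*
		 * Only one broadcast AEN is serviced at a time; if one is
		 * already being handled, just count the new arrival.
		 */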
9785 		if (ioc->broadcast_aen_busy) {
9786 			ioc->broadcast_aen_pending++;
9787 			return 1;
9788 		} else
9789 			ioc->broadcast_aen_busy = 1;
9790 		break;
9791 	}
9792 
9793 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9794 		_scsih_check_topo_delete_events(ioc,
9795 		    (Mpi2EventDataSasTopologyChangeList_t *)
9796 		    mpi_reply->EventData);
9797 		break;
9798 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9799 	_scsih_check_pcie_topo_remove_events(ioc,
9800 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
9801 		    mpi_reply->EventData);
9802 		break;
9803 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9804 		_scsih_check_ir_config_unhide_events(ioc,
9805 		    (Mpi2EventDataIrConfigChangeList_t *)
9806 		    mpi_reply->EventData);
9807 		break;
9808 	case MPI2_EVENT_IR_VOLUME:
9809 		_scsih_check_volume_delete_events(ioc,
9810 		    (Mpi2EventDataIrVolume_t *)
9811 		    mpi_reply->EventData);
9812 		break;
9813 	case MPI2_EVENT_LOG_ENTRY_ADDED:
9814 	{
9815 		Mpi2EventDataLogEntryAdded_t *log_entry;
9816 		u32 *log_code;
9817 
9818 		if (!ioc->is_warpdrive)
9819 			break;
9820 
9821 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
9822 		    mpi_reply->EventData;
9823 		log_code = (u32 *)log_entry->LogData;
9824 
9825 		if (le16_to_cpu(log_entry->LogEntryQualifier)
9826 		    != MPT2_WARPDRIVE_LOGENTRY)
9827 			break;
9828 
9829 		switch (le32_to_cpu(*log_code)) {
9830 		case MPT2_WARPDRIVE_LC_SSDT:
9831 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9832 			break;
9833 		case MPT2_WARPDRIVE_LC_SSDLW:
9834 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9835 			break;
9836 		case MPT2_WARPDRIVE_LC_SSDLF:
9837 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9838 			break;
9839 		case MPT2_WARPDRIVE_LC_BRMF:
9840 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9841 			break;
9842 		}
9843 
9844 		break;
9845 	}
9846 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9847 		_scsih_sas_device_status_change_event(ioc,
9848 		    (Mpi2EventDataSasDeviceStatusChange_t *)
9849 		    mpi_reply->EventData);
9850 		break;
9851 	case MPI2_EVENT_IR_OPERATION_STATUS:
9852 	case MPI2_EVENT_SAS_DISCOVERY:
9853 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9854 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9855 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9856 	case MPI2_EVENT_PCIE_ENUMERATION:
9857 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9858 		break;
9859 
9860 	case MPI2_EVENT_TEMP_THRESHOLD:
9861 		_scsih_temp_threshold_events(ioc,
9862 			(Mpi2EventDataTemperature_t *)
9863 			mpi_reply->EventData);
9864 		break;
9865 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9866 		ActiveCableEventData =
9867 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9868 		switch (ActiveCableEventData->ReasonCode) {
9869 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9870 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9871 				   ActiveCableEventData->ReceptacleID);
9872 			pr_notice("cannot be powered and devices connected\n");
9873 			pr_notice("to this active cable will not be seen\n");
9874 			pr_notice("This active cable requires %d mW of power\n",
9875 			     ActiveCableEventData->ActiveCablePowerRequirement);
9876 			break;
9877 
9878 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9879 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9880 				   ActiveCableEventData->ReceptacleID);
9881 			pr_notice(
9882 			    "is not running at optimal speed(12 Gb/s rate)\n");
9883 			break;
9884 		}
9885 
9886 		break;
9887 
9888 	default: /* ignore the rest */
9889 		return 1;
9890 	}
9891 
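	/*
	 * EventDataLength is in units of 32-bit dwords.  Copy the event data
	 * and queue it for the firmware event worker thread.
	 */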
9892 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9893 	fw_event = alloc_fw_event_work(sz);
9894 	if (!fw_event) {
9895 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9896 			__FILE__, __LINE__, __func__);
9897 		return 1;
9898 	}
9899 
9900 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9901 	fw_event->ioc = ioc;
9902 	fw_event->VF_ID = mpi_reply->VF_ID;
9903 	fw_event->VP_ID = mpi_reply->VP_ID;
9904 	fw_event->event = event;
9905 	_scsih_fw_event_add(ioc, fw_event);
9906 	fw_event_work_put(fw_event);
9907 	return 1;
9908 }
9909 
9910 /**
9911  * _scsih_expander_node_remove - removing expander device from list.
9912  * @ioc: per adapter object
9913  * @sas_expander: the sas_device object
9914  *
9915  * Removing object and freeing associated memory from the
9916  * ioc->sas_expander_list.
9917  */
9918 static void
9919 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9920 	struct _sas_node *sas_expander)
9921 {
9922 	struct _sas_port *mpt3sas_port, *next;
9923 	unsigned long flags;
9924 
9925 	/* remove sibling ports attached to this expander */
9926 	list_for_each_entry_safe(mpt3sas_port, next,
9927 	   &sas_expander->sas_port_list, port_list) {
9928 		if (ioc->shost_recovery)
9929 			return;
9930 		if (mpt3sas_port->remote_identify.device_type ==
9931 		    SAS_END_DEVICE)
9932 			mpt3sas_device_remove_by_sas_address(ioc,
9933 			    mpt3sas_port->remote_identify.sas_address);
9934 		else if (mpt3sas_port->remote_identify.device_type ==
9935 		    SAS_EDGE_EXPANDER_DEVICE ||
9936 		    mpt3sas_port->remote_identify.device_type ==
9937 		    SAS_FANOUT_EXPANDER_DEVICE)
9938 			mpt3sas_expander_remove(ioc,
9939 			    mpt3sas_port->remote_identify.sas_address);
9940 	}
9941 
9942 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9943 	    sas_expander->sas_address_parent);
9944 
9945 	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9946 		 sas_expander->handle, (unsigned long long)
9947 		 sas_expander->sas_address);
9948 
9949 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9950 	list_del(&sas_expander->list);
9951 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9952 
9953 	kfree(sas_expander->phy);
9954 	kfree(sas_expander);
9955 }
9956 
9957 /**
9958  * _scsih_nvme_shutdown - NVMe shutdown notification
9959  * @ioc: per adapter object
9960  *
9961  * Sends an IO Unit Control request with the shutdown operation code to alert
9962  * the IOC that the host system is shutting down, so that the IOC can issue an
9963  * NVMe shutdown to the NVMe drives attached to it.
9964  */
9965 static void
9966 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
9967 {
9968 	Mpi26IoUnitControlRequest_t *mpi_request;
9969 	Mpi26IoUnitControlReply_t *mpi_reply;
9970 	u16 smid;
9971 
9972 	/* are there any NVMe devices ? */
9973 	if (list_empty(&ioc->pcie_device_list))
9974 		return;
9975 
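	/* Serialize use of the shared scsih internal command frame. */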
9976 	mutex_lock(&ioc->scsih_cmds.mutex);
9977 
9978 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9979 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9980 		goto out;
9981 	}
9982 
9983 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9984 
9985 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9986 	if (!smid) {
9987 		ioc_err(ioc,
9988 		    "%s: failed obtaining a smid\n", __func__);
9989 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9990 		goto out;
9991 	}
9992 
9993 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9994 	ioc->scsih_cmds.smid = smid;
9995 	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
9996 	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
9997 	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
9998 
9999 	init_completion(&ioc->scsih_cmds.done);
10000 	ioc->put_smid_default(ioc, smid);
10001 	/* Wait for max_shutdown_latency seconds */
10002 	ioc_info(ioc,
10003 		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
10004 		ioc->max_shutdown_latency);
10005 	wait_for_completion_timeout(&ioc->scsih_cmds.done,
10006 			ioc->max_shutdown_latency*HZ);
10007 
10008 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
10009 		ioc_err(ioc, "%s: timeout\n", __func__);
10010 		goto out;
10011 	}
10012 
10013 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
10014 		mpi_reply = ioc->scsih_cmds.reply;
10015 		ioc_info(ioc, "Io Unit Control shutdown (complete):"
10016 			"ioc_status(0x%04x), loginfo(0x%08x)\n",
10017 			le16_to_cpu(mpi_reply->IOCStatus),
10018 			le32_to_cpu(mpi_reply->IOCLogInfo));
10019 	}
10020  out:
10021 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10022 	mutex_unlock(&ioc->scsih_cmds.mutex);
10023 }
10024 
10025 
10026 /**
10027  * _scsih_ir_shutdown - IR shutdown notification
10028  * @ioc: per adapter object
10029  *
10030  * Sends a RAID Action request to alert the Integrated RAID subsystem of the
10031  * IOC that the host system is shutting down.
10032  */
10033 static void
10034 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
10035 {
10036 	Mpi2RaidActionRequest_t *mpi_request;
10037 	Mpi2RaidActionReply_t *mpi_reply;
10038 	u16 smid;
10039 
10040 	/* is IR firmware build loaded ? */
10041 	if (!ioc->ir_firmware)
10042 		return;
10043 
10044 	/* are there any volumes ? */
10045 	if (list_empty(&ioc->raid_device_list))
10046 		return;
10047 
10048 	mutex_lock(&ioc->scsih_cmds.mutex);
10049 
10050 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10051 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10052 		goto out;
10053 	}
10054 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10055 
10056 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10057 	if (!smid) {
10058 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
10059 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10060 		goto out;
10061 	}
10062 
10063 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10064 	ioc->scsih_cmds.smid = smid;
10065 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
10066 
10067 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
10068 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
10069 
10070 	if (!ioc->hide_ir_msg)
10071 		ioc_info(ioc, "IR shutdown (sending)\n");
10072 	init_completion(&ioc->scsih_cmds.done);
10073 	ioc->put_smid_default(ioc, smid);
10074 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
10075 
10076 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
10077 		ioc_err(ioc, "%s: timeout\n", __func__);
10078 		goto out;
10079 	}
10080 
10081 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
10082 		mpi_reply = ioc->scsih_cmds.reply;
10083 		if (!ioc->hide_ir_msg)
10084 			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
10085 				 le16_to_cpu(mpi_reply->IOCStatus),
10086 				 le32_to_cpu(mpi_reply->IOCLogInfo));
10087 	}
10088 
10089  out:
10090 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10091 	mutex_unlock(&ioc->scsih_cmds.mutex);
10092 }
10093 
10094 /**
10095  * _scsih_get_shost_and_ioc - get shost and ioc
10096  *			and verify whether they are NULL or not
10097  * @pdev: PCI device struct
10098  * @shost: address of scsi host pointer
10099  * @ioc: address of HBA adapter pointer
10100  *
10101  * Return: 0 if *shost and *ioc are not NULL; otherwise return an error number.
10102  */
10103 static int
10104 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
10105 	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
10106 {
10107 	*shost = pci_get_drvdata(pdev);
10108 	if (*shost == NULL) {
10109 		dev_err(&pdev->dev, "pdev's driver data is null\n");
10110 		return -ENXIO;
10111 	}
10112 
10113 	*ioc = shost_priv(*shost);
10114 	if (*ioc == NULL) {
10115 		dev_err(&pdev->dev, "shost's private data is null\n");
10116 		return -ENXIO;
10117 	}
10118 
10119 	return 0;
10120 }
10121 
10122 /**
10123  * scsih_remove - detach and remove the SCSI host
10124  * @pdev: PCI device struct
10125  *
10126  * Routine called when unloading the driver.
10127  */
10128 static void scsih_remove(struct pci_dev *pdev)
10129 {
10130 	struct Scsi_Host *shost;
10131 	struct MPT3SAS_ADAPTER *ioc;
10132 	struct _sas_port *mpt3sas_port, *next_port;
10133 	struct _raid_device *raid_device, *next;
10134 	struct MPT3SAS_TARGET *sas_target_priv_data;
10135 	struct _pcie_device *pcie_device, *pcienext;
10136 	struct workqueue_struct	*wq;
10137 	unsigned long flags;
10138 	Mpi2ConfigReply_t mpi_reply;
10139 
10140 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
10141 		return;
10142 
10143 	ioc->remove_host = 1;
10144 
10145 	if (!pci_device_is_present(pdev))
10146 		_scsih_flush_running_cmds(ioc);
10147 
10148 	_scsih_fw_event_cleanup_queue(ioc);
10149 
10150 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
10151 	wq = ioc->firmware_event_thread;
10152 	ioc->firmware_event_thread = NULL;
10153 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
10154 	if (wq)
10155 		destroy_workqueue(wq);
10156 	/*
10157 	 * Copy back the unmodified ioc page1 so that on next driver load,
10158 	 * current modified changes on ioc page1 won't take effect.
10159 	 */
10160 	if (ioc->is_aero_ioc)
10161 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
10162 				&ioc->ioc_pg1_copy);
10163 	/* release all the volumes */
10164 	_scsih_ir_shutdown(ioc);
10165 	mpt3sas_destroy_debugfs(ioc);
10166 	sas_remove_host(shost);
10167 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
10168 	    list) {
10169 		if (raid_device->starget) {
10170 			sas_target_priv_data =
10171 			    raid_device->starget->hostdata;
10172 			sas_target_priv_data->deleted = 1;
10173 			scsi_remove_target(&raid_device->starget->dev);
10174 		}
10175 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
10176 			 raid_device->handle, (u64)raid_device->wwid);
10177 		_scsih_raid_device_remove(ioc, raid_device);
10178 	}
10179 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
10180 		list) {
10181 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10182 		list_del_init(&pcie_device->list);
10183 		pcie_device_put(pcie_device);
10184 	}
10185 
10186 	/* free ports attached to the sas_host */
10187 	list_for_each_entry_safe(mpt3sas_port, next_port,
10188 	   &ioc->sas_hba.sas_port_list, port_list) {
10189 		if (mpt3sas_port->remote_identify.device_type ==
10190 		    SAS_END_DEVICE)
10191 			mpt3sas_device_remove_by_sas_address(ioc,
10192 			    mpt3sas_port->remote_identify.sas_address);
10193 		else if (mpt3sas_port->remote_identify.device_type ==
10194 		    SAS_EDGE_EXPANDER_DEVICE ||
10195 		    mpt3sas_port->remote_identify.device_type ==
10196 		    SAS_FANOUT_EXPANDER_DEVICE)
10197 			mpt3sas_expander_remove(ioc,
10198 			    mpt3sas_port->remote_identify.sas_address);
10199 	}
10200 
10201 	/* free phys attached to the sas_host */
10202 	if (ioc->sas_hba.num_phys) {
10203 		kfree(ioc->sas_hba.phy);
10204 		ioc->sas_hba.phy = NULL;
10205 		ioc->sas_hba.num_phys = 0;
10206 	}
10207 
10208 	mpt3sas_base_detach(ioc);
10209 	spin_lock(&gioc_lock);
10210 	list_del(&ioc->list);
10211 	spin_unlock(&gioc_lock);
10212 	scsi_host_put(shost);
10213 }
10214 
10215 /**
10216  * scsih_shutdown - routine call during system shutdown
10217  * @pdev: PCI device struct
10218  */
10219 static void
10220 scsih_shutdown(struct pci_dev *pdev)
10221 {
10222 	struct Scsi_Host *shost;
10223 	struct MPT3SAS_ADAPTER *ioc;
10224 	struct workqueue_struct	*wq;
10225 	unsigned long flags;
10226 	Mpi2ConfigReply_t mpi_reply;
10227 
10228 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
10229 		return;
10230 
10231 	ioc->remove_host = 1;
10232 
10233 	if (!pci_device_is_present(pdev))
10234 		_scsih_flush_running_cmds(ioc);
10235 
10236 	_scsih_fw_event_cleanup_queue(ioc);
10237 
10238 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
10239 	wq = ioc->firmware_event_thread;
10240 	ioc->firmware_event_thread = NULL;
10241 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
10242 	if (wq)
10243 		destroy_workqueue(wq);
10244 	/*
10245 	 * Copy back the unmodified ioc page1 so that on next driver load,
10246 	 * current modified changes on ioc page1 won't take effect.
10247 	 */
10248 	if (ioc->is_aero_ioc)
10249 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
10250 				&ioc->ioc_pg1_copy);
10251 
10252 	_scsih_ir_shutdown(ioc);
10253 	_scsih_nvme_shutdown(ioc);
10254 	mpt3sas_base_detach(ioc);
10255 }
10256 
10257 
10258 /**
10259  * _scsih_probe_boot_devices - reports 1st device
10260  * @ioc: per adapter object
10261  *
10262  * If specified in bios page 2, this routine reports the 1st
10263  * device to scsi-ml or the sas transport for persistent boot device
10264  * purposes.  Please refer to function _scsih_determine_boot_device()
10265  */
10266 static void
10267 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
10268 {
10269 	u32 channel;
10270 	void *device;
10271 	struct _sas_device *sas_device;
10272 	struct _raid_device *raid_device;
10273 	struct _pcie_device *pcie_device;
10274 	u16 handle;
10275 	u64 sas_address_parent;
10276 	u64 sas_address;
10277 	unsigned long flags;
10278 	int rc;
10279 	int tid;
10280 
10281 	 /* no Bios, return immediately */
10282 	if (!ioc->bios_pg3.BiosVersion)
10283 		return;
10284 
10285 	device = NULL;
10286 	if (ioc->req_boot_device.device) {
10287 		device =  ioc->req_boot_device.device;
10288 		channel = ioc->req_boot_device.channel;
10289 	} else if (ioc->req_alt_boot_device.device) {
10290 		device =  ioc->req_alt_boot_device.device;
10291 		channel = ioc->req_alt_boot_device.channel;
10292 	} else if (ioc->current_boot_device.device) {
10293 		device =  ioc->current_boot_device.device;
10294 		channel = ioc->current_boot_device.channel;
10295 	}
10296 
10297 	if (!device)
10298 		return;
10299 
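	/*
	 * Report the boot device on the channel it was discovered on: a RAID
	 * volume, a PCIe/NVMe device, or a bare SAS end device.
	 */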
10300 	if (channel == RAID_CHANNEL) {
10301 		raid_device = device;
10302 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
10303 		    raid_device->id, 0);
10304 		if (rc)
10305 			_scsih_raid_device_remove(ioc, raid_device);
10306 	} else if (channel == PCIE_CHANNEL) {
10307 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10308 		pcie_device = device;
10309 		tid = pcie_device->id;
10310 		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
10311 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10312 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
10313 		if (rc)
10314 			_scsih_pcie_device_remove(ioc, pcie_device);
10315 	} else {
10316 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
10317 		sas_device = device;
10318 		handle = sas_device->handle;
10319 		sas_address_parent = sas_device->sas_address_parent;
10320 		sas_address = sas_device->sas_address;
10321 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
10322 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10323 
10324 		if (ioc->hide_drives)
10325 			return;
10326 		if (!mpt3sas_transport_port_add(ioc, handle,
10327 		    sas_address_parent)) {
10328 			_scsih_sas_device_remove(ioc, sas_device);
10329 		} else if (!sas_device->starget) {
10330 			if (!ioc->is_driver_loading) {
10331 				mpt3sas_transport_port_remove(ioc,
10332 				    sas_address,
10333 				    sas_address_parent);
10334 				_scsih_sas_device_remove(ioc, sas_device);
10335 			}
10336 		}
10337 	}
10338 }
10339 
10340 /**
10341  * _scsih_probe_raid - reporting raid volumes to scsi-ml
10342  * @ioc: per adapter object
10343  *
10344  * Called during initial loading of the driver.
10345  */
10346 static void
10347 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
10348 {
10349 	struct _raid_device *raid_device, *raid_next;
10350 	int rc;
10351 
10352 	list_for_each_entry_safe(raid_device, raid_next,
10353 	    &ioc->raid_device_list, list) {
10354 		if (raid_device->starget)
10355 			continue;
10356 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
10357 		    raid_device->id, 0);
10358 		if (rc)
10359 			_scsih_raid_device_remove(ioc, raid_device);
10360 	}
10361 }
10362 
10363 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
10364 {
10365 	struct _sas_device *sas_device = NULL;
10366 	unsigned long flags;
10367 
10368 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
10369 	if (!list_empty(&ioc->sas_device_init_list)) {
10370 		sas_device = list_first_entry(&ioc->sas_device_init_list,
10371 				struct _sas_device, list);
10372 		sas_device_get(sas_device);
10373 	}
10374 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10375 
10376 	return sas_device;
10377 }
10378 
10379 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10380 		struct _sas_device *sas_device)
10381 {
10382 	unsigned long flags;
10383 
10384 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
10385 
10386 	/*
10387 	 * Since we dropped the lock during the call to port_add(), we need to
10388 	 * be careful here that somebody else didn't move or delete this item
10389 	 * while we were busy with other things.
10390 	 *
10391 	 * If it was on the list, we need a put() for the reference the list
10392 	 * had. Either way, we need a get() for the destination list.
10393 	 */
10394 	if (!list_empty(&sas_device->list)) {
10395 		list_del_init(&sas_device->list);
10396 		sas_device_put(sas_device);
10397 	}
10398 
10399 	sas_device_get(sas_device);
10400 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
10401 
10402 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10403 }
10404 
10405 /**
10406  * _scsih_probe_sas - reporting sas devices to sas transport
10407  * @ioc: per adapter object
10408  *
10409  * Called during initial loading of the driver.
10410  */
10411 static void
10412 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
10413 {
10414 	struct _sas_device *sas_device;
10415 
10416 	if (ioc->hide_drives)
10417 		return;
10418 
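	/*
	 * Drain sas_device_init_list: register each device with the SAS
	 * transport layer, then move it onto the active sas_device_list.
	 */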
10419 	while ((sas_device = get_next_sas_device(ioc))) {
10420 		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
10421 		    sas_device->sas_address_parent)) {
10422 			_scsih_sas_device_remove(ioc, sas_device);
10423 			sas_device_put(sas_device);
10424 			continue;
10425 		} else if (!sas_device->starget) {
10426 			/*
10427 			 * When async scanning is enabled, it's not possible to
10428 			 * remove devices while scanning is turned on due to an
10429 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10430 			 * sysfs_addrm_start()
10431 			 */
10432 			if (!ioc->is_driver_loading) {
10433 				mpt3sas_transport_port_remove(ioc,
10434 				    sas_device->sas_address,
10435 				    sas_device->sas_address_parent);
10436 				_scsih_sas_device_remove(ioc, sas_device);
10437 				sas_device_put(sas_device);
10438 				continue;
10439 			}
10440 		}
10441 		sas_device_make_active(ioc, sas_device);
10442 		sas_device_put(sas_device);
10443 	}
10444 }
10445 
10446 /**
10447  * get_next_pcie_device - Get the next pcie device
10448  * @ioc: per adapter object
10449  *
10450  * Get the next pcie device from pcie_device_init_list list.
10451  *
10452  * Return: pcie device structure if pcie_device_init_list is not empty,
10453  * otherwise returns NULL
10454  */
10455 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10456 {
10457 	struct _pcie_device *pcie_device = NULL;
10458 	unsigned long flags;
10459 
10460 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10461 	if (!list_empty(&ioc->pcie_device_init_list)) {
10462 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10463 				struct _pcie_device, list);
10464 		pcie_device_get(pcie_device);
10465 	}
10466 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10467 
10468 	return pcie_device;
10469 }
10470 
10471 /**
10472  * pcie_device_make_active - Add pcie device to pcie_device_list list
10473  * @ioc: per adapter object
10474  * @pcie_device: pcie device object
10475  *
10476  * Add the pcie device which has registered with the SCSI Transport Layer to
10477  * pcie_device_list list
10478  */
10479 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10480 		struct _pcie_device *pcie_device)
10481 {
10482 	unsigned long flags;
10483 
10484 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10485 
10486 	if (!list_empty(&pcie_device->list)) {
10487 		list_del_init(&pcie_device->list);
10488 		pcie_device_put(pcie_device);
10489 	}
10490 	pcie_device_get(pcie_device);
10491 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10492 
10493 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10494 }
10495 
10496 /**
10497  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10498  * @ioc: per adapter object
10499  *
10500  * Called during initial loading of the driver.
10501  */
10502 static void
10503 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10504 {
10505 	struct _pcie_device *pcie_device;
10506 	int rc;
10507 
10508 	/* PCIe Device List */
10509 	while ((pcie_device = get_next_pcie_device(ioc))) {
10510 		if (pcie_device->starget) {
10511 			pcie_device_put(pcie_device);
10512 			continue;
10513 		}
10514 		if (pcie_device->access_status ==
10515 		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
10516 			pcie_device_make_active(ioc, pcie_device);
10517 			pcie_device_put(pcie_device);
10518 			continue;
10519 		}
10520 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10521 			pcie_device->id, 0);
10522 		if (rc) {
10523 			_scsih_pcie_device_remove(ioc, pcie_device);
10524 			pcie_device_put(pcie_device);
10525 			continue;
10526 		} else if (!pcie_device->starget) {
10527 			/*
10528 			 * When async scanning is enabled, it's not possible to
10529 			 * remove devices while scanning is turned on due to an
10530 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10531 			 * sysfs_addrm_start()
10532 			 */
10533 			if (!ioc->is_driver_loading) {
10534 			/* TODO-- Need to find out whether this condition will
10535 			 * occur or not
10536 			 */
10537 				_scsih_pcie_device_remove(ioc, pcie_device);
10538 				pcie_device_put(pcie_device);
10539 				continue;
10540 			}
10541 		}
10542 		pcie_device_make_active(ioc, pcie_device);
10543 		pcie_device_put(pcie_device);
10544 	}
10545 }
10546 
10547 /**
10548  * _scsih_probe_devices - probing for devices
10549  * @ioc: per adapter object
10550  *
10551  * Called during initial loading of the driver.
10552  */
10553 static void
10554 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10555 {
10556 	u16 volume_mapping_flags;
10557 
10558 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10559 		return;  /* return when IOC doesn't support initiator mode */
10560 
10561 	_scsih_probe_boot_devices(ioc);
10562 
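	/*
	 * With IR firmware present, probe volumes and bare drives in the
	 * order dictated by the IOC Page 8 volume mapping mode.
	 */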
10563 	if (ioc->ir_firmware) {
10564 		volume_mapping_flags =
10565 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10566 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10567 		if (volume_mapping_flags ==
10568 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10569 			_scsih_probe_raid(ioc);
10570 			_scsih_probe_sas(ioc);
10571 		} else {
10572 			_scsih_probe_sas(ioc);
10573 			_scsih_probe_raid(ioc);
10574 		}
10575 	} else {
10576 		_scsih_probe_sas(ioc);
10577 		_scsih_probe_pcie(ioc);
10578 	}
10579 }
10580 
10581 /**
10582  * scsih_scan_start - scsi lld callback for .scan_start
10583  * @shost: SCSI host pointer
10584  *
10585  * The shost has the ability to discover targets on its own instead
10586  * of scanning the entire bus.  In our implementation, we will kick off
10587  * firmware discovery.
10588  */
10589 static void
10590 scsih_scan_start(struct Scsi_Host *shost)
10591 {
10592 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10593 	int rc;
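	/* Enable host trace buffers, if requested, before kicking off discovery. */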
10594 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10595 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10596 	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
10597 		mpt3sas_enable_diag_buffer(ioc, 1);
10598 
10599 	if (disable_discovery > 0)
10600 		return;
10601 
10602 	ioc->start_scan = 1;
10603 	rc = mpt3sas_port_enable(ioc);
10604 
10605 	if (rc != 0)
10606 		ioc_info(ioc, "port enable: FAILED\n");
10607 }
10608 
10609 /**
10610  * scsih_scan_finished - scsi lld callback for .scan_finished
10611  * @shost: SCSI host pointer
10612  * @time: elapsed time of the scan in jiffies
10613  *
10614  * This function will be called periodically until it returns 1 with the
10615  * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10616  * we wait for firmware discovery to complete, then return 1.
10617  */
10618 static int
10619 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10620 {
10621 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10622 
10623 	if (disable_discovery > 0) {
10624 		ioc->is_driver_loading = 0;
10625 		ioc->wait_for_discovery_to_complete = 0;
10626 		return 1;
10627 	}
10628 
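	/* Allow firmware discovery (port enable) at most 300 seconds. */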
10629 	if (time >= (300 * HZ)) {
10630 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10631 		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10632 		ioc->is_driver_loading = 0;
10633 		return 1;
10634 	}
10635 
10636 	if (ioc->start_scan)
10637 		return 0;
10638 
10639 	if (ioc->start_scan_failed) {
10640 		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10641 			 ioc->start_scan_failed);
10642 		ioc->is_driver_loading = 0;
10643 		ioc->wait_for_discovery_to_complete = 0;
10644 		ioc->remove_host = 1;
10645 		return 1;
10646 	}
10647 
10648 	ioc_info(ioc, "port enable: SUCCESS\n");
10649 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10650 
10651 	if (ioc->wait_for_discovery_to_complete) {
10652 		ioc->wait_for_discovery_to_complete = 0;
10653 		_scsih_probe_devices(ioc);
10654 	}
10655 	mpt3sas_base_start_watchdog(ioc);
10656 	ioc->is_driver_loading = 0;
10657 	return 1;
10658 }
10659 
10660 /* shost template for SAS 2.0 HBA devices */
10661 static struct scsi_host_template mpt2sas_driver_template = {
10662 	.module				= THIS_MODULE,
10663 	.name				= "Fusion MPT SAS Host",
10664 	.proc_name			= MPT2SAS_DRIVER_NAME,
10665 	.queuecommand			= scsih_qcmd,
10666 	.target_alloc			= scsih_target_alloc,
10667 	.slave_alloc			= scsih_slave_alloc,
10668 	.slave_configure		= scsih_slave_configure,
10669 	.target_destroy			= scsih_target_destroy,
10670 	.slave_destroy			= scsih_slave_destroy,
10671 	.scan_finished			= scsih_scan_finished,
10672 	.scan_start			= scsih_scan_start,
10673 	.change_queue_depth		= scsih_change_queue_depth,
10674 	.eh_abort_handler		= scsih_abort,
10675 	.eh_device_reset_handler	= scsih_dev_reset,
10676 	.eh_target_reset_handler	= scsih_target_reset,
10677 	.eh_host_reset_handler		= scsih_host_reset,
10678 	.bios_param			= scsih_bios_param,
10679 	.can_queue			= 1,
10680 	.this_id			= -1,
10681 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
10682 	.max_sectors			= 32767,
10683 	.cmd_per_lun			= 7,
10684 	.shost_attrs			= mpt3sas_host_attrs,
10685 	.sdev_attrs			= mpt3sas_dev_attrs,
10686 	.track_queue_depth		= 1,
10687 	.cmd_size			= sizeof(struct scsiio_tracker),
10688 };
10689 
10690 /* raid transport support for SAS 2.0 HBA devices */
10691 static struct raid_function_template mpt2sas_raid_functions = {
10692 	.cookie		= &mpt2sas_driver_template,
10693 	.is_raid	= scsih_is_raid,
10694 	.get_resync	= scsih_get_resync,
10695 	.get_state	= scsih_get_state,
10696 };
10697 
10698 /* shost template for SAS 3.0 HBA devices */
10699 static struct scsi_host_template mpt3sas_driver_template = {
10700 	.module				= THIS_MODULE,
10701 	.name				= "Fusion MPT SAS Host",
10702 	.proc_name			= MPT3SAS_DRIVER_NAME,
10703 	.queuecommand			= scsih_qcmd,
10704 	.target_alloc			= scsih_target_alloc,
10705 	.slave_alloc			= scsih_slave_alloc,
10706 	.slave_configure		= scsih_slave_configure,
10707 	.target_destroy			= scsih_target_destroy,
10708 	.slave_destroy			= scsih_slave_destroy,
10709 	.scan_finished			= scsih_scan_finished,
10710 	.scan_start			= scsih_scan_start,
10711 	.change_queue_depth		= scsih_change_queue_depth,
10712 	.eh_abort_handler		= scsih_abort,
10713 	.eh_device_reset_handler	= scsih_dev_reset,
10714 	.eh_target_reset_handler	= scsih_target_reset,
10715 	.eh_host_reset_handler		= scsih_host_reset,
10716 	.bios_param			= scsih_bios_param,
10717 	.can_queue			= 1,
10718 	.this_id			= -1,
10719 	.sg_tablesize			= MPT3SAS_SG_DEPTH,
10720 	.max_sectors			= 32767,
10721 	.max_segment_size		= 0xffffffff,
10722 	.cmd_per_lun			= 7,
10723 	.shost_attrs			= mpt3sas_host_attrs,
10724 	.sdev_attrs			= mpt3sas_dev_attrs,
10725 	.track_queue_depth		= 1,
10726 	.cmd_size			= sizeof(struct scsiio_tracker),
10727 };
10728 
10729 /* raid transport support for SAS 3.0 HBA devices */
10730 static struct raid_function_template mpt3sas_raid_functions = {
10731 	.cookie		= &mpt3sas_driver_template,
10732 	.is_raid	= scsih_is_raid,
10733 	.get_resync	= scsih_get_resync,
10734 	.get_state	= scsih_get_state,
10735 };
10736 
10737 /**
10738  * _scsih_determine_hba_mpi_version - determine in which MPI version class
10739  *					this device belongs to.
10740  * @pdev: PCI device struct
10741  *
10742  * return MPI2_VERSION for SAS 2.0 HBA devices,
10743  *	MPI25_VERSION for SAS 3.0 HBA devices, and
10744  *	MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
10745  */
10746 static u16
10747 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10748 {
10749 
10750 	switch (pdev->device) {
10751 	case MPI2_MFGPAGE_DEVID_SSS6200:
10752 	case MPI2_MFGPAGE_DEVID_SAS2004:
10753 	case MPI2_MFGPAGE_DEVID_SAS2008:
10754 	case MPI2_MFGPAGE_DEVID_SAS2108_1:
10755 	case MPI2_MFGPAGE_DEVID_SAS2108_2:
10756 	case MPI2_MFGPAGE_DEVID_SAS2108_3:
10757 	case MPI2_MFGPAGE_DEVID_SAS2116_1:
10758 	case MPI2_MFGPAGE_DEVID_SAS2116_2:
10759 	case MPI2_MFGPAGE_DEVID_SAS2208_1:
10760 	case MPI2_MFGPAGE_DEVID_SAS2208_2:
10761 	case MPI2_MFGPAGE_DEVID_SAS2208_3:
10762 	case MPI2_MFGPAGE_DEVID_SAS2208_4:
10763 	case MPI2_MFGPAGE_DEVID_SAS2208_5:
10764 	case MPI2_MFGPAGE_DEVID_SAS2208_6:
10765 	case MPI2_MFGPAGE_DEVID_SAS2308_1:
10766 	case MPI2_MFGPAGE_DEVID_SAS2308_2:
10767 	case MPI2_MFGPAGE_DEVID_SAS2308_3:
10768 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10769 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10770 		return MPI2_VERSION;
10771 	case MPI25_MFGPAGE_DEVID_SAS3004:
10772 	case MPI25_MFGPAGE_DEVID_SAS3008:
10773 	case MPI25_MFGPAGE_DEVID_SAS3108_1:
10774 	case MPI25_MFGPAGE_DEVID_SAS3108_2:
10775 	case MPI25_MFGPAGE_DEVID_SAS3108_5:
10776 	case MPI25_MFGPAGE_DEVID_SAS3108_6:
10777 		return MPI25_VERSION;
10778 	case MPI26_MFGPAGE_DEVID_SAS3216:
10779 	case MPI26_MFGPAGE_DEVID_SAS3224:
10780 	case MPI26_MFGPAGE_DEVID_SAS3316_1:
10781 	case MPI26_MFGPAGE_DEVID_SAS3316_2:
10782 	case MPI26_MFGPAGE_DEVID_SAS3316_3:
10783 	case MPI26_MFGPAGE_DEVID_SAS3316_4:
10784 	case MPI26_MFGPAGE_DEVID_SAS3324_1:
10785 	case MPI26_MFGPAGE_DEVID_SAS3324_2:
10786 	case MPI26_MFGPAGE_DEVID_SAS3324_3:
10787 	case MPI26_MFGPAGE_DEVID_SAS3324_4:
10788 	case MPI26_MFGPAGE_DEVID_SAS3508:
10789 	case MPI26_MFGPAGE_DEVID_SAS3508_1:
10790 	case MPI26_MFGPAGE_DEVID_SAS3408:
10791 	case MPI26_MFGPAGE_DEVID_SAS3516:
10792 	case MPI26_MFGPAGE_DEVID_SAS3516_1:
10793 	case MPI26_MFGPAGE_DEVID_SAS3416:
10794 	case MPI26_MFGPAGE_DEVID_SAS3616:
10795 	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10796 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10797 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10798 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10799 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10800 	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
10801 	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
10802 	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
10803 	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
10804 		return MPI26_VERSION;
10805 	}
10806 	return 0;
10807 }
10808 
10809 /**
10810  * _scsih_probe - attach and add scsi host
10811  * @pdev: PCI device struct
10812  * @id: pci device id
10813  *
10814  * Return: 0 success, anything else error.
10815  */
10816 static int
10817 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10818 {
10819 	struct MPT3SAS_ADAPTER *ioc;
10820 	struct Scsi_Host *shost = NULL;
10821 	int rv;
10822 	u16 hba_mpi_version;
10823 
10824 	/* Determine in which MPI version class this pci device belongs */
10825 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10826 	if (hba_mpi_version == 0)
10827 		return -ENODEV;
10828 
10829 	/* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
10830 	 * for other generation HBA's return with -ENODEV
10831 	 */
10832 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
10833 		return -ENODEV;
10834 
10835 	/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
10836 	 * for other generation HBA's return with -ENODEV
10837 	 */
10838 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
10839 		|| hba_mpi_version ==  MPI26_VERSION)))
10840 		return -ENODEV;
10841 
10842 	switch (hba_mpi_version) {
10843 	case MPI2_VERSION:
10844 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10845 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10846 		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
10847 		shost = scsi_host_alloc(&mpt2sas_driver_template,
10848 		  sizeof(struct MPT3SAS_ADAPTER));
10849 		if (!shost)
10850 			return -ENODEV;
10851 		ioc = shost_priv(shost);
10852 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10853 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10854 		ioc->id = mpt2_ids++;
10855 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10856 		switch (pdev->device) {
10857 		case MPI2_MFGPAGE_DEVID_SSS6200:
10858 			ioc->is_warpdrive = 1;
10859 			ioc->hide_ir_msg = 1;
10860 			break;
10861 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10862 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10863 			ioc->is_mcpu_endpoint = 1;
10864 			break;
10865 		default:
10866 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10867 			break;
10868 		}
10869 		break;
10870 	case MPI25_VERSION:
10871 	case MPI26_VERSION:
10872 		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
10873 		shost = scsi_host_alloc(&mpt3sas_driver_template,
10874 		  sizeof(struct MPT3SAS_ADAPTER));
10875 		if (!shost)
10876 			return -ENODEV;
10877 		ioc = shost_priv(shost);
10878 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10879 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10880 		ioc->id = mpt3_ids++;
10881 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10882 		switch (pdev->device) {
10883 		case MPI26_MFGPAGE_DEVID_SAS3508:
10884 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
10885 		case MPI26_MFGPAGE_DEVID_SAS3408:
10886 		case MPI26_MFGPAGE_DEVID_SAS3516:
10887 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
10888 		case MPI26_MFGPAGE_DEVID_SAS3416:
10889 		case MPI26_MFGPAGE_DEVID_SAS3616:
10890 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10891 			ioc->is_gen35_ioc = 1;
10892 			break;
10893 		case MPI26_MFGPAGE_DEVID_INVALID0_3816:
10894 		case MPI26_MFGPAGE_DEVID_INVALID0_3916:
10895 			dev_err(&pdev->dev,
10896 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
10897 			    pdev->device, pdev->subsystem_vendor,
10898 			    pdev->subsystem_device);
10899 			return 1;
10900 		case MPI26_MFGPAGE_DEVID_INVALID1_3816:
10901 		case MPI26_MFGPAGE_DEVID_INVALID1_3916:
10902 			dev_err(&pdev->dev,
10903 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
10904 			    pdev->device, pdev->subsystem_vendor,
10905 			    pdev->subsystem_device);
10906 			return 1;
10907 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10908 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10909 			dev_info(&pdev->dev,
10910 			    "HBA is in Configurable Secure mode\n");
10911 			fallthrough;
10912 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10913 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10914 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10915 			break;
10916 		default:
10917 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10918 		}
10919 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10920 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10921 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10922 			ioc->combined_reply_queue = 1;
10923 			if (ioc->is_gen35_ioc)
10924 				ioc->combined_reply_index_count =
10925 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10926 			else
10927 				ioc->combined_reply_index_count =
10928 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10929 		}
10930 		break;
10931 	default:
10932 		return -ENODEV;
10933 	}
10934 
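	/* Common initialization shared by all supported HBA generations. */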
10935 	INIT_LIST_HEAD(&ioc->list);
10936 	spin_lock(&gioc_lock);
10937 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10938 	spin_unlock(&gioc_lock);
10939 	ioc->shost = shost;
10940 	ioc->pdev = pdev;
10941 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10942 	ioc->tm_cb_idx = tm_cb_idx;
10943 	ioc->ctl_cb_idx = ctl_cb_idx;
10944 	ioc->base_cb_idx = base_cb_idx;
10945 	ioc->port_enable_cb_idx = port_enable_cb_idx;
10946 	ioc->transport_cb_idx = transport_cb_idx;
10947 	ioc->scsih_cb_idx = scsih_cb_idx;
10948 	ioc->config_cb_idx = config_cb_idx;
10949 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10950 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10951 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10952 	ioc->logging_level = logging_level;
10953 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10954 	/* Host waits for minimum of six seconds */
10955 	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
10956 	/*
10957 	 * Enable MEMORY MOVE support flag.
10958 	 */
10959 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
10960 
10961 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
10962 
10963 	/* misc semaphores and spin locks */
10964 	mutex_init(&ioc->reset_in_progress_mutex);
10965 	/* initializing pci_access_mutex lock */
10966 	mutex_init(&ioc->pci_access_mutex);
10967 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10968 	spin_lock_init(&ioc->scsi_lookup_lock);
10969 	spin_lock_init(&ioc->sas_device_lock);
10970 	spin_lock_init(&ioc->sas_node_lock);
10971 	spin_lock_init(&ioc->fw_event_lock);
10972 	spin_lock_init(&ioc->raid_device_lock);
10973 	spin_lock_init(&ioc->pcie_device_lock);
10974 	spin_lock_init(&ioc->diag_trigger_lock);
10975 
10976 	INIT_LIST_HEAD(&ioc->sas_device_list);
10977 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
10978 	INIT_LIST_HEAD(&ioc->sas_expander_list);
10979 	INIT_LIST_HEAD(&ioc->enclosure_list);
10980 	INIT_LIST_HEAD(&ioc->pcie_device_list);
10981 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10982 	INIT_LIST_HEAD(&ioc->fw_event_list);
10983 	INIT_LIST_HEAD(&ioc->raid_device_list);
10984 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10985 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
10986 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
10987 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10988 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10989 	INIT_LIST_HEAD(&ioc->reply_queue_list);
10990 
10991 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10992 
10993 	/* init shost parameters */
10994 	shost->max_cmd_len = 32;
10995 	shost->max_lun = max_lun;
10996 	shost->transportt = mpt3sas_transport_template;
10997 	shost->unique_id = ioc->id;
10998 
10999 	if (ioc->is_mcpu_endpoint) {
11000 		/* mCPU MPI support 64K max IO */
11001 		shost->max_sectors = 128;
11002 		ioc_info(ioc, "The max_sectors value is set to %d\n",
11003 			 shost->max_sectors);
11004 	} else {
11005 		if (max_sectors != 0xFFFF) {
11006 			if (max_sectors < 64) {
11007 				shost->max_sectors = 64;
11008 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
11009 					 max_sectors);
11010 			} else if (max_sectors > 32767) {
11011 				shost->max_sectors = 32767;
11012 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
11013 					 max_sectors);
11014 			} else {
11015 				shost->max_sectors = max_sectors & 0xFFFE;
11016 				ioc_info(ioc, "The max_sectors value is set to %d\n",
11017 					 shost->max_sectors);
11018 			}
11019 		}
11020 	}
11021 	/* register EEDP capabilities with SCSI layer */
11022 	if (prot_mask >= 0)
11023 		scsi_host_set_prot(shost, (prot_mask & 0x07));
11024 	else
11025 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
11026 				   | SHOST_DIF_TYPE2_PROTECTION
11027 				   | SHOST_DIF_TYPE3_PROTECTION);
11028 
11029 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
11030 
11031 	/* event thread */
11032 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
11033 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
11034 	ioc->firmware_event_thread = alloc_ordered_workqueue(
11035 	    ioc->firmware_event_name, 0);
11036 	if (!ioc->firmware_event_thread) {
11037 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
11038 			__FILE__, __LINE__, __func__);
11039 		rv = -ENODEV;
11040 		goto out_thread_fail;
11041 	}
11042 
11043 	ioc->is_driver_loading = 1;
11044 	if ((mpt3sas_base_attach(ioc))) {
11045 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
11046 			__FILE__, __LINE__, __func__);
11047 		rv = -ENODEV;
11048 		goto out_attach_fail;
11049 	}
11050 
11051 	if (ioc->is_warpdrive) {
11052 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
11053 			ioc->hide_drives = 0;
11054 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
11055 			ioc->hide_drives = 1;
11056 		else {
11057 			if (mpt3sas_get_num_volumes(ioc))
11058 				ioc->hide_drives = 1;
11059 			else
11060 				ioc->hide_drives = 0;
11061 		}
11062 	} else
11063 		ioc->hide_drives = 0;
11064 
11065 	rv = scsi_add_host(shost, &pdev->dev);
11066 	if (rv) {
11067 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
11068 			__FILE__, __LINE__, __func__);
11069 		goto out_add_shost_fail;
11070 	}
11071 
11072 	scsi_scan_host(shost);
11073 	mpt3sas_setup_debugfs(ioc);
11074 	return 0;
11075 out_add_shost_fail:
11076 	mpt3sas_base_detach(ioc);
11077  out_attach_fail:
11078 	destroy_workqueue(ioc->firmware_event_thread);
11079  out_thread_fail:
11080 	spin_lock(&gioc_lock);
11081 	list_del(&ioc->list);
11082 	spin_unlock(&gioc_lock);
11083 	scsi_host_put(shost);
11084 	return rv;
11085 }
11086 
11087 #ifdef CONFIG_PM
11088 /**
11089  * scsih_suspend - power management suspend main entry point
11090  * @pdev: PCI device struct
11091  * @state: PM state change to (usually PCI_D3)
11092  *
11093  * Return: 0 success, anything else error.
11094  */
11095 static int
11096 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
11097 {
11098 	struct Scsi_Host *shost;
11099 	struct MPT3SAS_ADAPTER *ioc;
11100 	pci_power_t device_state;
11101 	int rc;
11102 
11103 	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
11104 	if (rc)
11105 		return rc;
11106 
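	/*
	 * Quiesce the adapter: stop the watchdog, flush queued work, block
	 * new I/O, and ask the IOC to shut down any attached NVMe devices.
	 */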
11107 	mpt3sas_base_stop_watchdog(ioc);
11108 	flush_scheduled_work();
11109 	scsi_block_requests(shost);
11110 	_scsih_nvme_shutdown(ioc);
11111 	device_state = pci_choose_state(pdev, state);
11112 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
11113 		 pdev, pci_name(pdev), device_state);
11114 
11115 	pci_save_state(pdev);
11116 	mpt3sas_base_free_resources(ioc);
11117 	pci_set_power_state(pdev, device_state);
11118 	return 0;
11119 }
11120 
11121 /**
11122  * scsih_resume - power management resume main entry point
11123  * @pdev: PCI device struct
11124  *
11125  * Return: 0 success, anything else error.
11126  */
11127 static int
11128 scsih_resume(struct pci_dev *pdev)
11129 {
11130 	struct Scsi_Host *shost;
11131 	struct MPT3SAS_ADAPTER *ioc;
11132 	pci_power_t device_state = pdev->current_state;
11133 	int r;
11134 
11135 	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
11136 	if (r)
11137 		return r;
11138 
11139 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
11140 		 pdev, pci_name(pdev), device_state);
11141 
11142 	pci_set_power_state(pdev, PCI_D0);
11143 	pci_enable_wake(pdev, PCI_D0, 0);
11144 	pci_restore_state(pdev);
11145 	ioc->pdev = pdev;
11146 	r = mpt3sas_base_map_resources(ioc);
11147 	if (r)
11148 		return r;
11149 	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
11150 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
11151 	scsi_unblock_requests(shost);
11152 	mpt3sas_base_start_watchdog(ioc);
11153 	return 0;
11154 }
11155 #endif /* CONFIG_PM */
11156 
11157 /**
11158  * scsih_pci_error_detected - Called when a PCI error is detected.
11159  * @pdev: PCI device struct
11160  * @state: PCI channel state
11161  *
11162  * Description: Called when a PCI error is detected.
11163  *
11164  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
11165  */
11166 static pci_ers_result_t
11167 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
11168 {
11169 	struct Scsi_Host *shost;
11170 	struct MPT3SAS_ADAPTER *ioc;
11171 
11172 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11173 		return PCI_ERS_RESULT_DISCONNECT;
11174 
11175 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
11176 
11177 	switch (state) {
11178 	case pci_channel_io_normal:
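		/* Non-fatal error; the device is still accessible, so allow
		 * the recovery core to try the mmio_enabled callback rather
		 * than forcing a slot reset.
		 */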
11179 		return PCI_ERS_RESULT_CAN_RECOVER;
11180 	case pci_channel_io_frozen:
11181 		/* Fatal error, prepare for slot reset */
11182 		ioc->pci_error_recovery = 1;
11183 		scsi_block_requests(ioc->shost);
11184 		mpt3sas_base_stop_watchdog(ioc);
11185 		mpt3sas_base_free_resources(ioc);
11186 		return PCI_ERS_RESULT_NEED_RESET;
11187 	case pci_channel_io_perm_failure:
11188 		/* Permanent error, prepare for device removal */
11189 		ioc->pci_error_recovery = 1;
11190 		mpt3sas_base_stop_watchdog(ioc);
11191 		_scsih_flush_running_cmds(ioc);
11192 		return PCI_ERS_RESULT_DISCONNECT;
11193 	}
11194 	return PCI_ERS_RESULT_NEED_RESET;
11195 }
11196 
11197 /**
11198  * scsih_pci_slot_reset - Called when PCI slot has been reset.
11199  * @pdev: PCI device struct
11200  *
11201  * Description: This routine is called by the pci error recovery
11202  * code after the PCI slot has been reset, just before we
11203  * should resume normal operations.
11204  */
11205 static pci_ers_result_t
11206 scsih_pci_slot_reset(struct pci_dev *pdev)
11207 {
11208 	struct Scsi_Host *shost;
11209 	struct MPT3SAS_ADAPTER *ioc;
11210 	int rc;
11211 
11212 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11213 		return PCI_ERS_RESULT_DISCONNECT;
11214 
11215 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
11216 
11217 	ioc->pci_error_recovery = 0;
11218 	ioc->pdev = pdev;
11219 	pci_restore_state(pdev);
11220 	rc = mpt3sas_base_map_resources(ioc);
11221 	if (rc)
11222 		return PCI_ERS_RESULT_DISCONNECT;
11223 
11224 	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
11225 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
11226 
11227 	ioc_warn(ioc, "hard reset: %s\n",
11228 		 (rc == 0) ? "success" : "failed");
11229 
11230 	if (!rc)
11231 		return PCI_ERS_RESULT_RECOVERED;
11232 	else
11233 		return PCI_ERS_RESULT_DISCONNECT;
11234 }
11235 
11236 /**
11237  * scsih_pci_resume() - resume normal ops after PCI reset
11238  * @pdev: pointer to PCI device
11239  *
11240  * Called when the error recovery driver tells us that its
11241  * OK to resume normal operation. Use completion to allow
11242  * halted scsi ops to resume.
11243  */
11244 static void
11245 scsih_pci_resume(struct pci_dev *pdev)
11246 {
11247 	struct Scsi_Host *shost;
11248 	struct MPT3SAS_ADAPTER *ioc;
11249 
11250 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11251 		return;
11252 
11253 	ioc_info(ioc, "PCI error: resume callback!!\n");
11254 
11255 	mpt3sas_base_start_watchdog(ioc);
11256 	scsi_unblock_requests(ioc->shost);
11257 }
11258 
11259 /**
11260  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
11261  * @pdev: pointer to PCI device
11262  *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT.
 */
11263 static pci_ers_result_t
11264 scsih_pci_mmio_enabled(struct pci_dev *pdev)
11265 {
11266 	struct Scsi_Host *shost;
11267 	struct MPT3SAS_ADAPTER *ioc;
11268 
11269 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11270 		return PCI_ERS_RESULT_DISCONNECT;
11271 
11272 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
11273 
11274 	/* TODO - dump whatever for debugging purposes */
11275 
11276 	/* This called only if scsih_pci_error_detected returns
11277 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
11278 	 * works, no need to reset slot.
11279 	 */
11280 	return PCI_ERS_RESULT_RECOVERED;
11281 }
11282 
11283 /**
11284  * scsih_ncq_prio_supp - Check for NCQ command priority support
11285  * @sdev: scsi device struct
11286  *
11287  * This is called when a user indicates they would like to enable
11288  * ncq command priorities. This works only on SATA devices.
11289  */
11290 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
11291 {
11292 	unsigned char *buf;
11293 	bool ncq_prio_supp = false;
11294 
11295 	if (!scsi_device_supports_vpd(sdev))
11296 		return ncq_prio_supp;
11297 
11298 	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
11299 	if (!buf)
11300 		return ncq_prio_supp;
11301 
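	/* VPD page 0x89 (ATA Information) carries the drive's IDENTIFY DEVICE
	 * data starting at byte 60; word 76 bit 12 (byte 213, bit 4) reports
	 * NCQ priority support.
	 */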
11302 	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
11303 		ncq_prio_supp = (buf[213] >> 4) & 1;
11304 
11305 	kfree(buf);
11306 	return ncq_prio_supp;
11307 }
11308 /*
11309  * The pci device ids are defined in mpi/mpi2_cnfg.h.
11310  */
11311 static const struct pci_device_id mpt3sas_pci_table[] = {
11312 	/* Spitfire ~ 2004 */
11313 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
11314 		PCI_ANY_ID, PCI_ANY_ID },
11315 	/* Falcon ~ 2008 */
11316 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
11317 		PCI_ANY_ID, PCI_ANY_ID },
11318 	/* Liberator ~ 2108 */
11319 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
11320 		PCI_ANY_ID, PCI_ANY_ID },
11321 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
11322 		PCI_ANY_ID, PCI_ANY_ID },
11323 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
11324 		PCI_ANY_ID, PCI_ANY_ID },
11325 	/* Meteor ~ 2116 */
11326 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
11327 		PCI_ANY_ID, PCI_ANY_ID },
11328 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
11329 		PCI_ANY_ID, PCI_ANY_ID },
11330 	/* Thunderbolt ~ 2208 */
11331 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
11332 		PCI_ANY_ID, PCI_ANY_ID },
11333 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
11334 		PCI_ANY_ID, PCI_ANY_ID },
11335 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
11336 		PCI_ANY_ID, PCI_ANY_ID },
11337 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
11338 		PCI_ANY_ID, PCI_ANY_ID },
11339 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
11340 		PCI_ANY_ID, PCI_ANY_ID },
11341 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
11342 		PCI_ANY_ID, PCI_ANY_ID },
11343 	/* Mustang ~ 2308 */
11344 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
11345 		PCI_ANY_ID, PCI_ANY_ID },
11346 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
11347 		PCI_ANY_ID, PCI_ANY_ID },
11348 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
11349 		PCI_ANY_ID, PCI_ANY_ID },
11350 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
11351 		PCI_ANY_ID, PCI_ANY_ID },
11352 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
11353 		PCI_ANY_ID, PCI_ANY_ID },
11354 	/* SSS6200 */
11355 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
11356 		PCI_ANY_ID, PCI_ANY_ID },
11357 	/* Fury ~ 3004 and 3008 */
11358 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
11359 		PCI_ANY_ID, PCI_ANY_ID },
11360 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
11361 		PCI_ANY_ID, PCI_ANY_ID },
11362 	/* Invader ~ 3108 */
11363 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
11364 		PCI_ANY_ID, PCI_ANY_ID },
11365 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
11366 		PCI_ANY_ID, PCI_ANY_ID },
11367 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
11368 		PCI_ANY_ID, PCI_ANY_ID },
11369 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
11370 		PCI_ANY_ID, PCI_ANY_ID },
11371 	/* Cutlass ~ 3216 and 3224 */
11372 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
11373 		PCI_ANY_ID, PCI_ANY_ID },
11374 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
11375 		PCI_ANY_ID, PCI_ANY_ID },
11376 	/* Intruder ~ 3316 and 3324 */
11377 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
11378 		PCI_ANY_ID, PCI_ANY_ID },
11379 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
11380 		PCI_ANY_ID, PCI_ANY_ID },
11381 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
11382 		PCI_ANY_ID, PCI_ANY_ID },
11383 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
11384 		PCI_ANY_ID, PCI_ANY_ID },
11385 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
11386 		PCI_ANY_ID, PCI_ANY_ID },
11387 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
11388 		PCI_ANY_ID, PCI_ANY_ID },
11389 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
11390 		PCI_ANY_ID, PCI_ANY_ID },
11391 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
11392 		PCI_ANY_ID, PCI_ANY_ID },
11393 	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
11394 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
11395 		PCI_ANY_ID, PCI_ANY_ID },
11396 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
11397 		PCI_ANY_ID, PCI_ANY_ID },
11398 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
11399 		PCI_ANY_ID, PCI_ANY_ID },
11400 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
11401 		PCI_ANY_ID, PCI_ANY_ID },
11402 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
11403 		PCI_ANY_ID, PCI_ANY_ID },
11404 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
11405 		PCI_ANY_ID, PCI_ANY_ID },
11406 	/* Mercator ~ 3616*/
11407 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
11408 		PCI_ANY_ID, PCI_ANY_ID },
11409 
11410 	/* Aero SI 0x00E1 Configurable Secure
11411 	 * 0x00E2 Hard Secure
11412 	 */
11413 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
11414 		PCI_ANY_ID, PCI_ANY_ID },
11415 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
11416 		PCI_ANY_ID, PCI_ANY_ID },
11417 
11418 	/*
11419 	 *  Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
11420 	 */
11421 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
11422 		PCI_ANY_ID, PCI_ANY_ID },
11423 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
11424 		PCI_ANY_ID, PCI_ANY_ID },
11425 
11426 	/* Atlas PCIe Switch Management Port */
11427 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
11428 		PCI_ANY_ID, PCI_ANY_ID },
11429 
11430 	/* Sea SI 0x00E5 Configurable Secure
11431 	 * 0x00E6 Hard Secure
11432 	 */
11433 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
11434 		PCI_ANY_ID, PCI_ANY_ID },
11435 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
11436 		PCI_ANY_ID, PCI_ANY_ID },
11437 
11438 	/*
11439 	 *  Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
11440 	 */
11441 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
11442 		PCI_ANY_ID, PCI_ANY_ID },
11443 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
11444 		PCI_ANY_ID, PCI_ANY_ID },
11445 
11446 	{0}     /* Terminating entry */
11447 };
11448 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
11449 
11450 static struct pci_error_handlers _mpt3sas_err_handler = {
11451 	.error_detected	= scsih_pci_error_detected,
11452 	.mmio_enabled	= scsih_pci_mmio_enabled,
11453 	.slot_reset	= scsih_pci_slot_reset,
11454 	.resume		= scsih_pci_resume,
11455 };
11456 
11457 static struct pci_driver mpt3sas_driver = {
11458 	.name		= MPT3SAS_DRIVER_NAME,
11459 	.id_table	= mpt3sas_pci_table,
11460 	.probe		= _scsih_probe,
11461 	.remove		= scsih_remove,
11462 	.shutdown	= scsih_shutdown,
11463 	.err_handler	= &_mpt3sas_err_handler,
11464 #ifdef CONFIG_PM
11465 	.suspend	= scsih_suspend,
11466 	.resume		= scsih_resume,
11467 #endif
11468 };
11469 
11470 /**
11471  * scsih_init - main entry point for this driver.
11472  *
11473  * Return: 0 success, anything else error.
11474  */
11475 static int
11476 scsih_init(void)
11477 {
11478 	mpt2_ids = 0;
11479 	mpt3_ids = 0;
11480 
11481 	mpt3sas_base_initialize_callback_handler();
11482 
11483 	/* queuecommand callback handler */
11484 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
11485 
11486 	/* task management callback handler */
11487 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
11488 
11489 	/* base internal commands callback handler */
11490 	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
11491 	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
11492 	    mpt3sas_port_enable_done);
11493 
11494 	/* transport internal commands callback handler */
11495 	transport_cb_idx = mpt3sas_base_register_callback_handler(
11496 	    mpt3sas_transport_done);
11497 
11498 	/* scsih internal commands callback handler */
11499 	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
11500 
11501 	/* configuration page API internal commands callback handler */
11502 	config_cb_idx = mpt3sas_base_register_callback_handler(
11503 	    mpt3sas_config_done);
11504 
11505 	/* ctl module callback handler */
11506 	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
11507 
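	/* target reset (device removal handshake) callback handler */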
11508 	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
11509 	    _scsih_tm_tr_complete);
11510 
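	/* volume target reset callback handler */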
11511 	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
11512 	    _scsih_tm_volume_tr_complete);
11513 
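	/* SAS IO unit control (device removal) callback handler */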
11514 	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
11515 	    _scsih_sas_control_complete);
11516 
11517 	mpt3sas_init_debugfs();
11518 	return 0;
11519 }
11520 
11521 /**
11522  * scsih_exit - exit point for this driver (when it is a module).
11525  */
11526 static void
11527 scsih_exit(void)
11528 {
11529 
11530 	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
11531 	mpt3sas_base_release_callback_handler(tm_cb_idx);
11532 	mpt3sas_base_release_callback_handler(base_cb_idx);
11533 	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
11534 	mpt3sas_base_release_callback_handler(transport_cb_idx);
11535 	mpt3sas_base_release_callback_handler(scsih_cb_idx);
11536 	mpt3sas_base_release_callback_handler(config_cb_idx);
11537 	mpt3sas_base_release_callback_handler(ctl_cb_idx);
11538 
11539 	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
11540 	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
11541 	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
11542 
11543 	/* raid transport support */
11544 	if (hbas_to_enumerate != 1)
11545 		raid_class_release(mpt3sas_raid_template);
11546 	if (hbas_to_enumerate != 2)
11547 		raid_class_release(mpt2sas_raid_template);
11548 	sas_release_transport(mpt3sas_transport_template);
11549 	mpt3sas_exit_debugfs();
11550 }
11551 
11552 /**
11553  * _mpt3sas_init - main entry point for this driver.
11554  *
11555  * Return: 0 success, anything else error.
11556  */
11557 static int __init
11558 _mpt3sas_init(void)
11559 {
11560 	int error;
11561 
11562 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11563 					MPT3SAS_DRIVER_VERSION);
11564 
11565 	mpt3sas_transport_template =
11566 	    sas_attach_transport(&mpt3sas_transport_functions);
11567 	if (!mpt3sas_transport_template)
11568 		return -ENODEV;
11569 
11570 	/* No need to attach the mpt3sas raid functions template
11571 	 * if the hbas_to_enumerate value is one.
11572 	 */
11573 	if (hbas_to_enumerate != 1) {
11574 		mpt3sas_raid_template =
11575 				raid_class_attach(&mpt3sas_raid_functions);
11576 		if (!mpt3sas_raid_template) {
11577 			sas_release_transport(mpt3sas_transport_template);
11578 			return -ENODEV;
11579 		}
11580 	}
11581 
11582 	/* No need to attach the mpt2sas raid functions template
11583 	 * if the hbas_to_enumerate value is two.
11584 	 */
11585 	if (hbas_to_enumerate != 2) {
11586 		mpt2sas_raid_template =
11587 				raid_class_attach(&mpt2sas_raid_functions);
11588 		if (!mpt2sas_raid_template) {
11589 			sas_release_transport(mpt3sas_transport_template);
11590 			return -ENODEV;
11591 		}
11592 	}
11593 
11594 	error = scsih_init();
11595 	if (error) {
11596 		scsih_exit();
11597 		return error;
11598 	}
11599 
11600 	mpt3sas_ctl_init(hbas_to_enumerate);
11601 
11602 	error = pci_register_driver(&mpt3sas_driver);
11603 	if (error)
11604 		scsih_exit();
11605 
11606 	return error;
11607 }
11608 
11609 /**
11610  * _mpt3sas_exit - exit point for this driver (when it is a module).
11611  *
11612  */
11613 static void __exit
11614 _mpt3sas_exit(void)
11615 {
11616 	pr_info("mpt3sas version %s unloading\n",
11617 				MPT3SAS_DRIVER_VERSION);
11618 
11619 	mpt3sas_ctl_exit(hbas_to_enumerate);
11620 
11621 	pci_unregister_driver(&mpt3sas_driver);
11622 
11623 	scsih_exit();
11624 }
11625 
11626 module_init(_mpt3sas_init);
11627 module_exit(_mpt3sas_exit);
11628