1 /*
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/pci-aspm.h>
55 #include <linux/interrupt.h>
56 #include <linux/aer.h>
57 #include <linux/raid_class.h>
58 #include <asm/unaligned.h>
59
60 #include "mpt3sas_base.h"
61
62 #define RAID_CHANNEL 1
63
64 #define PCIE_CHANNEL 2
65
66 /* forward proto's */
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
70
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
78 static void
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
86
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
92
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
102 static int mpt2_ids;
103 static int mpt3_ids;
104
105 static u8 tm_tr_cb_idx = -1 ;
106 static u8 tm_tr_volume_cb_idx = -1 ;
107 static u8 tm_sas_control_cb_idx = -1;
108
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 " bits for enabling additional logging info (default=0)");
113
114
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
118
119
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0);
122 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
123
124 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 1 - enumerates only SAS 2.0 generation HBAs\n \
135 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136
137 /* diag_buffer_enable is bitwise
138 * bit 0 set = TRACE
139 * bit 1 set = SNAPSHOT
140 * bit 2 set = EXTENDED
141 *
142 * Either bit can be set, or both
143 */
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
151
152
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157
158
159 /* raid transport support */
160 static struct raid_template *mpt3sas_raid_template;
161 static struct raid_template *mpt2sas_raid_template;
162
163
164 /**
165 * struct sense_info - common structure for obtaining sense keys
166 * @skey: sense key
167 * @asc: additional sense code
168 * @ascq: additional sense code qualifier
169 */
170 struct sense_info {
171 u8 skey;
172 u8 asc;
173 u8 ascq;
174 };
175
176 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
177 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
178 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
179 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
180 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
181 /**
182 * struct fw_event_work - firmware event struct
183 * @list: link list framework
184 * @work: work object (ioc->fault_reset_work_q)
185 * @ioc: per adapter object
186 * @device_handle: device handle
187 * @VF_ID: virtual function id
188 * @VP_ID: virtual port id
189 * @ignore: flag meaning this event has been marked to ignore
190 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
191 * @refcount: kref for this event
192 * @event_data: reply event data payload follows
193 *
194 * This object stored on ioc->fw_event_list.
195 */
196 struct fw_event_work {
197 struct list_head list;
198 struct work_struct work;
199
200 struct MPT3SAS_ADAPTER *ioc;
201 u16 device_handle;
202 u8 VF_ID;
203 u8 VP_ID;
204 u8 ignore;
205 u16 event;
206 struct kref refcount;
207 char event_data[0] __aligned(4);
208 };
209
fw_event_work_free(struct kref * r)210 static void fw_event_work_free(struct kref *r)
211 {
212 kfree(container_of(r, struct fw_event_work, refcount));
213 }
214
fw_event_work_get(struct fw_event_work * fw_work)215 static void fw_event_work_get(struct fw_event_work *fw_work)
216 {
217 kref_get(&fw_work->refcount);
218 }
219
fw_event_work_put(struct fw_event_work * fw_work)220 static void fw_event_work_put(struct fw_event_work *fw_work)
221 {
222 kref_put(&fw_work->refcount, fw_event_work_free);
223 }
224
alloc_fw_event_work(int len)225 static struct fw_event_work *alloc_fw_event_work(int len)
226 {
227 struct fw_event_work *fw_event;
228
229 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
230 if (!fw_event)
231 return NULL;
232
233 kref_init(&fw_event->refcount);
234 return fw_event;
235 }
236
237 /**
238 * struct _scsi_io_transfer - scsi io transfer
239 * @handle: sas device handle (assigned by firmware)
240 * @is_raid: flag set for hidden raid components
241 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
242 * @data_length: data transfer length
243 * @data_dma: dma pointer to data
244 * @sense: sense data
245 * @lun: lun number
246 * @cdb_length: cdb length
247 * @cdb: cdb contents
248 * @timeout: timeout for this command
249 * @VF_ID: virtual function id
250 * @VP_ID: virtual port id
251 * @valid_reply: flag set for reply message
252 * @sense_length: sense length
253 * @ioc_status: ioc status
254 * @scsi_state: scsi state
255 * @scsi_status: scsi staus
256 * @log_info: log information
257 * @transfer_length: data length transfer when there is a reply message
258 *
259 * Used for sending internal scsi commands to devices within this module.
260 * Refer to _scsi_send_scsi_io().
261 */
262 struct _scsi_io_transfer {
263 u16 handle;
264 u8 is_raid;
265 enum dma_data_direction dir;
266 u32 data_length;
267 dma_addr_t data_dma;
268 u8 sense[SCSI_SENSE_BUFFERSIZE];
269 u32 lun;
270 u8 cdb_length;
271 u8 cdb[32];
272 u8 timeout;
273 u8 VF_ID;
274 u8 VP_ID;
275 u8 valid_reply;
276 /* the following bits are only valid when 'valid_reply = 1' */
277 u32 sense_length;
278 u16 ioc_status;
279 u8 scsi_state;
280 u8 scsi_status;
281 u32 log_info;
282 u32 transfer_length;
283 };
284
285 /**
286 * _scsih_set_debug_level - global setting of ioc->logging_level.
287 * @val: ?
288 * @kp: ?
289 *
290 * Note: The logging levels are defined in mpt3sas_debug.h.
291 */
292 static int
_scsih_set_debug_level(const char * val,const struct kernel_param * kp)293 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
294 {
295 int ret = param_set_int(val, kp);
296 struct MPT3SAS_ADAPTER *ioc;
297
298 if (ret)
299 return ret;
300
301 pr_info("setting logging_level(0x%08x)\n", logging_level);
302 spin_lock(&gioc_lock);
303 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
304 ioc->logging_level = logging_level;
305 spin_unlock(&gioc_lock);
306 return 0;
307 }
308 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
309 &logging_level, 0644);
310
311 /**
312 * _scsih_srch_boot_sas_address - search based on sas_address
313 * @sas_address: sas address
314 * @boot_device: boot device object from bios page 2
315 *
316 * Return: 1 when there's a match, 0 means no match.
317 */
318 static inline int
_scsih_srch_boot_sas_address(u64 sas_address,Mpi2BootDeviceSasWwid_t * boot_device)319 _scsih_srch_boot_sas_address(u64 sas_address,
320 Mpi2BootDeviceSasWwid_t *boot_device)
321 {
322 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
323 }
324
325 /**
326 * _scsih_srch_boot_device_name - search based on device name
327 * @device_name: device name specified in INDENTIFY fram
328 * @boot_device: boot device object from bios page 2
329 *
330 * Return: 1 when there's a match, 0 means no match.
331 */
332 static inline int
_scsih_srch_boot_device_name(u64 device_name,Mpi2BootDeviceDeviceName_t * boot_device)333 _scsih_srch_boot_device_name(u64 device_name,
334 Mpi2BootDeviceDeviceName_t *boot_device)
335 {
336 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
337 }
338
339 /**
340 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
341 * @enclosure_logical_id: enclosure logical id
342 * @slot_number: slot number
343 * @boot_device: boot device object from bios page 2
344 *
345 * Return: 1 when there's a match, 0 means no match.
346 */
347 static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id,u16 slot_number,Mpi2BootDeviceEnclosureSlot_t * boot_device)348 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
349 Mpi2BootDeviceEnclosureSlot_t *boot_device)
350 {
351 return (enclosure_logical_id == le64_to_cpu(boot_device->
352 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
353 SlotNumber)) ? 1 : 0;
354 }
355
356 /**
357 * _scsih_is_boot_device - search for matching boot device.
358 * @sas_address: sas address
359 * @device_name: device name specified in INDENTIFY fram
360 * @enclosure_logical_id: enclosure logical id
361 * @slot: slot number
362 * @form: specifies boot device form
363 * @boot_device: boot device object from bios page 2
364 *
365 * Return: 1 when there's a match, 0 means no match.
366 */
367 static int
_scsih_is_boot_device(u64 sas_address,u64 device_name,u64 enclosure_logical_id,u16 slot,u8 form,Mpi2BiosPage2BootDevice_t * boot_device)368 _scsih_is_boot_device(u64 sas_address, u64 device_name,
369 u64 enclosure_logical_id, u16 slot, u8 form,
370 Mpi2BiosPage2BootDevice_t *boot_device)
371 {
372 int rc = 0;
373
374 switch (form) {
375 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
376 if (!sas_address)
377 break;
378 rc = _scsih_srch_boot_sas_address(
379 sas_address, &boot_device->SasWwid);
380 break;
381 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
382 if (!enclosure_logical_id)
383 break;
384 rc = _scsih_srch_boot_encl_slot(
385 enclosure_logical_id,
386 slot, &boot_device->EnclosureSlot);
387 break;
388 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
389 if (!device_name)
390 break;
391 rc = _scsih_srch_boot_device_name(
392 device_name, &boot_device->DeviceName);
393 break;
394 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
395 break;
396 }
397
398 return rc;
399 }
400
401 /**
402 * _scsih_get_sas_address - set the sas_address for given device handle
403 * @ioc: ?
404 * @handle: device handle
405 * @sas_address: sas address
406 *
407 * Return: 0 success, non-zero when failure
408 */
409 static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER * ioc,u16 handle,u64 * sas_address)410 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
411 u64 *sas_address)
412 {
413 Mpi2SasDevicePage0_t sas_device_pg0;
414 Mpi2ConfigReply_t mpi_reply;
415 u32 ioc_status;
416
417 *sas_address = 0;
418
419 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
420 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
421 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
422 __FILE__, __LINE__, __func__);
423 return -ENXIO;
424 }
425
426 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
427 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
428 /* For HBA, vSES doesn't return HBA SAS address. Instead return
429 * vSES's sas address.
430 */
431 if ((handle <= ioc->sas_hba.num_phys) &&
432 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
433 MPI2_SAS_DEVICE_INFO_SEP)))
434 *sas_address = ioc->sas_hba.sas_address;
435 else
436 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
437 return 0;
438 }
439
440 /* we hit this because the given parent handle doesn't exist */
441 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
442 return -ENXIO;
443
444 /* else error case */
445 pr_err(MPT3SAS_FMT
446 "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
447 ioc->name, handle, ioc_status,
448 __FILE__, __LINE__, __func__);
449 return -EIO;
450 }
451
452 /**
453 * _scsih_determine_boot_device - determine boot device.
454 * @ioc: per adapter object
455 * @device: sas_device or pcie_device object
456 * @channel: SAS or PCIe channel
457 *
458 * Determines whether this device should be first reported device to
459 * to scsi-ml or sas transport, this purpose is for persistent boot device.
460 * There are primary, alternate, and current entries in bios page 2. The order
461 * priority is primary, alternate, then current. This routine saves
462 * the corresponding device object.
463 * The saved data to be used later in _scsih_probe_boot_devices().
464 */
465 static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER * ioc,void * device,u32 channel)466 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
467 u32 channel)
468 {
469 struct _sas_device *sas_device;
470 struct _pcie_device *pcie_device;
471 struct _raid_device *raid_device;
472 u64 sas_address;
473 u64 device_name;
474 u64 enclosure_logical_id;
475 u16 slot;
476
477 /* only process this function when driver loads */
478 if (!ioc->is_driver_loading)
479 return;
480
481 /* no Bios, return immediately */
482 if (!ioc->bios_pg3.BiosVersion)
483 return;
484
485 if (channel == RAID_CHANNEL) {
486 raid_device = device;
487 sas_address = raid_device->wwid;
488 device_name = 0;
489 enclosure_logical_id = 0;
490 slot = 0;
491 } else if (channel == PCIE_CHANNEL) {
492 pcie_device = device;
493 sas_address = pcie_device->wwid;
494 device_name = 0;
495 enclosure_logical_id = 0;
496 slot = 0;
497 } else {
498 sas_device = device;
499 sas_address = sas_device->sas_address;
500 device_name = sas_device->device_name;
501 enclosure_logical_id = sas_device->enclosure_logical_id;
502 slot = sas_device->slot;
503 }
504
505 if (!ioc->req_boot_device.device) {
506 if (_scsih_is_boot_device(sas_address, device_name,
507 enclosure_logical_id, slot,
508 (ioc->bios_pg2.ReqBootDeviceForm &
509 MPI2_BIOSPAGE2_FORM_MASK),
510 &ioc->bios_pg2.RequestedBootDevice)) {
511 dinitprintk(ioc, pr_info(MPT3SAS_FMT
512 "%s: req_boot_device(0x%016llx)\n",
513 ioc->name, __func__,
514 (unsigned long long)sas_address));
515 ioc->req_boot_device.device = device;
516 ioc->req_boot_device.channel = channel;
517 }
518 }
519
520 if (!ioc->req_alt_boot_device.device) {
521 if (_scsih_is_boot_device(sas_address, device_name,
522 enclosure_logical_id, slot,
523 (ioc->bios_pg2.ReqAltBootDeviceForm &
524 MPI2_BIOSPAGE2_FORM_MASK),
525 &ioc->bios_pg2.RequestedAltBootDevice)) {
526 dinitprintk(ioc, pr_info(MPT3SAS_FMT
527 "%s: req_alt_boot_device(0x%016llx)\n",
528 ioc->name, __func__,
529 (unsigned long long)sas_address));
530 ioc->req_alt_boot_device.device = device;
531 ioc->req_alt_boot_device.channel = channel;
532 }
533 }
534
535 if (!ioc->current_boot_device.device) {
536 if (_scsih_is_boot_device(sas_address, device_name,
537 enclosure_logical_id, slot,
538 (ioc->bios_pg2.CurrentBootDeviceForm &
539 MPI2_BIOSPAGE2_FORM_MASK),
540 &ioc->bios_pg2.CurrentBootDevice)) {
541 dinitprintk(ioc, pr_info(MPT3SAS_FMT
542 "%s: current_boot_device(0x%016llx)\n",
543 ioc->name, __func__,
544 (unsigned long long)sas_address));
545 ioc->current_boot_device.device = device;
546 ioc->current_boot_device.channel = channel;
547 }
548 }
549 }
550
551 static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)552 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
553 struct MPT3SAS_TARGET *tgt_priv)
554 {
555 struct _sas_device *ret;
556
557 assert_spin_locked(&ioc->sas_device_lock);
558
559 ret = tgt_priv->sas_dev;
560 if (ret)
561 sas_device_get(ret);
562
563 return ret;
564 }
565
566 static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)567 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
568 struct MPT3SAS_TARGET *tgt_priv)
569 {
570 struct _sas_device *ret;
571 unsigned long flags;
572
573 spin_lock_irqsave(&ioc->sas_device_lock, flags);
574 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
575 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
576
577 return ret;
578 }
579
580 static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)581 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
582 struct MPT3SAS_TARGET *tgt_priv)
583 {
584 struct _pcie_device *ret;
585
586 assert_spin_locked(&ioc->pcie_device_lock);
587
588 ret = tgt_priv->pcie_dev;
589 if (ret)
590 pcie_device_get(ret);
591
592 return ret;
593 }
594
595 /**
596 * mpt3sas_get_pdev_from_target - pcie device search
597 * @ioc: per adapter object
598 * @tgt_priv: starget private object
599 *
600 * Context: This function will acquire ioc->pcie_device_lock and will release
601 * before returning the pcie_device object.
602 *
603 * This searches for pcie_device from target, then return pcie_device object.
604 */
605 static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)606 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
607 struct MPT3SAS_TARGET *tgt_priv)
608 {
609 struct _pcie_device *ret;
610 unsigned long flags;
611
612 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
613 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
614 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
615
616 return ret;
617 }
618
619 struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)620 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
621 u64 sas_address)
622 {
623 struct _sas_device *sas_device;
624
625 assert_spin_locked(&ioc->sas_device_lock);
626
627 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
628 if (sas_device->sas_address == sas_address)
629 goto found_device;
630
631 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
632 if (sas_device->sas_address == sas_address)
633 goto found_device;
634
635 return NULL;
636
637 found_device:
638 sas_device_get(sas_device);
639 return sas_device;
640 }
641
642 /**
643 * mpt3sas_get_sdev_by_addr - sas device search
644 * @ioc: per adapter object
645 * @sas_address: sas address
646 * Context: Calling function should acquire ioc->sas_device_lock
647 *
648 * This searches for sas_device based on sas_address, then return sas_device
649 * object.
650 */
651 struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)652 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
653 u64 sas_address)
654 {
655 struct _sas_device *sas_device;
656 unsigned long flags;
657
658 spin_lock_irqsave(&ioc->sas_device_lock, flags);
659 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
660 sas_address);
661 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
662
663 return sas_device;
664 }
665
666 static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)667 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
668 {
669 struct _sas_device *sas_device;
670
671 assert_spin_locked(&ioc->sas_device_lock);
672
673 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
674 if (sas_device->handle == handle)
675 goto found_device;
676
677 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
678 if (sas_device->handle == handle)
679 goto found_device;
680
681 return NULL;
682
683 found_device:
684 sas_device_get(sas_device);
685 return sas_device;
686 }
687
688 /**
689 * mpt3sas_get_sdev_by_handle - sas device search
690 * @ioc: per adapter object
691 * @handle: sas device handle (assigned by firmware)
692 * Context: Calling function should acquire ioc->sas_device_lock
693 *
694 * This searches for sas_device based on sas_address, then return sas_device
695 * object.
696 */
697 struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)698 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
699 {
700 struct _sas_device *sas_device;
701 unsigned long flags;
702
703 spin_lock_irqsave(&ioc->sas_device_lock, flags);
704 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
705 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
706
707 return sas_device;
708 }
709
710 /**
711 * _scsih_display_enclosure_chassis_info - display device location info
712 * @ioc: per adapter object
713 * @sas_device: per sas device object
714 * @sdev: scsi device struct
715 * @starget: scsi target struct
716 */
717 static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device,struct scsi_device * sdev,struct scsi_target * starget)718 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
719 struct _sas_device *sas_device, struct scsi_device *sdev,
720 struct scsi_target *starget)
721 {
722 if (sdev) {
723 if (sas_device->enclosure_handle != 0)
724 sdev_printk(KERN_INFO, sdev,
725 "enclosure logical id (0x%016llx), slot(%d) \n",
726 (unsigned long long)
727 sas_device->enclosure_logical_id,
728 sas_device->slot);
729 if (sas_device->connector_name[0] != '\0')
730 sdev_printk(KERN_INFO, sdev,
731 "enclosure level(0x%04x), connector name( %s)\n",
732 sas_device->enclosure_level,
733 sas_device->connector_name);
734 if (sas_device->is_chassis_slot_valid)
735 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
736 sas_device->chassis_slot);
737 } else if (starget) {
738 if (sas_device->enclosure_handle != 0)
739 starget_printk(KERN_INFO, starget,
740 "enclosure logical id(0x%016llx), slot(%d) \n",
741 (unsigned long long)
742 sas_device->enclosure_logical_id,
743 sas_device->slot);
744 if (sas_device->connector_name[0] != '\0')
745 starget_printk(KERN_INFO, starget,
746 "enclosure level(0x%04x), connector name( %s)\n",
747 sas_device->enclosure_level,
748 sas_device->connector_name);
749 if (sas_device->is_chassis_slot_valid)
750 starget_printk(KERN_INFO, starget,
751 "chassis slot(0x%04x)\n",
752 sas_device->chassis_slot);
753 } else {
754 if (sas_device->enclosure_handle != 0)
755 pr_info(MPT3SAS_FMT
756 "enclosure logical id(0x%016llx), slot(%d) \n",
757 ioc->name, (unsigned long long)
758 sas_device->enclosure_logical_id,
759 sas_device->slot);
760 if (sas_device->connector_name[0] != '\0')
761 pr_info(MPT3SAS_FMT
762 "enclosure level(0x%04x), connector name( %s)\n",
763 ioc->name, sas_device->enclosure_level,
764 sas_device->connector_name);
765 if (sas_device->is_chassis_slot_valid)
766 pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
767 ioc->name, sas_device->chassis_slot);
768 }
769 }
770
771 /**
772 * _scsih_sas_device_remove - remove sas_device from list.
773 * @ioc: per adapter object
774 * @sas_device: the sas_device object
775 * Context: This function will acquire ioc->sas_device_lock.
776 *
777 * If sas_device is on the list, remove it and decrement its reference count.
778 */
779 static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)780 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
781 struct _sas_device *sas_device)
782 {
783 unsigned long flags;
784
785 if (!sas_device)
786 return;
787 pr_info(MPT3SAS_FMT
788 "removing handle(0x%04x), sas_addr(0x%016llx)\n",
789 ioc->name, sas_device->handle,
790 (unsigned long long) sas_device->sas_address);
791
792 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
793
794 /*
795 * The lock serializes access to the list, but we still need to verify
796 * that nobody removed the entry while we were waiting on the lock.
797 */
798 spin_lock_irqsave(&ioc->sas_device_lock, flags);
799 if (!list_empty(&sas_device->list)) {
800 list_del_init(&sas_device->list);
801 sas_device_put(sas_device);
802 }
803 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
804 }
805
806 /**
807 * _scsih_device_remove_by_handle - removing device object by handle
808 * @ioc: per adapter object
809 * @handle: device handle
810 */
811 static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)812 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
813 {
814 struct _sas_device *sas_device;
815 unsigned long flags;
816
817 if (ioc->shost_recovery)
818 return;
819
820 spin_lock_irqsave(&ioc->sas_device_lock, flags);
821 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
822 if (sas_device) {
823 list_del_init(&sas_device->list);
824 sas_device_put(sas_device);
825 }
826 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
827 if (sas_device) {
828 _scsih_remove_device(ioc, sas_device);
829 sas_device_put(sas_device);
830 }
831 }
832
833 /**
834 * mpt3sas_device_remove_by_sas_address - removing device object by sas address
835 * @ioc: per adapter object
836 * @sas_address: device sas_address
837 */
838 void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)839 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
840 u64 sas_address)
841 {
842 struct _sas_device *sas_device;
843 unsigned long flags;
844
845 if (ioc->shost_recovery)
846 return;
847
848 spin_lock_irqsave(&ioc->sas_device_lock, flags);
849 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
850 if (sas_device) {
851 list_del_init(&sas_device->list);
852 sas_device_put(sas_device);
853 }
854 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
855 if (sas_device) {
856 _scsih_remove_device(ioc, sas_device);
857 sas_device_put(sas_device);
858 }
859 }
860
861 /**
862 * _scsih_sas_device_add - insert sas_device to the list.
863 * @ioc: per adapter object
864 * @sas_device: the sas_device object
865 * Context: This function will acquire ioc->sas_device_lock.
866 *
867 * Adding new object to the ioc->sas_device_list.
868 */
869 static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)870 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
871 struct _sas_device *sas_device)
872 {
873 unsigned long flags;
874
875 dewtprintk(ioc, pr_info(MPT3SAS_FMT
876 "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
877 ioc->name, __func__, sas_device->handle,
878 (unsigned long long)sas_device->sas_address));
879
880 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
881 NULL, NULL));
882
883 spin_lock_irqsave(&ioc->sas_device_lock, flags);
884 sas_device_get(sas_device);
885 list_add_tail(&sas_device->list, &ioc->sas_device_list);
886 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
887
888 if (ioc->hide_drives) {
889 clear_bit(sas_device->handle, ioc->pend_os_device_add);
890 return;
891 }
892
893 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
894 sas_device->sas_address_parent)) {
895 _scsih_sas_device_remove(ioc, sas_device);
896 } else if (!sas_device->starget) {
897 /*
898 * When asyn scanning is enabled, its not possible to remove
899 * devices while scanning is turned on due to an oops in
900 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
901 */
902 if (!ioc->is_driver_loading) {
903 mpt3sas_transport_port_remove(ioc,
904 sas_device->sas_address,
905 sas_device->sas_address_parent);
906 _scsih_sas_device_remove(ioc, sas_device);
907 }
908 } else
909 clear_bit(sas_device->handle, ioc->pend_os_device_add);
910 }
911
912 /**
913 * _scsih_sas_device_init_add - insert sas_device to the list.
914 * @ioc: per adapter object
915 * @sas_device: the sas_device object
916 * Context: This function will acquire ioc->sas_device_lock.
917 *
918 * Adding new object at driver load time to the ioc->sas_device_init_list.
919 */
920 static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)921 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
922 struct _sas_device *sas_device)
923 {
924 unsigned long flags;
925
926 dewtprintk(ioc, pr_info(MPT3SAS_FMT
927 "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
928 __func__, sas_device->handle,
929 (unsigned long long)sas_device->sas_address));
930
931 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
932 NULL, NULL));
933
934 spin_lock_irqsave(&ioc->sas_device_lock, flags);
935 sas_device_get(sas_device);
936 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
937 _scsih_determine_boot_device(ioc, sas_device, 0);
938 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
939 }
940
941
942 static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)943 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
944 {
945 struct _pcie_device *pcie_device;
946
947 assert_spin_locked(&ioc->pcie_device_lock);
948
949 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
950 if (pcie_device->wwid == wwid)
951 goto found_device;
952
953 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
954 if (pcie_device->wwid == wwid)
955 goto found_device;
956
957 return NULL;
958
959 found_device:
960 pcie_device_get(pcie_device);
961 return pcie_device;
962 }
963
964
965 /**
966 * mpt3sas_get_pdev_by_wwid - pcie device search
967 * @ioc: per adapter object
968 * @wwid: wwid
969 *
970 * Context: This function will acquire ioc->pcie_device_lock and will release
971 * before returning the pcie_device object.
972 *
973 * This searches for pcie_device based on wwid, then return pcie_device object.
974 */
975 static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)976 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
977 {
978 struct _pcie_device *pcie_device;
979 unsigned long flags;
980
981 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
982 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
983 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
984
985 return pcie_device;
986 }
987
988
989 static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER * ioc,int id,int channel)990 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
991 int channel)
992 {
993 struct _pcie_device *pcie_device;
994
995 assert_spin_locked(&ioc->pcie_device_lock);
996
997 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
998 if (pcie_device->id == id && pcie_device->channel == channel)
999 goto found_device;
1000
1001 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1002 if (pcie_device->id == id && pcie_device->channel == channel)
1003 goto found_device;
1004
1005 return NULL;
1006
1007 found_device:
1008 pcie_device_get(pcie_device);
1009 return pcie_device;
1010 }
1011
1012 static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1013 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1014 {
1015 struct _pcie_device *pcie_device;
1016
1017 assert_spin_locked(&ioc->pcie_device_lock);
1018
1019 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1020 if (pcie_device->handle == handle)
1021 goto found_device;
1022
1023 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1024 if (pcie_device->handle == handle)
1025 goto found_device;
1026
1027 return NULL;
1028
1029 found_device:
1030 pcie_device_get(pcie_device);
1031 return pcie_device;
1032 }
1033
1034
1035 /**
1036 * mpt3sas_get_pdev_by_handle - pcie device search
1037 * @ioc: per adapter object
1038 * @handle: Firmware device handle
1039 *
1040 * Context: This function will acquire ioc->pcie_device_lock and will release
1041 * before returning the pcie_device object.
1042 *
1043 * This searches for pcie_device based on handle, then return pcie_device
1044 * object.
1045 */
1046 struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1047 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1048 {
1049 struct _pcie_device *pcie_device;
1050 unsigned long flags;
1051
1052 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1053 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1054 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1055
1056 return pcie_device;
1057 }
1058
1059 /**
1060 * _scsih_pcie_device_remove - remove pcie_device from list.
1061 * @ioc: per adapter object
1062 * @pcie_device: the pcie_device object
1063 * Context: This function will acquire ioc->pcie_device_lock.
1064 *
1065 * If pcie_device is on the list, remove it and decrement its reference count.
1066 */
1067 static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)1068 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1069 struct _pcie_device *pcie_device)
1070 {
1071 unsigned long flags;
1072 int was_on_pcie_device_list = 0;
1073
1074 if (!pcie_device)
1075 return;
1076 pr_info(MPT3SAS_FMT
1077 "removing handle(0x%04x), wwid(0x%016llx)\n",
1078 ioc->name, pcie_device->handle,
1079 (unsigned long long) pcie_device->wwid);
1080 if (pcie_device->enclosure_handle != 0)
1081 pr_info(MPT3SAS_FMT
1082 "removing enclosure logical id(0x%016llx), slot(%d)\n",
1083 ioc->name,
1084 (unsigned long long)pcie_device->enclosure_logical_id,
1085 pcie_device->slot);
1086 if (pcie_device->connector_name[0] != '\0')
1087 pr_info(MPT3SAS_FMT
1088 "removing enclosure level(0x%04x), connector name( %s)\n",
1089 ioc->name, pcie_device->enclosure_level,
1090 pcie_device->connector_name);
1091
1092 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1093 if (!list_empty(&pcie_device->list)) {
1094 list_del_init(&pcie_device->list);
1095 was_on_pcie_device_list = 1;
1096 }
1097 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1098 if (was_on_pcie_device_list) {
1099 kfree(pcie_device->serial_number);
1100 pcie_device_put(pcie_device);
1101 }
1102 }
1103
1104
1105 /**
1106 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1107 * @ioc: per adapter object
1108 * @handle: device handle
1109 */
1110 static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1111 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1112 {
1113 struct _pcie_device *pcie_device;
1114 unsigned long flags;
1115 int was_on_pcie_device_list = 0;
1116
1117 if (ioc->shost_recovery)
1118 return;
1119
1120 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1121 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1122 if (pcie_device) {
1123 if (!list_empty(&pcie_device->list)) {
1124 list_del_init(&pcie_device->list);
1125 was_on_pcie_device_list = 1;
1126 pcie_device_put(pcie_device);
1127 }
1128 }
1129 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1130 if (was_on_pcie_device_list) {
1131 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1132 pcie_device_put(pcie_device);
1133 }
1134 }
1135
1136 /**
1137 * _scsih_pcie_device_add - add pcie_device object
1138 * @ioc: per adapter object
1139 * @pcie_device: pcie_device object
1140 *
1141 * This is added to the pcie_device_list link list.
1142 */
1143 static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)1144 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1145 struct _pcie_device *pcie_device)
1146 {
1147 unsigned long flags;
1148
1149 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1150 "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
1151 pcie_device->handle, (unsigned long long)pcie_device->wwid));
1152 if (pcie_device->enclosure_handle != 0)
1153 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1154 "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1155 ioc->name, __func__,
1156 (unsigned long long)pcie_device->enclosure_logical_id,
1157 pcie_device->slot));
1158 if (pcie_device->connector_name[0] != '\0')
1159 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1160 "%s: enclosure level(0x%04x), connector name( %s)\n",
1161 ioc->name, __func__, pcie_device->enclosure_level,
1162 pcie_device->connector_name));
1163
1164 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1165 pcie_device_get(pcie_device);
1166 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1167 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1168
1169 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1170 _scsih_pcie_device_remove(ioc, pcie_device);
1171 } else if (!pcie_device->starget) {
1172 if (!ioc->is_driver_loading) {
1173 /*TODO-- Need to find out whether this condition will occur or not*/
1174 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1175 }
1176 } else
1177 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1178 }
1179
1180 /*
1181 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1182 * @ioc: per adapter object
1183 * @pcie_device: the pcie_device object
1184 * Context: This function will acquire ioc->pcie_device_lock.
1185 *
1186 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1187 */
1188 static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)1189 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1190 struct _pcie_device *pcie_device)
1191 {
1192 unsigned long flags;
1193
1194 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1195 "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
1196 pcie_device->handle, (unsigned long long)pcie_device->wwid));
1197 if (pcie_device->enclosure_handle != 0)
1198 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1199 "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1200 ioc->name, __func__,
1201 (unsigned long long)pcie_device->enclosure_logical_id,
1202 pcie_device->slot));
1203 if (pcie_device->connector_name[0] != '\0')
1204 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1205 "%s: enclosure level(0x%04x), connector name( %s)\n",
1206 ioc->name, __func__, pcie_device->enclosure_level,
1207 pcie_device->connector_name));
1208
1209 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1210 pcie_device_get(pcie_device);
1211 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1212 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1213 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1214 }
1215 /**
1216 * _scsih_raid_device_find_by_id - raid device search
1217 * @ioc: per adapter object
1218 * @id: sas device target id
1219 * @channel: sas device channel
1220 * Context: Calling function should acquire ioc->raid_device_lock
1221 *
1222 * This searches for raid_device based on target id, then return raid_device
1223 * object.
1224 */
1225 static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1226 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1227 {
1228 struct _raid_device *raid_device, *r;
1229
1230 r = NULL;
1231 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1232 if (raid_device->id == id && raid_device->channel == channel) {
1233 r = raid_device;
1234 goto out;
1235 }
1236 }
1237
1238 out:
1239 return r;
1240 }
1241
1242 /**
1243 * mpt3sas_raid_device_find_by_handle - raid device search
1244 * @ioc: per adapter object
1245 * @handle: sas device handle (assigned by firmware)
1246 * Context: Calling function should acquire ioc->raid_device_lock
1247 *
1248 * This searches for raid_device based on handle, then return raid_device
1249 * object.
1250 */
1251 struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1252 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1253 {
1254 struct _raid_device *raid_device, *r;
1255
1256 r = NULL;
1257 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1258 if (raid_device->handle != handle)
1259 continue;
1260 r = raid_device;
1261 goto out;
1262 }
1263
1264 out:
1265 return r;
1266 }
1267
1268 /**
1269 * _scsih_raid_device_find_by_wwid - raid device search
1270 * @ioc: per adapter object
1271 * @wwid: ?
1272 * Context: Calling function should acquire ioc->raid_device_lock
1273 *
1274 * This searches for raid_device based on wwid, then return raid_device
1275 * object.
1276 */
1277 static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1278 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1279 {
1280 struct _raid_device *raid_device, *r;
1281
1282 r = NULL;
1283 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1284 if (raid_device->wwid != wwid)
1285 continue;
1286 r = raid_device;
1287 goto out;
1288 }
1289
1290 out:
1291 return r;
1292 }
1293
1294 /**
1295 * _scsih_raid_device_add - add raid_device object
1296 * @ioc: per adapter object
1297 * @raid_device: raid_device object
1298 *
1299 * This is added to the raid_device_list link list.
1300 */
1301 static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1302 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1303 struct _raid_device *raid_device)
1304 {
1305 unsigned long flags;
1306
1307 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1308 "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
1309 raid_device->handle, (unsigned long long)raid_device->wwid));
1310
1311 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1312 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1313 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1314 }
1315
1316 /**
1317 * _scsih_raid_device_remove - delete raid_device object
1318 * @ioc: per adapter object
1319 * @raid_device: raid_device object
1320 *
1321 */
1322 static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1323 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1324 struct _raid_device *raid_device)
1325 {
1326 unsigned long flags;
1327
1328 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1329 list_del(&raid_device->list);
1330 kfree(raid_device);
1331 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1332 }
1333
1334 /**
1335 * mpt3sas_scsih_expander_find_by_handle - expander device search
1336 * @ioc: per adapter object
1337 * @handle: expander handle (assigned by firmware)
1338 * Context: Calling function should acquire ioc->sas_device_lock
1339 *
1340 * This searches for expander device based on handle, then returns the
1341 * sas_node object.
1342 */
1343 struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1344 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1345 {
1346 struct _sas_node *sas_expander, *r;
1347
1348 r = NULL;
1349 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1350 if (sas_expander->handle != handle)
1351 continue;
1352 r = sas_expander;
1353 goto out;
1354 }
1355 out:
1356 return r;
1357 }
1358
1359 /**
1360 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1361 * @ioc: per adapter object
1362 * @handle: enclosure handle (assigned by firmware)
1363 * Context: Calling function should acquire ioc->sas_device_lock
1364 *
1365 * This searches for enclosure device based on handle, then returns the
1366 * enclosure object.
1367 */
1368 static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1369 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1370 {
1371 struct _enclosure_node *enclosure_dev, *r;
1372
1373 r = NULL;
1374 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1375 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1376 continue;
1377 r = enclosure_dev;
1378 goto out;
1379 }
1380 out:
1381 return r;
1382 }
1383 /**
1384 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1385 * @ioc: per adapter object
1386 * @sas_address: sas address
1387 * Context: Calling function should acquire ioc->sas_node_lock.
1388 *
1389 * This searches for expander device based on sas_address, then returns the
1390 * sas_node object.
1391 */
1392 struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)1393 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1394 u64 sas_address)
1395 {
1396 struct _sas_node *sas_expander, *r;
1397
1398 r = NULL;
1399 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1400 if (sas_expander->sas_address != sas_address)
1401 continue;
1402 r = sas_expander;
1403 goto out;
1404 }
1405 out:
1406 return r;
1407 }
1408
1409 /**
1410 * _scsih_expander_node_add - insert expander device to the list.
1411 * @ioc: per adapter object
1412 * @sas_expander: the sas_device object
1413 * Context: This function will acquire ioc->sas_node_lock.
1414 *
1415 * Adding new object to the ioc->sas_expander_list.
1416 */
1417 static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)1418 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1419 struct _sas_node *sas_expander)
1420 {
1421 unsigned long flags;
1422
1423 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1424 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1425 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1426 }
1427
1428 /**
1429 * _scsih_is_end_device - determines if device is an end device
1430 * @device_info: bitfield providing information about the device.
1431 * Context: none
1432 *
1433 * Return: 1 if end device.
1434 */
1435 static int
_scsih_is_end_device(u32 device_info)1436 _scsih_is_end_device(u32 device_info)
1437 {
1438 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1439 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1440 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1441 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1442 return 1;
1443 else
1444 return 0;
1445 }
1446
1447 /**
1448 * _scsih_is_nvme_device - determines if device is an nvme device
1449 * @device_info: bitfield providing information about the device.
1450 * Context: none
1451 *
1452 * Return: 1 if nvme device.
1453 */
1454 static int
_scsih_is_nvme_device(u32 device_info)1455 _scsih_is_nvme_device(u32 device_info)
1456 {
1457 if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1458 == MPI26_PCIE_DEVINFO_NVME)
1459 return 1;
1460 else
1461 return 0;
1462 }
1463
1464 /**
1465 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1466 * @ioc: per adapter object
1467 * @smid: system request message index
1468 *
1469 * Return: the smid stored scmd pointer.
1470 * Then will dereference the stored scmd pointer.
1471 */
1472 struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER * ioc,u16 smid)1473 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1474 {
1475 struct scsi_cmnd *scmd = NULL;
1476 struct scsiio_tracker *st;
1477
1478 if (smid > 0 &&
1479 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1480 u32 unique_tag = smid - 1;
1481
1482 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1483 if (scmd) {
1484 st = scsi_cmd_priv(scmd);
1485 if (st->cb_idx == 0xFF || st->smid == 0)
1486 scmd = NULL;
1487 }
1488 }
1489 return scmd;
1490 }
1491
1492 /**
1493 * scsih_change_queue_depth - setting device queue depth
1494 * @sdev: scsi device struct
1495 * @qdepth: requested queue depth
1496 *
1497 * Return: queue depth.
1498 */
1499 static int
scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1500 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1501 {
1502 struct Scsi_Host *shost = sdev->host;
1503 int max_depth;
1504 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1505 struct MPT3SAS_DEVICE *sas_device_priv_data;
1506 struct MPT3SAS_TARGET *sas_target_priv_data;
1507 struct _sas_device *sas_device;
1508 unsigned long flags;
1509
1510 max_depth = shost->can_queue;
1511
1512 /* limit max device queue for SATA to 32 */
1513 sas_device_priv_data = sdev->hostdata;
1514 if (!sas_device_priv_data)
1515 goto not_sata;
1516 sas_target_priv_data = sas_device_priv_data->sas_target;
1517 if (!sas_target_priv_data)
1518 goto not_sata;
1519 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1520 goto not_sata;
1521
1522 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1523 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1524 if (sas_device) {
1525 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1526 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1527
1528 sas_device_put(sas_device);
1529 }
1530 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1531
1532 not_sata:
1533
1534 if (!sdev->tagged_supported)
1535 max_depth = 1;
1536 if (qdepth > max_depth)
1537 qdepth = max_depth;
1538 return scsi_change_queue_depth(sdev, qdepth);
1539 }
1540
1541 /**
1542 * scsih_target_alloc - target add routine
1543 * @starget: scsi target struct
1544 *
1545 * Return: 0 if ok. Any other return is assumed to be an error and
1546 * the device is ignored.
1547 */
1548 static int
scsih_target_alloc(struct scsi_target * starget)1549 scsih_target_alloc(struct scsi_target *starget)
1550 {
1551 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1552 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1553 struct MPT3SAS_TARGET *sas_target_priv_data;
1554 struct _sas_device *sas_device;
1555 struct _raid_device *raid_device;
1556 struct _pcie_device *pcie_device;
1557 unsigned long flags;
1558 struct sas_rphy *rphy;
1559
1560 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1561 GFP_KERNEL);
1562 if (!sas_target_priv_data)
1563 return -ENOMEM;
1564
1565 starget->hostdata = sas_target_priv_data;
1566 sas_target_priv_data->starget = starget;
1567 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1568
1569 /* RAID volumes */
1570 if (starget->channel == RAID_CHANNEL) {
1571 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1572 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1573 starget->channel);
1574 if (raid_device) {
1575 sas_target_priv_data->handle = raid_device->handle;
1576 sas_target_priv_data->sas_address = raid_device->wwid;
1577 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1578 if (ioc->is_warpdrive)
1579 sas_target_priv_data->raid_device = raid_device;
1580 raid_device->starget = starget;
1581 }
1582 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1583 return 0;
1584 }
1585
1586 /* PCIe devices */
1587 if (starget->channel == PCIE_CHANNEL) {
1588 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1589 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1590 starget->channel);
1591 if (pcie_device) {
1592 sas_target_priv_data->handle = pcie_device->handle;
1593 sas_target_priv_data->sas_address = pcie_device->wwid;
1594 sas_target_priv_data->pcie_dev = pcie_device;
1595 pcie_device->starget = starget;
1596 pcie_device->id = starget->id;
1597 pcie_device->channel = starget->channel;
1598 sas_target_priv_data->flags |=
1599 MPT_TARGET_FLAGS_PCIE_DEVICE;
1600 if (pcie_device->fast_path)
1601 sas_target_priv_data->flags |=
1602 MPT_TARGET_FASTPATH_IO;
1603 }
1604 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1605 return 0;
1606 }
1607
1608 /* sas/sata devices */
1609 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1610 rphy = dev_to_rphy(starget->dev.parent);
1611 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1612 rphy->identify.sas_address);
1613
1614 if (sas_device) {
1615 sas_target_priv_data->handle = sas_device->handle;
1616 sas_target_priv_data->sas_address = sas_device->sas_address;
1617 sas_target_priv_data->sas_dev = sas_device;
1618 sas_device->starget = starget;
1619 sas_device->id = starget->id;
1620 sas_device->channel = starget->channel;
1621 if (test_bit(sas_device->handle, ioc->pd_handles))
1622 sas_target_priv_data->flags |=
1623 MPT_TARGET_FLAGS_RAID_COMPONENT;
1624 if (sas_device->fast_path)
1625 sas_target_priv_data->flags |=
1626 MPT_TARGET_FASTPATH_IO;
1627 }
1628 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1629
1630 return 0;
1631 }
1632
1633 /**
1634 * scsih_target_destroy - target destroy routine
1635 * @starget: scsi target struct
1636 */
1637 static void
scsih_target_destroy(struct scsi_target * starget)1638 scsih_target_destroy(struct scsi_target *starget)
1639 {
1640 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1641 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1642 struct MPT3SAS_TARGET *sas_target_priv_data;
1643 struct _sas_device *sas_device;
1644 struct _raid_device *raid_device;
1645 struct _pcie_device *pcie_device;
1646 unsigned long flags;
1647
1648 sas_target_priv_data = starget->hostdata;
1649 if (!sas_target_priv_data)
1650 return;
1651
1652 if (starget->channel == RAID_CHANNEL) {
1653 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1654 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1655 starget->channel);
1656 if (raid_device) {
1657 raid_device->starget = NULL;
1658 raid_device->sdev = NULL;
1659 }
1660 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1661 goto out;
1662 }
1663
1664 if (starget->channel == PCIE_CHANNEL) {
1665 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1666 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1667 sas_target_priv_data);
1668 if (pcie_device && (pcie_device->starget == starget) &&
1669 (pcie_device->id == starget->id) &&
1670 (pcie_device->channel == starget->channel))
1671 pcie_device->starget = NULL;
1672
1673 if (pcie_device) {
1674 /*
1675 * Corresponding get() is in _scsih_target_alloc()
1676 */
1677 sas_target_priv_data->pcie_dev = NULL;
1678 pcie_device_put(pcie_device);
1679 pcie_device_put(pcie_device);
1680 }
1681 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1682 goto out;
1683 }
1684
1685 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1686 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1687 if (sas_device && (sas_device->starget == starget) &&
1688 (sas_device->id == starget->id) &&
1689 (sas_device->channel == starget->channel))
1690 sas_device->starget = NULL;
1691
1692 if (sas_device) {
1693 		/* Drop two references: the lookup above and the
1694 		 * corresponding get() taken in _scsih_target_alloc().
1695 		 */
1696 sas_target_priv_data->sas_dev = NULL;
1697 sas_device_put(sas_device);
1698
1699 sas_device_put(sas_device);
1700 }
1701 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1702
1703 out:
1704 kfree(sas_target_priv_data);
1705 starget->hostdata = NULL;
1706 }
1707
1708 /**
1709 * scsih_slave_alloc - device add routine
1710 * @sdev: scsi device struct
1711 *
1712 * Return: 0 if ok. Any other return is assumed to be an error and
1713 * the device is ignored.
1714 */
1715 static int
1716 scsih_slave_alloc(struct scsi_device *sdev)
1717 {
1718 struct Scsi_Host *shost;
1719 struct MPT3SAS_ADAPTER *ioc;
1720 struct MPT3SAS_TARGET *sas_target_priv_data;
1721 struct MPT3SAS_DEVICE *sas_device_priv_data;
1722 struct scsi_target *starget;
1723 struct _raid_device *raid_device;
1724 struct _sas_device *sas_device;
1725 struct _pcie_device *pcie_device;
1726 unsigned long flags;
1727
1728 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1729 GFP_KERNEL);
1730 if (!sas_device_priv_data)
1731 return -ENOMEM;
1732
1733 sas_device_priv_data->lun = sdev->lun;
1734 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1735
1736 starget = scsi_target(sdev);
1737 sas_target_priv_data = starget->hostdata;
1738 sas_target_priv_data->num_luns++;
1739 sas_device_priv_data->sas_target = sas_target_priv_data;
1740 sdev->hostdata = sas_device_priv_data;
1741 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1742 sdev->no_uld_attach = 1;
1743
1744 shost = dev_to_shost(&starget->dev);
1745 ioc = shost_priv(shost);
1746 if (starget->channel == RAID_CHANNEL) {
1747 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1748 raid_device = _scsih_raid_device_find_by_id(ioc,
1749 starget->id, starget->channel);
1750 if (raid_device)
1751 raid_device->sdev = sdev; /* raid is single lun */
1752 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1753 }
1754 if (starget->channel == PCIE_CHANNEL) {
1755 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1756 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1757 sas_target_priv_data->sas_address);
1758 if (pcie_device && (pcie_device->starget == NULL)) {
1759 sdev_printk(KERN_INFO, sdev,
1760 "%s : pcie_device->starget set to starget @ %d\n",
1761 __func__, __LINE__);
1762 pcie_device->starget = starget;
1763 }
1764
1765 if (pcie_device)
1766 pcie_device_put(pcie_device);
1767 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1768
1769 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1770 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1771 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1772 sas_target_priv_data->sas_address);
1773 if (sas_device && (sas_device->starget == NULL)) {
1774 sdev_printk(KERN_INFO, sdev,
1775 "%s : sas_device->starget set to starget @ %d\n",
1776 __func__, __LINE__);
1777 sas_device->starget = starget;
1778 }
1779
1780 if (sas_device)
1781 sas_device_put(sas_device);
1782
1783 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1784 }
1785
1786 return 0;
1787 }
1788
1789 /**
1790 * scsih_slave_destroy - device destroy routine
1791 * @sdev: scsi device struct
1792 */
1793 static void
1794 scsih_slave_destroy(struct scsi_device *sdev)
1795 {
1796 struct MPT3SAS_TARGET *sas_target_priv_data;
1797 struct scsi_target *starget;
1798 struct Scsi_Host *shost;
1799 struct MPT3SAS_ADAPTER *ioc;
1800 struct _sas_device *sas_device;
1801 struct _pcie_device *pcie_device;
1802 unsigned long flags;
1803
1804 if (!sdev->hostdata)
1805 return;
1806
1807 starget = scsi_target(sdev);
1808 sas_target_priv_data = starget->hostdata;
1809 sas_target_priv_data->num_luns--;
1810
1811 shost = dev_to_shost(&starget->dev);
1812 ioc = shost_priv(shost);
1813
1814 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1815 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1816 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1817 sas_target_priv_data);
1818 if (pcie_device && !sas_target_priv_data->num_luns)
1819 pcie_device->starget = NULL;
1820
1821 if (pcie_device)
1822 pcie_device_put(pcie_device);
1823
1824 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1825
1826 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1827 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1828 sas_device = __mpt3sas_get_sdev_from_target(ioc,
1829 sas_target_priv_data);
1830 if (sas_device && !sas_target_priv_data->num_luns)
1831 sas_device->starget = NULL;
1832
1833 if (sas_device)
1834 sas_device_put(sas_device);
1835 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1836 }
1837
1838 kfree(sdev->hostdata);
1839 sdev->hostdata = NULL;
1840 }
1841
1842 /**
1843 * _scsih_display_sata_capabilities - sata capabilities
1844 * @ioc: per adapter object
1845 * @handle: device handle
1846 * @sdev: scsi device struct
1847 */
1848 static void
1849 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1850 u16 handle, struct scsi_device *sdev)
1851 {
1852 Mpi2ConfigReply_t mpi_reply;
1853 Mpi2SasDevicePage0_t sas_device_pg0;
1854 u32 ioc_status;
1855 u16 flags;
1856 u32 device_info;
1857
1858 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1859 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1860 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
1861 ioc->name, __FILE__, __LINE__, __func__);
1862 return;
1863 }
1864
1865 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1866 MPI2_IOCSTATUS_MASK;
1867 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1868 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
1869 ioc->name, __FILE__, __LINE__, __func__);
1870 return;
1871 }
1872
1873 flags = le16_to_cpu(sas_device_pg0.Flags);
1874 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1875
1876 sdev_printk(KERN_INFO, sdev,
1877 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1878 "sw_preserve(%s)\n",
1879 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1880 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1881 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1882 "n",
1883 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1884 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1885 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1886 }
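
/*
 * Illustrative example (hypothetical values, not taken from real
 * hardware): a SATA disk that supports NCQ, SMART, FUA and software
 * settings preservation, but neither ATAPI nor asynchronous
 * notification, would be reported by the routine above as:
 *
 *	atapi(n), ncq(y), asyn_notify(n), smart(y), fua(y), sw_preserve(y)
 */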
1887
1888 /*
1889 * raid transport support -
1890 * Enabled for SLES11 and newer, in older kernels the driver will panic when
1891 * unloading the driver followed by a load - I believe that the subroutine
1892 * raid_class_release() is not cleaning up properly.
1893 */
1894
1895 /**
1896 * scsih_is_raid - return boolean indicating device is raid volume
1897 * @dev: the device struct object
1898 */
1899 static int
1900 scsih_is_raid(struct device *dev)
1901 {
1902 struct scsi_device *sdev = to_scsi_device(dev);
1903 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1904
1905 if (ioc->is_warpdrive)
1906 return 0;
1907 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1908 }
1909
1910 static int
1911 scsih_is_nvme(struct device *dev)
1912 {
1913 struct scsi_device *sdev = to_scsi_device(dev);
1914
1915 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
1916 }
1917
1918 /**
1919 * scsih_get_resync - get raid volume resync percent complete
1920 * @dev: the device struct object
1921 */
1922 static void
1923 scsih_get_resync(struct device *dev)
1924 {
1925 struct scsi_device *sdev = to_scsi_device(dev);
1926 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1927 static struct _raid_device *raid_device;
1928 unsigned long flags;
1929 Mpi2RaidVolPage0_t vol_pg0;
1930 Mpi2ConfigReply_t mpi_reply;
1931 u32 volume_status_flags;
1932 u8 percent_complete;
1933 u16 handle;
1934
1935 percent_complete = 0;
1936 handle = 0;
1937 if (ioc->is_warpdrive)
1938 goto out;
1939
1940 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1941 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1942 sdev->channel);
1943 if (raid_device) {
1944 handle = raid_device->handle;
1945 percent_complete = raid_device->percent_complete;
1946 }
1947 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1948
1949 if (!handle)
1950 goto out;
1951
1952 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1953 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1954 sizeof(Mpi2RaidVolPage0_t))) {
1955 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
1956 ioc->name, __FILE__, __LINE__, __func__);
1957 percent_complete = 0;
1958 goto out;
1959 }
1960
1961 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
1962 if (!(volume_status_flags &
1963 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
1964 percent_complete = 0;
1965
1966 out:
1967
1968 switch (ioc->hba_mpi_version_belonged) {
1969 case MPI2_VERSION:
1970 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
1971 break;
1972 case MPI25_VERSION:
1973 case MPI26_VERSION:
1974 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
1975 break;
1976 }
1977 }
1978
1979 /**
1980 * scsih_get_state - get raid volume level
1981 * @dev: the device struct object
1982 */
1983 static void
1984 scsih_get_state(struct device *dev)
1985 {
1986 struct scsi_device *sdev = to_scsi_device(dev);
1987 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1988 static struct _raid_device *raid_device;
1989 unsigned long flags;
1990 Mpi2RaidVolPage0_t vol_pg0;
1991 Mpi2ConfigReply_t mpi_reply;
1992 u32 volstate;
1993 enum raid_state state = RAID_STATE_UNKNOWN;
1994 u16 handle = 0;
1995
1996 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1997 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1998 sdev->channel);
1999 if (raid_device)
2000 handle = raid_device->handle;
2001 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2002
2003 if (!raid_device)
2004 goto out;
2005
2006 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2007 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2008 sizeof(Mpi2RaidVolPage0_t))) {
2009 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2010 ioc->name, __FILE__, __LINE__, __func__);
2011 goto out;
2012 }
2013
2014 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2015 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2016 state = RAID_STATE_RESYNCING;
2017 goto out;
2018 }
2019
2020 switch (vol_pg0.VolumeState) {
2021 case MPI2_RAID_VOL_STATE_OPTIMAL:
2022 case MPI2_RAID_VOL_STATE_ONLINE:
2023 state = RAID_STATE_ACTIVE;
2024 break;
2025 case MPI2_RAID_VOL_STATE_DEGRADED:
2026 state = RAID_STATE_DEGRADED;
2027 break;
2028 case MPI2_RAID_VOL_STATE_FAILED:
2029 case MPI2_RAID_VOL_STATE_MISSING:
2030 state = RAID_STATE_OFFLINE;
2031 break;
2032 }
2033 out:
2034 switch (ioc->hba_mpi_version_belonged) {
2035 case MPI2_VERSION:
2036 raid_set_state(mpt2sas_raid_template, dev, state);
2037 break;
2038 case MPI25_VERSION:
2039 case MPI26_VERSION:
2040 raid_set_state(mpt3sas_raid_template, dev, state);
2041 break;
2042 }
2043 }
2044
2045 /**
2046 * _scsih_set_level - set raid level
2047  * @ioc: per adapter object
2048 * @sdev: scsi device struct
2049 * @volume_type: volume type
2050 */
2051 static void
2052 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2053 struct scsi_device *sdev, u8 volume_type)
2054 {
2055 enum raid_level level = RAID_LEVEL_UNKNOWN;
2056
2057 switch (volume_type) {
2058 case MPI2_RAID_VOL_TYPE_RAID0:
2059 level = RAID_LEVEL_0;
2060 break;
2061 case MPI2_RAID_VOL_TYPE_RAID10:
2062 level = RAID_LEVEL_10;
2063 break;
2064 case MPI2_RAID_VOL_TYPE_RAID1E:
2065 level = RAID_LEVEL_1E;
2066 break;
2067 case MPI2_RAID_VOL_TYPE_RAID1:
2068 level = RAID_LEVEL_1;
2069 break;
2070 }
2071
2072 switch (ioc->hba_mpi_version_belonged) {
2073 case MPI2_VERSION:
2074 raid_set_level(mpt2sas_raid_template,
2075 &sdev->sdev_gendev, level);
2076 break;
2077 case MPI25_VERSION:
2078 case MPI26_VERSION:
2079 raid_set_level(mpt3sas_raid_template,
2080 &sdev->sdev_gendev, level);
2081 break;
2082 }
2083 }
2084
2085
2086 /**
2087 * _scsih_get_volume_capabilities - volume capabilities
2088 * @ioc: per adapter object
2089 * @raid_device: the raid_device object
2090 *
2091 * Return: 0 for success, else 1
2092 */
2093 static int
2094 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2095 struct _raid_device *raid_device)
2096 {
2097 Mpi2RaidVolPage0_t *vol_pg0;
2098 Mpi2RaidPhysDiskPage0_t pd_pg0;
2099 Mpi2SasDevicePage0_t sas_device_pg0;
2100 Mpi2ConfigReply_t mpi_reply;
2101 u16 sz;
2102 u8 num_pds;
2103
2104 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2105 &num_pds)) || !num_pds) {
2106 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2107 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
2108 __func__));
2109 return 1;
2110 }
2111
2112 raid_device->num_pds = num_pds;
2113 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2114 sizeof(Mpi2RaidVol0PhysDisk_t));
2115 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2116 if (!vol_pg0) {
2117 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2118 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
2119 __func__));
2120 return 1;
2121 }
2122
2123 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2124 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2125 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2126 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
2127 __func__));
2128 kfree(vol_pg0);
2129 return 1;
2130 }
2131
2132 raid_device->volume_type = vol_pg0->VolumeType;
2133
2134 /* figure out what the underlying devices are by
2135 * obtaining the device_info bits for the 1st device
2136 */
2137 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2138 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2139 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2140 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2141 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2142 le16_to_cpu(pd_pg0.DevHandle)))) {
2143 raid_device->device_info =
2144 le32_to_cpu(sas_device_pg0.DeviceInfo);
2145 }
2146 }
2147
2148 kfree(vol_pg0);
2149 return 0;
2150 }
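
/*
 * Sizing note for the allocation above: RAID volume page 0 is a
 * variable-length page, so the buffer is the fixed portion up to the
 * PhysDisk member plus one Mpi2RaidVol0PhysDisk_t entry per member
 * disk. For a hypothetical two-disk RAID1 volume this works out to:
 *
 *	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) +
 *	    2 * sizeof(Mpi2RaidVol0PhysDisk_t);
 */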
2151
2152 /**
2153 * _scsih_enable_tlr - setting TLR flags
2154 * @ioc: per adapter object
2155 * @sdev: scsi device struct
2156 *
2157  * Enable Transaction Layer Retries for tape devices when
2158  * VPD page 0x90 is present.
2159 *
2160 */
2161 static void
2162 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2163 {
2164
2165 /* only for TAPE */
2166 if (sdev->type != TYPE_TAPE)
2167 return;
2168
2169 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2170 return;
2171
2172 sas_enable_tlr(sdev);
2173 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2174 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2175 return;
2176
2177 }
2178
2179 /**
2180 * scsih_slave_configure - device configure routine.
2181 * @sdev: scsi device struct
2182 *
2183 * Return: 0 if ok. Any other return is assumed to be an error and
2184 * the device is ignored.
2185 */
2186 static int
2187 scsih_slave_configure(struct scsi_device *sdev)
2188 {
2189 struct Scsi_Host *shost = sdev->host;
2190 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2191 struct MPT3SAS_DEVICE *sas_device_priv_data;
2192 struct MPT3SAS_TARGET *sas_target_priv_data;
2193 struct _sas_device *sas_device;
2194 struct _pcie_device *pcie_device;
2195 struct _raid_device *raid_device;
2196 unsigned long flags;
2197 int qdepth;
2198 u8 ssp_target = 0;
2199 char *ds = "";
2200 char *r_level = "";
2201 u16 handle, volume_handle = 0;
2202 u64 volume_wwid = 0;
2203
2204 qdepth = 1;
2205 sas_device_priv_data = sdev->hostdata;
2206 sas_device_priv_data->configured_lun = 1;
2207 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2208 sas_target_priv_data = sas_device_priv_data->sas_target;
2209 handle = sas_target_priv_data->handle;
2210
2211 /* raid volume handling */
2212 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2213
2214 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2215 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2216 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2217 if (!raid_device) {
2218 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2219 "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
2220 __LINE__, __func__));
2221 return 1;
2222 }
2223
2224 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2225 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2226 "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
2227 __LINE__, __func__));
2228 return 1;
2229 }
2230
2231 /*
2232 * WARPDRIVE: Initialize the required data for Direct IO
2233 */
2234 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2235
2236 /* RAID Queue Depth Support
2237 * IS volume = underlying qdepth of drive type, either
2238 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2239 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2240 */
2241 if (raid_device->device_info &
2242 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2243 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2244 ds = "SSP";
2245 } else {
2246 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2247 if (raid_device->device_info &
2248 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2249 ds = "SATA";
2250 else
2251 ds = "STP";
2252 }
2253
2254 switch (raid_device->volume_type) {
2255 case MPI2_RAID_VOL_TYPE_RAID0:
2256 r_level = "RAID0";
2257 break;
2258 case MPI2_RAID_VOL_TYPE_RAID1E:
2259 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2260 if (ioc->manu_pg10.OEMIdentifier &&
2261 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2262 MFG10_GF0_R10_DISPLAY) &&
2263 !(raid_device->num_pds % 2))
2264 r_level = "RAID10";
2265 else
2266 r_level = "RAID1E";
2267 break;
2268 case MPI2_RAID_VOL_TYPE_RAID1:
2269 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2270 r_level = "RAID1";
2271 break;
2272 case MPI2_RAID_VOL_TYPE_RAID10:
2273 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2274 r_level = "RAID10";
2275 break;
2276 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2277 default:
2278 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2279 r_level = "RAIDX";
2280 break;
2281 }
2282
2283 if (!ioc->hide_ir_msg)
2284 sdev_printk(KERN_INFO, sdev,
2285 "%s: handle(0x%04x), wwid(0x%016llx),"
2286 " pd_count(%d), type(%s)\n",
2287 r_level, raid_device->handle,
2288 (unsigned long long)raid_device->wwid,
2289 raid_device->num_pds, ds);
2290
2291 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2292 blk_queue_max_hw_sectors(sdev->request_queue,
2293 MPT3SAS_RAID_MAX_SECTORS);
2294 sdev_printk(KERN_INFO, sdev,
2295 "Set queue's max_sector to: %u\n",
2296 MPT3SAS_RAID_MAX_SECTORS);
2297 }
2298
2299 scsih_change_queue_depth(sdev, qdepth);
2300
2301 /* raid transport support */
2302 if (!ioc->is_warpdrive)
2303 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2304 return 0;
2305 }
2306
2307 /* non-raid handling */
2308 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2309 if (mpt3sas_config_get_volume_handle(ioc, handle,
2310 &volume_handle)) {
2311 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2312 "failure at %s:%d/%s()!\n", ioc->name,
2313 __FILE__, __LINE__, __func__));
2314 return 1;
2315 }
2316 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2317 volume_handle, &volume_wwid)) {
2318 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2319 "failure at %s:%d/%s()!\n", ioc->name,
2320 __FILE__, __LINE__, __func__));
2321 return 1;
2322 }
2323 }
2324
2325 /* PCIe handling */
2326 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2327 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2328 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2329 sas_device_priv_data->sas_target->sas_address);
2330 if (!pcie_device) {
2331 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2332 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2333 "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
2334 __LINE__, __func__));
2335 return 1;
2336 }
2337
2338 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2339 ds = "NVMe";
2340 sdev_printk(KERN_INFO, sdev,
2341 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2342 ds, handle, (unsigned long long)pcie_device->wwid,
2343 pcie_device->port_num);
2344 if (pcie_device->enclosure_handle != 0)
2345 sdev_printk(KERN_INFO, sdev,
2346 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2347 ds,
2348 (unsigned long long)pcie_device->enclosure_logical_id,
2349 pcie_device->slot);
2350 if (pcie_device->connector_name[0] != '\0')
2351 sdev_printk(KERN_INFO, sdev,
2352 			    "%s: enclosure level(0x%04x), "
2353 			    "connector name( %s)\n", ds,
2354 pcie_device->enclosure_level,
2355 pcie_device->connector_name);
2356
2357 if (pcie_device->nvme_mdts)
2358 blk_queue_max_hw_sectors(sdev->request_queue,
2359 pcie_device->nvme_mdts/512);
2360
2361 pcie_device_put(pcie_device);
2362 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2363 scsih_change_queue_depth(sdev, qdepth);
2364 		/* Enable the QUEUE_FLAG_NOMERGES flag so that IOs are not
2365 		 * merged; this eliminates holes created during the merging
2366 		 * operation.
2367 		 */
2368 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2369 sdev->request_queue);
2370 blk_queue_virt_boundary(sdev->request_queue,
2371 ioc->page_size - 1);
2372 return 0;
2373 }
2374
2375 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2376 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2377 sas_device_priv_data->sas_target->sas_address);
2378 if (!sas_device) {
2379 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2380 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2381 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
2382 __func__));
2383 return 1;
2384 }
2385
2386 sas_device->volume_handle = volume_handle;
2387 sas_device->volume_wwid = volume_wwid;
2388 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2389 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2390 ssp_target = 1;
2391 if (sas_device->device_info &
2392 MPI2_SAS_DEVICE_INFO_SEP) {
2393 sdev_printk(KERN_WARNING, sdev,
2394 "set ignore_delay_remove for handle(0x%04x)\n",
2395 sas_device_priv_data->sas_target->handle);
2396 sas_device_priv_data->ignore_delay_remove = 1;
2397 ds = "SES";
2398 } else
2399 ds = "SSP";
2400 } else {
2401 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2402 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2403 ds = "STP";
2404 else if (sas_device->device_info &
2405 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2406 ds = "SATA";
2407 }
2408
2409 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2410 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2411 ds, handle, (unsigned long long)sas_device->sas_address,
2412 sas_device->phy, (unsigned long long)sas_device->device_name);
2413
2414 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2415
2416 sas_device_put(sas_device);
2417 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2418
2419 if (!ssp_target)
2420 _scsih_display_sata_capabilities(ioc, handle, sdev);
2421
2422
2423 scsih_change_queue_depth(sdev, qdepth);
2424
2425 if (ssp_target) {
2426 sas_read_port_mode_page(sdev);
2427 _scsih_enable_tlr(ioc, sdev);
2428 }
2429
2430 return 0;
2431 }
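
/*
 * NVMe sizing sketch for the PCIe branch above (hypothetical numbers):
 * a drive reporting nvme_mdts = 131072 bytes is capped at
 * 131072 / 512 = 256 sectors (128 KiB) per request, and
 * blk_queue_virt_boundary() keeps scatter/gather elements from crossing
 * ioc->page_size boundaries, which is assumed here to correspond to the
 * controller's NVMe PRP page size.
 */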
2432
2433 /**
2434 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2435 * @sdev: scsi device struct
2436 * @bdev: pointer to block device context
2437 * @capacity: device size (in 512 byte sectors)
2438 * @params: three element array to place output:
2439 * params[0] number of heads (max 255)
2440 * params[1] number of sectors (max 63)
2441 * params[2] number of cylinders
2442 */
2443 static int
2444 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2445 sector_t capacity, int params[])
2446 {
2447 int heads;
2448 int sectors;
2449 sector_t cylinders;
2450 ulong dummy;
2451
2452 heads = 64;
2453 sectors = 32;
2454
2455 dummy = heads * sectors;
2456 cylinders = capacity;
2457 sector_div(cylinders, dummy);
2458
2459 /*
2460 * Handle extended translation size for logical drives
2461 * > 1Gb
2462 */
2463 if ((ulong)capacity >= 0x200000) {
2464 heads = 255;
2465 sectors = 63;
2466 dummy = heads * sectors;
2467 cylinders = capacity;
2468 sector_div(cylinders, dummy);
2469 }
2470
2471 /* return result */
2472 params[0] = heads;
2473 params[1] = sectors;
2474 params[2] = cylinders;
2475
2476 return 0;
2477 }
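
/*
 * Worked example for the translation above, using a hypothetical 1 TiB
 * disk (capacity = 2147483648 512-byte sectors): since capacity is
 * >= 0x200000, the extended geometry applies, so heads = 255,
 * sectors = 63 and cylinders = 2147483648 / (255 * 63)
 * = 2147483648 / 16065 = 133674.
 */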
2478
2479 /**
2480 * _scsih_response_code - translation of device response code
2481 * @ioc: per adapter object
2482 * @response_code: response code returned by the device
2483 */
2484 static void
2485 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2486 {
2487 char *desc;
2488
2489 switch (response_code) {
2490 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2491 desc = "task management request completed";
2492 break;
2493 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2494 desc = "invalid frame";
2495 break;
2496 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2497 desc = "task management request not supported";
2498 break;
2499 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2500 desc = "task management request failed";
2501 break;
2502 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2503 desc = "task management request succeeded";
2504 break;
2505 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2506 desc = "invalid lun";
2507 break;
2508 case 0xA:
2509 desc = "overlapped tag attempted";
2510 break;
2511 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2512 desc = "task queued, however not sent to target";
2513 break;
2514 default:
2515 desc = "unknown";
2516 break;
2517 }
2518 pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
2519 ioc->name, response_code, desc);
2520 }
2521
2522 /**
2523 * _scsih_tm_done - tm completion routine
2524 * @ioc: per adapter object
2525 * @smid: system request message index
2526 * @msix_index: MSIX table index supplied by the OS
2527 * @reply: reply message frame(lower 32bit addr)
2528 * Context: none.
2529 *
2530 * The callback handler when using scsih_issue_tm.
2531 *
2532 * Return: 1 meaning mf should be freed from _base_interrupt
2533 * 0 means the mf is freed from this function.
2534 */
2535 static u8
2536 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2537 {
2538 MPI2DefaultReply_t *mpi_reply;
2539
2540 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2541 return 1;
2542 if (ioc->tm_cmds.smid != smid)
2543 return 1;
2544 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2545 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2546 if (mpi_reply) {
2547 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2548 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2549 }
2550 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2551 complete(&ioc->tm_cmds.done);
2552 return 1;
2553 }
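
/*
 * Completion protocol sketch, as implemented by _scsih_tm_done() above
 * and mpt3sas_scsih_issue_tm() below: the issuer marks tm_cmds as
 * MPT3_CMD_PENDING and waits on tm_cmds.done; this callback copies the
 * reply frame into tm_cmds.reply, sets MPT3_CMD_COMPLETE (plus
 * MPT3_CMD_REPLY_VALID when a reply frame exists), clears
 * MPT3_CMD_PENDING and completes tm_cmds.done to wake the issuer.
 */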
2554
2555 /**
2556 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2557 * @ioc: per adapter object
2558 * @handle: device handle
2559 *
2560  * During a task management request, we need to freeze the device queue.
2561 */
2562 void
2563 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2564 {
2565 struct MPT3SAS_DEVICE *sas_device_priv_data;
2566 struct scsi_device *sdev;
2567 u8 skip = 0;
2568
2569 shost_for_each_device(sdev, ioc->shost) {
2570 if (skip)
2571 continue;
2572 sas_device_priv_data = sdev->hostdata;
2573 if (!sas_device_priv_data)
2574 continue;
2575 if (sas_device_priv_data->sas_target->handle == handle) {
2576 sas_device_priv_data->sas_target->tm_busy = 1;
2577 skip = 1;
2578 ioc->ignore_loginfos = 1;
2579 }
2580 }
2581 }
2582
2583 /**
2584 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2585 * @ioc: per adapter object
2586 * @handle: device handle
2587 *
2588  * During a task management request, we need to freeze the device queue.
2589 */
2590 void
2591 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2592 {
2593 struct MPT3SAS_DEVICE *sas_device_priv_data;
2594 struct scsi_device *sdev;
2595 u8 skip = 0;
2596
2597 shost_for_each_device(sdev, ioc->shost) {
2598 if (skip)
2599 continue;
2600 sas_device_priv_data = sdev->hostdata;
2601 if (!sas_device_priv_data)
2602 continue;
2603 if (sas_device_priv_data->sas_target->handle == handle) {
2604 sas_device_priv_data->sas_target->tm_busy = 0;
2605 skip = 1;
2606 ioc->ignore_loginfos = 0;
2607 }
2608 }
2609 }
2610
2611 /**
2612 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2613 * @ioc: per adapter struct
2614 * @handle: device handle
2615 * @lun: lun number
2616 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2617 * @smid_task: smid assigned to the task
2618 * @msix_task: MSIX table index supplied by the OS
2619 * @timeout: timeout in seconds
2620 * @tr_method: Target Reset Method
2621 * Context: user
2622 *
2623 * A generic API for sending task management requests to firmware.
2624 *
2625 * The callback index is set inside `ioc->tm_cb_idx`.
2626  * The caller is responsible for checking for outstanding commands.
2627 *
2628 * Return: SUCCESS or FAILED.
2629 */
2630 int
2631 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2632 u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2633 {
2634 Mpi2SCSITaskManagementRequest_t *mpi_request;
2635 Mpi2SCSITaskManagementReply_t *mpi_reply;
2636 u16 smid = 0;
2637 u32 ioc_state;
2638 int rc;
2639
2640 lockdep_assert_held(&ioc->tm_cmds.mutex);
2641
2642 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2643 pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
2644 __func__, ioc->name);
2645 return FAILED;
2646 }
2647
2648 if (ioc->shost_recovery || ioc->remove_host ||
2649 ioc->pci_error_recovery) {
2650 pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
2651 __func__, ioc->name);
2652 return FAILED;
2653 }
2654
2655 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2656 if (ioc_state & MPI2_DOORBELL_USED) {
2657 dhsprintk(ioc, pr_info(MPT3SAS_FMT
2658 "unexpected doorbell active!\n", ioc->name));
2659 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2660 return (!rc) ? SUCCESS : FAILED;
2661 }
2662
2663 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2664 mpt3sas_base_fault_info(ioc, ioc_state &
2665 MPI2_DOORBELL_DATA_MASK);
2666 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2667 return (!rc) ? SUCCESS : FAILED;
2668 }
2669
2670 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2671 if (!smid) {
2672 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
2673 ioc->name, __func__);
2674 return FAILED;
2675 }
2676
2677 dtmprintk(ioc, pr_info(MPT3SAS_FMT
2678 "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2679 ioc->name, handle, type, smid_task, timeout, tr_method));
2680 ioc->tm_cmds.status = MPT3_CMD_PENDING;
2681 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2682 ioc->tm_cmds.smid = smid;
2683 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2684 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2685 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2686 mpi_request->DevHandle = cpu_to_le16(handle);
2687 mpi_request->TaskType = type;
2688 mpi_request->MsgFlags = tr_method;
2689 mpi_request->TaskMID = cpu_to_le16(smid_task);
2690 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2691 mpt3sas_scsih_set_tm_flag(ioc, handle);
2692 init_completion(&ioc->tm_cmds.done);
2693 mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
2694 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2695 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2696 if (mpt3sas_base_check_cmd_timeout(ioc,
2697 ioc->tm_cmds.status, mpi_request,
2698 sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
2699 rc = mpt3sas_base_hard_reset_handler(ioc,
2700 FORCE_BIG_HAMMER);
2701 rc = (!rc) ? SUCCESS : FAILED;
2702 goto out;
2703 }
2704 }
2705
2706 /* sync IRQs in case those were busy during flush. */
2707 mpt3sas_base_sync_reply_irqs(ioc);
2708
2709 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2710 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2711 mpi_reply = ioc->tm_cmds.reply;
2712 dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
2713 "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2714 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
2715 le32_to_cpu(mpi_reply->IOCLogInfo),
2716 le32_to_cpu(mpi_reply->TerminationCount)));
2717 if (ioc->logging_level & MPT_DEBUG_TM) {
2718 _scsih_response_code(ioc, mpi_reply->ResponseCode);
2719 if (mpi_reply->IOCStatus)
2720 _debug_dump_mf(mpi_request,
2721 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2722 }
2723 }
2724 rc = SUCCESS;
2725
2726 out:
2727 mpt3sas_scsih_clear_tm_flag(ioc, handle);
2728 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2729 return rc;
2730 }
2731
2732 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2733 u64 lun, u8 type, u16 smid_task, u16 msix_task,
2734 u8 timeout, u8 tr_method)
2735 {
2736 int ret;
2737
2738 mutex_lock(&ioc->tm_cmds.mutex);
2739 ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2740 msix_task, timeout, tr_method);
2741 mutex_unlock(&ioc->tm_cmds.mutex);
2742
2743 return ret;
2744 }
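
/*
 * Usage sketch: callers either hold ioc->tm_cmds.mutex and call
 * mpt3sas_scsih_issue_tm() directly, or use the locked wrapper above,
 * as the error-handling routines below do. Illustrative only:
 *
 *	rc = mpt3sas_scsih_issue_locked_tm(ioc, handle, lun,
 *		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET,
 *		0, 0, 30, 0);
 */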
2745
2746 /**
2747 * _scsih_tm_display_info - displays info about the device
2748 * @ioc: per adapter struct
2749 * @scmd: pointer to scsi command object
2750 *
2751 * Called by task management callback handlers.
2752 */
2753 static void
2754 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2755 {
2756 struct scsi_target *starget = scmd->device->sdev_target;
2757 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2758 struct _sas_device *sas_device = NULL;
2759 struct _pcie_device *pcie_device = NULL;
2760 unsigned long flags;
2761 char *device_str = NULL;
2762
2763 if (!priv_target)
2764 return;
2765 if (ioc->hide_ir_msg)
2766 device_str = "WarpDrive";
2767 else
2768 device_str = "volume";
2769
2770 scsi_print_command(scmd);
2771 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2772 starget_printk(KERN_INFO, starget,
2773 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
2774 device_str, priv_target->handle,
2775 device_str, (unsigned long long)priv_target->sas_address);
2776
2777 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2778 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2779 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2780 if (pcie_device) {
2781 starget_printk(KERN_INFO, starget,
2782 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2783 pcie_device->handle,
2784 (unsigned long long)pcie_device->wwid,
2785 pcie_device->port_num);
2786 if (pcie_device->enclosure_handle != 0)
2787 starget_printk(KERN_INFO, starget,
2788 "enclosure logical id(0x%016llx), slot(%d)\n",
2789 (unsigned long long)
2790 pcie_device->enclosure_logical_id,
2791 pcie_device->slot);
2792 if (pcie_device->connector_name[0] != '\0')
2793 starget_printk(KERN_INFO, starget,
2794 "enclosure level(0x%04x), connector name( %s)\n",
2795 pcie_device->enclosure_level,
2796 pcie_device->connector_name);
2797 pcie_device_put(pcie_device);
2798 }
2799 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2800
2801 } else {
2802 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2803 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
2804 if (sas_device) {
2805 if (priv_target->flags &
2806 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2807 starget_printk(KERN_INFO, starget,
2808 "volume handle(0x%04x), "
2809 "volume wwid(0x%016llx)\n",
2810 sas_device->volume_handle,
2811 (unsigned long long)sas_device->volume_wwid);
2812 }
2813 starget_printk(KERN_INFO, starget,
2814 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2815 sas_device->handle,
2816 (unsigned long long)sas_device->sas_address,
2817 sas_device->phy);
2818
2819 _scsih_display_enclosure_chassis_info(NULL, sas_device,
2820 NULL, starget);
2821
2822 sas_device_put(sas_device);
2823 }
2824 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2825 }
2826 }
2827
2828 /**
2829 * scsih_abort - eh threads main abort routine
2830 * @scmd: pointer to scsi command object
2831 *
2832 * Return: SUCCESS if command aborted else FAILED
2833 */
2834 static int
2835 scsih_abort(struct scsi_cmnd *scmd)
2836 {
2837 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2838 struct MPT3SAS_DEVICE *sas_device_priv_data;
2839 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2840 u16 handle;
2841 int r;
2842
2843 u8 timeout = 30;
2844 struct _pcie_device *pcie_device = NULL;
2845 sdev_printk(KERN_INFO, scmd->device,
2846 "attempting task abort! scmd(%p)\n", scmd);
2847 _scsih_tm_display_info(ioc, scmd);
2848
2849 sas_device_priv_data = scmd->device->hostdata;
2850 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2851 ioc->remove_host) {
2852 sdev_printk(KERN_INFO, scmd->device,
2853 			"device has been deleted! scmd(%p)\n", scmd);
2854 scmd->result = DID_NO_CONNECT << 16;
2855 scmd->scsi_done(scmd);
2856 r = SUCCESS;
2857 goto out;
2858 }
2859
2860 /* check for completed command */
2861 if (st == NULL || st->cb_idx == 0xFF) {
2862 scmd->result = DID_RESET << 16;
2863 r = SUCCESS;
2864 goto out;
2865 }
2866
2867 /* for hidden raid components and volumes this is not supported */
2868 if (sas_device_priv_data->sas_target->flags &
2869 MPT_TARGET_FLAGS_RAID_COMPONENT ||
2870 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2871 scmd->result = DID_RESET << 16;
2872 r = FAILED;
2873 goto out;
2874 }
2875
2876 mpt3sas_halt_firmware(ioc);
2877
2878 handle = sas_device_priv_data->sas_target->handle;
2879 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2880 if (pcie_device && (!ioc->tm_custom_handling))
2881 timeout = ioc->nvme_abort_timeout;
2882 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2883 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2884 st->smid, st->msix_io, timeout, 0);
2885 /* Command must be cleared after abort */
2886 if (r == SUCCESS && st->cb_idx != 0xFF)
2887 r = FAILED;
2888 out:
2889 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2890 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2891 if (pcie_device)
2892 pcie_device_put(pcie_device);
2893 return r;
2894 }
2895
2896 /**
2897 * scsih_dev_reset - eh threads main device reset routine
2898 * @scmd: pointer to scsi command object
2899 *
2900  * Return: SUCCESS if the device was reset else FAILED
2901 */
2902 static int
2903 scsih_dev_reset(struct scsi_cmnd *scmd)
2904 {
2905 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2906 struct MPT3SAS_DEVICE *sas_device_priv_data;
2907 struct _sas_device *sas_device = NULL;
2908 struct _pcie_device *pcie_device = NULL;
2909 u16 handle;
2910 u8 tr_method = 0;
2911 u8 tr_timeout = 30;
2912 int r;
2913
2914 struct scsi_target *starget = scmd->device->sdev_target;
2915 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2916
2917 sdev_printk(KERN_INFO, scmd->device,
2918 "attempting device reset! scmd(%p)\n", scmd);
2919 _scsih_tm_display_info(ioc, scmd);
2920
2921 sas_device_priv_data = scmd->device->hostdata;
2922 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2923 ioc->remove_host) {
2924 sdev_printk(KERN_INFO, scmd->device,
2925 			"device has been deleted! scmd(%p)\n", scmd);
2926 scmd->result = DID_NO_CONNECT << 16;
2927 scmd->scsi_done(scmd);
2928 r = SUCCESS;
2929 goto out;
2930 }
2931
2932 /* for hidden raid components obtain the volume_handle */
2933 handle = 0;
2934 if (sas_device_priv_data->sas_target->flags &
2935 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2936 sas_device = mpt3sas_get_sdev_from_target(ioc,
2937 target_priv_data);
2938 if (sas_device)
2939 handle = sas_device->volume_handle;
2940 } else
2941 handle = sas_device_priv_data->sas_target->handle;
2942
2943 if (!handle) {
2944 scmd->result = DID_RESET << 16;
2945 r = FAILED;
2946 goto out;
2947 }
2948
2949 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2950
2951 if (pcie_device && (!ioc->tm_custom_handling)) {
2952 tr_timeout = pcie_device->reset_timeout;
2953 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2954 } else
2955 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2956 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2957 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2958 tr_timeout, tr_method);
2959 /* Check for busy commands after reset */
2960 if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2961 r = FAILED;
2962 out:
2963 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
2964 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2965
2966 if (sas_device)
2967 sas_device_put(sas_device);
2968 if (pcie_device)
2969 pcie_device_put(pcie_device);
2970
2971 return r;
2972 }
2973
2974 /**
2975 * scsih_target_reset - eh threads main target reset routine
2976 * @scmd: pointer to scsi command object
2977 *
2978  * Return: SUCCESS if the target was reset else FAILED
2979 */
2980 static int
2981 scsih_target_reset(struct scsi_cmnd *scmd)
2982 {
2983 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2984 struct MPT3SAS_DEVICE *sas_device_priv_data;
2985 struct _sas_device *sas_device = NULL;
2986 struct _pcie_device *pcie_device = NULL;
2987 u16 handle;
2988 u8 tr_method = 0;
2989 u8 tr_timeout = 30;
2990 int r;
2991 struct scsi_target *starget = scmd->device->sdev_target;
2992 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2993
2994 starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
2995 scmd);
2996 _scsih_tm_display_info(ioc, scmd);
2997
2998 sas_device_priv_data = scmd->device->hostdata;
2999 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3000 ioc->remove_host) {
3001 		starget_printk(KERN_INFO, starget, "target has been deleted! scmd(%p)\n",
3002 scmd);
3003 scmd->result = DID_NO_CONNECT << 16;
3004 scmd->scsi_done(scmd);
3005 r = SUCCESS;
3006 goto out;
3007 }
3008
3009 /* for hidden raid components obtain the volume_handle */
3010 handle = 0;
3011 if (sas_device_priv_data->sas_target->flags &
3012 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3013 sas_device = mpt3sas_get_sdev_from_target(ioc,
3014 target_priv_data);
3015 if (sas_device)
3016 handle = sas_device->volume_handle;
3017 } else
3018 handle = sas_device_priv_data->sas_target->handle;
3019
3020 if (!handle) {
3021 scmd->result = DID_RESET << 16;
3022 r = FAILED;
3023 goto out;
3024 }
3025
3026 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3027
3028 if (pcie_device && (!ioc->tm_custom_handling)) {
3029 tr_timeout = pcie_device->reset_timeout;
3030 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3031 } else
3032 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3033 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3034 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3035 tr_timeout, tr_method);
3036 /* Check for busy commands after reset */
3037 if (r == SUCCESS && atomic_read(&starget->target_busy))
3038 r = FAILED;
3039 out:
3040 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
3041 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3042
3043 if (sas_device)
3044 sas_device_put(sas_device);
3045 if (pcie_device)
3046 pcie_device_put(pcie_device);
3047 return r;
3048 }
3049
3050
3051 /**
3052 * scsih_host_reset - eh threads main host reset routine
3053 * @scmd: pointer to scsi command object
3054 *
3055  * Return: SUCCESS if the host was reset else FAILED
3056 */
3057 static int
3058 scsih_host_reset(struct scsi_cmnd *scmd)
3059 {
3060 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3061 int r, retval;
3062
3063 pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
3064 ioc->name, scmd);
3065 scsi_print_command(scmd);
3066
3067 if (ioc->is_driver_loading || ioc->remove_host) {
3068 pr_info(MPT3SAS_FMT "Blocking the host reset\n",
3069 ioc->name);
3070 r = FAILED;
3071 goto out;
3072 }
3073
3074 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3075 r = (retval < 0) ? FAILED : SUCCESS;
3076 out:
3077 pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
3078 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3079
3080 return r;
3081 }
3082
3083 /**
3084 * _scsih_fw_event_add - insert and queue up fw_event
3085 * @ioc: per adapter object
3086 * @fw_event: object describing the event
3087 * Context: This function will acquire ioc->fw_event_lock.
3088 *
3089  * This adds the firmware event object to the linked list, then queues it up
3090  * to be processed from user context.
3091 */
3092 static void
3093 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3094 {
3095 unsigned long flags;
3096
3097 if (ioc->firmware_event_thread == NULL)
3098 return;
3099
3100 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3101 fw_event_work_get(fw_event);
3102 INIT_LIST_HEAD(&fw_event->list);
3103 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3104 INIT_WORK(&fw_event->work, _firmware_event_work);
3105 fw_event_work_get(fw_event);
3106 queue_work(ioc->firmware_event_thread, &fw_event->work);
3107 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3108 }
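
/*
 * Reference-count sketch for fw_event objects, based on the code above
 * and on _scsih_fw_event_cleanup_queue() below: alloc_fw_event_work()
 * hands the caller one reference, _scsih_fw_event_add() takes one more
 * for the fw_event_list and one for the queued work, and the typical
 * caller drops its own reference once the event is queued:
 *
 *	fw_event = alloc_fw_event_work(0);
 *	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
 *	fw_event->ioc = ioc;
 *	_scsih_fw_event_add(ioc, fw_event);
 *	fw_event_work_put(fw_event);
 *
 * The list and work references are dropped when the event is dequeued
 * and processed, or cancelled during queue cleanup.
 */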
3109
3110 /**
3111 * _scsih_fw_event_del_from_list - delete fw_event from the list
3112 * @ioc: per adapter object
3113 * @fw_event: object describing the event
3114 * Context: This function will acquire ioc->fw_event_lock.
3115 *
3116 * If the fw_event is on the fw_event_list, remove it and do a put.
3117 */
3118 static void
3119 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3120 *fw_event)
3121 {
3122 unsigned long flags;
3123
3124 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3125 if (!list_empty(&fw_event->list)) {
3126 list_del_init(&fw_event->list);
3127 fw_event_work_put(fw_event);
3128 }
3129 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3130 }
3131
3132
3133 /**
3134 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3135 * @ioc: per adapter object
3136 * @event_data: trigger event data
3137 */
3138 void
3139 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3140 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3141 {
3142 struct fw_event_work *fw_event;
3143 u16 sz;
3144
3145 if (ioc->is_driver_loading)
3146 return;
3147 sz = sizeof(*event_data);
3148 fw_event = alloc_fw_event_work(sz);
3149 if (!fw_event)
3150 return;
3151 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3152 fw_event->ioc = ioc;
3153 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3154 _scsih_fw_event_add(ioc, fw_event);
3155 fw_event_work_put(fw_event);
3156 }
3157
3158 /**
3159 * _scsih_error_recovery_delete_devices - remove devices not responding
3160 * @ioc: per adapter object
3161 */
3162 static void
3163 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3164 {
3165 struct fw_event_work *fw_event;
3166
3167 if (ioc->is_driver_loading)
3168 return;
3169 fw_event = alloc_fw_event_work(0);
3170 if (!fw_event)
3171 return;
3172 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3173 fw_event->ioc = ioc;
3174 _scsih_fw_event_add(ioc, fw_event);
3175 fw_event_work_put(fw_event);
3176 }
3177
3178 /**
3179 * mpt3sas_port_enable_complete - port enable completed (fake event)
3180 * @ioc: per adapter object
3181 */
3182 void
3183 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3184 {
3185 struct fw_event_work *fw_event;
3186
3187 fw_event = alloc_fw_event_work(0);
3188 if (!fw_event)
3189 return;
3190 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3191 fw_event->ioc = ioc;
3192 _scsih_fw_event_add(ioc, fw_event);
3193 fw_event_work_put(fw_event);
3194 }
3195
3196 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3197 {
3198 unsigned long flags;
3199 struct fw_event_work *fw_event = NULL;
3200
3201 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3202 if (!list_empty(&ioc->fw_event_list)) {
3203 fw_event = list_first_entry(&ioc->fw_event_list,
3204 struct fw_event_work, list);
3205 list_del_init(&fw_event->list);
3206 }
3207 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3208
3209 return fw_event;
3210 }
3211
3212 /**
3213 * _scsih_fw_event_cleanup_queue - cleanup event queue
3214 * @ioc: per adapter object
3215 *
3216 * Walk the firmware event queue, either killing timers, or waiting
3217 * for outstanding events to complete
3218 */
3219 static void
3220 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3221 {
3222 struct fw_event_work *fw_event;
3223
3224 if (list_empty(&ioc->fw_event_list) ||
3225 !ioc->firmware_event_thread || in_interrupt())
3226 return;
3227
3228 while ((fw_event = dequeue_next_fw_event(ioc))) {
3229 /*
3230 * Wait on the fw_event to complete. If this returns 1, then
3231 * the event was never executed, and we need a put for the
3232 * reference the work had on the fw_event.
3233 *
3234 * If it did execute, we wait for it to finish, and the put will
3235 * happen from _firmware_event_work()
3236 */
3237 if (cancel_work_sync(&fw_event->work))
3238 fw_event_work_put(fw_event);
3239
3240 fw_event_work_put(fw_event);
3241 }
3242 }
3243
3244 /**
3245 * _scsih_internal_device_block - block the sdev device
3246 * @sdev: per device object
3247 * @sas_device_priv_data : per device driver private data
3248 *
3249  * Make sure the device is blocked without error; if not,
3250  * print an error.
3251 */
3252 static void
3253 _scsih_internal_device_block(struct scsi_device *sdev,
3254 struct MPT3SAS_DEVICE *sas_device_priv_data)
3255 {
3256 int r = 0;
3257
3258 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3259 sas_device_priv_data->sas_target->handle);
3260 sas_device_priv_data->block = 1;
3261
3262 r = scsi_internal_device_block_nowait(sdev);
3263 if (r == -EINVAL)
3264 sdev_printk(KERN_WARNING, sdev,
3265 "device_block failed with return(%d) for handle(0x%04x)\n",
3266 r, sas_device_priv_data->sas_target->handle);
3267 }
3268
3269 /**
3270 * _scsih_internal_device_unblock - unblock the sdev device
3271 * @sdev: per device object
3272 * @sas_device_priv_data : per device driver private data
3273  * Make sure the device is unblocked without error; if not, retry
3274  * by blocking and then unblocking.
3275 */
3276
3277 static void
3278 _scsih_internal_device_unblock(struct scsi_device *sdev,
3279 struct MPT3SAS_DEVICE *sas_device_priv_data)
3280 {
3281 int r = 0;
3282
3283 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3284 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3285 sas_device_priv_data->block = 0;
3286 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3287 if (r == -EINVAL) {
3288 /* The device has been set to SDEV_RUNNING by SD layer during
3289 * device addition but the request queue is still stopped by
3290 * our earlier block call. We need to perform a block again
3291 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3292
3293 sdev_printk(KERN_WARNING, sdev,
3294 "device_unblock failed with return(%d) for handle(0x%04x) "
3295 "performing a block followed by an unblock\n",
3296 r, sas_device_priv_data->sas_target->handle);
3297 sas_device_priv_data->block = 1;
3298 r = scsi_internal_device_block_nowait(sdev);
3299 if (r)
3300 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3301 "failed with return(%d) for handle(0x%04x)\n",
3302 r, sas_device_priv_data->sas_target->handle);
3303
3304 sas_device_priv_data->block = 0;
3305 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3306 if (r)
3307 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3308 " failed with return(%d) for handle(0x%04x)\n",
3309 r, sas_device_priv_data->sas_target->handle);
3310 }
3311 }
3312
3313 /**
3314 * _scsih_ublock_io_all_device - unblock every device
3315 * @ioc: per adapter object
3316 *
3317 * change the device state from block to running
3318 */
3319 static void
3320 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3321 {
3322 struct MPT3SAS_DEVICE *sas_device_priv_data;
3323 struct scsi_device *sdev;
3324
3325 shost_for_each_device(sdev, ioc->shost) {
3326 sas_device_priv_data = sdev->hostdata;
3327 if (!sas_device_priv_data)
3328 continue;
3329 if (!sas_device_priv_data->block)
3330 continue;
3331
3332 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3333 "device_running, handle(0x%04x)\n",
3334 sas_device_priv_data->sas_target->handle));
3335 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3336 }
3337 }
3338
3339
3340 /**
3341 * _scsih_ublock_io_device - prepare device to be deleted
3342 * @ioc: per adapter object
3343 * @sas_address: sas address
3344 *
3345 * unblock then put device in offline state
3346 */
3347 static void
3348 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3349 {
3350 struct MPT3SAS_DEVICE *sas_device_priv_data;
3351 struct scsi_device *sdev;
3352
3353 shost_for_each_device(sdev, ioc->shost) {
3354 sas_device_priv_data = sdev->hostdata;
3355 if (!sas_device_priv_data)
3356 continue;
3357 if (sas_device_priv_data->sas_target->sas_address
3358 != sas_address)
3359 continue;
3360 if (sas_device_priv_data->block)
3361 _scsih_internal_device_unblock(sdev,
3362 sas_device_priv_data);
3363 }
3364 }
3365
3366 /**
3367 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3368 * @ioc: per adapter object
3369 *
3370 * During device pull we need to appropriately set the sdev state.
3371 */
3372 static void
3373 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3374 {
3375 struct MPT3SAS_DEVICE *sas_device_priv_data;
3376 struct scsi_device *sdev;
3377
3378 shost_for_each_device(sdev, ioc->shost) {
3379 sas_device_priv_data = sdev->hostdata;
3380 if (!sas_device_priv_data)
3381 continue;
3382 if (sas_device_priv_data->block)
3383 continue;
3384 if (sas_device_priv_data->ignore_delay_remove) {
3385 sdev_printk(KERN_INFO, sdev,
3386 "%s skip device_block for SES handle(0x%04x)\n",
3387 __func__, sas_device_priv_data->sas_target->handle);
3388 continue;
3389 }
3390 _scsih_internal_device_block(sdev, sas_device_priv_data);
3391 }
3392 }
3393
3394 /**
3395 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3396 * @ioc: per adapter object
3397 * @handle: device handle
3398 *
3399 * During device pull we need to appropriately set the sdev state.
3400 */
3401 static void
3402 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3403 {
3404 struct MPT3SAS_DEVICE *sas_device_priv_data;
3405 struct scsi_device *sdev;
3406 struct _sas_device *sas_device;
3407
3408 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3409
3410 shost_for_each_device(sdev, ioc->shost) {
3411 sas_device_priv_data = sdev->hostdata;
3412 if (!sas_device_priv_data)
3413 continue;
3414 if (sas_device_priv_data->sas_target->handle != handle)
3415 continue;
3416 if (sas_device_priv_data->block)
3417 continue;
3418 if (sas_device && sas_device->pend_sas_rphy_add)
3419 continue;
3420 if (sas_device_priv_data->ignore_delay_remove) {
3421 sdev_printk(KERN_INFO, sdev,
3422 "%s skip device_block for SES handle(0x%04x)\n",
3423 __func__, sas_device_priv_data->sas_target->handle);
3424 continue;
3425 }
3426 _scsih_internal_device_block(sdev, sas_device_priv_data);
3427 }
3428
3429 if (sas_device)
3430 sas_device_put(sas_device);
3431 }
3432
3433 /**
3434 * _scsih_block_io_to_children_attached_to_ex
3435 * @ioc: per adapter object
3436 * @sas_expander: the sas_node object for the expander
3437 *
3438 * This routine sets the sdev state to SDEV_BLOCK for all devices
3439 * attached to this expander. This function is called when the
3440 * expander is pulled.
3441 */
3442 static void
3443 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3444 struct _sas_node *sas_expander)
3445 {
3446 struct _sas_port *mpt3sas_port;
3447 struct _sas_device *sas_device;
3448 struct _sas_node *expander_sibling;
3449 unsigned long flags;
3450
3451 if (!sas_expander)
3452 return;
3453
3454 list_for_each_entry(mpt3sas_port,
3455 &sas_expander->sas_port_list, port_list) {
3456 if (mpt3sas_port->remote_identify.device_type ==
3457 SAS_END_DEVICE) {
3458 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3459 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3460 mpt3sas_port->remote_identify.sas_address);
3461 if (sas_device) {
3462 set_bit(sas_device->handle,
3463 ioc->blocking_handles);
3464 sas_device_put(sas_device);
3465 }
3466 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3467 }
3468 }
3469
3470 list_for_each_entry(mpt3sas_port,
3471 &sas_expander->sas_port_list, port_list) {
3472
3473 if (mpt3sas_port->remote_identify.device_type ==
3474 SAS_EDGE_EXPANDER_DEVICE ||
3475 mpt3sas_port->remote_identify.device_type ==
3476 SAS_FANOUT_EXPANDER_DEVICE) {
3477 expander_sibling =
3478 mpt3sas_scsih_expander_find_by_sas_address(
3479 ioc, mpt3sas_port->remote_identify.sas_address);
3480 _scsih_block_io_to_children_attached_to_ex(ioc,
3481 expander_sibling);
3482 }
3483 }
3484 }
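/*
 * Note: the walk above is done in two passes.  End devices are not
 * blocked inline; their handles are recorded in ioc->blocking_handles
 * and later drained by _scsih_check_topo_delete_events(), while edge
 * and fanout expander children are handled by recursing into this
 * routine.
 */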
3485
3486 /**
3487 * _scsih_block_io_to_children_attached_directly
3488 * @ioc: per adapter object
3489 * @event_data: topology change event data
3490 *
3491 * This routine sets the sdev state to SDEV_BLOCK for all directly
3492 * attached devices during device pull.
3493 */
3494 static void
3495 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3496 Mpi2EventDataSasTopologyChangeList_t *event_data)
3497 {
3498 int i;
3499 u16 handle;
3500 u16 reason_code;
3501
3502 for (i = 0; i < event_data->NumEntries; i++) {
3503 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3504 if (!handle)
3505 continue;
3506 reason_code = event_data->PHY[i].PhyStatus &
3507 MPI2_EVENT_SAS_TOPO_RC_MASK;
3508 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3509 _scsih_block_io_device(ioc, handle);
3510 }
3511 }
3512
3513 /**
3514 * _scsih_block_io_to_pcie_children_attached_directly
3515 * @ioc: per adapter object
3516 * @event_data: topology change event data
3517 *
3518 * This routine sets the sdev state to SDEV_BLOCK for all directly
3519 * attached devices during device pull/reconnect.
3520 */
3521 static void
3522 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3523 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3524 {
3525 int i;
3526 u16 handle;
3527 u16 reason_code;
3528
3529 for (i = 0; i < event_data->NumEntries; i++) {
3530 handle =
3531 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3532 if (!handle)
3533 continue;
3534 reason_code = event_data->PortEntry[i].PortStatus;
3535 if (reason_code ==
3536 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3537 _scsih_block_io_device(ioc, handle);
3538 }
3539 }
3540 /**
3541 * _scsih_tm_tr_send - send task management request
3542 * @ioc: per adapter object
3543 * @handle: device handle
3544 * Context: interrupt time.
3545 *
3546 * This code initiates the device removal handshake protocol
3547 * with the controller firmware. This function issues a target reset
3548 * using the high priority request queue. A sas iounit control
3549 * request (MPI2_SAS_OP_REMOVE_DEVICE) is then sent from the completion.
3550 *
3551 * This is designed to send multiple task management requests at the
3552 * same time to the fifo. If the fifo is full, we will append the
3553 * request and process it in a future completion.
3554 */
3555 static void
3556 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3557 {
3558 Mpi2SCSITaskManagementRequest_t *mpi_request;
3559 u16 smid;
3560 struct _sas_device *sas_device = NULL;
3561 struct _pcie_device *pcie_device = NULL;
3562 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3563 u64 sas_address = 0;
3564 unsigned long flags;
3565 struct _tr_list *delayed_tr;
3566 u32 ioc_state;
3567 u8 tr_method = 0;
3568
3569 if (ioc->pci_error_recovery) {
3570 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3571 "%s: host in pci error recovery: handle(0x%04x)\n",
3572 __func__, ioc->name,
3573 handle));
3574 return;
3575 }
3576 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3577 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3578 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3579 "%s: host is not operational: handle(0x%04x)\n",
3580 __func__, ioc->name,
3581 handle));
3582 return;
3583 }
3584
3585 /* if PD, then return */
3586 if (test_bit(handle, ioc->pd_handles))
3587 return;
3588
3589 clear_bit(handle, ioc->pend_os_device_add);
3590
3591 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3592 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3593 if (sas_device && sas_device->starget &&
3594 sas_device->starget->hostdata) {
3595 sas_target_priv_data = sas_device->starget->hostdata;
3596 sas_target_priv_data->deleted = 1;
3597 sas_address = sas_device->sas_address;
3598 }
3599 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3600 if (!sas_device) {
3601 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3602 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3603 if (pcie_device && pcie_device->starget &&
3604 pcie_device->starget->hostdata) {
3605 sas_target_priv_data = pcie_device->starget->hostdata;
3606 sas_target_priv_data->deleted = 1;
3607 sas_address = pcie_device->wwid;
3608 }
3609 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3610 if (pcie_device && (!ioc->tm_custom_handling))
3611 tr_method =
3612 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3613 else
3614 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3615 }
3616 if (sas_target_priv_data) {
3617 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3618 "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3619 ioc->name, handle,
3620 (unsigned long long)sas_address));
3621 if (sas_device) {
3622 if (sas_device->enclosure_handle != 0)
3623 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3624 "setting delete flag:enclosure logical "
3625 "id(0x%016llx), slot(%d)\n", ioc->name,
3626 (unsigned long long)
3627 sas_device->enclosure_logical_id,
3628 sas_device->slot));
3629 if (sas_device->connector_name[0] != '\0')
3630 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3631 "setting delete flag: enclosure "
3632 "level(0x%04x), connector name( %s)\n",
3633 ioc->name, sas_device->enclosure_level,
3634 sas_device->connector_name));
3635 } else if (pcie_device) {
3636 if (pcie_device->enclosure_handle != 0)
3637 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3638 "setting delete flag: logical "
3639 "id(0x%016llx), slot(%d)\n", ioc->name,
3640 (unsigned long long)
3641 pcie_device->enclosure_logical_id,
3642 pcie_device->slot));
3643 if (pcie_device->connector_name[0] != '\0')
3644 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3645 "setting delete flag:, enclosure "
3646 "level(0x%04x), "
3647 "connector name( %s)\n", ioc->name,
3648 pcie_device->enclosure_level,
3649 pcie_device->connector_name));
3650 }
3651 _scsih_ublock_io_device(ioc, sas_address);
3652 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3653 }
3654
3655 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3656 if (!smid) {
3657 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3658 if (!delayed_tr)
3659 goto out;
3660 INIT_LIST_HEAD(&delayed_tr->list);
3661 delayed_tr->handle = handle;
3662 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3663 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3664 "DELAYED:tr:handle(0x%04x), (open)\n",
3665 ioc->name, handle));
3666 goto out;
3667 }
3668
3669 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3670 "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3671 ioc->name, handle, smid,
3672 ioc->tm_tr_cb_idx));
3673 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3674 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3675 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3676 mpi_request->DevHandle = cpu_to_le16(handle);
3677 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3678 mpi_request->MsgFlags = tr_method;
3679 set_bit(handle, ioc->device_remove_in_progress);
3680 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
3681 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3682
3683 out:
3684 if (sas_device)
3685 sas_device_put(sas_device);
3686 if (pcie_device)
3687 pcie_device_put(pcie_device);
3688 }
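/*
 * Device removal handshake, as implemented above and in the completion
 * routines that follow:
 *
 *   _scsih_tm_tr_send()           - mark the target deleted, unblock I/O,
 *                                   set device_remove_in_progress and send
 *                                   a target reset on the hi-priority queue
 *   _scsih_tm_tr_complete()       - on reset completion, send a sas iounit
 *                                   control MPI2_SAS_OP_REMOVE_DEVICE
 *   _scsih_sas_control_complete() - clear device_remove_in_progress
 *
 * When no smid is available at either step, the request is parked on
 * delayed_tr_list/delayed_sc_list and replayed from the pending-TM and
 * pending-internal-cmds checkers below.
 */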
3689
3690 /**
3691 * _scsih_tm_tr_complete - target reset completion routine
3692 * @ioc: per adapter object
3693 * @smid: system request message index
3694 * @msix_index: MSIX table index supplied by the OS
3695 * @reply: reply message frame(lower 32bit addr)
3696 * Context: interrupt time.
3697 *
3698 * This is the target reset completion routine.
3699 * This code is part of the code to initiate the device removal
3700 * handshake protocol with controller firmware.
3701 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3702 *
3703 * Return: 1 meaning mf should be freed from _base_interrupt
3704 * 0 means the mf is freed from this function.
3705 */
3706 static u8
3707 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3708 u32 reply)
3709 {
3710 u16 handle;
3711 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3712 Mpi2SCSITaskManagementReply_t *mpi_reply =
3713 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3714 Mpi2SasIoUnitControlRequest_t *mpi_request;
3715 u16 smid_sas_ctrl;
3716 u32 ioc_state;
3717 struct _sc_list *delayed_sc;
3718
3719 if (ioc->pci_error_recovery) {
3720 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3721 "%s: host in pci error recovery\n", __func__,
3722 ioc->name));
3723 return 1;
3724 }
3725 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3726 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3727 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3728 "%s: host is not operational\n", __func__, ioc->name));
3729 return 1;
3730 }
3731 if (unlikely(!mpi_reply)) {
3732 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
3733 ioc->name, __FILE__, __LINE__, __func__);
3734 return 1;
3735 }
3736 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3737 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3738 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3739 dewtprintk(ioc, pr_err(MPT3SAS_FMT
3740 "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3741 ioc->name, handle,
3742 le16_to_cpu(mpi_reply->DevHandle), smid));
3743 return 0;
3744 }
3745
3746 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3747 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3748 "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
3749 "loginfo(0x%08x), completed(%d)\n", ioc->name,
3750 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3751 le32_to_cpu(mpi_reply->IOCLogInfo),
3752 le32_to_cpu(mpi_reply->TerminationCount)));
3753
3754 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3755 if (!smid_sas_ctrl) {
3756 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
3757 if (!delayed_sc)
3758 return _scsih_check_for_pending_tm(ioc, smid);
3759 INIT_LIST_HEAD(&delayed_sc->list);
3760 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3761 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3762 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3763 "DELAYED:sc:handle(0x%04x), (open)\n",
3764 ioc->name, handle));
3765 return _scsih_check_for_pending_tm(ioc, smid);
3766 }
3767
3768 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3769 "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3770 ioc->name, handle, smid_sas_ctrl,
3771 ioc->tm_sas_control_cb_idx));
3772 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3773 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3774 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3775 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3776 mpi_request->DevHandle = mpi_request_tm->DevHandle;
3777 mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
3778
3779 return _scsih_check_for_pending_tm(ioc, smid);
3780 }
3781
3782
3783 /**
3784 * _scsih_sas_control_complete - completion routine
3785 * @ioc: per adapter object
3786 * @smid: system request message index
3787 * @msix_index: MSIX table index supplied by the OS
3788 * @reply: reply message frame(lower 32bit addr)
3789 * Context: interrupt time.
3790 *
3791 * This is the sas iounit control completion routine.
3792 * This code is part of the code to initiate the device removal
3793 * handshake protocol with controller firmware.
3794 *
3795 * Return: 1 meaning mf should be freed from _base_interrupt
3796 * 0 means the mf is freed from this function.
3797 */
3798 static u8
3799 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3800 u8 msix_index, u32 reply)
3801 {
3802 Mpi2SasIoUnitControlReply_t *mpi_reply =
3803 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3804
3805 if (likely(mpi_reply)) {
3806 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3807 "sc_complete:handle(0x%04x), (open) "
3808 "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3809 ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
3810 le16_to_cpu(mpi_reply->IOCStatus),
3811 le32_to_cpu(mpi_reply->IOCLogInfo)));
3812 if (le16_to_cpu(mpi_reply->IOCStatus) ==
3813 MPI2_IOCSTATUS_SUCCESS) {
3814 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3815 ioc->device_remove_in_progress);
3816 }
3817 } else {
3818 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
3819 ioc->name, __FILE__, __LINE__, __func__);
3820 }
3821 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3822 }
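/*
 * Note: the device_remove_in_progress bit set in _scsih_tm_tr_send() is
 * cleared here only when firmware reports MPI2_IOCSTATUS_SUCCESS for the
 * REMOVE_DEVICE operation; on failure the bit is left set.
 */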
3823
3824 /**
3825 * _scsih_tm_tr_volume_send - send target reset request for volumes
3826 * @ioc: per adapter object
3827 * @handle: device handle
3828 * Context: interrupt time.
3829 *
3830 * This is designed to send multiple task management requests at the
3831 * same time to the fifo. If the fifo is full, we will append the
3832 * request and process it in a future completion.
3833 */
3834 static void
3835 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3836 {
3837 Mpi2SCSITaskManagementRequest_t *mpi_request;
3838 u16 smid;
3839 struct _tr_list *delayed_tr;
3840
3841 if (ioc->pci_error_recovery) {
3842 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3843 "%s: host reset in progress!\n",
3844 __func__, ioc->name));
3845 return;
3846 }
3847
3848 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
3849 if (!smid) {
3850 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3851 if (!delayed_tr)
3852 return;
3853 INIT_LIST_HEAD(&delayed_tr->list);
3854 delayed_tr->handle = handle;
3855 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3856 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3857 "DELAYED:tr:handle(0x%04x), (open)\n",
3858 ioc->name, handle));
3859 return;
3860 }
3861
3862 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3863 "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3864 ioc->name, handle, smid,
3865 ioc->tm_tr_volume_cb_idx));
3866 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3867 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3868 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3869 mpi_request->DevHandle = cpu_to_le16(handle);
3870 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3871 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
3872 }
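/*
 * Note: unlike _scsih_tm_tr_send(), the volume variant does not chain a
 * sas iounit control request; its completion routine below only logs the
 * result and services whatever is queued on the delayed TR lists.
 */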
3873
3874 /**
3875 * _scsih_tm_volume_tr_complete - target reset completion
3876 * @ioc: per adapter object
3877 * @smid: system request message index
3878 * @msix_index: MSIX table index supplied by the OS
3879 * @reply: reply message frame(lower 32bit addr)
3880 * Context: interrupt time.
3881 *
3882 * Return: 1 meaning mf should be freed from _base_interrupt
3883 * 0 means the mf is freed from this function.
3884 */
3885 static u8
3886 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3887 u8 msix_index, u32 reply)
3888 {
3889 u16 handle;
3890 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3891 Mpi2SCSITaskManagementReply_t *mpi_reply =
3892 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3893
3894 if (ioc->shost_recovery || ioc->pci_error_recovery) {
3895 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3896 "%s: host reset in progress!\n",
3897 __func__, ioc->name));
3898 return 1;
3899 }
3900 if (unlikely(!mpi_reply)) {
3901 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
3902 ioc->name, __FILE__, __LINE__, __func__);
3903 return 1;
3904 }
3905
3906 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3907 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3908 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3909 dewtprintk(ioc, pr_err(MPT3SAS_FMT
3910 "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3911 ioc->name, handle,
3912 le16_to_cpu(mpi_reply->DevHandle), smid));
3913 return 0;
3914 }
3915
3916 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3917 "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
3918 "loginfo(0x%08x), completed(%d)\n", ioc->name,
3919 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3920 le32_to_cpu(mpi_reply->IOCLogInfo),
3921 le32_to_cpu(mpi_reply->TerminationCount)));
3922
3923 return _scsih_check_for_pending_tm(ioc, smid);
3924 }
3925
3926 /**
3927 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
3928 * @ioc: per adapter object
3929 * @smid: system request message index
3930 * @event: Event ID
3931 * @event_context: used to track events uniquely
3932 *
3933 * Context - processed in interrupt context.
3934 */
3935 static void
3936 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3937 U32 event_context)
3938 {
3939 Mpi2EventAckRequest_t *ack_request;
3940 int i = smid - ioc->internal_smid;
3941 unsigned long flags;
3942
3943 /* Without releasing the smid just update the
3944 * call back index and reuse the same smid for
3945 * processing this delayed request
3946 */
3947 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3948 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
3949 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3950
3951 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3952 "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
3953 ioc->name, le16_to_cpu(event), smid,
3954 ioc->base_cb_idx));
3955 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
3956 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
3957 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
3958 ack_request->Event = event;
3959 ack_request->EventContext = event_context;
3960 ack_request->VF_ID = 0; /* TODO */
3961 ack_request->VP_ID = 0;
3962 mpt3sas_base_put_smid_default(ioc, smid);
3963 }
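/*
 * Note: no new smid is allocated here.  The internal_lookup cb_idx for
 * this smid is switched to base_cb_idx under scsi_lookup_lock, so the
 * EventAck completion is routed to the base driver callback while the
 * original smid is reused for the delayed request.
 */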
3964
3965 /**
3966 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
3967 * sas_io_unit_ctrl messages
3968 * @ioc: per adapter object
3969 * @smid: system request message index
3970 * @handle: device handle
3971 *
3972 * Context - processed in interrupt context.
3973 */
3974 static void
3975 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
3976 u16 smid, u16 handle)
3977 {
3978 Mpi2SasIoUnitControlRequest_t *mpi_request;
3979 u32 ioc_state;
3980 int i = smid - ioc->internal_smid;
3981 unsigned long flags;
3982
3983 if (ioc->remove_host) {
3984 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3985 "%s: host has been removed\n",
3986 __func__, ioc->name));
3987 return;
3988 } else if (ioc->pci_error_recovery) {
3989 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3990 "%s: host in pci error recovery\n",
3991 __func__, ioc->name));
3992 return;
3993 }
3994 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3995 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3996 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3997 "%s: host is not operational\n",
3998 __func__, ioc->name));
3999 return;
4000 }
4001
4002 /* Without releasing the smid just update the
4003 * call back index and reuse the same smid for
4004 * processing this delayed request
4005 */
4006 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4007 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4008 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4009
4010 dewtprintk(ioc, pr_info(MPT3SAS_FMT
4011 "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4012 ioc->name, handle, smid,
4013 ioc->tm_sas_control_cb_idx));
4014 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4015 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4016 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4017 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4018 mpi_request->DevHandle = cpu_to_le16(handle);
4019 mpt3sas_base_put_smid_default(ioc, smid);
4020 }
4021
4022 /**
4023 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4024 * @ioc: per adapter object
4025 * @smid: system request message index
4026 *
4027 * Context: Executed in interrupt context
4028 *
4029 * This will check the delayed internal messages list, and process the
4030 * next request.
4031 *
4032 * Return: 1 meaning mf should be freed from _base_interrupt
4033 * 0 means the mf is freed from this function.
4034 */
4035 u8
4036 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4037 {
4038 struct _sc_list *delayed_sc;
4039 struct _event_ack_list *delayed_event_ack;
4040
4041 if (!list_empty(&ioc->delayed_event_ack_list)) {
4042 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4043 struct _event_ack_list, list);
4044 _scsih_issue_delayed_event_ack(ioc, smid,
4045 delayed_event_ack->Event, delayed_event_ack->EventContext);
4046 list_del(&delayed_event_ack->list);
4047 kfree(delayed_event_ack);
4048 return 0;
4049 }
4050
4051 if (!list_empty(&ioc->delayed_sc_list)) {
4052 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4053 struct _sc_list, list);
4054 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4055 delayed_sc->handle);
4056 list_del(&delayed_sc->list);
4057 kfree(delayed_sc);
4058 return 0;
4059 }
4060 return 1;
4061 }
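/*
 * Note: delayed event ACKs are drained before delayed sas iounit
 * controls, one request per freed smid.  A return of 0 tells
 * _base_interrupt that the smid was consumed here; 1 means the caller
 * still owns (and must free) the message frame.
 */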
4062
4063 /**
4064 * _scsih_check_for_pending_tm - check for pending task management
4065 * @ioc: per adapter object
4066 * @smid: system request message index
4067 *
4068 * This will check the delayed target reset list, and feed the
4069 * next request.
4070 *
4071 * Return: 1 meaning mf should be freed from _base_interrupt
4072 * 0 means the mf is freed from this function.
4073 */
4074 static u8
4075 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4076 {
4077 struct _tr_list *delayed_tr;
4078
4079 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4080 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4081 struct _tr_list, list);
4082 mpt3sas_base_free_smid(ioc, smid);
4083 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4084 list_del(&delayed_tr->list);
4085 kfree(delayed_tr);
4086 return 0;
4087 }
4088
4089 if (!list_empty(&ioc->delayed_tr_list)) {
4090 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4091 struct _tr_list, list);
4092 mpt3sas_base_free_smid(ioc, smid);
4093 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4094 list_del(&delayed_tr->list);
4095 kfree(delayed_tr);
4096 return 0;
4097 }
4098
4099 return 1;
4100 }
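/*
 * Note: the just-completed smid is released with mpt3sas_base_free_smid()
 * before the next reset is issued, because _scsih_tm_tr_volume_send() and
 * _scsih_tm_tr_send() allocate their own hi-priority smid.  Delayed
 * volume resets are serviced ahead of delayed device resets.
 */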
4101
4102 /**
4103 * _scsih_check_topo_delete_events - sanity check on topo events
4104 * @ioc: per adapter object
4105 * @event_data: the event data payload
4106 *
4107 * This routine was added to better handle cable breakage.
4108 *
4109 * This handles the case where the driver receives multiple expander
4110 * add and delete events in a single shot. When there is a delete event
4111 * the routine will void any pending add events waiting in the event queue.
4112 */
4113 static void
4114 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4115 Mpi2EventDataSasTopologyChangeList_t *event_data)
4116 {
4117 struct fw_event_work *fw_event;
4118 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4119 u16 expander_handle;
4120 struct _sas_node *sas_expander;
4121 unsigned long flags;
4122 int i, reason_code;
4123 u16 handle;
4124
4125 for (i = 0 ; i < event_data->NumEntries; i++) {
4126 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4127 if (!handle)
4128 continue;
4129 reason_code = event_data->PHY[i].PhyStatus &
4130 MPI2_EVENT_SAS_TOPO_RC_MASK;
4131 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4132 _scsih_tm_tr_send(ioc, handle);
4133 }
4134
4135 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4136 if (expander_handle < ioc->sas_hba.num_phys) {
4137 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4138 return;
4139 }
4140 if (event_data->ExpStatus ==
4141 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4142 /* put expander attached devices into blocking state */
4143 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4144 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4145 expander_handle);
4146 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4147 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4148 do {
4149 handle = find_first_bit(ioc->blocking_handles,
4150 ioc->facts.MaxDevHandle);
4151 if (handle < ioc->facts.MaxDevHandle)
4152 _scsih_block_io_device(ioc, handle);
4153 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4154 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4155 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4156
4157 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4158 return;
4159
4160 /* mark ignore flag for pending events */
4161 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4162 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4163 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4164 fw_event->ignore)
4165 continue;
4166 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4167 fw_event->event_data;
4168 if (local_event_data->ExpStatus ==
4169 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4170 local_event_data->ExpStatus ==
4171 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4172 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4173 expander_handle) {
4174 dewtprintk(ioc, pr_info(MPT3SAS_FMT
4175 "setting ignoring flag\n", ioc->name));
4176 fw_event->ignore = 1;
4177 }
4178 }
4179 }
4180 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4181 }
4182
4183 /**
4184 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4185 * events
4186 * @ioc: per adapter object
4187 * @event_data: the event data payload
4188 *
4189 * This handles the case where the driver receives multiple switch
4190 * or device add and delete events in a single shot. When there
4191 * is a delete event the routine will void any pending add
4192 * events waiting in the event queue.
4193 */
4194 static void
4195 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4196 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4197 {
4198 struct fw_event_work *fw_event;
4199 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4200 unsigned long flags;
4201 int i, reason_code;
4202 u16 handle, switch_handle;
4203
4204 for (i = 0; i < event_data->NumEntries; i++) {
4205 handle =
4206 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4207 if (!handle)
4208 continue;
4209 reason_code = event_data->PortEntry[i].PortStatus;
4210 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4211 _scsih_tm_tr_send(ioc, handle);
4212 }
4213
4214 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4215 if (!switch_handle) {
4216 _scsih_block_io_to_pcie_children_attached_directly(
4217 ioc, event_data);
4218 return;
4219 }
4220 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4221 if ((event_data->SwitchStatus
4222 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4223 (event_data->SwitchStatus ==
4224 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4225 _scsih_block_io_to_pcie_children_attached_directly(
4226 ioc, event_data);
4227
4228 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4229 return;
4230
4231 /* mark ignore flag for pending events */
4232 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4233 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4234 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4235 fw_event->ignore)
4236 continue;
4237 local_event_data =
4238 (Mpi26EventDataPCIeTopologyChangeList_t *)
4239 fw_event->event_data;
4240 if (local_event_data->SwitchStatus ==
4241 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4242 local_event_data->SwitchStatus ==
4243 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4244 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4245 switch_handle) {
4246 dewtprintk(ioc, pr_info(MPT3SAS_FMT
4247 "setting ignoring flag for switch event\n",
4248 ioc->name));
4249 fw_event->ignore = 1;
4250 }
4251 }
4252 }
4253 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4254 }
4255
4256 /**
4257 * _scsih_set_volume_delete_flag - setting volume delete flag
4258 * @ioc: per adapter object
4259 * @handle: device handle
4260 *
4261 * This returns nothing.
4262 */
4263 static void
4264 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4265 {
4266 struct _raid_device *raid_device;
4267 struct MPT3SAS_TARGET *sas_target_priv_data;
4268 unsigned long flags;
4269
4270 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4271 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4272 if (raid_device && raid_device->starget &&
4273 raid_device->starget->hostdata) {
4274 sas_target_priv_data =
4275 raid_device->starget->hostdata;
4276 sas_target_priv_data->deleted = 1;
4277 dewtprintk(ioc, pr_info(MPT3SAS_FMT
4278 "setting delete flag: handle(0x%04x), "
4279 "wwid(0x%016llx)\n", ioc->name, handle,
4280 (unsigned long long) raid_device->wwid));
4281 }
4282 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4283 }
4284
4285 /**
4286 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4287 * @handle: input handle
4288 * @a: handle for volume a
4289 * @b: handle for volume b
4290 *
4291 * IR firmware only supports two raid volumes. The purpose of this
4292 * routine is to set the volume handle in either a or b, provided the
4293 * given input handle is non-zero and has not already been stored.
4294 */
4295 static void
4296 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4297 {
4298 if (!handle || handle == *a || handle == *b)
4299 return;
4300 if (!*a)
4301 *a = handle;
4302 else if (!*b)
4303 *b = handle;
4304 }
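/*
 * Illustrative example of the handle bookkeeping above: with a = b = 0,
 * feeding handles 0x11, 0x11, 0x22, 0x33 in that order leaves a = 0x11
 * and b = 0x22; the duplicate 0x11 and the third distinct handle 0x33
 * are dropped, matching the two-volume limit of IR firmware.
 */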
4305
4306 /**
4307 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4308 * @ioc: per adapter object
4309 * @event_data: the event data payload
4310 * Context: interrupt time.
4311 *
4312 * This routine will send target reset to volume, followed by target
4313 * resets to the PDs. This is called when a PD has been removed, or
4314 * volume has been deleted or removed. When the target reset is sent
4315 * to volume, the PD target resets need to be queued to start upon
4316 * completion of the volume target reset.
4317 */
4318 static void
4319 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4320 Mpi2EventDataIrConfigChangeList_t *event_data)
4321 {
4322 Mpi2EventIrConfigElement_t *element;
4323 int i;
4324 u16 handle, volume_handle, a, b;
4325 struct _tr_list *delayed_tr;
4326
4327 a = 0;
4328 b = 0;
4329
4330 if (ioc->is_warpdrive)
4331 return;
4332
4333 /* Volume Resets for Deleted or Removed */
4334 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4335 for (i = 0; i < event_data->NumElements; i++, element++) {
4336 if (le32_to_cpu(event_data->Flags) &
4337 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4338 continue;
4339 if (element->ReasonCode ==
4340 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4341 element->ReasonCode ==
4342 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4343 volume_handle = le16_to_cpu(element->VolDevHandle);
4344 _scsih_set_volume_delete_flag(ioc, volume_handle);
4345 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4346 }
4347 }
4348
4349 /* Volume Resets for UNHIDE events */
4350 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4351 for (i = 0; i < event_data->NumElements; i++, element++) {
4352 if (le32_to_cpu(event_data->Flags) &
4353 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4354 continue;
4355 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4356 volume_handle = le16_to_cpu(element->VolDevHandle);
4357 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4358 }
4359 }
4360
4361 if (a)
4362 _scsih_tm_tr_volume_send(ioc, a);
4363 if (b)
4364 _scsih_tm_tr_volume_send(ioc, b);
4365
4366 /* PD target resets */
4367 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4368 for (i = 0; i < event_data->NumElements; i++, element++) {
4369 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4370 continue;
4371 handle = le16_to_cpu(element->PhysDiskDevHandle);
4372 volume_handle = le16_to_cpu(element->VolDevHandle);
4373 clear_bit(handle, ioc->pd_handles);
4374 if (!volume_handle)
4375 _scsih_tm_tr_send(ioc, handle);
4376 else if (volume_handle == a || volume_handle == b) {
4377 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4378 BUG_ON(!delayed_tr);
4379 INIT_LIST_HEAD(&delayed_tr->list);
4380 delayed_tr->handle = handle;
4381 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4382 dewtprintk(ioc, pr_info(MPT3SAS_FMT
4383 "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
4384 handle));
4385 } else
4386 _scsih_tm_tr_send(ioc, handle);
4387 }
4388 }
4389
4390
4391 /**
4392 * _scsih_check_volume_delete_events - set delete flag for volumes
4393 * @ioc: per adapter object
4394 * @event_data: the event data payload
4395 * Context: interrupt time.
4396 *
4397 * This handles the case when the cable connected to the entire volume is
4398 * pulled. We will take care of setting the deleted flag so normal IO will
4399 * not be sent.
4400 */
4401 static void
4402 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4403 Mpi2EventDataIrVolume_t *event_data)
4404 {
4405 u32 state;
4406
4407 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4408 return;
4409 state = le32_to_cpu(event_data->NewValue);
4410 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4411 MPI2_RAID_VOL_STATE_FAILED)
4412 _scsih_set_volume_delete_flag(ioc,
4413 le16_to_cpu(event_data->VolDevHandle));
4414 }
4415
4416 /**
4417 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4418 * @ioc: per adapter object
4419 * @event_data: the temp threshold event data
4420 * Context: interrupt time.
4421 */
4422 static void
4423 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4424 Mpi2EventDataTemperature_t *event_data)
4425 {
4426 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4427 pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
4428 " exceeded for Sensor: %d !!!\n", ioc->name,
4429 ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
4430 ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
4431 ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
4432 ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
4433 event_data->SensorNum);
4434 pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
4435 ioc->name, event_data->CurrentTemperature);
4436 }
4437 }
4438
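/**
 * _scsih_set_satl_pending - serialize ATA pass-through (SATL) commands
 * @scmd: pointer to scsi command object
 * @pending: set (true) or clear (false) the per-device pending flag
 *
 * Only ATA_12/ATA_16 pass-through commands are tracked.  When @pending is
 * true, the return value is the previous state of the flag, so a non-zero
 * return means another SATL command is already outstanding on this device.
 */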
4439 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4440 {
4441 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4442
4443 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4444 return 0;
4445
4446 if (pending)
4447 return test_and_set_bit(0, &priv->ata_command_pending);
4448
4449 clear_bit(0, &priv->ata_command_pending);
4450 return 0;
4451 }
4452
4453 /**
4454 * _scsih_flush_running_cmds - completing outstanding commands.
4455 * @ioc: per adapter object
4456 *
4457 * The flushing out of all pending scmd commands following host reset,
4458 * where all IO is dropped to the floor.
4459 */
4460 static void
4461 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4462 {
4463 struct scsi_cmnd *scmd;
4464 struct scsiio_tracker *st;
4465 u16 smid;
4466 int count = 0;
4467
4468 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4469 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4470 if (!scmd)
4471 continue;
4472 count++;
4473 _scsih_set_satl_pending(scmd, false);
4474 st = scsi_cmd_priv(scmd);
4475 mpt3sas_base_clear_st(ioc, st);
4476 scsi_dma_unmap(scmd);
4477 if (ioc->pci_error_recovery || ioc->remove_host)
4478 scmd->result = DID_NO_CONNECT << 16;
4479 else
4480 scmd->result = DID_RESET << 16;
4481 scmd->scsi_done(scmd);
4482 }
4483 dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
4484 ioc->name, count));
4485 }
4486
4487 /**
4488 * _scsih_setup_eedp - setup MPI request for EEDP transfer
4489 * @ioc: per adapter object
4490 * @scmd: pointer to scsi command object
4491 * @mpi_request: pointer to the SCSI_IO request message frame
4492 *
4493 * Supporting DIF protection types 1, 2 and 3.
4494 */
4495 static void
4496 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4497 Mpi25SCSIIORequest_t *mpi_request)
4498 {
4499 u16 eedp_flags;
4500 unsigned char prot_op = scsi_get_prot_op(scmd);
4501 unsigned char prot_type = scsi_get_prot_type(scmd);
4502 Mpi25SCSIIORequest_t *mpi_request_3v =
4503 (Mpi25SCSIIORequest_t *)mpi_request;
4504
4505 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4506 return;
4507
4508 if (prot_op == SCSI_PROT_READ_STRIP)
4509 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4510 else if (prot_op == SCSI_PROT_WRITE_INSERT)
4511 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4512 else
4513 return;
4514
4515 switch (prot_type) {
4516 case SCSI_PROT_DIF_TYPE1:
4517 case SCSI_PROT_DIF_TYPE2:
4518
4519 /*
4520 * enable ref/guard checking
4521 * auto increment ref tag
4522 */
4523 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4524 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4525 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4526 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4527 cpu_to_be32(t10_pi_ref_tag(scmd->request));
4528 break;
4529
4530 case SCSI_PROT_DIF_TYPE3:
4531
4532 /*
4533 * enable guard checking
4534 */
4535 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4536
4537 break;
4538 }
4539
4540 mpi_request_3v->EEDPBlockSize =
4541 cpu_to_le16(scmd->device->sector_size);
4542
4543 if (ioc->is_gen35_ioc)
4544 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4545 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4546 }
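/*
 * Note on the EEDP flag mapping above: READ_STRIP maps to the check/remove
 * operation and WRITE_INSERT to the insert operation.  DIF types 1 and 2
 * add guard and reference tag checking with automatic primary reference
 * tag increment (seeded from t10_pi_ref_tag()), type 3 checks the guard
 * field only, and gen3.5 IOCs additionally disable app-tag checking via
 * MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE.
 */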
4547
4548 /**
4549 * _scsih_eedp_error_handling - return sense code for EEDP errors
4550 * @scmd: pointer to scsi command object
4551 * @ioc_status: ioc status
4552 */
4553 static void
4554 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4555 {
4556 u8 ascq;
4557
4558 switch (ioc_status) {
4559 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4560 ascq = 0x01;
4561 break;
4562 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4563 ascq = 0x02;
4564 break;
4565 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4566 ascq = 0x03;
4567 break;
4568 default:
4569 ascq = 0x00;
4570 break;
4571 }
4572 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4573 ascq);
4574 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4575 SAM_STAT_CHECK_CONDITION;
4576 }
4577
4578 /**
4579 * scsih_qcmd - main scsi request entry point
4580 * @shost: SCSI host pointer
4581 * @scmd: pointer to scsi command object
4582 *
4583 * The callback index is set inside `ioc->scsi_io_cb_idx`.
4584 *
4585 * Return: 0 on success. If there's a failure, return either:
4586 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4587 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4588 */
4589 static int
4590 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4591 {
4592 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4593 struct MPT3SAS_DEVICE *sas_device_priv_data;
4594 struct MPT3SAS_TARGET *sas_target_priv_data;
4595 struct _raid_device *raid_device;
4596 struct request *rq = scmd->request;
4597 int class;
4598 Mpi25SCSIIORequest_t *mpi_request;
4599 struct _pcie_device *pcie_device = NULL;
4600 u32 mpi_control;
4601 u16 smid;
4602 u16 handle;
4603
4604 if (ioc->logging_level & MPT_DEBUG_SCSI)
4605 scsi_print_command(scmd);
4606
4607 sas_device_priv_data = scmd->device->hostdata;
4608 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4609 scmd->result = DID_NO_CONNECT << 16;
4610 scmd->scsi_done(scmd);
4611 return 0;
4612 }
4613
4614 if (ioc->pci_error_recovery || ioc->remove_host) {
4615 scmd->result = DID_NO_CONNECT << 16;
4616 scmd->scsi_done(scmd);
4617 return 0;
4618 }
4619
4620 sas_target_priv_data = sas_device_priv_data->sas_target;
4621
4622 /* invalid device handle */
4623 handle = sas_target_priv_data->handle;
4624 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4625 scmd->result = DID_NO_CONNECT << 16;
4626 scmd->scsi_done(scmd);
4627 return 0;
4628 }
4629
4630
4631 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4632 /* host recovery or link resets sent via IOCTLs */
4633 return SCSI_MLQUEUE_HOST_BUSY;
4634 } else if (sas_target_priv_data->deleted) {
4635 /* device has been deleted */
4636 scmd->result = DID_NO_CONNECT << 16;
4637 scmd->scsi_done(scmd);
4638 return 0;
4639 } else if (sas_target_priv_data->tm_busy ||
4640 sas_device_priv_data->block) {
4641 /* device busy with task management */
4642 return SCSI_MLQUEUE_DEVICE_BUSY;
4643 }
4644
4645 /*
4646 * Bug workaround for firmware SATL handling. The loop
4647 * is based on atomic operations and ensures consistency
4648 * since we're lockless at this point.
4649 */
4650 do {
4651 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4652 scmd->result = SAM_STAT_BUSY;
4653 scmd->scsi_done(scmd);
4654 return 0;
4655 }
4656 } while (_scsih_set_satl_pending(scmd, true));
4657
4658 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4659 mpi_control = MPI2_SCSIIO_CONTROL_READ;
4660 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4661 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4662 else
4663 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4664
4665 /* set tags */
4666 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
4667 /* NCQ Prio supported, make sure control indicated high priority */
4668 if (sas_device_priv_data->ncq_prio_enable) {
4669 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4670 if (class == IOPRIO_CLASS_RT)
4671 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4672 }
4673 /* Make sure Device is not raid volume.
4674 * We do not expose raid functionality to upper layer for warpdrive.
4675 */
4676 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4677 && !scsih_is_nvme(&scmd->device->sdev_gendev))
4678 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4679 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4680
4681 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4682 if (!smid) {
4683 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4684 ioc->name, __func__);
4685 _scsih_set_satl_pending(scmd, false);
4686 goto out;
4687 }
4688 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4689 memset(mpi_request, 0, ioc->request_sz);
4690 _scsih_setup_eedp(ioc, scmd, mpi_request);
4691
4692 if (scmd->cmd_len == 32)
4693 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
4694 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4695 if (sas_device_priv_data->sas_target->flags &
4696 MPT_TARGET_FLAGS_RAID_COMPONENT)
4697 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4698 else
4699 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4700 mpi_request->DevHandle = cpu_to_le16(handle);
4701 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
4702 mpi_request->Control = cpu_to_le32(mpi_control);
4703 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
4704 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
4705 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4706 mpi_request->SenseBufferLowAddress =
4707 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4708 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4709 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4710 mpi_request->LUN);
4711 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4712
4713 if (mpi_request->DataLength) {
4714 pcie_device = sas_target_priv_data->pcie_dev;
4715 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4716 mpt3sas_base_free_smid(ioc, smid);
4717 _scsih_set_satl_pending(scmd, false);
4718 goto out;
4719 }
4720 } else
4721 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
4722
4723 raid_device = sas_target_priv_data->raid_device;
4724 if (raid_device && raid_device->direct_io_enabled)
4725 mpt3sas_setup_direct_io(ioc, scmd,
4726 raid_device, mpi_request);
4727
4728 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4729 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4730 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4731 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4732 mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
4733 } else
4734 ioc->put_smid_scsi_io(ioc, smid,
4735 le16_to_cpu(mpi_request->DevHandle));
4736 } else
4737 mpt3sas_base_put_smid_default(ioc, smid);
4738 return 0;
4739
4740 out:
4741 return SCSI_MLQUEUE_HOST_BUSY;
4742 }
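/*
 * Request routing summary for scsih_qcmd(): plain SCSI_IO with
 * MPT_TARGET_FASTPATH_IO set goes out via the fast-path doorbell,
 * other SCSI_IO via put_smid_scsi_io(), and RAID pass-through via the
 * default queue.  Any failure to obtain a smid or build the SG list
 * clears the SATL-pending flag and returns SCSI_MLQUEUE_HOST_BUSY so
 * the midlayer retries the command.
 */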
4743
4744 /**
4745 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4746 * @sense_buffer: sense data returned by target
4747 * @data: normalized skey/asc/ascq
4748 */
4749 static void
4750 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
4751 {
4752 if ((sense_buffer[0] & 0x7F) >= 0x72) {
4753 /* descriptor format */
4754 data->skey = sense_buffer[1] & 0x0F;
4755 data->asc = sense_buffer[2];
4756 data->ascq = sense_buffer[3];
4757 } else {
4758 /* fixed format */
4759 data->skey = sense_buffer[2] & 0x0F;
4760 data->asc = sense_buffer[12];
4761 data->ascq = sense_buffer[13];
4762 }
4763 }
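/*
 * Note: a response code of 0x72 or higher (byte 0, masked with 0x7F)
 * indicates descriptor-format sense, where key/asc/ascq live in bytes
 * 1-3; anything lower is treated as fixed-format sense with the fields
 * at bytes 2, 12 and 13.
 */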
4764
4765 /**
4766 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
4767 * @ioc: per adapter object
4768 * @scmd: pointer to scsi command object
4769 * @mpi_reply: reply mf payload returned from firmware
4770 * @smid: system request message index
4771 *
4772 * scsi_status - SCSI Status code returned from target device
4773 * scsi_state - state info associated with SCSI_IO determined by ioc
4774 * ioc_status - ioc supplied status info
4775 */
4776 static void
4777 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4778 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
4779 {
4780 u32 response_info;
4781 u8 *response_bytes;
4782 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
4783 MPI2_IOCSTATUS_MASK;
4784 u8 scsi_state = mpi_reply->SCSIState;
4785 u8 scsi_status = mpi_reply->SCSIStatus;
4786 char *desc_ioc_state = NULL;
4787 char *desc_scsi_status = NULL;
4788 char *desc_scsi_state = ioc->tmp_string;
4789 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4790 struct _sas_device *sas_device = NULL;
4791 struct _pcie_device *pcie_device = NULL;
4792 struct scsi_target *starget = scmd->device->sdev_target;
4793 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4794 char *device_str = NULL;
4795
4796 if (!priv_target)
4797 return;
4798 if (ioc->hide_ir_msg)
4799 device_str = "WarpDrive";
4800 else
4801 device_str = "volume";
4802
4803 if (log_info == 0x31170000)
4804 return;
4805
4806 switch (ioc_status) {
4807 case MPI2_IOCSTATUS_SUCCESS:
4808 desc_ioc_state = "success";
4809 break;
4810 case MPI2_IOCSTATUS_INVALID_FUNCTION:
4811 desc_ioc_state = "invalid function";
4812 break;
4813 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
4814 desc_ioc_state = "scsi recovered error";
4815 break;
4816 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
4817 desc_ioc_state = "scsi invalid dev handle";
4818 break;
4819 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4820 desc_ioc_state = "scsi device not there";
4821 break;
4822 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4823 desc_ioc_state = "scsi data overrun";
4824 break;
4825 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4826 desc_ioc_state = "scsi data underrun";
4827 break;
4828 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
4829 desc_ioc_state = "scsi io data error";
4830 break;
4831 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4832 desc_ioc_state = "scsi protocol error";
4833 break;
4834 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4835 desc_ioc_state = "scsi task terminated";
4836 break;
4837 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4838 desc_ioc_state = "scsi residual mismatch";
4839 break;
4840 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4841 desc_ioc_state = "scsi task mgmt failed";
4842 break;
4843 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4844 desc_ioc_state = "scsi ioc terminated";
4845 break;
4846 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4847 desc_ioc_state = "scsi ext terminated";
4848 break;
4849 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4850 desc_ioc_state = "eedp guard error";
4851 break;
4852 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4853 desc_ioc_state = "eedp ref tag error";
4854 break;
4855 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4856 desc_ioc_state = "eedp app tag error";
4857 break;
4858 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
4859 desc_ioc_state = "insufficient power";
4860 break;
4861 default:
4862 desc_ioc_state = "unknown";
4863 break;
4864 }
4865
4866 switch (scsi_status) {
4867 case MPI2_SCSI_STATUS_GOOD:
4868 desc_scsi_status = "good";
4869 break;
4870 case MPI2_SCSI_STATUS_CHECK_CONDITION:
4871 desc_scsi_status = "check condition";
4872 break;
4873 case MPI2_SCSI_STATUS_CONDITION_MET:
4874 desc_scsi_status = "condition met";
4875 break;
4876 case MPI2_SCSI_STATUS_BUSY:
4877 desc_scsi_status = "busy";
4878 break;
4879 case MPI2_SCSI_STATUS_INTERMEDIATE:
4880 desc_scsi_status = "intermediate";
4881 break;
4882 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
4883 desc_scsi_status = "intermediate condmet";
4884 break;
4885 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
4886 desc_scsi_status = "reservation conflict";
4887 break;
4888 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
4889 desc_scsi_status = "command terminated";
4890 break;
4891 case MPI2_SCSI_STATUS_TASK_SET_FULL:
4892 desc_scsi_status = "task set full";
4893 break;
4894 case MPI2_SCSI_STATUS_ACA_ACTIVE:
4895 desc_scsi_status = "aca active";
4896 break;
4897 case MPI2_SCSI_STATUS_TASK_ABORTED:
4898 desc_scsi_status = "task aborted";
4899 break;
4900 default:
4901 desc_scsi_status = "unknown";
4902 break;
4903 }
4904
4905 desc_scsi_state[0] = '\0';
4906 if (!scsi_state)
4907 desc_scsi_state = " ";
4908 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
4909 strcat(desc_scsi_state, "response info ");
4910 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
4911 strcat(desc_scsi_state, "state terminated ");
4912 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
4913 strcat(desc_scsi_state, "no status ");
4914 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
4915 strcat(desc_scsi_state, "autosense failed ");
4916 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
4917 strcat(desc_scsi_state, "autosense valid ");
4918
4919 scsi_print_command(scmd);
4920
4921 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4922 pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
4923 device_str, (unsigned long long)priv_target->sas_address);
4924 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
4925 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
4926 if (pcie_device) {
4927 pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
4928 ioc->name,
4929 (unsigned long long)pcie_device->wwid,
4930 pcie_device->port_num);
4931 if (pcie_device->enclosure_handle != 0)
4932 pr_info(MPT3SAS_FMT
4933 "\tenclosure logical id(0x%016llx), "
4934 "slot(%d)\n", ioc->name,
4935 (unsigned long long)
4936 pcie_device->enclosure_logical_id,
4937 pcie_device->slot);
4938 if (pcie_device->connector_name[0])
4939 pr_info(MPT3SAS_FMT
4940 "\tenclosure level(0x%04x),"
4941 "connector name( %s)\n",
4942 ioc->name, pcie_device->enclosure_level,
4943 pcie_device->connector_name);
4944 pcie_device_put(pcie_device);
4945 }
4946 } else {
4947 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4948 if (sas_device) {
4949 pr_warn(MPT3SAS_FMT
4950 "\tsas_address(0x%016llx), phy(%d)\n",
4951 ioc->name, (unsigned long long)
4952 sas_device->sas_address, sas_device->phy);
4953
4954 _scsih_display_enclosure_chassis_info(ioc, sas_device,
4955 NULL, NULL);
4956
4957 sas_device_put(sas_device);
4958 }
4959 }
4960
4961 pr_warn(MPT3SAS_FMT
4962 "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
4963 ioc->name, le16_to_cpu(mpi_reply->DevHandle),
4964 desc_ioc_state, ioc_status, smid);
4965 pr_warn(MPT3SAS_FMT
4966 "\trequest_len(%d), underflow(%d), resid(%d)\n",
4967 ioc->name, scsi_bufflen(scmd), scmd->underflow,
4968 scsi_get_resid(scmd));
4969 pr_warn(MPT3SAS_FMT
4970 "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
4971 ioc->name, le16_to_cpu(mpi_reply->TaskTag),
4972 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
4973 pr_warn(MPT3SAS_FMT
4974 "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
4975 ioc->name, desc_scsi_status,
4976 scsi_status, desc_scsi_state, scsi_state);
4977
4978 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
4979 struct sense_info data;
4980 _scsih_normalize_sense(scmd->sense_buffer, &data);
4981 pr_warn(MPT3SAS_FMT
4982 "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
4983 ioc->name, data.skey,
4984 data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
4985 }
4986 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
4987 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
4988 response_bytes = (u8 *)&response_info;
4989 _scsih_response_code(ioc, response_bytes[0]);
4990 }
4991 }
4992
4993 /**
4994 * _scsih_turn_on_pfa_led - illuminate PFA LED
4995 * @ioc: per adapter object
4996 * @handle: device handle
4997 * Context: process
4998 */
4999 static void
5000 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5001 {
5002 Mpi2SepReply_t mpi_reply;
5003 Mpi2SepRequest_t mpi_request;
5004 struct _sas_device *sas_device;
5005
5006 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5007 if (!sas_device)
5008 return;
5009
5010 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5011 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5012 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5013 mpi_request.SlotStatus =
5014 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5015 mpi_request.DevHandle = cpu_to_le16(handle);
5016 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
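/*
 * Issue the SEP WRITE_STATUS request addressed by device handle to set
 * the predicted-fault slot status; pfa_led_on is recorded below so the
 * LED can be cleared again when the device is removed.
 */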
5017 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5018 &mpi_request)) != 0) {
5019 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
5020 __FILE__, __LINE__, __func__);
5021 goto out;
5022 }
5023 sas_device->pfa_led_on = 1;
5024
5025 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5026 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5027 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5028 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
5029 le32_to_cpu(mpi_reply.IOCLogInfo)));
5030 goto out;
5031 }
5032 out:
5033 sas_device_put(sas_device);
5034 }
5035
5036 /**
5037 * _scsih_turn_off_pfa_led - turn off the PFA LED
5038 * @ioc: per adapter object
5039 * @sas_device: sas device whose PFA LED has to be turned off
5040 * Context: process
5041 */
5042 static void
5043 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5044 struct _sas_device *sas_device)
5045 {
5046 Mpi2SepReply_t mpi_reply;
5047 Mpi2SepRequest_t mpi_request;
5048
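/*
 * By the time the device is being removed its handle is typically no
 * longer valid, so this SEP request is addressed by enclosure handle
 * and slot (ENCLOSURE_SLOT_ADDRESS) rather than by DevHandle.
 */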
5049 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5050 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5051 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5052 mpi_request.SlotStatus = 0;
5053 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5054 mpi_request.DevHandle = 0;
5055 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5056 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5057 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5058 &mpi_request)) != 0) {
5059 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
5060 __FILE__, __LINE__, __func__);
5061 return;
5062 }
5063
5064 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5065 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5066 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5067 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
5068 le32_to_cpu(mpi_reply.IOCLogInfo)));
5069 return;
5070 }
5071 }
5072
5073 /**
5074 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5075 * @ioc: per adapter object
5076 * @handle: device handle
5077 * Context: interrupt.
5078 */
5079 static void
5080 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5081 {
5082 struct fw_event_work *fw_event;
5083
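/*
 * The SEP request used to light the LED waits for a reply and cannot be
 * issued from interrupt context, so queue a firmware event and let the
 * fw_event worker call _scsih_turn_on_pfa_led().
 */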
5084 fw_event = alloc_fw_event_work(0);
5085 if (!fw_event)
5086 return;
5087 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5088 fw_event->device_handle = handle;
5089 fw_event->ioc = ioc;
5090 _scsih_fw_event_add(ioc, fw_event);
5091 fw_event_work_put(fw_event);
5092 }
5093
5094 /**
5095 * _scsih_smart_predicted_fault - process smart errors
5096 * @ioc: per adapter object
5097 * @handle: device handle
5098 * Context: interrupt.
5099 */
5100 static void
5101 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5102 {
5103 struct scsi_target *starget;
5104 struct MPT3SAS_TARGET *sas_target_priv_data;
5105 Mpi2EventNotificationReply_t *event_reply;
5106 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5107 struct _sas_device *sas_device;
5108 ssize_t sz;
5109 unsigned long flags;
5110
5111 /* only handle non-raid devices */
5112 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5113 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5114 if (!sas_device)
5115 goto out_unlock;
5116
5117 starget = sas_device->starget;
5118 sas_target_priv_data = starget->hostdata;
5119
5120 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5121 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5122 goto out_unlock;
5123
5124 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5125
5126 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5127
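/* The PFA LED is only driven on IBM-branded (subsystem vendor) adapters. */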
5128 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5129 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5130
5131 /* insert into event log */
5132 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5133 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5134 event_reply = kzalloc(sz, GFP_KERNEL);
5135 if (!event_reply) {
5136 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5137 ioc->name, __FILE__, __LINE__, __func__);
5138 goto out;
5139 }
5140
5141 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5142 event_reply->Event =
5143 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5144 event_reply->MsgLength = sz/4;
5145 event_reply->EventDataLength =
5146 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5147 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5148 event_reply->EventData;
5149 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5150 event_data->ASC = 0x5D;
5151 event_data->DevHandle = cpu_to_le16(handle);
5152 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5153 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5154 kfree(event_reply);
5155 out:
5156 if (sas_device)
5157 sas_device_put(sas_device);
5158 return;
5159
5160 out_unlock:
5161 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5162 goto out;
5163 }
5164
5165 /**
5166 * _scsih_io_done - scsi request callback
5167 * @ioc: per adapter object
5168 * @smid: system request message index
5169 * @msix_index: MSIX table index supplied by the OS
5170 * @reply: reply message frame(lower 32bit addr)
5171 *
5172 * Callback handler when using _scsih_qcmd.
5173 *
5174 * Return: 1 meaning mf should be freed from _base_interrupt
5175 * 0 means the mf is freed from this function.
5176 */
5177 static u8
5178 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5179 {
5180 Mpi25SCSIIORequest_t *mpi_request;
5181 Mpi2SCSIIOReply_t *mpi_reply;
5182 struct scsi_cmnd *scmd;
5183 struct scsiio_tracker *st;
5184 u16 ioc_status;
5185 u32 xfer_cnt;
5186 u8 scsi_state;
5187 u8 scsi_status;
5188 u32 log_info;
5189 struct MPT3SAS_DEVICE *sas_device_priv_data;
5190 u32 response_code = 0;
5191
5192 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5193
5194 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5195 if (scmd == NULL)
5196 return 1;
5197
5198 _scsih_set_satl_pending(scmd, false);
5199
5200 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5201
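/*
 * No reply frame: the firmware signalled success through the reply
 * descriptor alone, so no full IO reply was posted for this command.
 */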
5202 if (mpi_reply == NULL) {
5203 scmd->result = DID_OK << 16;
5204 goto out;
5205 }
5206
5207 sas_device_priv_data = scmd->device->hostdata;
5208 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5209 sas_device_priv_data->sas_target->deleted) {
5210 scmd->result = DID_NO_CONNECT << 16;
5211 goto out;
5212 }
5213 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5214
5215 /*
5216 * WARPDRIVE: if direct_io is set then this was a direct I/O;
5217 * a failed direct I/O should be redirected back to the volume.
5218 */
5219 st = scsi_cmd_priv(scmd);
5220 if (st->direct_io &&
5221 ((ioc_status & MPI2_IOCSTATUS_MASK)
5222 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5223 st->direct_io = 0;
5224 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5225 mpi_request->DevHandle =
5226 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5227 ioc->put_smid_scsi_io(ioc, smid,
5228 sas_device_priv_data->sas_target->handle);
5229 return 0;
5230 }
5231 /* turning off TLR */
5232 scsi_state = mpi_reply->SCSIState;
5233 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5234 response_code =
5235 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
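/*
 * Snoop only the first completion per device: if a plain SAS device
 * rejected a TLR (transport layer retries) frame with an invalid-frame
 * response, disable TLR for that device from here on.
 */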
5236 if (!sas_device_priv_data->tlr_snoop_check) {
5237 sas_device_priv_data->tlr_snoop_check++;
5238 if ((!ioc->is_warpdrive &&
5239 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5240 !scsih_is_nvme(&scmd->device->sdev_gendev))
5241 && sas_is_tlr_enabled(scmd->device) &&
5242 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5243 sas_disable_tlr(scmd->device);
5244 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5245 }
5246 }
5247
5248 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5249 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5250 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5251 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5252 else
5253 log_info = 0;
5254 ioc_status &= MPI2_IOCSTATUS_MASK;
5255 scsi_status = mpi_reply->SCSIStatus;
5256
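/*
 * A data underrun with nothing transferred and a BUSY, RESERVATION
 * CONFLICT or TASK SET FULL SCSI status is treated as a successful
 * transport, so the raw SCSI status is passed up to the midlayer below.
 */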
5257 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5258 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5259 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5260 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5261 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5262 }
5263
5264 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5265 struct sense_info data;
5266 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5267 smid);
5268 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5269 le32_to_cpu(mpi_reply->SenseCount));
5270 memcpy(scmd->sense_buffer, sense_data, sz);
5271 _scsih_normalize_sense(scmd->sense_buffer, &data);
5272 /* failure prediction threshold exceeded */
5273 if (data.asc == 0x5D)
5274 _scsih_smart_predicted_fault(ioc,
5275 le16_to_cpu(mpi_reply->DevHandle));
5276 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5277
5278 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5279 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5280 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5281 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5282 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5283 }
5284 switch (ioc_status) {
5285 case MPI2_IOCSTATUS_BUSY:
5286 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5287 scmd->result = SAM_STAT_BUSY;
5288 break;
5289
5290 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5291 scmd->result = DID_NO_CONNECT << 16;
5292 break;
5293
5294 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5295 if (sas_device_priv_data->block) {
5296 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5297 goto out;
5298 }
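/*
 * This particular IOC loginfo is handled specially: allow a couple of
 * soft-error retries, then offline the device if it still fails.  (The
 * exact meaning of 0x31110630 is firmware specific.)
 */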
5299 if (log_info == 0x31110630) {
5300 if (scmd->retries > 2) {
5301 scmd->result = DID_NO_CONNECT << 16;
5302 scsi_device_set_state(scmd->device,
5303 SDEV_OFFLINE);
5304 } else {
5305 scmd->result = DID_SOFT_ERROR << 16;
5306 scmd->device->expecting_cc_ua = 1;
5307 }
5308 break;
5309 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5310 scmd->result = DID_RESET << 16;
5311 break;
5312 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5313 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5314 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5315 scmd->result = DID_RESET << 16;
5316 break;
5317 }
5318 scmd->result = DID_SOFT_ERROR << 16;
5319 break;
5320 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5321 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5322 scmd->result = DID_RESET << 16;
5323 break;
5324
5325 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5326 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5327 scmd->result = DID_SOFT_ERROR << 16;
5328 else
5329 scmd->result = (DID_OK << 16) | scsi_status;
5330 break;
5331
5332 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5333 scmd->result = (DID_OK << 16) | scsi_status;
5334
5335 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5336 break;
5337
5338 if (xfer_cnt < scmd->underflow) {
5339 if (scsi_status == SAM_STAT_BUSY)
5340 scmd->result = SAM_STAT_BUSY;
5341 else
5342 scmd->result = DID_SOFT_ERROR << 16;
5343 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5344 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5345 scmd->result = DID_SOFT_ERROR << 16;
5346 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5347 scmd->result = DID_RESET << 16;
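/*
 * A REPORT LUNS that transferred no data is converted into a synthetic
 * ILLEGAL REQUEST/Invalid Command check condition so the midlayer can
 * fall back instead of treating the empty response as success.
 */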
5348 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5349 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5350 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5351 scmd->result = (DRIVER_SENSE << 24) |
5352 SAM_STAT_CHECK_CONDITION;
5353 scmd->sense_buffer[0] = 0x70;
5354 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5355 scmd->sense_buffer[12] = 0x20;
5356 scmd->sense_buffer[13] = 0;
5357 }
5358 break;
5359
5360 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5361 scsi_set_resid(scmd, 0);
5362 /* fall through */
5363 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5364 case MPI2_IOCSTATUS_SUCCESS:
5365 scmd->result = (DID_OK << 16) | scsi_status;
5366 if (response_code ==
5367 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5368 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5369 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5370 scmd->result = DID_SOFT_ERROR << 16;
5371 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5372 scmd->result = DID_RESET << 16;
5373 break;
5374
5375 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5376 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5377 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5378 _scsih_eedp_error_handling(scmd, ioc_status);
5379 break;
5380
5381 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5382 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5383 case MPI2_IOCSTATUS_INVALID_SGL:
5384 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5385 case MPI2_IOCSTATUS_INVALID_FIELD:
5386 case MPI2_IOCSTATUS_INVALID_STATE:
5387 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5388 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5389 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5390 default:
5391 scmd->result = DID_SOFT_ERROR << 16;
5392 break;
5393
5394 }
5395
5396 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5397 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5398
5399 out:
5400
5401 scsi_dma_unmap(scmd);
5402 mpt3sas_base_free_smid(ioc, smid);
5403 scmd->scsi_done(scmd);
5404 return 0;
5405 }
5406
5407 /**
5408 * _scsih_sas_host_refresh - refreshing sas host object contents
5409 * @ioc: per adapter object
5410 * Context: user
5411 *
5412 * During port enable, the firmware sends topology events for every device.
5413 * It's possible that the handles have changed from the previous setting, so
5414 * this code keeps the handles updated when they change.
5415 */
5416 static void
5417 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5418 {
5419 u16 sz;
5420 u16 ioc_status;
5421 int i;
5422 Mpi2ConfigReply_t mpi_reply;
5423 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5424 u16 attached_handle;
5425 u8 link_rate;
5426
5427 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5428 "updating handles for sas_host(0x%016llx)\n",
5429 ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
5430
5431 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5432 * sizeof(Mpi2SasIOUnit0PhyData_t));
5433 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5434 if (!sas_iounit_pg0) {
5435 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5436 ioc->name, __FILE__, __LINE__, __func__);
5437 return;
5438 }
5439
5440 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5441 sas_iounit_pg0, sz)) != 0)
5442 goto out;
5443 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5444 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5445 goto out;
5446 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5447 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5448 if (i == 0)
5449 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5450 PhyData[0].ControllerDevHandle);
5451 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5452 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5453 AttachedDevHandle);
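/*
 * If something is attached but the negotiated rate is below 1.5 Gbps
 * (e.g. the link is disabled or still coming up), report at least
 * 1.5 Gbps so the transport layer keeps the attached device linked.
 */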
5454 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5455 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5456 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5457 attached_handle, i, link_rate);
5458 }
5459 out:
5460 kfree(sas_iounit_pg0);
5461 }
5462
5463 /**
5464 * _scsih_sas_host_add - create sas host object
5465 * @ioc: per adapter object
5466 *
5467 * Creating host side data object, stored in ioc->sas_hba
5468 */
5469 static void
5470 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5471 {
5472 int i;
5473 Mpi2ConfigReply_t mpi_reply;
5474 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5475 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5476 Mpi2SasPhyPage0_t phy_pg0;
5477 Mpi2SasDevicePage0_t sas_device_pg0;
5478 Mpi2SasEnclosurePage0_t enclosure_pg0;
5479 u16 ioc_status;
5480 u16 sz;
5481 u8 device_missing_delay;
5482 u8 num_phys;
5483
5484 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5485 if (!num_phys) {
5486 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5487 ioc->name, __FILE__, __LINE__, __func__);
5488 return;
5489 }
5490 ioc->sas_hba.phy = kcalloc(num_phys,
5491 sizeof(struct _sas_phy), GFP_KERNEL);
5492 if (!ioc->sas_hba.phy) {
5493 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5494 ioc->name, __FILE__, __LINE__, __func__);
5495 goto out;
5496 }
5497 ioc->sas_hba.num_phys = num_phys;
5498
5499 /* sas_iounit page 0 */
5500 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5501 sizeof(Mpi2SasIOUnit0PhyData_t));
5502 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5503 if (!sas_iounit_pg0) {
5504 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5505 ioc->name, __FILE__, __LINE__, __func__);
5506 return;
5507 }
5508 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5509 sas_iounit_pg0, sz))) {
5510 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5511 ioc->name, __FILE__, __LINE__, __func__);
5512 goto out;
5513 }
5514 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5515 MPI2_IOCSTATUS_MASK;
5516 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5517 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5518 ioc->name, __FILE__, __LINE__, __func__);
5519 goto out;
5520 }
5521
5522 /* sas_iounit page 1 */
5523 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5524 sizeof(Mpi2SasIOUnit1PhyData_t));
5525 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5526 if (!sas_iounit_pg1) {
5527 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5528 ioc->name, __FILE__, __LINE__, __func__);
5529 goto out;
5530 }
5531 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5532 sas_iounit_pg1, sz))) {
5533 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5534 ioc->name, __FILE__, __LINE__, __func__);
5535 goto out;
5536 }
5537 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5538 MPI2_IOCSTATUS_MASK;
5539 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5540 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5541 ioc->name, __FILE__, __LINE__, __func__);
5542 goto out;
5543 }
5544
5545 ioc->io_missing_delay =
5546 sas_iounit_pg1->IODeviceMissingDelay;
5547 device_missing_delay =
5548 sas_iounit_pg1->ReportDeviceMissingDelay;
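/*
 * ReportDeviceMissingDelay is in seconds unless the UNIT_16 flag is
 * set, in which case the timeout field counts units of 16 seconds.
 */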
5549 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5550 ioc->device_missing_delay = (device_missing_delay &
5551 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5552 else
5553 ioc->device_missing_delay = device_missing_delay &
5554 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5555
5556 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5557 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5558 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5559 i))) {
5560 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5561 ioc->name, __FILE__, __LINE__, __func__);
5562 goto out;
5563 }
5564 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5565 MPI2_IOCSTATUS_MASK;
5566 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5567 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5568 ioc->name, __FILE__, __LINE__, __func__);
5569 goto out;
5570 }
5571
5572 if (i == 0)
5573 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5574 PhyData[0].ControllerDevHandle);
5575 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5576 ioc->sas_hba.phy[i].phy_id = i;
5577 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5578 phy_pg0, ioc->sas_hba.parent_dev);
5579 }
5580 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5581 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5582 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5583 ioc->name, __FILE__, __LINE__, __func__);
5584 goto out;
5585 }
5586 ioc->sas_hba.enclosure_handle =
5587 le16_to_cpu(sas_device_pg0.EnclosureHandle);
5588 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5589 pr_info(MPT3SAS_FMT
5590 "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5591 ioc->name, ioc->sas_hba.handle,
5592 (unsigned long long) ioc->sas_hba.sas_address,
5593 ioc->sas_hba.num_phys);
5594
5595 if (ioc->sas_hba.enclosure_handle) {
5596 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5597 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5598 ioc->sas_hba.enclosure_handle)))
5599 ioc->sas_hba.enclosure_logical_id =
5600 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5601 }
5602
5603 out:
5604 kfree(sas_iounit_pg1);
5605 kfree(sas_iounit_pg0);
5606 }
5607
5608 /**
5609 * _scsih_expander_add - creating expander object
5610 * @ioc: per adapter object
5611 * @handle: expander handle
5612 *
5613 * Creating expander object, stored in ioc->sas_expander_list.
5614 *
5615 * Return: 0 for success, else error.
5616 */
5617 static int
5618 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5619 {
5620 struct _sas_node *sas_expander;
5621 struct _enclosure_node *enclosure_dev;
5622 Mpi2ConfigReply_t mpi_reply;
5623 Mpi2ExpanderPage0_t expander_pg0;
5624 Mpi2ExpanderPage1_t expander_pg1;
5625 u32 ioc_status;
5626 u16 parent_handle;
5627 u64 sas_address, sas_address_parent = 0;
5628 int i;
5629 unsigned long flags;
5630 struct _sas_port *mpt3sas_port = NULL;
5631
5632 int rc = 0;
5633
5634 if (!handle)
5635 return -1;
5636
5637 if (ioc->shost_recovery || ioc->pci_error_recovery)
5638 return -1;
5639
5640 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5641 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5642 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5643 ioc->name, __FILE__, __LINE__, __func__);
5644 return -1;
5645 }
5646
5647 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5648 MPI2_IOCSTATUS_MASK;
5649 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5650 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5651 ioc->name, __FILE__, __LINE__, __func__);
5652 return -1;
5653 }
5654
5655 /* handle out of order topology events */
5656 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5657 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5658 != 0) {
5659 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5660 ioc->name, __FILE__, __LINE__, __func__);
5661 return -1;
5662 }
5663 if (sas_address_parent != ioc->sas_hba.sas_address) {
5664 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5665 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5666 sas_address_parent);
5667 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5668 if (!sas_expander) {
5669 rc = _scsih_expander_add(ioc, parent_handle);
5670 if (rc != 0)
5671 return rc;
5672 }
5673 }
5674
5675 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5676 sas_address = le64_to_cpu(expander_pg0.SASAddress);
5677 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5678 sas_address);
5679 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5680
5681 if (sas_expander)
5682 return 0;
5683
5684 sas_expander = kzalloc(sizeof(struct _sas_node),
5685 GFP_KERNEL);
5686 if (!sas_expander) {
5687 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5688 ioc->name, __FILE__, __LINE__, __func__);
5689 return -1;
5690 }
5691
5692 sas_expander->handle = handle;
5693 sas_expander->num_phys = expander_pg0.NumPhys;
5694 sas_expander->sas_address_parent = sas_address_parent;
5695 sas_expander->sas_address = sas_address;
5696
5697 pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
5698 " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
5699 handle, parent_handle, (unsigned long long)
5700 sas_expander->sas_address, sas_expander->num_phys);
5701
5702 if (!sas_expander->num_phys)
5703 goto out_fail;
5704 sas_expander->phy = kcalloc(sas_expander->num_phys,
5705 sizeof(struct _sas_phy), GFP_KERNEL);
5706 if (!sas_expander->phy) {
5707 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5708 ioc->name, __FILE__, __LINE__, __func__);
5709 rc = -1;
5710 goto out_fail;
5711 }
5712
5713 INIT_LIST_HEAD(&sas_expander->sas_port_list);
5714 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
5715 sas_address_parent);
5716 if (!mpt3sas_port) {
5717 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5718 ioc->name, __FILE__, __LINE__, __func__);
5719 rc = -1;
5720 goto out_fail;
5721 }
5722 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
5723
5724 for (i = 0 ; i < sas_expander->num_phys ; i++) {
5725 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
5726 &expander_pg1, i, handle))) {
5727 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5728 ioc->name, __FILE__, __LINE__, __func__);
5729 rc = -1;
5730 goto out_fail;
5731 }
5732 sas_expander->phy[i].handle = handle;
5733 sas_expander->phy[i].phy_id = i;
5734
5735 if ((mpt3sas_transport_add_expander_phy(ioc,
5736 &sas_expander->phy[i], expander_pg1,
5737 sas_expander->parent_dev))) {
5738 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
5739 ioc->name, __FILE__, __LINE__, __func__);
5740 rc = -1;
5741 goto out_fail;
5742 }
5743 }
5744
5745 if (sas_expander->enclosure_handle) {
5746 enclosure_dev =
5747 mpt3sas_scsih_enclosure_find_by_handle(ioc,
5748 sas_expander->enclosure_handle);
5749 if (enclosure_dev)
5750 sas_expander->enclosure_logical_id =
5751 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5752 }
5753
5754 _scsih_expander_node_add(ioc, sas_expander);
5755 return 0;
5756
5757 out_fail:
5758
5759 if (mpt3sas_port)
5760 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
5761 sas_address_parent);
5762 kfree(sas_expander);
5763 return rc;
5764 }
5765
5766 /**
5767 * mpt3sas_expander_remove - removing expander object
5768 * @ioc: per adapter object
5769 * @sas_address: expander sas_address
5770 */
5771 void
5772 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5773 {
5774 struct _sas_node *sas_expander;
5775 unsigned long flags;
5776
5777 if (ioc->shost_recovery)
5778 return;
5779
5780 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5781 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5782 sas_address);
5783 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5784 if (sas_expander)
5785 _scsih_expander_node_remove(ioc, sas_expander);
5786 }
5787
5788 /**
5789 * _scsih_done - internal SCSI_IO callback handler.
5790 * @ioc: per adapter object
5791 * @smid: system request message index
5792 * @msix_index: MSIX table index supplied by the OS
5793 * @reply: reply message frame(lower 32bit addr)
5794 *
5795 * Callback handler when sending internal generated SCSI_IO.
5796 * The callback index passed is `ioc->scsih_cb_idx`
5797 *
5798 * Return: 1 meaning mf should be freed from _base_interrupt
5799 * 0 means the mf is freed from this function.
5800 */
5801 static u8
5802 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5803 {
5804 MPI2DefaultReply_t *mpi_reply;
5805
5806 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5807 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
5808 return 1;
5809 if (ioc->scsih_cmds.smid != smid)
5810 return 1;
5811 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
5812 if (mpi_reply) {
5813 memcpy(ioc->scsih_cmds.reply, mpi_reply,
5814 mpi_reply->MsgLength*4);
5815 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
5816 }
5817 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
5818 complete(&ioc->scsih_cmds.done);
5819 return 1;
5820 }
5821
5822
5823
5824
5825 #define MPT3_MAX_LUNS (255)
5826
5827
5828 /**
5829 * _scsih_check_access_status - check access flags
5830 * @ioc: per adapter object
5831 * @sas_address: sas address
5832 * @handle: sas device handle
5833 * @access_status: errors returned during discovery of the device
5834 *
5835 * Return: 0 for success, else failure
5836 */
5837 static u8
5838 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5839 u16 handle, u8 access_status)
5840 {
5841 u8 rc = 1;
5842 char *desc = NULL;
5843
5844 switch (access_status) {
5845 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
5846 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
5847 rc = 0;
5848 break;
5849 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
5850 desc = "sata capability failed";
5851 break;
5852 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
5853 desc = "sata affiliation conflict";
5854 break;
5855 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
5856 desc = "route not addressable";
5857 break;
5858 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
5859 desc = "smp error not addressable";
5860 break;
5861 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
5862 desc = "device blocked";
5863 break;
5864 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
5865 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
5866 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
5867 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
5868 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
5869 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
5870 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
5871 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
5872 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
5873 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
5874 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
5875 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
5876 desc = "sata initialization failed";
5877 break;
5878 default:
5879 desc = "unknown";
5880 break;
5881 }
5882
5883 if (!rc)
5884 return 0;
5885
5886 pr_err(MPT3SAS_FMT
5887 "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
5888 ioc->name, desc, (unsigned long long)sas_address, handle);
5889 return rc;
5890 }
5891
5892 /**
5893 * _scsih_check_device - checking device responsiveness
5894 * @ioc: per adapter object
5895 * @parent_sas_address: sas address of parent expander or sas host
5896 * @handle: attached device handle
5897 * @phy_number: phy number
5898 * @link_rate: new link rate
5899 */
5900 static void
5901 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5902 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
5903 {
5904 Mpi2ConfigReply_t mpi_reply;
5905 Mpi2SasDevicePage0_t sas_device_pg0;
5906 struct _sas_device *sas_device;
5907 struct _enclosure_node *enclosure_dev = NULL;
5908 u32 ioc_status;
5909 unsigned long flags;
5910 u64 sas_address;
5911 struct scsi_target *starget;
5912 struct MPT3SAS_TARGET *sas_target_priv_data;
5913 u32 device_info;
5914
5915 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5916 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5917 return;
5918
5919 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5920 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5921 return;
5922
5923 /* wide port handling ~ we need to handle the device only once, for the
5924 * phy that is matched in sas device page zero
5925 */
5926 if (phy_number != sas_device_pg0.PhyNum)
5927 return;
5928
5929 /* check if this is end device */
5930 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
5931 if (!(_scsih_is_end_device(device_info)))
5932 return;
5933
5934 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5935 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5936 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
5937 sas_address);
5938
5939 if (!sas_device)
5940 goto out_unlock;
5941
5942 if (unlikely(sas_device->handle != handle)) {
5943 starget = sas_device->starget;
5944 sas_target_priv_data = starget->hostdata;
5945 starget_printk(KERN_INFO, starget,
5946 "handle changed from(0x%04x) to (0x%04x)!!!\n",
5947 sas_device->handle, handle);
5948 sas_target_priv_data->handle = handle;
5949 sas_device->handle = handle;
5950 if (le16_to_cpu(sas_device_pg0.Flags) &
5951 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5952 sas_device->enclosure_level =
5953 sas_device_pg0.EnclosureLevel;
5954 memcpy(sas_device->connector_name,
5955 sas_device_pg0.ConnectorName, 4);
5956 sas_device->connector_name[4] = '\0';
5957 } else {
5958 sas_device->enclosure_level = 0;
5959 sas_device->connector_name[0] = '\0';
5960 }
5961
5962 sas_device->enclosure_handle =
5963 le16_to_cpu(sas_device_pg0.EnclosureHandle);
5964 sas_device->is_chassis_slot_valid = 0;
5965 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
5966 sas_device->enclosure_handle);
5967 if (enclosure_dev) {
5968 sas_device->enclosure_logical_id =
5969 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5970 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
5971 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
5972 sas_device->is_chassis_slot_valid = 1;
5973 sas_device->chassis_slot =
5974 enclosure_dev->pg0.ChassisSlot;
5975 }
5976 }
5977 }
5978
5979 /* check if device is present */
5980 if (!(le16_to_cpu(sas_device_pg0.Flags) &
5981 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
5982 pr_err(MPT3SAS_FMT
5983 "device is not present handle(0x%04x), flags!!!\n",
5984 ioc->name, handle);
5985 goto out_unlock;
5986 }
5987
5988 /* check if there were any issues with discovery */
5989 if (_scsih_check_access_status(ioc, sas_address, handle,
5990 sas_device_pg0.AccessStatus))
5991 goto out_unlock;
5992
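/*
 * The device is present and passed the access status check; drop the
 * lock and resume any I/O that was blocked against this SAS address.
 */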
5993 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5994 _scsih_ublock_io_device(ioc, sas_address);
5995
5996 if (sas_device)
5997 sas_device_put(sas_device);
5998 return;
5999
6000 out_unlock:
6001 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6002 if (sas_device)
6003 sas_device_put(sas_device);
6004 }
6005
6006 /**
6007 * _scsih_add_device - creating sas device object
6008 * @ioc: per adapter object
6009 * @handle: sas device handle
6010 * @phy_num: phy number that the end device is attached to
6011 * @is_pd: is this hidden raid component
6012 *
6013 * Creating end device object, stored in ioc->sas_device_list.
6014 *
6015 * Return: 0 for success, non-zero for failure.
6016 */
6017 static int
6018 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6019 u8 is_pd)
6020 {
6021 Mpi2ConfigReply_t mpi_reply;
6022 Mpi2SasDevicePage0_t sas_device_pg0;
6023 struct _sas_device *sas_device;
6024 struct _enclosure_node *enclosure_dev = NULL;
6025 u32 ioc_status;
6026 u64 sas_address;
6027 u32 device_info;
6028
6029 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6030 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6031 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
6032 ioc->name, __FILE__, __LINE__, __func__);
6033 return -1;
6034 }
6035
6036 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6037 MPI2_IOCSTATUS_MASK;
6038 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6039 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
6040 ioc->name, __FILE__, __LINE__, __func__);
6041 return -1;
6042 }
6043
6044 /* check if this is end device */
6045 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6046 if (!(_scsih_is_end_device(device_info)))
6047 return -1;
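/*
 * Mark this handle as having a device-add pending towards the OS; the
 * bit is cleared again below if the device turns out to already exist.
 */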
6048 set_bit(handle, ioc->pend_os_device_add);
6049 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6050
6051 /* check if device is present */
6052 if (!(le16_to_cpu(sas_device_pg0.Flags) &
6053 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6054 pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
6055 ioc->name, handle);
6056 return -1;
6057 }
6058
6059 /* check if there were any issues with discovery */
6060 if (_scsih_check_access_status(ioc, sas_address, handle,
6061 sas_device_pg0.AccessStatus))
6062 return -1;
6063
6064 sas_device = mpt3sas_get_sdev_by_addr(ioc,
6065 sas_address);
6066 if (sas_device) {
6067 clear_bit(handle, ioc->pend_os_device_add);
6068 sas_device_put(sas_device);
6069 return -1;
6070 }
6071
6072 if (sas_device_pg0.EnclosureHandle) {
6073 enclosure_dev =
6074 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6075 le16_to_cpu(sas_device_pg0.EnclosureHandle));
6076 if (enclosure_dev == NULL)
6077 pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
6078 "doesn't match with enclosure device!\n",
6079 ioc->name, sas_device_pg0.EnclosureHandle);
6080 }
6081
6082 sas_device = kzalloc(sizeof(struct _sas_device),
6083 GFP_KERNEL);
6084 if (!sas_device) {
6085 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
6086 ioc->name, __FILE__, __LINE__, __func__);
6087 return 0;
6088 }
6089
6090 kref_init(&sas_device->refcount);
6091 sas_device->handle = handle;
6092 if (_scsih_get_sas_address(ioc,
6093 le16_to_cpu(sas_device_pg0.ParentDevHandle),
6094 &sas_device->sas_address_parent) != 0)
6095 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
6096 ioc->name, __FILE__, __LINE__, __func__);
6097 sas_device->enclosure_handle =
6098 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6099 if (sas_device->enclosure_handle != 0)
6100 sas_device->slot =
6101 le16_to_cpu(sas_device_pg0.Slot);
6102 sas_device->device_info = device_info;
6103 sas_device->sas_address = sas_address;
6104 sas_device->phy = sas_device_pg0.PhyNum;
6105 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6106 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6107
6108 if (le16_to_cpu(sas_device_pg0.Flags)
6109 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6110 sas_device->enclosure_level =
6111 sas_device_pg0.EnclosureLevel;
6112 memcpy(sas_device->connector_name,
6113 sas_device_pg0.ConnectorName, 4);
6114 sas_device->connector_name[4] = '\0';
6115 } else {
6116 sas_device->enclosure_level = 0;
6117 sas_device->connector_name[0] = '\0';
6118 }
6119 /* get enclosure_logical_id & chassis_slot*/
6120 sas_device->is_chassis_slot_valid = 0;
6121 if (enclosure_dev) {
6122 sas_device->enclosure_logical_id =
6123 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6124 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6125 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6126 sas_device->is_chassis_slot_valid = 1;
6127 sas_device->chassis_slot =
6128 enclosure_dev->pg0.ChassisSlot;
6129 }
6130 }
6131
6132 /* get device name */
6133 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6134
6135 if (ioc->wait_for_discovery_to_complete)
6136 _scsih_sas_device_init_add(ioc, sas_device);
6137 else
6138 _scsih_sas_device_add(ioc, sas_device);
6139
6140 sas_device_put(sas_device);
6141 return 0;
6142 }
6143
6144 /**
6145 * _scsih_remove_device - removing sas device object
6146 * @ioc: per adapter object
6147 * @sas_device: the sas_device object
6148 */
6149 static void
6150 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6151 struct _sas_device *sas_device)
6152 {
6153 struct MPT3SAS_TARGET *sas_target_priv_data;
6154
6155 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6156 (sas_device->pfa_led_on)) {
6157 _scsih_turn_off_pfa_led(ioc, sas_device);
6158 sas_device->pfa_led_on = 0;
6159 }
6160
6161 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6162 "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6163 ioc->name, __func__,
6164 sas_device->handle, (unsigned long long)
6165 sas_device->sas_address));
6166
6167 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6168 NULL, NULL));
6169
6170 if (sas_device->starget && sas_device->starget->hostdata) {
6171 sas_target_priv_data = sas_device->starget->hostdata;
6172 sas_target_priv_data->deleted = 1;
6173 _scsih_ublock_io_device(ioc, sas_device->sas_address);
6174 sas_target_priv_data->handle =
6175 MPT3SAS_INVALID_DEVICE_HANDLE;
6176 }
6177
6178 if (!ioc->hide_drives)
6179 mpt3sas_transport_port_remove(ioc,
6180 sas_device->sas_address,
6181 sas_device->sas_address_parent);
6182
6183 pr_info(MPT3SAS_FMT
6184 "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6185 ioc->name, sas_device->handle,
6186 (unsigned long long) sas_device->sas_address);
6187
6188 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6189
6190 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6191 "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6192 ioc->name, __func__,
6193 sas_device->handle, (unsigned long long)
6194 sas_device->sas_address));
6195 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6196 NULL, NULL));
6197 }
6198
6199 /**
6200 * _scsih_sas_topology_change_event_debug - debug for topology event
6201 * @ioc: per adapter object
6202 * @event_data: event data payload
6203 * Context: user.
6204 */
6205 static void
6206 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6207 Mpi2EventDataSasTopologyChangeList_t *event_data)
6208 {
6209 int i;
6210 u16 handle;
6211 u16 reason_code;
6212 u8 phy_number;
6213 char *status_str = NULL;
6214 u8 link_rate, prev_link_rate;
6215
6216 switch (event_data->ExpStatus) {
6217 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6218 status_str = "add";
6219 break;
6220 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6221 status_str = "remove";
6222 break;
6223 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6224 case 0:
6225 status_str = "responding";
6226 break;
6227 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6228 status_str = "remove delay";
6229 break;
6230 default:
6231 status_str = "unknown status";
6232 break;
6233 }
6234 pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
6235 ioc->name, status_str);
6236 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6237 "start_phy(%02d), count(%d)\n",
6238 le16_to_cpu(event_data->ExpanderDevHandle),
6239 le16_to_cpu(event_data->EnclosureHandle),
6240 event_data->StartPhyNum, event_data->NumEntries);
6241 for (i = 0; i < event_data->NumEntries; i++) {
6242 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6243 if (!handle)
6244 continue;
6245 phy_number = event_data->StartPhyNum + i;
6246 reason_code = event_data->PHY[i].PhyStatus &
6247 MPI2_EVENT_SAS_TOPO_RC_MASK;
6248 switch (reason_code) {
6249 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6250 status_str = "target add";
6251 break;
6252 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6253 status_str = "target remove";
6254 break;
6255 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6256 status_str = "delay target remove";
6257 break;
6258 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6259 status_str = "link rate change";
6260 break;
6261 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6262 status_str = "target responding";
6263 break;
6264 default:
6265 status_str = "unknown";
6266 break;
6267 }
6268 link_rate = event_data->PHY[i].LinkRate >> 4;
6269 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6270 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6271 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6272 handle, status_str, link_rate, prev_link_rate);
6273
6274 }
6275 }
6276
6277 /**
6278 * _scsih_sas_topology_change_event - handle topology changes
6279 * @ioc: per adapter object
6280 * @fw_event: The fw_event_work object
6281 * Context: user.
6282 *
6283 */
6284 static int
6285 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6286 struct fw_event_work *fw_event)
6287 {
6288 int i;
6289 u16 parent_handle, handle;
6290 u16 reason_code;
6291 u8 phy_number, max_phys;
6292 struct _sas_node *sas_expander;
6293 u64 sas_address;
6294 unsigned long flags;
6295 u8 link_rate, prev_link_rate;
6296 Mpi2EventDataSasTopologyChangeList_t *event_data =
6297 (Mpi2EventDataSasTopologyChangeList_t *)
6298 fw_event->event_data;
6299
6300 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6301 _scsih_sas_topology_change_event_debug(ioc, event_data);
6302
6303 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6304 return 0;
6305
6306 if (!ioc->sas_hba.num_phys)
6307 _scsih_sas_host_add(ioc);
6308 else
6309 _scsih_sas_host_refresh(ioc);
6310
6311 if (fw_event->ignore) {
6312 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6313 "ignoring expander event\n", ioc->name));
6314 return 0;
6315 }
6316
6317 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6318
6319 /* handle expander add */
6320 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6321 if (_scsih_expander_add(ioc, parent_handle) != 0)
6322 return 0;
6323
6324 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6325 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6326 parent_handle);
6327 if (sas_expander) {
6328 sas_address = sas_expander->sas_address;
6329 max_phys = sas_expander->num_phys;
6330 } else if (parent_handle < ioc->sas_hba.num_phys) {
6331 sas_address = ioc->sas_hba.sas_address;
6332 max_phys = ioc->sas_hba.num_phys;
6333 } else {
6334 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6335 return 0;
6336 }
6337 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6338
6339 /* handle siblings events */
6340 for (i = 0; i < event_data->NumEntries; i++) {
6341 if (fw_event->ignore) {
6342 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6343 "ignoring expander event\n", ioc->name));
6344 return 0;
6345 }
6346 if (ioc->remove_host || ioc->pci_error_recovery)
6347 return 0;
6348 phy_number = event_data->StartPhyNum + i;
6349 if (phy_number >= max_phys)
6350 continue;
6351 reason_code = event_data->PHY[i].PhyStatus &
6352 MPI2_EVENT_SAS_TOPO_RC_MASK;
6353 if ((event_data->PHY[i].PhyStatus &
6354 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6355 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6356 continue;
6357 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6358 if (!handle)
6359 continue;
6360 link_rate = event_data->PHY[i].LinkRate >> 4;
6361 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6362 switch (reason_code) {
6363 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6364
6365 if (ioc->shost_recovery)
6366 break;
6367
6368 if (link_rate == prev_link_rate)
6369 break;
6370
6371 mpt3sas_transport_update_links(ioc, sas_address,
6372 handle, phy_number, link_rate);
6373
6374 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6375 break;
6376
6377 _scsih_check_device(ioc, sas_address, handle,
6378 phy_number, link_rate);
6379
6380 if (!test_bit(handle, ioc->pend_os_device_add))
6381 break;
6382
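/*
 * The link came up and the handle is still marked as pending an OS
 * device add, so fall through and treat it like a newly added target.
 */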
6383 /* fall through */
6384
6385 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6386
6387 if (ioc->shost_recovery)
6388 break;
6389
6390 mpt3sas_transport_update_links(ioc, sas_address,
6391 handle, phy_number, link_rate);
6392
6393 _scsih_add_device(ioc, handle, phy_number, 0);
6394
6395 break;
6396 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6397
6398 _scsih_device_remove_by_handle(ioc, handle);
6399 break;
6400 }
6401 }
6402
6403 /* handle expander removal */
6404 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6405 sas_expander)
6406 mpt3sas_expander_remove(ioc, sas_address);
6407
6408 return 0;
6409 }
6410
6411 /**
6412 * _scsih_sas_device_status_change_event_debug - debug for device event
6413 * @ioc: per adapter object
6414 * @event_data: event data payload
6415 * Context: user.
6416 */
6417 static void
6418 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6419 Mpi2EventDataSasDeviceStatusChange_t *event_data)
6420 {
6421 char *reason_str = NULL;
6422
6423 switch (event_data->ReasonCode) {
6424 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6425 reason_str = "smart data";
6426 break;
6427 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6428 reason_str = "unsupported device discovered";
6429 break;
6430 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6431 reason_str = "internal device reset";
6432 break;
6433 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6434 reason_str = "internal task abort";
6435 break;
6436 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6437 reason_str = "internal task abort set";
6438 break;
6439 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6440 reason_str = "internal clear task set";
6441 break;
6442 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6443 reason_str = "internal query task";
6444 break;
6445 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6446 reason_str = "sata init failure";
6447 break;
6448 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6449 reason_str = "internal device reset complete";
6450 break;
6451 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6452 reason_str = "internal task abort complete";
6453 break;
6454 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6455 reason_str = "internal async notification";
6456 break;
6457 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6458 reason_str = "expander reduced functionality";
6459 break;
6460 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6461 reason_str = "expander reduced functionality complete";
6462 break;
6463 default:
6464 reason_str = "unknown reason";
6465 break;
6466 }
6467 pr_info(MPT3SAS_FMT "device status change: (%s)\n"
6468 "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6469 ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
6470 (unsigned long long)le64_to_cpu(event_data->SASAddress),
6471 le16_to_cpu(event_data->TaskTag));
6472 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6473 pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
6474 event_data->ASC, event_data->ASCQ);
6475 pr_info("\n");
6476 }
6477
6478 /**
6479 * _scsih_sas_device_status_change_event - handle device status change
6480 * @ioc: per adapter object
6481 * @fw_event: The fw_event_work object
6482 * Context: user.
6483 */
6484 static void
6485 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6486 struct fw_event_work *fw_event)
6487 {
6488 struct MPT3SAS_TARGET *target_priv_data;
6489 struct _sas_device *sas_device;
6490 u64 sas_address;
6491 unsigned long flags;
6492 Mpi2EventDataSasDeviceStatusChange_t *event_data =
6493 (Mpi2EventDataSasDeviceStatusChange_t *)
6494 fw_event->event_data;
6495
6496 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6497 _scsih_sas_device_status_change_event_debug(ioc,
6498 event_data);
6499
6500 /* In MPI Revision K (0xC), the internal device reset complete was
6501 * implemented, so avoid setting tm_busy flag for older firmware.
6502 */
6503 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6504 return;
6505
6506 if (event_data->ReasonCode !=
6507 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6508 event_data->ReasonCode !=
6509 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6510 return;
6511
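/*
 * tm_busy brackets a firmware-internal device reset: it is set on the
 * reset-start event and cleared on the completion event so the I/O
 * path can hold off new commands to the target in the meantime.
 */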
6512 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6513 sas_address = le64_to_cpu(event_data->SASAddress);
6514 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6515 sas_address);
6516
6517 if (!sas_device || !sas_device->starget)
6518 goto out;
6519
6520 target_priv_data = sas_device->starget->hostdata;
6521 if (!target_priv_data)
6522 goto out;
6523
6524 if (event_data->ReasonCode ==
6525 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6526 target_priv_data->tm_busy = 1;
6527 else
6528 target_priv_data->tm_busy = 0;
6529
6530 out:
6531 if (sas_device)
6532 sas_device_put(sas_device);
6533
6534 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6535 }
6536
6537
6538 /**
6539 * _scsih_check_pcie_access_status - check access flags
6540 * @ioc: per adapter object
6541 * @wwid: wwid
6542 * @handle: sas device handle
6543 * @access_status: errors returned during discovery of the device
6544 *
6545 * Return: 0 for success, else failure
6546 */
6547 static u8
6548 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6549 u16 handle, u8 access_status)
6550 {
6551 u8 rc = 1;
6552 char *desc = NULL;
6553
6554 switch (access_status) {
6555 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6556 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6557 rc = 0;
6558 break;
6559 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6560 desc = "PCIe device capability failed";
6561 break;
6562 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6563 desc = "PCIe device blocked";
6564 break;
6565 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6566 desc = "PCIe device mem space access failed";
6567 break;
6568 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6569 desc = "PCIe device unsupported";
6570 break;
6571 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6572 desc = "PCIe device MSIx Required";
6573 break;
6574 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6575 desc = "PCIe device init fail max";
6576 break;
6577 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6578 desc = "PCIe device status unknown";
6579 break;
6580 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6581 desc = "nvme ready timeout";
6582 break;
6583 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6584 desc = "nvme device configuration unsupported";
6585 break;
6586 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6587 desc = "nvme identify failed";
6588 break;
6589 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6590 desc = "nvme qconfig failed";
6591 break;
6592 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6593 desc = "nvme qcreation failed";
6594 break;
6595 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6596 desc = "nvme eventcfg failed";
6597 break;
6598 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6599 desc = "nvme get feature stat failed";
6600 break;
6601 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6602 desc = "nvme idle timeout";
6603 break;
6604 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6605 desc = "nvme failure status";
6606 break;
6607 default:
6608 pr_err(MPT3SAS_FMT
6609 " NVMe discovery error(0x%02x): wwid(0x%016llx),"
6610 "handle(0x%04x)\n", ioc->name, access_status,
6611 (unsigned long long)wwid, handle);
6612 return rc;
6613 }
6614
6615 if (!rc)
6616 return rc;
6617
6618 pr_info(MPT3SAS_FMT
6619 "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6620 ioc->name, desc,
6621 (unsigned long long)wwid, handle);
6622 return rc;
6623 }
6624
6625 /**
6626 * _scsih_pcie_device_remove_from_sml - removing pcie device
6627 * from SML and free up associated memory
6628 * @ioc: per adapter object
6629 * @pcie_device: the pcie_device object
6630 */
6631 static void
6632 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6633 struct _pcie_device *pcie_device)
6634 {
6635 struct MPT3SAS_TARGET *sas_target_priv_data;
6636
6637 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6638 "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
6639 pcie_device->handle, (unsigned long long)
6640 pcie_device->wwid));
6641 if (pcie_device->enclosure_handle != 0)
6642 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6643 "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6644 ioc->name, __func__,
6645 (unsigned long long)pcie_device->enclosure_logical_id,
6646 pcie_device->slot));
6647 if (pcie_device->connector_name[0] != '\0')
6648 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6649 "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
6650 ioc->name, __func__,
6651 pcie_device->enclosure_level,
6652 pcie_device->connector_name));
6653
6654 if (pcie_device->starget && pcie_device->starget->hostdata) {
6655 sas_target_priv_data = pcie_device->starget->hostdata;
6656 sas_target_priv_data->deleted = 1;
6657 _scsih_ublock_io_device(ioc, pcie_device->wwid);
6658 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6659 }
6660
6661 pr_info(MPT3SAS_FMT
6662 "removing handle(0x%04x), wwid (0x%016llx)\n",
6663 ioc->name, pcie_device->handle,
6664 (unsigned long long) pcie_device->wwid);
6665 if (pcie_device->enclosure_handle != 0)
6666 pr_info(MPT3SAS_FMT
6667 "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6668 ioc->name,
6669 (unsigned long long)pcie_device->enclosure_logical_id,
6670 pcie_device->slot);
6671 if (pcie_device->connector_name[0] != '\0')
6672 pr_info(MPT3SAS_FMT
6673 "removing: enclosure level(0x%04x), connector name( %s)\n",
6674 ioc->name, pcie_device->enclosure_level,
6675 pcie_device->connector_name);
6676
6677 if (pcie_device->starget)
6678 scsi_remove_target(&pcie_device->starget->dev);
6679 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6680 "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
6681 pcie_device->handle, (unsigned long long)
6682 pcie_device->wwid));
6683 if (pcie_device->enclosure_handle != 0)
6684 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6685 "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6686 ioc->name, __func__,
6687 (unsigned long long)pcie_device->enclosure_logical_id,
6688 pcie_device->slot));
6689 if (pcie_device->connector_name[0] != '\0')
6690 dewtprintk(ioc, pr_info(MPT3SAS_FMT
6691 "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
6692 ioc->name, __func__, pcie_device->enclosure_level,
6693 pcie_device->connector_name));
6694
6695 kfree(pcie_device->serial_number);
6696 }
6697
6698
6699 /**
6700 * _scsih_pcie_check_device - checking device responsiveness
6701 * @ioc: per adapter object
6702 * @handle: attached device handle
6703 */
6704 static void
6705 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6706 {
6707 Mpi2ConfigReply_t mpi_reply;
6708 Mpi26PCIeDevicePage0_t pcie_device_pg0;
6709 u32 ioc_status;
6710 struct _pcie_device *pcie_device;
6711 u64 wwid;
6712 unsigned long flags;
6713 struct scsi_target *starget;
6714 struct MPT3SAS_TARGET *sas_target_priv_data;
6715 u32 device_info;
6716
6717 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6718 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
6719 return;
6720
6721 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6722 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6723 return;
6724
6725 /* check if this is end device */
6726 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6727 if (!(_scsih_is_nvme_device(device_info)))
6728 return;
6729
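	/*
	 * Look up the device by WWID under the pcie_device_lock; when
	 * found, the lookup returns with a reference held which is
	 * dropped via pcie_device_put() before this function returns.
	 */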
6730 wwid = le64_to_cpu(pcie_device_pg0.WWID);
6731 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
6732 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
6733
6734 if (!pcie_device) {
6735 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6736 return;
6737 }
6738
6739 if (unlikely(pcie_device->handle != handle)) {
6740 starget = pcie_device->starget;
6741 sas_target_priv_data = starget->hostdata;
6742 starget_printk(KERN_INFO, starget,
6743 "handle changed from(0x%04x) to (0x%04x)!!!\n",
6744 pcie_device->handle, handle);
6745 sas_target_priv_data->handle = handle;
6746 pcie_device->handle = handle;
6747
6748 if (le32_to_cpu(pcie_device_pg0.Flags) &
6749 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6750 pcie_device->enclosure_level =
6751 pcie_device_pg0.EnclosureLevel;
6752 memcpy(&pcie_device->connector_name[0],
6753 &pcie_device_pg0.ConnectorName[0], 4);
6754 } else {
6755 pcie_device->enclosure_level = 0;
6756 pcie_device->connector_name[0] = '\0';
6757 }
6758 }
6759
6760 /* check if device is present */
6761 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6762 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6763 pr_info(MPT3SAS_FMT
6764 "device is not present handle(0x%04x), flags!!!\n",
6765 ioc->name, handle);
6766 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6767 pcie_device_put(pcie_device);
6768 return;
6769 }
6770
6771 /* check if there were any issues with discovery */
6772 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6773 pcie_device_pg0.AccessStatus)) {
6774 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6775 pcie_device_put(pcie_device);
6776 return;
6777 }
6778
6779 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6780 pcie_device_put(pcie_device);
6781
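	/* Device is present and accessible: resume any blocked I/O. */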
6782 _scsih_ublock_io_device(ioc, wwid);
6783
6784 return;
6785 }
6786
6787 /**
6788 * _scsih_pcie_add_device - creating pcie device object
6789 * @ioc: per adapter object
6790 * @handle: pcie device handle
6791 *
6792 * Creating end device object, stored in ioc->pcie_device_list.
6793 *
6794 * Return: 1 means queue the event later, 0 means complete the event
6795 */
6796 static int
6797 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6798 {
6799 Mpi26PCIeDevicePage0_t pcie_device_pg0;
6800 Mpi26PCIeDevicePage2_t pcie_device_pg2;
6801 Mpi2ConfigReply_t mpi_reply;
6802 struct _pcie_device *pcie_device;
6803 struct _enclosure_node *enclosure_dev;
6804 u32 ioc_status;
6805 u64 wwid;
6806
6807 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6808 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
6809 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
6810 ioc->name, __FILE__, __LINE__, __func__);
6811 return 0;
6812 }
6813 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6814 MPI2_IOCSTATUS_MASK;
6815 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6816 pr_err(MPT3SAS_FMT
6817 "failure at %s:%d/%s()!\n",
6818 ioc->name, __FILE__, __LINE__, __func__);
6819 return 0;
6820 }
6821
6822 set_bit(handle, ioc->pend_os_device_add);
6823 wwid = le64_to_cpu(pcie_device_pg0.WWID);
6824
6825 /* check if device is present */
6826 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6827 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6828 pr_err(MPT3SAS_FMT
6829 "device is not present handle(0x04%x)!!!\n",
6830 ioc->name, handle);
6831 return 0;
6832 }
6833
6834 /* check if there were any issues with discovery */
6835 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6836 pcie_device_pg0.AccessStatus))
6837 return 0;
6838
6839 if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
6840 return 0;
6841
6842 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
6843 if (pcie_device) {
6844 clear_bit(handle, ioc->pend_os_device_add);
6845 pcie_device_put(pcie_device);
6846 return 0;
6847 }
6848
6849 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6850 if (!pcie_device) {
6851 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
6852 ioc->name, __FILE__, __LINE__, __func__);
6853 return 0;
6854 }
6855
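	/*
	 * Populate the new pcie_device from PCIe Device Page 0, starting
	 * with an initial reference; it is added to the driver's pcie
	 * device list (or the init list during discovery) further down.
	 */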
6856 kref_init(&pcie_device->refcount);
6857 pcie_device->id = ioc->pcie_target_id++;
6858 pcie_device->channel = PCIE_CHANNEL;
6859 pcie_device->handle = handle;
6860 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6861 pcie_device->wwid = wwid;
6862 pcie_device->port_num = pcie_device_pg0.PortNum;
6863 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
6864 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6865
6866 pcie_device->enclosure_handle =
6867 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
6868 if (pcie_device->enclosure_handle != 0)
6869 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6870
6871 if (le32_to_cpu(pcie_device_pg0.Flags) &
6872 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6873 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6874 memcpy(&pcie_device->connector_name[0],
6875 &pcie_device_pg0.ConnectorName[0], 4);
6876 } else {
6877 pcie_device->enclosure_level = 0;
6878 pcie_device->connector_name[0] = '\0';
6879 }
6880
6881 /* get enclosure_logical_id */
6882 if (pcie_device->enclosure_handle) {
6883 enclosure_dev =
6884 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6885 pcie_device->enclosure_handle);
6886 if (enclosure_dev)
6887 pcie_device->enclosure_logical_id =
6888 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6889 }
6890 /* TODO -- Add device name once FW supports it */
6891 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6892 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
6893 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
6894 ioc->name, __FILE__, __LINE__, __func__);
6895 kfree(pcie_device);
6896 return 0;
6897 }
6898
6899 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6900 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6901 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
6902 ioc->name, __FILE__, __LINE__, __func__);
6903 kfree(pcie_device);
6904 return 0;
6905 }
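	/*
	 * PCIe Device Page 2 carries the NVMe-specific limits: the
	 * maximum data transfer size (MDTS) and the controller reset
	 * timeout, which defaults to 30 seconds when not reported.
	 */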
6906 pcie_device->nvme_mdts =
6907 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6908 if (pcie_device_pg2.ControllerResetTO)
6909 pcie_device->reset_timeout =
6910 pcie_device_pg2.ControllerResetTO;
6911 else
6912 pcie_device->reset_timeout = 30;
6913
6914 if (ioc->wait_for_discovery_to_complete)
6915 _scsih_pcie_device_init_add(ioc, pcie_device);
6916 else
6917 _scsih_pcie_device_add(ioc, pcie_device);
6918
6919 pcie_device_put(pcie_device);
6920 return 0;
6921 }
6922
6923 /**
6924 * _scsih_pcie_topology_change_event_debug - debug for topology
6925 * event
6926 * @ioc: per adapter object
6927 * @event_data: event data payload
6928 * Context: user.
6929 */
6930 static void
6931 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6932 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
6933 {
6934 int i;
6935 u16 handle;
6936 u16 reason_code;
6937 u8 port_number;
6938 char *status_str = NULL;
6939 u8 link_rate, prev_link_rate;
6940
6941 switch (event_data->SwitchStatus) {
6942 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
6943 status_str = "add";
6944 break;
6945 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
6946 status_str = "remove";
6947 break;
6948 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
6949 case 0:
6950 status_str = "responding";
6951 break;
6952 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
6953 status_str = "remove delay";
6954 break;
6955 default:
6956 status_str = "unknown status";
6957 break;
6958 }
6959 pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
6960 ioc->name, status_str);
6961 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
6962 "start_port(%02d), count(%d)\n",
6963 le16_to_cpu(event_data->SwitchDevHandle),
6964 le16_to_cpu(event_data->EnclosureHandle),
6965 event_data->StartPortNum, event_data->NumEntries);
6966 for (i = 0; i < event_data->NumEntries; i++) {
6967 handle =
6968 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
6969 if (!handle)
6970 continue;
6971 port_number = event_data->StartPortNum + i;
6972 reason_code = event_data->PortEntry[i].PortStatus;
6973 switch (reason_code) {
6974 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
6975 status_str = "target add";
6976 break;
6977 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
6978 status_str = "target remove";
6979 break;
6980 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
6981 status_str = "delay target remove";
6982 break;
6983 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
6984 status_str = "link rate change";
6985 break;
6986 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
6987 status_str = "target responding";
6988 break;
6989 default:
6990 status_str = "unknown";
6991 break;
6992 }
6993 link_rate = event_data->PortEntry[i].CurrentPortInfo &
6994 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
6995 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
6996 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
6997 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
6998 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
6999 handle, status_str, link_rate, prev_link_rate);
7000 }
7001 }
7002
7003 /**
7004 * _scsih_pcie_topology_change_event - handle PCIe topology
7005 * changes
7006 * @ioc: per adapter object
7007 * @fw_event: The fw_event_work object
7008 * Context: user.
7009 *
7010 */
7011 static void
7012 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7013 struct fw_event_work *fw_event)
7014 {
7015 int i;
7016 u16 handle;
7017 u16 reason_code;
7018 u8 link_rate, prev_link_rate;
7019 unsigned long flags;
7020 int rc;
7021 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7022 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7023 struct _pcie_device *pcie_device;
7024
7025 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7026 _scsih_pcie_topology_change_event_debug(ioc, event_data);
7027
7028 if (ioc->shost_recovery || ioc->remove_host ||
7029 ioc->pci_error_recovery)
7030 return;
7031
7032 if (fw_event->ignore) {
7033 dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
7034 ioc->name));
7035 return;
7036 }
7037
7038 /* handle siblings events */
7039 for (i = 0; i < event_data->NumEntries; i++) {
7040 if (fw_event->ignore) {
7041 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7042 "ignoring switch event\n", ioc->name));
7043 return;
7044 }
7045 if (ioc->remove_host || ioc->pci_error_recovery)
7046 return;
7047 reason_code = event_data->PortEntry[i].PortStatus;
7048 handle =
7049 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7050 if (!handle)
7051 continue;
7052
7053 link_rate = event_data->PortEntry[i].CurrentPortInfo
7054 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7055 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7056 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7057
7058 switch (reason_code) {
7059 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7060 if (ioc->shost_recovery)
7061 break;
7062 if (link_rate == prev_link_rate)
7063 break;
7064 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7065 break;
7066
7067 _scsih_pcie_check_device(ioc, handle);
7068
7069 /* The code after this point handles the case where a
7070 * device has been added but keeps returning BUSY for
7071 * some time. Then, before the Device Missing Delay
7072 * expires and the device becomes READY, the device is
7073 * removed and added back.
7074 */
7075 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7076 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7077 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7078
7079 if (pcie_device) {
7080 pcie_device_put(pcie_device);
7081 break;
7082 }
7083
7084 if (!test_bit(handle, ioc->pend_os_device_add))
7085 break;
7086
7087 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7088 "handle(0x%04x) device not found: convert "
7089 "event to a device add\n", ioc->name, handle));
7090 event_data->PortEntry[i].PortStatus &= 0xF0;
7091 event_data->PortEntry[i].PortStatus |=
7092 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7093 /* fall through */
7094 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7095 if (ioc->shost_recovery)
7096 break;
7097 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7098 break;
7099
7100 rc = _scsih_pcie_add_device(ioc, handle);
7101 if (!rc) {
7102 /* mark entry vacant */
7103 /* TODO: This needs to be reviewed and fixed;
7104 * there is no entry status value that can
7105 * mark an event entry as void/vacant.
7106 */
7107 event_data->PortEntry[i].PortStatus |=
7108 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7109 }
7110 break;
7111 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7112 _scsih_pcie_device_remove_by_handle(ioc, handle);
7113 break;
7114 }
7115 }
7116 }
7117
7118 /**
7119 * _scsih_pcie_device_status_change_event_debug - debug for device event
7120 * @ioc: per adapter object
7121 * @event_data: event data payload
7122 * Context: user.
7123 */
7124 static void
7125 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7126 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7127 {
7128 char *reason_str = NULL;
7129
7130 switch (event_data->ReasonCode) {
7131 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7132 reason_str = "smart data";
7133 break;
7134 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7135 reason_str = "unsupported device discovered";
7136 break;
7137 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7138 reason_str = "internal device reset";
7139 break;
7140 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7141 reason_str = "internal task abort";
7142 break;
7143 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7144 reason_str = "internal task abort set";
7145 break;
7146 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7147 reason_str = "internal clear task set";
7148 break;
7149 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7150 reason_str = "internal query task";
7151 break;
7152 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7153 reason_str = "device init failure";
7154 break;
7155 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7156 reason_str = "internal device reset complete";
7157 break;
7158 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7159 reason_str = "internal task abort complete";
7160 break;
7161 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7162 reason_str = "internal async notification";
7163 break;
7164 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7165 reason_str = "pcie hot reset failed";
7166 break;
7167 default:
7168 reason_str = "unknown reason";
7169 break;
7170 }
7171
7172 pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
7173 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7174 ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
7175 (unsigned long long)le64_to_cpu(event_data->WWID),
7176 le16_to_cpu(event_data->TaskTag));
7177 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7178 pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
7179 event_data->ASC, event_data->ASCQ);
7180 pr_info("\n");
7181 }
7182
7183 /**
7184 * _scsih_pcie_device_status_change_event - handle device status
7185 * change
7186 * @ioc: per adapter object
7187 * @fw_event: The fw_event_work object
7188 * Context: user.
7189 */
7190 static void
7191 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7192 struct fw_event_work *fw_event)
7193 {
7194 struct MPT3SAS_TARGET *target_priv_data;
7195 struct _pcie_device *pcie_device;
7196 u64 wwid;
7197 unsigned long flags;
7198 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7199 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7200 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7201 _scsih_pcie_device_status_change_event_debug(ioc,
7202 event_data);
7203
7204 if (event_data->ReasonCode !=
7205 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7206 event_data->ReasonCode !=
7207 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7208 return;
7209
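	/*
	 * Toggle tm_busy around a firmware-internal device reset so the
	 * driver holds off issuing new commands to the device until the
	 * reset completes.
	 */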
7210 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7211 wwid = le64_to_cpu(event_data->WWID);
7212 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7213
7214 if (!pcie_device || !pcie_device->starget)
7215 goto out;
7216
7217 target_priv_data = pcie_device->starget->hostdata;
7218 if (!target_priv_data)
7219 goto out;
7220
7221 if (event_data->ReasonCode ==
7222 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7223 target_priv_data->tm_busy = 1;
7224 else
7225 target_priv_data->tm_busy = 0;
7226 out:
7227 if (pcie_device)
7228 pcie_device_put(pcie_device);
7229
7230 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7231 }
7232
7233 /**
7234 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7235 * event
7236 * @ioc: per adapter object
7237 * @event_data: event data payload
7238 * Context: user.
7239 */
7240 static void
7241 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7242 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7243 {
7244 char *reason_str = NULL;
7245
7246 switch (event_data->ReasonCode) {
7247 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7248 reason_str = "enclosure add";
7249 break;
7250 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7251 reason_str = "enclosure remove";
7252 break;
7253 default:
7254 reason_str = "unknown reason";
7255 break;
7256 }
7257
7258 pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
7259 "\thandle(0x%04x), enclosure logical id(0x%016llx)"
7260 " number slots(%d)\n", ioc->name, reason_str,
7261 le16_to_cpu(event_data->EnclosureHandle),
7262 (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
7263 le16_to_cpu(event_data->StartSlot));
7264 }
7265
7266 /**
7267 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7268 * @ioc: per adapter object
7269 * @fw_event: The fw_event_work object
7270 * Context: user.
7271 */
7272 static void
7273 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7274 struct fw_event_work *fw_event)
7275 {
7276 Mpi2ConfigReply_t mpi_reply;
7277 struct _enclosure_node *enclosure_dev = NULL;
7278 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7279 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7280 int rc;
7281 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7282
7283 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7284 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7285 (Mpi2EventDataSasEnclDevStatusChange_t *)
7286 fw_event->event_data);
7287 if (ioc->shost_recovery)
7288 return;
7289
7290 if (enclosure_handle)
7291 enclosure_dev =
7292 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7293 enclosure_handle);
7294 switch (event_data->ReasonCode) {
7295 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7296 if (!enclosure_dev) {
7297 enclosure_dev =
7298 kzalloc(sizeof(struct _enclosure_node),
7299 GFP_KERNEL);
7300 if (!enclosure_dev) {
7301 pr_info(MPT3SAS_FMT
7302 "failure at %s:%d/%s()!\n", ioc->name,
7303 __FILE__, __LINE__, __func__);
7304 return;
7305 }
7306 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7307 &enclosure_dev->pg0,
7308 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7309 enclosure_handle);
7310
7311 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7312 MPI2_IOCSTATUS_MASK)) {
7313 kfree(enclosure_dev);
7314 return;
7315 }
7316
7317 list_add_tail(&enclosure_dev->list,
7318 &ioc->enclosure_list);
7319 }
7320 break;
7321 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7322 if (enclosure_dev) {
7323 list_del(&enclosure_dev->list);
7324 kfree(enclosure_dev);
7325 }
7326 break;
7327 default:
7328 break;
7329 }
7330 }
7331
7332 /**
7333 * _scsih_sas_broadcast_primitive_event - handle broadcast events
7334 * @ioc: per adapter object
7335 * @fw_event: The fw_event_work object
7336 * Context: user.
7337 */
7338 static void
7339 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7340 struct fw_event_work *fw_event)
7341 {
7342 struct scsi_cmnd *scmd;
7343 struct scsi_device *sdev;
7344 struct scsiio_tracker *st;
7345 u16 smid, handle;
7346 u32 lun;
7347 struct MPT3SAS_DEVICE *sas_device_priv_data;
7348 u32 termination_count;
7349 u32 query_count;
7350 Mpi2SCSITaskManagementReply_t *mpi_reply;
7351 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7352 (Mpi2EventDataSasBroadcastPrimitive_t *)
7353 fw_event->event_data;
7354 u16 ioc_status;
7355 unsigned long flags;
7356 int r;
7357 u8 max_retries = 0;
7358 u8 task_abort_retries;
7359
7360 mutex_lock(&ioc->tm_cmds.mutex);
7361 pr_info(MPT3SAS_FMT
7362 "%s: enter: phy number(%d), width(%d)\n",
7363 ioc->name, __func__, event_data->PhyNum,
7364 event_data->PortWidth);
7365
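	/*
	 * Quiesce I/O on all devices while each outstanding command is
	 * queried and, if still active at the target, aborted.
	 */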
7366 _scsih_block_io_all_device(ioc);
7367
7368 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7369 mpi_reply = ioc->tm_cmds.reply;
7370 broadcast_aen_retry:
7371
7372 /* sanity checks for retrying this loop */
7373 if (max_retries++ == 5) {
7374 dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
7375 ioc->name, __func__));
7376 goto out;
7377 } else if (max_retries > 1)
7378 dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
7379 ioc->name, __func__, max_retries - 1));
7380
7381 termination_count = 0;
7382 query_count = 0;
7383 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7384 if (ioc->shost_recovery)
7385 goto out;
7386 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7387 if (!scmd)
7388 continue;
7389 st = scsi_cmd_priv(scmd);
7390 sdev = scmd->device;
7391 sas_device_priv_data = sdev->hostdata;
7392 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7393 continue;
7394 /* skip hidden raid components */
7395 if (sas_device_priv_data->sas_target->flags &
7396 MPT_TARGET_FLAGS_RAID_COMPONENT)
7397 continue;
7398 /* skip volumes */
7399 if (sas_device_priv_data->sas_target->flags &
7400 MPT_TARGET_FLAGS_VOLUME)
7401 continue;
7402 /* skip PCIe devices */
7403 if (sas_device_priv_data->sas_target->flags &
7404 MPT_TARGET_FLAGS_PCIE_DEVICE)
7405 continue;
7406
7407 handle = sas_device_priv_data->sas_target->handle;
7408 lun = sas_device_priv_data->lun;
7409 query_count++;
7410
7411 if (ioc->shost_recovery)
7412 goto out;
7413
7414 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7415 r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7416 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7417 st->msix_io, 30, 0);
7418 if (r == FAILED) {
7419 sdev_printk(KERN_WARNING, sdev,
7420 "mpt3sas_scsih_issue_tm: FAILED when sending "
7421 "QUERY_TASK: scmd(%p)\n", scmd);
7422 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7423 goto broadcast_aen_retry;
7424 }
7425 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7426 & MPI2_IOCSTATUS_MASK;
7427 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7428 sdev_printk(KERN_WARNING, sdev,
7429 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7430 ioc_status, scmd);
7431 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7432 goto broadcast_aen_retry;
7433 }
7434
7435 /* see if IO is still owned by IOC and target */
7436 if (mpi_reply->ResponseCode ==
7437 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7438 mpi_reply->ResponseCode ==
7439 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7440 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7441 continue;
7442 }
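		/*
		 * The command is still active at the target: abort it,
		 * retrying the abort for up to 60 attempts.
		 */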
7443 task_abort_retries = 0;
7444 tm_retry:
7445 if (task_abort_retries++ == 60) {
7446 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7447 "%s: ABORT_TASK: giving up\n", ioc->name,
7448 __func__));
7449 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7450 goto broadcast_aen_retry;
7451 }
7452
7453 if (ioc->shost_recovery)
7454 goto out_no_lock;
7455
7456 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7457 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7458 st->msix_io, 30, 0);
7459 if (r == FAILED || st->cb_idx != 0xFF) {
7460 sdev_printk(KERN_WARNING, sdev,
7461 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7462 "scmd(%p)\n", scmd);
7463 goto tm_retry;
7464 }
7465
7466 if (task_abort_retries > 1)
7467 sdev_printk(KERN_WARNING, sdev,
7468 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7469 " scmd(%p)\n",
7470 task_abort_retries - 1, scmd);
7471
7472 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7473 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7474 }
7475
7476 if (ioc->broadcast_aen_pending) {
7477 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7478 "%s: loop back due to pending AEN\n",
7479 ioc->name, __func__));
7480 ioc->broadcast_aen_pending = 0;
7481 goto broadcast_aen_retry;
7482 }
7483
7484 out:
7485 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7486 out_no_lock:
7487
7488 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7489 "%s - exit, query_count = %d termination_count = %d\n",
7490 ioc->name, __func__, query_count, termination_count));
7491
7492 ioc->broadcast_aen_busy = 0;
7493 if (!ioc->shost_recovery)
7494 _scsih_ublock_io_all_device(ioc);
7495 mutex_unlock(&ioc->tm_cmds.mutex);
7496 }
7497
7498 /**
7499 * _scsih_sas_discovery_event - handle discovery events
7500 * @ioc: per adapter object
7501 * @fw_event: The fw_event_work object
7502 * Context: user.
7503 */
7504 static void
7505 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7506 struct fw_event_work *fw_event)
7507 {
7508 Mpi2EventDataSasDiscovery_t *event_data =
7509 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7510
7511 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7512 pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
7513 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
7514 "start" : "stop");
7515 if (event_data->DiscoveryStatus)
7516 pr_info("discovery_status(0x%08x)",
7517 le32_to_cpu(event_data->DiscoveryStatus));
7518 pr_info("\n");
7519 }
7520
7521 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7522 !ioc->sas_hba.num_phys) {
7523 if (disable_discovery > 0 && ioc->shost_recovery) {
7524 /* Wait for the reset to complete */
7525 while (ioc->shost_recovery)
7526 ssleep(1);
7527 }
7528 _scsih_sas_host_add(ioc);
7529 }
7530 }
7531
7532 /**
7533 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7534 * events
7535 * @ioc: per adapter object
7536 * @fw_event: The fw_event_work object
7537 * Context: user.
7538 */
7539 static void
7540 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7541 struct fw_event_work *fw_event)
7542 {
7543 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7544 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7545
7546 switch (event_data->ReasonCode) {
7547 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7548 pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
7549 "(handle:0x%04x, sas_address:0x%016llx,"
7550 "physical_port:0x%02x) has failed",
7551 ioc->name, le16_to_cpu(event_data->DevHandle),
7552 (unsigned long long)le64_to_cpu(event_data->SASAddress),
7553 event_data->PhysicalPort);
7554 break;
7555 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7556 pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
7557 "(handle:0x%04x, sas_address:0x%016llx,"
7558 "physical_port:0x%02x) has timed out",
7559 ioc->name, le16_to_cpu(event_data->DevHandle),
7560 (unsigned long long)le64_to_cpu(event_data->SASAddress),
7561 event_data->PhysicalPort);
7562 break;
7563 default:
7564 break;
7565 }
7566 }
7567
7568 /**
7569 * _scsih_pcie_enumeration_event - handle enumeration events
7570 * @ioc: per adapter object
7571 * @fw_event: The fw_event_work object
7572 * Context: user.
7573 */
7574 static void
7575 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7576 struct fw_event_work *fw_event)
7577 {
7578 Mpi26EventDataPCIeEnumeration_t *event_data =
7579 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7580
7581 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7582 return;
7583
7584 pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
7585 ioc->name,
7586 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7587 "started" : "completed",
7588 event_data->Flags);
7589 if (event_data->EnumerationStatus)
7590 pr_cont("enumeration_status(0x%08x)",
7591 le32_to_cpu(event_data->EnumerationStatus));
7592 pr_cont("\n");
7593 }
7594
7595 /**
7596 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7597 * @ioc: per adapter object
7598 * @handle: device handle for physical disk
7599 * @phys_disk_num: physical disk number
7600 *
7601 * Return: 0 for success, else failure.
7602 */
7603 static int
7604 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7605 {
7606 Mpi2RaidActionRequest_t *mpi_request;
7607 Mpi2RaidActionReply_t *mpi_reply;
7608 u16 smid;
7609 u8 issue_reset = 0;
7610 int rc = 0;
7611 u16 ioc_status;
7612 u32 log_info;
7613
7614 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7615 return rc;
7616
7617 mutex_lock(&ioc->scsih_cmds.mutex);
7618
7619 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7620 pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
7621 ioc->name, __func__);
7622 rc = -EAGAIN;
7623 goto out;
7624 }
7625 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7626
7627 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7628 if (!smid) {
7629 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
7630 ioc->name, __func__);
7631 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7632 rc = -EAGAIN;
7633 goto out;
7634 }
7635
7636 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7637 ioc->scsih_cmds.smid = smid;
7638 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7639
7640 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7641 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7642 mpi_request->PhysDiskNum = phys_disk_num;
7643
7644 dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
7645 "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
7646 handle, phys_disk_num));
7647
7648 init_completion(&ioc->scsih_cmds.done);
7649 mpt3sas_base_put_smid_default(ioc, smid);
7650 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7651
7652 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7653 issue_reset =
7654 mpt3sas_base_check_cmd_timeout(ioc,
7655 ioc->scsih_cmds.status, mpi_request,
7656 sizeof(Mpi2RaidActionRequest_t)/4);
7657 rc = -EFAULT;
7658 goto out;
7659 }
7660
7661 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7662
7663 mpi_reply = ioc->scsih_cmds.reply;
7664 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7665 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7666 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
7667 else
7668 log_info = 0;
7669 ioc_status &= MPI2_IOCSTATUS_MASK;
7670 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7671 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7672 "IR RAID_ACTION: failed: ioc_status(0x%04x), "
7673 "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
7674 log_info));
7675 rc = -EFAULT;
7676 } else
7677 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7678 "IR RAID_ACTION: completed successfully\n",
7679 ioc->name));
7680 }
7681
7682 out:
7683 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7684 mutex_unlock(&ioc->scsih_cmds.mutex);
7685
7686 if (issue_reset)
7687 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7688 return rc;
7689 }
7690
7691 /**
7692 * _scsih_reprobe_lun - reprobing lun
7693 * @sdev: scsi device struct
7694 * @no_uld_attach: sdev->no_uld_attach flag setting
7695 *
7696 **/
7697 static void
7698 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
7699 {
7700 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
7701 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
7702 sdev->no_uld_attach ? "hiding" : "exposing");
7703 WARN_ON(scsi_device_reprobe(sdev));
7704 }
7705
7706 /**
7707 * _scsih_sas_volume_add - add new volume
7708 * @ioc: per adapter object
7709 * @element: IR config element data
7710 * Context: user.
7711 */
7712 static void
7713 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
7714 Mpi2EventIrConfigElement_t *element)
7715 {
7716 struct _raid_device *raid_device;
7717 unsigned long flags;
7718 u64 wwid;
7719 u16 handle = le16_to_cpu(element->VolDevHandle);
7720 int rc;
7721
7722 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
7723 if (!wwid) {
7724 pr_err(MPT3SAS_FMT
7725 "failure at %s:%d/%s()!\n", ioc->name,
7726 __FILE__, __LINE__, __func__);
7727 return;
7728 }
7729
7730 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7731 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
7732 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7733
7734 if (raid_device)
7735 return;
7736
7737 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
7738 if (!raid_device) {
7739 pr_err(MPT3SAS_FMT
7740 "failure at %s:%d/%s()!\n", ioc->name,
7741 __FILE__, __LINE__, __func__);
7742 return;
7743 }
7744
7745 raid_device->id = ioc->sas_id++;
7746 raid_device->channel = RAID_CHANNEL;
7747 raid_device->handle = handle;
7748 raid_device->wwid = wwid;
7749 _scsih_raid_device_add(ioc, raid_device);
7750 if (!ioc->wait_for_discovery_to_complete) {
7751 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
7752 raid_device->id, 0);
7753 if (rc)
7754 _scsih_raid_device_remove(ioc, raid_device);
7755 } else {
7756 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7757 _scsih_determine_boot_device(ioc, raid_device, 1);
7758 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7759 }
7760 }
7761
7762 /**
7763 * _scsih_sas_volume_delete - delete volume
7764 * @ioc: per adapter object
7765 * @handle: volume device handle
7766 * Context: user.
7767 */
7768 static void
7769 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7770 {
7771 struct _raid_device *raid_device;
7772 unsigned long flags;
7773 struct MPT3SAS_TARGET *sas_target_priv_data;
7774 struct scsi_target *starget = NULL;
7775
7776 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7777 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
7778 if (raid_device) {
7779 if (raid_device->starget) {
7780 starget = raid_device->starget;
7781 sas_target_priv_data = starget->hostdata;
7782 sas_target_priv_data->deleted = 1;
7783 }
7784 pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
7785 ioc->name, raid_device->handle,
7786 (unsigned long long) raid_device->wwid);
7787 list_del(&raid_device->list);
7788 kfree(raid_device);
7789 }
7790 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7791 if (starget)
7792 scsi_remove_target(&starget->dev);
7793 }
7794
7795 /**
7796 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
7797 * @ioc: per adapter object
7798 * @element: IR config element data
7799 * Context: user.
7800 */
7801 static void
7802 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
7803 Mpi2EventIrConfigElement_t *element)
7804 {
7805 struct _sas_device *sas_device;
7806 struct scsi_target *starget = NULL;
7807 struct MPT3SAS_TARGET *sas_target_priv_data;
7808 unsigned long flags;
7809 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7810
7811 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7812 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7813 if (sas_device) {
7814 sas_device->volume_handle = 0;
7815 sas_device->volume_wwid = 0;
7816 clear_bit(handle, ioc->pd_handles);
7817 if (sas_device->starget && sas_device->starget->hostdata) {
7818 starget = sas_device->starget;
7819 sas_target_priv_data = starget->hostdata;
7820 sas_target_priv_data->flags &=
7821 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
7822 }
7823 }
7824 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7825 if (!sas_device)
7826 return;
7827
7828 /* exposing raid component */
7829 if (starget)
7830 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
7831
7832 sas_device_put(sas_device);
7833 }
7834
7835 /**
7836 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
7837 * @ioc: per adapter object
7838 * @element: IR config element data
7839 * Context: user.
7840 */
7841 static void
7842 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
7843 Mpi2EventIrConfigElement_t *element)
7844 {
7845 struct _sas_device *sas_device;
7846 struct scsi_target *starget = NULL;
7847 struct MPT3SAS_TARGET *sas_target_priv_data;
7848 unsigned long flags;
7849 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7850 u16 volume_handle = 0;
7851 u64 volume_wwid = 0;
7852
7853 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
7854 if (volume_handle)
7855 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
7856 &volume_wwid);
7857
7858 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7859 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7860 if (sas_device) {
7861 set_bit(handle, ioc->pd_handles);
7862 if (sas_device->starget && sas_device->starget->hostdata) {
7863 starget = sas_device->starget;
7864 sas_target_priv_data = starget->hostdata;
7865 sas_target_priv_data->flags |=
7866 MPT_TARGET_FLAGS_RAID_COMPONENT;
7867 sas_device->volume_handle = volume_handle;
7868 sas_device->volume_wwid = volume_wwid;
7869 }
7870 }
7871 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7872 if (!sas_device)
7873 return;
7874
7875 /* hiding raid component */
7876 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7877
7878 if (starget)
7879 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
7880
7881 sas_device_put(sas_device);
7882 }
7883
7884 /**
7885 * _scsih_sas_pd_delete - delete pd component
7886 * @ioc: per adapter object
7887 * @element: IR config element data
7888 * Context: user.
7889 */
7890 static void
7891 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
7892 Mpi2EventIrConfigElement_t *element)
7893 {
7894 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7895
7896 _scsih_device_remove_by_handle(ioc, handle);
7897 }
7898
7899 /**
7900 * _scsih_sas_pd_add - add pd component
7901 * @ioc: per adapter object
7902 * @element: IR config element data
7903 * Context: user.
7904 */
7905 static void
7906 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
7907 Mpi2EventIrConfigElement_t *element)
7908 {
7909 struct _sas_device *sas_device;
7910 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7911 Mpi2ConfigReply_t mpi_reply;
7912 Mpi2SasDevicePage0_t sas_device_pg0;
7913 u32 ioc_status;
7914 u64 sas_address;
7915 u16 parent_handle;
7916
7917 set_bit(handle, ioc->pd_handles);
7918
7919 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
7920 if (sas_device) {
7921 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7922 sas_device_put(sas_device);
7923 return;
7924 }
7925
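	/*
	 * Device not yet known to the driver: read SAS Device Page 0,
	 * update the transport link, and add it as a hidden RAID
	 * component (is_pd = 1).
	 */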
7926 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7927 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7928 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7929 ioc->name, __FILE__, __LINE__, __func__);
7930 return;
7931 }
7932
7933 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7934 MPI2_IOCSTATUS_MASK;
7935 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7936 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7937 ioc->name, __FILE__, __LINE__, __func__);
7938 return;
7939 }
7940
7941 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
7942 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
7943 mpt3sas_transport_update_links(ioc, sas_address, handle,
7944 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
7945
7946 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7947 _scsih_add_device(ioc, handle, 0, 1);
7948 }
7949
7950 /**
7951 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
7952 * @ioc: per adapter object
7953 * @event_data: event data payload
7954 * Context: user.
7955 */
7956 static void
7957 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7958 Mpi2EventDataIrConfigChangeList_t *event_data)
7959 {
7960 Mpi2EventIrConfigElement_t *element;
7961 u8 element_type;
7962 int i;
7963 char *reason_str = NULL, *element_str = NULL;
7964
7965 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
7966
7967 pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
7968 ioc->name, (le32_to_cpu(event_data->Flags) &
7969 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
7970 "foreign" : "native", event_data->NumElements);
7971 for (i = 0; i < event_data->NumElements; i++, element++) {
7972 switch (element->ReasonCode) {
7973 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7974 reason_str = "add";
7975 break;
7976 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7977 reason_str = "remove";
7978 break;
7979 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
7980 reason_str = "no change";
7981 break;
7982 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7983 reason_str = "hide";
7984 break;
7985 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7986 reason_str = "unhide";
7987 break;
7988 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7989 reason_str = "volume_created";
7990 break;
7991 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7992 reason_str = "volume_deleted";
7993 break;
7994 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7995 reason_str = "pd_created";
7996 break;
7997 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7998 reason_str = "pd_deleted";
7999 break;
8000 default:
8001 reason_str = "unknown reason";
8002 break;
8003 }
8004 element_type = le16_to_cpu(element->ElementFlags) &
8005 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
8006 switch (element_type) {
8007 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
8008 element_str = "volume";
8009 break;
8010 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
8011 element_str = "phys disk";
8012 break;
8013 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
8014 element_str = "hot spare";
8015 break;
8016 default:
8017 element_str = "unknown element";
8018 break;
8019 }
8020 pr_info("\t(%s:%s), vol handle(0x%04x), " \
8021 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
8022 reason_str, le16_to_cpu(element->VolDevHandle),
8023 le16_to_cpu(element->PhysDiskDevHandle),
8024 element->PhysDiskNum);
8025 }
8026 }
8027
8028 /**
8029 * _scsih_sas_ir_config_change_event - handle ir configuration change events
8030 * @ioc: per adapter object
8031 * @fw_event: The fw_event_work object
8032 * Context: user.
8033 */
8034 static void
8035 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
8036 struct fw_event_work *fw_event)
8037 {
8038 Mpi2EventIrConfigElement_t *element;
8039 int i;
8040 u8 foreign_config;
8041 Mpi2EventDataIrConfigChangeList_t *event_data =
8042 (Mpi2EventDataIrConfigChangeList_t *)
8043 fw_event->event_data;
8044
8045 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8046 (!ioc->hide_ir_msg))
8047 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
8048
8049 foreign_config = (le32_to_cpu(event_data->Flags) &
8050 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
8051
8052 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8053 if (ioc->shost_recovery &&
8054 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8055 for (i = 0; i < event_data->NumElements; i++, element++) {
8056 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8057 _scsih_ir_fastpath(ioc,
8058 le16_to_cpu(element->PhysDiskDevHandle),
8059 element->PhysDiskNum);
8060 }
8061 return;
8062 }
8063
8064 for (i = 0; i < event_data->NumElements; i++, element++) {
8065
8066 switch (element->ReasonCode) {
8067 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8068 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8069 if (!foreign_config)
8070 _scsih_sas_volume_add(ioc, element);
8071 break;
8072 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8073 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8074 if (!foreign_config)
8075 _scsih_sas_volume_delete(ioc,
8076 le16_to_cpu(element->VolDevHandle));
8077 break;
8078 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8079 if (!ioc->is_warpdrive)
8080 _scsih_sas_pd_hide(ioc, element);
8081 break;
8082 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8083 if (!ioc->is_warpdrive)
8084 _scsih_sas_pd_expose(ioc, element);
8085 break;
8086 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8087 if (!ioc->is_warpdrive)
8088 _scsih_sas_pd_add(ioc, element);
8089 break;
8090 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8091 if (!ioc->is_warpdrive)
8092 _scsih_sas_pd_delete(ioc, element);
8093 break;
8094 }
8095 }
8096 }
8097
8098 /**
8099 * _scsih_sas_ir_volume_event - IR volume event
8100 * @ioc: per adapter object
8101 * @fw_event: The fw_event_work object
8102 * Context: user.
8103 */
8104 static void
8105 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8106 struct fw_event_work *fw_event)
8107 {
8108 u64 wwid;
8109 unsigned long flags;
8110 struct _raid_device *raid_device;
8111 u16 handle;
8112 u32 state;
8113 int rc;
8114 Mpi2EventDataIrVolume_t *event_data =
8115 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
8116
8117 if (ioc->shost_recovery)
8118 return;
8119
8120 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8121 return;
8122
8123 handle = le16_to_cpu(event_data->VolDevHandle);
8124 state = le32_to_cpu(event_data->NewValue);
8125 if (!ioc->hide_ir_msg)
8126 dewtprintk(ioc, pr_info(MPT3SAS_FMT
8127 "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8128 ioc->name, __func__, handle,
8129 le32_to_cpu(event_data->PreviousValue), state));
8130 switch (state) {
8131 case MPI2_RAID_VOL_STATE_MISSING:
8132 case MPI2_RAID_VOL_STATE_FAILED:
8133 _scsih_sas_volume_delete(ioc, handle);
8134 break;
8135
8136 case MPI2_RAID_VOL_STATE_ONLINE:
8137 case MPI2_RAID_VOL_STATE_DEGRADED:
8138 case MPI2_RAID_VOL_STATE_OPTIMAL:
8139
8140 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8141 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8142 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8143
8144 if (raid_device)
8145 break;
8146
8147 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8148 if (!wwid) {
8149 pr_err(MPT3SAS_FMT
8150 "failure at %s:%d/%s()!\n", ioc->name,
8151 __FILE__, __LINE__, __func__);
8152 break;
8153 }
8154
8155 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8156 if (!raid_device) {
8157 pr_err(MPT3SAS_FMT
8158 "failure at %s:%d/%s()!\n", ioc->name,
8159 __FILE__, __LINE__, __func__);
8160 break;
8161 }
8162
8163 raid_device->id = ioc->sas_id++;
8164 raid_device->channel = RAID_CHANNEL;
8165 raid_device->handle = handle;
8166 raid_device->wwid = wwid;
8167 _scsih_raid_device_add(ioc, raid_device);
8168 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8169 raid_device->id, 0);
8170 if (rc)
8171 _scsih_raid_device_remove(ioc, raid_device);
8172 break;
8173
8174 case MPI2_RAID_VOL_STATE_INITIALIZING:
8175 default:
8176 break;
8177 }
8178 }
8179
8180 /**
8181 * _scsih_sas_ir_physical_disk_event - PD event
8182 * @ioc: per adapter object
8183 * @fw_event: The fw_event_work object
8184 * Context: user.
8185 */
8186 static void
8187 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8188 struct fw_event_work *fw_event)
8189 {
8190 u16 handle, parent_handle;
8191 u32 state;
8192 struct _sas_device *sas_device;
8193 Mpi2ConfigReply_t mpi_reply;
8194 Mpi2SasDevicePage0_t sas_device_pg0;
8195 u32 ioc_status;
8196 Mpi2EventDataIrPhysicalDisk_t *event_data =
8197 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8198 u64 sas_address;
8199
8200 if (ioc->shost_recovery)
8201 return;
8202
8203 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8204 return;
8205
8206 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8207 state = le32_to_cpu(event_data->NewValue);
8208
8209 if (!ioc->hide_ir_msg)
8210 dewtprintk(ioc, pr_info(MPT3SAS_FMT
8211 "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8212 ioc->name, __func__, handle,
8213 le32_to_cpu(event_data->PreviousValue), state));
8214
8215 switch (state) {
8216 case MPI2_RAID_PD_STATE_ONLINE:
8217 case MPI2_RAID_PD_STATE_DEGRADED:
8218 case MPI2_RAID_PD_STATE_REBUILDING:
8219 case MPI2_RAID_PD_STATE_OPTIMAL:
8220 case MPI2_RAID_PD_STATE_HOT_SPARE:
8221
8222 if (!ioc->is_warpdrive)
8223 set_bit(handle, ioc->pd_handles);
8224
8225 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8226 if (sas_device) {
8227 sas_device_put(sas_device);
8228 return;
8229 }
8230
8231 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8232 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8233 handle))) {
8234 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
8235 ioc->name, __FILE__, __LINE__, __func__);
8236 return;
8237 }
8238
8239 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8240 MPI2_IOCSTATUS_MASK;
8241 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8242 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
8243 ioc->name, __FILE__, __LINE__, __func__);
8244 return;
8245 }
8246
8247 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8248 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8249 mpt3sas_transport_update_links(ioc, sas_address, handle,
8250 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8251
8252 _scsih_add_device(ioc, handle, 0, 1);
8253
8254 break;
8255
8256 case MPI2_RAID_PD_STATE_OFFLINE:
8257 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8258 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8259 default:
8260 break;
8261 }
8262 }
8263
8264 /**
8265 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8266 * @ioc: per adapter object
8267 * @event_data: event data payload
8268 * Context: user.
8269 */
8270 static void
8271 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8272 Mpi2EventDataIrOperationStatus_t *event_data)
8273 {
8274 char *reason_str = NULL;
8275
8276 switch (event_data->RAIDOperation) {
8277 case MPI2_EVENT_IR_RAIDOP_RESYNC:
8278 reason_str = "resync";
8279 break;
8280 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8281 reason_str = "online capacity expansion";
8282 break;
8283 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8284 reason_str = "consistency check";
8285 break;
8286 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8287 reason_str = "background init";
8288 break;
8289 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8290 reason_str = "make data consistent";
8291 break;
8292 }
8293
8294 if (!reason_str)
8295 return;
8296
8297 pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
8298 "\thandle(0x%04x), percent complete(%d)\n",
8299 ioc->name, reason_str,
8300 le16_to_cpu(event_data->VolDevHandle),
8301 event_data->PercentComplete);
8302 }
8303
8304 /**
8305 * _scsih_sas_ir_operation_status_event - handle RAID operation events
8306 * @ioc: per adapter object
8307 * @fw_event: The fw_event_work object
8308 * Context: user.
8309 */
8310 static void
8311 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8312 struct fw_event_work *fw_event)
8313 {
8314 Mpi2EventDataIrOperationStatus_t *event_data =
8315 (Mpi2EventDataIrOperationStatus_t *)
8316 fw_event->event_data;
8317 static struct _raid_device *raid_device;
8318 unsigned long flags;
8319 u16 handle;
8320
8321 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8322 (!ioc->hide_ir_msg))
8323 _scsih_sas_ir_operation_status_event_debug(ioc,
8324 event_data);
8325
8326 /* code added for raid transport support */
8327 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8328
8329 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8330 handle = le16_to_cpu(event_data->VolDevHandle);
8331 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8332 if (raid_device)
8333 raid_device->percent_complete =
8334 event_data->PercentComplete;
8335 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8336 }
8337 }
8338
8339 /**
8340 * _scsih_prep_device_scan - initialize parameters prior to device scan
8341 * @ioc: per adapter object
8342 *
8343 * Set the deleted flag prior to device scan. If the device is found during
8344 * the scan, then we clear the deleted flag.
8345 */
8346 static void
8347 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8348 {
8349 struct MPT3SAS_DEVICE *sas_device_priv_data;
8350 struct scsi_device *sdev;
8351
8352 shost_for_each_device(sdev, ioc->shost) {
8353 sas_device_priv_data = sdev->hostdata;
8354 if (sas_device_priv_data && sas_device_priv_data->sas_target)
8355 sas_device_priv_data->sas_target->deleted = 1;
8356 }
8357 }
8358
8359 /**
8360 * _scsih_mark_responding_sas_device - mark a sas_device as responding
8361 * @ioc: per adapter object
8362 * @sas_device_pg0: SAS Device page 0
8363 *
8364 * After host reset, find out whether devices are still responding.
8365 * Used in _scsih_remove_unresponsive_sas_devices.
8366 */
8367 static void
8368 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8369 Mpi2SasDevicePage0_t *sas_device_pg0)
8370 {
8371 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8372 struct scsi_target *starget;
8373 struct _sas_device *sas_device = NULL;
8374 struct _enclosure_node *enclosure_dev = NULL;
8375 unsigned long flags;
8376
8377 if (sas_device_pg0->EnclosureHandle) {
8378 enclosure_dev =
8379 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8380 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8381 if (enclosure_dev == NULL)
8382 pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
8383 "doesn't match with enclosure device!\n",
8384 ioc->name, sas_device_pg0->EnclosureHandle);
8385 }
8386 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8387 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8388 if ((sas_device->sas_address == le64_to_cpu(
8389 sas_device_pg0->SASAddress)) && (sas_device->slot ==
8390 le16_to_cpu(sas_device_pg0->Slot))) {
8391 sas_device->responding = 1;
8392 starget = sas_device->starget;
8393 if (starget && starget->hostdata) {
8394 sas_target_priv_data = starget->hostdata;
8395 sas_target_priv_data->tm_busy = 0;
8396 sas_target_priv_data->deleted = 0;
8397 } else
8398 sas_target_priv_data = NULL;
8399 if (starget) {
8400 starget_printk(KERN_INFO, starget,
8401 "handle(0x%04x), sas_addr(0x%016llx)\n",
8402 le16_to_cpu(sas_device_pg0->DevHandle),
8403 (unsigned long long)
8404 sas_device->sas_address);
8405
8406 if (sas_device->enclosure_handle != 0)
8407 starget_printk(KERN_INFO, starget,
8408 "enclosure logical id(0x%016llx),"
8409 " slot(%d)\n",
8410 (unsigned long long)
8411 sas_device->enclosure_logical_id,
8412 sas_device->slot);
8413 }
8414 if (le16_to_cpu(sas_device_pg0->Flags) &
8415 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8416 sas_device->enclosure_level =
8417 sas_device_pg0->EnclosureLevel;
8418 memcpy(&sas_device->connector_name[0],
8419 &sas_device_pg0->ConnectorName[0], 4);
8420 } else {
8421 sas_device->enclosure_level = 0;
8422 sas_device->connector_name[0] = '\0';
8423 }
8424
8425 sas_device->enclosure_handle =
8426 le16_to_cpu(sas_device_pg0->EnclosureHandle);
8427 sas_device->is_chassis_slot_valid = 0;
8428 if (enclosure_dev) {
8429 sas_device->enclosure_logical_id = le64_to_cpu(
8430 enclosure_dev->pg0.EnclosureLogicalID);
8431 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8432 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8433 sas_device->is_chassis_slot_valid = 1;
8434 sas_device->chassis_slot =
8435 enclosure_dev->pg0.ChassisSlot;
8436 }
8437 }
8438
8439 if (sas_device->handle == le16_to_cpu(
8440 sas_device_pg0->DevHandle))
8441 goto out;
8442 pr_info("\thandle changed from(0x%04x)!!!\n",
8443 sas_device->handle);
8444 sas_device->handle = le16_to_cpu(
8445 sas_device_pg0->DevHandle);
8446 if (sas_target_priv_data)
8447 sas_target_priv_data->handle =
8448 le16_to_cpu(sas_device_pg0->DevHandle);
8449 goto out;
8450 }
8451 }
8452 out:
8453 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8454 }
8455
8456 /**
8457 * _scsih_create_enclosure_list_after_reset - free the existing enclosure
8458 * list and rebuild it by scanning all Enclosure Page(0)s
8459 * @ioc: per adapter object
8460 */
8461 static void
8462 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8463 {
8464 struct _enclosure_node *enclosure_dev;
8465 Mpi2ConfigReply_t mpi_reply;
8466 u16 enclosure_handle;
8467 int rc;
8468
8469 /* Free existing enclosure list */
8470 mpt3sas_free_enclosure_list(ioc);
8471
8472 /* Reconstruct the enclosure list after reset */
8473 enclosure_handle = 0xFFFF;
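/*
 * Starting with handle 0xFFFF, the GET_NEXT_HANDLE form returns the
 * first Enclosure Page(0); each iteration then fetches the page that
 * follows the previously returned handle, until the config request
 * fails or IOCStatus reports an error.
 */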
8474 do {
8475 enclosure_dev =
8476 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8477 if (!enclosure_dev) {
8478 pr_err(MPT3SAS_FMT
8479 "failure at %s:%d/%s()!\n", ioc->name,
8480 __FILE__, __LINE__, __func__);
8481 return;
8482 }
8483 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8484 &enclosure_dev->pg0,
8485 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8486 enclosure_handle);
8487
8488 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8489 MPI2_IOCSTATUS_MASK)) {
8490 kfree(enclosure_dev);
8491 return;
8492 }
8493 list_add_tail(&enclosure_dev->list,
8494 &ioc->enclosure_list);
8495 enclosure_handle =
8496 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8497 } while (1);
8498 }
8499
8500 /**
8501 * _scsih_search_responding_sas_devices - search for responding SAS end devices
8502 * @ioc: per adapter object
8503 *
8504 * After host reset, find out whether devices are still responding.
8505 * If not, remove them.
8506 */
8507 static void
8508 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8509 {
8510 Mpi2SasDevicePage0_t sas_device_pg0;
8511 Mpi2ConfigReply_t mpi_reply;
8512 u16 ioc_status;
8513 u16 handle;
8514 u32 device_info;
8515
8516 pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
8517
8518 if (list_empty(&ioc->sas_device_list))
8519 goto out;
8520
8521 handle = 0xFFFF;
8522 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8523 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8524 handle))) {
8525 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8526 MPI2_IOCSTATUS_MASK;
8527 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8528 break;
8529 handle = le16_to_cpu(sas_device_pg0.DevHandle);
8530 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8531 if (!(_scsih_is_end_device(device_info)))
8532 continue;
8533 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8534 }
8535
8536 out:
8537 pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
8538 ioc->name);
8539 }
8540
8541 /**
8542 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8543 * @ioc: per adapter object
8544 * @pcie_device_pg0: PCIe Device page 0
8545 *
8546 * After host reset, find out whether devices are still responding.
8547 * Used in _scsih_remove_unresponding_devices.
8548 */
8549 static void
8550 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8551 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8552 {
8553 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8554 struct scsi_target *starget;
8555 struct _pcie_device *pcie_device;
8556 unsigned long flags;
8557
8558 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8559 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8560 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8561 && (pcie_device->slot == le16_to_cpu(
8562 pcie_device_pg0->Slot))) {
8563 pcie_device->responding = 1;
8564 starget = pcie_device->starget;
8565 if (starget && starget->hostdata) {
8566 sas_target_priv_data = starget->hostdata;
8567 sas_target_priv_data->tm_busy = 0;
8568 sas_target_priv_data->deleted = 0;
8569 } else
8570 sas_target_priv_data = NULL;
8571 if (starget) {
8572 starget_printk(KERN_INFO, starget,
8573 "handle(0x%04x), wwid(0x%016llx) ",
8574 pcie_device->handle,
8575 (unsigned long long)pcie_device->wwid);
8576 if (pcie_device->enclosure_handle != 0)
8577 starget_printk(KERN_INFO, starget,
8578 "enclosure logical id(0x%016llx), "
8579 "slot(%d)\n",
8580 (unsigned long long)
8581 pcie_device->enclosure_logical_id,
8582 pcie_device->slot);
8583 }
8584
8585 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8586 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8587 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8588 pcie_device->enclosure_level =
8589 pcie_device_pg0->EnclosureLevel;
8590 memcpy(&pcie_device->connector_name[0],
8591 &pcie_device_pg0->ConnectorName[0], 4);
8592 } else {
8593 pcie_device->enclosure_level = 0;
8594 pcie_device->connector_name[0] = '\0';
8595 }
8596
8597 if (pcie_device->handle == le16_to_cpu(
8598 pcie_device_pg0->DevHandle))
8599 goto out;
8600 pr_info("\thandle changed from(0x%04x)!!!\n",
8601 pcie_device->handle);
8602 pcie_device->handle = le16_to_cpu(
8603 pcie_device_pg0->DevHandle);
8604 if (sas_target_priv_data)
8605 sas_target_priv_data->handle =
8606 le16_to_cpu(pcie_device_pg0->DevHandle);
8607 goto out;
8608 }
8609 }
8610
8611 out:
8612 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8613 }
8614
8615 /**
8616 * _scsih_search_responding_pcie_devices - search for responding PCIe end devices
8617 * @ioc: per adapter object
8618 *
8619 * After host reset, find out whether devices are still responding.
8620 * If not, remove them.
8621 */
8622 static void
8623 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8624 {
8625 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8626 Mpi2ConfigReply_t mpi_reply;
8627 u16 ioc_status;
8628 u16 handle;
8629 u32 device_info;
8630
8631 pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
8632
8633 if (list_empty(&ioc->pcie_device_list))
8634 goto out;
8635
8636 handle = 0xFFFF;
8637 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8638 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8639 handle))) {
8640 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8641 MPI2_IOCSTATUS_MASK;
8642 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8643 pr_info(MPT3SAS_FMT "\tbreak from %s: "
8644 "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
8645 __func__, ioc_status,
8646 le32_to_cpu(mpi_reply.IOCLogInfo));
8647 break;
8648 }
8649 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8650 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8651 if (!(_scsih_is_nvme_device(device_info)))
8652 continue;
8653 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8654 }
8655 out:
8656 pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
8657 ioc->name);
8658 }
8659
8660 /**
8661 * _scsih_mark_responding_raid_device - mark a raid_device as responding
8662 * @ioc: per adapter object
8663 * @wwid: world wide identifier for raid volume
8664 * @handle: device handle
8665 *
8666 * After host reset, find out whether devices are still responding.
8667 * Used in _scsih_remove_unresponding_devices.
8668 */
8669 static void
8670 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8671 u16 handle)
8672 {
8673 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8674 struct scsi_target *starget;
8675 struct _raid_device *raid_device;
8676 unsigned long flags;
8677
8678 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8679 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8680 if (raid_device->wwid == wwid && raid_device->starget) {
8681 starget = raid_device->starget;
8682 if (starget && starget->hostdata) {
8683 sas_target_priv_data = starget->hostdata;
8684 sas_target_priv_data->deleted = 0;
8685 } else
8686 sas_target_priv_data = NULL;
8687 raid_device->responding = 1;
8688 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8689 starget_printk(KERN_INFO, raid_device->starget,
8690 "handle(0x%04x), wwid(0x%016llx)\n", handle,
8691 (unsigned long long)raid_device->wwid);
8692
8693 /*
8694 * WARPDRIVE: The handles of the PDs might have changed
8695 * across the host reset so re-initialize the
8696 * required data for Direct IO
8697 */
8698 mpt3sas_init_warpdrive_properties(ioc, raid_device);
8699 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8700 if (raid_device->handle == handle) {
8701 spin_unlock_irqrestore(&ioc->raid_device_lock,
8702 flags);
8703 return;
8704 }
8705 pr_info("\thandle changed from(0x%04x)!!!\n",
8706 raid_device->handle);
8707 raid_device->handle = handle;
8708 if (sas_target_priv_data)
8709 sas_target_priv_data->handle = handle;
8710 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8711 return;
8712 }
8713 }
8714 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8715 }
8716
8717 /**
8718 * _scsih_search_responding_raid_devices - search for responding RAID volumes
8719 * @ioc: per adapter object
8720 *
8721 * After host reset, find out whether devices are still responding.
8722 * If not, remove them.
8723 */
8724 static void
8725 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8726 {
8727 Mpi2RaidVolPage1_t volume_pg1;
8728 Mpi2RaidVolPage0_t volume_pg0;
8729 Mpi2RaidPhysDiskPage0_t pd_pg0;
8730 Mpi2ConfigReply_t mpi_reply;
8731 u16 ioc_status;
8732 u16 handle;
8733 u8 phys_disk_num;
8734
8735 if (!ioc->ir_firmware)
8736 return;
8737
8738 pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
8739 ioc->name);
8740
8741 if (list_empty(&ioc->raid_device_list))
8742 goto out;
8743
8744 handle = 0xFFFF;
8745 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
8746 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
8747 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8748 MPI2_IOCSTATUS_MASK;
8749 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8750 break;
8751 handle = le16_to_cpu(volume_pg1.DevHandle);
8752
8753 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
8754 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
8755 sizeof(Mpi2RaidVolPage0_t)))
8756 continue;
8757
8758 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
8759 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
8760 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
8761 _scsih_mark_responding_raid_device(ioc,
8762 le64_to_cpu(volume_pg1.WWID), handle);
8763 }
8764
8765 /* refresh the pd_handles */
8766 if (!ioc->is_warpdrive) {
8767 phys_disk_num = 0xFF;
8768 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
8769 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
8770 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
8771 phys_disk_num))) {
8772 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8773 MPI2_IOCSTATUS_MASK;
8774 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8775 break;
8776 phys_disk_num = pd_pg0.PhysDiskNum;
8777 handle = le16_to_cpu(pd_pg0.DevHandle);
8778 set_bit(handle, ioc->pd_handles);
8779 }
8780 }
8781 out:
8782 pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
8783 ioc->name);
8784 }
8785
8786 /**
8787 * _scsih_mark_responding_expander - mark an expander as responding
8788 * @ioc: per adapter object
8789 * @expander_pg0: SAS Expander Config Page0
8790 *
8791 * After host reset, find out whether devices are still responding.
8792 * Used in _scsih_remove_unresponding_devices.
8793 */
8794 static void
8795 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8796 Mpi2ExpanderPage0_t *expander_pg0)
8797 {
8798 struct _sas_node *sas_expander = NULL;
8799 unsigned long flags;
8800 int i;
8801 struct _enclosure_node *enclosure_dev = NULL;
8802 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8803 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8804 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8805
8806 if (enclosure_handle)
8807 enclosure_dev =
8808 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8809 enclosure_handle);
8810
8811 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8812 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
8813 if (sas_expander->sas_address != sas_address)
8814 continue;
8815 sas_expander->responding = 1;
8816
8817 if (enclosure_dev) {
8818 sas_expander->enclosure_logical_id =
8819 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8820 sas_expander->enclosure_handle =
8821 le16_to_cpu(expander_pg0->EnclosureHandle);
8822 }
8823
8824 if (sas_expander->handle == handle)
8825 goto out;
8826 pr_info("\texpander(0x%016llx): handle changed" \
8827 " from(0x%04x) to (0x%04x)!!!\n",
8828 (unsigned long long)sas_expander->sas_address,
8829 sas_expander->handle, handle);
8830 sas_expander->handle = handle;
8831 for (i = 0 ; i < sas_expander->num_phys ; i++)
8832 sas_expander->phy[i].handle = handle;
8833 goto out;
8834 }
8835 out:
8836 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8837 }
8838
8839 /**
8840 * _scsih_search_responding_expanders - search for responding expanders
8841 * @ioc: per adapter object
8842 *
8843 * After host reset, find out whether devices are still responding.
8844 * If not, remove them.
8845 */
8846 static void
8847 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8848 {
8849 Mpi2ExpanderPage0_t expander_pg0;
8850 Mpi2ConfigReply_t mpi_reply;
8851 u16 ioc_status;
8852 u64 sas_address;
8853 u16 handle;
8854
8855 pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
8856
8857 if (list_empty(&ioc->sas_expander_list))
8858 goto out;
8859
8860 handle = 0xFFFF;
8861 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8862 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8863
8864 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8865 MPI2_IOCSTATUS_MASK;
8866 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8867 break;
8868
8869 handle = le16_to_cpu(expander_pg0.DevHandle);
8870 sas_address = le64_to_cpu(expander_pg0.SASAddress);
8871 pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
8872 handle,
8873 (unsigned long long)sas_address);
8874 _scsih_mark_responding_expander(ioc, &expander_pg0);
8875 }
8876
8877 out:
8878 pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
8879 }
8880
8881 /**
8882 * _scsih_remove_unresponding_devices - removing unresponding devices
8883 * @ioc: per adapter object
8884 */
8885 static void
8886 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8887 {
8888 struct _sas_device *sas_device, *sas_device_next;
8889 struct _sas_node *sas_expander, *sas_expander_next;
8890 struct _raid_device *raid_device, *raid_device_next;
8891 struct _pcie_device *pcie_device, *pcie_device_next;
8892 struct list_head tmp_list;
8893 unsigned long flags;
8894 LIST_HEAD(head);
8895
8896 pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
8897 ioc->name);
8898
8899 /* removing unresponding end devices */
8900 pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
8901 ioc->name);
8902 /*
8903 * Iterate, pulling off devices marked as non-responding. We become the
8904 * owner for the reference the list had on any object we prune.
8905 */
8906 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8907 list_for_each_entry_safe(sas_device, sas_device_next,
8908 &ioc->sas_device_list, list) {
8909 if (!sas_device->responding)
8910 list_move_tail(&sas_device->list, &head);
8911 else
8912 sas_device->responding = 0;
8913 }
8914 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8915
8916 /*
8917 * Now, uninitialize and remove the unresponding devices we pruned.
8918 */
8919 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
8920 _scsih_remove_device(ioc, sas_device);
8921 list_del_init(&sas_device->list);
8922 sas_device_put(sas_device);
8923 }
8924
8925 pr_info(MPT3SAS_FMT
8926 "removing unresponding devices: pcie end-devices\n",
8927 ioc->name);
8928 INIT_LIST_HEAD(&head);
8929 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8930 list_for_each_entry_safe(pcie_device, pcie_device_next,
8931 &ioc->pcie_device_list, list) {
8932 if (!pcie_device->responding)
8933 list_move_tail(&pcie_device->list, &head);
8934 else
8935 pcie_device->responding = 0;
8936 }
8937 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8938
8939 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
8940 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
8941 list_del_init(&pcie_device->list);
8942 pcie_device_put(pcie_device);
8943 }
8944
8945 /* removing unresponding volumes */
8946 if (ioc->ir_firmware) {
8947 pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
8948 ioc->name);
8949 list_for_each_entry_safe(raid_device, raid_device_next,
8950 &ioc->raid_device_list, list) {
8951 if (!raid_device->responding)
8952 _scsih_sas_volume_delete(ioc,
8953 raid_device->handle);
8954 else
8955 raid_device->responding = 0;
8956 }
8957 }
8958
8959 /* removing unresponding expanders */
8960 pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
8961 ioc->name);
8962 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8963 INIT_LIST_HEAD(&tmp_list);
8964 list_for_each_entry_safe(sas_expander, sas_expander_next,
8965 &ioc->sas_expander_list, list) {
8966 if (!sas_expander->responding)
8967 list_move_tail(&sas_expander->list, &tmp_list);
8968 else
8969 sas_expander->responding = 0;
8970 }
8971 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8972 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
8973 list) {
8974 _scsih_expander_node_remove(ioc, sas_expander);
8975 }
8976
8977 pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
8978 ioc->name);
8979
8980 /* unblock devices */
8981 _scsih_ublock_io_all_device(ioc);
8982 }
8983
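/**
 * _scsih_refresh_expander_links - refresh the phy links of an expander
 * @ioc: per adapter object
 * @sas_expander: the sas_node object
 * @handle: expander device handle
 *
 * Re-read Expander Page 1 for each phy and update the attached device
 * handle and negotiated link rate in the sas transport layer.
 */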
8984 static void
8985 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
8986 struct _sas_node *sas_expander, u16 handle)
8987 {
8988 Mpi2ExpanderPage1_t expander_pg1;
8989 Mpi2ConfigReply_t mpi_reply;
8990 int i;
8991
8992 for (i = 0 ; i < sas_expander->num_phys ; i++) {
8993 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
8994 &expander_pg1, i, handle))) {
8995 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
8996 ioc->name, __FILE__, __LINE__, __func__);
8997 return;
8998 }
8999
9000 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
9001 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
9002 expander_pg1.NegotiatedLinkRate >> 4);
9003 }
9004 }
9005
9006 /**
9007 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
9008 * @ioc: per adapter object
9009 */
9010 static void
9011 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9012 {
9013 Mpi2ExpanderPage0_t expander_pg0;
9014 Mpi2SasDevicePage0_t sas_device_pg0;
9015 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9016 Mpi2RaidVolPage1_t volume_pg1;
9017 Mpi2RaidVolPage0_t volume_pg0;
9018 Mpi2RaidPhysDiskPage0_t pd_pg0;
9019 Mpi2EventIrConfigElement_t element;
9020 Mpi2ConfigReply_t mpi_reply;
9021 u8 phys_disk_num;
9022 u16 ioc_status;
9023 u16 handle, parent_handle;
9024 u64 sas_address;
9025 struct _sas_device *sas_device;
9026 struct _pcie_device *pcie_device;
9027 struct _sas_node *expander_device;
9028 static struct _raid_device *raid_device;
9029 u8 retry_count;
9030 unsigned long flags;
9031
9032 pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
9033
9034 _scsih_sas_host_refresh(ioc);
9035
9036 pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
9037
9038 /* expanders */
9039 handle = 0xFFFF;
9040 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9041 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9042 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9043 MPI2_IOCSTATUS_MASK;
9044 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9045 pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
9046 "ioc_status(0x%04x), loginfo(0x%08x)\n",
9047 ioc->name, ioc_status,
9048 le32_to_cpu(mpi_reply.IOCLogInfo));
9049 break;
9050 }
9051 handle = le16_to_cpu(expander_pg0.DevHandle);
9052 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9053 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
9054 ioc, le64_to_cpu(expander_pg0.SASAddress));
9055 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9056 if (expander_device)
9057 _scsih_refresh_expander_links(ioc, expander_device,
9058 handle);
9059 else {
9060 pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
9061 "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
9062 handle, (unsigned long long)
9063 le64_to_cpu(expander_pg0.SASAddress));
9064 _scsih_expander_add(ioc, handle);
9065 pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
9066 "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
9067 handle, (unsigned long long)
9068 le64_to_cpu(expander_pg0.SASAddress));
9069 }
9070 }
9071
9072 pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
9073 ioc->name);
9074
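/*
 * Phys disk and volume pages are only present when IR firmware is
 * loaded; otherwise skip straight to the SAS end device scan.
 */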
9075 if (!ioc->ir_firmware)
9076 goto skip_to_sas;
9077
9078 pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
9079
9080 /* phys disk */
9081 phys_disk_num = 0xFF;
9082 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9083 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9084 phys_disk_num))) {
9085 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9086 MPI2_IOCSTATUS_MASK;
9087 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9088 pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
9089 "ioc_status(0x%04x), loginfo(0x%08x)\n",
9090 ioc->name, ioc_status,
9091 le32_to_cpu(mpi_reply.IOCLogInfo));
9092 break;
9093 }
9094 phys_disk_num = pd_pg0.PhysDiskNum;
9095 handle = le16_to_cpu(pd_pg0.DevHandle);
9096 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9097 if (sas_device) {
9098 sas_device_put(sas_device);
9099 continue;
9100 }
9101 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9102 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9103 handle) != 0)
9104 continue;
9105 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9106 MPI2_IOCSTATUS_MASK;
9107 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9108 pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
9109 "ioc_status(0x%04x), loginfo(0x%08x)\n",
9110 ioc->name, ioc_status,
9111 le32_to_cpu(mpi_reply.IOCLogInfo));
9112 break;
9113 }
9114 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9115 if (!_scsih_get_sas_address(ioc, parent_handle,
9116 &sas_address)) {
9117 pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
9118 " handle (0x%04x), sas_addr(0x%016llx)\n",
9119 ioc->name, handle, (unsigned long long)
9120 le64_to_cpu(sas_device_pg0.SASAddress));
9121 mpt3sas_transport_update_links(ioc, sas_address,
9122 handle, sas_device_pg0.PhyNum,
9123 MPI2_SAS_NEG_LINK_RATE_1_5);
9124 set_bit(handle, ioc->pd_handles);
9125 retry_count = 0;
9126 /* This will retry adding the end device.
9127 * _scsih_add_device() will decide on retries and
9128 * return "1" when it should be retried
9129 */
9130 while (_scsih_add_device(ioc, handle, retry_count++,
9131 1)) {
9132 ssleep(1);
9133 }
9134 pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
9135 " handle (0x%04x), sas_addr(0x%016llx)\n",
9136 ioc->name, handle, (unsigned long long)
9137 le64_to_cpu(sas_device_pg0.SASAddress));
9138 }
9139 }
9140
9141 pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
9142 ioc->name);
9143
9144 pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
9145
9146 /* volumes */
9147 handle = 0xFFFF;
9148 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9149 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9150 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9151 MPI2_IOCSTATUS_MASK;
9152 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9153 pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
9154 "ioc_status(0x%04x), loginfo(0x%08x)\n",
9155 ioc->name, ioc_status,
9156 le32_to_cpu(mpi_reply.IOCLogInfo));
9157 break;
9158 }
9159 handle = le16_to_cpu(volume_pg1.DevHandle);
9160 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9161 raid_device = _scsih_raid_device_find_by_wwid(ioc,
9162 le64_to_cpu(volume_pg1.WWID));
9163 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9164 if (raid_device)
9165 continue;
9166 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9167 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9168 sizeof(Mpi2RaidVolPage0_t)))
9169 continue;
9170 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9171 MPI2_IOCSTATUS_MASK;
9172 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9173 pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
9174 "ioc_status(0x%04x), loginfo(0x%08x)\n",
9175 ioc->name, ioc_status,
9176 le32_to_cpu(mpi_reply.IOCLogInfo));
9177 break;
9178 }
9179 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9180 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9181 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9182 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9183 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9184 element.VolDevHandle = volume_pg1.DevHandle;
9185 pr_info(MPT3SAS_FMT
9186 "\tBEFORE adding volume: handle (0x%04x)\n",
9187 ioc->name, volume_pg1.DevHandle);
9188 _scsih_sas_volume_add(ioc, &element);
9189 pr_info(MPT3SAS_FMT
9190 "\tAFTER adding volume: handle (0x%04x)\n",
9191 ioc->name, volume_pg1.DevHandle);
9192 }
9193 }
9194
9195 pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
9196 ioc->name);
9197
9198 skip_to_sas:
9199
9200 pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
9201 ioc->name);
9202
9203 /* sas devices */
9204 handle = 0xFFFF;
9205 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9206 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9207 handle))) {
9208 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9209 MPI2_IOCSTATUS_MASK;
9210 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9211 pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
9212 " ioc_status(0x%04x), loginfo(0x%08x)\n",
9213 ioc->name, ioc_status,
9214 le32_to_cpu(mpi_reply.IOCLogInfo));
9215 break;
9216 }
9217 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9218 if (!(_scsih_is_end_device(
9219 le32_to_cpu(sas_device_pg0.DeviceInfo))))
9220 continue;
9221 sas_device = mpt3sas_get_sdev_by_addr(ioc,
9222 le64_to_cpu(sas_device_pg0.SASAddress));
9223 if (sas_device) {
9224 sas_device_put(sas_device);
9225 continue;
9226 }
9227 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9228 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9229 pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
9230 "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
9231 handle, (unsigned long long)
9232 le64_to_cpu(sas_device_pg0.SASAddress));
9233 mpt3sas_transport_update_links(ioc, sas_address, handle,
9234 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9235 retry_count = 0;
9236 /* This will retry adding the end device.
9237 * _scsih_add_device() will decide on retries and
9238 * return "1" when it should be retried
9239 */
9240 while (_scsih_add_device(ioc, handle, retry_count++,
9241 0)) {
9242 ssleep(1);
9243 }
9244 pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
9245 "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
9246 handle, (unsigned long long)
9247 le64_to_cpu(sas_device_pg0.SASAddress));
9248 }
9249 }
9250 pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
9251 ioc->name);
9252 pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
9253 ioc->name);
9254
9255 /* pcie devices */
9256 handle = 0xFFFF;
9257 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9258 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9259 handle))) {
9260 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9261 & MPI2_IOCSTATUS_MASK;
9262 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9263 pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
9264 " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9265 ioc->name, ioc_status,
9266 le32_to_cpu(mpi_reply.IOCLogInfo));
9267 break;
9268 }
9269 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9270 if (!(_scsih_is_nvme_device(
9271 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9272 continue;
9273 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9274 le64_to_cpu(pcie_device_pg0.WWID));
9275 if (pcie_device) {
9276 pcie_device_put(pcie_device);
9277 continue;
9278 }
9279 retry_count = 0;
9280 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9281 _scsih_pcie_add_device(ioc, handle);
9282
9283 pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
9284 "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
9285 handle,
9286 (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
9287 }
9288 pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices complete\n",
9289 ioc->name);
9290 pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
9291 }
9292
9293 /**
9294 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
9295 * @ioc: per adapter object
9296 *
9297 * The handler for doing any required cleanup or initialization.
9298 */
9299 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9300 {
9301 dtmprintk(ioc, pr_info(MPT3SAS_FMT
9302 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
9303 }
9304
9305 /**
9306 * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
9307 * @ioc: per adapter object
9308 *
9309 * The handler for doing any required cleanup or initialization.
9310 */
9311 void
9312 mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9313 {
9314 dtmprintk(ioc, pr_info(MPT3SAS_FMT
9315 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
9316 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9317 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9318 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9319 complete(&ioc->scsih_cmds.done);
9320 }
9321 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9322 ioc->tm_cmds.status |= MPT3_CMD_RESET;
9323 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9324 complete(&ioc->tm_cmds.done);
9325 }
9326
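/*
 * The reset invalidates any device add/remove that was in flight,
 * so clear the tracking bitmaps and flush queued firmware events
 * and outstanding scsi commands.
 */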
9327 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9328 memset(ioc->device_remove_in_progress, 0,
9329 ioc->device_remove_in_progress_sz);
9330 _scsih_fw_event_cleanup_queue(ioc);
9331 _scsih_flush_running_cmds(ioc);
9332 }
9333
9334 /**
9335 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
9336 * @ioc: per adapter object
9337 *
9338 * The handler for doing any required cleanup or initialization.
9339 */
9340 void
9341 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9342 {
9343 dtmprintk(ioc, pr_info(MPT3SAS_FMT
9344 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
9345 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9346 !ioc->sas_hba.num_phys)) {
9347 _scsih_prep_device_scan(ioc);
9348 _scsih_create_enclosure_list_after_reset(ioc);
9349 _scsih_search_responding_sas_devices(ioc);
9350 _scsih_search_responding_pcie_devices(ioc);
9351 _scsih_search_responding_raid_devices(ioc);
9352 _scsih_search_responding_expanders(ioc);
9353 _scsih_error_recovery_delete_devices(ioc);
9354 }
9355 }
9356
9357 /**
9358 * _mpt3sas_fw_work - delayed task for processing firmware events
9359 * @ioc: per adapter object
9360 * @fw_event: The fw_event_work object
9361 * Context: user.
9362 */
9363 static void
9364 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9365 {
9366 _scsih_fw_event_del_from_list(ioc, fw_event);
9367
9368 /* the queue is being flushed so ignore this event */
9369 if (ioc->remove_host || ioc->pci_error_recovery) {
9370 fw_event_work_put(fw_event);
9371 return;
9372 }
9373
9374 switch (fw_event->event) {
9375 case MPT3SAS_PROCESS_TRIGGER_DIAG:
9376 mpt3sas_process_trigger_data(ioc,
9377 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9378 fw_event->event_data);
9379 break;
9380 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9381 while (scsi_host_in_recovery(ioc->shost) ||
9382 ioc->shost_recovery) {
9383 /*
9384 * If we're unloading, bail. Otherwise, this can become
9385 * an infinite loop.
9386 */
9387 if (ioc->remove_host)
9388 goto out;
9389 ssleep(1);
9390 }
9391 _scsih_remove_unresponding_devices(ioc);
9392 _scsih_scan_for_devices_after_reset(ioc);
9393 break;
9394 case MPT3SAS_PORT_ENABLE_COMPLETE:
9395 ioc->start_scan = 0;
9396 if (missing_delay[0] != -1 && missing_delay[1] != -1)
9397 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9398 missing_delay[1]);
9399 dewtprintk(ioc, pr_info(MPT3SAS_FMT
9400 "port enable: complete from worker thread\n",
9401 ioc->name));
9402 break;
9403 case MPT3SAS_TURN_ON_PFA_LED:
9404 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9405 break;
9406 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9407 _scsih_sas_topology_change_event(ioc, fw_event);
9408 break;
9409 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9410 _scsih_sas_device_status_change_event(ioc, fw_event);
9411 break;
9412 case MPI2_EVENT_SAS_DISCOVERY:
9413 _scsih_sas_discovery_event(ioc, fw_event);
9414 break;
9415 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9416 _scsih_sas_device_discovery_error_event(ioc, fw_event);
9417 break;
9418 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9419 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
9420 break;
9421 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9422 _scsih_sas_enclosure_dev_status_change_event(ioc,
9423 fw_event);
9424 break;
9425 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9426 _scsih_sas_ir_config_change_event(ioc, fw_event);
9427 break;
9428 case MPI2_EVENT_IR_VOLUME:
9429 _scsih_sas_ir_volume_event(ioc, fw_event);
9430 break;
9431 case MPI2_EVENT_IR_PHYSICAL_DISK:
9432 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
9433 break;
9434 case MPI2_EVENT_IR_OPERATION_STATUS:
9435 _scsih_sas_ir_operation_status_event(ioc, fw_event);
9436 break;
9437 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9438 _scsih_pcie_device_status_change_event(ioc, fw_event);
9439 break;
9440 case MPI2_EVENT_PCIE_ENUMERATION:
9441 _scsih_pcie_enumeration_event(ioc, fw_event);
9442 break;
9443 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9444 _scsih_pcie_topology_change_event(ioc, fw_event);
9445 return;
9446 break;
9447 }
9448 out:
9449 fw_event_work_put(fw_event);
9450 }
9451
9452 /**
9453 * _firmware_event_work - work queue handler for firmware events
9454 * @work: The fw_event_work object
9455 * Context: user.
9456 *
9457 * wrapper for the work thread handling firmware events
9458 */
9459
9460 static void
9461 _firmware_event_work(struct work_struct *work)
9462 {
9463 struct fw_event_work *fw_event = container_of(work,
9464 struct fw_event_work, work);
9465
9466 _mpt3sas_fw_work(fw_event->ioc, fw_event);
9467 }
9468
9469 /**
9470 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9471 * @ioc: per adapter object
9472 * @msix_index: MSIX table index supplied by the OS
9473 * @reply: reply message frame(lower 32bit addr)
9474 * Context: interrupt.
9475 *
9476 * This function merely adds a new work task into ioc->firmware_event_thread.
9477 * The tasks are worked from _firmware_event_work in user context.
9478 *
9479 * Return: 1 meaning mf should be freed from _base_interrupt
9480 * 0 means the mf is freed from this function.
9481 */
9482 u8
9483 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9484 u32 reply)
9485 {
9486 struct fw_event_work *fw_event;
9487 Mpi2EventNotificationReply_t *mpi_reply;
9488 u16 event;
9489 u16 sz;
9490 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9491
9492 /* events turned off due to host reset */
9493 if (ioc->pci_error_recovery)
9494 return 1;
9495
9496 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9497
9498 if (unlikely(!mpi_reply)) {
9499 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
9500 ioc->name, __FILE__, __LINE__, __func__);
9501 return 1;
9502 }
9503
9504 event = le16_to_cpu(mpi_reply->Event);
9505
9506 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9507 mpt3sas_trigger_event(ioc, event, 0);
9508
9509 switch (event) {
9510 /* handle these */
9511 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9512 {
9513 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9514 (Mpi2EventDataSasBroadcastPrimitive_t *)
9515 mpi_reply->EventData;
9516
9517 if (baen_data->Primitive !=
9518 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9519 return 1;
9520
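/*
 * Only one broadcast primitive is processed at a time; if one is
 * already being handled, record this one as pending so the worker
 * can rescan once the current pass completes.
 */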
9521 if (ioc->broadcast_aen_busy) {
9522 ioc->broadcast_aen_pending++;
9523 return 1;
9524 } else
9525 ioc->broadcast_aen_busy = 1;
9526 break;
9527 }
9528
9529 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9530 _scsih_check_topo_delete_events(ioc,
9531 (Mpi2EventDataSasTopologyChangeList_t *)
9532 mpi_reply->EventData);
9533 break;
9534 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9535 _scsih_check_pcie_topo_remove_events(ioc,
9536 (Mpi26EventDataPCIeTopologyChangeList_t *)
9537 mpi_reply->EventData);
9538 break;
9539 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9540 _scsih_check_ir_config_unhide_events(ioc,
9541 (Mpi2EventDataIrConfigChangeList_t *)
9542 mpi_reply->EventData);
9543 break;
9544 case MPI2_EVENT_IR_VOLUME:
9545 _scsih_check_volume_delete_events(ioc,
9546 (Mpi2EventDataIrVolume_t *)
9547 mpi_reply->EventData);
9548 break;
9549 case MPI2_EVENT_LOG_ENTRY_ADDED:
9550 {
9551 Mpi2EventDataLogEntryAdded_t *log_entry;
9552 u32 *log_code;
9553
9554 if (!ioc->is_warpdrive)
9555 break;
9556
9557 log_entry = (Mpi2EventDataLogEntryAdded_t *)
9558 mpi_reply->EventData;
9559 log_code = (u32 *)log_entry->LogData;
9560
9561 if (le16_to_cpu(log_entry->LogEntryQualifier)
9562 != MPT2_WARPDRIVE_LOGENTRY)
9563 break;
9564
9565 switch (le32_to_cpu(*log_code)) {
9566 case MPT2_WARPDRIVE_LC_SSDT:
9567 pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
9568 "IO Throttling has occurred in the WarpDrive "
9569 "subsystem. Check WarpDrive documentation for "
9570 "additional details.\n", ioc->name);
9571 break;
9572 case MPT2_WARPDRIVE_LC_SSDLW:
9573 pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
9574 "Program/Erase Cycles for the WarpDrive subsystem "
9575 "in degraded range. Check WarpDrive documentation "
9576 "for additional details.\n", ioc->name);
9577 break;
9578 case MPT2_WARPDRIVE_LC_SSDLF:
9579 pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
9580 "There are no Program/Erase Cycles for the "
9581 "WarpDrive subsystem. The storage device will be "
9582 "in read-only mode. Check WarpDrive documentation "
9583 "for additional details.\n", ioc->name);
9584 break;
9585 case MPT2_WARPDRIVE_LC_BRMF:
9586 pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
9587 "The Backup Rail Monitor has failed on the "
9588 "WarpDrive subsystem. Check WarpDrive "
9589 "documentation for additional details.\n",
9590 ioc->name);
9591 break;
9592 }
9593
9594 break;
9595 }
9596 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9597 case MPI2_EVENT_IR_OPERATION_STATUS:
9598 case MPI2_EVENT_SAS_DISCOVERY:
9599 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9600 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9601 case MPI2_EVENT_IR_PHYSICAL_DISK:
9602 case MPI2_EVENT_PCIE_ENUMERATION:
9603 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9604 break;
9605
9606 case MPI2_EVENT_TEMP_THRESHOLD:
9607 _scsih_temp_threshold_events(ioc,
9608 (Mpi2EventDataTemperature_t *)
9609 mpi_reply->EventData);
9610 break;
9611 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9612 ActiveCableEventData =
9613 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9614 switch (ActiveCableEventData->ReasonCode) {
9615 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9616 pr_notice(MPT3SAS_FMT
9617 "Currently an active cable with ReceptacleID %d\n",
9618 ioc->name, ActiveCableEventData->ReceptacleID);
9619 pr_notice("cannot be powered and devices connected\n");
9620 pr_notice("to this active cable will not be seen\n");
9621 pr_notice("This active cable requires %d mW of power\n",
9622 ActiveCableEventData->ActiveCablePowerRequirement);
9623 break;
9624
9625 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9626 pr_notice(MPT3SAS_FMT
9627 "Currently a cable with ReceptacleID %d\n",
9628 ioc->name, ActiveCableEventData->ReceptacleID);
9629 pr_notice(
9630 "is not running at optimal speed(12 Gb/s rate)\n");
9631 break;
9632 }
9633
9634 break;
9635
9636 default: /* ignore the rest */
9637 return 1;
9638 }
9639
9640 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9641 fw_event = alloc_fw_event_work(sz);
9642 if (!fw_event) {
9643 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
9644 ioc->name, __FILE__, __LINE__, __func__);
9645 return 1;
9646 }
9647
9648 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9649 fw_event->ioc = ioc;
9650 fw_event->VF_ID = mpi_reply->VF_ID;
9651 fw_event->VP_ID = mpi_reply->VP_ID;
9652 fw_event->event = event;
9653 _scsih_fw_event_add(ioc, fw_event);
9654 fw_event_work_put(fw_event);
9655 return 1;
9656 }
9657
9658 /**
9659 * _scsih_expander_node_remove - removing expander device from list.
9660 * @ioc: per adapter object
9661 * @sas_expander: the sas_node object
9662 *
9663 * Removing object and freeing associated memory from the
9664 * ioc->sas_expander_list.
9665 */
9666 static void
9667 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9668 struct _sas_node *sas_expander)
9669 {
9670 struct _sas_port *mpt3sas_port, *next;
9671 unsigned long flags;
9672
9673 /* remove sibling ports attached to this expander */
9674 list_for_each_entry_safe(mpt3sas_port, next,
9675 &sas_expander->sas_port_list, port_list) {
9676 if (ioc->shost_recovery)
9677 return;
9678 if (mpt3sas_port->remote_identify.device_type ==
9679 SAS_END_DEVICE)
9680 mpt3sas_device_remove_by_sas_address(ioc,
9681 mpt3sas_port->remote_identify.sas_address);
9682 else if (mpt3sas_port->remote_identify.device_type ==
9683 SAS_EDGE_EXPANDER_DEVICE ||
9684 mpt3sas_port->remote_identify.device_type ==
9685 SAS_FANOUT_EXPANDER_DEVICE)
9686 mpt3sas_expander_remove(ioc,
9687 mpt3sas_port->remote_identify.sas_address);
9688 }
9689
9690 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9691 sas_expander->sas_address_parent);
9692
9693 pr_info(MPT3SAS_FMT
9694 "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9695 ioc->name,
9696 sas_expander->handle, (unsigned long long)
9697 sas_expander->sas_address);
9698
9699 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9700 list_del(&sas_expander->list);
9701 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9702
9703 kfree(sas_expander->phy);
9704 kfree(sas_expander);
9705 }
9706
9707 /**
9708 * _scsih_ir_shutdown - IR shutdown notification
9709 * @ioc: per adapter object
9710 *
9711 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
9712 * the host system is shutting down.
9713 */
9714 static void
9715 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9716 {
9717 Mpi2RaidActionRequest_t *mpi_request;
9718 Mpi2RaidActionReply_t *mpi_reply;
9719 u16 smid;
9720
9721 /* is IR firmware build loaded ? */
9722 if (!ioc->ir_firmware)
9723 return;
9724
9725 /* are there any volumes ? */
9726 if (list_empty(&ioc->raid_device_list))
9727 return;
9728
9729 mutex_lock(&ioc->scsih_cmds.mutex);
9730
9731 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9732 pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
9733 ioc->name, __func__);
9734 goto out;
9735 }
9736 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9737
9738 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9739 if (!smid) {
9740 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
9741 ioc->name, __func__);
9742 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9743 goto out;
9744 }
9745
9746 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9747 ioc->scsih_cmds.smid = smid;
9748 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
9749
9750 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
9751 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9752
9753 if (!ioc->hide_ir_msg)
9754 pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
9755 init_completion(&ioc->scsih_cmds.done);
9756 mpt3sas_base_put_smid_default(ioc, smid);
9757 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9758
9759 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9760 pr_err(MPT3SAS_FMT "%s: timeout\n",
9761 ioc->name, __func__);
9762 goto out;
9763 }
9764
9765 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9766 mpi_reply = ioc->scsih_cmds.reply;
9767 if (!ioc->hide_ir_msg)
9768 pr_info(MPT3SAS_FMT "IR shutdown "
9769 "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9770 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
9771 le32_to_cpu(mpi_reply->IOCLogInfo));
9772 }
9773
9774 out:
9775 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9776 mutex_unlock(&ioc->scsih_cmds.mutex);
9777 }
9778
9779 /**
9780 * scsih_remove - detach and remove the SCSI host
9781 * @pdev: PCI device struct
9782 *
9783 * Routine called when unloading the driver.
9784 */
9785 static void scsih_remove(struct pci_dev *pdev)
9786 {
9787 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9788 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9789 struct _sas_port *mpt3sas_port, *next_port;
9790 struct _raid_device *raid_device, *next;
9791 struct MPT3SAS_TARGET *sas_target_priv_data;
9792 struct _pcie_device *pcie_device, *pcienext;
9793 struct workqueue_struct *wq;
9794 unsigned long flags;
9795
9796 ioc->remove_host = 1;
9797
9798 mpt3sas_wait_for_commands_to_complete(ioc);
9799 _scsih_flush_running_cmds(ioc);
9800
9801 _scsih_fw_event_cleanup_queue(ioc);
9802
9803 spin_lock_irqsave(&ioc->fw_event_lock, flags);
9804 wq = ioc->firmware_event_thread;
9805 ioc->firmware_event_thread = NULL;
9806 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9807 if (wq)
9808 destroy_workqueue(wq);
9809
9810 /* release all the volumes */
9811 _scsih_ir_shutdown(ioc);
9812 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9813 list) {
9814 if (raid_device->starget) {
9815 sas_target_priv_data =
9816 raid_device->starget->hostdata;
9817 sas_target_priv_data->deleted = 1;
9818 scsi_remove_target(&raid_device->starget->dev);
9819 }
9820 pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
9821 ioc->name, raid_device->handle,
9822 (unsigned long long) raid_device->wwid);
9823 _scsih_raid_device_remove(ioc, raid_device);
9824 }
9825 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9826 list) {
9827 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9828 list_del_init(&pcie_device->list);
9829 pcie_device_put(pcie_device);
9830 }
9831
9832 /* free ports attached to the sas_host */
9833 list_for_each_entry_safe(mpt3sas_port, next_port,
9834 &ioc->sas_hba.sas_port_list, port_list) {
9835 if (mpt3sas_port->remote_identify.device_type ==
9836 SAS_END_DEVICE)
9837 mpt3sas_device_remove_by_sas_address(ioc,
9838 mpt3sas_port->remote_identify.sas_address);
9839 else if (mpt3sas_port->remote_identify.device_type ==
9840 SAS_EDGE_EXPANDER_DEVICE ||
9841 mpt3sas_port->remote_identify.device_type ==
9842 SAS_FANOUT_EXPANDER_DEVICE)
9843 mpt3sas_expander_remove(ioc,
9844 mpt3sas_port->remote_identify.sas_address);
9845 }
9846
9847 /* free phys attached to the sas_host */
9848 if (ioc->sas_hba.num_phys) {
9849 kfree(ioc->sas_hba.phy);
9850 ioc->sas_hba.phy = NULL;
9851 ioc->sas_hba.num_phys = 0;
9852 }
9853
9854 sas_remove_host(shost);
9855 mpt3sas_base_detach(ioc);
9856 spin_lock(&gioc_lock);
9857 list_del(&ioc->list);
9858 spin_unlock(&gioc_lock);
9859 scsi_host_put(shost);
9860 }
9861
9862 /**
9863 * scsih_shutdown - routine called during system shutdown
9864 * @pdev: PCI device struct
9865 */
9866 static void
9867 scsih_shutdown(struct pci_dev *pdev)
9868 {
9869 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9870 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9871 struct workqueue_struct *wq;
9872 unsigned long flags;
9873
9874 ioc->remove_host = 1;
9875
9876 mpt3sas_wait_for_commands_to_complete(ioc);
9877 _scsih_flush_running_cmds(ioc);
9878
9879 _scsih_fw_event_cleanup_queue(ioc);
9880
9881 spin_lock_irqsave(&ioc->fw_event_lock, flags);
9882 wq = ioc->firmware_event_thread;
9883 ioc->firmware_event_thread = NULL;
9884 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9885 if (wq)
9886 destroy_workqueue(wq);
9887
9888 _scsih_ir_shutdown(ioc);
9889 mpt3sas_base_detach(ioc);
9890 }
9891
9892
9893 /**
9894 * _scsih_probe_boot_devices - reports 1st device
9895 * @ioc: per adapter object
9896 *
9897 * If specified in bios page 2, this routine reports the 1st
9898 * device to scsi-ml or the sas transport layer for persistent boot device
9899 * purposes. Please refer to function _scsih_determine_boot_device()
9900 */
9901 static void
9902 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
9903 {
9904 u32 channel;
9905 void *device;
9906 struct _sas_device *sas_device;
9907 struct _raid_device *raid_device;
9908 struct _pcie_device *pcie_device;
9909 u16 handle;
9910 u64 sas_address_parent;
9911 u64 sas_address;
9912 unsigned long flags;
9913 int rc;
9914 int tid;
9915
9916 /* no Bios, return immediately */
9917 if (!ioc->bios_pg3.BiosVersion)
9918 return;
9919
9920 device = NULL;
9921 if (ioc->req_boot_device.device) {
9922 device = ioc->req_boot_device.device;
9923 channel = ioc->req_boot_device.channel;
9924 } else if (ioc->req_alt_boot_device.device) {
9925 device = ioc->req_alt_boot_device.device;
9926 channel = ioc->req_alt_boot_device.channel;
9927 } else if (ioc->current_boot_device.device) {
9928 device = ioc->current_boot_device.device;
9929 channel = ioc->current_boot_device.channel;
9930 }
9931
9932 if (!device)
9933 return;
9934
9935 if (channel == RAID_CHANNEL) {
9936 raid_device = device;
9937 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9938 raid_device->id, 0);
9939 if (rc)
9940 _scsih_raid_device_remove(ioc, raid_device);
9941 } else if (channel == PCIE_CHANNEL) {
9942 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9943 pcie_device = device;
9944 tid = pcie_device->id;
9945 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
9946 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9947 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
9948 if (rc)
9949 _scsih_pcie_device_remove(ioc, pcie_device);
9950 } else {
9951 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9952 sas_device = device;
9953 handle = sas_device->handle;
9954 sas_address_parent = sas_device->sas_address_parent;
9955 sas_address = sas_device->sas_address;
9956 list_move_tail(&sas_device->list, &ioc->sas_device_list);
9957 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9958
9959 if (ioc->hide_drives)
9960 return;
9961 if (!mpt3sas_transport_port_add(ioc, handle,
9962 sas_address_parent)) {
9963 _scsih_sas_device_remove(ioc, sas_device);
9964 } else if (!sas_device->starget) {
9965 if (!ioc->is_driver_loading) {
9966 mpt3sas_transport_port_remove(ioc,
9967 sas_address,
9968 sas_address_parent);
9969 _scsih_sas_device_remove(ioc, sas_device);
9970 }
9971 }
9972 }
9973 }
9974
9975 /**
9976 * _scsih_probe_raid - reporting raid volumes to scsi-ml
9977 * @ioc: per adapter object
9978 *
9979 * Called during initial loading of the driver.
9980 */
9981 static void
9982 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
9983 {
9984 struct _raid_device *raid_device, *raid_next;
9985 int rc;
9986
9987 list_for_each_entry_safe(raid_device, raid_next,
9988 &ioc->raid_device_list, list) {
9989 if (raid_device->starget)
9990 continue;
9991 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9992 raid_device->id, 0);
9993 if (rc)
9994 _scsih_raid_device_remove(ioc, raid_device);
9995 }
9996 }
9997
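/**
 * get_next_sas_device - Get the next sas device
 * @ioc: per adapter object
 *
 * Get the next sas device from the sas_device_init_list.
 *
 * Return: sas device structure if the sas_device_init_list is not empty,
 * otherwise NULL
 */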
9998 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
9999 {
10000 struct _sas_device *sas_device = NULL;
10001 unsigned long flags;
10002
10003 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10004 if (!list_empty(&ioc->sas_device_init_list)) {
10005 sas_device = list_first_entry(&ioc->sas_device_init_list,
10006 struct _sas_device, list);
10007 sas_device_get(sas_device);
10008 }
10009 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10010
10011 return sas_device;
10012 }
10013
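/**
 * sas_device_make_active - Add sas device to sas_device_list list
 * @ioc: per adapter object
 * @sas_device: sas device object
 *
 * Move the sas device which has been registered with the SAS transport
 * layer from sas_device_init_list to sas_device_list.
 */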
10014 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10015 struct _sas_device *sas_device)
10016 {
10017 unsigned long flags;
10018
10019 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10020
10021 /*
10022 * Since we dropped the lock during the call to port_add(), we need to
10023 * be careful here that somebody else didn't move or delete this item
10024 * while we were busy with other things.
10025 *
10026 * If it was on the list, we need a put() for the reference the list
10027 * had. Either way, we need a get() for the destination list.
10028 */
10029 if (!list_empty(&sas_device->list)) {
10030 list_del_init(&sas_device->list);
10031 sas_device_put(sas_device);
10032 }
10033
10034 sas_device_get(sas_device);
10035 list_add_tail(&sas_device->list, &ioc->sas_device_list);
10036
10037 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10038 }
10039
10040 /**
10041 * _scsih_probe_sas - reporting sas devices to sas transport
10042 * @ioc: per adapter object
10043 *
10044 * Called during initial loading of the driver.
10045 */
10046 static void
10047 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
10048 {
10049 struct _sas_device *sas_device;
10050
10051 if (ioc->hide_drives)
10052 return;
10053
10054 while ((sas_device = get_next_sas_device(ioc))) {
10055 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
10056 sas_device->sas_address_parent)) {
10057 _scsih_sas_device_remove(ioc, sas_device);
10058 sas_device_put(sas_device);
10059 continue;
10060 } else if (!sas_device->starget) {
10061 /*
10062 * When async scanning is enabled, it's not possible to
10063 * remove devices while scanning is turned on due to an
10064 * oops in scsi_sysfs_add_sdev()->add_device()->
10065 * sysfs_addrm_start()
10066 */
10067 if (!ioc->is_driver_loading) {
10068 mpt3sas_transport_port_remove(ioc,
10069 sas_device->sas_address,
10070 sas_device->sas_address_parent);
10071 _scsih_sas_device_remove(ioc, sas_device);
10072 sas_device_put(sas_device);
10073 continue;
10074 }
10075 }
10076 sas_device_make_active(ioc, sas_device);
10077 sas_device_put(sas_device);
10078 }
10079 }
10080
10081 /**
10082 * get_next_pcie_device - Get the next pcie device
10083 * @ioc: per adapter object
10084 *
10085 * Get the next pcie device from pcie_device_init_list list.
10086 *
10087 * Return: pcie device structure if pcie_device_init_list list is not empty,
10088 * otherwise returns NULL
10089 */
10090 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10091 {
10092 struct _pcie_device *pcie_device = NULL;
10093 unsigned long flags;
10094
10095 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10096 if (!list_empty(&ioc->pcie_device_init_list)) {
10097 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10098 struct _pcie_device, list);
10099 pcie_device_get(pcie_device);
10100 }
10101 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10102
10103 return pcie_device;
10104 }
10105
10106 /**
10107 * pcie_device_make_active - Add pcie device to pcie_device_list list
10108 * @ioc: per adapter object
10109 * @pcie_device: pcie device object
10110 *
10111 * Add the pcie device that has been registered with the SCSI transport
10112 * layer to the pcie_device_list list.
10113 */
10114 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10115 struct _pcie_device *pcie_device)
10116 {
10117 unsigned long flags;
10118
10119 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10120
10121 if (!list_empty(&pcie_device->list)) {
10122 list_del_init(&pcie_device->list);
10123 pcie_device_put(pcie_device);
10124 }
10125 pcie_device_get(pcie_device);
10126 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10127
10128 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10129 }
10130
10131 /**
10132 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10133 * @ioc: per adapter object
10134 *
10135 * Called during initial loading of the driver.
10136 */
10137 static void
10138 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10139 {
10140 struct _pcie_device *pcie_device;
10141 int rc;
10142
10143 /* PCIe Device List */
10144 while ((pcie_device = get_next_pcie_device(ioc))) {
10145 if (pcie_device->starget) {
10146 pcie_device_put(pcie_device);
10147 continue;
10148 }
10149 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10150 pcie_device->id, 0);
10151 if (rc) {
10152 _scsih_pcie_device_remove(ioc, pcie_device);
10153 pcie_device_put(pcie_device);
10154 continue;
10155 } else if (!pcie_device->starget) {
10156 /*
10157 * When async scanning is enabled, it's not possible to
10158 * remove devices while scanning is turned on due to an
10159 * oops in scsi_sysfs_add_sdev()->add_device()->
10160 * sysfs_addrm_start()
10161 */
10162 if (!ioc->is_driver_loading) {
10163 /* TODO-- Need to find out whether this condition will
10164 * occur or not
10165 */
10166 _scsih_pcie_device_remove(ioc, pcie_device);
10167 pcie_device_put(pcie_device);
10168 continue;
10169 }
10170 }
10171 pcie_device_make_active(ioc, pcie_device);
10172 pcie_device_put(pcie_device);
10173 }
10174 }
10175
10176 /**
10177 * _scsih_probe_devices - probing for devices
10178 * @ioc: per adapter object
10179 *
10180 * Called during initial loading of the driver.
10181 */
10182 static void
10183 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10184 {
10185 u16 volume_mapping_flags;
10186
10187 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10188 return; /* return when IOC doesn't support initiator mode */
10189
10190 _scsih_probe_boot_devices(ioc);
10191
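/*
 * With IR firmware the volume mapping mode decides whether RAID volumes or
 * bare SAS devices are reported to scsi-ml first, presumably so that the
 * SCSI device IDs assigned by the kernel follow the firmware's mapping
 * order (inference from the MPI2_IOCPAGE8_IRFLAGS_* flag names).
 */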
10192 if (ioc->ir_firmware) {
10193 volume_mapping_flags =
10194 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10195 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10196 if (volume_mapping_flags ==
10197 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10198 _scsih_probe_raid(ioc);
10199 _scsih_probe_sas(ioc);
10200 } else {
10201 _scsih_probe_sas(ioc);
10202 _scsih_probe_raid(ioc);
10203 }
10204 } else {
10205 _scsih_probe_sas(ioc);
10206 _scsih_probe_pcie(ioc);
10207 }
10208 }
10209
10210 /**
10211 * scsih_scan_start - scsi lld callback for .scan_start
10212 * @shost: SCSI host pointer
10213 *
10214 * The shost has the ability to discover targets on its own instead
10215 * of scanning the entire bus. In our implementation, we will kick off
10216 * firmware discovery.
10217 */
10218 static void
10219 scsih_scan_start(struct Scsi_Host *shost)
10220 {
10221 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10222 int rc;
10223 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10224 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10225
10226 if (disable_discovery > 0)
10227 return;
10228
10229 ioc->start_scan = 1;
10230 rc = mpt3sas_port_enable(ioc);
10231
10232 if (rc != 0)
10233 pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
10234 }
10235
10236 /**
10237 * scsih_scan_finished - scsi lld callback for .scan_finished
10238 * @shost: SCSI host pointer
10239 * @time: elapsed time of the scan in jiffies
10240 *
10241 * This function will be called periodically until it returns 1 with the
10242 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10243 * we wait for firmware discovery to complete, then return 1 (see the sketch below).
10244 */
10245 static int
10246 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10247 {
10248 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10249
10250 if (disable_discovery > 0) {
10251 ioc->is_driver_loading = 0;
10252 ioc->wait_for_discovery_to_complete = 0;
10253 return 1;
10254 }
10255
10256 if (time >= (300 * HZ)) {
10257 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10258 pr_info(MPT3SAS_FMT
10259 "port enable: FAILED with timeout (timeout=300s)\n",
10260 ioc->name);
10261 ioc->is_driver_loading = 0;
10262 return 1;
10263 }
10264
10265 if (ioc->start_scan)
10266 return 0;
10267
10268 if (ioc->start_scan_failed) {
10269 pr_info(MPT3SAS_FMT
10270 "port enable: FAILED with (ioc_status=0x%08x)\n",
10271 ioc->name, ioc->start_scan_failed);
10272 ioc->is_driver_loading = 0;
10273 ioc->wait_for_discovery_to_complete = 0;
10274 ioc->remove_host = 1;
10275 return 1;
10276 }
10277
10278 pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
10279 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10280
10281 if (ioc->wait_for_discovery_to_complete) {
10282 ioc->wait_for_discovery_to_complete = 0;
10283 _scsih_probe_devices(ioc);
10284 }
10285 mpt3sas_base_start_watchdog(ioc);
10286 ioc->is_driver_loading = 0;
10287 return 1;
10288 }
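
/*
 * Illustrative sketch (not part of this driver): the SCSI midlayer's async
 * scan path drives the two callbacks above roughly like this:
 *
 *	shost->hostt->scan_start(shost);
 *	do {
 *		msleep(10);
 *	} while (!shost->hostt->scan_finished(shost, jiffies - start));
 *
 * so scsih_scan_finished() keeps returning 0 until the port-enable
 * completion clears ioc->start_scan, or until the 300 second timeout above
 * fires.
 */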
10289
10290 /* shost template for SAS 2.0 HBA devices */
10291 static struct scsi_host_template mpt2sas_driver_template = {
10292 .module = THIS_MODULE,
10293 .name = "Fusion MPT SAS Host",
10294 .proc_name = MPT2SAS_DRIVER_NAME,
10295 .queuecommand = scsih_qcmd,
10296 .target_alloc = scsih_target_alloc,
10297 .slave_alloc = scsih_slave_alloc,
10298 .slave_configure = scsih_slave_configure,
10299 .target_destroy = scsih_target_destroy,
10300 .slave_destroy = scsih_slave_destroy,
10301 .scan_finished = scsih_scan_finished,
10302 .scan_start = scsih_scan_start,
10303 .change_queue_depth = scsih_change_queue_depth,
10304 .eh_abort_handler = scsih_abort,
10305 .eh_device_reset_handler = scsih_dev_reset,
10306 .eh_target_reset_handler = scsih_target_reset,
10307 .eh_host_reset_handler = scsih_host_reset,
10308 .bios_param = scsih_bios_param,
10309 .can_queue = 1,
10310 .this_id = -1,
10311 .sg_tablesize = MPT2SAS_SG_DEPTH,
10312 .max_sectors = 32767,
10313 .cmd_per_lun = 7,
10314 .use_clustering = ENABLE_CLUSTERING,
10315 .shost_attrs = mpt3sas_host_attrs,
10316 .sdev_attrs = mpt3sas_dev_attrs,
10317 .track_queue_depth = 1,
10318 .cmd_size = sizeof(struct scsiio_tracker),
10319 };
10320
10321 /* raid transport support for SAS 2.0 HBA devices */
10322 static struct raid_function_template mpt2sas_raid_functions = {
10323 .cookie = &mpt2sas_driver_template,
10324 .is_raid = scsih_is_raid,
10325 .get_resync = scsih_get_resync,
10326 .get_state = scsih_get_state,
10327 };
10328
10329 /* shost template for SAS 3.0 HBA devices */
10330 static struct scsi_host_template mpt3sas_driver_template = {
10331 .module = THIS_MODULE,
10332 .name = "Fusion MPT SAS Host",
10333 .proc_name = MPT3SAS_DRIVER_NAME,
10334 .queuecommand = scsih_qcmd,
10335 .target_alloc = scsih_target_alloc,
10336 .slave_alloc = scsih_slave_alloc,
10337 .slave_configure = scsih_slave_configure,
10338 .target_destroy = scsih_target_destroy,
10339 .slave_destroy = scsih_slave_destroy,
10340 .scan_finished = scsih_scan_finished,
10341 .scan_start = scsih_scan_start,
10342 .change_queue_depth = scsih_change_queue_depth,
10343 .eh_abort_handler = scsih_abort,
10344 .eh_device_reset_handler = scsih_dev_reset,
10345 .eh_target_reset_handler = scsih_target_reset,
10346 .eh_host_reset_handler = scsih_host_reset,
10347 .bios_param = scsih_bios_param,
10348 .can_queue = 1,
10349 .this_id = -1,
10350 .sg_tablesize = MPT3SAS_SG_DEPTH,
10351 .max_sectors = 32767,
10352 .cmd_per_lun = 7,
10353 .use_clustering = ENABLE_CLUSTERING,
10354 .shost_attrs = mpt3sas_host_attrs,
10355 .sdev_attrs = mpt3sas_dev_attrs,
10356 .track_queue_depth = 1,
10357 .cmd_size = sizeof(struct scsiio_tracker),
10358 };
10359
10360 /* raid transport support for SAS 3.0 HBA devices */
10361 static struct raid_function_template mpt3sas_raid_functions = {
10362 .cookie = &mpt3sas_driver_template,
10363 .is_raid = scsih_is_raid,
10364 .get_resync = scsih_get_resync,
10365 .get_state = scsih_get_state,
10366 };
10367
10368 /**
10369 * _scsih_determine_hba_mpi_version - determine which MPI version class
10370 * this device belongs to.
10371 * @pdev: PCI device struct
10372 *
10373 * return MPI2_VERSION for SAS 2.0 HBA devices,
10374 * MPI25_VERSION for SAS 3.0 HBA devices, and
10375 * MPI26_VERSION for Cutlass & Invader SAS 3.0 and newer (SAS 3.5) HBA devices
10376 */
10377 static u16
10378 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10379 {
10380
10381 switch (pdev->device) {
10382 case MPI2_MFGPAGE_DEVID_SSS6200:
10383 case MPI2_MFGPAGE_DEVID_SAS2004:
10384 case MPI2_MFGPAGE_DEVID_SAS2008:
10385 case MPI2_MFGPAGE_DEVID_SAS2108_1:
10386 case MPI2_MFGPAGE_DEVID_SAS2108_2:
10387 case MPI2_MFGPAGE_DEVID_SAS2108_3:
10388 case MPI2_MFGPAGE_DEVID_SAS2116_1:
10389 case MPI2_MFGPAGE_DEVID_SAS2116_2:
10390 case MPI2_MFGPAGE_DEVID_SAS2208_1:
10391 case MPI2_MFGPAGE_DEVID_SAS2208_2:
10392 case MPI2_MFGPAGE_DEVID_SAS2208_3:
10393 case MPI2_MFGPAGE_DEVID_SAS2208_4:
10394 case MPI2_MFGPAGE_DEVID_SAS2208_5:
10395 case MPI2_MFGPAGE_DEVID_SAS2208_6:
10396 case MPI2_MFGPAGE_DEVID_SAS2308_1:
10397 case MPI2_MFGPAGE_DEVID_SAS2308_2:
10398 case MPI2_MFGPAGE_DEVID_SAS2308_3:
10399 case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
10400 return MPI2_VERSION;
10401 case MPI25_MFGPAGE_DEVID_SAS3004:
10402 case MPI25_MFGPAGE_DEVID_SAS3008:
10403 case MPI25_MFGPAGE_DEVID_SAS3108_1:
10404 case MPI25_MFGPAGE_DEVID_SAS3108_2:
10405 case MPI25_MFGPAGE_DEVID_SAS3108_5:
10406 case MPI25_MFGPAGE_DEVID_SAS3108_6:
10407 return MPI25_VERSION;
10408 case MPI26_MFGPAGE_DEVID_SAS3216:
10409 case MPI26_MFGPAGE_DEVID_SAS3224:
10410 case MPI26_MFGPAGE_DEVID_SAS3316_1:
10411 case MPI26_MFGPAGE_DEVID_SAS3316_2:
10412 case MPI26_MFGPAGE_DEVID_SAS3316_3:
10413 case MPI26_MFGPAGE_DEVID_SAS3316_4:
10414 case MPI26_MFGPAGE_DEVID_SAS3324_1:
10415 case MPI26_MFGPAGE_DEVID_SAS3324_2:
10416 case MPI26_MFGPAGE_DEVID_SAS3324_3:
10417 case MPI26_MFGPAGE_DEVID_SAS3324_4:
10418 case MPI26_MFGPAGE_DEVID_SAS3508:
10419 case MPI26_MFGPAGE_DEVID_SAS3508_1:
10420 case MPI26_MFGPAGE_DEVID_SAS3408:
10421 case MPI26_MFGPAGE_DEVID_SAS3516:
10422 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10423 case MPI26_MFGPAGE_DEVID_SAS3416:
10424 case MPI26_MFGPAGE_DEVID_SAS3616:
10425 return MPI26_VERSION;
10426 }
10427 return 0;
10428 }
10429
10430 /**
10431 * _scsih_probe - attach and add scsi host
10432 * @pdev: PCI device struct
10433 * @id: pci device id
10434 *
10435 * Return: 0 success, anything else error.
10436 */
10437 static int
10438 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10439 {
10440 struct MPT3SAS_ADAPTER *ioc;
10441 struct Scsi_Host *shost = NULL;
10442 int rv;
10443 u16 hba_mpi_version;
10444
10445 /* Determine in which MPI version class this pci device belongs */
10446 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10447 if (hba_mpi_version == 0)
10448 return -ENODEV;
10449
10450 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
10451 * for other generation HBA's return with -ENODEV
10452 */
10453 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
10454 return -ENODEV;
10455
10456 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
10457 * for other generation HBA's return with -ENODEV
10458 */
10459 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
10460 || hba_mpi_version == MPI26_VERSION)))
10461 return -ENODEV;
10462
10463 switch (hba_mpi_version) {
10464 case MPI2_VERSION:
10465 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10466 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10467 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
10468 shost = scsi_host_alloc(&mpt2sas_driver_template,
10469 sizeof(struct MPT3SAS_ADAPTER));
10470 if (!shost)
10471 return -ENODEV;
10472 ioc = shost_priv(shost);
10473 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10474 ioc->hba_mpi_version_belonged = hba_mpi_version;
10475 ioc->id = mpt2_ids++;
10476 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10477 switch (pdev->device) {
10478 case MPI2_MFGPAGE_DEVID_SSS6200:
10479 ioc->is_warpdrive = 1;
10480 ioc->hide_ir_msg = 1;
10481 break;
10482 case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
10483 ioc->is_mcpu_endpoint = 1;
10484 break;
10485 default:
10486 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10487 break;
10488 }
10489 break;
10490 case MPI25_VERSION:
10491 case MPI26_VERSION:
10492 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
10493 shost = scsi_host_alloc(&mpt3sas_driver_template,
10494 sizeof(struct MPT3SAS_ADAPTER));
10495 if (!shost)
10496 return -ENODEV;
10497 ioc = shost_priv(shost);
10498 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10499 ioc->hba_mpi_version_belonged = hba_mpi_version;
10500 ioc->id = mpt3_ids++;
10501 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10502 switch (pdev->device) {
10503 case MPI26_MFGPAGE_DEVID_SAS3508:
10504 case MPI26_MFGPAGE_DEVID_SAS3508_1:
10505 case MPI26_MFGPAGE_DEVID_SAS3408:
10506 case MPI26_MFGPAGE_DEVID_SAS3516:
10507 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10508 case MPI26_MFGPAGE_DEVID_SAS3416:
10509 case MPI26_MFGPAGE_DEVID_SAS3616:
10510 ioc->is_gen35_ioc = 1;
10511 break;
10512 default:
10513 ioc->is_gen35_ioc = 0;
10514 }
10515 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10516 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10517 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10518 ioc->combined_reply_queue = 1;
10519 if (ioc->is_gen35_ioc)
10520 ioc->combined_reply_index_count =
10521 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10522 else
10523 ioc->combined_reply_index_count =
10524 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10525 }
10526 break;
10527 default:
10528 return -ENODEV;
10529 }
10530
10531 INIT_LIST_HEAD(&ioc->list);
10532 spin_lock(&gioc_lock);
10533 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10534 spin_unlock(&gioc_lock);
10535 ioc->shost = shost;
10536 ioc->pdev = pdev;
10537 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10538 ioc->tm_cb_idx = tm_cb_idx;
10539 ioc->ctl_cb_idx = ctl_cb_idx;
10540 ioc->base_cb_idx = base_cb_idx;
10541 ioc->port_enable_cb_idx = port_enable_cb_idx;
10542 ioc->transport_cb_idx = transport_cb_idx;
10543 ioc->scsih_cb_idx = scsih_cb_idx;
10544 ioc->config_cb_idx = config_cb_idx;
10545 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10546 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10547 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10548 ioc->logging_level = logging_level;
10549 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10550 /* misc semaphores and spin locks */
10551 mutex_init(&ioc->reset_in_progress_mutex);
10552 /* initializing pci_access_mutex lock */
10553 mutex_init(&ioc->pci_access_mutex);
10554 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10555 spin_lock_init(&ioc->scsi_lookup_lock);
10556 spin_lock_init(&ioc->sas_device_lock);
10557 spin_lock_init(&ioc->sas_node_lock);
10558 spin_lock_init(&ioc->fw_event_lock);
10559 spin_lock_init(&ioc->raid_device_lock);
10560 spin_lock_init(&ioc->pcie_device_lock);
10561 spin_lock_init(&ioc->diag_trigger_lock);
10562
10563 INIT_LIST_HEAD(&ioc->sas_device_list);
10564 INIT_LIST_HEAD(&ioc->sas_device_init_list);
10565 INIT_LIST_HEAD(&ioc->sas_expander_list);
10566 INIT_LIST_HEAD(&ioc->enclosure_list);
10567 INIT_LIST_HEAD(&ioc->pcie_device_list);
10568 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10569 INIT_LIST_HEAD(&ioc->fw_event_list);
10570 INIT_LIST_HEAD(&ioc->raid_device_list);
10571 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10572 INIT_LIST_HEAD(&ioc->delayed_tr_list);
10573 INIT_LIST_HEAD(&ioc->delayed_sc_list);
10574 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10575 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10576 INIT_LIST_HEAD(&ioc->reply_queue_list);
10577
10578 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10579
10580 /* init shost parameters */
10581 shost->max_cmd_len = 32;
10582 shost->max_lun = max_lun;
10583 shost->transportt = mpt3sas_transport_template;
10584 shost->unique_id = ioc->id;
10585
10586 if (ioc->is_mcpu_endpoint) {
10587 /* mCPU MPI supports 64K max IO */
10588 shost->max_sectors = 128;
10589 pr_info(MPT3SAS_FMT
10590 "The max_sectors value is set to %d\n",
10591 ioc->name, shost->max_sectors);
10592 } else {
10593 if (max_sectors != 0xFFFF) {
10594 if (max_sectors < 64) {
10595 shost->max_sectors = 64;
10596 pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
10597 "for max_sectors, range is 64 to 32767. " \
10598 "Assigning value of 64.\n", \
10599 ioc->name, max_sectors);
10600 } else if (max_sectors > 32767) {
10601 shost->max_sectors = 32767;
10602 pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
10603 "for max_sectors, range is 64 to 32767." \
10604 "Assigning default value of 32767.\n", \
10605 ioc->name, max_sectors);
10606 } else {
10607 shost->max_sectors = max_sectors & 0xFFFE;
10608 pr_info(MPT3SAS_FMT
10609 "The max_sectors value is set to %d\n",
10610 ioc->name, shost->max_sectors);
10611 }
10612 }
10613 }
10614 /* register EEDP capabilities with SCSI layer */
10615 if (prot_mask > 0)
10616 scsi_host_set_prot(shost, prot_mask);
10617 else
10618 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
10619 | SHOST_DIF_TYPE2_PROTECTION
10620 | SHOST_DIF_TYPE3_PROTECTION);
10621
10622 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
10623
10624 /* event thread */
10625 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10626 "fw_event_%s%d", ioc->driver_name, ioc->id);
10627 ioc->firmware_event_thread = alloc_ordered_workqueue(
10628 ioc->firmware_event_name, 0);
10629 if (!ioc->firmware_event_thread) {
10630 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
10631 ioc->name, __FILE__, __LINE__, __func__);
10632 rv = -ENODEV;
10633 goto out_thread_fail;
10634 }
10635
10636 ioc->is_driver_loading = 1;
10637 if ((mpt3sas_base_attach(ioc))) {
10638 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
10639 ioc->name, __FILE__, __LINE__, __func__);
10640 rv = -ENODEV;
10641 goto out_attach_fail;
10642 }
10643
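/*
 * WarpDrive (SSS6200) controllers normally expose only the RAID volumes
 * and hide the member disks from the OS; manufacturing page 10 can
 * override that behaviour, hence the hide_drives selection below.
 */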
10644 if (ioc->is_warpdrive) {
10645 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
10646 ioc->hide_drives = 0;
10647 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
10648 ioc->hide_drives = 1;
10649 else {
10650 if (mpt3sas_get_num_volumes(ioc))
10651 ioc->hide_drives = 1;
10652 else
10653 ioc->hide_drives = 0;
10654 }
10655 } else
10656 ioc->hide_drives = 0;
10657
10658 rv = scsi_add_host(shost, &pdev->dev);
10659 if (rv) {
10660 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
10661 ioc->name, __FILE__, __LINE__, __func__);
10662 goto out_add_shost_fail;
10663 }
10664
10665 scsi_scan_host(shost);
10666 return 0;
10667 out_add_shost_fail:
10668 mpt3sas_base_detach(ioc);
10669 out_attach_fail:
10670 destroy_workqueue(ioc->firmware_event_thread);
10671 out_thread_fail:
10672 spin_lock(&gioc_lock);
10673 list_del(&ioc->list);
10674 spin_unlock(&gioc_lock);
10675 scsi_host_put(shost);
10676 return rv;
10677 }
10678
10679 #ifdef CONFIG_PM
10680 /**
10681 * scsih_suspend - power management suspend main entry point
10682 * @pdev: PCI device struct
10683 * @state: PM state change to (usually PCI_D3)
10684 *
10685 * Return: 0 success, anything else error.
10686 */
10687 static int
10688 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10689 {
10690 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10691 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10692 pci_power_t device_state;
10693
10694 mpt3sas_base_stop_watchdog(ioc);
10695 flush_scheduled_work();
10696 scsi_block_requests(shost);
10697 device_state = pci_choose_state(pdev, state);
10698 pr_info(MPT3SAS_FMT
10699 "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10700 ioc->name, pdev, pci_name(pdev), device_state);
10701
10702 pci_save_state(pdev);
10703 mpt3sas_base_free_resources(ioc);
10704 pci_set_power_state(pdev, device_state);
10705 return 0;
10706 }
10707
10708 /**
10709 * scsih_resume - power management resume main entry point
10710 * @pdev: PCI device struct
10711 *
10712 * Return: 0 success, anything else error.
10713 */
10714 static int
10715 scsih_resume(struct pci_dev *pdev)
10716 {
10717 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10718 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10719 pci_power_t device_state = pdev->current_state;
10720 int r;
10721
10722 pr_info(MPT3SAS_FMT
10723 "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10724 ioc->name, pdev, pci_name(pdev), device_state);
10725
10726 pci_set_power_state(pdev, PCI_D0);
10727 pci_enable_wake(pdev, PCI_D0, 0);
10728 pci_restore_state(pdev);
10729 ioc->pdev = pdev;
10730 r = mpt3sas_base_map_resources(ioc);
10731 if (r)
10732 return r;
10733
10734 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
10735 scsi_unblock_requests(shost);
10736 mpt3sas_base_start_watchdog(ioc);
10737 return 0;
10738 }
10739 #endif /* CONFIG_PM */
10740
10741 /**
10742 * scsih_pci_error_detected - Called when a PCI error is detected.
10743 * @pdev: PCI device struct
10744 * @state: PCI channel state
10745 *
10746 * Description: Called when a PCI error is detected.
10747 *
10748 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
10749 */
10750 static pci_ers_result_t
10751 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10752 {
10753 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10754 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10755
10756 pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
10757 ioc->name, state);
10758
10759 switch (state) {
10760 case pci_channel_io_normal:
10761 return PCI_ERS_RESULT_CAN_RECOVER;
10762 case pci_channel_io_frozen:
10763 /* Fatal error, prepare for slot reset */
10764 ioc->pci_error_recovery = 1;
10765 scsi_block_requests(ioc->shost);
10766 mpt3sas_base_stop_watchdog(ioc);
10767 mpt3sas_base_free_resources(ioc);
10768 return PCI_ERS_RESULT_NEED_RESET;
10769 case pci_channel_io_perm_failure:
10770 /* Permanent error, prepare for device removal */
10771 ioc->pci_error_recovery = 1;
10772 mpt3sas_base_stop_watchdog(ioc);
10773 _scsih_flush_running_cmds(ioc);
10774 return PCI_ERS_RESULT_DISCONNECT;
10775 }
10776 return PCI_ERS_RESULT_NEED_RESET;
10777 }
10778
10779 /**
10780 * scsih_pci_slot_reset - Called when PCI slot has been reset.
10781 * @pdev: PCI device struct
10782 *
10783 * Description: This routine is called by the pci error recovery
10784 * code after the PCI slot has been reset, just before we
10785 * should resume normal operations.
10786 */
10787 static pci_ers_result_t
10788 scsih_pci_slot_reset(struct pci_dev *pdev)
10789 {
10790 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10791 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10792 int rc;
10793
10794 pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
10795 ioc->name);
10796
10797 ioc->pci_error_recovery = 0;
10798 ioc->pdev = pdev;
10799 pci_restore_state(pdev);
10800 rc = mpt3sas_base_map_resources(ioc);
10801 if (rc)
10802 return PCI_ERS_RESULT_DISCONNECT;
10803
10804 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10805
10806 pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
10807 (rc == 0) ? "success" : "failed");
10808
10809 if (!rc)
10810 return PCI_ERS_RESULT_RECOVERED;
10811 else
10812 return PCI_ERS_RESULT_DISCONNECT;
10813 }
10814
10815 /**
10816 * scsih_pci_resume() - resume normal ops after PCI reset
10817 * @pdev: pointer to PCI device
10818 *
10819 * Called when the error recovery driver tells us that it's
10820 * OK to resume normal operation. Use completion to allow
10821 * halted scsi ops to resume.
10822 */
10823 static void
10824 scsih_pci_resume(struct pci_dev *pdev)
10825 {
10826 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10827 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10828
10829 pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
10830
10831 pci_cleanup_aer_uncorrect_error_status(pdev);
10832 mpt3sas_base_start_watchdog(ioc);
10833 scsi_unblock_requests(ioc->shost);
10834 }
10835
10836 /**
10837 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
10838 * @pdev: pointer to PCI device
10839 */
10840 static pci_ers_result_t
10841 scsih_pci_mmio_enabled(struct pci_dev *pdev)
10842 {
10843 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10844 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10845
10846 pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
10847 ioc->name);
10848
10849 /* TODO - dump whatever for debugging purposes */
10850
10851 * This is called only if scsih_pci_error_detected returns
10852 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
10853 * works, no need to reset slot.
10854 */
10855 return PCI_ERS_RESULT_RECOVERED;
10856 }
10857
10858 /**
10859 * scsih_ncq_prio_supp - Check for NCQ command priority support
10860 * @sdev: scsi device struct
10861 *
10862 * This is called when a user indicates they would like to enable
10863 * ncq command priorities. This works only on SATA devices.
10864 */
10865 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
10866 {
10867 unsigned char *buf;
10868 bool ncq_prio_supp = false;
10869
10870 if (!scsi_device_supports_vpd(sdev))
10871 return ncq_prio_supp;
10872
10873 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
10874 if (!buf)
10875 return ncq_prio_supp;
10876
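/*
 * VPD page 0x89 (ATA Information) embeds the 512-byte ATA IDENTIFY DEVICE
 * data at byte offset 60, so buf[213] bit 4 corresponds to IDENTIFY word 76
 * bit 12 (Serial ATA capabilities: NCQ priority information supported).
 */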
10877 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
10878 ncq_prio_supp = (buf[213] >> 4) & 1;
10879
10880 kfree(buf);
10881 return ncq_prio_supp;
10882 }
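
/*
 * Minimal usage sketch (hypothetical caller, e.g. a sysfs "store" handler
 * that toggles NCQ priority for a device); only the scsih_ncq_prio_supp()
 * call is real, the surrounding names are illustrative:
 *
 *	if (!scsih_ncq_prio_supp(sdev))
 *		return -EINVAL;
 *	sas_device_priv_data->ncq_prio_enable = enable;
 */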
10883 /*
10884 * The pci device ids are defined in mpi/mpi2_cnfg.h.
10885 */
10886 static const struct pci_device_id mpt3sas_pci_table[] = {
10887 /* Spitfire ~ 2004 */
10888 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
10889 PCI_ANY_ID, PCI_ANY_ID },
10890 /* Falcon ~ 2008 */
10891 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
10892 PCI_ANY_ID, PCI_ANY_ID },
10893 /* Liberator ~ 2108 */
10894 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
10895 PCI_ANY_ID, PCI_ANY_ID },
10896 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
10897 PCI_ANY_ID, PCI_ANY_ID },
10898 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
10899 PCI_ANY_ID, PCI_ANY_ID },
10900 /* Meteor ~ 2116 */
10901 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
10902 PCI_ANY_ID, PCI_ANY_ID },
10903 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
10904 PCI_ANY_ID, PCI_ANY_ID },
10905 /* Thunderbolt ~ 2208 */
10906 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
10907 PCI_ANY_ID, PCI_ANY_ID },
10908 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
10909 PCI_ANY_ID, PCI_ANY_ID },
10910 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
10911 PCI_ANY_ID, PCI_ANY_ID },
10912 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
10913 PCI_ANY_ID, PCI_ANY_ID },
10914 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
10915 PCI_ANY_ID, PCI_ANY_ID },
10916 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
10917 PCI_ANY_ID, PCI_ANY_ID },
10918 /* Mustang ~ 2308 */
10919 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
10920 PCI_ANY_ID, PCI_ANY_ID },
10921 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
10922 PCI_ANY_ID, PCI_ANY_ID },
10923 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
10924 PCI_ANY_ID, PCI_ANY_ID },
10925 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP,
10926 PCI_ANY_ID, PCI_ANY_ID },
10927 /* SSS6200 */
10928 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
10929 PCI_ANY_ID, PCI_ANY_ID },
10930 /* Fury ~ 3004 and 3008 */
10931 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
10932 PCI_ANY_ID, PCI_ANY_ID },
10933 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
10934 PCI_ANY_ID, PCI_ANY_ID },
10935 /* Invader ~ 3108 */
10936 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
10937 PCI_ANY_ID, PCI_ANY_ID },
10938 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
10939 PCI_ANY_ID, PCI_ANY_ID },
10940 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
10941 PCI_ANY_ID, PCI_ANY_ID },
10942 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
10943 PCI_ANY_ID, PCI_ANY_ID },
10944 /* Cutlass ~ 3216 and 3224 */
10945 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
10946 PCI_ANY_ID, PCI_ANY_ID },
10947 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
10948 PCI_ANY_ID, PCI_ANY_ID },
10949 /* Intruder ~ 3316 and 3324 */
10950 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
10951 PCI_ANY_ID, PCI_ANY_ID },
10952 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
10953 PCI_ANY_ID, PCI_ANY_ID },
10954 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
10955 PCI_ANY_ID, PCI_ANY_ID },
10956 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
10957 PCI_ANY_ID, PCI_ANY_ID },
10958 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
10959 PCI_ANY_ID, PCI_ANY_ID },
10960 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
10961 PCI_ANY_ID, PCI_ANY_ID },
10962 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
10963 PCI_ANY_ID, PCI_ANY_ID },
10964 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
10965 PCI_ANY_ID, PCI_ANY_ID },
10966 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
10967 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
10968 PCI_ANY_ID, PCI_ANY_ID },
10969 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
10970 PCI_ANY_ID, PCI_ANY_ID },
10971 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
10972 PCI_ANY_ID, PCI_ANY_ID },
10973 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
10974 PCI_ANY_ID, PCI_ANY_ID },
10975 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
10976 PCI_ANY_ID, PCI_ANY_ID },
10977 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
10978 PCI_ANY_ID, PCI_ANY_ID },
10979 /* Mercator ~ 3616 */
10980 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
10981 PCI_ANY_ID, PCI_ANY_ID },
10982 {0} /* Terminating entry */
10983 };
10984 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
10985
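/*
 * PCI error recovery flow (simplified sketch of how the AER core drives the
 * callbacks below): error_detected() runs first; if it returns
 * PCI_ERS_RESULT_CAN_RECOVER, mmio_enabled() is called with MMIO access
 * restored, otherwise the core resets the link/slot and calls slot_reset();
 * after successful recovery, resume() restarts normal I/O. Returning
 * PCI_ERS_RESULT_DISCONNECT at any stage detaches the device instead.
 */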
10986 static struct pci_error_handlers _mpt3sas_err_handler = {
10987 .error_detected = scsih_pci_error_detected,
10988 .mmio_enabled = scsih_pci_mmio_enabled,
10989 .slot_reset = scsih_pci_slot_reset,
10990 .resume = scsih_pci_resume,
10991 };
10992
10993 static struct pci_driver mpt3sas_driver = {
10994 .name = MPT3SAS_DRIVER_NAME,
10995 .id_table = mpt3sas_pci_table,
10996 .probe = _scsih_probe,
10997 .remove = scsih_remove,
10998 .shutdown = scsih_shutdown,
10999 .err_handler = &_mpt3sas_err_handler,
11000 #ifdef CONFIG_PM
11001 .suspend = scsih_suspend,
11002 .resume = scsih_resume,
11003 #endif
11004 };
11005
11006 /**
11007 * scsih_init - main entry point for this driver.
11008 *
11009 * Return: 0 success, anything else error.
11010 */
11011 static int
11012 scsih_init(void)
11013 {
11014 mpt2_ids = 0;
11015 mpt3_ids = 0;
11016
11017 mpt3sas_base_initialize_callback_handler();
11018
11019 /* queuecommand callback handler */
11020 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
11021
11022 /* task management callback handler */
11023 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
11024
11025 /* base internal commands callback handler */
11026 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
11027 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
11028 mpt3sas_port_enable_done);
11029
11030 /* transport internal commands callback handler */
11031 transport_cb_idx = mpt3sas_base_register_callback_handler(
11032 mpt3sas_transport_done);
11033
11034 /* scsih internal commands callback handler */
11035 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
11036
11037 /* configuration page API internal commands callback handler */
11038 config_cb_idx = mpt3sas_base_register_callback_handler(
11039 mpt3sas_config_done);
11040
11041 /* ctl module callback handler */
11042 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
11043
11044 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
11045 _scsih_tm_tr_complete);
11046
11047 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
11048 _scsih_tm_volume_tr_complete);
11049
11050 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
11051 _scsih_sas_control_complete);
11052
11053 return 0;
11054 }
11055
11056 /**
11057 * scsih_exit - exit point for this driver (when it is a module).
11060 */
11061 static void
11062 scsih_exit(void)
11063 {
11064
11065 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
11066 mpt3sas_base_release_callback_handler(tm_cb_idx);
11067 mpt3sas_base_release_callback_handler(base_cb_idx);
11068 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
11069 mpt3sas_base_release_callback_handler(transport_cb_idx);
11070 mpt3sas_base_release_callback_handler(scsih_cb_idx);
11071 mpt3sas_base_release_callback_handler(config_cb_idx);
11072 mpt3sas_base_release_callback_handler(ctl_cb_idx);
11073
11074 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
11075 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
11076 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
11077
11078 /* raid transport support */
11079 if (hbas_to_enumerate != 1)
11080 raid_class_release(mpt3sas_raid_template);
11081 if (hbas_to_enumerate != 2)
11082 raid_class_release(mpt2sas_raid_template);
11083 sas_release_transport(mpt3sas_transport_template);
11084 }
11085
11086 /**
11087 * _mpt3sas_init - main entry point for this driver.
11088 *
11089 * Return: 0 success, anything else error.
11090 */
11091 static int __init
11092 _mpt3sas_init(void)
11093 {
11094 int error;
11095
11096 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11097 MPT3SAS_DRIVER_VERSION);
11098
11099 mpt3sas_transport_template =
11100 sas_attach_transport(&mpt3sas_transport_functions);
11101 if (!mpt3sas_transport_template)
11102 return -ENODEV;
11103
11104 /* No need to attach the mpt3sas raid functions template
11105 * if the hbas_to_enumerate value is one.
11106 */
11107 if (hbas_to_enumerate != 1) {
11108 mpt3sas_raid_template =
11109 raid_class_attach(&mpt3sas_raid_functions);
11110 if (!mpt3sas_raid_template) {
11111 sas_release_transport(mpt3sas_transport_template);
11112 return -ENODEV;
11113 }
11114 }
11115
11116 /* No need to attach the mpt2sas raid functions template
11117 * if the hbas_to_enumerate value is two.
11118 */
11119 if (hbas_to_enumerate != 2) {
11120 mpt2sas_raid_template =
11121 raid_class_attach(&mpt2sas_raid_functions);
11122 if (!mpt2sas_raid_template) {
11123 sas_release_transport(mpt3sas_transport_template);
11124 return -ENODEV;
11125 }
11126 }
11127
11128 error = scsih_init();
11129 if (error) {
11130 scsih_exit();
11131 return error;
11132 }
11133
11134 mpt3sas_ctl_init(hbas_to_enumerate);
11135
11136 error = pci_register_driver(&mpt3sas_driver);
11137 if (error)
11138 scsih_exit();
11139
11140 return error;
11141 }
11142
11143 /**
11144 * _mpt3sas_exit - exit point for this driver (when it is a module).
11145 *
11146 */
11147 static void __exit
11148 _mpt3sas_exit(void)
11149 {
11150 pr_info("mpt3sas version %s unloading\n",
11151 MPT3SAS_DRIVER_VERSION);
11152
11153 mpt3sas_ctl_exit(hbas_to_enumerate);
11154
11155 pci_unregister_driver(&mpt3sas_driver);
11156
11157 scsih_exit();
11158 }
11159
11160 module_init(_mpt3sas_init);
11161 module_exit(_mpt3sas_exit);
11162