1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * driver for Microsemi PQI-based storage controllers
4 * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
5 * Copyright (c) 2016-2018 Microsemi Corporation
6 * Copyright (c) 2016 PMC-Sierra, Inc.
7 *
8 * Questions/Comments/Bugfixes to storagedev@microchip.com
9 *
10 */
11
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/rtc.h>
19 #include <linux/bcd.h>
20 #include <linux/reboot.h>
21 #include <linux/cciss_ioctl.h>
22 #include <linux/blk-mq-pci.h>
23 #include <scsi/scsi_host.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_eh.h>
27 #include <scsi/scsi_transport_sas.h>
28 #include <asm/unaligned.h>
29 #include "smartpqi.h"
30 #include "smartpqi_sis.h"
31
32 #if !defined(BUILD_TIMESTAMP)
33 #define BUILD_TIMESTAMP
34 #endif
35
36 #define DRIVER_VERSION "1.2.8-026"
37 #define DRIVER_MAJOR 1
38 #define DRIVER_MINOR 2
39 #define DRIVER_RELEASE 8
40 #define DRIVER_REVISION 26
41
42 #define DRIVER_NAME "Microsemi PQI Driver (v" \
43 DRIVER_VERSION BUILD_TIMESTAMP ")"
44 #define DRIVER_NAME_SHORT "smartpqi"
45
46 #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
47
48 MODULE_AUTHOR("Microsemi");
49 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
50 DRIVER_VERSION);
51 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
52 MODULE_VERSION(DRIVER_VERSION);
53 MODULE_LICENSE("GPL");
54
55 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
56 static void pqi_ctrl_offline_worker(struct work_struct *work);
57 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
58 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
59 static void pqi_scan_start(struct Scsi_Host *shost);
60 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
61 struct pqi_queue_group *queue_group, enum pqi_io_path path,
62 struct pqi_io_request *io_request);
63 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
64 struct pqi_iu_header *request, unsigned int flags,
65 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
66 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
67 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
68 unsigned int cdb_length, struct pqi_queue_group *queue_group,
69 struct pqi_encryption_info *encryption_info, bool raid_bypass);
70 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
71 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
72 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
73 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
74 u32 bytes_requested);
75 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
76 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
77 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
78 struct pqi_scsi_dev *device, unsigned long timeout_secs);
79
80 /* for flags argument to pqi_submit_raid_request_synchronous() */
81 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
82
83 static struct scsi_transport_template *pqi_sas_transport_template;
84
85 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
86
87 enum pqi_lockup_action {
88 NONE,
89 REBOOT,
90 PANIC
91 };
92
93 static enum pqi_lockup_action pqi_lockup_action = NONE;
94
95 static struct {
96 enum pqi_lockup_action action;
97 char *name;
98 } pqi_lockup_actions[] = {
99 {
100 .action = NONE,
101 .name = "none",
102 },
103 {
104 .action = REBOOT,
105 .name = "reboot",
106 },
107 {
108 .action = PANIC,
109 .name = "panic",
110 },
111 };
112
113 static unsigned int pqi_supported_event_types[] = {
114 PQI_EVENT_TYPE_HOTPLUG,
115 PQI_EVENT_TYPE_HARDWARE,
116 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
117 PQI_EVENT_TYPE_LOGICAL_DEVICE,
118 PQI_EVENT_TYPE_OFA,
119 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
120 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
121 };
122
123 static int pqi_disable_device_id_wildcards;
124 module_param_named(disable_device_id_wildcards,
125 pqi_disable_device_id_wildcards, int, 0644);
126 MODULE_PARM_DESC(disable_device_id_wildcards,
127 "Disable device ID wildcards.");
128
129 static int pqi_disable_heartbeat;
130 module_param_named(disable_heartbeat,
131 pqi_disable_heartbeat, int, 0644);
132 MODULE_PARM_DESC(disable_heartbeat,
133 "Disable heartbeat.");
134
135 static int pqi_disable_ctrl_shutdown;
136 module_param_named(disable_ctrl_shutdown,
137 pqi_disable_ctrl_shutdown, int, 0644);
138 MODULE_PARM_DESC(disable_ctrl_shutdown,
139 "Disable controller shutdown when controller locked up.");
140
141 static char *pqi_lockup_action_param;
142 module_param_named(lockup_action,
143 pqi_lockup_action_param, charp, 0644);
144 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
145 "\t\tSupported: none, reboot, panic\n"
146 "\t\tDefault: none");
147
148 static int pqi_expose_ld_first;
149 module_param_named(expose_ld_first,
150 pqi_expose_ld_first, int, 0644);
151 MODULE_PARM_DESC(expose_ld_first,
152 "Expose logical drives before physical drives.");
153
154 static int pqi_hide_vsep;
155 module_param_named(hide_vsep,
156 pqi_hide_vsep, int, 0644);
157 MODULE_PARM_DESC(hide_vsep,
158 "Hide the virtual SEP for direct attached drives.");
159
160 static char *raid_levels[] = {
161 "RAID-0",
162 "RAID-4",
163 "RAID-1(1+0)",
164 "RAID-5",
165 "RAID-5+1",
166 "RAID-ADG",
167 "RAID-1(ADM)",
168 };
169
170 static char *pqi_raid_level_to_string(u8 raid_level)
171 {
172 if (raid_level < ARRAY_SIZE(raid_levels))
173 return raid_levels[raid_level];
174
175 return "RAID UNKNOWN";
176 }
177
178 #define SA_RAID_0 0
179 #define SA_RAID_4 1
180 #define SA_RAID_1 2 /* also used for RAID 10 */
181 #define SA_RAID_5 3 /* also used for RAID 50 */
182 #define SA_RAID_51 4
183 #define SA_RAID_6 5 /* also used for RAID 60 */
184 #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
185 #define SA_RAID_MAX SA_RAID_ADM
186 #define SA_RAID_UNKNOWN 0xff
187
188 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
189 {
190 pqi_prep_for_scsi_done(scmd);
191 scmd->scsi_done(scmd);
192 }
193
194 static inline void pqi_disable_write_same(struct scsi_device *sdev)
195 {
196 sdev->no_write_same = 1;
197 }
198
199 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
200 {
201 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
202 }
203
204 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
205 {
206 return !device->is_physical_device;
207 }
208
209 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
210 {
211 return scsi3addr[2] != 0;
212 }
213
214 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
215 {
216 if (ctrl_info->controller_online)
217 if (!sis_is_firmware_running(ctrl_info))
218 pqi_take_ctrl_offline(ctrl_info);
219 }
220
221 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
222 {
223 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
224 }
225
226 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
227 struct pqi_ctrl_info *ctrl_info)
228 {
229 return sis_read_driver_scratch(ctrl_info);
230 }
231
232 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
233 enum pqi_ctrl_mode mode)
234 {
235 sis_write_driver_scratch(ctrl_info, mode);
236 }
237
238 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
239 {
240 ctrl_info->block_requests = true;
241 scsi_block_requests(ctrl_info->scsi_host);
242 }
243
244 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
245 {
246 ctrl_info->block_requests = false;
247 wake_up_all(&ctrl_info->block_requests_wait);
248 pqi_retry_raid_bypass_requests(ctrl_info);
249 scsi_unblock_requests(ctrl_info->scsi_host);
250 }
251
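/*
 * Wait (optionally with a timeout) while the controller has requests
 * blocked.  Returns the number of milliseconds remaining on the caller's
 * timeout; with NO_TIMEOUT the wait is unbounded and the original timeout
 * value is passed back unchanged.
 */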
252 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
253 unsigned long timeout_msecs)
254 {
255 unsigned long remaining_msecs;
256
257 if (!pqi_ctrl_blocked(ctrl_info))
258 return timeout_msecs;
259
260 atomic_inc(&ctrl_info->num_blocked_threads);
261
262 if (timeout_msecs == NO_TIMEOUT) {
263 wait_event(ctrl_info->block_requests_wait,
264 !pqi_ctrl_blocked(ctrl_info));
265 remaining_msecs = timeout_msecs;
266 } else {
267 unsigned long remaining_jiffies;
268
269 remaining_jiffies =
270 wait_event_timeout(ctrl_info->block_requests_wait,
271 !pqi_ctrl_blocked(ctrl_info),
272 msecs_to_jiffies(timeout_msecs));
273 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
274 }
275
276 atomic_dec(&ctrl_info->num_blocked_threads);
277
278 return remaining_msecs;
279 }
280
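/*
 * Poll until every busy thread is also counted as blocked, i.e. no thread
 * is still actively issuing requests to the controller.
 */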
281 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
282 {
283 while (atomic_read(&ctrl_info->num_busy_threads) >
284 atomic_read(&ctrl_info->num_blocked_threads))
285 usleep_range(1000, 2000);
286 }
287
288 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
289 {
290 return device->device_offline;
291 }
292
293 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
294 {
295 device->in_reset = true;
296 }
297
298 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
299 {
300 device->in_reset = false;
301 }
302
303 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
304 {
305 return device->in_reset;
306 }
307
308 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
309 {
310 ctrl_info->in_ofa = true;
311 }
312
313 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
314 {
315 ctrl_info->in_ofa = false;
316 }
317
318 static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
319 {
320 return ctrl_info->in_ofa;
321 }
322
323 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
324 {
325 device->in_remove = true;
326 }
327
328 static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
329 struct pqi_scsi_dev *device)
330 {
331 return device->in_remove && !ctrl_info->in_shutdown;
332 }
333
334 static inline void pqi_schedule_rescan_worker_with_delay(
335 struct pqi_ctrl_info *ctrl_info, unsigned long delay)
336 {
337 if (pqi_ctrl_offline(ctrl_info))
338 return;
339 if (pqi_ctrl_in_ofa(ctrl_info))
340 return;
341
342 schedule_delayed_work(&ctrl_info->rescan_work, delay);
343 }
344
345 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
346 {
347 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
348 }
349
350 #define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
351
352 static inline void pqi_schedule_rescan_worker_delayed(
353 struct pqi_ctrl_info *ctrl_info)
354 {
355 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
356 }
357
358 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
359 {
360 cancel_delayed_work_sync(&ctrl_info->rescan_work);
361 }
362
363 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
364 {
365 if (!ctrl_info->heartbeat_counter)
366 return 0;
367
368 return readl(ctrl_info->heartbeat_counter);
369 }
370
371 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
372 {
373 if (!ctrl_info->soft_reset_status)
374 return 0;
375
376 return readb(ctrl_info->soft_reset_status);
377 }
378
379 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
380 u8 clear)
381 {
382 u8 status;
383
384 if (!ctrl_info->soft_reset_status)
385 return;
386
387 status = pqi_read_soft_reset_status(ctrl_info);
388 status &= ~clear;
389 writeb(status, ctrl_info->soft_reset_status);
390 }
391
392 static int pqi_map_single(struct pci_dev *pci_dev,
393 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
394 size_t buffer_length, enum dma_data_direction data_direction)
395 {
396 dma_addr_t bus_address;
397
398 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
399 return 0;
400
401 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
402 data_direction);
403 if (dma_mapping_error(&pci_dev->dev, bus_address))
404 return -ENOMEM;
405
406 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
407 put_unaligned_le32(buffer_length, &sg_descriptor->length);
408 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
409
410 return 0;
411 }
412
413 static void pqi_pci_unmap(struct pci_dev *pci_dev,
414 struct pqi_sg_descriptor *descriptors, int num_descriptors,
415 enum dma_data_direction data_direction)
416 {
417 int i;
418
419 if (data_direction == DMA_NONE)
420 return;
421
422 for (i = 0; i < num_descriptors; i++)
423 dma_unmap_single(&pci_dev->dev,
424 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
425 get_unaligned_le32(&descriptors[i].length),
426 data_direction);
427 }
428
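/*
 * Build a RAID path request IU for the given CISS/BMIC command: set up the
 * CDB and data direction and DMA-map the caller's buffer into the first SG
 * descriptor.  On success the caller owns the mapping and must release it
 * with pqi_pci_unmap() once the request completes.
 */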
429 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
430 struct pqi_raid_path_request *request, u8 cmd,
431 u8 *scsi3addr, void *buffer, size_t buffer_length,
432 u16 vpd_page, enum dma_data_direction *dir)
433 {
434 u8 *cdb;
435 size_t cdb_length = buffer_length;
436
437 memset(request, 0, sizeof(*request));
438
439 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
440 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
441 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
442 &request->header.iu_length);
443 put_unaligned_le32(buffer_length, &request->buffer_length);
444 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
445 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
446 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
447
448 cdb = request->cdb;
449
450 switch (cmd) {
451 case INQUIRY:
452 request->data_direction = SOP_READ_FLAG;
453 cdb[0] = INQUIRY;
454 if (vpd_page & VPD_PAGE) {
455 cdb[1] = 0x1;
456 cdb[2] = (u8)vpd_page;
457 }
458 cdb[4] = (u8)cdb_length;
459 break;
460 case CISS_REPORT_LOG:
461 case CISS_REPORT_PHYS:
462 request->data_direction = SOP_READ_FLAG;
463 cdb[0] = cmd;
464 if (cmd == CISS_REPORT_PHYS)
465 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
466 else
467 cdb[1] = CISS_REPORT_LOG_EXTENDED;
468 put_unaligned_be32(cdb_length, &cdb[6]);
469 break;
470 case CISS_GET_RAID_MAP:
471 request->data_direction = SOP_READ_FLAG;
472 cdb[0] = CISS_READ;
473 cdb[1] = CISS_GET_RAID_MAP;
474 put_unaligned_be32(cdb_length, &cdb[6]);
475 break;
476 case SA_FLUSH_CACHE:
477 request->data_direction = SOP_WRITE_FLAG;
478 cdb[0] = BMIC_WRITE;
479 cdb[6] = BMIC_FLUSH_CACHE;
480 put_unaligned_be16(cdb_length, &cdb[7]);
481 break;
482 case BMIC_SENSE_DIAG_OPTIONS:
483 cdb_length = 0;
484 /* fall through */
485 case BMIC_IDENTIFY_CONTROLLER:
486 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
487 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
488 request->data_direction = SOP_READ_FLAG;
489 cdb[0] = BMIC_READ;
490 cdb[6] = cmd;
491 put_unaligned_be16(cdb_length, &cdb[7]);
492 break;
493 case BMIC_SET_DIAG_OPTIONS:
494 cdb_length = 0;
495 /* fall through */
496 case BMIC_WRITE_HOST_WELLNESS:
497 request->data_direction = SOP_WRITE_FLAG;
498 cdb[0] = BMIC_WRITE;
499 cdb[6] = cmd;
500 put_unaligned_be16(cdb_length, &cdb[7]);
501 break;
502 case BMIC_CSMI_PASSTHRU:
503 request->data_direction = SOP_BIDIRECTIONAL;
504 cdb[0] = BMIC_WRITE;
505 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
506 cdb[6] = cmd;
507 put_unaligned_be16(cdb_length, &cdb[7]);
508 break;
509 default:
510 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
511 cmd);
512 break;
513 }
514
515 switch (request->data_direction) {
516 case SOP_READ_FLAG:
517 *dir = DMA_FROM_DEVICE;
518 break;
519 case SOP_WRITE_FLAG:
520 *dir = DMA_TO_DEVICE;
521 break;
522 case SOP_NO_DIRECTION_FLAG:
523 *dir = DMA_NONE;
524 break;
525 default:
526 *dir = DMA_BIDIRECTIONAL;
527 break;
528 }
529
530 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
531 buffer, buffer_length, *dir);
532 }
533
534 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
535 {
536 io_request->scmd = NULL;
537 io_request->status = 0;
538 io_request->error_info = NULL;
539 io_request->raid_bypass = false;
540 }
541
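/*
 * Allocate an I/O request structure from the pre-allocated pool without
 * taking a lock: starting at the (benignly racy) next-slot hint, scan until
 * an entry's refcount transitions from 0 to 1.  The request is returned to
 * the pool via pqi_free_io_request().
 */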
542 static struct pqi_io_request *pqi_alloc_io_request(
543 struct pqi_ctrl_info *ctrl_info)
544 {
545 struct pqi_io_request *io_request;
546 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
547
548 while (1) {
549 io_request = &ctrl_info->io_request_pool[i];
550 if (atomic_inc_return(&io_request->refcount) == 1)
551 break;
552 atomic_dec(&io_request->refcount);
553 i = (i + 1) % ctrl_info->max_io_slots;
554 }
555
556 /* benignly racy */
557 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
558
559 pqi_reinit_io_request(io_request);
560
561 return io_request;
562 }
563
564 static void pqi_free_io_request(struct pqi_io_request *io_request)
565 {
566 atomic_dec(&io_request->refcount);
567 }
568
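/*
 * Build a RAID path request for the given command, submit it synchronously,
 * then unmap the data buffer before returning the completion status.
 */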
569 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
570 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
571 struct pqi_raid_error_info *error_info,
572 unsigned long timeout_msecs)
573 {
574 int rc;
575 enum dma_data_direction dir;
576 struct pqi_raid_path_request request;
577
578 rc = pqi_build_raid_path_request(ctrl_info, &request,
579 cmd, scsi3addr, buffer,
580 buffer_length, vpd_page, &dir);
581 if (rc)
582 return rc;
583
584 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
585 0, error_info, timeout_msecs);
586
587 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
588 return rc;
589 }
590
591 /* Helper functions for pqi_send_scsi_raid_request */
592
593 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
594 u8 cmd, void *buffer, size_t buffer_length)
595 {
596 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
597 buffer, buffer_length, 0, NULL, NO_TIMEOUT);
598 }
599
600 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
601 u8 cmd, void *buffer, size_t buffer_length,
602 struct pqi_raid_error_info *error_info)
603 {
604 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
605 buffer, buffer_length, 0, error_info, NO_TIMEOUT);
606 }
607
608
609 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
610 struct bmic_identify_controller *buffer)
611 {
612 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
613 buffer, sizeof(*buffer));
614 }
615
616 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
617 struct bmic_sense_subsystem_info *sense_info)
618 {
619 return pqi_send_ctrl_raid_request(ctrl_info,
620 BMIC_SENSE_SUBSYSTEM_INFORMATION,
621 sense_info, sizeof(*sense_info));
622 }
623
624 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
625 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
626 {
627 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
628 buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
629 }
630
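/*
 * Check whether a device supports the given VPD page by reading the
 * Supported VPD Pages page (0x00): first the 4-byte header to learn the
 * length of the page list, then the full list.
 */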
631 static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
632 u8 *scsi3addr, u16 vpd_page)
633 {
634 int rc;
635 int i;
636 int pages;
637 unsigned char *buf, bufsize;
638
639 buf = kzalloc(256, GFP_KERNEL);
640 if (!buf)
641 return false;
642
643 /* Get the size of the page list first */
644 rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
645 VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
646 buf, SCSI_VPD_HEADER_SZ);
647 if (rc != 0)
648 goto exit_unsupported;
649
650 pages = buf[3];
651 if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
652 bufsize = pages + SCSI_VPD_HEADER_SZ;
653 else
654 bufsize = 255;
655
656 /* Get the whole VPD page list */
657 rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
658 VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
659 buf, bufsize);
660 if (rc != 0)
661 goto exit_unsupported;
662
663 pages = buf[3];
664 for (i = 1; i <= pages; i++)
665 if (buf[3 + i] == vpd_page)
666 goto exit_supported;
667
668 exit_unsupported:
669 kfree(buf);
670 return false;
671
672 exit_supported:
673 kfree(buf);
674 return true;
675 }
676
677 static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
678 u8 *scsi3addr, u8 *device_id, int buflen)
679 {
680 int rc;
681 unsigned char *buf;
682
683 if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
684 return 1; /* function not supported */
685
686 buf = kzalloc(64, GFP_KERNEL);
687 if (!buf)
688 return -ENOMEM;
689
690 rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
691 VPD_PAGE | SCSI_VPD_DEVICE_ID,
692 buf, 64);
693 if (rc == 0) {
694 if (buflen > 16)
695 buflen = 16;
696 memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
697 }
698
699 kfree(buf);
700
701 return rc;
702 }
703
704 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
705 struct pqi_scsi_dev *device,
706 struct bmic_identify_physical_device *buffer,
707 size_t buffer_length)
708 {
709 int rc;
710 enum dma_data_direction dir;
711 u16 bmic_device_index;
712 struct pqi_raid_path_request request;
713
714 rc = pqi_build_raid_path_request(ctrl_info, &request,
715 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
716 buffer_length, 0, &dir);
717 if (rc)
718 return rc;
719
720 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
721 request.cdb[2] = (u8)bmic_device_index;
722 request.cdb[9] = (u8)(bmic_device_index >> 8);
723
724 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
725 0, NULL, NO_TIMEOUT);
726
727 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
728 return rc;
729 }
730
731 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
732 enum bmic_flush_cache_shutdown_event shutdown_event)
733 {
734 int rc;
735 struct bmic_flush_cache *flush_cache;
736
737 /*
738 * Don't bother trying to flush the cache if the controller is
739 * locked up.
740 */
741 if (pqi_ctrl_offline(ctrl_info))
742 return -ENXIO;
743
744 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
745 if (!flush_cache)
746 return -ENOMEM;
747
748 flush_cache->shutdown_event = shutdown_event;
749
750 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
751 sizeof(*flush_cache));
752
753 kfree(flush_cache);
754
755 return rc;
756 }
757
758 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
759 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
760 struct pqi_raid_error_info *error_info)
761 {
762 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
763 buffer, buffer_length, error_info);
764 }
765
766 #define PQI_FETCH_PTRAID_DATA (1UL<<31)
767
768 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
769 {
770 int rc;
771 struct bmic_diag_options *diag;
772
773 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
774 if (!diag)
775 return -ENOMEM;
776
777 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
778 diag, sizeof(*diag));
779 if (rc)
780 goto out;
781
782 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
783
784 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
785 diag, sizeof(*diag));
786 out:
787 kfree(diag);
788
789 return rc;
790 }
791
792 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
793 void *buffer, size_t buffer_length)
794 {
795 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
796 buffer, buffer_length);
797 }
798
799 #pragma pack(1)
800
801 struct bmic_host_wellness_driver_version {
802 u8 start_tag[4];
803 u8 driver_version_tag[2];
804 __le16 driver_version_length;
805 char driver_version[32];
806 u8 dont_write_tag[2];
807 u8 end_tag[2];
808 };
809
810 #pragma pack()
811
812 static int pqi_write_driver_version_to_host_wellness(
813 struct pqi_ctrl_info *ctrl_info)
814 {
815 int rc;
816 struct bmic_host_wellness_driver_version *buffer;
817 size_t buffer_length;
818
819 buffer_length = sizeof(*buffer);
820
821 buffer = kmalloc(buffer_length, GFP_KERNEL);
822 if (!buffer)
823 return -ENOMEM;
824
825 buffer->start_tag[0] = '<';
826 buffer->start_tag[1] = 'H';
827 buffer->start_tag[2] = 'W';
828 buffer->start_tag[3] = '>';
829 buffer->driver_version_tag[0] = 'D';
830 buffer->driver_version_tag[1] = 'V';
831 put_unaligned_le16(sizeof(buffer->driver_version),
832 &buffer->driver_version_length);
833 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
834 sizeof(buffer->driver_version) - 1);
835 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
836 buffer->dont_write_tag[0] = 'D';
837 buffer->dont_write_tag[1] = 'W';
838 buffer->end_tag[0] = 'Z';
839 buffer->end_tag[1] = 'Z';
840
841 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
842
843 kfree(buffer);
844
845 return rc;
846 }
847
848 #pragma pack(1)
849
850 struct bmic_host_wellness_time {
851 u8 start_tag[4];
852 u8 time_tag[2];
853 __le16 time_length;
854 u8 time[8];
855 u8 dont_write_tag[2];
856 u8 end_tag[2];
857 };
858
859 #pragma pack()
860
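/*
 * Send the current host time to the controller as a BMIC host wellness
 * update.  The time field is BCD encoded: hour, minute, second, a reserved
 * byte, month, day, century, and year within the century.
 */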
861 static int pqi_write_current_time_to_host_wellness(
862 struct pqi_ctrl_info *ctrl_info)
863 {
864 int rc;
865 struct bmic_host_wellness_time *buffer;
866 size_t buffer_length;
867 time64_t local_time;
868 unsigned int year;
869 struct tm tm;
870
871 buffer_length = sizeof(*buffer);
872
873 buffer = kmalloc(buffer_length, GFP_KERNEL);
874 if (!buffer)
875 return -ENOMEM;
876
877 buffer->start_tag[0] = '<';
878 buffer->start_tag[1] = 'H';
879 buffer->start_tag[2] = 'W';
880 buffer->start_tag[3] = '>';
881 buffer->time_tag[0] = 'T';
882 buffer->time_tag[1] = 'D';
883 put_unaligned_le16(sizeof(buffer->time),
884 &buffer->time_length);
885
886 local_time = ktime_get_real_seconds();
887 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
888 year = tm.tm_year + 1900;
889
890 buffer->time[0] = bin2bcd(tm.tm_hour);
891 buffer->time[1] = bin2bcd(tm.tm_min);
892 buffer->time[2] = bin2bcd(tm.tm_sec);
893 buffer->time[3] = 0;
894 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
895 buffer->time[5] = bin2bcd(tm.tm_mday);
896 buffer->time[6] = bin2bcd(year / 100);
897 buffer->time[7] = bin2bcd(year % 100);
898
899 buffer->dont_write_tag[0] = 'D';
900 buffer->dont_write_tag[1] = 'W';
901 buffer->end_tag[0] = 'Z';
902 buffer->end_tag[1] = 'Z';
903
904 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
905
906 kfree(buffer);
907
908 return rc;
909 }
910
911 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
912
913 static void pqi_update_time_worker(struct work_struct *work)
914 {
915 int rc;
916 struct pqi_ctrl_info *ctrl_info;
917
918 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
919 update_time_work);
920
921 if (pqi_ctrl_offline(ctrl_info))
922 return;
923
924 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
925 if (rc)
926 dev_warn(&ctrl_info->pci_dev->dev,
927 "error updating time on controller\n");
928
929 schedule_delayed_work(&ctrl_info->update_time_work,
930 PQI_UPDATE_TIME_WORK_INTERVAL);
931 }
932
933 static inline void pqi_schedule_update_time_worker(
934 struct pqi_ctrl_info *ctrl_info)
935 {
936 schedule_delayed_work(&ctrl_info->update_time_work, 0);
937 }
938
939 static inline void pqi_cancel_update_time_worker(
940 struct pqi_ctrl_info *ctrl_info)
941 {
942 cancel_delayed_work_sync(&ctrl_info->update_time_work);
943 }
944
945 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
946 void *buffer, size_t buffer_length)
947 {
948 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
949 buffer_length);
950 }
951
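/*
 * Issue a CISS report physical/logical LUNs command.  The header is fetched
 * first to learn the list length, then the full list; if the list grew
 * between the two commands, a larger buffer is allocated and the request is
 * retried.  On success *buffer holds the list and must be freed by the
 * caller.
 */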
952 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
953 void **buffer)
954 {
955 int rc;
956 size_t lun_list_length;
957 size_t lun_data_length;
958 size_t new_lun_list_length;
959 void *lun_data = NULL;
960 struct report_lun_header *report_lun_header;
961
962 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
963 if (!report_lun_header) {
964 rc = -ENOMEM;
965 goto out;
966 }
967
968 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
969 sizeof(*report_lun_header));
970 if (rc)
971 goto out;
972
973 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
974
975 again:
976 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
977
978 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
979 if (!lun_data) {
980 rc = -ENOMEM;
981 goto out;
982 }
983
984 if (lun_list_length == 0) {
985 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
986 goto out;
987 }
988
989 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
990 if (rc)
991 goto out;
992
993 new_lun_list_length = get_unaligned_be32(
994 &((struct report_lun_header *)lun_data)->list_length);
995
996 if (new_lun_list_length > lun_list_length) {
997 lun_list_length = new_lun_list_length;
998 kfree(lun_data);
999 goto again;
1000 }
1001
1002 out:
1003 kfree(report_lun_header);
1004
1005 if (rc) {
1006 kfree(lun_data);
1007 lun_data = NULL;
1008 }
1009
1010 *buffer = lun_data;
1011
1012 return rc;
1013 }
1014
1015 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
1016 void **buffer)
1017 {
1018 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
1019 buffer);
1020 }
1021
1022 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
1023 void **buffer)
1024 {
1025 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1026 }
1027
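/*
 * Retrieve both the physical and logical LUN lists and append a zeroed
 * entry for the controller itself to the end of the logical list so the
 * controller LUN is picked up by the device scan.
 */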
1028 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1029 struct report_phys_lun_extended **physdev_list,
1030 struct report_log_lun_extended **logdev_list)
1031 {
1032 int rc;
1033 size_t logdev_list_length;
1034 size_t logdev_data_length;
1035 struct report_log_lun_extended *internal_logdev_list;
1036 struct report_log_lun_extended *logdev_data;
1037 struct report_lun_header report_lun_header;
1038
1039 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1040 if (rc)
1041 dev_err(&ctrl_info->pci_dev->dev,
1042 "report physical LUNs failed\n");
1043
1044 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1045 if (rc)
1046 dev_err(&ctrl_info->pci_dev->dev,
1047 "report logical LUNs failed\n");
1048
1049 /*
1050 * Tack the controller itself onto the end of the logical device list.
1051 */
1052
1053 logdev_data = *logdev_list;
1054
1055 if (logdev_data) {
1056 logdev_list_length =
1057 get_unaligned_be32(&logdev_data->header.list_length);
1058 } else {
1059 memset(&report_lun_header, 0, sizeof(report_lun_header));
1060 logdev_data =
1061 (struct report_log_lun_extended *)&report_lun_header;
1062 logdev_list_length = 0;
1063 }
1064
1065 logdev_data_length = sizeof(struct report_lun_header) +
1066 logdev_list_length;
1067
1068 internal_logdev_list = kmalloc(logdev_data_length +
1069 sizeof(struct report_log_lun_extended), GFP_KERNEL);
1070 if (!internal_logdev_list) {
1071 kfree(*logdev_list);
1072 *logdev_list = NULL;
1073 return -ENOMEM;
1074 }
1075
1076 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1077 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1078 sizeof(struct report_log_lun_extended_entry));
1079 put_unaligned_be32(logdev_list_length +
1080 sizeof(struct report_log_lun_extended_entry),
1081 &internal_logdev_list->header.list_length);
1082
1083 kfree(*logdev_list);
1084 *logdev_list = internal_logdev_list;
1085
1086 return 0;
1087 }
1088
1089 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1090 int bus, int target, int lun)
1091 {
1092 device->bus = bus;
1093 device->target = target;
1094 device->lun = lun;
1095 }
1096
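/*
 * Derive the SCSI bus/target/lun nexus from the 8-byte CISS LUN address.
 * The controller and logical volumes are decoded from the 32-bit LUN ID;
 * other physical devices get placeholder values because the SAS transport
 * layer assigns their target and LUN later.
 */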
1097 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1098 {
1099 u8 *scsi3addr;
1100 u32 lunid;
1101 int bus;
1102 int target;
1103 int lun;
1104
1105 scsi3addr = device->scsi3addr;
1106 lunid = get_unaligned_le32(scsi3addr);
1107
1108 if (pqi_is_hba_lunid(scsi3addr)) {
1109 /* The specified device is the controller. */
1110 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1111 device->target_lun_valid = true;
1112 return;
1113 }
1114
1115 if (pqi_is_logical_device(device)) {
1116 if (device->is_external_raid_device) {
1117 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1118 target = (lunid >> 16) & 0x3fff;
1119 lun = lunid & 0xff;
1120 } else {
1121 bus = PQI_RAID_VOLUME_BUS;
1122 target = 0;
1123 lun = lunid & 0x3fff;
1124 }
1125 pqi_set_bus_target_lun(device, bus, target, lun);
1126 device->target_lun_valid = true;
1127 return;
1128 }
1129
1130 /*
1131 * Defer target and LUN assignment for non-controller physical devices
1132 * because the SAS transport layer will make these assignments later.
1133 */
1134 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1135 }
1136
1137 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1138 struct pqi_scsi_dev *device)
1139 {
1140 int rc;
1141 u8 raid_level;
1142 u8 *buffer;
1143
1144 raid_level = SA_RAID_UNKNOWN;
1145
1146 buffer = kmalloc(64, GFP_KERNEL);
1147 if (buffer) {
1148 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1149 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1150 if (rc == 0) {
1151 raid_level = buffer[8];
1152 if (raid_level > SA_RAID_MAX)
1153 raid_level = SA_RAID_UNKNOWN;
1154 }
1155 kfree(buffer);
1156 }
1157
1158 device->raid_level = raid_level;
1159 }
1160
1161 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1162 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1163 {
1164 char *err_msg;
1165 u32 raid_map_size;
1166 u32 r5or6_blocks_per_row;
1167
1168 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1169
1170 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1171 err_msg = "RAID map too small";
1172 goto bad_raid_map;
1173 }
1174
1175 if (device->raid_level == SA_RAID_1) {
1176 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1177 err_msg = "invalid RAID-1 map";
1178 goto bad_raid_map;
1179 }
1180 } else if (device->raid_level == SA_RAID_ADM) {
1181 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1182 err_msg = "invalid RAID-1(ADM) map";
1183 goto bad_raid_map;
1184 }
1185 } else if ((device->raid_level == SA_RAID_5 ||
1186 device->raid_level == SA_RAID_6) &&
1187 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1188 /* RAID 50/60 */
1189 r5or6_blocks_per_row =
1190 get_unaligned_le16(&raid_map->strip_size) *
1191 get_unaligned_le16(&raid_map->data_disks_per_row);
1192 if (r5or6_blocks_per_row == 0) {
1193 err_msg = "invalid RAID-5 or RAID-6 map";
1194 goto bad_raid_map;
1195 }
1196 }
1197
1198 return 0;
1199
1200 bad_raid_map:
1201 dev_warn(&ctrl_info->pci_dev->dev,
1202 "logical device %08x%08x %s\n",
1203 *((u32 *)&device->scsi3addr),
1204 *((u32 *)&device->scsi3addr[4]), err_msg);
1205
1206 return -EINVAL;
1207 }
1208
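/*
 * Fetch the RAID map for a logical volume.  The map is first read with the
 * default structure size; if the controller reports a larger structure, a
 * bigger buffer is allocated and the map is re-read, then validated before
 * being attached to the device.
 */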
1209 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1210 struct pqi_scsi_dev *device)
1211 {
1212 int rc;
1213 u32 raid_map_size;
1214 struct raid_map *raid_map;
1215
1216 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1217 if (!raid_map)
1218 return -ENOMEM;
1219
1220 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1221 device->scsi3addr, raid_map, sizeof(*raid_map),
1222 0, NULL, NO_TIMEOUT);
1223
1224 if (rc)
1225 goto error;
1226
1227 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1228
1229 if (raid_map_size > sizeof(*raid_map)) {
1230
1231 kfree(raid_map);
1232
1233 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1234 if (!raid_map)
1235 return -ENOMEM;
1236
1237 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1238 device->scsi3addr, raid_map, raid_map_size,
1239 0, NULL, NO_TIMEOUT);
1240 if (rc)
1241 goto error;
1242
1243 if (get_unaligned_le32(&raid_map->structure_size)
1244 != raid_map_size) {
1245 dev_warn(&ctrl_info->pci_dev->dev,
1246 "Requested %d bytes, received %d bytes",
1247 raid_map_size,
1248 get_unaligned_le32(&raid_map->structure_size));
1249 goto error;
1250 }
1251 }
1252
1253 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1254 if (rc)
1255 goto error;
1256
1257 device->raid_map = raid_map;
1258
1259 return 0;
1260
1261 error:
1262 kfree(raid_map);
1263
1264 return rc;
1265 }
1266
1267 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1268 struct pqi_scsi_dev *device)
1269 {
1270 int rc;
1271 u8 *buffer;
1272 u8 bypass_status;
1273
1274 buffer = kmalloc(64, GFP_KERNEL);
1275 if (!buffer)
1276 return;
1277
1278 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1279 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1280 if (rc)
1281 goto out;
1282
1283 #define RAID_BYPASS_STATUS 4
1284 #define RAID_BYPASS_CONFIGURED 0x1
1285 #define RAID_BYPASS_ENABLED 0x2
1286
1287 bypass_status = buffer[RAID_BYPASS_STATUS];
1288 device->raid_bypass_configured =
1289 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1290 if (device->raid_bypass_configured &&
1291 (bypass_status & RAID_BYPASS_ENABLED) &&
1292 pqi_get_raid_map(ctrl_info, device) == 0)
1293 device->raid_bypass_enabled = true;
1294
1295 out:
1296 kfree(buffer);
1297 }
1298
1299 /*
1300 * Use vendor-specific VPD to determine online/offline status of a volume.
1301 */
1302
1303 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1304 struct pqi_scsi_dev *device)
1305 {
1306 int rc;
1307 size_t page_length;
1308 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1309 bool volume_offline = true;
1310 u32 volume_flags;
1311 struct ciss_vpd_logical_volume_status *vpd;
1312
1313 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1314 if (!vpd)
1315 goto no_buffer;
1316
1317 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1318 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1319 if (rc)
1320 goto out;
1321
1322 if (vpd->page_code != CISS_VPD_LV_STATUS)
1323 goto out;
1324
1325 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1326 volume_status) + vpd->page_length;
1327 if (page_length < sizeof(*vpd))
1328 goto out;
1329
1330 volume_status = vpd->volume_status;
1331 volume_flags = get_unaligned_be32(&vpd->flags);
1332 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1333
1334 out:
1335 kfree(vpd);
1336 no_buffer:
1337 device->volume_status = volume_status;
1338 device->volume_offline = volume_offline;
1339 }
1340
1341 #define PQI_INQUIRY_PAGE0_RETRIES 3
1342
1343 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1344 struct pqi_scsi_dev *device)
1345 {
1346 int rc;
1347 u8 *buffer;
1348 unsigned int retries;
1349
1350 if (device->is_expander_smp_device)
1351 return 0;
1352
1353 buffer = kmalloc(64, GFP_KERNEL);
1354 if (!buffer)
1355 return -ENOMEM;
1356
1357 /* Send an inquiry to the device to see what it is. */
1358 for (retries = 0;;) {
1359 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
1360 buffer, 64);
1361 if (rc == 0)
1362 break;
1363 if (pqi_is_logical_device(device) ||
1364 rc != PQI_CMD_STATUS_ABORTED ||
1365 ++retries > PQI_INQUIRY_PAGE0_RETRIES)
1366 goto out;
1367 }
1368
1369 scsi_sanitize_inquiry_string(&buffer[8], 8);
1370 scsi_sanitize_inquiry_string(&buffer[16], 16);
1371
1372 device->devtype = buffer[0] & 0x1f;
1373 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1374 memcpy(device->model, &buffer[16], sizeof(device->model));
1375
1376 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1377 if (device->is_external_raid_device) {
1378 device->raid_level = SA_RAID_UNKNOWN;
1379 device->volume_status = CISS_LV_OK;
1380 device->volume_offline = false;
1381 } else {
1382 pqi_get_raid_level(ctrl_info, device);
1383 pqi_get_raid_bypass_status(ctrl_info, device);
1384 pqi_get_volume_status(ctrl_info, device);
1385 }
1386 }
1387
1388 if (pqi_get_device_id(ctrl_info, device->scsi3addr,
1389 device->unique_id, sizeof(device->unique_id)) < 0)
1390 dev_warn(&ctrl_info->pci_dev->dev,
1391 "Can't get device id for scsi %d:%d:%d:%d\n",
1392 ctrl_info->scsi_host->host_no,
1393 device->bus, device->target,
1394 device->lun);
1395
1396 out:
1397 kfree(buffer);
1398
1399 return rc;
1400 }
1401
1402 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1403 struct pqi_scsi_dev *device,
1404 struct bmic_identify_physical_device *id_phys)
1405 {
1406 int rc;
1407
1408 memset(id_phys, 0, sizeof(*id_phys));
1409
1410 rc = pqi_identify_physical_device(ctrl_info, device,
1411 id_phys, sizeof(*id_phys));
1412 if (rc) {
1413 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1414 return;
1415 }
1416 device->box_index = id_phys->box_index;
1417 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1418 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1419 device->queue_depth =
1420 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1421 device->device_type = id_phys->device_type;
1422 device->active_path_index = id_phys->active_path_number;
1423 device->path_map = id_phys->redundant_path_present_map;
1424 memcpy(&device->box,
1425 &id_phys->alternate_paths_phys_box_on_port,
1426 sizeof(device->box));
1427 memcpy(&device->phys_connector,
1428 &id_phys->alternate_paths_phys_connector,
1429 sizeof(device->phys_connector));
1430 device->bay = id_phys->phys_bay_in_box;
1431 }
1432
1433 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1434 struct pqi_scsi_dev *device)
1435 {
1436 char *status;
1437 static const char unknown_state_str[] =
1438 "Volume is in an unknown state (%u)";
1439 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1440
1441 switch (device->volume_status) {
1442 case CISS_LV_OK:
1443 status = "Volume online";
1444 break;
1445 case CISS_LV_FAILED:
1446 status = "Volume failed";
1447 break;
1448 case CISS_LV_NOT_CONFIGURED:
1449 status = "Volume not configured";
1450 break;
1451 case CISS_LV_DEGRADED:
1452 status = "Volume degraded";
1453 break;
1454 case CISS_LV_READY_FOR_RECOVERY:
1455 status = "Volume ready for recovery operation";
1456 break;
1457 case CISS_LV_UNDERGOING_RECOVERY:
1458 status = "Volume undergoing recovery";
1459 break;
1460 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1461 status = "Wrong physical drive was replaced";
1462 break;
1463 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1464 status = "A physical drive not properly connected";
1465 break;
1466 case CISS_LV_HARDWARE_OVERHEATING:
1467 status = "Hardware is overheating";
1468 break;
1469 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1470 status = "Hardware has overheated";
1471 break;
1472 case CISS_LV_UNDERGOING_EXPANSION:
1473 status = "Volume undergoing expansion";
1474 break;
1475 case CISS_LV_NOT_AVAILABLE:
1476 status = "Volume waiting for transforming volume";
1477 break;
1478 case CISS_LV_QUEUED_FOR_EXPANSION:
1479 status = "Volume queued for expansion";
1480 break;
1481 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1482 status = "Volume disabled due to SCSI ID conflict";
1483 break;
1484 case CISS_LV_EJECTED:
1485 status = "Volume has been ejected";
1486 break;
1487 case CISS_LV_UNDERGOING_ERASE:
1488 status = "Volume undergoing background erase";
1489 break;
1490 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1491 status = "Volume ready for predictive spare rebuild";
1492 break;
1493 case CISS_LV_UNDERGOING_RPI:
1494 status = "Volume undergoing rapid parity initialization";
1495 break;
1496 case CISS_LV_PENDING_RPI:
1497 status = "Volume queued for rapid parity initialization";
1498 break;
1499 case CISS_LV_ENCRYPTED_NO_KEY:
1500 status = "Encrypted volume inaccessible - key not present";
1501 break;
1502 case CISS_LV_UNDERGOING_ENCRYPTION:
1503 status = "Volume undergoing encryption process";
1504 break;
1505 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1506 status = "Volume undergoing encryption re-keying process";
1507 break;
1508 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1509 status = "Volume encrypted but encryption is disabled";
1510 break;
1511 case CISS_LV_PENDING_ENCRYPTION:
1512 status = "Volume pending migration to encrypted state";
1513 break;
1514 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1515 status = "Volume pending encryption rekeying";
1516 break;
1517 case CISS_LV_NOT_SUPPORTED:
1518 status = "Volume not supported on this controller";
1519 break;
1520 case CISS_LV_STATUS_UNAVAILABLE:
1521 status = "Volume status not available";
1522 break;
1523 default:
1524 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1525 unknown_state_str, device->volume_status);
1526 status = unknown_state_buffer;
1527 break;
1528 }
1529
1530 dev_info(&ctrl_info->pci_dev->dev,
1531 "scsi %d:%d:%d:%d %s\n",
1532 ctrl_info->scsi_host->host_no,
1533 device->bus, device->target, device->lun, status);
1534 }
1535
1536 static void pqi_rescan_worker(struct work_struct *work)
1537 {
1538 struct pqi_ctrl_info *ctrl_info;
1539
1540 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1541 rescan_work);
1542
1543 pqi_scan_scsi_devices(ctrl_info);
1544 }
1545
1546 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1547 struct pqi_scsi_dev *device)
1548 {
1549 int rc;
1550
1551 if (pqi_is_logical_device(device))
1552 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1553 device->target, device->lun);
1554 else
1555 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1556
1557 return rc;
1558 }
1559
1560 #define PQI_PENDING_IO_TIMEOUT_SECS 20
1561
1562 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1563 struct pqi_scsi_dev *device)
1564 {
1565 int rc;
1566
1567 pqi_device_remove_start(device);
1568
1569 rc = pqi_device_wait_for_pending_io(ctrl_info, device,
1570 PQI_PENDING_IO_TIMEOUT_SECS);
1571 if (rc)
1572 dev_err(&ctrl_info->pci_dev->dev,
1573 "scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
1574 ctrl_info->scsi_host->host_no, device->bus,
1575 device->target, device->lun,
1576 atomic_read(&device->scsi_cmds_outstanding));
1577
1578 if (pqi_is_logical_device(device))
1579 scsi_remove_device(device->sdev);
1580 else
1581 pqi_remove_sas_device(device);
1582 }
1583
1584 /* Assumes the SCSI device list lock is held. */
1585
1586 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1587 int bus, int target, int lun)
1588 {
1589 struct pqi_scsi_dev *device;
1590
1591 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1592 scsi_device_list_entry)
1593 if (device->bus == bus && device->target == target &&
1594 device->lun == lun)
1595 return device;
1596
1597 return NULL;
1598 }
1599
1600 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1601 struct pqi_scsi_dev *dev2)
1602 {
1603 if (dev1->is_physical_device != dev2->is_physical_device)
1604 return false;
1605
1606 if (dev1->is_physical_device)
1607 return dev1->wwid == dev2->wwid;
1608
1609 return memcmp(dev1->volume_id, dev2->volume_id,
1610 sizeof(dev1->volume_id)) == 0;
1611 }
1612
1613 enum pqi_find_result {
1614 DEVICE_NOT_FOUND,
1615 DEVICE_CHANGED,
1616 DEVICE_SAME,
1617 };
1618
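/*
 * Look up a device in the internal list by its SCSI3 address and report
 * whether it is unchanged, has changed identity (or gone offline), or is
 * not present at all.
 */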
1619 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1620 struct pqi_scsi_dev *device_to_find,
1621 struct pqi_scsi_dev **matching_device)
1622 {
1623 struct pqi_scsi_dev *device;
1624
1625 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1626 scsi_device_list_entry) {
1627 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1628 device->scsi3addr)) {
1629 *matching_device = device;
1630 if (pqi_device_equal(device_to_find, device)) {
1631 if (device_to_find->volume_offline)
1632 return DEVICE_CHANGED;
1633 return DEVICE_SAME;
1634 }
1635 return DEVICE_CHANGED;
1636 }
1637 }
1638
1639 return DEVICE_NOT_FOUND;
1640 }
1641
1642 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1643 {
1644 if (device->is_expander_smp_device)
1645 return "Enclosure SMP ";
1646
1647 return scsi_device_type(device->devtype);
1648 }
1649
1650 #define PQI_DEV_INFO_BUFFER_LENGTH 128
1651
1652 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1653 char *action, struct pqi_scsi_dev *device)
1654 {
1655 ssize_t count;
1656 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1657
1658 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1659 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1660
1661 if (device->target_lun_valid)
1662 count += snprintf(buffer + count,
1663 PQI_DEV_INFO_BUFFER_LENGTH - count,
1664 "%d:%d",
1665 device->target,
1666 device->lun);
1667 else
1668 count += snprintf(buffer + count,
1669 PQI_DEV_INFO_BUFFER_LENGTH - count,
1670 "-:-");
1671
1672 if (pqi_is_logical_device(device))
1673 count += snprintf(buffer + count,
1674 PQI_DEV_INFO_BUFFER_LENGTH - count,
1675 " %08x%08x",
1676 *((u32 *)&device->scsi3addr),
1677 *((u32 *)&device->scsi3addr[4]));
1678 else
1679 count += snprintf(buffer + count,
1680 PQI_DEV_INFO_BUFFER_LENGTH - count,
1681 " %016llx", device->sas_address);
1682
1683 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1684 " %s %.8s %.16s ",
1685 pqi_device_type(device),
1686 device->vendor,
1687 device->model);
1688
1689 if (pqi_is_logical_device(device)) {
1690 if (device->devtype == TYPE_DISK)
1691 count += snprintf(buffer + count,
1692 PQI_DEV_INFO_BUFFER_LENGTH - count,
1693 "SSDSmartPathCap%c En%c %-12s",
1694 device->raid_bypass_configured ? '+' : '-',
1695 device->raid_bypass_enabled ? '+' : '-',
1696 pqi_raid_level_to_string(device->raid_level));
1697 } else {
1698 count += snprintf(buffer + count,
1699 PQI_DEV_INFO_BUFFER_LENGTH - count,
1700 "AIO%c", device->aio_enabled ? '+' : '-');
1701 if (device->devtype == TYPE_DISK ||
1702 device->devtype == TYPE_ZBC)
1703 count += snprintf(buffer + count,
1704 PQI_DEV_INFO_BUFFER_LENGTH - count,
1705 " qd=%-6d", device->queue_depth);
1706 }
1707
1708 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
1709 }
1710
1711 /* Assumes the SCSI device list lock is held. */
1712
1713 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1714 struct pqi_scsi_dev *new_device)
1715 {
1716 existing_device->devtype = new_device->devtype;
1717 existing_device->device_type = new_device->device_type;
1718 existing_device->bus = new_device->bus;
1719 if (new_device->target_lun_valid) {
1720 existing_device->target = new_device->target;
1721 existing_device->lun = new_device->lun;
1722 existing_device->target_lun_valid = true;
1723 }
1724
1725 /* By definition, the scsi3addr and wwid fields are already the same. */
1726
1727 existing_device->is_physical_device = new_device->is_physical_device;
1728 existing_device->is_external_raid_device =
1729 new_device->is_external_raid_device;
1730 existing_device->is_expander_smp_device =
1731 new_device->is_expander_smp_device;
1732 existing_device->aio_enabled = new_device->aio_enabled;
1733 memcpy(existing_device->vendor, new_device->vendor,
1734 sizeof(existing_device->vendor));
1735 memcpy(existing_device->model, new_device->model,
1736 sizeof(existing_device->model));
1737 existing_device->sas_address = new_device->sas_address;
1738 existing_device->raid_level = new_device->raid_level;
1739 existing_device->queue_depth = new_device->queue_depth;
1740 existing_device->aio_handle = new_device->aio_handle;
1741 existing_device->volume_status = new_device->volume_status;
1742 existing_device->active_path_index = new_device->active_path_index;
1743 existing_device->path_map = new_device->path_map;
1744 existing_device->bay = new_device->bay;
1745 existing_device->box_index = new_device->box_index;
1746 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
1747 existing_device->phy_connected_dev_type =
1748 new_device->phy_connected_dev_type;
1749 memcpy(existing_device->box, new_device->box,
1750 sizeof(existing_device->box));
1751 memcpy(existing_device->phys_connector, new_device->phys_connector,
1752 sizeof(existing_device->phys_connector));
1753 existing_device->offload_to_mirror = 0;
1754 kfree(existing_device->raid_map);
1755 existing_device->raid_map = new_device->raid_map;
1756 existing_device->raid_bypass_configured =
1757 new_device->raid_bypass_configured;
1758 existing_device->raid_bypass_enabled =
1759 new_device->raid_bypass_enabled;
1760 existing_device->device_offline = false;
1761
1762 /* To prevent this from being freed later. */
1763 new_device->raid_map = NULL;
1764 }
1765
1766 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1767 {
1768 if (device) {
1769 kfree(device->raid_map);
1770 kfree(device);
1771 }
1772 }
1773
1774 /*
1775 * Called when exposing a new device to the OS fails in order to re-adjust
1776 * our internal SCSI device list to match the SCSI ML's view.
1777 */
1778
1779 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1780 struct pqi_scsi_dev *device)
1781 {
1782 unsigned long flags;
1783
1784 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1785 list_del(&device->scsi_device_list_entry);
1786 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1787
1788 /* Allow the device structure to be freed later. */
1789 device->keep_device = false;
1790 }
1791
1792 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1793 {
1794 if (device->is_expander_smp_device)
1795 return device->sas_port != NULL;
1796
1797 return device->sdev != NULL;
1798 }
1799
1800 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1801 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1802 {
1803 int rc;
1804 unsigned int i;
1805 unsigned long flags;
1806 enum pqi_find_result find_result;
1807 struct pqi_scsi_dev *device;
1808 struct pqi_scsi_dev *next;
1809 struct pqi_scsi_dev *matching_device;
1810 LIST_HEAD(add_list);
1811 LIST_HEAD(delete_list);
1812
1813 /*
1814 * The idea here is to do as little work as possible while holding the
1815 * spinlock. That's why we go to great pains to defer anything other
1816 * than updating the internal device list until after we release the
1817 * spinlock.
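* Under the lock we only update our bookkeeping: existing entries are
* refreshed, departed devices are moved to a local delete list, and new
* devices are spliced onto a local add list; the SCSI ML add/remove
* calls and queue-depth updates happen after the lock is dropped.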
1818 */
1819
1820 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1821
1822 /* Assume that all devices in the existing list have gone away. */
1823 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1824 scsi_device_list_entry)
1825 device->device_gone = true;
1826
1827 for (i = 0; i < num_new_devices; i++) {
1828 device = new_device_list[i];
1829
1830 find_result = pqi_scsi_find_entry(ctrl_info, device,
1831 &matching_device);
1832
1833 switch (find_result) {
1834 case DEVICE_SAME:
1835 /*
1836 * The newly found device is already in the existing
1837 * device list.
1838 */
1839 device->new_device = false;
1840 matching_device->device_gone = false;
1841 pqi_scsi_update_device(matching_device, device);
1842 break;
1843 case DEVICE_NOT_FOUND:
1844 /*
1845 * The newly found device is NOT in the existing device
1846 * list.
1847 */
1848 device->new_device = true;
1849 break;
1850 case DEVICE_CHANGED:
1851 /*
1852 * The original device has gone away and we need to add
1853 * the new device.
1854 */
1855 device->new_device = true;
1856 break;
1857 }
1858 }
1859
1860 /* Process all devices that have gone away. */
1861 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1862 scsi_device_list_entry) {
1863 if (device->device_gone) {
1864 list_del(&device->scsi_device_list_entry);
1865 list_add_tail(&device->delete_list_entry, &delete_list);
1866 }
1867 }
1868
1869 /* Process all new devices. */
1870 for (i = 0; i < num_new_devices; i++) {
1871 device = new_device_list[i];
1872 if (!device->new_device)
1873 continue;
1874 if (device->volume_offline)
1875 continue;
1876 list_add_tail(&device->scsi_device_list_entry,
1877 &ctrl_info->scsi_device_list);
1878 list_add_tail(&device->add_list_entry, &add_list);
1879 /* To prevent this device structure from being freed later. */
1880 device->keep_device = true;
1881 }
1882
1883 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1884
1885 if (pqi_ctrl_in_ofa(ctrl_info))
1886 pqi_ctrl_ofa_done(ctrl_info);
1887
1888 /* Remove all devices that have gone away. */
1889 list_for_each_entry_safe(device, next, &delete_list,
1890 delete_list_entry) {
1891 if (device->volume_offline) {
1892 pqi_dev_info(ctrl_info, "offline", device);
1893 pqi_show_volume_status(ctrl_info, device);
1894 } else {
1895 pqi_dev_info(ctrl_info, "removed", device);
1896 }
1897 if (pqi_is_device_added(device))
1898 pqi_remove_device(ctrl_info, device);
1899 list_del(&device->delete_list_entry);
1900 pqi_free_device(device);
1901 }
1902
1903 /*
1904 * Notify the SCSI ML if the queue depth of any existing device has
1905 * changed.
1906 */
1907 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1908 scsi_device_list_entry) {
1909 if (device->sdev && device->queue_depth !=
1910 device->advertised_queue_depth) {
1911 device->advertised_queue_depth = device->queue_depth;
1912 scsi_change_queue_depth(device->sdev,
1913 device->advertised_queue_depth);
1914 }
1915 }
1916
1917 /* Expose any new devices. */
1918 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1919 if (!pqi_is_device_added(device)) {
1920 pqi_dev_info(ctrl_info, "added", device);
1921 rc = pqi_add_device(ctrl_info, device);
1922 if (rc) {
1923 dev_warn(&ctrl_info->pci_dev->dev,
1924 "scsi %d:%d:%d:%d addition failed, device not added\n",
1925 ctrl_info->scsi_host->host_no,
1926 device->bus, device->target,
1927 device->lun);
1928 pqi_fixup_botched_add(ctrl_info, device);
1929 }
1930 }
1931 }
1932 }
1933
1934 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1935 {
1936 bool is_supported;
1937
1938 if (device->is_expander_smp_device)
1939 return true;
1940
1941 is_supported = false;
1942
1943 switch (device->devtype) {
1944 case TYPE_DISK:
1945 case TYPE_ZBC:
1946 case TYPE_TAPE:
1947 case TYPE_MEDIUM_CHANGER:
1948 case TYPE_ENCLOSURE:
1949 is_supported = true;
1950 break;
1951 case TYPE_RAID:
1952 /*
1953 * Only support the HBA controller itself as a RAID
1954 * controller. If it's a RAID controller other than
1955 * the HBA itself (an external RAID controller, for
1956 * example), we don't support it.
1957 */
1958 if (pqi_is_hba_lunid(device->scsi3addr))
1959 is_supported = true;
1960 break;
1961 }
1962
1963 return is_supported;
1964 }
1965
1966 static inline bool pqi_skip_device(u8 *scsi3addr)
1967 {
1968 /* Ignore all masked devices. */
1969 if (MASKED_DEVICE(scsi3addr))
1970 return true;
1971
1972 return false;
1973 }
1974
1975 static inline void pqi_mask_device(u8 *scsi3addr)
1976 {
1977 scsi3addr[3] |= 0xc0;
1978 }
1979
1980 static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
1981 {
1982 if (!device->is_physical_device)
1983 return false;
1984
1985 if (device->is_expander_smp_device)
1986 return true;
1987
1988 switch (device->devtype) {
1989 case TYPE_DISK:
1990 case TYPE_ZBC:
1991 case TYPE_ENCLOSURE:
1992 return true;
1993 }
1994
1995 return false;
1996 }
1997
1998 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1999 {
2000 return !device->is_physical_device ||
2001 !pqi_skip_device(device->scsi3addr);
2002 }
2003
2004 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2005 {
2006 int i;
2007 int rc;
2008 LIST_HEAD(new_device_list_head);
2009 struct report_phys_lun_extended *physdev_list = NULL;
2010 struct report_log_lun_extended *logdev_list = NULL;
2011 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
2012 struct report_log_lun_extended_entry *log_lun_ext_entry;
2013 struct bmic_identify_physical_device *id_phys = NULL;
2014 u32 num_physicals;
2015 u32 num_logicals;
2016 struct pqi_scsi_dev **new_device_list = NULL;
2017 struct pqi_scsi_dev *device;
2018 struct pqi_scsi_dev *next;
2019 unsigned int num_new_devices;
2020 unsigned int num_valid_devices;
2021 bool is_physical_device;
2022 u8 *scsi3addr;
2023 unsigned int physical_index;
2024 unsigned int logical_index;
2025 static char *out_of_memory_msg =
2026 "failed to allocate memory, device discovery stopped";
2027
2028 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2029 if (rc)
2030 goto out;
2031
2032 if (physdev_list)
2033 num_physicals =
2034 get_unaligned_be32(&physdev_list->header.list_length)
2035 / sizeof(physdev_list->lun_entries[0]);
2036 else
2037 num_physicals = 0;
2038
2039 if (logdev_list)
2040 num_logicals =
2041 get_unaligned_be32(&logdev_list->header.list_length)
2042 / sizeof(logdev_list->lun_entries[0]);
2043 else
2044 num_logicals = 0;
2045
2046 if (num_physicals) {
2047 /*
2048 * We need this buffer for calls to pqi_get_physical_disk_info()
2049 * below. We allocate it here instead of inside
2050 * pqi_get_physical_disk_info() because it's a fairly large
2051 * buffer.
2052 */
2053 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2054 if (!id_phys) {
2055 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2056 out_of_memory_msg);
2057 rc = -ENOMEM;
2058 goto out;
2059 }
2060 if (pqi_hide_vsep) {
2061 int i;
2062
2063 for (i = num_physicals - 1; i >= 0; i--) {
2064 phys_lun_ext_entry =
2065 &physdev_list->lun_entries[i];
2066 if (CISS_GET_DRIVE_NUMBER(
2067 phys_lun_ext_entry->lunid) ==
2068 PQI_VSEP_CISS_BTL) {
2069 pqi_mask_device(
2070 phys_lun_ext_entry->lunid);
2071 break;
2072 }
2073 }
2074 }
2075 }
2076
2077 num_new_devices = num_physicals + num_logicals;
2078
2079 new_device_list = kmalloc_array(num_new_devices,
2080 sizeof(*new_device_list),
2081 GFP_KERNEL);
2082 if (!new_device_list) {
2083 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2084 rc = -ENOMEM;
2085 goto out;
2086 }
2087
2088 for (i = 0; i < num_new_devices; i++) {
2089 device = kzalloc(sizeof(*device), GFP_KERNEL);
2090 if (!device) {
2091 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2092 out_of_memory_msg);
2093 rc = -ENOMEM;
2094 goto out;
2095 }
2096 list_add_tail(&device->new_device_list_entry,
2097 &new_device_list_head);
2098 }
2099
2100 device = NULL;
2101 num_valid_devices = 0;
2102 physical_index = 0;
2103 logical_index = 0;
2104
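/*
 * Walk the physical entries first and then the logical ones (or
 * logical first when pqi_expose_ld_first is set).
 */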
2105 for (i = 0; i < num_new_devices; i++) {
2106
2107 if ((!pqi_expose_ld_first && i < num_physicals) ||
2108 (pqi_expose_ld_first && i >= num_logicals)) {
2109 is_physical_device = true;
2110 phys_lun_ext_entry =
2111 &physdev_list->lun_entries[physical_index++];
2112 log_lun_ext_entry = NULL;
2113 scsi3addr = phys_lun_ext_entry->lunid;
2114 } else {
2115 is_physical_device = false;
2116 phys_lun_ext_entry = NULL;
2117 log_lun_ext_entry =
2118 &logdev_list->lun_entries[logical_index++];
2119 scsi3addr = log_lun_ext_entry->lunid;
2120 }
2121
2122 if (is_physical_device && pqi_skip_device(scsi3addr))
2123 continue;
2124
2125 if (device)
2126 device = list_next_entry(device, new_device_list_entry);
2127 else
2128 device = list_first_entry(&new_device_list_head,
2129 struct pqi_scsi_dev, new_device_list_entry);
2130
2131 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2132 device->is_physical_device = is_physical_device;
2133 if (is_physical_device) {
2134 if (phys_lun_ext_entry->device_type ==
2135 SA_EXPANDER_SMP_DEVICE)
2136 device->is_expander_smp_device = true;
2137 } else {
2138 device->is_external_raid_device =
2139 pqi_is_external_raid_addr(scsi3addr);
2140 }
2141
2142 /* Gather information about the device. */
2143 rc = pqi_get_device_info(ctrl_info, device);
2144 if (rc == -ENOMEM) {
2145 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2146 out_of_memory_msg);
2147 goto out;
2148 }
2149 if (rc) {
2150 if (device->is_physical_device)
2151 dev_warn(&ctrl_info->pci_dev->dev,
2152 "obtaining device info failed, skipping physical device %016llx\n",
2153 get_unaligned_be64(
2154 &phys_lun_ext_entry->wwid));
2155 else
2156 dev_warn(&ctrl_info->pci_dev->dev,
2157 "obtaining device info failed, skipping logical device %08x%08x\n",
2158 *((u32 *)&device->scsi3addr),
2159 *((u32 *)&device->scsi3addr[4]));
2160 rc = 0;
2161 continue;
2162 }
2163
2164 if (!pqi_is_supported_device(device))
2165 continue;
2166
2167 pqi_assign_bus_target_lun(device);
2168
2169 if (device->is_physical_device) {
2170 device->wwid = phys_lun_ext_entry->wwid;
2171 if ((phys_lun_ext_entry->device_flags &
2172 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
2173 phys_lun_ext_entry->aio_handle) {
2174 device->aio_enabled = true;
2175 device->aio_handle =
2176 phys_lun_ext_entry->aio_handle;
2177 }
2178
2179 pqi_get_physical_disk_info(ctrl_info,
2180 device, id_phys);
2181
2182 } else {
2183 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2184 sizeof(device->volume_id));
2185 }
2186
2187 if (pqi_is_device_with_sas_address(device))
2188 device->sas_address = get_unaligned_be64(&device->wwid);
2189
2190 new_device_list[num_valid_devices++] = device;
2191 }
2192
2193 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2194
2195 out:
2196 list_for_each_entry_safe(device, next, &new_device_list_head,
2197 new_device_list_entry) {
2198 if (device->keep_device)
2199 continue;
2200 list_del(&device->new_device_list_entry);
2201 pqi_free_device(device);
2202 }
2203
2204 kfree(new_device_list);
2205 kfree(physdev_list);
2206 kfree(logdev_list);
2207 kfree(id_phys);
2208
2209 return rc;
2210 }
2211
2212 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2213 {
2214 unsigned long flags;
2215 struct pqi_scsi_dev *device;
2216
2217 while (1) {
2218 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2219
2220 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
2221 struct pqi_scsi_dev, scsi_device_list_entry);
2222 if (device)
2223 list_del(&device->scsi_device_list_entry);
2224
2225 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
2226 flags);
2227
2228 if (!device)
2229 break;
2230
2231 if (pqi_is_device_added(device))
2232 pqi_remove_device(ctrl_info, device);
2233 pqi_free_device(device);
2234 }
2235 }
2236
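/*
 * Rescans are serialized with scan_mutex; if a scan is already running
 * or the device update fails, the rescan worker is rescheduled to retry.
 */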
2237 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2238 {
2239 int rc = 0;
2240
2241 if (pqi_ctrl_offline(ctrl_info))
2242 return -ENXIO;
2243
2244 if (!mutex_trylock(&ctrl_info->scan_mutex)) {
2245 pqi_schedule_rescan_worker_delayed(ctrl_info);
2246 rc = -EINPROGRESS;
2247 } else {
2248 rc = pqi_update_scsi_devices(ctrl_info);
2249 if (rc)
2250 pqi_schedule_rescan_worker_delayed(ctrl_info);
2251 mutex_unlock(&ctrl_info->scan_mutex);
2252 }
2253
2254 return rc;
2255 }
2256
2257 static void pqi_scan_start(struct Scsi_Host *shost)
2258 {
2259 struct pqi_ctrl_info *ctrl_info;
2260
2261 ctrl_info = shost_to_hba(shost);
2262 if (pqi_ctrl_in_ofa(ctrl_info))
2263 return;
2264
2265 pqi_scan_scsi_devices(ctrl_info);
2266 }
2267
2268 /* Returns TRUE if scan is finished. */
2269
2270 static int pqi_scan_finished(struct Scsi_Host *shost,
2271 unsigned long elapsed_time)
2272 {
2273 struct pqi_ctrl_info *ctrl_info;
2274
2275 ctrl_info = shost_priv(shost);
2276
2277 return !mutex_is_locked(&ctrl_info->scan_mutex);
2278 }
2279
2280 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2281 {
2282 mutex_lock(&ctrl_info->scan_mutex);
2283 mutex_unlock(&ctrl_info->scan_mutex);
2284 }
2285
2286 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2287 {
2288 mutex_lock(&ctrl_info->lun_reset_mutex);
2289 mutex_unlock(&ctrl_info->lun_reset_mutex);
2290 }
2291
2292 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
2293 {
2294 mutex_lock(&ctrl_info->ofa_mutex);
2295 mutex_unlock(&ctrl_info->ofa_mutex);
2296 }
2297
2298 static inline void pqi_set_encryption_info(
2299 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2300 u64 first_block)
2301 {
2302 u32 volume_blk_size;
2303
2304 /*
2305 * Set the encryption tweak values based on logical block address.
2306 * If the block size is 512, the tweak value is equal to the LBA.
2307 * For other block sizes, tweak value is (LBA * block size) / 512.
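* For example, a volume with a 4096-byte block size at LBA 100 uses
* tweak (100 * 4096) / 512 = 800.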
2308 */
2309 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2310 if (volume_blk_size != 512)
2311 first_block = (first_block * volume_blk_size) / 512;
2312
2313 encryption_info->data_encryption_key_index =
2314 get_unaligned_le16(&raid_map->data_encryption_key_index);
2315 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2316 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2317 }
2318
2319 /*
2320 * Attempt to perform RAID bypass mapping for a logical volume I/O.
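* The I/O is mapped directly to a physical drive (AIO path) only when,
* among other checks, it falls within a single strip of a single row;
* otherwise it is returned to the normal RAID path on the controller.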
2321 */
2322
2323 #define PQI_RAID_BYPASS_INELIGIBLE 1
2324
2325 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2326 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2327 struct pqi_queue_group *queue_group)
2328 {
2329 struct raid_map *raid_map;
2330 bool is_write = false;
2331 u32 map_index;
2332 u64 first_block;
2333 u64 last_block;
2334 u32 block_cnt;
2335 u32 blocks_per_row;
2336 u64 first_row;
2337 u64 last_row;
2338 u32 first_row_offset;
2339 u32 last_row_offset;
2340 u32 first_column;
2341 u32 last_column;
2342 u64 r0_first_row;
2343 u64 r0_last_row;
2344 u32 r5or6_blocks_per_row;
2345 u64 r5or6_first_row;
2346 u64 r5or6_last_row;
2347 u32 r5or6_first_row_offset;
2348 u32 r5or6_last_row_offset;
2349 u32 r5or6_first_column;
2350 u32 r5or6_last_column;
2351 u16 data_disks_per_row;
2352 u32 total_disks_per_row;
2353 u16 layout_map_count;
2354 u32 stripesize;
2355 u16 strip_size;
2356 u32 first_group;
2357 u32 last_group;
2358 u32 current_group;
2359 u32 map_row;
2360 u32 aio_handle;
2361 u64 disk_block;
2362 u32 disk_block_cnt;
2363 u8 cdb[16];
2364 u8 cdb_length;
2365 int offload_to_mirror;
2366 struct pqi_encryption_info *encryption_info_ptr;
2367 struct pqi_encryption_info encryption_info;
2368 #if BITS_PER_LONG == 32
2369 u64 tmpdiv;
2370 #endif
2371
2372 /* Check for valid opcode, get LBA and block count. */
2373 switch (scmd->cmnd[0]) {
2374 case WRITE_6:
2375 is_write = true;
2376 /* fall through */
2377 case READ_6:
2378 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2379 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2380 block_cnt = (u32)scmd->cmnd[4];
2381 if (block_cnt == 0)
2382 block_cnt = 256;
2383 break;
2384 case WRITE_10:
2385 is_write = true;
2386 /* fall through */
2387 case READ_10:
2388 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2389 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2390 break;
2391 case WRITE_12:
2392 is_write = true;
2393 /* fall through */
2394 case READ_12:
2395 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2396 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2397 break;
2398 case WRITE_16:
2399 is_write = true;
2400 /* fall through */
2401 case READ_16:
2402 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2403 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2404 break;
2405 default:
2406 /* Process via normal I/O path. */
2407 return PQI_RAID_BYPASS_INELIGIBLE;
2408 }
2409
2410 /* Check for write to non-RAID-0. */
2411 if (is_write && device->raid_level != SA_RAID_0)
2412 return PQI_RAID_BYPASS_INELIGIBLE;
2413
2414 if (unlikely(block_cnt == 0))
2415 return PQI_RAID_BYPASS_INELIGIBLE;
2416
2417 last_block = first_block + block_cnt - 1;
2418 raid_map = device->raid_map;
2419
2420 /* Check for invalid block or wraparound. */
2421 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2422 last_block < first_block)
2423 return PQI_RAID_BYPASS_INELIGIBLE;
2424
2425 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2426 strip_size = get_unaligned_le16(&raid_map->strip_size);
2427 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2428
2429 /* Calculate stripe information for the request. */
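/*
 * Example: with strip_size = 128 and data_disks_per_row = 4,
 * blocks_per_row = 512, so LBA 1000 lands in row 1 (1000 / 512) at
 * row offset 488 and column 3 (488 / 128).
 */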
2430 blocks_per_row = data_disks_per_row * strip_size;
2431 #if BITS_PER_LONG == 32
2432 tmpdiv = first_block;
2433 do_div(tmpdiv, blocks_per_row);
2434 first_row = tmpdiv;
2435 tmpdiv = last_block;
2436 do_div(tmpdiv, blocks_per_row);
2437 last_row = tmpdiv;
2438 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2439 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2440 tmpdiv = first_row_offset;
2441 do_div(tmpdiv, strip_size);
2442 first_column = tmpdiv;
2443 tmpdiv = last_row_offset;
2444 do_div(tmpdiv, strip_size);
2445 last_column = tmpdiv;
2446 #else
2447 first_row = first_block / blocks_per_row;
2448 last_row = last_block / blocks_per_row;
2449 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2450 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2451 first_column = first_row_offset / strip_size;
2452 last_column = last_row_offset / strip_size;
2453 #endif
2454
2455 /* If this isn't a single row/column then give to the controller. */
2456 if (first_row != last_row || first_column != last_column)
2457 return PQI_RAID_BYPASS_INELIGIBLE;
2458
2459 /* Proceeding with driver mapping. */
2460 total_disks_per_row = data_disks_per_row +
2461 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2462 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2463 get_unaligned_le16(&raid_map->row_cnt);
2464 map_index = (map_row * total_disks_per_row) + first_column;
2465
2466 /* RAID 1 */
2467 if (device->raid_level == SA_RAID_1) {
2468 if (device->offload_to_mirror)
2469 map_index += data_disks_per_row;
2470 device->offload_to_mirror = !device->offload_to_mirror;
2471 } else if (device->raid_level == SA_RAID_ADM) {
2472 /* RAID ADM */
2473 /*
2474 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2475 * divisible by 3.
2476 */
2477 offload_to_mirror = device->offload_to_mirror;
2478 if (offload_to_mirror == 0) {
2479 /* use physical disk in the first mirrored group. */
2480 map_index %= data_disks_per_row;
2481 } else {
2482 do {
2483 /*
2484 * Determine mirror group that map_index
2485 * indicates.
2486 */
2487 current_group = map_index / data_disks_per_row;
2488
2489 if (offload_to_mirror != current_group) {
2490 if (current_group <
2491 layout_map_count - 1) {
2492 /*
2493 * Select raid index from
2494 * next group.
2495 */
2496 map_index += data_disks_per_row;
2497 current_group++;
2498 } else {
2499 /*
2500 * Select raid index from first
2501 * group.
2502 */
2503 map_index %= data_disks_per_row;
2504 current_group = 0;
2505 }
2506 }
2507 } while (offload_to_mirror != current_group);
2508 }
2509
2510 /* Set mirror group to use next time. */
2511 offload_to_mirror =
2512 (offload_to_mirror >= layout_map_count - 1) ?
2513 0 : offload_to_mirror + 1;
2514 WARN_ON(offload_to_mirror >= layout_map_count);
2515 device->offload_to_mirror = offload_to_mirror;
2516 /*
2517 * Avoid direct use of device->offload_to_mirror within this
2518 * function since multiple threads might simultaneously
2519 * increment it beyond the range of device->layout_map_count -1.
2520 */
2521 } else if ((device->raid_level == SA_RAID_5 ||
2522 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2523 /* RAID 50/60 */
2524 /* Verify first and last block are in same RAID group */
2525 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2526 stripesize = r5or6_blocks_per_row * layout_map_count;
2527 #if BITS_PER_LONG == 32
2528 tmpdiv = first_block;
2529 first_group = do_div(tmpdiv, stripesize);
2530 tmpdiv = first_group;
2531 do_div(tmpdiv, r5or6_blocks_per_row);
2532 first_group = tmpdiv;
2533 tmpdiv = last_block;
2534 last_group = do_div(tmpdiv, stripesize);
2535 tmpdiv = last_group;
2536 do_div(tmpdiv, r5or6_blocks_per_row);
2537 last_group = tmpdiv;
2538 #else
2539 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2540 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2541 #endif
2542 if (first_group != last_group)
2543 return PQI_RAID_BYPASS_INELIGIBLE;
2544
2545 /* Verify request is in a single row of RAID 5/6 */
2546 #if BITS_PER_LONG == 32
2547 tmpdiv = first_block;
2548 do_div(tmpdiv, stripesize);
2549 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2550 tmpdiv = last_block;
2551 do_div(tmpdiv, stripesize);
2552 r5or6_last_row = r0_last_row = tmpdiv;
2553 #else
2554 first_row = r5or6_first_row = r0_first_row =
2555 first_block / stripesize;
2556 r5or6_last_row = r0_last_row = last_block / stripesize;
2557 #endif
2558 if (r5or6_first_row != r5or6_last_row)
2559 return PQI_RAID_BYPASS_INELIGIBLE;
2560
2561 /* Verify request is in a single column */
2562 #if BITS_PER_LONG == 32
2563 tmpdiv = first_block;
2564 first_row_offset = do_div(tmpdiv, stripesize);
2565 tmpdiv = first_row_offset;
2566 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2567 r5or6_first_row_offset = first_row_offset;
2568 tmpdiv = last_block;
2569 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2570 tmpdiv = r5or6_last_row_offset;
2571 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2572 tmpdiv = r5or6_first_row_offset;
2573 do_div(tmpdiv, strip_size);
2574 first_column = r5or6_first_column = tmpdiv;
2575 tmpdiv = r5or6_last_row_offset;
2576 do_div(tmpdiv, strip_size);
2577 r5or6_last_column = tmpdiv;
2578 #else
2579 first_row_offset = r5or6_first_row_offset =
2580 (u32)((first_block % stripesize) %
2581 r5or6_blocks_per_row);
2582
2583 r5or6_last_row_offset =
2584 (u32)((last_block % stripesize) %
2585 r5or6_blocks_per_row);
2586
2587 first_column = r5or6_first_row_offset / strip_size;
2588 r5or6_first_column = first_column;
2589 r5or6_last_column = r5or6_last_row_offset / strip_size;
2590 #endif
2591 if (r5or6_first_column != r5or6_last_column)
2592 return PQI_RAID_BYPASS_INELIGIBLE;
2593
2594 /* Request is eligible */
2595 map_row =
2596 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2597 get_unaligned_le16(&raid_map->row_cnt);
2598
2599 map_index = (first_group *
2600 (get_unaligned_le16(&raid_map->row_cnt) *
2601 total_disks_per_row)) +
2602 (map_row * total_disks_per_row) + first_column;
2603 }
2604
2605 aio_handle = raid_map->disk_data[map_index].aio_handle;
2606 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2607 first_row * strip_size +
2608 (first_row_offset - first_column * strip_size);
2609 disk_block_cnt = block_cnt;
2610
2611 /* Handle differing logical/physical block sizes. */
2612 if (raid_map->phys_blk_shift) {
2613 disk_block <<= raid_map->phys_blk_shift;
2614 disk_block_cnt <<= raid_map->phys_blk_shift;
2615 }
2616
2617 if (unlikely(disk_block_cnt > 0xffff))
2618 return PQI_RAID_BYPASS_INELIGIBLE;
2619
2620 /* Build the new CDB for the physical disk I/O. */
2621 if (disk_block > 0xffffffff) {
2622 cdb[0] = is_write ? WRITE_16 : READ_16;
2623 cdb[1] = 0;
2624 put_unaligned_be64(disk_block, &cdb[2]);
2625 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2626 cdb[14] = 0;
2627 cdb[15] = 0;
2628 cdb_length = 16;
2629 } else {
2630 cdb[0] = is_write ? WRITE_10 : READ_10;
2631 cdb[1] = 0;
2632 put_unaligned_be32((u32)disk_block, &cdb[2]);
2633 cdb[6] = 0;
2634 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2635 cdb[9] = 0;
2636 cdb_length = 10;
2637 }
2638
2639 if (get_unaligned_le16(&raid_map->flags) &
2640 RAID_MAP_ENCRYPTION_ENABLED) {
2641 pqi_set_encryption_info(&encryption_info, raid_map,
2642 first_block);
2643 encryption_info_ptr = &encryption_info;
2644 } else {
2645 encryption_info_ptr = NULL;
2646 }
2647
2648 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2649 cdb, cdb_length, queue_group, encryption_info_ptr, true);
2650 }
2651
2652 #define PQI_STATUS_IDLE 0x0
2653
2654 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2655 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2656
2657 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2658 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2659 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2660 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2661 #define PQI_DEVICE_STATE_ERROR 0x4
2662
2663 #define PQI_MODE_READY_TIMEOUT_SECS 30
2664 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2665
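/*
 * Poll, under a single 30-second deadline, for the PQI signature, then
 * an IDLE function-and-status code, then the "all registers ready"
 * device state.
 */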
2666 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2667 {
2668 struct pqi_device_registers __iomem *pqi_registers;
2669 unsigned long timeout;
2670 u64 signature;
2671 u8 status;
2672
2673 pqi_registers = ctrl_info->pqi_registers;
2674 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2675
2676 while (1) {
2677 signature = readq(&pqi_registers->signature);
2678 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2679 sizeof(signature)) == 0)
2680 break;
2681 if (time_after(jiffies, timeout)) {
2682 dev_err(&ctrl_info->pci_dev->dev,
2683 "timed out waiting for PQI signature\n");
2684 return -ETIMEDOUT;
2685 }
2686 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2687 }
2688
2689 while (1) {
2690 status = readb(&pqi_registers->function_and_status_code);
2691 if (status == PQI_STATUS_IDLE)
2692 break;
2693 if (time_after(jiffies, timeout)) {
2694 dev_err(&ctrl_info->pci_dev->dev,
2695 "timed out waiting for PQI IDLE\n");
2696 return -ETIMEDOUT;
2697 }
2698 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2699 }
2700
2701 while (1) {
2702 if (readl(&pqi_registers->device_status) ==
2703 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2704 break;
2705 if (time_after(jiffies, timeout)) {
2706 dev_err(&ctrl_info->pci_dev->dev,
2707 "timed out waiting for PQI all registers ready\n");
2708 return -ETIMEDOUT;
2709 }
2710 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2711 }
2712
2713 return 0;
2714 }
2715
2716 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2717 {
2718 struct pqi_scsi_dev *device;
2719
2720 device = io_request->scmd->device->hostdata;
2721 device->raid_bypass_enabled = false;
2722 device->aio_enabled = false;
2723 }
2724
2725 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2726 {
2727 struct pqi_ctrl_info *ctrl_info;
2728 struct pqi_scsi_dev *device;
2729
2730 device = sdev->hostdata;
2731 if (device->device_offline)
2732 return;
2733
2734 device->device_offline = true;
2735 ctrl_info = shost_to_hba(sdev->host);
2736 pqi_schedule_rescan_worker(ctrl_info);
2737 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2738 path, ctrl_info->scsi_host->host_no, device->bus,
2739 device->target, device->lun);
2740 }
2741
2742 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2743 {
2744 u8 scsi_status;
2745 u8 host_byte;
2746 struct scsi_cmnd *scmd;
2747 struct pqi_raid_error_info *error_info;
2748 size_t sense_data_length;
2749 int residual_count;
2750 int xfer_count;
2751 struct scsi_sense_hdr sshdr;
2752
2753 scmd = io_request->scmd;
2754 if (!scmd)
2755 return;
2756
2757 error_info = io_request->error_info;
2758 scsi_status = error_info->status;
2759 host_byte = DID_OK;
2760
2761 switch (error_info->data_out_result) {
2762 case PQI_DATA_IN_OUT_GOOD:
2763 break;
2764 case PQI_DATA_IN_OUT_UNDERFLOW:
2765 xfer_count =
2766 get_unaligned_le32(&error_info->data_out_transferred);
2767 residual_count = scsi_bufflen(scmd) - xfer_count;
2768 scsi_set_resid(scmd, residual_count);
2769 if (xfer_count < scmd->underflow)
2770 host_byte = DID_SOFT_ERROR;
2771 break;
2772 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2773 case PQI_DATA_IN_OUT_ABORTED:
2774 host_byte = DID_ABORT;
2775 break;
2776 case PQI_DATA_IN_OUT_TIMEOUT:
2777 host_byte = DID_TIME_OUT;
2778 break;
2779 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2780 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2781 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2782 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2783 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2784 case PQI_DATA_IN_OUT_ERROR:
2785 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2786 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2787 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2788 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2789 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2790 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2791 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2792 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2793 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2794 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2795 default:
2796 host_byte = DID_ERROR;
2797 break;
2798 }
2799
2800 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2801 if (sense_data_length == 0)
2802 sense_data_length =
2803 get_unaligned_le16(&error_info->response_data_length);
2804 if (sense_data_length) {
2805 if (sense_data_length > sizeof(error_info->data))
2806 sense_data_length = sizeof(error_info->data);
2807
2808 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2809 scsi_normalize_sense(error_info->data,
2810 sense_data_length, &sshdr) &&
2811 sshdr.sense_key == HARDWARE_ERROR &&
2812 sshdr.asc == 0x3e) {
2813 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2814 struct pqi_scsi_dev *device = scmd->device->hostdata;
2815
2816 switch (sshdr.ascq) {
2817 case 0x1: /* LOGICAL UNIT FAILURE */
2818 if (printk_ratelimit())
2819 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2820 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2821 pqi_take_device_offline(scmd->device, "RAID");
2822 host_byte = DID_NO_CONNECT;
2823 break;
2824
2825 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
2826 if (printk_ratelimit())
2827 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2828 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2829 break;
2830 }
2831 }
2832
2833 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2834 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2835 memcpy(scmd->sense_buffer, error_info->data,
2836 sense_data_length);
2837 }
2838
2839 scmd->result = scsi_status;
2840 set_host_byte(scmd, host_byte);
2841 }
2842
2843 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2844 {
2845 u8 scsi_status;
2846 u8 host_byte;
2847 struct scsi_cmnd *scmd;
2848 struct pqi_aio_error_info *error_info;
2849 size_t sense_data_length;
2850 int residual_count;
2851 int xfer_count;
2852 bool device_offline;
2853
2854 scmd = io_request->scmd;
2855 error_info = io_request->error_info;
2856 host_byte = DID_OK;
2857 sense_data_length = 0;
2858 device_offline = false;
2859
2860 switch (error_info->service_response) {
2861 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2862 scsi_status = error_info->status;
2863 break;
2864 case PQI_AIO_SERV_RESPONSE_FAILURE:
2865 switch (error_info->status) {
2866 case PQI_AIO_STATUS_IO_ABORTED:
2867 scsi_status = SAM_STAT_TASK_ABORTED;
2868 break;
2869 case PQI_AIO_STATUS_UNDERRUN:
2870 scsi_status = SAM_STAT_GOOD;
2871 residual_count = get_unaligned_le32(
2872 &error_info->residual_count);
2873 scsi_set_resid(scmd, residual_count);
2874 xfer_count = scsi_bufflen(scmd) - residual_count;
2875 if (xfer_count < scmd->underflow)
2876 host_byte = DID_SOFT_ERROR;
2877 break;
2878 case PQI_AIO_STATUS_OVERRUN:
2879 scsi_status = SAM_STAT_GOOD;
2880 break;
2881 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2882 pqi_aio_path_disabled(io_request);
2883 scsi_status = SAM_STAT_GOOD;
2884 io_request->status = -EAGAIN;
2885 break;
2886 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2887 case PQI_AIO_STATUS_INVALID_DEVICE:
2888 if (!io_request->raid_bypass) {
2889 device_offline = true;
2890 pqi_take_device_offline(scmd->device, "AIO");
2891 host_byte = DID_NO_CONNECT;
2892 }
2893 scsi_status = SAM_STAT_CHECK_CONDITION;
2894 break;
2895 case PQI_AIO_STATUS_IO_ERROR:
2896 default:
2897 scsi_status = SAM_STAT_CHECK_CONDITION;
2898 break;
2899 }
2900 break;
2901 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2902 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2903 scsi_status = SAM_STAT_GOOD;
2904 break;
2905 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2906 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2907 default:
2908 scsi_status = SAM_STAT_CHECK_CONDITION;
2909 break;
2910 }
2911
2912 if (error_info->data_present) {
2913 sense_data_length =
2914 get_unaligned_le16(&error_info->data_length);
2915 if (sense_data_length) {
2916 if (sense_data_length > sizeof(error_info->data))
2917 sense_data_length = sizeof(error_info->data);
2918 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2919 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2920 memcpy(scmd->sense_buffer, error_info->data,
2921 sense_data_length);
2922 }
2923 }
2924
2925 if (device_offline && sense_data_length == 0)
2926 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2927 0x3e, 0x1);
2928
2929 scmd->result = scsi_status;
2930 set_host_byte(scmd, host_byte);
2931 }
2932
2933 static void pqi_process_io_error(unsigned int iu_type,
2934 struct pqi_io_request *io_request)
2935 {
2936 switch (iu_type) {
2937 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2938 pqi_process_raid_io_error(io_request);
2939 break;
2940 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2941 pqi_process_aio_io_error(io_request);
2942 break;
2943 }
2944 }
2945
2946 static int pqi_interpret_task_management_response(
2947 struct pqi_task_management_response *response)
2948 {
2949 int rc;
2950
2951 switch (response->response_code) {
2952 case SOP_TMF_COMPLETE:
2953 case SOP_TMF_FUNCTION_SUCCEEDED:
2954 rc = 0;
2955 break;
2956 case SOP_TMF_REJECTED:
2957 rc = -EAGAIN;
2958 break;
2959 default:
2960 rc = -EIO;
2961 break;
2962 }
2963
2964 return rc;
2965 }
2966
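/*
 * Drain the queue group's outbound (response) queue, matching each
 * response to its io_request by request_id and finishing it through
 * io_complete_callback.
 */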
2967 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2968 struct pqi_queue_group *queue_group)
2969 {
2970 unsigned int num_responses;
2971 pqi_index_t oq_pi;
2972 pqi_index_t oq_ci;
2973 struct pqi_io_request *io_request;
2974 struct pqi_io_response *response;
2975 u16 request_id;
2976
2977 num_responses = 0;
2978 oq_ci = queue_group->oq_ci_copy;
2979
2980 while (1) {
2981 oq_pi = readl(queue_group->oq_pi);
2982 if (oq_pi == oq_ci)
2983 break;
2984
2985 num_responses++;
2986 response = queue_group->oq_element_array +
2987 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2988
2989 request_id = get_unaligned_le16(&response->request_id);
2990 WARN_ON(request_id >= ctrl_info->max_io_slots);
2991
2992 io_request = &ctrl_info->io_request_pool[request_id];
2993 WARN_ON(atomic_read(&io_request->refcount) == 0);
2994
2995 switch (response->header.iu_type) {
2996 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2997 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2998 if (io_request->scmd)
2999 io_request->scmd->result = 0;
3000 /* fall through */
3001 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3002 break;
3003 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3004 io_request->status =
3005 get_unaligned_le16(
3006 &((struct pqi_vendor_general_response *)
3007 response)->status);
3008 break;
3009 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3010 io_request->status =
3011 pqi_interpret_task_management_response(
3012 (void *)response);
3013 break;
3014 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3015 pqi_aio_path_disabled(io_request);
3016 io_request->status = -EAGAIN;
3017 break;
3018 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3019 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3020 io_request->error_info = ctrl_info->error_buffer +
3021 (get_unaligned_le16(&response->error_index) *
3022 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3023 pqi_process_io_error(response->header.iu_type,
3024 io_request);
3025 break;
3026 default:
3027 dev_err(&ctrl_info->pci_dev->dev,
3028 "unexpected IU type: 0x%x\n",
3029 response->header.iu_type);
3030 break;
3031 }
3032
3033 io_request->io_complete_callback(io_request,
3034 io_request->context);
3035
3036 /*
3037 * Note that the I/O request structure CANNOT BE TOUCHED after
3038 * returning from the I/O completion callback!
3039 */
3040
3041 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3042 }
3043
3044 if (num_responses) {
3045 queue_group->oq_ci_copy = oq_ci;
3046 writel(oq_ci, queue_group->oq_ci);
3047 }
3048
3049 return num_responses;
3050 }
3051
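/*
 * One element of the circular queue is always left unused so that
 * pi == ci unambiguously means "empty" rather than "full".
 */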
3052 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3053 unsigned int ci, unsigned int elements_in_queue)
3054 {
3055 unsigned int num_elements_used;
3056
3057 if (pi >= ci)
3058 num_elements_used = pi - ci;
3059 else
3060 num_elements_used = elements_in_queue - ci + pi;
3061
3062 return elements_in_queue - num_elements_used - 1;
3063 }
3064
3065 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3066 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3067 {
3068 pqi_index_t iq_pi;
3069 pqi_index_t iq_ci;
3070 unsigned long flags;
3071 void *next_element;
3072 struct pqi_queue_group *queue_group;
3073
3074 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3075 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3076
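/*
 * Spin until the RAID path submission queue has a free element,
 * giving up if the controller goes offline.
 */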
3077 while (1) {
3078 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3079
3080 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3081 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3082
3083 if (pqi_num_elements_free(iq_pi, iq_ci,
3084 ctrl_info->num_elements_per_iq))
3085 break;
3086
3087 spin_unlock_irqrestore(
3088 &queue_group->submit_lock[RAID_PATH], flags);
3089
3090 if (pqi_ctrl_offline(ctrl_info))
3091 return;
3092 }
3093
3094 next_element = queue_group->iq_element_array[RAID_PATH] +
3095 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3096
3097 memcpy(next_element, iu, iu_length);
3098
3099 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3100 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3101
3102 /*
3103 * This write notifies the controller that an IU is available to be
3104 * processed.
3105 */
3106 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3107
3108 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3109 }
3110
3111 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3112 struct pqi_event *event)
3113 {
3114 struct pqi_event_acknowledge_request request;
3115
3116 memset(&request, 0, sizeof(request));
3117
3118 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3119 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3120 &request.header.iu_length);
3121 request.event_type = event->event_type;
3122 request.event_id = event->event_id;
3123 request.additional_event_id = event->additional_event_id;
3124
3125 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3126 }
3127
3128 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3129 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3130
3131 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3132 struct pqi_ctrl_info *ctrl_info)
3133 {
3134 unsigned long timeout;
3135 u8 status;
3136
3137 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3138
3139 while (1) {
3140 status = pqi_read_soft_reset_status(ctrl_info);
3141 if (status & PQI_SOFT_RESET_INITIATE)
3142 return RESET_INITIATE_DRIVER;
3143
3144 if (status & PQI_SOFT_RESET_ABORT)
3145 return RESET_ABORT;
3146
3147 if (time_after(jiffies, timeout)) {
3148 dev_err(&ctrl_info->pci_dev->dev,
3149 "timed out waiting for soft reset status\n");
3150 return RESET_TIMEDOUT;
3151 }
3152
3153 if (!sis_is_firmware_running(ctrl_info))
3154 return RESET_NORESPONSE;
3155
3156 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3157 }
3158 }
3159
3160 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3161 enum pqi_soft_reset_status reset_status)
3162 {
3163 int rc;
3164
3165 switch (reset_status) {
3166 case RESET_INITIATE_DRIVER:
3167 /* fall through */
3168 case RESET_TIMEDOUT:
3169 dev_info(&ctrl_info->pci_dev->dev,
3170 "resetting controller %u\n", ctrl_info->ctrl_id);
3171 sis_soft_reset(ctrl_info);
3172 /* fall through */
3173 case RESET_INITIATE_FIRMWARE:
3174 rc = pqi_ofa_ctrl_restart(ctrl_info);
3175 pqi_ofa_free_host_buffer(ctrl_info);
3176 dev_info(&ctrl_info->pci_dev->dev,
3177 "Online Firmware Activation for controller %u: %s\n",
3178 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3179 break;
3180 case RESET_ABORT:
3181 pqi_ofa_ctrl_unquiesce(ctrl_info);
3182 dev_info(&ctrl_info->pci_dev->dev,
3183 "Online Firmware Activation for controller %u: %s\n",
3184 ctrl_info->ctrl_id, "ABORTED");
3185 break;
3186 case RESET_NORESPONSE:
3187 pqi_ofa_free_host_buffer(ctrl_info);
3188 pqi_take_ctrl_offline(ctrl_info);
3189 break;
3190 }
3191 }
3192
3193 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3194 struct pqi_event *event)
3195 {
3196 u16 event_id;
3197 enum pqi_soft_reset_status status;
3198
3199 event_id = get_unaligned_le16(&event->event_id);
3200
3201 mutex_lock(&ctrl_info->ofa_mutex);
3202
3203 if (event_id == PQI_EVENT_OFA_QUIESCE) {
3204 dev_info(&ctrl_info->pci_dev->dev,
3205 "Received Online Firmware Activation quiesce event for controller %u\n",
3206 ctrl_info->ctrl_id);
3207 pqi_ofa_ctrl_quiesce(ctrl_info);
3208 pqi_acknowledge_event(ctrl_info, event);
3209 if (ctrl_info->soft_reset_handshake_supported) {
3210 status = pqi_poll_for_soft_reset_status(ctrl_info);
3211 pqi_process_soft_reset(ctrl_info, status);
3212 } else {
3213 pqi_process_soft_reset(ctrl_info,
3214 RESET_INITIATE_FIRMWARE);
3215 }
3216
3217 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3218 pqi_acknowledge_event(ctrl_info, event);
3219 pqi_ofa_setup_host_buffer(ctrl_info,
3220 le32_to_cpu(event->ofa_bytes_requested));
3221 pqi_ofa_host_memory_update(ctrl_info);
3222 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3223 pqi_ofa_free_host_buffer(ctrl_info);
3224 pqi_acknowledge_event(ctrl_info, event);
3225 dev_info(&ctrl_info->pci_dev->dev,
3226 "Online Firmware Activation(%u) cancel reason : %u\n",
3227 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3228 }
3229
3230 mutex_unlock(&ctrl_info->ofa_mutex);
3231 }
3232
3233 static void pqi_event_worker(struct work_struct *work)
3234 {
3235 unsigned int i;
3236 struct pqi_ctrl_info *ctrl_info;
3237 struct pqi_event *event;
3238
3239 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3240
3241 pqi_ctrl_busy(ctrl_info);
3242 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
3243 if (pqi_ctrl_offline(ctrl_info))
3244 goto out;
3245
3246 pqi_schedule_rescan_worker_delayed(ctrl_info);
3247
3248 event = ctrl_info->events;
3249 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3250 if (event->pending) {
3251 event->pending = false;
3252 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3253 pqi_ctrl_unbusy(ctrl_info);
3254 pqi_ofa_process_event(ctrl_info, event);
3255 return;
3256 }
3257 pqi_acknowledge_event(ctrl_info, event);
3258 }
3259 event++;
3260 }
3261
3262 out:
3263 pqi_ctrl_unbusy(ctrl_info);
3264 }
3265
3266 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
3267
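/*
 * Periodic health check: the controller is taken offline only when both
 * the interrupt count and the firmware heartbeat counter have stopped
 * advancing since the previous timer tick.
 */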
3268 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3269 {
3270 int num_interrupts;
3271 u32 heartbeat_count;
3272 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3273 heartbeat_timer);
3274
3275 pqi_check_ctrl_health(ctrl_info);
3276 if (pqi_ctrl_offline(ctrl_info))
3277 return;
3278
3279 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3280 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3281
3282 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3283 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3284 dev_err(&ctrl_info->pci_dev->dev,
3285 "no heartbeat detected - last heartbeat count: %u\n",
3286 heartbeat_count);
3287 pqi_take_ctrl_offline(ctrl_info);
3288 return;
3289 }
3290 } else {
3291 ctrl_info->previous_num_interrupts = num_interrupts;
3292 }
3293
3294 ctrl_info->previous_heartbeat_count = heartbeat_count;
3295 mod_timer(&ctrl_info->heartbeat_timer,
3296 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3297 }
3298
3299 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3300 {
3301 if (!ctrl_info->heartbeat_counter)
3302 return;
3303
3304 ctrl_info->previous_num_interrupts =
3305 atomic_read(&ctrl_info->num_interrupts);
3306 ctrl_info->previous_heartbeat_count =
3307 pqi_read_heartbeat_counter(ctrl_info);
3308
3309 ctrl_info->heartbeat_timer.expires =
3310 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3311 add_timer(&ctrl_info->heartbeat_timer);
3312 }
3313
3314 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3315 {
3316 del_timer_sync(&ctrl_info->heartbeat_timer);
3317 }
3318
3319 static inline int pqi_event_type_to_event_index(unsigned int event_type)
3320 {
3321 int index;
3322
3323 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3324 if (event_type == pqi_supported_event_types[index])
3325 return index;
3326
3327 return -1;
3328 }
3329
3330 static inline bool pqi_is_supported_event(unsigned int event_type)
3331 {
3332 return pqi_event_type_to_event_index(event_type) != -1;
3333 }
3334
3335 static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3336 struct pqi_event_response *response)
3337 {
3338 u16 event_id;
3339
3340 event_id = get_unaligned_le16(&event->event_id);
3341
3342 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3343 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3344 event->ofa_bytes_requested =
3345 response->data.ofa_memory_allocation.bytes_requested;
3346 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3347 event->ofa_cancel_reason =
3348 response->data.ofa_cancelled.reason;
3349 }
3350 }
3351 }
3352
3353 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3354 {
3355 unsigned int num_events;
3356 pqi_index_t oq_pi;
3357 pqi_index_t oq_ci;
3358 struct pqi_event_queue *event_queue;
3359 struct pqi_event_response *response;
3360 struct pqi_event *event;
3361 int event_index;
3362
3363 event_queue = &ctrl_info->event_queue;
3364 num_events = 0;
3365 oq_ci = event_queue->oq_ci_copy;
3366
3367 while (1) {
3368 oq_pi = readl(event_queue->oq_pi);
3369 if (oq_pi == oq_ci)
3370 break;
3371
3372 num_events++;
3373 response = event_queue->oq_element_array +
3374 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3375
3376 event_index =
3377 pqi_event_type_to_event_index(response->event_type);
3378
3379 if (event_index >= 0) {
3380 if (response->request_acknowlege) {
3381 event = &ctrl_info->events[event_index];
3382 event->pending = true;
3383 event->event_type = response->event_type;
3384 event->event_id = response->event_id;
3385 event->additional_event_id =
3386 response->additional_event_id;
3387 pqi_ofa_capture_event_payload(event, response);
3388 }
3389 }
3390
3391 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3392 }
3393
3394 if (num_events) {
3395 event_queue->oq_ci_copy = oq_ci;
3396 writel(oq_ci, event_queue->oq_ci);
3397 schedule_work(&ctrl_info->event_work);
3398 }
3399
3400 return num_events;
3401 }
3402
3403 #define PQI_LEGACY_INTX_MASK 0x1
3404
3405 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3406 bool enable_intx)
3407 {
3408 u32 intx_mask;
3409 struct pqi_device_registers __iomem *pqi_registers;
3410 volatile void __iomem *register_addr;
3411
3412 pqi_registers = ctrl_info->pqi_registers;
3413
3414 if (enable_intx)
3415 register_addr = &pqi_registers->legacy_intx_mask_clear;
3416 else
3417 register_addr = &pqi_registers->legacy_intx_mask_set;
3418
3419 intx_mask = readl(register_addr);
3420 intx_mask |= PQI_LEGACY_INTX_MASK;
3421 writel(intx_mask, register_addr);
3422 }
3423
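/*
 * Switch the controller between MSI-X, legacy INTx, and no-interrupt
 * modes, adjusting the PQI legacy INTx mask and the SIS interrupt
 * enables as required by the old/new mode combination.
 */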
3424 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3425 enum pqi_irq_mode new_mode)
3426 {
3427 switch (ctrl_info->irq_mode) {
3428 case IRQ_MODE_MSIX:
3429 switch (new_mode) {
3430 case IRQ_MODE_MSIX:
3431 break;
3432 case IRQ_MODE_INTX:
3433 pqi_configure_legacy_intx(ctrl_info, true);
3434 sis_enable_intx(ctrl_info);
3435 break;
3436 case IRQ_MODE_NONE:
3437 break;
3438 }
3439 break;
3440 case IRQ_MODE_INTX:
3441 switch (new_mode) {
3442 case IRQ_MODE_MSIX:
3443 pqi_configure_legacy_intx(ctrl_info, false);
3444 sis_enable_msix(ctrl_info);
3445 break;
3446 case IRQ_MODE_INTX:
3447 break;
3448 case IRQ_MODE_NONE:
3449 pqi_configure_legacy_intx(ctrl_info, false);
3450 break;
3451 }
3452 break;
3453 case IRQ_MODE_NONE:
3454 switch (new_mode) {
3455 case IRQ_MODE_MSIX:
3456 sis_enable_msix(ctrl_info);
3457 break;
3458 case IRQ_MODE_INTX:
3459 pqi_configure_legacy_intx(ctrl_info, true);
3460 sis_enable_intx(ctrl_info);
3461 break;
3462 case IRQ_MODE_NONE:
3463 break;
3464 }
3465 break;
3466 }
3467
3468 ctrl_info->irq_mode = new_mode;
3469 }
3470
3471 #define PQI_LEGACY_INTX_PENDING 0x1
3472
3473 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3474 {
3475 bool valid_irq;
3476 u32 intx_status;
3477
3478 switch (ctrl_info->irq_mode) {
3479 case IRQ_MODE_MSIX:
3480 valid_irq = true;
3481 break;
3482 case IRQ_MODE_INTX:
3483 intx_status =
3484 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3485 if (intx_status & PQI_LEGACY_INTX_PENDING)
3486 valid_irq = true;
3487 else
3488 valid_irq = false;
3489 break;
3490 case IRQ_MODE_NONE:
3491 default:
3492 valid_irq = false;
3493 break;
3494 }
3495
3496 return valid_irq;
3497 }
3498
3499 static irqreturn_t pqi_irq_handler(int irq, void *data)
3500 {
3501 struct pqi_ctrl_info *ctrl_info;
3502 struct pqi_queue_group *queue_group;
3503 unsigned int num_responses_handled;
3504
3505 queue_group = data;
3506 ctrl_info = queue_group->ctrl_info;
3507
3508 if (!pqi_is_valid_irq(ctrl_info))
3509 return IRQ_NONE;
3510
3511 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3512
3513 if (irq == ctrl_info->event_irq)
3514 num_responses_handled += pqi_process_event_intr(ctrl_info);
3515
3516 if (num_responses_handled)
3517 atomic_inc(&ctrl_info->num_interrupts);
3518
3519 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3520 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3521
3522 return IRQ_HANDLED;
3523 }
3524
3525 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3526 {
3527 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3528 int i;
3529 int rc;
3530
3531 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3532
3533 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3534 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3535 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3536 if (rc) {
3537 dev_err(&pci_dev->dev,
3538 "irq %u init failed with error %d\n",
3539 pci_irq_vector(pci_dev, i), rc);
3540 return rc;
3541 }
3542 ctrl_info->num_msix_vectors_initialized++;
3543 }
3544
3545 return 0;
3546 }
3547
3548 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3549 {
3550 int i;
3551
3552 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3553 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3554 &ctrl_info->queue_groups[i]);
3555
3556 ctrl_info->num_msix_vectors_initialized = 0;
3557 }
3558
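/*
 * Request between PQI_MIN_MSIX_VECTORS and num_queue_groups MSI-X
 * vectors; PCI_IRQ_AFFINITY lets the PCI core spread the vectors across
 * the online CPUs so each queue group tends to serve a distinct CPU set.
 */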
3559 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3560 {
3561 int num_vectors_enabled;
3562
3563 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3564 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3565 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3566 if (num_vectors_enabled < 0) {
3567 dev_err(&ctrl_info->pci_dev->dev,
3568 "MSI-X init failed with error %d\n",
3569 num_vectors_enabled);
3570 return num_vectors_enabled;
3571 }
3572
3573 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3574 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3575 return 0;
3576 }
3577
3578 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3579 {
3580 if (ctrl_info->num_msix_vectors_enabled) {
3581 pci_free_irq_vectors(ctrl_info->pci_dev);
3582 ctrl_info->num_msix_vectors_enabled = 0;
3583 }
3584 }
3585
3586 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3587 {
3588 unsigned int i;
3589 size_t alloc_length;
3590 size_t element_array_length_per_iq;
3591 size_t element_array_length_per_oq;
3592 void *element_array;
3593 void __iomem *next_queue_index;
3594 void *aligned_pointer;
3595 unsigned int num_inbound_queues;
3596 unsigned int num_outbound_queues;
3597 unsigned int num_queue_indexes;
3598 struct pqi_queue_group *queue_group;
3599
3600 element_array_length_per_iq =
3601 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3602 ctrl_info->num_elements_per_iq;
3603 element_array_length_per_oq =
3604 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3605 ctrl_info->num_elements_per_oq;
3606 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3607 num_outbound_queues = ctrl_info->num_queue_groups;
3608 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3609
3610 aligned_pointer = NULL;
3611
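/*
 * First pass: walk the queue layout with a NULL base pointer purely to
 * compute the total allocation size, including the alignment padding
 * required for each element array and each queue index.
 */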
3612 for (i = 0; i < num_inbound_queues; i++) {
3613 aligned_pointer = PTR_ALIGN(aligned_pointer,
3614 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3615 aligned_pointer += element_array_length_per_iq;
3616 }
3617
3618 for (i = 0; i < num_outbound_queues; i++) {
3619 aligned_pointer = PTR_ALIGN(aligned_pointer,
3620 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3621 aligned_pointer += element_array_length_per_oq;
3622 }
3623
3624 aligned_pointer = PTR_ALIGN(aligned_pointer,
3625 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3626 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3627 PQI_EVENT_OQ_ELEMENT_LENGTH;
3628
3629 for (i = 0; i < num_queue_indexes; i++) {
3630 aligned_pointer = PTR_ALIGN(aligned_pointer,
3631 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3632 aligned_pointer += sizeof(pqi_index_t);
3633 }
3634
3635 alloc_length = (size_t)aligned_pointer +
3636 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3637
3638 alloc_length += PQI_EXTRA_SGL_MEMORY;
3639
3640 ctrl_info->queue_memory_base =
3641 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3642 &ctrl_info->queue_memory_base_dma_handle,
3643 GFP_KERNEL);
3644
3645 if (!ctrl_info->queue_memory_base)
3646 return -ENOMEM;
3647
3648 ctrl_info->queue_memory_length = alloc_length;
3649
3650 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3651 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3652
3653 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3654 queue_group = &ctrl_info->queue_groups[i];
3655 queue_group->iq_element_array[RAID_PATH] = element_array;
3656 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3657 ctrl_info->queue_memory_base_dma_handle +
3658 (element_array - ctrl_info->queue_memory_base);
3659 element_array += element_array_length_per_iq;
3660 element_array = PTR_ALIGN(element_array,
3661 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3662 queue_group->iq_element_array[AIO_PATH] = element_array;
3663 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3664 ctrl_info->queue_memory_base_dma_handle +
3665 (element_array - ctrl_info->queue_memory_base);
3666 element_array += element_array_length_per_iq;
3667 element_array = PTR_ALIGN(element_array,
3668 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3669 }
3670
3671 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3672 queue_group = &ctrl_info->queue_groups[i];
3673 queue_group->oq_element_array = element_array;
3674 queue_group->oq_element_array_bus_addr =
3675 ctrl_info->queue_memory_base_dma_handle +
3676 (element_array - ctrl_info->queue_memory_base);
3677 element_array += element_array_length_per_oq;
3678 element_array = PTR_ALIGN(element_array,
3679 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3680 }
3681
3682 ctrl_info->event_queue.oq_element_array = element_array;
3683 ctrl_info->event_queue.oq_element_array_bus_addr =
3684 ctrl_info->queue_memory_base_dma_handle +
3685 (element_array - ctrl_info->queue_memory_base);
3686 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3687 PQI_EVENT_OQ_ELEMENT_LENGTH;
3688
3689 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3690 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3691
3692 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3693 queue_group = &ctrl_info->queue_groups[i];
3694 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3695 queue_group->iq_ci_bus_addr[RAID_PATH] =
3696 ctrl_info->queue_memory_base_dma_handle +
3697 (next_queue_index -
3698 (void __iomem *)ctrl_info->queue_memory_base);
3699 next_queue_index += sizeof(pqi_index_t);
3700 next_queue_index = PTR_ALIGN(next_queue_index,
3701 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3702 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3703 queue_group->iq_ci_bus_addr[AIO_PATH] =
3704 ctrl_info->queue_memory_base_dma_handle +
3705 (next_queue_index -
3706 (void __iomem *)ctrl_info->queue_memory_base);
3707 next_queue_index += sizeof(pqi_index_t);
3708 next_queue_index = PTR_ALIGN(next_queue_index,
3709 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3710 queue_group->oq_pi = next_queue_index;
3711 queue_group->oq_pi_bus_addr =
3712 ctrl_info->queue_memory_base_dma_handle +
3713 (next_queue_index -
3714 (void __iomem *)ctrl_info->queue_memory_base);
3715 next_queue_index += sizeof(pqi_index_t);
3716 next_queue_index = PTR_ALIGN(next_queue_index,
3717 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3718 }
3719
3720 ctrl_info->event_queue.oq_pi = next_queue_index;
3721 ctrl_info->event_queue.oq_pi_bus_addr =
3722 ctrl_info->queue_memory_base_dma_handle +
3723 (next_queue_index -
3724 (void __iomem *)ctrl_info->queue_memory_base);
3725
3726 return 0;
3727 }
3728
3729 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3730 {
3731 unsigned int i;
3732 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3733 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3734
3735 /*
3736 * Initialize the backpointers to the controller structure in
3737 * each operational queue group structure.
3738 */
3739 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3740 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3741
3742 /*
3743 * Assign IDs to all operational queues. Note that the IDs
3744 * assigned to operational IQs are independent of the IDs
3745 * assigned to operational OQs.
3746 */
3747 ctrl_info->event_queue.oq_id = next_oq_id++;
3748 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3749 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3750 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3751 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3752 }
3753
3754 /*
3755 * Assign MSI-X table entry indexes to all queues. Note that the
3756 * interrupt for the event queue is shared with the first queue group.
3757 */
3758 ctrl_info->event_queue.int_msg_num = 0;
3759 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3760 ctrl_info->queue_groups[i].int_msg_num = i;
3761
3762 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3763 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3764 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3765 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3766 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3767 }
3768 }
3769
3770 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3771 {
3772 size_t alloc_length;
3773 struct pqi_admin_queues_aligned *admin_queues_aligned;
3774 struct pqi_admin_queues *admin_queues;
3775
3776 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3777 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3778
3779 ctrl_info->admin_queue_memory_base =
3780 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3781 &ctrl_info->admin_queue_memory_base_dma_handle,
3782 GFP_KERNEL);
3783
3784 if (!ctrl_info->admin_queue_memory_base)
3785 return -ENOMEM;
3786
3787 ctrl_info->admin_queue_memory_length = alloc_length;
3788
3789 admin_queues = &ctrl_info->admin_queues;
3790 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3791 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3792 admin_queues->iq_element_array =
3793 &admin_queues_aligned->iq_element_array;
3794 admin_queues->oq_element_array =
3795 &admin_queues_aligned->oq_element_array;
3796 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3797 admin_queues->oq_pi =
3798 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
3799
3800 admin_queues->iq_element_array_bus_addr =
3801 ctrl_info->admin_queue_memory_base_dma_handle +
3802 (admin_queues->iq_element_array -
3803 ctrl_info->admin_queue_memory_base);
3804 admin_queues->oq_element_array_bus_addr =
3805 ctrl_info->admin_queue_memory_base_dma_handle +
3806 (admin_queues->oq_element_array -
3807 ctrl_info->admin_queue_memory_base);
3808 admin_queues->iq_ci_bus_addr =
3809 ctrl_info->admin_queue_memory_base_dma_handle +
3810 ((void *)admin_queues->iq_ci -
3811 ctrl_info->admin_queue_memory_base);
3812 admin_queues->oq_pi_bus_addr =
3813 ctrl_info->admin_queue_memory_base_dma_handle +
3814 ((void __iomem *)admin_queues->oq_pi -
3815 (void __iomem *)ctrl_info->admin_queue_memory_base);
3816
3817 return 0;
3818 }
3819
3820 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
3821 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3822
3823 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3824 {
3825 struct pqi_device_registers __iomem *pqi_registers;
3826 struct pqi_admin_queues *admin_queues;
3827 unsigned long timeout;
3828 u8 status;
3829 u32 reg;
3830
3831 pqi_registers = ctrl_info->pqi_registers;
3832 admin_queues = &ctrl_info->admin_queues;
3833
3834 writeq((u64)admin_queues->iq_element_array_bus_addr,
3835 &pqi_registers->admin_iq_element_array_addr);
3836 writeq((u64)admin_queues->oq_element_array_bus_addr,
3837 &pqi_registers->admin_oq_element_array_addr);
3838 writeq((u64)admin_queues->iq_ci_bus_addr,
3839 &pqi_registers->admin_iq_ci_addr);
3840 writeq((u64)admin_queues->oq_pi_bus_addr,
3841 &pqi_registers->admin_oq_pi_addr);
3842
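/*
 * The element counts and MSI-X message number are packed into a single
 * register write below: bits 7:0 carry the number of admin IQ elements,
 * bits 15:8 the number of admin OQ elements, and the MSI-X message
 * number starts at bit 16 (layout as implied by the shifts used here).
 */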
3843 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3844 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3845 (admin_queues->int_msg_num << 16);
3846 writel(reg, &pqi_registers->admin_iq_num_elements);
3847 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3848 &pqi_registers->function_and_status_code);
3849
3850 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3851 while (1) {
3852 status = readb(&pqi_registers->function_and_status_code);
3853 if (status == PQI_STATUS_IDLE)
3854 break;
3855 if (time_after(jiffies, timeout))
3856 return -ETIMEDOUT;
3857 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3858 }
3859
3860 /*
3861 * The offset registers are not initialized to the correct
3862 * offsets until *after* the create admin queue pair command
3863 * completes successfully.
3864 */
3865 admin_queues->iq_pi = ctrl_info->iomem_base +
3866 PQI_DEVICE_REGISTERS_OFFSET +
3867 readq(&pqi_registers->admin_iq_pi_offset);
3868 admin_queues->oq_ci = ctrl_info->iomem_base +
3869 PQI_DEVICE_REGISTERS_OFFSET +
3870 readq(&pqi_registers->admin_oq_ci_offset);
3871
3872 return 0;
3873 }
3874
3875 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3876 struct pqi_general_admin_request *request)
3877 {
3878 struct pqi_admin_queues *admin_queues;
3879 void *next_element;
3880 pqi_index_t iq_pi;
3881
3882 admin_queues = &ctrl_info->admin_queues;
3883 iq_pi = admin_queues->iq_pi_copy;
3884
3885 next_element = admin_queues->iq_element_array +
3886 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3887
3888 memcpy(next_element, request, sizeof(*request));
3889
3890 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3891 admin_queues->iq_pi_copy = iq_pi;
3892
3893 /*
3894 * This write notifies the controller that an IU is available to be
3895 * processed.
3896 */
3897 writel(iq_pi, admin_queues->iq_pi);
3898 }
3899
3900 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3901
3902 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3903 struct pqi_general_admin_response *response)
3904 {
3905 struct pqi_admin_queues *admin_queues;
3906 pqi_index_t oq_pi;
3907 pqi_index_t oq_ci;
3908 unsigned long timeout;
3909
3910 admin_queues = &ctrl_info->admin_queues;
3911 oq_ci = admin_queues->oq_ci_copy;
3912
3913 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
3914
3915 while (1) {
3916 oq_pi = readl(admin_queues->oq_pi);
3917 if (oq_pi != oq_ci)
3918 break;
3919 if (time_after(jiffies, timeout)) {
3920 dev_err(&ctrl_info->pci_dev->dev,
3921 "timed out waiting for admin response\n");
3922 return -ETIMEDOUT;
3923 }
3924 if (!sis_is_firmware_running(ctrl_info))
3925 return -ENXIO;
3926 usleep_range(1000, 2000);
3927 }
3928
3929 memcpy(response, admin_queues->oq_element_array +
3930 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3931
3932 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3933 admin_queues->oq_ci_copy = oq_ci;
3934 writel(oq_ci, admin_queues->oq_ci);
3935
3936 return 0;
3937 }
3938
3939 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3940 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3941 struct pqi_io_request *io_request)
3942 {
3943 struct pqi_io_request *next;
3944 void *next_element;
3945 pqi_index_t iq_pi;
3946 pqi_index_t iq_ci;
3947 size_t iu_length;
3948 unsigned long flags;
3949 unsigned int num_elements_needed;
3950 unsigned int num_elements_to_end_of_queue;
3951 size_t copy_count;
3952 struct pqi_iu_header *request;
3953
3954 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3955
3956 if (io_request) {
3957 io_request->queue_group = queue_group;
3958 list_add_tail(&io_request->request_list_entry,
3959 &queue_group->request_list[path]);
3960 }
3961
3962 iq_pi = queue_group->iq_pi_copy[path];
3963
3964 list_for_each_entry_safe(io_request, next,
3965 &queue_group->request_list[path], request_list_entry) {
3966
3967 request = io_request->iu;
3968
3969 iu_length = get_unaligned_le16(&request->iu_length) +
3970 PQI_REQUEST_HEADER_LENGTH;
3971 num_elements_needed =
3972 DIV_ROUND_UP(iu_length,
3973 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3974
3975 iq_ci = readl(queue_group->iq_ci[path]);
3976
3977 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3978 ctrl_info->num_elements_per_iq))
3979 break;
3980
3981 put_unaligned_le16(queue_group->oq_id,
3982 &request->response_queue_id);
3983
3984 next_element = queue_group->iq_element_array[path] +
3985 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3986
3987 num_elements_to_end_of_queue =
3988 ctrl_info->num_elements_per_iq - iq_pi;
3989
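/*
 * Copy the IU into the circular inbound queue; if it would run past the
 * end of the element array, split the copy and wrap the remainder back
 * to the start of the array.
 */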
3990 if (num_elements_needed <= num_elements_to_end_of_queue) {
3991 memcpy(next_element, request, iu_length);
3992 } else {
3993 copy_count = num_elements_to_end_of_queue *
3994 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3995 memcpy(next_element, request, copy_count);
3996 memcpy(queue_group->iq_element_array[path],
3997 (u8 *)request + copy_count,
3998 iu_length - copy_count);
3999 }
4000
4001 iq_pi = (iq_pi + num_elements_needed) %
4002 ctrl_info->num_elements_per_iq;
4003
4004 list_del(&io_request->request_list_entry);
4005 }
4006
4007 if (iq_pi != queue_group->iq_pi_copy[path]) {
4008 queue_group->iq_pi_copy[path] = iq_pi;
4009 /*
4010 * This write notifies the controller that one or more IUs are
4011 * available to be processed.
4012 */
4013 writel(iq_pi, queue_group->iq_pi[path]);
4014 }
4015
4016 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4017 }
4018
4019 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4020
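/*
 * Wait for the completion, waking up every
 * PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS seconds to re-check controller
 * health; the timeout here is a polling interval, not a command timeout.
 * -ENXIO is returned only if the controller has gone offline.
 */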
4021 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4022 struct completion *wait)
4023 {
4024 int rc;
4025
4026 while (1) {
4027 if (wait_for_completion_io_timeout(wait,
4028 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
4029 rc = 0;
4030 break;
4031 }
4032
4033 pqi_check_ctrl_health(ctrl_info);
4034 if (pqi_ctrl_offline(ctrl_info)) {
4035 rc = -ENXIO;
4036 break;
4037 }
4038 }
4039
4040 return rc;
4041 }
4042
4043 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4044 void *context)
4045 {
4046 struct completion *waiting = context;
4047
4048 complete(waiting);
4049 }
4050
4051 static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
4052 *error_info)
4053 {
4054 int rc = -EIO;
4055
4056 switch (error_info->data_out_result) {
4057 case PQI_DATA_IN_OUT_GOOD:
4058 if (error_info->status == SAM_STAT_GOOD)
4059 rc = 0;
4060 break;
4061 case PQI_DATA_IN_OUT_UNDERFLOW:
4062 if (error_info->status == SAM_STAT_GOOD ||
4063 error_info->status == SAM_STAT_CHECK_CONDITION)
4064 rc = 0;
4065 break;
4066 case PQI_DATA_IN_OUT_ABORTED:
4067 rc = PQI_CMD_STATUS_ABORTED;
4068 break;
4069 }
4070
4071 return rc;
4072 }
4073
4074 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4075 struct pqi_iu_header *request, unsigned int flags,
4076 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
4077 {
4078 int rc = 0;
4079 struct pqi_io_request *io_request;
4080 unsigned long start_jiffies;
4081 unsigned long msecs_blocked;
4082 size_t iu_length;
4083 DECLARE_COMPLETION_ONSTACK(wait);
4084
4085 /*
4086 * Note that the PQI_SYNC_FLAGS_INTERRUPTABLE flag and a finite timeout
4087 * value are mutually exclusive: callers specify at most one of the two.
4088 */
4089
4090 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4091 if (down_interruptible(&ctrl_info->sync_request_sem))
4092 return -ERESTARTSYS;
4093 } else {
4094 if (timeout_msecs == NO_TIMEOUT) {
4095 down(&ctrl_info->sync_request_sem);
4096 } else {
4097 start_jiffies = jiffies;
4098 if (down_timeout(&ctrl_info->sync_request_sem,
4099 msecs_to_jiffies(timeout_msecs)))
4100 return -ETIMEDOUT;
4101 msecs_blocked =
4102 jiffies_to_msecs(jiffies - start_jiffies);
4103 if (msecs_blocked >= timeout_msecs) {
4104 rc = -ETIMEDOUT;
4105 goto out;
4106 }
4107 timeout_msecs -= msecs_blocked;
4108 }
4109 }
4110
4111 pqi_ctrl_busy(ctrl_info);
4112 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4113 if (timeout_msecs == 0) {
4114 pqi_ctrl_unbusy(ctrl_info);
4115 rc = -ETIMEDOUT;
4116 goto out;
4117 }
4118
4119 if (pqi_ctrl_offline(ctrl_info)) {
4120 pqi_ctrl_unbusy(ctrl_info);
4121 rc = -ENXIO;
4122 goto out;
4123 }
4124
4125 io_request = pqi_alloc_io_request(ctrl_info);
4126
4127 put_unaligned_le16(io_request->index,
4128 &(((struct pqi_raid_path_request *)request)->request_id));
4129
4130 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4131 ((struct pqi_raid_path_request *)request)->error_index =
4132 ((struct pqi_raid_path_request *)request)->request_id;
4133
4134 iu_length = get_unaligned_le16(&request->iu_length) +
4135 PQI_REQUEST_HEADER_LENGTH;
4136 memcpy(io_request->iu, request, iu_length);
4137
4138 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4139 io_request->context = &wait;
4140
4141 pqi_start_io(ctrl_info,
4142 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4143 io_request);
4144
4145 pqi_ctrl_unbusy(ctrl_info);
4146
4147 if (timeout_msecs == NO_TIMEOUT) {
4148 pqi_wait_for_completion_io(ctrl_info, &wait);
4149 } else {
4150 if (!wait_for_completion_io_timeout(&wait,
4151 msecs_to_jiffies(timeout_msecs))) {
4152 dev_warn(&ctrl_info->pci_dev->dev,
4153 "command timed out\n");
4154 rc = -ETIMEDOUT;
4155 }
4156 }
4157
4158 if (error_info) {
4159 if (io_request->error_info)
4160 memcpy(error_info, io_request->error_info,
4161 sizeof(*error_info));
4162 else
4163 memset(error_info, 0, sizeof(*error_info));
4164 } else if (rc == 0 && io_request->error_info) {
4165 rc = pqi_process_raid_io_error_synchronous(
4166 io_request->error_info);
4167 }
4168
4169 pqi_free_io_request(io_request);
4170
4171 out:
4172 up(&ctrl_info->sync_request_sem);
4173
4174 return rc;
4175 }
4176
4177 static int pqi_validate_admin_response(
4178 struct pqi_general_admin_response *response, u8 expected_function_code)
4179 {
4180 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4181 return -EINVAL;
4182
4183 if (get_unaligned_le16(&response->header.iu_length) !=
4184 PQI_GENERAL_ADMIN_IU_LENGTH)
4185 return -EINVAL;
4186
4187 if (response->function_code != expected_function_code)
4188 return -EINVAL;
4189
4190 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4191 return -EINVAL;
4192
4193 return 0;
4194 }
4195
4196 static int pqi_submit_admin_request_synchronous(
4197 struct pqi_ctrl_info *ctrl_info,
4198 struct pqi_general_admin_request *request,
4199 struct pqi_general_admin_response *response)
4200 {
4201 int rc;
4202
4203 pqi_submit_admin_request(ctrl_info, request);
4204
4205 rc = pqi_poll_for_admin_response(ctrl_info, response);
4206
4207 if (rc == 0)
4208 rc = pqi_validate_admin_response(response,
4209 request->function_code);
4210
4211 return rc;
4212 }
4213
4214 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4215 {
4216 int rc;
4217 struct pqi_general_admin_request request;
4218 struct pqi_general_admin_response response;
4219 struct pqi_device_capability *capability;
4220 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4221
4222 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4223 if (!capability)
4224 return -ENOMEM;
4225
4226 memset(&request, 0, sizeof(request));
4227
4228 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4229 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4230 &request.header.iu_length);
4231 request.function_code =
4232 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4233 put_unaligned_le32(sizeof(*capability),
4234 &request.data.report_device_capability.buffer_length);
4235
4236 rc = pqi_map_single(ctrl_info->pci_dev,
4237 &request.data.report_device_capability.sg_descriptor,
4238 capability, sizeof(*capability),
4239 DMA_FROM_DEVICE);
4240 if (rc)
4241 goto out;
4242
4243 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4244 &response);
4245
4246 pqi_pci_unmap(ctrl_info->pci_dev,
4247 &request.data.report_device_capability.sg_descriptor, 1,
4248 DMA_FROM_DEVICE);
4249
4250 if (rc)
4251 goto out;
4252
4253 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4254 rc = -EIO;
4255 goto out;
4256 }
4257
4258 ctrl_info->max_inbound_queues =
4259 get_unaligned_le16(&capability->max_inbound_queues);
4260 ctrl_info->max_elements_per_iq =
4261 get_unaligned_le16(&capability->max_elements_per_iq);
4262 ctrl_info->max_iq_element_length =
4263 get_unaligned_le16(&capability->max_iq_element_length)
4264 * 16;
4265 ctrl_info->max_outbound_queues =
4266 get_unaligned_le16(&capability->max_outbound_queues);
4267 ctrl_info->max_elements_per_oq =
4268 get_unaligned_le16(&capability->max_elements_per_oq);
4269 ctrl_info->max_oq_element_length =
4270 get_unaligned_le16(&capability->max_oq_element_length)
4271 * 16;
4272
4273 sop_iu_layer_descriptor =
4274 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4275
4276 ctrl_info->max_inbound_iu_length_per_firmware =
4277 get_unaligned_le16(
4278 &sop_iu_layer_descriptor->max_inbound_iu_length);
4279 ctrl_info->inbound_spanning_supported =
4280 sop_iu_layer_descriptor->inbound_spanning_supported;
4281 ctrl_info->outbound_spanning_supported =
4282 sop_iu_layer_descriptor->outbound_spanning_supported;
4283
4284 out:
4285 kfree(capability);
4286
4287 return rc;
4288 }
4289
4290 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4291 {
4292 if (ctrl_info->max_iq_element_length <
4293 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4294 dev_err(&ctrl_info->pci_dev->dev,
4295 "max. inbound queue element length of %d is less than the required length of %d\n",
4296 ctrl_info->max_iq_element_length,
4297 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4298 return -EINVAL;
4299 }
4300
4301 if (ctrl_info->max_oq_element_length <
4302 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4303 dev_err(&ctrl_info->pci_dev->dev,
4304 "max. outbound queue element length of %d is less than the required length of %d\n",
4305 ctrl_info->max_oq_element_length,
4306 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4307 return -EINVAL;
4308 }
4309
4310 if (ctrl_info->max_inbound_iu_length_per_firmware <
4311 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4312 dev_err(&ctrl_info->pci_dev->dev,
4313 "max. inbound IU length of %u is less than the min. required length of %d\n",
4314 ctrl_info->max_inbound_iu_length_per_firmware,
4315 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4316 return -EINVAL;
4317 }
4318
4319 if (!ctrl_info->inbound_spanning_supported) {
4320 dev_err(&ctrl_info->pci_dev->dev,
4321 "the controller does not support inbound spanning\n");
4322 return -EINVAL;
4323 }
4324
4325 if (ctrl_info->outbound_spanning_supported) {
4326 dev_err(&ctrl_info->pci_dev->dev,
4327 "the controller supports outbound spanning but this driver does not\n");
4328 return -EINVAL;
4329 }
4330
4331 return 0;
4332 }
4333
4334 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4335 {
4336 int rc;
4337 struct pqi_event_queue *event_queue;
4338 struct pqi_general_admin_request request;
4339 struct pqi_general_admin_response response;
4340
4341 event_queue = &ctrl_info->event_queue;
4342
4343 /*
4344 * Create the OQ (Outbound Queue - device to host queue) dedicated
4345 * to event notifications.
4346 */
4347 memset(&request, 0, sizeof(request));
4348 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4349 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4350 &request.header.iu_length);
4351 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4352 put_unaligned_le16(event_queue->oq_id,
4353 &request.data.create_operational_oq.queue_id);
4354 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4355 &request.data.create_operational_oq.element_array_addr);
4356 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4357 &request.data.create_operational_oq.pi_addr);
4358 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4359 &request.data.create_operational_oq.num_elements);
4360 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4361 &request.data.create_operational_oq.element_length);
4362 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4363 put_unaligned_le16(event_queue->int_msg_num,
4364 &request.data.create_operational_oq.int_msg_num);
4365
4366 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4367 &response);
4368 if (rc)
4369 return rc;
4370
4371 event_queue->oq_ci = ctrl_info->iomem_base +
4372 PQI_DEVICE_REGISTERS_OFFSET +
4373 get_unaligned_le64(
4374 &response.data.create_operational_oq.oq_ci_offset);
4375
4376 return 0;
4377 }
4378
4379 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4380 unsigned int group_number)
4381 {
4382 int rc;
4383 struct pqi_queue_group *queue_group;
4384 struct pqi_general_admin_request request;
4385 struct pqi_general_admin_response response;
4386
4387 queue_group = &ctrl_info->queue_groups[group_number];
4388
4389 /*
4390 * Create IQ (Inbound Queue - host to device queue) for
4391 * RAID path.
4392 */
4393 memset(&request, 0, sizeof(request));
4394 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4395 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4396 &request.header.iu_length);
4397 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4398 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4399 &request.data.create_operational_iq.queue_id);
4400 put_unaligned_le64(
4401 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4402 &request.data.create_operational_iq.element_array_addr);
4403 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4404 &request.data.create_operational_iq.ci_addr);
4405 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4406 &request.data.create_operational_iq.num_elements);
4407 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4408 &request.data.create_operational_iq.element_length);
4409 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4410
4411 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4412 &response);
4413 if (rc) {
4414 dev_err(&ctrl_info->pci_dev->dev,
4415 "error creating inbound RAID queue\n");
4416 return rc;
4417 }
4418
4419 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4420 PQI_DEVICE_REGISTERS_OFFSET +
4421 get_unaligned_le64(
4422 &response.data.create_operational_iq.iq_pi_offset);
4423
4424 /*
4425 * Create IQ (Inbound Queue - host to device queue) for
4426 * Advanced I/O (AIO) path.
4427 */
4428 memset(&request, 0, sizeof(request));
4429 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4430 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4431 &request.header.iu_length);
4432 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4433 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4434 &request.data.create_operational_iq.queue_id);
4435 put_unaligned_le64((u64)queue_group->
4436 iq_element_array_bus_addr[AIO_PATH],
4437 &request.data.create_operational_iq.element_array_addr);
4438 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4439 &request.data.create_operational_iq.ci_addr);
4440 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4441 &request.data.create_operational_iq.num_elements);
4442 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4443 &request.data.create_operational_iq.element_length);
4444 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4445
4446 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4447 &response);
4448 if (rc) {
4449 dev_err(&ctrl_info->pci_dev->dev,
4450 "error creating inbound AIO queue\n");
4451 return rc;
4452 }
4453
4454 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4455 PQI_DEVICE_REGISTERS_OFFSET +
4456 get_unaligned_le64(
4457 &response.data.create_operational_iq.iq_pi_offset);
4458
4459 /*
4460 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4461 * assumed to be for RAID path I/O unless we change the queue's
4462 * property.
4463 */
4464 memset(&request, 0, sizeof(request));
4465 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4466 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4467 &request.header.iu_length);
4468 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4469 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4470 &request.data.change_operational_iq_properties.queue_id);
4471 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4472 &request.data.change_operational_iq_properties.vendor_specific);
4473
4474 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4475 &response);
4476 if (rc) {
4477 dev_err(&ctrl_info->pci_dev->dev,
4478 "error changing queue property\n");
4479 return rc;
4480 }
4481
4482 /*
4483 * Create OQ (Outbound Queue - device to host queue).
4484 */
4485 memset(&request, 0, sizeof(request));
4486 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4487 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4488 &request.header.iu_length);
4489 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4490 put_unaligned_le16(queue_group->oq_id,
4491 &request.data.create_operational_oq.queue_id);
4492 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4493 &request.data.create_operational_oq.element_array_addr);
4494 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4495 &request.data.create_operational_oq.pi_addr);
4496 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4497 &request.data.create_operational_oq.num_elements);
4498 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4499 &request.data.create_operational_oq.element_length);
4500 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4501 put_unaligned_le16(queue_group->int_msg_num,
4502 &request.data.create_operational_oq.int_msg_num);
4503
4504 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4505 &response);
4506 if (rc) {
4507 dev_err(&ctrl_info->pci_dev->dev,
4508 "error creating outbound queue\n");
4509 return rc;
4510 }
4511
4512 queue_group->oq_ci = ctrl_info->iomem_base +
4513 PQI_DEVICE_REGISTERS_OFFSET +
4514 get_unaligned_le64(
4515 &response.data.create_operational_oq.oq_ci_offset);
4516
4517 return 0;
4518 }
4519
4520 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4521 {
4522 int rc;
4523 unsigned int i;
4524
4525 rc = pqi_create_event_queue(ctrl_info);
4526 if (rc) {
4527 dev_err(&ctrl_info->pci_dev->dev,
4528 "error creating event queue\n");
4529 return rc;
4530 }
4531
4532 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4533 rc = pqi_create_queue_group(ctrl_info, i);
4534 if (rc) {
4535 dev_err(&ctrl_info->pci_dev->dev,
4536 "error creating queue group number %u/%u\n",
4537 i, ctrl_info->num_queue_groups);
4538 return rc;
4539 }
4540 }
4541
4542 return 0;
4543 }
4544
4545 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4546 (offsetof(struct pqi_event_config, descriptors) + \
4547 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4548
4549 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4550 bool enable_events)
4551 {
4552 int rc;
4553 unsigned int i;
4554 struct pqi_event_config *event_config;
4555 struct pqi_event_descriptor *event_descriptor;
4556 struct pqi_general_management_request request;
4557
4558 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4559 GFP_KERNEL);
4560 if (!event_config)
4561 return -ENOMEM;
4562
4563 memset(&request, 0, sizeof(request));
4564
4565 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4566 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4567 data.report_event_configuration.sg_descriptors[1]) -
4568 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4569 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4570 &request.data.report_event_configuration.buffer_length);
4571
4572 rc = pqi_map_single(ctrl_info->pci_dev,
4573 request.data.report_event_configuration.sg_descriptors,
4574 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4575 DMA_FROM_DEVICE);
4576 if (rc)
4577 goto out;
4578
4579 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4580 0, NULL, NO_TIMEOUT);
4581
4582 pqi_pci_unmap(ctrl_info->pci_dev,
4583 request.data.report_event_configuration.sg_descriptors, 1,
4584 DMA_FROM_DEVICE);
4585
4586 if (rc)
4587 goto out;
4588
4589 for (i = 0; i < event_config->num_event_descriptors; i++) {
4590 event_descriptor = &event_config->descriptors[i];
4591 if (enable_events &&
4592 pqi_is_supported_event(event_descriptor->event_type))
4593 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4594 &event_descriptor->oq_id);
4595 else
4596 put_unaligned_le16(0, &event_descriptor->oq_id);
4597 }
4598
4599 memset(&request, 0, sizeof(request));
4600
4601 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4602 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4603 data.report_event_configuration.sg_descriptors[1]) -
4604 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4605 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4606 &request.data.report_event_configuration.buffer_length);
4607
4608 rc = pqi_map_single(ctrl_info->pci_dev,
4609 request.data.report_event_configuration.sg_descriptors,
4610 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4611 DMA_TO_DEVICE);
4612 if (rc)
4613 goto out;
4614
4615 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4616 NULL, NO_TIMEOUT);
4617
4618 pqi_pci_unmap(ctrl_info->pci_dev,
4619 request.data.report_event_configuration.sg_descriptors, 1,
4620 DMA_TO_DEVICE);
4621
4622 out:
4623 kfree(event_config);
4624
4625 return rc;
4626 }
4627
4628 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4629 {
4630 return pqi_configure_events(ctrl_info, true);
4631 }
4632
4633 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4634 {
4635 return pqi_configure_events(ctrl_info, false);
4636 }
4637
4638 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4639 {
4640 unsigned int i;
4641 struct device *dev;
4642 size_t sg_chain_buffer_length;
4643 struct pqi_io_request *io_request;
4644
4645 if (!ctrl_info->io_request_pool)
4646 return;
4647
4648 dev = &ctrl_info->pci_dev->dev;
4649 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4650 io_request = ctrl_info->io_request_pool;
4651
4652 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4653 kfree(io_request->iu);
4654 if (!io_request->sg_chain_buffer)
4655 break;
4656 dma_free_coherent(dev, sg_chain_buffer_length,
4657 io_request->sg_chain_buffer,
4658 io_request->sg_chain_buffer_dma_handle);
4659 io_request++;
4660 }
4661
4662 kfree(ctrl_info->io_request_pool);
4663 ctrl_info->io_request_pool = NULL;
4664 }
4665
4666 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4667 {
4668 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4669 ctrl_info->error_buffer_length,
4670 &ctrl_info->error_buffer_dma_handle,
4671 GFP_KERNEL);
4672
4673 if (!ctrl_info->error_buffer)
4674 return -ENOMEM;
4675
4676 return 0;
4677 }
4678
4679 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4680 {
4681 unsigned int i;
4682 void *sg_chain_buffer;
4683 size_t sg_chain_buffer_length;
4684 dma_addr_t sg_chain_buffer_dma_handle;
4685 struct device *dev;
4686 struct pqi_io_request *io_request;
4687
4688 ctrl_info->io_request_pool =
4689 kcalloc(ctrl_info->max_io_slots,
4690 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4691
4692 if (!ctrl_info->io_request_pool) {
4693 dev_err(&ctrl_info->pci_dev->dev,
4694 "failed to allocate I/O request pool\n");
4695 goto error;
4696 }
4697
4698 dev = &ctrl_info->pci_dev->dev;
4699 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4700 io_request = ctrl_info->io_request_pool;
4701
4702 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4703 io_request->iu =
4704 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4705
4706 if (!io_request->iu) {
4707 dev_err(&ctrl_info->pci_dev->dev,
4708 "failed to allocate IU buffers\n");
4709 goto error;
4710 }
4711
4712 sg_chain_buffer = dma_alloc_coherent(dev,
4713 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4714 GFP_KERNEL);
4715
4716 if (!sg_chain_buffer) {
4717 dev_err(&ctrl_info->pci_dev->dev,
4718 "failed to allocate PQI scatter-gather chain buffers\n");
4719 goto error;
4720 }
4721
4722 io_request->index = i;
4723 io_request->sg_chain_buffer = sg_chain_buffer;
4724 io_request->sg_chain_buffer_dma_handle =
4725 sg_chain_buffer_dma_handle;
4726 io_request++;
4727 }
4728
4729 return 0;
4730
4731 error:
4732 pqi_free_all_io_requests(ctrl_info);
4733
4734 return -ENOMEM;
4735 }
4736
4737 /*
4738 * Calculate required resources that are sized based on max. outstanding
4739 * requests and max. transfer size.
4740 */
4741
4742 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4743 {
4744 u32 max_transfer_size;
4745 u32 max_sg_entries;
4746
4747 ctrl_info->scsi_ml_can_queue =
4748 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4749 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4750
4751 ctrl_info->error_buffer_length =
4752 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4753
4754 if (reset_devices)
4755 max_transfer_size = min(ctrl_info->max_transfer_size,
4756 PQI_MAX_TRANSFER_SIZE_KDUMP);
4757 else
4758 max_transfer_size = min(ctrl_info->max_transfer_size,
4759 PQI_MAX_TRANSFER_SIZE);
4760
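/*
 * Rough sizing example (assuming 4 KiB pages and a 1 MiB transfer cap):
 * 1 MiB / 4 KiB = 256 SG entries, plus 1 below for a buffer that is not
 * page-aligned, giving 257. After clamping to the controller's
 * max_sg_entries, max_transfer_size is recomputed as
 * (max_sg_entries - 1) * PAGE_SIZE so that the advertised limit is
 * always reachable with the available SG slots.
 */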
4761 max_sg_entries = max_transfer_size / PAGE_SIZE;
4762
4763 /* +1 to cover when the buffer is not page-aligned. */
4764 max_sg_entries++;
4765
4766 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4767
4768 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4769
4770 ctrl_info->sg_chain_buffer_length =
4771 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4772 PQI_EXTRA_SGL_MEMORY;
4773 ctrl_info->sg_tablesize = max_sg_entries;
4774 ctrl_info->max_sectors = max_transfer_size / 512;
4775 }
4776
4777 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4778 {
4779 int num_queue_groups;
4780 u16 num_elements_per_iq;
4781 u16 num_elements_per_oq;
4782
4783 if (reset_devices) {
4784 num_queue_groups = 1;
4785 } else {
4786 int num_cpus;
4787 int max_queue_groups;
4788
4789 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4790 ctrl_info->max_outbound_queues - 1);
4791 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4792
4793 num_cpus = num_online_cpus();
4794 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4795 num_queue_groups = min(num_queue_groups, max_queue_groups);
4796 }
4797
4798 ctrl_info->num_queue_groups = num_queue_groups;
4799 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4800
4801 /*
4802 * Make sure that the max. inbound IU length is an even multiple
4803 * of our inbound element length.
4804 */
4805 ctrl_info->max_inbound_iu_length =
4806 (ctrl_info->max_inbound_iu_length_per_firmware /
4807 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4808 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4809
4810 num_elements_per_iq =
4811 (ctrl_info->max_inbound_iu_length /
4812 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4813
4814 /* Add one because one element in each queue is unusable. */
4815 num_elements_per_iq++;
4816
4817 num_elements_per_iq = min(num_elements_per_iq,
4818 ctrl_info->max_elements_per_iq);
4819
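/*
 * Each queue group has two inbound queues (RAID and AIO) completing into
 * a single outbound queue, so the OQ is sized for roughly twice the
 * usable IQ depth plus the one unusable element.
 */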
4820 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4821 num_elements_per_oq = min(num_elements_per_oq,
4822 ctrl_info->max_elements_per_oq);
4823
4824 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4825 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4826
4827 ctrl_info->max_sg_per_iu =
4828 ((ctrl_info->max_inbound_iu_length -
4829 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4830 sizeof(struct pqi_sg_descriptor)) +
4831 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4832 }
4833
4834 static inline void pqi_set_sg_descriptor(
4835 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4836 {
4837 u64 address = (u64)sg_dma_address(sg);
4838 unsigned int length = sg_dma_len(sg);
4839
4840 put_unaligned_le64(address, &sg_descriptor->address);
4841 put_unaligned_le32(length, &sg_descriptor->length);
4842 put_unaligned_le32(0, &sg_descriptor->flags);
4843 }
4844
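/*
 * Build the SG list for a RAID path request. Up to max_sg_per_iu - 1
 * descriptors are embedded directly in the IU; if more are needed, the
 * last embedded slot becomes a CISS_SG_CHAIN descriptor pointing at the
 * per-request DMA chain buffer and the remaining entries are written
 * there. The final descriptor is flagged CISS_SG_LAST.
 */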
4845 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4846 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4847 struct pqi_io_request *io_request)
4848 {
4849 int i;
4850 u16 iu_length;
4851 int sg_count;
4852 bool chained;
4853 unsigned int num_sg_in_iu;
4854 unsigned int max_sg_per_iu;
4855 struct scatterlist *sg;
4856 struct pqi_sg_descriptor *sg_descriptor;
4857
4858 sg_count = scsi_dma_map(scmd);
4859 if (sg_count < 0)
4860 return sg_count;
4861
4862 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4863 PQI_REQUEST_HEADER_LENGTH;
4864
4865 if (sg_count == 0)
4866 goto out;
4867
4868 sg = scsi_sglist(scmd);
4869 sg_descriptor = request->sg_descriptors;
4870 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4871 chained = false;
4872 num_sg_in_iu = 0;
4873 i = 0;
4874
4875 while (1) {
4876 pqi_set_sg_descriptor(sg_descriptor, sg);
4877 if (!chained)
4878 num_sg_in_iu++;
4879 i++;
4880 if (i == sg_count)
4881 break;
4882 sg_descriptor++;
4883 if (i == max_sg_per_iu) {
4884 put_unaligned_le64(
4885 (u64)io_request->sg_chain_buffer_dma_handle,
4886 &sg_descriptor->address);
4887 put_unaligned_le32((sg_count - num_sg_in_iu)
4888 * sizeof(*sg_descriptor),
4889 &sg_descriptor->length);
4890 put_unaligned_le32(CISS_SG_CHAIN,
4891 &sg_descriptor->flags);
4892 chained = true;
4893 num_sg_in_iu++;
4894 sg_descriptor = io_request->sg_chain_buffer;
4895 }
4896 sg = sg_next(sg);
4897 }
4898
4899 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4900 request->partial = chained;
4901 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4902
4903 out:
4904 put_unaligned_le16(iu_length, &request->header.iu_length);
4905
4906 return 0;
4907 }
4908
4909 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4910 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4911 struct pqi_io_request *io_request)
4912 {
4913 int i;
4914 u16 iu_length;
4915 int sg_count;
4916 bool chained;
4917 unsigned int num_sg_in_iu;
4918 unsigned int max_sg_per_iu;
4919 struct scatterlist *sg;
4920 struct pqi_sg_descriptor *sg_descriptor;
4921
4922 sg_count = scsi_dma_map(scmd);
4923 if (sg_count < 0)
4924 return sg_count;
4925
4926 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4927 PQI_REQUEST_HEADER_LENGTH;
4928 num_sg_in_iu = 0;
4929
4930 if (sg_count == 0)
4931 goto out;
4932
4933 sg = scsi_sglist(scmd);
4934 sg_descriptor = request->sg_descriptors;
4935 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4936 chained = false;
4937 i = 0;
4938
4939 while (1) {
4940 pqi_set_sg_descriptor(sg_descriptor, sg);
4941 if (!chained)
4942 num_sg_in_iu++;
4943 i++;
4944 if (i == sg_count)
4945 break;
4946 sg_descriptor++;
4947 if (i == max_sg_per_iu) {
4948 put_unaligned_le64(
4949 (u64)io_request->sg_chain_buffer_dma_handle,
4950 &sg_descriptor->address);
4951 put_unaligned_le32((sg_count - num_sg_in_iu)
4952 * sizeof(*sg_descriptor),
4953 &sg_descriptor->length);
4954 put_unaligned_le32(CISS_SG_CHAIN,
4955 &sg_descriptor->flags);
4956 chained = true;
4957 num_sg_in_iu++;
4958 sg_descriptor = io_request->sg_chain_buffer;
4959 }
4960 sg = sg_next(sg);
4961 }
4962
4963 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4964 request->partial = chained;
4965 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4966
4967 out:
4968 put_unaligned_le16(iu_length, &request->header.iu_length);
4969 request->num_sg_descriptors = num_sg_in_iu;
4970
4971 return 0;
4972 }
4973
4974 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4975 void *context)
4976 {
4977 struct scsi_cmnd *scmd;
4978
4979 scmd = io_request->scmd;
4980 pqi_free_io_request(io_request);
4981 scsi_dma_unmap(scmd);
4982 pqi_scsi_done(scmd);
4983 }
4984
4985 static int pqi_raid_submit_scsi_cmd_with_io_request(
4986 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
4987 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4988 struct pqi_queue_group *queue_group)
4989 {
4990 int rc;
4991 size_t cdb_length;
4992 struct pqi_raid_path_request *request;
4993
4994 io_request->io_complete_callback = pqi_raid_io_complete;
4995 io_request->scmd = scmd;
4996
4997 request = io_request->iu;
4998 memset(request, 0,
4999 offsetof(struct pqi_raid_path_request, sg_descriptors));
5000
5001 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5002 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5003 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5004 put_unaligned_le16(io_request->index, &request->request_id);
5005 request->error_index = request->request_id;
5006 memcpy(request->lun_number, device->scsi3addr,
5007 sizeof(request->lun_number));
5008
5009 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5010 memcpy(request->cdb, scmd->cmnd, cdb_length);
5011
5012 switch (cdb_length) {
5013 case 6:
5014 case 10:
5015 case 12:
5016 case 16:
5017 /* No bytes in the Additional CDB bytes field */
5018 request->additional_cdb_bytes_usage =
5019 SOP_ADDITIONAL_CDB_BYTES_0;
5020 break;
5021 case 20:
5022 /* 4 bytes in the Additional cdb field */
5023 request->additional_cdb_bytes_usage =
5024 SOP_ADDITIONAL_CDB_BYTES_4;
5025 break;
5026 case 24:
5027 /* 8 bytes in the Additional cdb field */
5028 request->additional_cdb_bytes_usage =
5029 SOP_ADDITIONAL_CDB_BYTES_8;
5030 break;
5031 case 28:
5032 /* 12 bytes in the Additional cdb field */
5033 request->additional_cdb_bytes_usage =
5034 SOP_ADDITIONAL_CDB_BYTES_12;
5035 break;
5036 case 32:
5037 default:
5038 /* 16 bytes in the Additional cdb field */
5039 request->additional_cdb_bytes_usage =
5040 SOP_ADDITIONAL_CDB_BYTES_16;
5041 break;
5042 }
5043
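/*
 * The SOP data-direction flags appear to be named from the controller's
 * point of view: a host write (DMA_TO_DEVICE) maps to SOP_READ_FLAG
 * because the controller reads the host buffer, and a host read maps to
 * SOP_WRITE_FLAG.
 */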
5044 switch (scmd->sc_data_direction) {
5045 case DMA_TO_DEVICE:
5046 request->data_direction = SOP_READ_FLAG;
5047 break;
5048 case DMA_FROM_DEVICE:
5049 request->data_direction = SOP_WRITE_FLAG;
5050 break;
5051 case DMA_NONE:
5052 request->data_direction = SOP_NO_DIRECTION_FLAG;
5053 break;
5054 case DMA_BIDIRECTIONAL:
5055 request->data_direction = SOP_BIDIRECTIONAL;
5056 break;
5057 default:
5058 dev_err(&ctrl_info->pci_dev->dev,
5059 "unknown data direction: %d\n",
5060 scmd->sc_data_direction);
5061 break;
5062 }
5063
5064 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5065 if (rc) {
5066 pqi_free_io_request(io_request);
5067 return SCSI_MLQUEUE_HOST_BUSY;
5068 }
5069
5070 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5071
5072 return 0;
5073 }
5074
5075 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5076 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5077 struct pqi_queue_group *queue_group)
5078 {
5079 struct pqi_io_request *io_request;
5080
5081 io_request = pqi_alloc_io_request(ctrl_info);
5082
5083 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5084 device, scmd, queue_group);
5085 }
5086
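/*
 * RAID bypass retry support: when a request that was sent down the AIO
 * path on behalf of a logical volume fails, it is queued on
 * raid_bypass_retry_list and resubmitted down the normal RAID path by
 * the retry worker, unless the command actually succeeded or the device
 * or controller has gone offline.
 */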
5087 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
5088 {
5089 if (!pqi_ctrl_blocked(ctrl_info))
5090 schedule_work(&ctrl_info->raid_bypass_retry_work);
5091 }
5092
5093 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5094 {
5095 struct scsi_cmnd *scmd;
5096 struct pqi_scsi_dev *device;
5097 struct pqi_ctrl_info *ctrl_info;
5098
5099 if (!io_request->raid_bypass)
5100 return false;
5101
5102 scmd = io_request->scmd;
5103 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5104 return false;
5105 if (host_byte(scmd->result) == DID_NO_CONNECT)
5106 return false;
5107
5108 device = scmd->device->hostdata;
5109 if (pqi_device_offline(device))
5110 return false;
5111
5112 ctrl_info = shost_to_hba(scmd->device->host);
5113 if (pqi_ctrl_offline(ctrl_info))
5114 return false;
5115
5116 return true;
5117 }
5118
5119 static inline void pqi_add_to_raid_bypass_retry_list(
5120 struct pqi_ctrl_info *ctrl_info,
5121 struct pqi_io_request *io_request, bool at_head)
5122 {
5123 unsigned long flags;
5124
5125 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5126 if (at_head)
5127 list_add(&io_request->request_list_entry,
5128 &ctrl_info->raid_bypass_retry_list);
5129 else
5130 list_add_tail(&io_request->request_list_entry,
5131 &ctrl_info->raid_bypass_retry_list);
5132 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5133 }
5134
5135 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
5136 void *context)
5137 {
5138 struct scsi_cmnd *scmd;
5139
5140 scmd = io_request->scmd;
5141 pqi_free_io_request(io_request);
5142 pqi_scsi_done(scmd);
5143 }
5144
5145 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
5146 {
5147 struct scsi_cmnd *scmd;
5148 struct pqi_ctrl_info *ctrl_info;
5149
5150 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
5151 scmd = io_request->scmd;
5152 scmd->result = 0;
5153 ctrl_info = shost_to_hba(scmd->device->host);
5154
5155 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
5156 pqi_schedule_bypass_retry(ctrl_info);
5157 }
5158
5159 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
5160 {
5161 struct scsi_cmnd *scmd;
5162 struct pqi_scsi_dev *device;
5163 struct pqi_ctrl_info *ctrl_info;
5164 struct pqi_queue_group *queue_group;
5165
5166 scmd = io_request->scmd;
5167 device = scmd->device->hostdata;
5168 if (pqi_device_in_reset(device)) {
5169 pqi_free_io_request(io_request);
5170 set_host_byte(scmd, DID_RESET);
5171 pqi_scsi_done(scmd);
5172 return 0;
5173 }
5174
5175 ctrl_info = shost_to_hba(scmd->device->host);
5176 queue_group = io_request->queue_group;
5177
5178 pqi_reinit_io_request(io_request);
5179
5180 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5181 device, scmd, queue_group);
5182 }
5183
5184 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
5185 struct pqi_ctrl_info *ctrl_info)
5186 {
5187 unsigned long flags;
5188 struct pqi_io_request *io_request;
5189
5190 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5191 io_request = list_first_entry_or_null(
5192 &ctrl_info->raid_bypass_retry_list,
5193 struct pqi_io_request, request_list_entry);
5194 if (io_request)
5195 list_del(&io_request->request_list_entry);
5196 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5197
5198 return io_request;
5199 }
5200
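/*
 * Drain the bypass retry list, resubmitting each request on the RAID path.
 * If a resubmission fails, the request is put back at the head of the list
 * and the retry worker is rescheduled.
 */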
5201 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
5202 {
5203 int rc;
5204 struct pqi_io_request *io_request;
5205
5206 pqi_ctrl_busy(ctrl_info);
5207
5208 while (1) {
5209 if (pqi_ctrl_blocked(ctrl_info))
5210 break;
5211 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
5212 if (!io_request)
5213 break;
5214 rc = pqi_retry_raid_bypass(io_request);
5215 if (rc) {
5216 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
5217 true);
5218 pqi_schedule_bypass_retry(ctrl_info);
5219 break;
5220 }
5221 }
5222
5223 pqi_ctrl_unbusy(ctrl_info);
5224 }
5225
5226 static void pqi_raid_bypass_retry_worker(struct work_struct *work)
5227 {
5228 struct pqi_ctrl_info *ctrl_info;
5229
5230 ctrl_info = container_of(work, struct pqi_ctrl_info,
5231 raid_bypass_retry_work);
5232 pqi_retry_raid_bypass_requests(ctrl_info);
5233 }
5234
5235 static void pqi_clear_all_queued_raid_bypass_retries(
5236 struct pqi_ctrl_info *ctrl_info)
5237 {
5238 unsigned long flags;
5239
5240 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5241 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
5242 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5243 }
5244
5245 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5246 void *context)
5247 {
5248 struct scsi_cmnd *scmd;
5249
5250 scmd = io_request->scmd;
5251 scsi_dma_unmap(scmd);
5252 if (io_request->status == -EAGAIN)
5253 set_host_byte(scmd, DID_IMM_RETRY);
5254 else if (pqi_raid_bypass_retry_needed(io_request)) {
5255 pqi_queue_raid_bypass_retry(io_request);
5256 return;
5257 }
5258 pqi_free_io_request(io_request);
5259 pqi_scsi_done(scmd);
5260 }
5261
5262 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5263 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5264 struct pqi_queue_group *queue_group)
5265 {
5266 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5267 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
5268 }
5269
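/*
 * Build an AIO path request for the given I/O nexus (CDB, data direction,
 * optional encryption parameters and scatter-gather list) and post it to the
 * AIO submission queue of the selected queue group.
 */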
5270 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5271 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5272 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5273 struct pqi_encryption_info *encryption_info, bool raid_bypass)
5274 {
5275 int rc;
5276 struct pqi_io_request *io_request;
5277 struct pqi_aio_path_request *request;
5278
5279 io_request = pqi_alloc_io_request(ctrl_info);
5280 io_request->io_complete_callback = pqi_aio_io_complete;
5281 io_request->scmd = scmd;
5282 io_request->raid_bypass = raid_bypass;
5283
5284 request = io_request->iu;
5285 memset(request, 0,
5286 offsetof(struct pqi_raid_path_request, sg_descriptors));
5287
5288 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5289 put_unaligned_le32(aio_handle, &request->nexus_id);
5290 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5291 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5292 put_unaligned_le16(io_request->index, &request->request_id);
5293 request->error_index = request->request_id;
5294 if (cdb_length > sizeof(request->cdb))
5295 cdb_length = sizeof(request->cdb);
5296 request->cdb_length = cdb_length;
5297 memcpy(request->cdb, cdb, cdb_length);
5298
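/*
 * The SOP direction flags describe the transfer from the controller's
 * perspective: DMA_TO_DEVICE (a host write) maps to SOP_READ_FLAG and
 * DMA_FROM_DEVICE (a host read) maps to SOP_WRITE_FLAG.
 */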
5299 switch (scmd->sc_data_direction) {
5300 case DMA_TO_DEVICE:
5301 request->data_direction = SOP_READ_FLAG;
5302 break;
5303 case DMA_FROM_DEVICE:
5304 request->data_direction = SOP_WRITE_FLAG;
5305 break;
5306 case DMA_NONE:
5307 request->data_direction = SOP_NO_DIRECTION_FLAG;
5308 break;
5309 case DMA_BIDIRECTIONAL:
5310 request->data_direction = SOP_BIDIRECTIONAL;
5311 break;
5312 default:
5313 dev_err(&ctrl_info->pci_dev->dev,
5314 "unknown data direction: %d\n",
5315 scmd->sc_data_direction);
5316 break;
5317 }
5318
5319 if (encryption_info) {
5320 request->encryption_enable = true;
5321 put_unaligned_le16(encryption_info->data_encryption_key_index,
5322 &request->data_encryption_key_index);
5323 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5324 &request->encrypt_tweak_lower);
5325 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5326 &request->encrypt_tweak_upper);
5327 }
5328
5329 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5330 if (rc) {
5331 pqi_free_io_request(io_request);
5332 return SCSI_MLQUEUE_HOST_BUSY;
5333 }
5334
5335 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5336
5337 return 0;
5338 }
5339
5340 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5341 struct scsi_cmnd *scmd)
5342 {
5343 u16 hw_queue;
5344
5345 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5346 if (hw_queue > ctrl_info->max_hw_queue_index)
5347 hw_queue = 0;
5348
5349 return hw_queue;
5350 }
5351
5352 /*
5353 * This function gets called just before we hand the completed SCSI request
5354 * back to the SML.
5355 */
5356
5357 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5358 {
5359 struct pqi_scsi_dev *device;
5360
5361 if (!scmd->device) {
5362 set_host_byte(scmd, DID_NO_CONNECT);
5363 return;
5364 }
5365
5366 device = scmd->device->hostdata;
5367 if (!device) {
5368 set_host_byte(scmd, DID_NO_CONNECT);
5369 return;
5370 }
5371
5372 atomic_dec(&device->scsi_cmds_outstanding);
5373 }
5374
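/*
 * SCSI midlayer queuecommand entry point: route the command to the RAID
 * bypass path, the AIO path, or the normal RAID path depending on the
 * device type and bypass eligibility.
 */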
5375 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
5376 struct scsi_cmnd *scmd)
5377 {
5378 int rc;
5379 struct pqi_ctrl_info *ctrl_info;
5380 struct pqi_scsi_dev *device;
5381 u16 hw_queue;
5382 struct pqi_queue_group *queue_group;
5383 bool raid_bypassed;
5384
5385 device = scmd->device->hostdata;
5386 ctrl_info = shost_to_hba(shost);
5387
5388 if (!device) {
5389 set_host_byte(scmd, DID_NO_CONNECT);
5390 pqi_scsi_done(scmd);
5391 return 0;
5392 }
5393
5394 atomic_inc(&device->scsi_cmds_outstanding);
5395
5396 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5397 device)) {
5398 set_host_byte(scmd, DID_NO_CONNECT);
5399 pqi_scsi_done(scmd);
5400 return 0;
5401 }
5402
5403 pqi_ctrl_busy(ctrl_info);
5404 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5405 pqi_ctrl_in_ofa(ctrl_info)) {
5406 rc = SCSI_MLQUEUE_HOST_BUSY;
5407 goto out;
5408 }
5409
5410 /*
5411 * This is necessary because the SML doesn't zero out this field during
5412 * error recovery.
5413 */
5414 scmd->result = 0;
5415
5416 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5417 queue_group = &ctrl_info->queue_groups[hw_queue];
5418
5419 if (pqi_is_logical_device(device)) {
5420 raid_bypassed = false;
5421 if (device->raid_bypass_enabled &&
5422 !blk_rq_is_passthrough(scmd->request)) {
5423 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5424 scmd, queue_group);
5425 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5426 raid_bypassed = true;
5427 }
5428 if (!raid_bypassed)
5429 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5430 queue_group);
5431 } else {
5432 if (device->aio_enabled)
5433 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5434 queue_group);
5435 else
5436 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5437 queue_group);
5438 }
5439
5440 out:
5441 pqi_ctrl_unbusy(ctrl_info);
5442 if (rc)
5443 atomic_dec(&device->scsi_cmds_outstanding);
5444
5445 return rc;
5446 }
5447
5448 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5449 struct pqi_queue_group *queue_group)
5450 {
5451 unsigned int path;
5452 unsigned long flags;
5453 bool list_is_empty;
5454
5455 for (path = 0; path < 2; path++) {
5456 while (1) {
5457 spin_lock_irqsave(
5458 &queue_group->submit_lock[path], flags);
5459 list_is_empty =
5460 list_empty(&queue_group->request_list[path]);
5461 spin_unlock_irqrestore(
5462 &queue_group->submit_lock[path], flags);
5463 if (list_is_empty)
5464 break;
5465 pqi_check_ctrl_health(ctrl_info);
5466 if (pqi_ctrl_offline(ctrl_info))
5467 return -ENXIO;
5468 usleep_range(1000, 2000);
5469 }
5470 }
5471
5472 return 0;
5473 }
5474
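/*
 * Wait for all queued requests to be handed to the controller and for the
 * firmware to consume every element of both inbound (RAID and AIO) queues
 * in every queue group, i.e. until iq_ci catches up with iq_pi.
 */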
5475 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5476 {
5477 int rc;
5478 unsigned int i;
5479 unsigned int path;
5480 struct pqi_queue_group *queue_group;
5481 pqi_index_t iq_pi;
5482 pqi_index_t iq_ci;
5483
5484 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5485 queue_group = &ctrl_info->queue_groups[i];
5486
5487 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5488 if (rc)
5489 return rc;
5490
5491 for (path = 0; path < 2; path++) {
5492 iq_pi = queue_group->iq_pi_copy[path];
5493
5494 while (1) {
5495 iq_ci = readl(queue_group->iq_ci[path]);
5496 if (iq_ci == iq_pi)
5497 break;
5498 pqi_check_ctrl_health(ctrl_info);
5499 if (pqi_ctrl_offline(ctrl_info))
5500 return -ENXIO;
5501 usleep_range(1000, 2000);
5502 }
5503 }
5504 }
5505
5506 return 0;
5507 }
5508
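/*
 * Complete with DID_RESET any request for this device that is still sitting
 * on a submission queue list, i.e. has not yet been handed to the controller.
 */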
5509 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5510 struct pqi_scsi_dev *device)
5511 {
5512 unsigned int i;
5513 unsigned int path;
5514 struct pqi_queue_group *queue_group;
5515 unsigned long flags;
5516 struct pqi_io_request *io_request;
5517 struct pqi_io_request *next;
5518 struct scsi_cmnd *scmd;
5519 struct pqi_scsi_dev *scsi_device;
5520
5521 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5522 queue_group = &ctrl_info->queue_groups[i];
5523
5524 for (path = 0; path < 2; path++) {
5525 spin_lock_irqsave(
5526 &queue_group->submit_lock[path], flags);
5527
5528 list_for_each_entry_safe(io_request, next,
5529 &queue_group->request_list[path],
5530 request_list_entry) {
5531 scmd = io_request->scmd;
5532 if (!scmd)
5533 continue;
5534
5535 scsi_device = scmd->device->hostdata;
5536 if (scsi_device != device)
5537 continue;
5538
5539 list_del(&io_request->request_list_entry);
5540 set_host_byte(scmd, DID_RESET);
5541 pqi_scsi_done(scmd);
5542 }
5543
5544 spin_unlock_irqrestore(
5545 &queue_group->submit_lock[path], flags);
5546 }
5547 }
5548 }
5549
5550 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5551 {
5552 unsigned int i;
5553 unsigned int path;
5554 struct pqi_queue_group *queue_group;
5555 unsigned long flags;
5556 struct pqi_io_request *io_request;
5557 struct pqi_io_request *next;
5558 struct scsi_cmnd *scmd;
5559
5560 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5561 queue_group = &ctrl_info->queue_groups[i];
5562
5563 for (path = 0; path < 2; path++) {
5564 spin_lock_irqsave(&queue_group->submit_lock[path],
5565 flags);
5566
5567 list_for_each_entry_safe(io_request, next,
5568 &queue_group->request_list[path],
5569 request_list_entry) {
5570
5571 scmd = io_request->scmd;
5572 if (!scmd)
5573 continue;
5574
5575 list_del(&io_request->request_list_entry);
5576 set_host_byte(scmd, DID_RESET);
5577 pqi_scsi_done(scmd);
5578 }
5579
5580 spin_unlock_irqrestore(
5581 &queue_group->submit_lock[path], flags);
5582 }
5583 }
5584 }
5585
5586 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5587 struct pqi_scsi_dev *device, unsigned long timeout_secs)
5588 {
5589 unsigned long timeout;
5590
5591 timeout = (timeout_secs * PQI_HZ) + jiffies;
5592
5593 while (atomic_read(&device->scsi_cmds_outstanding)) {
5594 pqi_check_ctrl_health(ctrl_info);
5595 if (pqi_ctrl_offline(ctrl_info))
5596 return -ENXIO;
5597 if (timeout_secs != NO_TIMEOUT) {
5598 if (time_after(jiffies, timeout)) {
5599 dev_err(&ctrl_info->pci_dev->dev,
5600 "timed out waiting for pending IO\n");
5601 return -ETIMEDOUT;
5602 }
5603 }
5604 usleep_range(1000, 2000);
5605 }
5606
5607 return 0;
5608 }
5609
5610 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5611 unsigned long timeout_secs)
5612 {
5613 bool io_pending;
5614 unsigned long flags;
5615 unsigned long timeout;
5616 struct pqi_scsi_dev *device;
5617
5618 timeout = (timeout_secs * PQI_HZ) + jiffies;
5619 while (1) {
5620 io_pending = false;
5621
5622 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5623 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5624 scsi_device_list_entry) {
5625 if (atomic_read(&device->scsi_cmds_outstanding)) {
5626 io_pending = true;
5627 break;
5628 }
5629 }
5630 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5631 flags);
5632
5633 if (!io_pending)
5634 break;
5635
5636 pqi_check_ctrl_health(ctrl_info);
5637 if (pqi_ctrl_offline(ctrl_info))
5638 return -ENXIO;
5639
5640 if (timeout_secs != NO_TIMEOUT) {
5641 if (time_after(jiffies, timeout)) {
5642 dev_err(&ctrl_info->pci_dev->dev,
5643 "timed out waiting for pending IO\n");
5644 return -ETIMEDOUT;
5645 }
5646 }
5647 usleep_range(1000, 2000);
5648 }
5649
5650 return 0;
5651 }
5652
5653 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5654 void *context)
5655 {
5656 struct completion *waiting = context;
5657
5658 complete(waiting);
5659 }
5660
5661 #define PQI_LUN_RESET_TIMEOUT_SECS 10
5662
5663 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5664 struct pqi_scsi_dev *device, struct completion *wait)
5665 {
5666 int rc;
5667
5668 while (1) {
5669 if (wait_for_completion_io_timeout(wait,
5670 PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
5671 rc = 0;
5672 break;
5673 }
5674
5675 pqi_check_ctrl_health(ctrl_info);
5676 if (pqi_ctrl_offline(ctrl_info)) {
5677 rc = -ENXIO;
5678 break;
5679 }
5680 }
5681
5682 return rc;
5683 }
5684
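/*
 * Issue a SOP LUN reset task management request for the device and wait for
 * it to complete or for the controller to go offline.
 */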
5685 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5686 struct pqi_scsi_dev *device)
5687 {
5688 int rc;
5689 struct pqi_io_request *io_request;
5690 DECLARE_COMPLETION_ONSTACK(wait);
5691 struct pqi_task_management_request *request;
5692
5693 io_request = pqi_alloc_io_request(ctrl_info);
5694 io_request->io_complete_callback = pqi_lun_reset_complete;
5695 io_request->context = &wait;
5696
5697 request = io_request->iu;
5698 memset(request, 0, sizeof(*request));
5699
5700 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5701 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5702 &request->header.iu_length);
5703 put_unaligned_le16(io_request->index, &request->request_id);
5704 memcpy(request->lun_number, device->scsi3addr,
5705 sizeof(request->lun_number));
5706 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5707
5708 pqi_start_io(ctrl_info,
5709 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5710 io_request);
5711
5712 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5713 if (rc == 0)
5714 rc = io_request->status;
5715
5716 pqi_free_io_request(io_request);
5717
5718 return rc;
5719 }
5720
5721 /* Performs a reset at the LUN level. */
5722
5723 #define PQI_LUN_RESET_RETRIES 3
5724 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
5725 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
5726
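/*
 * Retry the LUN reset up to PQI_LUN_RESET_RETRIES times while it fails with
 * -EAGAIN, then wait for any I/O still outstanding on the device to drain.
 */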
5727 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5728 struct pqi_scsi_dev *device)
5729 {
5730 int rc;
5731 unsigned int retries;
5732 unsigned long timeout_secs;
5733
5734 for (retries = 0;;) {
5735 rc = pqi_lun_reset(ctrl_info, device);
5736 if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
5737 break;
5738 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5739 }
5740
5741 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
5742
5743 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
5744
5745 return rc == 0 ? SUCCESS : FAILED;
5746 }
5747
5748 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5749 struct pqi_scsi_dev *device)
5750 {
5751 int rc;
5752
5753 mutex_lock(&ctrl_info->lun_reset_mutex);
5754
5755 pqi_ctrl_block_requests(ctrl_info);
5756 pqi_ctrl_wait_until_quiesced(ctrl_info);
5757 pqi_fail_io_queued_for_device(ctrl_info, device);
5758 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5759 pqi_device_reset_start(device);
5760 pqi_ctrl_unblock_requests(ctrl_info);
5761
5762 if (rc)
5763 rc = FAILED;
5764 else
5765 rc = _pqi_device_reset(ctrl_info, device);
5766
5767 pqi_device_reset_done(device);
5768
5769 mutex_unlock(&ctrl_info->lun_reset_mutex);
5770
5771 return rc;
5772 }
5773
5774 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5775 {
5776 int rc;
5777 struct Scsi_Host *shost;
5778 struct pqi_ctrl_info *ctrl_info;
5779 struct pqi_scsi_dev *device;
5780
5781 shost = scmd->device->host;
5782 ctrl_info = shost_to_hba(shost);
5783 device = scmd->device->hostdata;
5784
5785 dev_err(&ctrl_info->pci_dev->dev,
5786 "resetting scsi %d:%d:%d:%d\n",
5787 shost->host_no, device->bus, device->target, device->lun);
5788
5789 pqi_check_ctrl_health(ctrl_info);
5790 if (pqi_ctrl_offline(ctrl_info)) {
5791 dev_err(&ctrl_info->pci_dev->dev,
5792 "controller %u offlined - cannot send device reset\n",
5793 ctrl_info->ctrl_id);
5794 rc = FAILED;
5795 goto out;
5796 }
5797
5798 pqi_wait_until_ofa_finished(ctrl_info);
5799
5800 rc = pqi_device_reset(ctrl_info, device);
5801
5802 out:
5803 dev_err(&ctrl_info->pci_dev->dev,
5804 "reset of scsi %d:%d:%d:%d: %s\n",
5805 shost->host_no, device->bus, device->target, device->lun,
5806 rc == SUCCESS ? "SUCCESS" : "FAILED");
5807
5808 return rc;
5809 }
5810
5811 static int pqi_slave_alloc(struct scsi_device *sdev)
5812 {
5813 struct pqi_scsi_dev *device;
5814 unsigned long flags;
5815 struct pqi_ctrl_info *ctrl_info;
5816 struct scsi_target *starget;
5817 struct sas_rphy *rphy;
5818
5819 ctrl_info = shost_to_hba(sdev->host);
5820
5821 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5822
5823 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5824 starget = scsi_target(sdev);
5825 rphy = target_to_rphy(starget);
5826 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5827 if (device) {
5828 device->target = sdev_id(sdev);
5829 device->lun = sdev->lun;
5830 device->target_lun_valid = true;
5831 }
5832 } else {
5833 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5834 sdev_id(sdev), sdev->lun);
5835 }
5836
5837 if (device) {
5838 sdev->hostdata = device;
5839 device->sdev = sdev;
5840 if (device->queue_depth) {
5841 device->advertised_queue_depth = device->queue_depth;
5842 scsi_change_queue_depth(sdev,
5843 device->advertised_queue_depth);
5844 }
5845 if (pqi_is_logical_device(device))
5846 pqi_disable_write_same(sdev);
5847 else
5848 sdev->allow_restart = 1;
5849 }
5850
5851 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5852
5853 return 0;
5854 }
5855
5856 static int pqi_map_queues(struct Scsi_Host *shost)
5857 {
5858 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5859
5860 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
5861 ctrl_info->pci_dev, 0);
5862 }
5863
5864 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5865 void __user *arg)
5866 {
5867 struct pci_dev *pci_dev;
5868 u32 subsystem_vendor;
5869 u32 subsystem_device;
5870 cciss_pci_info_struct pciinfo;
5871
5872 if (!arg)
5873 return -EINVAL;
5874
5875 pci_dev = ctrl_info->pci_dev;
5876
5877 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5878 pciinfo.bus = pci_dev->bus->number;
5879 pciinfo.dev_fn = pci_dev->devfn;
5880 subsystem_vendor = pci_dev->subsystem_vendor;
5881 subsystem_device = pci_dev->subsystem_device;
5882 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5883 subsystem_vendor;
5884
5885 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5886 return -EFAULT;
5887
5888 return 0;
5889 }
5890
5891 static int pqi_getdrivver_ioctl(void __user *arg)
5892 {
5893 u32 version;
5894
5895 if (!arg)
5896 return -EINVAL;
5897
5898 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5899 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5900
5901 if (copy_to_user(arg, &version, sizeof(version)))
5902 return -EFAULT;
5903
5904 return 0;
5905 }
5906
5907 struct ciss_error_info {
5908 u8 scsi_status;
5909 int command_status;
5910 size_t sense_data_length;
5911 };
5912
5913 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5914 struct ciss_error_info *ciss_error_info)
5915 {
5916 int ciss_cmd_status;
5917 size_t sense_data_length;
5918
5919 switch (pqi_error_info->data_out_result) {
5920 case PQI_DATA_IN_OUT_GOOD:
5921 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5922 break;
5923 case PQI_DATA_IN_OUT_UNDERFLOW:
5924 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5925 break;
5926 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5927 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5928 break;
5929 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5930 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5931 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5932 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5933 case PQI_DATA_IN_OUT_ERROR:
5934 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5935 break;
5936 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5937 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5938 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5939 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5940 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5941 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5942 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5943 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5944 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5945 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5946 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5947 break;
5948 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5949 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5950 break;
5951 case PQI_DATA_IN_OUT_ABORTED:
5952 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5953 break;
5954 case PQI_DATA_IN_OUT_TIMEOUT:
5955 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5956 break;
5957 default:
5958 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5959 break;
5960 }
5961
5962 sense_data_length =
5963 get_unaligned_le16(&pqi_error_info->sense_data_length);
5964 if (sense_data_length == 0)
5965 sense_data_length =
5966 get_unaligned_le16(&pqi_error_info->response_data_length);
5967 if (sense_data_length)
5968 if (sense_data_length > sizeof(pqi_error_info->data))
5969 sense_data_length = sizeof(pqi_error_info->data);
5970
5971 ciss_error_info->scsi_status = pqi_error_info->status;
5972 ciss_error_info->command_status = ciss_cmd_status;
5973 ciss_error_info->sense_data_length = sense_data_length;
5974 }
5975
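/*
 * CCISS_PASSTHRU: copy in the caller's command and buffer, build a RAID path
 * request, issue it synchronously, then copy back the completion status,
 * sense data, and any data read from the device.
 */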
5976 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5977 {
5978 int rc;
5979 char *kernel_buffer = NULL;
5980 u16 iu_length;
5981 size_t sense_data_length;
5982 IOCTL_Command_struct iocommand;
5983 struct pqi_raid_path_request request;
5984 struct pqi_raid_error_info pqi_error_info;
5985 struct ciss_error_info ciss_error_info;
5986
5987 if (pqi_ctrl_offline(ctrl_info))
5988 return -ENXIO;
5989 if (!arg)
5990 return -EINVAL;
5991 if (!capable(CAP_SYS_RAWIO))
5992 return -EPERM;
5993 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5994 return -EFAULT;
5995 if (iocommand.buf_size < 1 &&
5996 iocommand.Request.Type.Direction != XFER_NONE)
5997 return -EINVAL;
5998 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5999 return -EINVAL;
6000 if (iocommand.Request.Type.Type != TYPE_CMD)
6001 return -EINVAL;
6002
6003 switch (iocommand.Request.Type.Direction) {
6004 case XFER_NONE:
6005 case XFER_WRITE:
6006 case XFER_READ:
6007 case XFER_READ | XFER_WRITE:
6008 break;
6009 default:
6010 return -EINVAL;
6011 }
6012
6013 if (iocommand.buf_size > 0) {
6014 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6015 if (!kernel_buffer)
6016 return -ENOMEM;
6017 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6018 if (copy_from_user(kernel_buffer, iocommand.buf,
6019 iocommand.buf_size)) {
6020 rc = -EFAULT;
6021 goto out;
6022 }
6023 } else {
6024 memset(kernel_buffer, 0, iocommand.buf_size);
6025 }
6026 }
6027
6028 memset(&request, 0, sizeof(request));
6029
6030 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6031 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6032 PQI_REQUEST_HEADER_LENGTH;
6033 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6034 sizeof(request.lun_number));
6035 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6036 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6037
6038 switch (iocommand.Request.Type.Direction) {
6039 case XFER_NONE:
6040 request.data_direction = SOP_NO_DIRECTION_FLAG;
6041 break;
6042 case XFER_WRITE:
6043 request.data_direction = SOP_WRITE_FLAG;
6044 break;
6045 case XFER_READ:
6046 request.data_direction = SOP_READ_FLAG;
6047 break;
6048 case XFER_READ | XFER_WRITE:
6049 request.data_direction = SOP_BIDIRECTIONAL;
6050 break;
6051 }
6052
6053 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6054
6055 if (iocommand.buf_size > 0) {
6056 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6057
6058 rc = pqi_map_single(ctrl_info->pci_dev,
6059 &request.sg_descriptors[0], kernel_buffer,
6060 iocommand.buf_size, DMA_BIDIRECTIONAL);
6061 if (rc)
6062 goto out;
6063
6064 iu_length += sizeof(request.sg_descriptors[0]);
6065 }
6066
6067 put_unaligned_le16(iu_length, &request.header.iu_length);
6068
6069 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6070 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
6071
6072 if (iocommand.buf_size > 0)
6073 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6074 DMA_BIDIRECTIONAL);
6075
6076 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6077
6078 if (rc == 0) {
6079 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6080 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6081 iocommand.error_info.CommandStatus =
6082 ciss_error_info.command_status;
6083 sense_data_length = ciss_error_info.sense_data_length;
6084 if (sense_data_length) {
6085 if (sense_data_length >
6086 sizeof(iocommand.error_info.SenseInfo))
6087 sense_data_length =
6088 sizeof(iocommand.error_info.SenseInfo);
6089 memcpy(iocommand.error_info.SenseInfo,
6090 pqi_error_info.data, sense_data_length);
6091 iocommand.error_info.SenseLen = sense_data_length;
6092 }
6093 }
6094
6095 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6096 rc = -EFAULT;
6097 goto out;
6098 }
6099
6100 if (rc == 0 && iocommand.buf_size > 0 &&
6101 (iocommand.Request.Type.Direction & XFER_READ)) {
6102 if (copy_to_user(iocommand.buf, kernel_buffer,
6103 iocommand.buf_size)) {
6104 rc = -EFAULT;
6105 }
6106 }
6107
6108 out:
6109 kfree(kernel_buffer);
6110
6111 return rc;
6112 }
6113
6114 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6115 void __user *arg)
6116 {
6117 int rc;
6118 struct pqi_ctrl_info *ctrl_info;
6119
6120 ctrl_info = shost_to_hba(sdev->host);
6121
6122 if (pqi_ctrl_in_ofa(ctrl_info))
6123 return -EBUSY;
6124
6125 switch (cmd) {
6126 case CCISS_DEREGDISK:
6127 case CCISS_REGNEWDISK:
6128 case CCISS_REGNEWD:
6129 rc = pqi_scan_scsi_devices(ctrl_info);
6130 break;
6131 case CCISS_GETPCIINFO:
6132 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6133 break;
6134 case CCISS_GETDRIVVER:
6135 rc = pqi_getdrivver_ioctl(arg);
6136 break;
6137 case CCISS_PASSTHRU:
6138 rc = pqi_passthru_ioctl(ctrl_info, arg);
6139 break;
6140 default:
6141 rc = -EINVAL;
6142 break;
6143 }
6144
6145 return rc;
6146 }
6147
6148 static ssize_t pqi_firmware_version_show(struct device *dev,
6149 struct device_attribute *attr, char *buffer)
6150 {
6151 struct Scsi_Host *shost;
6152 struct pqi_ctrl_info *ctrl_info;
6153
6154 shost = class_to_shost(dev);
6155 ctrl_info = shost_to_hba(shost);
6156
6157 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6158 }
6159
6160 static ssize_t pqi_driver_version_show(struct device *dev,
6161 struct device_attribute *attr, char *buffer)
6162 {
6163 struct Scsi_Host *shost;
6164 struct pqi_ctrl_info *ctrl_info;
6165
6166 shost = class_to_shost(dev);
6167 ctrl_info = shost_to_hba(shost);
6168
6169 return snprintf(buffer, PAGE_SIZE,
6170 "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6171 }
6172
6173 static ssize_t pqi_serial_number_show(struct device *dev,
6174 struct device_attribute *attr, char *buffer)
6175 {
6176 struct Scsi_Host *shost;
6177 struct pqi_ctrl_info *ctrl_info;
6178
6179 shost = class_to_shost(dev);
6180 ctrl_info = shost_to_hba(shost);
6181
6182 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6183 }
6184
6185 static ssize_t pqi_model_show(struct device *dev,
6186 struct device_attribute *attr, char *buffer)
6187 {
6188 struct Scsi_Host *shost;
6189 struct pqi_ctrl_info *ctrl_info;
6190
6191 shost = class_to_shost(dev);
6192 ctrl_info = shost_to_hba(shost);
6193
6194 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6195 }
6196
6197 static ssize_t pqi_vendor_show(struct device *dev,
6198 struct device_attribute *attr, char *buffer)
6199 {
6200 struct Scsi_Host *shost;
6201 struct pqi_ctrl_info *ctrl_info;
6202
6203 shost = class_to_shost(dev);
6204 ctrl_info = shost_to_hba(shost);
6205
6206 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6207 }
6208
6209 static ssize_t pqi_host_rescan_store(struct device *dev,
6210 struct device_attribute *attr, const char *buffer, size_t count)
6211 {
6212 struct Scsi_Host *shost = class_to_shost(dev);
6213
6214 pqi_scan_start(shost);
6215
6216 return count;
6217 }
6218
6219 static ssize_t pqi_lockup_action_show(struct device *dev,
6220 struct device_attribute *attr, char *buffer)
6221 {
6222 int count = 0;
6223 unsigned int i;
6224
6225 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6226 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6227 count += snprintf(buffer + count, PAGE_SIZE - count,
6228 "[%s] ", pqi_lockup_actions[i].name);
6229 else
6230 count += snprintf(buffer + count, PAGE_SIZE - count,
6231 "%s ", pqi_lockup_actions[i].name);
6232 }
6233
6234 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
6235
6236 return count;
6237 }
6238
6239 static ssize_t pqi_lockup_action_store(struct device *dev,
6240 struct device_attribute *attr, const char *buffer, size_t count)
6241 {
6242 unsigned int i;
6243 char *action_name;
6244 char action_name_buffer[32];
6245
6246 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6247 action_name = strstrip(action_name_buffer);
6248
6249 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6250 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6251 pqi_lockup_action = pqi_lockup_actions[i].action;
6252 return count;
6253 }
6254 }
6255
6256 return -EINVAL;
6257 }
6258
6259 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6260 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6261 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6262 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6263 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6264 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6265 static DEVICE_ATTR(lockup_action, 0644,
6266 pqi_lockup_action_show, pqi_lockup_action_store);
6267
6268 static struct device_attribute *pqi_shost_attrs[] = {
6269 &dev_attr_driver_version,
6270 &dev_attr_firmware_version,
6271 &dev_attr_model,
6272 &dev_attr_serial_number,
6273 &dev_attr_vendor,
6274 &dev_attr_rescan,
6275 &dev_attr_lockup_action,
6276 NULL
6277 };
6278
6279 static ssize_t pqi_unique_id_show(struct device *dev,
6280 struct device_attribute *attr, char *buffer)
6281 {
6282 struct pqi_ctrl_info *ctrl_info;
6283 struct scsi_device *sdev;
6284 struct pqi_scsi_dev *device;
6285 unsigned long flags;
6286 unsigned char uid[16];
6287
6288 sdev = to_scsi_device(dev);
6289 ctrl_info = shost_to_hba(sdev->host);
6290
6291 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6292
6293 device = sdev->hostdata;
6294 if (!device) {
6295 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6296 flags);
6297 return -ENODEV;
6298 }
6299 memcpy(uid, device->unique_id, sizeof(uid));
6300
6301 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6302
6303 return snprintf(buffer, PAGE_SIZE,
6304 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
6305 uid[0], uid[1], uid[2], uid[3],
6306 uid[4], uid[5], uid[6], uid[7],
6307 uid[8], uid[9], uid[10], uid[11],
6308 uid[12], uid[13], uid[14], uid[15]);
6309 }
6310
6311 static ssize_t pqi_lunid_show(struct device *dev,
6312 struct device_attribute *attr, char *buffer)
6313 {
6314 struct pqi_ctrl_info *ctrl_info;
6315 struct scsi_device *sdev;
6316 struct pqi_scsi_dev *device;
6317 unsigned long flags;
6318 u8 lunid[8];
6319
6320 sdev = to_scsi_device(dev);
6321 ctrl_info = shost_to_hba(sdev->host);
6322
6323 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6324
6325 device = sdev->hostdata;
6326 if (!device) {
6327 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6328 flags);
6329 return -ENODEV;
6330 }
6331 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6332
6333 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6334
6335 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6336 }
6337
6338 #define MAX_PATHS 8
6339 static ssize_t pqi_path_info_show(struct device *dev,
6340 struct device_attribute *attr, char *buf)
6341 {
6342 struct pqi_ctrl_info *ctrl_info;
6343 struct scsi_device *sdev;
6344 struct pqi_scsi_dev *device;
6345 unsigned long flags;
6346 int i;
6347 int output_len = 0;
6348 u8 box;
6349 u8 bay;
6350 u8 path_map_index = 0;
6351 char *active;
6352 unsigned char phys_connector[2];
6353
6354 sdev = to_scsi_device(dev);
6355 ctrl_info = shost_to_hba(sdev->host);
6356
6357 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6358
6359 device = sdev->hostdata;
6360 if (!device) {
6361 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6362 flags);
6363 return -ENODEV;
6364 }
6365
6366 bay = device->bay;
6367 for (i = 0; i < MAX_PATHS; i++) {
6368 path_map_index = 1<<i;
6369 if (i == device->active_path_index)
6370 active = "Active";
6371 else if (device->path_map & path_map_index)
6372 active = "Inactive";
6373 else
6374 continue;
6375
6376 output_len += scnprintf(buf + output_len,
6377 PAGE_SIZE - output_len,
6378 "[%d:%d:%d:%d] %20.20s ",
6379 ctrl_info->scsi_host->host_no,
6380 device->bus, device->target,
6381 device->lun,
6382 scsi_device_type(device->devtype));
6383
6384 if (device->devtype == TYPE_RAID ||
6385 pqi_is_logical_device(device))
6386 goto end_buffer;
6387
6388 memcpy(&phys_connector, &device->phys_connector[i],
6389 sizeof(phys_connector));
6390 if (phys_connector[0] < '0')
6391 phys_connector[0] = '0';
6392 if (phys_connector[1] < '0')
6393 phys_connector[1] = '0';
6394
6395 output_len += scnprintf(buf + output_len,
6396 PAGE_SIZE - output_len,
6397 "PORT: %.2s ", phys_connector);
6398
6399 box = device->box[i];
6400 if (box != 0 && box != 0xFF)
6401 output_len += scnprintf(buf + output_len,
6402 PAGE_SIZE - output_len,
6403 "BOX: %hhu ", box);
6404
6405 if ((device->devtype == TYPE_DISK ||
6406 device->devtype == TYPE_ZBC) &&
6407 pqi_expose_device(device))
6408 output_len += scnprintf(buf + output_len,
6409 PAGE_SIZE - output_len,
6410 "BAY: %hhu ", bay);
6411
6412 end_buffer:
6413 output_len += scnprintf(buf + output_len,
6414 PAGE_SIZE - output_len,
6415 "%s\n", active);
6416 }
6417
6418 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6419 return output_len;
6420 }
6421
6422
6423 static ssize_t pqi_sas_address_show(struct device *dev,
6424 struct device_attribute *attr, char *buffer)
6425 {
6426 struct pqi_ctrl_info *ctrl_info;
6427 struct scsi_device *sdev;
6428 struct pqi_scsi_dev *device;
6429 unsigned long flags;
6430 u64 sas_address;
6431
6432 sdev = to_scsi_device(dev);
6433 ctrl_info = shost_to_hba(sdev->host);
6434
6435 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6436
6437 device = sdev->hostdata;
6438 if (pqi_is_logical_device(device)) {
6439 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6440 flags);
6441 return -ENODEV;
6442 }
6443 sas_address = device->sas_address;
6444
6445 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6446
6447 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6448 }
6449
6450 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6451 struct device_attribute *attr, char *buffer)
6452 {
6453 struct pqi_ctrl_info *ctrl_info;
6454 struct scsi_device *sdev;
6455 struct pqi_scsi_dev *device;
6456 unsigned long flags;
6457
6458 sdev = to_scsi_device(dev);
6459 ctrl_info = shost_to_hba(sdev->host);
6460
6461 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6462
6463 device = sdev->hostdata;
6464 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6465 buffer[1] = '\n';
6466 buffer[2] = '\0';
6467
6468 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6469
6470 return 2;
6471 }
6472
6473 static ssize_t pqi_raid_level_show(struct device *dev,
6474 struct device_attribute *attr, char *buffer)
6475 {
6476 struct pqi_ctrl_info *ctrl_info;
6477 struct scsi_device *sdev;
6478 struct pqi_scsi_dev *device;
6479 unsigned long flags;
6480 char *raid_level;
6481
6482 sdev = to_scsi_device(dev);
6483 ctrl_info = shost_to_hba(sdev->host);
6484
6485 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6486
6487 device = sdev->hostdata;
6488
6489 if (pqi_is_logical_device(device))
6490 raid_level = pqi_raid_level_to_string(device->raid_level);
6491 else
6492 raid_level = "N/A";
6493
6494 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6495
6496 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6497 }
6498
6499 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6500 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6501 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
6502 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6503 static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
6504 pqi_ssd_smart_path_enabled_show, NULL);
6505 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6506
6507 static struct device_attribute *pqi_sdev_attrs[] = {
6508 &dev_attr_lunid,
6509 &dev_attr_unique_id,
6510 &dev_attr_path_info,
6511 &dev_attr_sas_address,
6512 &dev_attr_ssd_smart_path_enabled,
6513 &dev_attr_raid_level,
6514 NULL
6515 };
6516
6517 static struct scsi_host_template pqi_driver_template = {
6518 .module = THIS_MODULE,
6519 .name = DRIVER_NAME_SHORT,
6520 .proc_name = DRIVER_NAME_SHORT,
6521 .queuecommand = pqi_scsi_queue_command,
6522 .scan_start = pqi_scan_start,
6523 .scan_finished = pqi_scan_finished,
6524 .this_id = -1,
6525 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6526 .ioctl = pqi_ioctl,
6527 .slave_alloc = pqi_slave_alloc,
6528 .map_queues = pqi_map_queues,
6529 .sdev_attrs = pqi_sdev_attrs,
6530 .shost_attrs = pqi_shost_attrs,
6531 };
6532
6533 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6534 {
6535 int rc;
6536 struct Scsi_Host *shost;
6537
6538 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6539 if (!shost) {
6540 dev_err(&ctrl_info->pci_dev->dev,
6541 "scsi_host_alloc failed for controller %u\n",
6542 ctrl_info->ctrl_id);
6543 return -ENOMEM;
6544 }
6545
6546 shost->io_port = 0;
6547 shost->n_io_port = 0;
6548 shost->this_id = -1;
6549 shost->max_channel = PQI_MAX_BUS;
6550 shost->max_cmd_len = MAX_COMMAND_SIZE;
6551 shost->max_lun = ~0;
6552 shost->max_id = ~0;
6553 shost->max_sectors = ctrl_info->max_sectors;
6554 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6555 shost->cmd_per_lun = shost->can_queue;
6556 shost->sg_tablesize = ctrl_info->sg_tablesize;
6557 shost->transportt = pqi_sas_transport_template;
6558 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6559 shost->unique_id = shost->irq;
6560 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6561 shost->hostdata[0] = (unsigned long)ctrl_info;
6562
6563 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6564 if (rc) {
6565 dev_err(&ctrl_info->pci_dev->dev,
6566 "scsi_add_host failed for controller %u\n",
6567 ctrl_info->ctrl_id);
6568 goto free_host;
6569 }
6570
6571 rc = pqi_add_sas_host(shost, ctrl_info);
6572 if (rc) {
6573 dev_err(&ctrl_info->pci_dev->dev,
6574 "add SAS host failed for controller %u\n",
6575 ctrl_info->ctrl_id);
6576 goto remove_host;
6577 }
6578
6579 ctrl_info->scsi_host = shost;
6580
6581 return 0;
6582
6583 remove_host:
6584 scsi_remove_host(shost);
6585 free_host:
6586 scsi_host_put(shost);
6587
6588 return rc;
6589 }
6590
6591 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6592 {
6593 struct Scsi_Host *shost;
6594
6595 pqi_delete_sas_host(ctrl_info);
6596
6597 shost = ctrl_info->scsi_host;
6598 if (!shost)
6599 return;
6600
6601 scsi_remove_host(shost);
6602 scsi_host_put(shost);
6603 }
6604
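/*
 * Poll the PQI device reset register until the reset completes, the
 * controller goes offline, or the firmware-advertised reset timeout expires.
 */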
6605 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6606 {
6607 int rc = 0;
6608 struct pqi_device_registers __iomem *pqi_registers;
6609 unsigned long timeout;
6610 unsigned int timeout_msecs;
6611 union pqi_reset_register reset_reg;
6612
6613 pqi_registers = ctrl_info->pqi_registers;
6614 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
6615 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
6616
6617 while (1) {
6618 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
6619 reset_reg.all_bits = readl(&pqi_registers->device_reset);
6620 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
6621 break;
6622 pqi_check_ctrl_health(ctrl_info);
6623 if (pqi_ctrl_offline(ctrl_info)) {
6624 rc = -ENXIO;
6625 break;
6626 }
6627 if (time_after(jiffies, timeout)) {
6628 rc = -ETIMEDOUT;
6629 break;
6630 }
6631 }
6632
6633 return rc;
6634 }
6635
6636 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6637 {
6638 int rc;
6639 union pqi_reset_register reset_reg;
6640
6641 if (ctrl_info->pqi_reset_quiesce_supported) {
6642 rc = sis_pqi_reset_quiesce(ctrl_info);
6643 if (rc) {
6644 dev_err(&ctrl_info->pci_dev->dev,
6645 "PQI reset failed during quiesce with error %d\n",
6646 rc);
6647 return rc;
6648 }
6649 }
6650
6651 reset_reg.all_bits = 0;
6652 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6653 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6654
6655 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6656
6657 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6658 if (rc)
6659 dev_err(&ctrl_info->pci_dev->dev,
6660 "PQI reset failed with error %d\n", rc);
6661
6662 return rc;
6663 }
6664
6665 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6666 {
6667 int rc;
6668 struct bmic_sense_subsystem_info *sense_info;
6669
6670 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
6671 if (!sense_info)
6672 return -ENOMEM;
6673
6674 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6675 if (rc)
6676 goto out;
6677
6678 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6679 sizeof(sense_info->ctrl_serial_number));
6680 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6681
6682 out:
6683 kfree(sense_info);
6684
6685 return rc;
6686 }
6687
6688 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6689 {
6690 int rc;
6691 struct bmic_identify_controller *identify;
6692
6693 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6694 if (!identify)
6695 return -ENOMEM;
6696
6697 rc = pqi_identify_controller(ctrl_info, identify);
6698 if (rc)
6699 goto out;
6700
6701 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6702 sizeof(identify->firmware_version));
6703 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6704 snprintf(ctrl_info->firmware_version +
6705 strlen(ctrl_info->firmware_version),
6706 sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
6707 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6708
6709 memcpy(ctrl_info->model, identify->product_id,
6710 sizeof(identify->product_id));
6711 ctrl_info->model[sizeof(identify->product_id)] = '\0';
6712
6713 memcpy(ctrl_info->vendor, identify->vendor_id,
6714 sizeof(identify->vendor_id));
6715 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6716
6717 out:
6718 kfree(identify);
6719
6720 return rc;
6721 }
6722
6723 struct pqi_config_table_section_info {
6724 struct pqi_ctrl_info *ctrl_info;
6725 void *section;
6726 u32 section_offset;
6727 void __iomem *section_iomem_addr;
6728 };
6729
6730 static inline bool pqi_is_firmware_feature_supported(
6731 struct pqi_config_table_firmware_features *firmware_features,
6732 unsigned int bit_position)
6733 {
6734 unsigned int byte_index;
6735
6736 byte_index = bit_position / BITS_PER_BYTE;
6737
6738 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6739 return false;
6740
6741 return firmware_features->features_supported[byte_index] &
6742 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6743 }
6744
6745 static inline bool pqi_is_firmware_feature_enabled(
6746 struct pqi_config_table_firmware_features *firmware_features,
6747 void __iomem *firmware_features_iomem_addr,
6748 unsigned int bit_position)
6749 {
6750 unsigned int byte_index;
6751 u8 __iomem *features_enabled_iomem_addr;
6752
6753 byte_index = (bit_position / BITS_PER_BYTE) +
6754 (le16_to_cpu(firmware_features->num_elements) * 2);
6755
6756 features_enabled_iomem_addr = firmware_features_iomem_addr +
6757 offsetof(struct pqi_config_table_firmware_features,
6758 features_supported) + byte_index;
6759
6760 return *((__force u8 *)features_enabled_iomem_addr) &
6761 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6762 }
6763
6764 static inline void pqi_request_firmware_feature(
6765 struct pqi_config_table_firmware_features *firmware_features,
6766 unsigned int bit_position)
6767 {
6768 unsigned int byte_index;
6769
6770 byte_index = (bit_position / BITS_PER_BYTE) +
6771 le16_to_cpu(firmware_features->num_elements);
6772
6773 firmware_features->features_supported[byte_index] |=
6774 (1 << (bit_position % BITS_PER_BYTE));
6775 }
6776
6777 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6778 u16 first_section, u16 last_section)
6779 {
6780 struct pqi_vendor_general_request request;
6781
6782 memset(&request, 0, sizeof(request));
6783
6784 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6785 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6786 &request.header.iu_length);
6787 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6788 &request.function_code);
6789 put_unaligned_le16(first_section,
6790 &request.data.config_table_update.first_section);
6791 put_unaligned_le16(last_section,
6792 &request.data.config_table_update.last_section);
6793
6794 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6795 0, NULL, NO_TIMEOUT);
6796 }
6797
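/*
 * Copy the driver's requested feature bits into the "features requested"
 * region of the firmware features section and ask the firmware to re-read
 * that section of the configuration table.
 */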
6798 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6799 struct pqi_config_table_firmware_features *firmware_features,
6800 void __iomem *firmware_features_iomem_addr)
6801 {
6802 void *features_requested;
6803 void __iomem *features_requested_iomem_addr;
6804
6805 features_requested = firmware_features->features_supported +
6806 le16_to_cpu(firmware_features->num_elements);
6807
6808 features_requested_iomem_addr = firmware_features_iomem_addr +
6809 (features_requested - (void *)firmware_features);
6810
6811 memcpy_toio(features_requested_iomem_addr, features_requested,
6812 le16_to_cpu(firmware_features->num_elements));
6813
6814 return pqi_config_table_update(ctrl_info,
6815 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6816 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6817 }
6818
6819 struct pqi_firmware_feature {
6820 char *feature_name;
6821 unsigned int feature_bit;
6822 bool supported;
6823 bool enabled;
6824 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6825 struct pqi_firmware_feature *firmware_feature);
6826 };
6827
6828 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6829 struct pqi_firmware_feature *firmware_feature)
6830 {
6831 if (!firmware_feature->supported) {
6832 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6833 firmware_feature->feature_name);
6834 return;
6835 }
6836
6837 if (firmware_feature->enabled) {
6838 dev_info(&ctrl_info->pci_dev->dev,
6839 "%s enabled\n", firmware_feature->feature_name);
6840 return;
6841 }
6842
6843 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6844 firmware_feature->feature_name);
6845 }
6846
6847 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6848 struct pqi_firmware_feature *firmware_feature)
6849 {
6850 if (firmware_feature->feature_status)
6851 firmware_feature->feature_status(ctrl_info, firmware_feature);
6852 }
6853
6854 static DEFINE_MUTEX(pqi_firmware_features_mutex);
6855
6856 static struct pqi_firmware_feature pqi_firmware_features[] = {
6857 {
6858 .feature_name = "Online Firmware Activation",
6859 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6860 .feature_status = pqi_firmware_feature_status,
6861 },
6862 {
6863 .feature_name = "Serial Management Protocol",
6864 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6865 .feature_status = pqi_firmware_feature_status,
6866 },
6867 {
6868 .feature_name = "New Soft Reset Handshake",
6869 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6870 .feature_status = pqi_firmware_feature_status,
6871 },
6872 };
6873
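/*
 * Walk the driver's firmware feature table, request every feature supported
 * by both driver and firmware, then read back which features the firmware
 * actually enabled and report their status.
 */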
6874 static void pqi_process_firmware_features(
6875 struct pqi_config_table_section_info *section_info)
6876 {
6877 int rc;
6878 struct pqi_ctrl_info *ctrl_info;
6879 struct pqi_config_table_firmware_features *firmware_features;
6880 void __iomem *firmware_features_iomem_addr;
6881 unsigned int i;
6882 unsigned int num_features_supported;
6883
6884 ctrl_info = section_info->ctrl_info;
6885 firmware_features = section_info->section;
6886 firmware_features_iomem_addr = section_info->section_iomem_addr;
6887
6888 for (i = 0, num_features_supported = 0;
6889 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6890 if (pqi_is_firmware_feature_supported(firmware_features,
6891 pqi_firmware_features[i].feature_bit)) {
6892 pqi_firmware_features[i].supported = true;
6893 num_features_supported++;
6894 } else {
6895 pqi_firmware_feature_update(ctrl_info,
6896 &pqi_firmware_features[i]);
6897 }
6898 }
6899
6900 if (num_features_supported == 0)
6901 return;
6902
6903 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6904 if (!pqi_firmware_features[i].supported)
6905 continue;
6906 pqi_request_firmware_feature(firmware_features,
6907 pqi_firmware_features[i].feature_bit);
6908 }
6909
6910 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6911 firmware_features_iomem_addr);
6912 if (rc) {
6913 dev_err(&ctrl_info->pci_dev->dev,
6914 "failed to enable firmware features in PQI configuration table\n");
6915 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6916 if (!pqi_firmware_features[i].supported)
6917 continue;
6918 pqi_firmware_feature_update(ctrl_info,
6919 &pqi_firmware_features[i]);
6920 }
6921 return;
6922 }
6923
6924 ctrl_info->soft_reset_handshake_supported = false;
6925 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6926 if (!pqi_firmware_features[i].supported)
6927 continue;
6928 if (pqi_is_firmware_feature_enabled(firmware_features,
6929 firmware_features_iomem_addr,
6930 pqi_firmware_features[i].feature_bit)) {
6931 pqi_firmware_features[i].enabled = true;
6932 if (pqi_firmware_features[i].feature_bit ==
6933 PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
6934 ctrl_info->soft_reset_handshake_supported =
6935 true;
6936 }
6937 pqi_firmware_feature_update(ctrl_info,
6938 &pqi_firmware_features[i]);
6939 }
6940 }
6941
6942 static void pqi_init_firmware_features(void)
6943 {
6944 unsigned int i;
6945
6946 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6947 pqi_firmware_features[i].supported = false;
6948 pqi_firmware_features[i].enabled = false;
6949 }
6950 }
6951
6952 static void pqi_process_firmware_features_section(
6953 struct pqi_config_table_section_info *section_info)
6954 {
6955 mutex_lock(&pqi_firmware_features_mutex);
6956 pqi_init_firmware_features();
6957 pqi_process_firmware_features(section_info);
6958 mutex_unlock(&pqi_firmware_features_mutex);
6959 }
6960
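/*
 * Copy the PQI configuration table out of the controller's I/O memory space
 * and dispatch each section by ID: firmware features, the heartbeat counter,
 * and the soft reset status register.
 */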
6961 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
6962 {
6963 u32 table_length;
6964 u32 section_offset;
6965 void __iomem *table_iomem_addr;
6966 struct pqi_config_table *config_table;
6967 struct pqi_config_table_section_header *section;
6968 struct pqi_config_table_section_info section_info;
6969
6970 table_length = ctrl_info->config_table_length;
6971 if (table_length == 0)
6972 return 0;
6973
6974 config_table = kmalloc(table_length, GFP_KERNEL);
6975 if (!config_table) {
6976 dev_err(&ctrl_info->pci_dev->dev,
6977 "failed to allocate memory for PQI configuration table\n");
6978 return -ENOMEM;
6979 }
6980
6981 /*
6982 * Copy the config table contents from I/O memory space into the
6983 * temporary buffer.
6984 */
6985 table_iomem_addr = ctrl_info->iomem_base +
6986 ctrl_info->config_table_offset;
6987 memcpy_fromio(config_table, table_iomem_addr, table_length);
6988
6989 section_info.ctrl_info = ctrl_info;
6990 section_offset =
6991 get_unaligned_le32(&config_table->first_section_offset);
6992
6993 while (section_offset) {
6994 section = (void *)config_table + section_offset;
6995
6996 section_info.section = section;
6997 section_info.section_offset = section_offset;
6998 section_info.section_iomem_addr =
6999 table_iomem_addr + section_offset;
7000
7001 switch (get_unaligned_le16(&section->section_id)) {
7002 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7003 pqi_process_firmware_features_section(&section_info);
7004 break;
7005 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7006 if (pqi_disable_heartbeat)
7007 dev_warn(&ctrl_info->pci_dev->dev,
7008 "heartbeat disabled by module parameter\n");
7009 else
7010 ctrl_info->heartbeat_counter =
7011 table_iomem_addr +
7012 section_offset +
7013 offsetof(
7014 struct pqi_config_table_heartbeat,
7015 heartbeat_counter);
7016 break;
7017 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7018 ctrl_info->soft_reset_status =
7019 table_iomem_addr +
7020 section_offset +
7021 offsetof(struct pqi_config_table_soft_reset,
7022 soft_reset_status);
7023 break;
7024 }
7025
7026 section_offset =
7027 get_unaligned_le16(&section->next_section_offset);
7028 }
7029
7030 kfree(config_table);
7031
7032 return 0;
7033 }
7034
7035 /* Switches the controller from PQI mode back into SIS mode. */
7036
7037 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7038 {
7039 int rc;
7040
7041 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7042 rc = pqi_reset(ctrl_info);
7043 if (rc)
7044 return rc;
7045 rc = sis_reenable_sis_mode(ctrl_info);
7046 if (rc) {
7047 dev_err(&ctrl_info->pci_dev->dev,
7048 "re-enabling SIS mode failed with error %d\n", rc);
7049 return rc;
7050 }
7051 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7052
7053 return 0;
7054 }
7055
7056 /*
7057 * If the controller isn't already in SIS mode, this function forces it into
7058 * SIS mode.
7059 */
7060
7061 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7062 {
7063 if (!sis_is_firmware_running(ctrl_info))
7064 return -ENXIO;
7065
7066 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7067 return 0;
7068
7069 if (sis_is_kernel_up(ctrl_info)) {
7070 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7071 return 0;
7072 }
7073
7074 return pqi_revert_to_sis_mode(ctrl_info);
7075 }
7076
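/*
 * One-time controller bring-up: complete the SIS handshake, transition to
 * PQI mode, set up the admin and operational queues and interrupts, then
 * register with the SCSI subsystem and kick off the initial device scan.
 */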
7077 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7078 {
7079 int rc;
7080
7081 rc = pqi_force_sis_mode(ctrl_info);
7082 if (rc)
7083 return rc;
7084
7085 /*
7086 * Wait until the controller is ready to start accepting SIS
7087 * commands.
7088 */
7089 rc = sis_wait_for_ctrl_ready(ctrl_info);
7090 if (rc)
7091 return rc;
7092
7093 /*
7094 * Get the controller properties. This allows us to determine
7095 * whether or not it supports PQI mode.
7096 */
7097 rc = sis_get_ctrl_properties(ctrl_info);
7098 if (rc) {
7099 dev_err(&ctrl_info->pci_dev->dev,
7100 "error obtaining controller properties\n");
7101 return rc;
7102 }
7103
7104 rc = sis_get_pqi_capabilities(ctrl_info);
7105 if (rc) {
7106 dev_err(&ctrl_info->pci_dev->dev,
7107 "error obtaining controller capabilities\n");
7108 return rc;
7109 }
7110
7111 if (reset_devices) {
7112 if (ctrl_info->max_outstanding_requests >
7113 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
7114 ctrl_info->max_outstanding_requests =
7115 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
7116 } else {
7117 if (ctrl_info->max_outstanding_requests >
7118 PQI_MAX_OUTSTANDING_REQUESTS)
7119 ctrl_info->max_outstanding_requests =
7120 PQI_MAX_OUTSTANDING_REQUESTS;
7121 }
7122
7123 pqi_calculate_io_resources(ctrl_info);
7124
7125 rc = pqi_alloc_error_buffer(ctrl_info);
7126 if (rc) {
7127 dev_err(&ctrl_info->pci_dev->dev,
7128 "failed to allocate PQI error buffer\n");
7129 return rc;
7130 }
7131
7132 /*
7133 * If the function we are about to call succeeds, the
7134 * controller will transition from legacy SIS mode
7135 * into PQI mode.
7136 */
7137 rc = sis_init_base_struct_addr(ctrl_info);
7138 if (rc) {
7139 dev_err(&ctrl_info->pci_dev->dev,
7140 "error initializing PQI mode\n");
7141 return rc;
7142 }
7143
7144 /* Wait for the controller to complete the SIS -> PQI transition. */
7145 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7146 if (rc) {
7147 dev_err(&ctrl_info->pci_dev->dev,
7148 "transition to PQI mode failed\n");
7149 return rc;
7150 }
7151
7152 /* From here on, we are running in PQI mode. */
7153 ctrl_info->pqi_mode_enabled = true;
7154 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7155
7156 rc = pqi_alloc_admin_queues(ctrl_info);
7157 if (rc) {
7158 dev_err(&ctrl_info->pci_dev->dev,
7159 "failed to allocate admin queues\n");
7160 return rc;
7161 }
7162
7163 rc = pqi_create_admin_queues(ctrl_info);
7164 if (rc) {
7165 dev_err(&ctrl_info->pci_dev->dev,
7166 "error creating admin queues\n");
7167 return rc;
7168 }
7169
7170 rc = pqi_report_device_capability(ctrl_info);
7171 if (rc) {
7172 dev_err(&ctrl_info->pci_dev->dev,
7173 "obtaining device capability failed\n");
7174 return rc;
7175 }
7176
7177 rc = pqi_validate_device_capability(ctrl_info);
7178 if (rc)
7179 return rc;
7180
7181 pqi_calculate_queue_resources(ctrl_info);
7182
7183 rc = pqi_enable_msix_interrupts(ctrl_info);
7184 if (rc)
7185 return rc;
7186
7187 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7188 ctrl_info->max_msix_vectors =
7189 ctrl_info->num_msix_vectors_enabled;
7190 pqi_calculate_queue_resources(ctrl_info);
7191 }
7192
7193 rc = pqi_alloc_io_resources(ctrl_info);
7194 if (rc)
7195 return rc;
7196
7197 rc = pqi_alloc_operational_queues(ctrl_info);
7198 if (rc) {
7199 dev_err(&ctrl_info->pci_dev->dev,
7200 "failed to allocate operational queues\n");
7201 return rc;
7202 }
7203
7204 pqi_init_operational_queues(ctrl_info);
7205
7206 rc = pqi_request_irqs(ctrl_info);
7207 if (rc)
7208 return rc;
7209
7210 rc = pqi_create_queues(ctrl_info);
7211 if (rc)
7212 return rc;
7213
7214 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7215
7216 ctrl_info->controller_online = true;
7217
7218 rc = pqi_process_config_table(ctrl_info);
7219 if (rc)
7220 return rc;
7221
7222 pqi_start_heartbeat_timer(ctrl_info);
7223
7224 rc = pqi_enable_events(ctrl_info);
7225 if (rc) {
7226 dev_err(&ctrl_info->pci_dev->dev,
7227 "error enabling events\n");
7228 return rc;
7229 }
7230
7231 /* Register with the SCSI subsystem. */
7232 rc = pqi_register_scsi(ctrl_info);
7233 if (rc)
7234 return rc;
7235
7236 rc = pqi_get_ctrl_product_details(ctrl_info);
7237 if (rc) {
7238 dev_err(&ctrl_info->pci_dev->dev,
7239 "error obtaining product details\n");
7240 return rc;
7241 }
7242
7243 rc = pqi_get_ctrl_serial_number(ctrl_info);
7244 if (rc) {
7245 dev_err(&ctrl_info->pci_dev->dev,
7246 "error obtaining ctrl serial number\n");
7247 return rc;
7248 }
7249
7250 rc = pqi_set_diag_rescan(ctrl_info);
7251 if (rc) {
7252 dev_err(&ctrl_info->pci_dev->dev,
7253 "error enabling multi-lun rescan\n");
7254 return rc;
7255 }
7256
7257 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7258 if (rc) {
7259 dev_err(&ctrl_info->pci_dev->dev,
7260 "error updating host wellness\n");
7261 return rc;
7262 }
7263
7264 pqi_schedule_update_time_worker(ctrl_info);
7265
7266 pqi_scan_scsi_devices(ctrl_info);
7267
7268 return 0;
7269 }
7270
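/*
 * Reset the cached producer/consumer indices and the corresponding queue
 * registers so the admin, operational, and event queues can be reused when
 * the controller is reinitialized after a resume or an OFA restart.
 */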
7271 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7272 {
7273 unsigned int i;
7274 struct pqi_admin_queues *admin_queues;
7275 struct pqi_event_queue *event_queue;
7276
7277 admin_queues = &ctrl_info->admin_queues;
7278 admin_queues->iq_pi_copy = 0;
7279 admin_queues->oq_ci_copy = 0;
7280 writel(0, admin_queues->oq_pi);
7281
7282 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7283 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7284 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7285 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7286
7287 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7288 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7289 writel(0, ctrl_info->queue_groups[i].oq_pi);
7290 }
7291
7292 event_queue = &ctrl_info->event_queue;
7293 writel(0, event_queue->oq_pi);
7294 event_queue->oq_ci_copy = 0;
7295 }
7296
7297 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7298 {
7299 int rc;
7300
7301 rc = pqi_force_sis_mode(ctrl_info);
7302 if (rc)
7303 return rc;
7304
7305 /*
7306 * Wait until the controller is ready to start accepting SIS
7307 * commands.
7308 */
7309 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7310 if (rc)
7311 return rc;
7312
7313 /*
7314 * Get the controller properties. This allows us to determine
7315 * whether or not it supports PQI mode.
7316 */
7317 rc = sis_get_ctrl_properties(ctrl_info);
7318 if (rc) {
7319 dev_err(&ctrl_info->pci_dev->dev,
7320 "error obtaining controller properties\n");
7321 return rc;
7322 }
7323
7324 rc = sis_get_pqi_capabilities(ctrl_info);
7325 if (rc) {
7326 dev_err(&ctrl_info->pci_dev->dev,
7327 "error obtaining controller capabilities\n");
7328 return rc;
7329 }
7330
7331 /*
7332 * If the function we are about to call succeeds, the
7333 * controller will transition from legacy SIS mode
7334 * into PQI mode.
7335 */
7336 rc = sis_init_base_struct_addr(ctrl_info);
7337 if (rc) {
7338 dev_err(&ctrl_info->pci_dev->dev,
7339 "error initializing PQI mode\n");
7340 return rc;
7341 }
7342
7343 /* Wait for the controller to complete the SIS -> PQI transition. */
7344 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7345 if (rc) {
7346 dev_err(&ctrl_info->pci_dev->dev,
7347 "transition to PQI mode failed\n");
7348 return rc;
7349 }
7350
7351 /* From here on, we are running in PQI mode. */
7352 ctrl_info->pqi_mode_enabled = true;
7353 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7354
7355 pqi_reinit_queues(ctrl_info);
7356
7357 rc = pqi_create_admin_queues(ctrl_info);
7358 if (rc) {
7359 dev_err(&ctrl_info->pci_dev->dev,
7360 "error creating admin queues\n");
7361 return rc;
7362 }
7363
7364 rc = pqi_create_queues(ctrl_info);
7365 if (rc)
7366 return rc;
7367
7368 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7369
7370 ctrl_info->controller_online = true;
7371 pqi_ctrl_unblock_requests(ctrl_info);
7372
7373 rc = pqi_process_config_table(ctrl_info);
7374 if (rc)
7375 return rc;
7376
7377 pqi_start_heartbeat_timer(ctrl_info);
7378
7379 rc = pqi_enable_events(ctrl_info);
7380 if (rc) {
7381 dev_err(&ctrl_info->pci_dev->dev,
7382 "error enabling events\n");
7383 return rc;
7384 }
7385
7386 rc = pqi_get_ctrl_product_details(ctrl_info);
7387 if (rc) {
7388 dev_err(&ctrl_info->pci_dev->dev,
7389 "error obtaining product detail\n");
7390 return rc;
7391 }
7392
7393 rc = pqi_set_diag_rescan(ctrl_info);
7394 if (rc) {
7395 dev_err(&ctrl_info->pci_dev->dev,
7396 "error enabling multi-lun rescan\n");
7397 return rc;
7398 }
7399
7400 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7401 if (rc) {
7402 dev_err(&ctrl_info->pci_dev->dev,
7403 "error updating host wellness\n");
7404 return rc;
7405 }
7406
7407 pqi_schedule_update_time_worker(ctrl_info);
7408
7409 pqi_scan_scsi_devices(ctrl_info);
7410
7411 return 0;
7412 }
7413
7414 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
7415 u16 timeout)
7416 {
7417 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7418 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
7419 }
7420
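/*
 * Enable the PCI device, set the DMA mask, map the controller registers,
 * and raise the PCIe completion timeout before enabling bus mastering.
 */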
7421 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7422 {
7423 int rc;
7424 u64 mask;
7425
7426 rc = pci_enable_device(ctrl_info->pci_dev);
7427 if (rc) {
7428 dev_err(&ctrl_info->pci_dev->dev,
7429 "failed to enable PCI device\n");
7430 return rc;
7431 }
7432
7433 if (sizeof(dma_addr_t) > 4)
7434 mask = DMA_BIT_MASK(64);
7435 else
7436 mask = DMA_BIT_MASK(32);
7437
7438 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
7439 if (rc) {
7440 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7441 goto disable_device;
7442 }
7443
7444 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
7445 if (rc) {
7446 dev_err(&ctrl_info->pci_dev->dev,
7447 "failed to obtain PCI resources\n");
7448 goto disable_device;
7449 }
7450
7451 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
7452 ctrl_info->pci_dev, 0),
7453 sizeof(struct pqi_ctrl_registers));
7454 if (!ctrl_info->iomem_base) {
7455 dev_err(&ctrl_info->pci_dev->dev,
7456 "failed to map memory for controller registers\n");
7457 rc = -ENOMEM;
7458 goto release_regions;
7459 }
7460
7461 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
7462
7463 /* Increase the PCIe completion timeout. */
7464 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7465 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
7466 if (rc) {
7467 dev_err(&ctrl_info->pci_dev->dev,
7468 "failed to set PCIe completion timeout\n");
7469 goto release_regions;
7470 }
7471
7472 /* Enable bus mastering. */
7473 pci_set_master(ctrl_info->pci_dev);
7474
7475 ctrl_info->registers = ctrl_info->iomem_base;
7476 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7477
7478 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7479
7480 return 0;
7481
7482 release_regions:
7483 pci_release_regions(ctrl_info->pci_dev);
7484 disable_device:
7485 pci_disable_device(ctrl_info->pci_dev);
7486
7487 return rc;
7488 }
7489
7490 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7491 {
7492 iounmap(ctrl_info->iomem_base);
7493 pci_release_regions(ctrl_info->pci_dev);
7494 if (pci_is_enabled(ctrl_info->pci_dev))
7495 pci_disable_device(ctrl_info->pci_dev);
7496 pci_set_drvdata(ctrl_info->pci_dev, NULL);
7497 }
7498
7499 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7500 {
7501 struct pqi_ctrl_info *ctrl_info;
7502
7503 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7504 GFP_KERNEL, numa_node);
7505 if (!ctrl_info)
7506 return NULL;
7507
7508 mutex_init(&ctrl_info->scan_mutex);
7509 mutex_init(&ctrl_info->lun_reset_mutex);
7510 mutex_init(&ctrl_info->ofa_mutex);
7511
7512 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7513 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7514
7515 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7516 atomic_set(&ctrl_info->num_interrupts, 0);
7517
7518 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7519 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7520
7521 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
7522 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
7523
7524 sema_init(&ctrl_info->sync_request_sem,
7525 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7526 init_waitqueue_head(&ctrl_info->block_requests_wait);
7527
7528 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7529 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7530 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7531 pqi_raid_bypass_retry_worker);
7532
7533 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
7534 ctrl_info->irq_mode = IRQ_MODE_NONE;
7535 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7536
7537 return ctrl_info;
7538 }
7539
7540 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7541 {
7542 kfree(ctrl_info);
7543 }
7544
7545 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7546 {
7547 pqi_free_irqs(ctrl_info);
7548 pqi_disable_msix_interrupts(ctrl_info);
7549 }
7550
7551 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7552 {
7553 pqi_stop_heartbeat_timer(ctrl_info);
7554 pqi_free_interrupts(ctrl_info);
7555 if (ctrl_info->queue_memory_base)
7556 dma_free_coherent(&ctrl_info->pci_dev->dev,
7557 ctrl_info->queue_memory_length,
7558 ctrl_info->queue_memory_base,
7559 ctrl_info->queue_memory_base_dma_handle);
7560 if (ctrl_info->admin_queue_memory_base)
7561 dma_free_coherent(&ctrl_info->pci_dev->dev,
7562 ctrl_info->admin_queue_memory_length,
7563 ctrl_info->admin_queue_memory_base,
7564 ctrl_info->admin_queue_memory_base_dma_handle);
7565 pqi_free_all_io_requests(ctrl_info);
7566 if (ctrl_info->error_buffer)
7567 dma_free_coherent(&ctrl_info->pci_dev->dev,
7568 ctrl_info->error_buffer_length,
7569 ctrl_info->error_buffer,
7570 ctrl_info->error_buffer_dma_handle);
7571 if (ctrl_info->iomem_base)
7572 pqi_cleanup_pci_init(ctrl_info);
7573 pqi_free_ctrl_info(ctrl_info);
7574 }
7575
7576 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7577 {
7578 pqi_cancel_rescan_worker(ctrl_info);
7579 pqi_cancel_update_time_worker(ctrl_info);
7580 pqi_remove_all_scsi_devices(ctrl_info);
7581 pqi_unregister_scsi(ctrl_info);
7582 if (ctrl_info->pqi_mode_enabled)
7583 pqi_revert_to_sis_mode(ctrl_info);
7584 pqi_free_ctrl_resources(ctrl_info);
7585 }
7586
7587 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7588 {
7589 pqi_cancel_update_time_worker(ctrl_info);
7590 pqi_cancel_rescan_worker(ctrl_info);
7591 pqi_wait_until_lun_reset_finished(ctrl_info);
7592 pqi_wait_until_scan_finished(ctrl_info);
7593 pqi_ctrl_ofa_start(ctrl_info);
7594 pqi_ctrl_block_requests(ctrl_info);
7595 pqi_ctrl_wait_until_quiesced(ctrl_info);
7596 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7597 pqi_fail_io_queued_for_all_devices(ctrl_info);
7598 pqi_wait_until_inbound_queues_empty(ctrl_info);
7599 pqi_stop_heartbeat_timer(ctrl_info);
7600 ctrl_info->pqi_mode_enabled = false;
7601 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7602 }
7603
7604 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7605 {
7606 pqi_ofa_free_host_buffer(ctrl_info);
7607 ctrl_info->pqi_mode_enabled = true;
7608 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7609 ctrl_info->controller_online = true;
7610 pqi_ctrl_unblock_requests(ctrl_info);
7611 pqi_start_heartbeat_timer(ctrl_info);
7612 pqi_schedule_update_time_worker(ctrl_info);
7613 pqi_clear_soft_reset_status(ctrl_info,
7614 PQI_SOFT_RESET_ABORT);
7615 pqi_scan_scsi_devices(ctrl_info);
7616 }
7617
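/*
 * Allocate the OFA host buffer as a series of DMA-coherent chunks and
 * describe each chunk with an SG descriptor in the OFA memory structure.
 */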
7618 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7619 u32 total_size, u32 chunk_size)
7620 {
7621 u32 sg_count;
7622 u32 size;
7623 int i;
7624 struct pqi_sg_descriptor *mem_descriptor = NULL;
7625 struct device *dev;
7626 struct pqi_ofa_memory *ofap;
7627
7628 dev = &ctrl_info->pci_dev->dev;
7629
7630 sg_count = (total_size + chunk_size - 1);
7631 sg_count /= chunk_size;
7632
7633 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7634
7635 if (sg_count * chunk_size < total_size)
7636 goto out;
7637
7638 ctrl_info->pqi_ofa_chunk_virt_addr =
7639 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7640 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7641 goto out;
7642
7643 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7644 dma_addr_t dma_handle;
7645
7646 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7647 dma_alloc_coherent(dev, chunk_size, &dma_handle,
7648 GFP_KERNEL);
7649
7650 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7651 break;
7652
7653 mem_descriptor = &ofap->sg_descriptor[i];
7654 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
7655 put_unaligned_le32(chunk_size, &mem_descriptor->length);
7656 }
7657
7658 if (!size || size < total_size)
7659 goto out_free_chunks;
7660
7661 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7662 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7663 put_unaligned_le32(size, &ofap->bytes_allocated);
7664
7665 return 0;
7666
7667 out_free_chunks:
7668 while (--i >= 0) {
7669 mem_descriptor = &ofap->sg_descriptor[i];
7670 dma_free_coherent(dev, chunk_size,
7671 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7672 get_unaligned_le64(&mem_descriptor->address));
7673 }
7674 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7675
7676 out:
7677 put_unaligned_le32(0, &ofap->bytes_allocated);
7678 return -ENOMEM;
7679 }
7680
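/*
 * Try progressively smaller chunk sizes, halving on each attempt, until
 * the requested OFA buffer is successfully allocated or the minimum chunk
 * size allowed by the SG descriptor count is reached.
 */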
7681 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7682 {
7683 u32 total_size;
7684 u32 min_chunk_size;
7685 u32 chunk_sz;
7686
7687 total_size = le32_to_cpu(
7688 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7689 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
7690
7691 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7692 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7693 return 0;
7694
7695 return -ENOMEM;
7696 }
7697
7698 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7699 u32 bytes_requested)
7700 {
7701 struct pqi_ofa_memory *pqi_ofa_memory;
7702 struct device *dev;
7703
7704 dev = &ctrl_info->pci_dev->dev;
7705 pqi_ofa_memory = dma_alloc_coherent(dev,
7706 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7707 &ctrl_info->pqi_ofa_mem_dma_handle,
7708 GFP_KERNEL);
7709
7710 if (!pqi_ofa_memory)
7711 return;
7712
7713 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7714 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7715 sizeof(pqi_ofa_memory->signature));
7716 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7717
7718 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7719
7720 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7721 dev_err(dev, "Failed to allocate host buffer of size = %u",
7722 bytes_requested);
7723 }
7724 }
7725
7726 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7727 {
7728 int i;
7729 struct pqi_sg_descriptor *mem_descriptor;
7730 struct pqi_ofa_memory *ofap;
7731
7732 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7733
7734 if (!ofap)
7735 return;
7736
7737 if (!ofap->bytes_allocated)
7738 goto out;
7739
7740 mem_descriptor = ofap->sg_descriptor;
7741
7742 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7743 i++) {
7744 dma_free_coherent(&ctrl_info->pci_dev->dev,
7745 get_unaligned_le32(&mem_descriptor[i].length),
7746 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7747 get_unaligned_le64(&mem_descriptor[i].address));
7748 }
7749 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7750
7751 out:
7752 dma_free_coherent(&ctrl_info->pci_dev->dev,
7753 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7754 ctrl_info->pqi_ofa_mem_dma_handle);
7755 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7756 }
7757
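/*
 * Tell the controller where the OFA host buffer resides (or that no buffer
 * is available) using a vendor-general host memory update request.
 */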
7758 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7759 {
7760 struct pqi_vendor_general_request request;
7761 size_t size;
7762 struct pqi_ofa_memory *ofap;
7763
7764 memset(&request, 0, sizeof(request));
7765
7766 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7767
7768 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7769 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7770 &request.header.iu_length);
7771 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7772 &request.function_code);
7773
7774 if (ofap) {
7775 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7776 get_unaligned_le16(&ofap->num_memory_descriptors) *
7777 sizeof(struct pqi_sg_descriptor);
7778
7779 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7780 &request.data.ofa_memory_allocation.buffer_address);
7781 put_unaligned_le32(size,
7782 &request.data.ofa_memory_allocation.buffer_length);
7783
7784 }
7785
7786 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7787 0, NULL, NO_TIMEOUT);
7788 }
7789
7790 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
7791
7792 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7793 {
7794 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7795 return pqi_ctrl_init_resume(ctrl_info);
7796 }
7797
7798 static void pqi_perform_lockup_action(void)
7799 {
7800 switch (pqi_lockup_action) {
7801 case PANIC:
7802 panic("FATAL: Smart Family Controller lockup detected");
7803 break;
7804 case REBOOT:
7805 emergency_restart();
7806 break;
7807 case NONE:
7808 default:
7809 break;
7810 }
7811 }
7812
7813 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7814 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7815 .status = SAM_STAT_CHECK_CONDITION,
7816 };
7817
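/*
 * Complete every outstanding request with an error so that in-flight I/O
 * is returned to its submitter when the controller is taken offline.
 */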
7818 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
7819 {
7820 unsigned int i;
7821 struct pqi_io_request *io_request;
7822 struct scsi_cmnd *scmd;
7823
7824 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7825 io_request = &ctrl_info->io_request_pool[i];
7826 if (atomic_read(&io_request->refcount) == 0)
7827 continue;
7828
7829 scmd = io_request->scmd;
7830 if (scmd) {
7831 set_host_byte(scmd, DID_NO_CONNECT);
7832 } else {
7833 io_request->status = -ENXIO;
7834 io_request->error_info =
7835 &pqi_ctrl_offline_raid_error_info;
7836 }
7837
7838 io_request->io_complete_callback(io_request,
7839 io_request->context);
7840 }
7841 }
7842
7843 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
7844 {
7845 pqi_perform_lockup_action();
7846 pqi_stop_heartbeat_timer(ctrl_info);
7847 pqi_free_interrupts(ctrl_info);
7848 pqi_cancel_rescan_worker(ctrl_info);
7849 pqi_cancel_update_time_worker(ctrl_info);
7850 pqi_ctrl_wait_until_quiesced(ctrl_info);
7851 pqi_fail_all_outstanding_requests(ctrl_info);
7852 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7853 pqi_ctrl_unblock_requests(ctrl_info);
7854 }
7855
7856 static void pqi_ctrl_offline_worker(struct work_struct *work)
7857 {
7858 struct pqi_ctrl_info *ctrl_info;
7859
7860 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7861 pqi_take_ctrl_offline_deferred(ctrl_info);
7862 }
7863
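/*
 * Mark the controller offline and block new requests immediately; the
 * heavier cleanup is deferred to pqi_ctrl_offline_worker().
 */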
7864 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7865 {
7866 if (!ctrl_info->controller_online)
7867 return;
7868
7869 ctrl_info->controller_online = false;
7870 ctrl_info->pqi_mode_enabled = false;
7871 pqi_ctrl_block_requests(ctrl_info);
7872 if (!pqi_disable_ctrl_shutdown)
7873 sis_shutdown_ctrl(ctrl_info);
7874 pci_disable_device(ctrl_info->pci_dev);
7875 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
7876 schedule_work(&ctrl_info->ctrl_offline_work);
7877 }
7878
7879 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
7880 const struct pci_device_id *id)
7881 {
7882 char *ctrl_description;
7883
7884 if (id->driver_data)
7885 ctrl_description = (char *)id->driver_data;
7886 else
7887 ctrl_description = "Microsemi Smart Family Controller";
7888
7889 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
7890 }
7891
7892 static int pqi_pci_probe(struct pci_dev *pci_dev,
7893 const struct pci_device_id *id)
7894 {
7895 int rc;
7896 int node, cp_node;
7897 struct pqi_ctrl_info *ctrl_info;
7898
7899 pqi_print_ctrl_info(pci_dev, id);
7900
7901 if (pqi_disable_device_id_wildcards &&
7902 id->subvendor == PCI_ANY_ID &&
7903 id->subdevice == PCI_ANY_ID) {
7904 dev_warn(&pci_dev->dev,
7905 "controller not probed because device ID wildcards are disabled\n");
7906 return -ENODEV;
7907 }
7908
7909 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
7910 dev_warn(&pci_dev->dev,
7911 "controller device ID matched using wildcards\n");
7912
7913 node = dev_to_node(&pci_dev->dev);
7914 if (node == NUMA_NO_NODE) {
7915 cp_node = cpu_to_node(0);
7916 if (cp_node == NUMA_NO_NODE)
7917 cp_node = 0;
7918 set_dev_node(&pci_dev->dev, cp_node);
7919 }
7920
7921 ctrl_info = pqi_alloc_ctrl_info(node);
7922 if (!ctrl_info) {
7923 dev_err(&pci_dev->dev,
7924 "failed to allocate controller info block\n");
7925 return -ENOMEM;
7926 }
7927
7928 ctrl_info->pci_dev = pci_dev;
7929
7930 rc = pqi_pci_init(ctrl_info);
7931 if (rc)
7932 goto error;
7933
7934 rc = pqi_ctrl_init(ctrl_info);
7935 if (rc)
7936 goto error;
7937
7938 return 0;
7939
7940 error:
7941 pqi_remove_ctrl(ctrl_info);
7942
7943 return rc;
7944 }
7945
7946 static void pqi_pci_remove(struct pci_dev *pci_dev)
7947 {
7948 struct pqi_ctrl_info *ctrl_info;
7949
7950 ctrl_info = pci_get_drvdata(pci_dev);
7951 if (!ctrl_info)
7952 return;
7953
7954 ctrl_info->in_shutdown = true;
7955
7956 pqi_remove_ctrl(ctrl_info);
7957 }
7958
7959 static void pqi_shutdown(struct pci_dev *pci_dev)
7960 {
7961 int rc;
7962 struct pqi_ctrl_info *ctrl_info;
7963
7964 ctrl_info = pci_get_drvdata(pci_dev);
7965 if (!ctrl_info)
7966 goto error;
7967
7968 /*
7969 * Write all data in the controller's battery-backed cache to
7970 * storage.
7971 */
7972 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
7973 pqi_free_interrupts(ctrl_info);
7974 pqi_reset(ctrl_info);
7975 if (rc == 0)
7976 return;
7977
7978 error:
7979 dev_warn(&pci_dev->dev,
7980 "unable to flush controller cache\n");
7981 }
7982
7983 static void pqi_process_lockup_action_param(void)
7984 {
7985 unsigned int i;
7986
7987 if (!pqi_lockup_action_param)
7988 return;
7989
7990 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7991 if (strcmp(pqi_lockup_action_param,
7992 pqi_lockup_actions[i].name) == 0) {
7993 pqi_lockup_action = pqi_lockup_actions[i].action;
7994 return;
7995 }
7996 }
7997
7998 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
7999 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8000 }
8001
8002 static void pqi_process_module_params(void)
8003 {
8004 pqi_process_lockup_action_param();
8005 }
8006
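/*
 * Quiesce the controller and flush its battery-backed cache before the
 * system enters a sleep state.
 */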
8007 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
8008 {
8009 struct pqi_ctrl_info *ctrl_info;
8010
8011 ctrl_info = pci_get_drvdata(pci_dev);
8012
8013 pqi_disable_events(ctrl_info);
8014 pqi_cancel_update_time_worker(ctrl_info);
8015 pqi_cancel_rescan_worker(ctrl_info);
8016 pqi_wait_until_scan_finished(ctrl_info);
8017 pqi_wait_until_lun_reset_finished(ctrl_info);
8018 pqi_wait_until_ofa_finished(ctrl_info);
8019 pqi_flush_cache(ctrl_info, SUSPEND);
8020 pqi_ctrl_block_requests(ctrl_info);
8021 pqi_ctrl_wait_until_quiesced(ctrl_info);
8022 pqi_wait_until_inbound_queues_empty(ctrl_info);
8023 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8024 pqi_stop_heartbeat_timer(ctrl_info);
8025
8026 if (state.event == PM_EVENT_FREEZE)
8027 return 0;
8028
8029 pci_save_state(pci_dev);
8030 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
8031
8032 ctrl_info->controller_online = false;
8033 ctrl_info->pqi_mode_enabled = false;
8034
8035 return 0;
8036 }
8037
8038 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
8039 {
8040 int rc;
8041 struct pqi_ctrl_info *ctrl_info;
8042
8043 ctrl_info = pci_get_drvdata(pci_dev);
8044
8045 if (pci_dev->current_state != PCI_D0) {
8046 ctrl_info->max_hw_queue_index = 0;
8047 pqi_free_interrupts(ctrl_info);
8048 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
8049 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
8050 IRQF_SHARED, DRIVER_NAME_SHORT,
8051 &ctrl_info->queue_groups[0]);
8052 if (rc) {
8053 dev_err(&ctrl_info->pci_dev->dev,
8054 "irq %u init failed with error %d\n",
8055 pci_dev->irq, rc);
8056 return rc;
8057 }
8058 pqi_start_heartbeat_timer(ctrl_info);
8059 pqi_ctrl_unblock_requests(ctrl_info);
8060 return 0;
8061 }
8062
8063 pci_set_power_state(pci_dev, PCI_D0);
8064 pci_restore_state(pci_dev);
8065
8066 return pqi_ctrl_init_resume(ctrl_info);
8067 }
8068
8069 /* Define the PCI IDs for the controllers that we support. */
8070 static const struct pci_device_id pqi_pci_id_table[] = {
8071 {
8072 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8073 0x105b, 0x1211)
8074 },
8075 {
8076 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8077 0x105b, 0x1321)
8078 },
8079 {
8080 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8081 0x152d, 0x8a22)
8082 },
8083 {
8084 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8085 0x152d, 0x8a23)
8086 },
8087 {
8088 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8089 0x152d, 0x8a24)
8090 },
8091 {
8092 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8093 0x152d, 0x8a36)
8094 },
8095 {
8096 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8097 0x152d, 0x8a37)
8098 },
8099 {
8100 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8101 0x193d, 0x1104)
8102 },
8103 {
8104 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8105 0x193d, 0x1105)
8106 },
8107 {
8108 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8109 0x193d, 0x1106)
8110 },
8111 {
8112 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8113 0x193d, 0x1107)
8114 },
8115 {
8116 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8117 0x193d, 0x8460)
8118 },
8119 {
8120 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8121 0x193d, 0x8461)
8122 },
8123 {
8124 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8125 0x193d, 0xc460)
8126 },
8127 {
8128 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8129 0x193d, 0xc461)
8130 },
8131 {
8132 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8133 0x193d, 0xf460)
8134 },
8135 {
8136 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8137 0x193d, 0xf461)
8138 },
8139 {
8140 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8141 0x1bd4, 0x0045)
8142 },
8143 {
8144 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8145 0x1bd4, 0x0046)
8146 },
8147 {
8148 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8149 0x1bd4, 0x0047)
8150 },
8151 {
8152 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8153 0x1bd4, 0x0048)
8154 },
8155 {
8156 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8157 0x1bd4, 0x004a)
8158 },
8159 {
8160 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8161 0x1bd4, 0x004b)
8162 },
8163 {
8164 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8165 0x1bd4, 0x004c)
8166 },
8167 {
8168 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8169 0x1bd4, 0x004f)
8170 },
8171 {
8172 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8173 0x19e5, 0xd227)
8174 },
8175 {
8176 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8177 0x19e5, 0xd228)
8178 },
8179 {
8180 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8181 0x19e5, 0xd229)
8182 },
8183 {
8184 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8185 0x19e5, 0xd22a)
8186 },
8187 {
8188 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8189 0x19e5, 0xd22b)
8190 },
8191 {
8192 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8193 0x19e5, 0xd22c)
8194 },
8195 {
8196 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8197 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
8198 },
8199 {
8200 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8201 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
8202 },
8203 {
8204 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8205 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
8206 },
8207 {
8208 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8209 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
8210 },
8211 {
8212 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8213 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
8214 },
8215 {
8216 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8217 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
8218 },
8219 {
8220 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8221 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
8222 },
8223 {
8224 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8225 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
8226 },
8227 {
8228 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8229 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
8230 },
8231 {
8232 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8233 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8234 },
8235 {
8236 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8237 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
8238 },
8239 {
8240 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8241 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
8242 },
8243 {
8244 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8245 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8246 },
8247 {
8248 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8249 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
8250 },
8251 {
8252 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8253 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
8254 },
8255 {
8256 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8257 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
8258 },
8259 {
8260 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8261 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
8262 },
8263 {
8264 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8265 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
8266 },
8267 {
8268 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8269 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
8270 },
8271 {
8272 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8273 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
8274 },
8275 {
8276 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8277 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
8278 },
8279 {
8280 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8281 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
8282 },
8283 {
8284 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8285 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
8286 },
8287 {
8288 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8289 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
8290 },
8291 {
8292 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8293 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
8294 },
8295 {
8296 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8297 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
8298 },
8299 {
8300 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8301 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
8302 },
8303 {
8304 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8305 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
8306 },
8307 {
8308 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8309 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
8310 },
8311 {
8312 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8313 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
8314 },
8315 {
8316 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8317 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
8318 },
8319 {
8320 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8321 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
8322 },
8323 {
8324 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8325 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
8326 },
8327 {
8328 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8329 PCI_VENDOR_ID_ADVANTECH, 0x8312)
8330 },
8331 {
8332 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8333 PCI_VENDOR_ID_DELL, 0x1fe0)
8334 },
8335 {
8336 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8337 PCI_VENDOR_ID_HP, 0x0600)
8338 },
8339 {
8340 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8341 PCI_VENDOR_ID_HP, 0x0601)
8342 },
8343 {
8344 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8345 PCI_VENDOR_ID_HP, 0x0602)
8346 },
8347 {
8348 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8349 PCI_VENDOR_ID_HP, 0x0603)
8350 },
8351 {
8352 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8353 PCI_VENDOR_ID_HP, 0x0609)
8354 },
8355 {
8356 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8357 PCI_VENDOR_ID_HP, 0x0650)
8358 },
8359 {
8360 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8361 PCI_VENDOR_ID_HP, 0x0651)
8362 },
8363 {
8364 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8365 PCI_VENDOR_ID_HP, 0x0652)
8366 },
8367 {
8368 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8369 PCI_VENDOR_ID_HP, 0x0653)
8370 },
8371 {
8372 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8373 PCI_VENDOR_ID_HP, 0x0654)
8374 },
8375 {
8376 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8377 PCI_VENDOR_ID_HP, 0x0655)
8378 },
8379 {
8380 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8381 PCI_VENDOR_ID_HP, 0x0700)
8382 },
8383 {
8384 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8385 PCI_VENDOR_ID_HP, 0x0701)
8386 },
8387 {
8388 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8389 PCI_VENDOR_ID_HP, 0x1001)
8390 },
8391 {
8392 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8393 PCI_VENDOR_ID_HP, 0x1100)
8394 },
8395 {
8396 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8397 PCI_VENDOR_ID_HP, 0x1101)
8398 },
8399 {
8400 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8401 0x1d8d, 0x0800)
8402 },
8403 {
8404 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8405 0x1d8d, 0x0908)
8406 },
8407 {
8408 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8409 0x1d8d, 0x0806)
8410 },
8411 {
8412 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8413 0x1d8d, 0x0916)
8414 },
8415 {
8416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8417 PCI_VENDOR_ID_GIGABYTE, 0x1000)
8418 },
8419 {
8420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8421 PCI_ANY_ID, PCI_ANY_ID)
8422 },
8423 { 0 }
8424 };
8425
8426 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
8427
8428 static struct pci_driver pqi_pci_driver = {
8429 .name = DRIVER_NAME_SHORT,
8430 .id_table = pqi_pci_id_table,
8431 .probe = pqi_pci_probe,
8432 .remove = pqi_pci_remove,
8433 .shutdown = pqi_shutdown,
8434 #if defined(CONFIG_PM)
8435 .suspend = pqi_suspend,
8436 .resume = pqi_resume,
8437 #endif
8438 };
8439
8440 static int __init pqi_init(void)
8441 {
8442 int rc;
8443
8444 pr_info(DRIVER_NAME "\n");
8445
8446 pqi_sas_transport_template =
8447 sas_attach_transport(&pqi_sas_transport_functions);
8448 if (!pqi_sas_transport_template)
8449 return -ENODEV;
8450
8451 pqi_process_module_params();
8452
8453 rc = pci_register_driver(&pqi_pci_driver);
8454 if (rc)
8455 sas_release_transport(pqi_sas_transport_template);
8456
8457 return rc;
8458 }
8459
8460 static void __exit pqi_cleanup(void)
8461 {
8462 pci_unregister_driver(&pqi_pci_driver);
8463 sas_release_transport(pqi_sas_transport_template);
8464 }
8465
8466 module_init(pqi_init);
8467 module_exit(pqi_cleanup);
8468
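/*
 * Compile-time checks that the driver's structure layouts match the byte
 * offsets expected by the controller interface.
 */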
8469 static void __attribute__((unused)) verify_structures(void)
8470 {
8471 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8472 sis_host_to_ctrl_doorbell) != 0x20);
8473 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8474 sis_interrupt_mask) != 0x34);
8475 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8476 sis_ctrl_to_host_doorbell) != 0x9c);
8477 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8478 sis_ctrl_to_host_doorbell_clear) != 0xa0);
8479 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8480 sis_driver_scratch) != 0xb0);
8481 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8482 sis_firmware_status) != 0xbc);
8483 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8484 sis_mailbox) != 0x1000);
8485 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8486 pqi_registers) != 0x4000);
8487
8488 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8489 iu_type) != 0x0);
8490 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8491 iu_length) != 0x2);
8492 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8493 response_queue_id) != 0x4);
8494 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8495 work_area) != 0x6);
8496 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
8497
8498 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8499 status) != 0x0);
8500 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8501 service_response) != 0x1);
8502 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8503 data_present) != 0x2);
8504 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8505 reserved) != 0x3);
8506 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8507 residual_count) != 0x4);
8508 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8509 data_length) != 0x8);
8510 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8511 reserved1) != 0xa);
8512 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8513 data) != 0xc);
8514 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
8515
8516 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8517 data_in_result) != 0x0);
8518 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8519 data_out_result) != 0x1);
8520 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8521 reserved) != 0x2);
8522 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8523 status) != 0x5);
8524 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8525 status_qualifier) != 0x6);
8526 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8527 sense_data_length) != 0x8);
8528 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8529 response_data_length) != 0xa);
8530 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8531 data_in_transferred) != 0xc);
8532 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8533 data_out_transferred) != 0x10);
8534 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8535 data) != 0x14);
8536 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
8537
8538 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8539 signature) != 0x0);
8540 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8541 function_and_status_code) != 0x8);
8542 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8543 max_admin_iq_elements) != 0x10);
8544 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8545 max_admin_oq_elements) != 0x11);
8546 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8547 admin_iq_element_length) != 0x12);
8548 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8549 admin_oq_element_length) != 0x13);
8550 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8551 max_reset_timeout) != 0x14);
8552 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8553 legacy_intx_status) != 0x18);
8554 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8555 legacy_intx_mask_set) != 0x1c);
8556 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8557 legacy_intx_mask_clear) != 0x20);
8558 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8559 device_status) != 0x40);
8560 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8561 admin_iq_pi_offset) != 0x48);
8562 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8563 admin_oq_ci_offset) != 0x50);
8564 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8565 admin_iq_element_array_addr) != 0x58);
8566 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8567 admin_oq_element_array_addr) != 0x60);
8568 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8569 admin_iq_ci_addr) != 0x68);
8570 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8571 admin_oq_pi_addr) != 0x70);
8572 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8573 admin_iq_num_elements) != 0x78);
8574 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8575 admin_oq_num_elements) != 0x79);
8576 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8577 admin_queue_int_msg_num) != 0x7a);
8578 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8579 device_error) != 0x80);
8580 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8581 error_details) != 0x88);
8582 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8583 device_reset) != 0x90);
8584 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8585 power_action) != 0x94);
8586 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
8587
8588 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8589 header.iu_type) != 0);
8590 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8591 header.iu_length) != 2);
8592 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8593 header.work_area) != 6);
8594 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8595 request_id) != 8);
8596 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8597 function_code) != 10);
8598 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8599 data.report_device_capability.buffer_length) != 44);
8600 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8601 data.report_device_capability.sg_descriptor) != 48);
8602 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8603 data.create_operational_iq.queue_id) != 12);
8604 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8605 data.create_operational_iq.element_array_addr) != 16);
8606 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8607 data.create_operational_iq.ci_addr) != 24);
8608 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8609 data.create_operational_iq.num_elements) != 32);
8610 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8611 data.create_operational_iq.element_length) != 34);
8612 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8613 data.create_operational_iq.queue_protocol) != 36);
8614 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8615 data.create_operational_oq.queue_id) != 12);
8616 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8617 data.create_operational_oq.element_array_addr) != 16);
8618 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8619 data.create_operational_oq.pi_addr) != 24);
8620 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8621 data.create_operational_oq.num_elements) != 32);
8622 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8623 data.create_operational_oq.element_length) != 34);
8624 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8625 data.create_operational_oq.queue_protocol) != 36);
8626 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8627 data.create_operational_oq.int_msg_num) != 40);
8628 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8629 data.create_operational_oq.coalescing_count) != 42);
8630 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8631 data.create_operational_oq.min_coalescing_time) != 44);
8632 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8633 data.create_operational_oq.max_coalescing_time) != 48);
8634 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8635 data.delete_operational_queue.queue_id) != 12);
8636 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
8637 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8638 data.create_operational_iq) != 64 - 11);
8639 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8640 data.create_operational_oq) != 64 - 11);
8641 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8642 data.delete_operational_queue) != 64 - 11);
8643
8644 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8645 header.iu_type) != 0);
8646 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8647 header.iu_length) != 2);
8648 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8649 header.work_area) != 6);
8650 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8651 request_id) != 8);
8652 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8653 function_code) != 10);
8654 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8655 status) != 11);
8656 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8657 data.create_operational_iq.status_descriptor) != 12);
8658 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8659 data.create_operational_iq.iq_pi_offset) != 16);
8660 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8661 data.create_operational_oq.status_descriptor) != 12);
8662 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8663 data.create_operational_oq.oq_ci_offset) != 16);
8664 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
8665
8666 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8667 header.iu_type) != 0);
8668 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8669 header.iu_length) != 2);
8670 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8671 header.response_queue_id) != 4);
8672 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8673 header.work_area) != 6);
8674 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8675 request_id) != 8);
8676 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8677 nexus_id) != 10);
8678 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8679 buffer_length) != 12);
8680 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8681 lun_number) != 16);
8682 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8683 protocol_specific) != 24);
8684 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8685 error_index) != 27);
8686 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8687 cdb) != 32);
8688 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8689 sg_descriptors) != 64);
8690 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
8691 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8692
8693 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8694 header.iu_type) != 0);
8695 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8696 header.iu_length) != 2);
8697 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8698 header.response_queue_id) != 4);
8699 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8700 header.work_area) != 6);
8701 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8702 request_id) != 8);
8703 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8704 nexus_id) != 12);
8705 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8706 buffer_length) != 16);
8707 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8708 data_encryption_key_index) != 22);
8709 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8710 encrypt_tweak_lower) != 24);
8711 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8712 encrypt_tweak_upper) != 28);
8713 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8714 cdb) != 32);
8715 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8716 error_index) != 48);
8717 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8718 num_sg_descriptors) != 50);
8719 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8720 cdb_length) != 51);
8721 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8722 lun_number) != 52);
8723 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8724 sg_descriptors) != 64);
8725 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
8726 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8727
8728 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8729 header.iu_type) != 0);
8730 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8731 header.iu_length) != 2);
8732 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8733 request_id) != 8);
8734 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8735 error_index) != 10);
8736
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

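	/* Verify the IU layer descriptor and device capability layouts. */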
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

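	/* Verify the event descriptor and event configuration layouts. */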
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

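	/* Keep pqi_supported_event_types[] in sync with PQI_NUM_SUPPORTED_EVENTS. */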
	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

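	/* Verify the event response and event acknowledge IU layouts. */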
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

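	/* Verify the task management request and response IU layouts. */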
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

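	/*
	 * Verify the BMIC identify controller and identify physical
	 * device buffer layouts.
	 */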
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

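	/*
	 * Admin queue element counts are limited to 255, all queue element
	 * lengths must be properly aligned, and the operational queue
	 * element lengths must not exceed the PQI maximum.
	 */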
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

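	/* The reserved I/O slots must not use up all outstanding request slots. */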
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
