Lines matching refs:ctrl_info (references to struct pqi_ctrl_info in the smartpqi driver)
55 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
57 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
58 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
60 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
63 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
66 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
70 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
71 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
72 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
73 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
75 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
76 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
77 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
214 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_offline() argument
216 return !ctrl_info->controller_online; in pqi_ctrl_offline()
219 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) in pqi_check_ctrl_health() argument
221 if (ctrl_info->controller_online) in pqi_check_ctrl_health()
222 if (!sis_is_firmware_running(ctrl_info)) in pqi_check_ctrl_health()
223 pqi_take_ctrl_offline(ctrl_info); in pqi_check_ctrl_health()
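A minimal reconstruction of the two health-check helpers from the fragments above, using only names that appear in the listing (controller_online, sis_is_firmware_running(), pqi_take_ctrl_offline()):

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	/* Only poke the firmware while the controller is still believed online. */
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}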
232 struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_mode() argument
234 return sis_read_driver_scratch(ctrl_info); in pqi_get_ctrl_mode()
237 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, in pqi_save_ctrl_mode() argument
240 sis_write_driver_scratch(ctrl_info, mode); in pqi_save_ctrl_mode()
243 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_device_reset() argument
245 ctrl_info->block_device_reset = true; in pqi_ctrl_block_device_reset()
248 static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_device_reset_blocked() argument
250 return ctrl_info->block_device_reset; in pqi_device_reset_blocked()
253 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_blocked() argument
255 return ctrl_info->block_requests; in pqi_ctrl_blocked()
258 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_requests() argument
260 ctrl_info->block_requests = true; in pqi_ctrl_block_requests()
261 scsi_block_requests(ctrl_info->scsi_host); in pqi_ctrl_block_requests()
264 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_requests() argument
266 ctrl_info->block_requests = false; in pqi_ctrl_unblock_requests()
267 wake_up_all(&ctrl_info->block_requests_wait); in pqi_ctrl_unblock_requests()
268 pqi_retry_raid_bypass_requests(ctrl_info); in pqi_ctrl_unblock_requests()
269 scsi_unblock_requests(ctrl_info->scsi_host); in pqi_ctrl_unblock_requests()
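The request-gating pair reconstructs directly from the fragments above: blocking sets a driver flag and stops the SCSI host, while unblocking clears the flag, wakes waiters on block_requests_wait, requeues RAID-bypass retries, and reopens the host.

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}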
272 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info, in pqi_wait_if_ctrl_blocked() argument
277 if (!pqi_ctrl_blocked(ctrl_info)) in pqi_wait_if_ctrl_blocked()
280 atomic_inc(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
283 wait_event(ctrl_info->block_requests_wait, in pqi_wait_if_ctrl_blocked()
284 !pqi_ctrl_blocked(ctrl_info)); in pqi_wait_if_ctrl_blocked()
290 wait_event_timeout(ctrl_info->block_requests_wait, in pqi_wait_if_ctrl_blocked()
291 !pqi_ctrl_blocked(ctrl_info), in pqi_wait_if_ctrl_blocked()
296 atomic_dec(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
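A sketch of the wait helper those fragments belong to. The NO_TIMEOUT branch and the conversion of the unused time back to milliseconds are not visible in the listing and are assumptions here:

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		/* Wait indefinitely for the controller to be unblocked. */
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		/* Assumed: hand any unused time back to the caller. */
		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}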
301 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_wait_until_quiesced() argument
303 while (atomic_read(&ctrl_info->num_busy_threads) > in pqi_ctrl_wait_until_quiesced()
304 atomic_read(&ctrl_info->num_blocked_threads)) in pqi_ctrl_wait_until_quiesced()
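The quiesce helper spins until every thread counted as busy is also counted as blocked. Its loop body is not shown; the short sleep between polls below is an assumption:

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);	/* assumed poll interval */
}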
328 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_ofa_start() argument
330 ctrl_info->in_ofa = true; in pqi_ctrl_ofa_start()
333 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_ofa_done() argument
335 ctrl_info->in_ofa = false; in pqi_ctrl_ofa_done()
338 static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_in_ofa() argument
340 return ctrl_info->in_ofa; in pqi_ctrl_in_ofa()
348 static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, in pqi_device_in_remove() argument
351 return device->in_remove && !ctrl_info->in_shutdown; in pqi_device_in_remove()
354 static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_shutdown_start() argument
356 ctrl_info->in_shutdown = true; in pqi_ctrl_shutdown_start()
359 static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_in_shutdown() argument
361 return ctrl_info->in_shutdown; in pqi_ctrl_in_shutdown()
365 struct pqi_ctrl_info *ctrl_info, unsigned long delay) in pqi_schedule_rescan_worker_with_delay() argument
367 if (pqi_ctrl_offline(ctrl_info)) in pqi_schedule_rescan_worker_with_delay()
369 if (pqi_ctrl_in_ofa(ctrl_info)) in pqi_schedule_rescan_worker_with_delay()
372 schedule_delayed_work(&ctrl_info->rescan_work, delay); in pqi_schedule_rescan_worker_with_delay()
375 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_rescan_worker() argument
377 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); in pqi_schedule_rescan_worker()
383 struct pqi_ctrl_info *ctrl_info) in pqi_schedule_rescan_worker_delayed() argument
385 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); in pqi_schedule_rescan_worker_delayed()
388 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) in pqi_cancel_rescan_worker() argument
390 cancel_delayed_work_sync(&ctrl_info->rescan_work); in pqi_cancel_rescan_worker()
393 static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info) in pqi_cancel_event_worker() argument
395 cancel_work_sync(&ctrl_info->event_work); in pqi_cancel_event_worker()
398 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) in pqi_read_heartbeat_counter() argument
400 if (!ctrl_info->heartbeat_counter) in pqi_read_heartbeat_counter()
403 return readl(ctrl_info->heartbeat_counter); in pqi_read_heartbeat_counter()
406 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) in pqi_read_soft_reset_status() argument
408 if (!ctrl_info->soft_reset_status) in pqi_read_soft_reset_status()
411 return readb(ctrl_info->soft_reset_status); in pqi_read_soft_reset_status()
414 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info, in pqi_clear_soft_reset_status() argument
419 if (!ctrl_info->soft_reset_status) in pqi_clear_soft_reset_status()
422 status = pqi_read_soft_reset_status(ctrl_info); in pqi_clear_soft_reset_status()
424 writeb(status, ctrl_info->soft_reset_status); in pqi_clear_soft_reset_status()
464 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, in pqi_build_raid_path_request() argument
545 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd); in pqi_build_raid_path_request()
564 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], in pqi_build_raid_path_request()
577 struct pqi_ctrl_info *ctrl_info) in pqi_alloc_io_request() argument
580 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ in pqi_alloc_io_request()
583 io_request = &ctrl_info->io_request_pool[i]; in pqi_alloc_io_request()
587 i = (i + 1) % ctrl_info->max_io_slots; in pqi_alloc_io_request()
591 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; in pqi_alloc_io_request()
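pqi_alloc_io_request() scans the io_request_pool round-robin from a deliberately racy cursor. The per-slot claim test is not in the listing; the sketch below assumes a refcount field where the first thread to raise it from 0 to 1 owns the slot:

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		/* Assumed claim test: winner takes the slot, losers move on. */
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy hint for the next allocation */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	return io_request;
}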
603 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, in pqi_send_scsi_raid_request() argument
611 rc = pqi_build_raid_path_request(ctrl_info, &request, in pqi_send_scsi_raid_request()
617 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, in pqi_send_scsi_raid_request()
620 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_send_scsi_raid_request()
627 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, in pqi_send_ctrl_raid_request() argument
630 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, in pqi_send_ctrl_raid_request()
634 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, in pqi_send_ctrl_raid_with_error() argument
638 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, in pqi_send_ctrl_raid_with_error()
642 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, in pqi_identify_controller() argument
645 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, in pqi_identify_controller()
649 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, in pqi_sense_subsystem_info() argument
652 return pqi_send_ctrl_raid_request(ctrl_info, in pqi_sense_subsystem_info()
657 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_inquiry() argument
660 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, in pqi_scsi_inquiry()
664 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, in pqi_identify_physical_device() argument
673 rc = pqi_build_raid_path_request(ctrl_info, &request, in pqi_identify_physical_device()
683 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, in pqi_identify_physical_device()
686 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_identify_physical_device()
691 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, in pqi_flush_cache() argument
701 if (pqi_ctrl_offline(ctrl_info)) in pqi_flush_cache()
710 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, in pqi_flush_cache()
718 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, in pqi_csmi_smp_passthru() argument
722 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, in pqi_csmi_smp_passthru()
728 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) in pqi_set_diag_rescan() argument
737 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, in pqi_set_diag_rescan()
744 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, in pqi_set_diag_rescan()
753 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, in pqi_write_host_wellness() argument
756 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, in pqi_write_host_wellness()
774 struct pqi_ctrl_info *ctrl_info) in pqi_write_driver_version_to_host_wellness() argument
802 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); in pqi_write_driver_version_to_host_wellness()
823 struct pqi_ctrl_info *ctrl_info) in pqi_write_current_time_to_host_wellness() argument
865 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); in pqi_write_current_time_to_host_wellness()
877 struct pqi_ctrl_info *ctrl_info; in pqi_update_time_worker() local
879 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, in pqi_update_time_worker()
882 if (pqi_ctrl_offline(ctrl_info)) in pqi_update_time_worker()
885 rc = pqi_write_current_time_to_host_wellness(ctrl_info); in pqi_update_time_worker()
887 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_time_worker()
890 schedule_delayed_work(&ctrl_info->update_time_work, in pqi_update_time_worker()
895 struct pqi_ctrl_info *ctrl_info) in pqi_schedule_update_time_worker() argument
897 schedule_delayed_work(&ctrl_info->update_time_work, 0); in pqi_schedule_update_time_worker()
901 struct pqi_ctrl_info *ctrl_info) in pqi_cancel_update_time_worker() argument
903 cancel_delayed_work_sync(&ctrl_info->update_time_work); in pqi_cancel_update_time_worker()
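The host-wellness time update is a self-rescheduling delayed work item: write the current time to the controller, warn on failure, re-arm. The interval constant and the warning text are not visible in the listing and are placeholders here:

#define PQI_UPDATE_TIME_INTERVAL	(24UL * 60 * 60 * HZ)	/* placeholder */

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating controller time\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_INTERVAL);
}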
906 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, in pqi_report_luns() argument
909 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, in pqi_report_luns()
913 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, in pqi_report_phys_logical_luns() argument
929 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, in pqi_report_phys_logical_luns()
950 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); in pqi_report_phys_logical_luns()
976 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, in pqi_report_phys_luns() argument
979 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, in pqi_report_phys_luns()
983 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, in pqi_report_logical_luns() argument
986 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); in pqi_report_logical_luns()
989 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_lists() argument
1000 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); in pqi_get_device_lists()
1002 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1005 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); in pqi_get_device_lists()
1007 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1098 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_level() argument
1109 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_level()
1122 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, in pqi_validate_raid_map() argument
1162 dev_warn(&ctrl_info->pci_dev->dev, in pqi_validate_raid_map()
1170 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_map() argument
1181 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, in pqi_get_raid_map()
1198 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, in pqi_get_raid_map()
1206 dev_warn(&ctrl_info->pci_dev->dev, in pqi_get_raid_map()
1214 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); in pqi_get_raid_map()
1228 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_bypass_status() argument
1239 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_bypass_status()
1253 pqi_get_raid_map(ctrl_info, device) == 0) in pqi_get_raid_bypass_status()
1264 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, in pqi_get_volume_status() argument
1278 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_volume_status()
1302 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_physical_device_info() argument
1310 rc = pqi_identify_physical_device(ctrl_info, device, in pqi_get_physical_device_info()
1341 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_logical_device_info() argument
1352 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); in pqi_get_logical_device_info()
1369 pqi_get_raid_level(ctrl_info, device); in pqi_get_logical_device_info()
1370 pqi_get_raid_bypass_status(ctrl_info, device); in pqi_get_logical_device_info()
1371 pqi_get_volume_status(ctrl_info, device); in pqi_get_logical_device_info()
1381 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_info() argument
1391 rc = pqi_get_logical_device_info(ctrl_info, device); in pqi_get_device_info()
1393 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); in pqi_get_device_info()
1398 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, in pqi_show_volume_status() argument
1495 dev_info(&ctrl_info->pci_dev->dev, in pqi_show_volume_status()
1497 ctrl_info->scsi_host->host_no, in pqi_show_volume_status()
1503 struct pqi_ctrl_info *ctrl_info; in pqi_rescan_worker() local
1505 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, in pqi_rescan_worker()
1508 pqi_scan_scsi_devices(ctrl_info); in pqi_rescan_worker()
1511 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, in pqi_add_device() argument
1517 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, in pqi_add_device()
1520 rc = pqi_add_sas_device(ctrl_info->sas_host, device); in pqi_add_device()
1527 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, in pqi_remove_device() argument
1534 rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS); in pqi_remove_device()
1536 dev_err(&ctrl_info->pci_dev->dev, in pqi_remove_device()
1538 ctrl_info->scsi_host->host_no, device->bus, in pqi_remove_device()
1550 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, in pqi_find_scsi_dev() argument
1555 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_find_scsi_dev()
1581 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_find_entry() argument
1586 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_scsi_find_entry()
1611 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, in pqi_dev_info() argument
1618 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); in pqi_dev_info()
1667 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); in pqi_dev_info()
1743 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, in pqi_fixup_botched_add() argument
1748 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
1750 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
1764 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, in pqi_update_device_list() argument
1784 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
1787 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_update_device_list()
1793 find_result = pqi_scsi_find_entry(ctrl_info, device, in pqi_update_device_list()
1824 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, in pqi_update_device_list()
1840 &ctrl_info->scsi_device_list); in pqi_update_device_list()
1846 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
1848 if (pqi_ctrl_in_ofa(ctrl_info)) in pqi_update_device_list()
1849 pqi_ctrl_ofa_done(ctrl_info); in pqi_update_device_list()
1854 pqi_dev_info(ctrl_info, "offline", device); in pqi_update_device_list()
1855 pqi_show_volume_status(ctrl_info, device); in pqi_update_device_list()
1859 pqi_remove_device(ctrl_info, device); in pqi_update_device_list()
1862 pqi_dev_info(ctrl_info, "removed", device); in pqi_update_device_list()
1871 list_for_each_entry(device, &ctrl_info->scsi_device_list, in pqi_update_device_list()
1890 rc = pqi_add_device(ctrl_info, device); in pqi_update_device_list()
1892 pqi_dev_info(ctrl_info, "added", device); in pqi_update_device_list()
1894 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_device_list()
1896 ctrl_info->scsi_host->host_no, in pqi_update_device_list()
1899 pqi_fixup_botched_add(ctrl_info, device); in pqi_update_device_list()
1952 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) in pqi_update_scsi_devices() argument
1976 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); in pqi_update_scsi_devices()
2003 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2030 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); in pqi_update_scsi_devices()
2038 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2093 rc = pqi_get_device_info(ctrl_info, device, id_phys); in pqi_update_scsi_devices()
2095 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2101 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2106 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2136 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); in pqi_update_scsi_devices()
2155 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) in pqi_scan_scsi_devices() argument
2159 if (pqi_ctrl_offline(ctrl_info)) in pqi_scan_scsi_devices()
2162 if (!mutex_trylock(&ctrl_info->scan_mutex)) { in pqi_scan_scsi_devices()
2163 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_scan_scsi_devices()
2166 rc = pqi_update_scsi_devices(ctrl_info); in pqi_scan_scsi_devices()
2168 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_scan_scsi_devices()
2169 mutex_unlock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2177 struct pqi_ctrl_info *ctrl_info; in pqi_scan_start() local
2179 ctrl_info = shost_to_hba(shost); in pqi_scan_start()
2180 if (pqi_ctrl_in_ofa(ctrl_info)) in pqi_scan_start()
2183 pqi_scan_scsi_devices(ctrl_info); in pqi_scan_start()
2191 struct pqi_ctrl_info *ctrl_info; in pqi_scan_finished() local
2193 ctrl_info = shost_priv(shost); in pqi_scan_finished()
2195 return !mutex_is_locked(&ctrl_info->scan_mutex); in pqi_scan_finished()
2198 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_scan_finished() argument
2200 mutex_lock(&ctrl_info->scan_mutex); in pqi_wait_until_scan_finished()
2201 mutex_unlock(&ctrl_info->scan_mutex); in pqi_wait_until_scan_finished()
2204 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_lun_reset_finished() argument
2206 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_wait_until_lun_reset_finished()
2207 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_wait_until_lun_reset_finished()
2210 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_ofa_finished() argument
2212 mutex_lock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
2213 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
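The three wait_until_*_finished() helpers are one idiom: take and immediately drop a mutex so the caller blocks until any in-flight holder (a scan, a LUN reset, an OFA sequence) releases it. Reconstructed from the fragments above:

static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
{
	/*
	 * If a scan currently holds scan_mutex, block here until it is done;
	 * the lock is released immediately because only the ordering matters.
	 */
	mutex_lock(&ctrl_info->scan_mutex);
	mutex_unlock(&ctrl_info->scan_mutex);
}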
2243 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_raid_bypass_submit_scsi_cmd() argument
2565 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, in pqi_raid_bypass_submit_scsi_cmd()
2583 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) in pqi_wait_for_pqi_mode_ready() argument
2590 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_mode_ready()
2599 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2611 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2623 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2644 struct pqi_ctrl_info *ctrl_info; in pqi_take_device_offline() local
2652 ctrl_info = shost_to_hba(sdev->host); in pqi_take_device_offline()
2653 pqi_schedule_rescan_worker(ctrl_info); in pqi_take_device_offline()
2654 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", in pqi_take_device_offline()
2655 path, ctrl_info->scsi_host->host_no, device->bus, in pqi_take_device_offline()
2730 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); in pqi_process_raid_io_error() local
2737 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
2745 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
2884 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) in pqi_invalid_response() argument
2886 pqi_take_ctrl_offline(ctrl_info); in pqi_invalid_response()
2889 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) in pqi_process_io_intr() argument
2903 if (oq_pi >= ctrl_info->num_elements_per_oq) { in pqi_process_io_intr()
2904 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
2905 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
2907 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); in pqi_process_io_intr()
2918 if (request_id >= ctrl_info->max_io_slots) { in pqi_process_io_intr()
2919 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
2920 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
2922 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); in pqi_process_io_intr()
2926 io_request = &ctrl_info->io_request_pool[request_id]; in pqi_process_io_intr()
2928 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
2929 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
2960 io_request->error_info = ctrl_info->error_buffer + in pqi_process_io_intr()
2966 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
2967 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
2979 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; in pqi_process_io_intr()
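The completion path consumes a producer/consumer ring: firmware advances a producer index (oq_pi), the driver walks its consumer index (oq_ci) up to it, validates each element, wraps modulo num_elements_per_oq, and finally writes the new consumer index back. A condensed sketch; the oq_ci_copy field and the readl()/writel() accessors are assumptions, and per-element handling is elided:

static int pqi_process_io_intr_sketch(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	int num_responses = 0;
	u32 oq_ci = queue_group->oq_ci_copy;	/* assumed cached copy */
	u32 oq_pi;

	while (1) {
		oq_pi = readl(queue_group->oq_pi);	/* firmware-owned index */
		if (oq_pi >= ctrl_info->num_elements_per_oq) {
			pqi_invalid_response(ctrl_info);	/* takes the controller offline */
			return -1;
		}
		if (oq_pi == oq_ci)
			break;	/* ring drained */

		/* ... look up io_request by request_id and complete it ... */

		num_responses++;
		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		writel(oq_ci, queue_group->oq_ci);	/* tell firmware we consumed them */
	}

	return num_responses;
}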
3003 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, in pqi_send_event_ack() argument
3012 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; in pqi_send_event_ack()
3022 ctrl_info->num_elements_per_iq)) in pqi_send_event_ack()
3028 if (pqi_ctrl_offline(ctrl_info)) in pqi_send_event_ack()
3037 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; in pqi_send_event_ack()
3049 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, in pqi_acknowledge_event() argument
3063 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); in pqi_acknowledge_event()
3070 struct pqi_ctrl_info *ctrl_info) in pqi_poll_for_soft_reset_status() argument
3078 status = pqi_read_soft_reset_status(ctrl_info); in pqi_poll_for_soft_reset_status()
3086 dev_err(&ctrl_info->pci_dev->dev, in pqi_poll_for_soft_reset_status()
3091 if (!sis_is_firmware_running(ctrl_info)) in pqi_poll_for_soft_reset_status()
3098 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, in pqi_process_soft_reset() argument
3106 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3107 "resetting controller %u\n", ctrl_info->ctrl_id); in pqi_process_soft_reset()
3108 sis_soft_reset(ctrl_info); in pqi_process_soft_reset()
3111 rc = pqi_ofa_ctrl_restart(ctrl_info); in pqi_process_soft_reset()
3112 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3113 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3115 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED"); in pqi_process_soft_reset()
3118 pqi_ofa_ctrl_unquiesce(ctrl_info); in pqi_process_soft_reset()
3119 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3121 ctrl_info->ctrl_id, "ABORTED"); in pqi_process_soft_reset()
3124 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3125 pqi_take_ctrl_offline(ctrl_info); in pqi_process_soft_reset()
3130 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_process_event() argument
3138 mutex_lock(&ctrl_info->ofa_mutex); in pqi_ofa_process_event()
3141 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3143 ctrl_info->ctrl_id); in pqi_ofa_process_event()
3144 pqi_ofa_ctrl_quiesce(ctrl_info); in pqi_ofa_process_event()
3145 pqi_acknowledge_event(ctrl_info, event); in pqi_ofa_process_event()
3146 if (ctrl_info->soft_reset_handshake_supported) { in pqi_ofa_process_event()
3147 status = pqi_poll_for_soft_reset_status(ctrl_info); in pqi_ofa_process_event()
3148 pqi_process_soft_reset(ctrl_info, status); in pqi_ofa_process_event()
3150 pqi_process_soft_reset(ctrl_info, in pqi_ofa_process_event()
3155 pqi_acknowledge_event(ctrl_info, event); in pqi_ofa_process_event()
3156 pqi_ofa_setup_host_buffer(ctrl_info, in pqi_ofa_process_event()
3158 pqi_ofa_host_memory_update(ctrl_info); in pqi_ofa_process_event()
3160 pqi_ofa_free_host_buffer(ctrl_info); in pqi_ofa_process_event()
3161 pqi_acknowledge_event(ctrl_info, event); in pqi_ofa_process_event()
3162 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3164 ctrl_info->ctrl_id, event->ofa_cancel_reason); in pqi_ofa_process_event()
3167 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_ofa_process_event()
3173 struct pqi_ctrl_info *ctrl_info; in pqi_event_worker() local
3176 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); in pqi_event_worker()
3178 pqi_ctrl_busy(ctrl_info); in pqi_event_worker()
3179 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); in pqi_event_worker()
3180 if (pqi_ctrl_offline(ctrl_info)) in pqi_event_worker()
3183 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_event_worker()
3185 event = ctrl_info->events; in pqi_event_worker()
3190 pqi_ctrl_unbusy(ctrl_info); in pqi_event_worker()
3191 pqi_ofa_process_event(ctrl_info, event); in pqi_event_worker()
3194 pqi_acknowledge_event(ctrl_info, event); in pqi_event_worker()
3200 pqi_ctrl_unbusy(ctrl_info); in pqi_event_worker()
3209 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, in pqi_heartbeat_timer_handler() local
3212 pqi_check_ctrl_health(ctrl_info); in pqi_heartbeat_timer_handler()
3213 if (pqi_ctrl_offline(ctrl_info)) in pqi_heartbeat_timer_handler()
3216 num_interrupts = atomic_read(&ctrl_info->num_interrupts); in pqi_heartbeat_timer_handler()
3217 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); in pqi_heartbeat_timer_handler()
3219 if (num_interrupts == ctrl_info->previous_num_interrupts) { in pqi_heartbeat_timer_handler()
3220 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { in pqi_heartbeat_timer_handler()
3221 dev_err(&ctrl_info->pci_dev->dev, in pqi_heartbeat_timer_handler()
3224 pqi_take_ctrl_offline(ctrl_info); in pqi_heartbeat_timer_handler()
3228 ctrl_info->previous_num_interrupts = num_interrupts; in pqi_heartbeat_timer_handler()
3231 ctrl_info->previous_heartbeat_count = heartbeat_count; in pqi_heartbeat_timer_handler()
3232 mod_timer(&ctrl_info->heartbeat_timer, in pqi_heartbeat_timer_handler()
3236 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) in pqi_start_heartbeat_timer() argument
3238 if (!ctrl_info->heartbeat_counter) in pqi_start_heartbeat_timer()
3241 ctrl_info->previous_num_interrupts = in pqi_start_heartbeat_timer()
3242 atomic_read(&ctrl_info->num_interrupts); in pqi_start_heartbeat_timer()
3243 ctrl_info->previous_heartbeat_count = in pqi_start_heartbeat_timer()
3244 pqi_read_heartbeat_counter(ctrl_info); in pqi_start_heartbeat_timer()
3246 ctrl_info->heartbeat_timer.expires = in pqi_start_heartbeat_timer()
3248 add_timer(&ctrl_info->heartbeat_timer); in pqi_start_heartbeat_timer()
3251 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) in pqi_stop_heartbeat_timer() argument
3253 del_timer_sync(&ctrl_info->heartbeat_timer); in pqi_stop_heartbeat_timer()
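The heartbeat watchdog declares the controller dead only if both the interrupt count and the firmware heartbeat counter stood still since the previous tick; otherwise it records the new values and re-arms itself. A sketch; the check interval and the error text are placeholders:

#define PQI_HEARTBEAT_CHECK_INTERVAL	(10 * HZ)	/* placeholder */

static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
	int num_interrupts;
	u32 heartbeat_count;
	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
		heartbeat_timer);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);

	if (num_interrupts == ctrl_info->previous_num_interrupts &&
	    heartbeat_count == ctrl_info->previous_heartbeat_count) {
		dev_err(&ctrl_info->pci_dev->dev,
			"no heartbeat detected - taking controller offline\n");
		pqi_take_ctrl_offline(ctrl_info);
		return;
	}

	ctrl_info->previous_num_interrupts = num_interrupts;
	ctrl_info->previous_heartbeat_count = heartbeat_count;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_CHECK_INTERVAL);
}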
3290 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) in pqi_process_event_intr() argument
3300 event_queue = &ctrl_info->event_queue; in pqi_process_event_intr()
3307 pqi_invalid_response(ctrl_info); in pqi_process_event_intr()
3308 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_event_intr()
3324 event = &ctrl_info->events[event_index]; in pqi_process_event_intr()
3339 schedule_work(&ctrl_info->event_work); in pqi_process_event_intr()
3347 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, in pqi_configure_legacy_intx() argument
3354 pqi_registers = ctrl_info->pqi_registers; in pqi_configure_legacy_intx()
3366 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, in pqi_change_irq_mode() argument
3369 switch (ctrl_info->irq_mode) { in pqi_change_irq_mode()
3375 pqi_configure_legacy_intx(ctrl_info, true); in pqi_change_irq_mode()
3376 sis_enable_intx(ctrl_info); in pqi_change_irq_mode()
3385 pqi_configure_legacy_intx(ctrl_info, false); in pqi_change_irq_mode()
3386 sis_enable_msix(ctrl_info); in pqi_change_irq_mode()
3391 pqi_configure_legacy_intx(ctrl_info, false); in pqi_change_irq_mode()
3398 sis_enable_msix(ctrl_info); in pqi_change_irq_mode()
3401 pqi_configure_legacy_intx(ctrl_info, true); in pqi_change_irq_mode()
3402 sis_enable_intx(ctrl_info); in pqi_change_irq_mode()
3410 ctrl_info->irq_mode = new_mode; in pqi_change_irq_mode()
3415 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) in pqi_is_valid_irq() argument
3420 switch (ctrl_info->irq_mode) { in pqi_is_valid_irq()
3426 readl(&ctrl_info->pqi_registers->legacy_intx_status); in pqi_is_valid_irq()
3443 struct pqi_ctrl_info *ctrl_info; in pqi_irq_handler() local
3449 ctrl_info = queue_group->ctrl_info; in pqi_irq_handler()
3451 if (!pqi_is_valid_irq(ctrl_info)) in pqi_irq_handler()
3454 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); in pqi_irq_handler()
3458 if (irq == ctrl_info->event_irq) { in pqi_irq_handler()
3459 num_events_handled = pqi_process_event_intr(ctrl_info); in pqi_irq_handler()
3467 atomic_inc(&ctrl_info->num_interrupts); in pqi_irq_handler()
3469 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); in pqi_irq_handler()
3470 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); in pqi_irq_handler()
3476 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) in pqi_request_irqs() argument
3478 struct pci_dev *pci_dev = ctrl_info->pci_dev; in pqi_request_irqs()
3482 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); in pqi_request_irqs()
3484 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { in pqi_request_irqs()
3486 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); in pqi_request_irqs()
3493 ctrl_info->num_msix_vectors_initialized++; in pqi_request_irqs()
3499 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) in pqi_free_irqs() argument
3503 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) in pqi_free_irqs()
3504 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), in pqi_free_irqs()
3505 &ctrl_info->queue_groups[i]); in pqi_free_irqs()
3507 ctrl_info->num_msix_vectors_initialized = 0; in pqi_free_irqs()
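Vector setup is a plain request_irq() loop over the MSI-X vectors that were enabled, with pci_irq_vector() mapping vector index to Linux IRQ number; the request_irq() flags are not shown, so 0 is assumed:

static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	struct pci_dev *pci_dev = ctrl_info->pci_dev;
	int i;
	int rc;

	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler,
			0, DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
		if (rc) {
			dev_err(&pci_dev->dev,
				"irq %d init failed with error %d\n",
				pci_irq_vector(pci_dev, i), rc);
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}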
3510 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_enable_msix_interrupts() argument
3514 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, in pqi_enable_msix_interrupts()
3515 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, in pqi_enable_msix_interrupts()
3518 dev_err(&ctrl_info->pci_dev->dev, in pqi_enable_msix_interrupts()
3524 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; in pqi_enable_msix_interrupts()
3525 ctrl_info->irq_mode = IRQ_MODE_MSIX; in pqi_enable_msix_interrupts()
3529 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_disable_msix_interrupts() argument
3531 if (ctrl_info->num_msix_vectors_enabled) { in pqi_disable_msix_interrupts()
3532 pci_free_irq_vectors(ctrl_info->pci_dev); in pqi_disable_msix_interrupts()
3533 ctrl_info->num_msix_vectors_enabled = 0; in pqi_disable_msix_interrupts()
3537 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_operational_queues() argument
3553 ctrl_info->num_elements_per_iq; in pqi_alloc_operational_queues()
3556 ctrl_info->num_elements_per_oq; in pqi_alloc_operational_queues()
3557 num_inbound_queues = ctrl_info->num_queue_groups * 2; in pqi_alloc_operational_queues()
3558 num_outbound_queues = ctrl_info->num_queue_groups; in pqi_alloc_operational_queues()
3559 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; in pqi_alloc_operational_queues()
3591 ctrl_info->queue_memory_base = in pqi_alloc_operational_queues()
3592 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_operational_queues()
3593 &ctrl_info->queue_memory_base_dma_handle, in pqi_alloc_operational_queues()
3596 if (!ctrl_info->queue_memory_base) in pqi_alloc_operational_queues()
3599 ctrl_info->queue_memory_length = alloc_length; in pqi_alloc_operational_queues()
3601 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, in pqi_alloc_operational_queues()
3604 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3605 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3608 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3609 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3615 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3616 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3622 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3623 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3626 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3627 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3633 ctrl_info->event_queue.oq_element_array = element_array; in pqi_alloc_operational_queues()
3634 ctrl_info->event_queue.oq_element_array_bus_addr = in pqi_alloc_operational_queues()
3635 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3636 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3643 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3644 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3647 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3649 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3655 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3657 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3663 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3665 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3671 ctrl_info->event_queue.oq_pi = next_queue_index; in pqi_alloc_operational_queues()
3672 ctrl_info->event_queue.oq_pi_bus_addr = in pqi_alloc_operational_queues()
3673 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3675 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3680 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) in pqi_init_operational_queues() argument
3690 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
3691 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; in pqi_init_operational_queues()
3698 ctrl_info->event_queue.oq_id = next_oq_id++; in pqi_init_operational_queues()
3699 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
3700 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; in pqi_init_operational_queues()
3701 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; in pqi_init_operational_queues()
3702 ctrl_info->queue_groups[i].oq_id = next_oq_id++; in pqi_init_operational_queues()
3709 ctrl_info->event_queue.int_msg_num = 0; in pqi_init_operational_queues()
3710 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
3711 ctrl_info->queue_groups[i].int_msg_num = i; in pqi_init_operational_queues()
3713 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
3714 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); in pqi_init_operational_queues()
3715 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); in pqi_init_operational_queues()
3716 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); in pqi_init_operational_queues()
3717 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); in pqi_init_operational_queues()
3721 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_admin_queues() argument
3730 ctrl_info->admin_queue_memory_base = in pqi_alloc_admin_queues()
3731 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_admin_queues()
3732 &ctrl_info->admin_queue_memory_base_dma_handle, in pqi_alloc_admin_queues()
3735 if (!ctrl_info->admin_queue_memory_base) in pqi_alloc_admin_queues()
3738 ctrl_info->admin_queue_memory_length = alloc_length; in pqi_alloc_admin_queues()
3740 admin_queues = &ctrl_info->admin_queues; in pqi_alloc_admin_queues()
3741 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, in pqi_alloc_admin_queues()
3752 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
3754 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
3756 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
3758 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
3760 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
3762 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
3764 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
3766 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
3774 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) in pqi_create_admin_queues() argument
3782 pqi_registers = ctrl_info->pqi_registers; in pqi_create_admin_queues()
3783 admin_queues = &ctrl_info->admin_queues; in pqi_create_admin_queues()
3816 admin_queues->iq_pi = ctrl_info->iomem_base + in pqi_create_admin_queues()
3819 admin_queues->oq_ci = ctrl_info->iomem_base + in pqi_create_admin_queues()
3826 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, in pqi_submit_admin_request() argument
3833 admin_queues = &ctrl_info->admin_queues; in pqi_submit_admin_request()
3853 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, in pqi_poll_for_admin_response() argument
3861 admin_queues = &ctrl_info->admin_queues; in pqi_poll_for_admin_response()
3871 dev_err(&ctrl_info->pci_dev->dev, in pqi_poll_for_admin_response()
3875 if (!sis_is_firmware_running(ctrl_info)) in pqi_poll_for_admin_response()
3890 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, in pqi_start_io() argument
3929 ctrl_info->num_elements_per_iq)) in pqi_start_io()
3939 ctrl_info->num_elements_per_iq - iq_pi; in pqi_start_io()
3953 ctrl_info->num_elements_per_iq; in pqi_start_io()
3972 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, in pqi_wait_for_completion_io() argument
3984 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_completion_io()
3985 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_completion_io()
4025 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, in pqi_submit_raid_request_synchronous() argument
4042 if (down_interruptible(&ctrl_info->sync_request_sem)) in pqi_submit_raid_request_synchronous()
4046 down(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4049 if (down_timeout(&ctrl_info->sync_request_sem, in pqi_submit_raid_request_synchronous()
4062 pqi_ctrl_busy(ctrl_info); in pqi_submit_raid_request_synchronous()
4063 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); in pqi_submit_raid_request_synchronous()
4065 pqi_ctrl_unbusy(ctrl_info); in pqi_submit_raid_request_synchronous()
4070 if (pqi_ctrl_offline(ctrl_info)) { in pqi_submit_raid_request_synchronous()
4071 pqi_ctrl_unbusy(ctrl_info); in pqi_submit_raid_request_synchronous()
4076 atomic_inc(&ctrl_info->sync_cmds_outstanding); in pqi_submit_raid_request_synchronous()
4078 io_request = pqi_alloc_io_request(ctrl_info); in pqi_submit_raid_request_synchronous()
4094 pqi_start_io(ctrl_info, in pqi_submit_raid_request_synchronous()
4095 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_submit_raid_request_synchronous()
4098 pqi_ctrl_unbusy(ctrl_info); in pqi_submit_raid_request_synchronous()
4101 pqi_wait_for_completion_io(ctrl_info, &wait); in pqi_submit_raid_request_synchronous()
4105 dev_warn(&ctrl_info->pci_dev->dev, in pqi_submit_raid_request_synchronous()
4124 atomic_dec(&ctrl_info->sync_cmds_outstanding); in pqi_submit_raid_request_synchronous()
4126 up(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
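Synchronous RAID-path requests are throttled with the sync_request_sem counting semaphore, checked against the busy/blocked/offline state, pushed through the default RAID queue group, and then waited on with an on-stack completion. A condensed sketch, with the interruptible and timed-down variants from the fragments collapsed into a single down() and IU copying elided:

static int pqi_submit_raid_request_synchronous_sketch(
	struct pqi_ctrl_info *ctrl_info, struct pqi_iu_header *request)
{
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);

	down(&ctrl_info->sync_request_sem);	/* bound concurrent sync commands */

	pqi_ctrl_busy(ctrl_info);
	pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
	if (pqi_ctrl_offline(ctrl_info)) {
		pqi_ctrl_unbusy(ctrl_info);
		up(&ctrl_info->sync_request_sem);
		return -ENXIO;
	}

	atomic_inc(&ctrl_info->sync_cmds_outstanding);

	io_request = pqi_alloc_io_request(ctrl_info);
	/* ... copy the caller's IU (request) into io_request and attach &wait ... */

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);
	pqi_ctrl_unbusy(ctrl_info);

	pqi_wait_for_completion_io(ctrl_info, &wait);

	atomic_dec(&ctrl_info->sync_cmds_outstanding);
	up(&ctrl_info->sync_request_sem);

	return 0;
}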
4151 struct pqi_ctrl_info *ctrl_info, in pqi_submit_admin_request_synchronous() argument
4157 pqi_submit_admin_request(ctrl_info, request); in pqi_submit_admin_request_synchronous()
4159 rc = pqi_poll_for_admin_response(ctrl_info, response); in pqi_submit_admin_request_synchronous()
4168 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) in pqi_report_device_capability() argument
4190 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_report_device_capability()
4197 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_report_device_capability()
4200 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_report_device_capability()
4212 ctrl_info->max_inbound_queues = in pqi_report_device_capability()
4214 ctrl_info->max_elements_per_iq = in pqi_report_device_capability()
4216 ctrl_info->max_iq_element_length = in pqi_report_device_capability()
4219 ctrl_info->max_outbound_queues = in pqi_report_device_capability()
4221 ctrl_info->max_elements_per_oq = in pqi_report_device_capability()
4223 ctrl_info->max_oq_element_length = in pqi_report_device_capability()
4230 ctrl_info->max_inbound_iu_length_per_firmware = in pqi_report_device_capability()
4233 ctrl_info->inbound_spanning_supported = in pqi_report_device_capability()
4235 ctrl_info->outbound_spanning_supported = in pqi_report_device_capability()
4244 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) in pqi_validate_device_capability() argument
4246 if (ctrl_info->max_iq_element_length < in pqi_validate_device_capability()
4248 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4250 ctrl_info->max_iq_element_length, in pqi_validate_device_capability()
4255 if (ctrl_info->max_oq_element_length < in pqi_validate_device_capability()
4257 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4259 ctrl_info->max_oq_element_length, in pqi_validate_device_capability()
4264 if (ctrl_info->max_inbound_iu_length_per_firmware < in pqi_validate_device_capability()
4266 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4268 ctrl_info->max_inbound_iu_length_per_firmware, in pqi_validate_device_capability()
4273 if (!ctrl_info->inbound_spanning_supported) { in pqi_validate_device_capability()
4274 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4279 if (ctrl_info->outbound_spanning_supported) { in pqi_validate_device_capability()
4280 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4288 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) in pqi_create_event_queue() argument
4295 event_queue = &ctrl_info->event_queue; in pqi_create_event_queue()
4320 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_event_queue()
4325 event_queue->oq_ci = ctrl_info->iomem_base + in pqi_create_event_queue()
4333 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, in pqi_create_queue_group() argument
4341 queue_group = &ctrl_info->queue_groups[group_number]; in pqi_create_queue_group()
4359 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4365 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4368 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4373 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4394 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4400 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4403 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4408 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4428 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4431 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4450 put_unaligned_le16(ctrl_info->num_elements_per_oq, in pqi_create_queue_group()
4458 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4461 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4466 queue_group->oq_ci = ctrl_info->iomem_base + in pqi_create_queue_group()
4474 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) in pqi_create_queues() argument
4479 rc = pqi_create_event_queue(ctrl_info); in pqi_create_queues()
4481 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
4486 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_create_queues()
4487 rc = pqi_create_queue_group(ctrl_info, i); in pqi_create_queues()
4489 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
4491 i, ctrl_info->num_queue_groups); in pqi_create_queues()
4503 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, in pqi_configure_events() argument
4526 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
4533 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, in pqi_configure_events()
4536 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
4547 put_unaligned_le16(ctrl_info->event_queue.oq_id, in pqi_configure_events()
4562 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
4569 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, in pqi_configure_events()
4572 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
4582 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) in pqi_enable_events() argument
4584 return pqi_configure_events(ctrl_info, true); in pqi_enable_events()
4587 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) in pqi_disable_events() argument
4589 return pqi_configure_events(ctrl_info, false); in pqi_disable_events()
4592 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) in pqi_free_all_io_requests() argument
4599 if (!ctrl_info->io_request_pool) in pqi_free_all_io_requests()
4602 dev = &ctrl_info->pci_dev->dev; in pqi_free_all_io_requests()
4603 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_free_all_io_requests()
4604 io_request = ctrl_info->io_request_pool; in pqi_free_all_io_requests()
4606 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_free_all_io_requests()
4616 kfree(ctrl_info->io_request_pool); in pqi_free_all_io_requests()
4617 ctrl_info->io_request_pool = NULL; in pqi_free_all_io_requests()
4620 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_error_buffer() argument
4623 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, in pqi_alloc_error_buffer()
4624 ctrl_info->error_buffer_length, in pqi_alloc_error_buffer()
4625 &ctrl_info->error_buffer_dma_handle, in pqi_alloc_error_buffer()
4627 if (!ctrl_info->error_buffer) in pqi_alloc_error_buffer()
4633 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_io_resources() argument
4642 ctrl_info->io_request_pool = in pqi_alloc_io_resources()
4643 kcalloc(ctrl_info->max_io_slots, in pqi_alloc_io_resources()
4644 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); in pqi_alloc_io_resources()
4646 if (!ctrl_info->io_request_pool) { in pqi_alloc_io_resources()
4647 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4652 dev = &ctrl_info->pci_dev->dev; in pqi_alloc_io_resources()
4653 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_alloc_io_resources()
4654 io_request = ctrl_info->io_request_pool; in pqi_alloc_io_resources()
4656 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_alloc_io_resources()
4658 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); in pqi_alloc_io_resources()
4661 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4671 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4686 pqi_free_all_io_requests(ctrl_info); in pqi_alloc_io_resources()
4696 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) in pqi_calculate_io_resources() argument
4701 ctrl_info->scsi_ml_can_queue = in pqi_calculate_io_resources()
4702 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; in pqi_calculate_io_resources()
4703 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; in pqi_calculate_io_resources()
4705 ctrl_info->error_buffer_length = in pqi_calculate_io_resources()
4706 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; in pqi_calculate_io_resources()
4709 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
4712 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
4720 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); in pqi_calculate_io_resources()
4724 ctrl_info->sg_chain_buffer_length = in pqi_calculate_io_resources()
4727 ctrl_info->sg_tablesize = max_sg_entries; in pqi_calculate_io_resources()
4728 ctrl_info->max_sectors = max_transfer_size / 512; in pqi_calculate_io_resources()
4731 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) in pqi_calculate_queue_resources() argument
4743 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, in pqi_calculate_queue_resources()
4744 ctrl_info->max_outbound_queues - 1); in pqi_calculate_queue_resources()
4748 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); in pqi_calculate_queue_resources()
4752 ctrl_info->num_queue_groups = num_queue_groups; in pqi_calculate_queue_resources()
4753 ctrl_info->max_hw_queue_index = num_queue_groups - 1; in pqi_calculate_queue_resources()
4759 ctrl_info->max_inbound_iu_length = in pqi_calculate_queue_resources()
4760 (ctrl_info->max_inbound_iu_length_per_firmware / in pqi_calculate_queue_resources()
4765 (ctrl_info->max_inbound_iu_length / in pqi_calculate_queue_resources()
4772 ctrl_info->max_elements_per_iq); in pqi_calculate_queue_resources()
4776 ctrl_info->max_elements_per_oq); in pqi_calculate_queue_resources()
4778 ctrl_info->num_elements_per_iq = num_elements_per_iq; in pqi_calculate_queue_resources()
4779 ctrl_info->num_elements_per_oq = num_elements_per_oq; in pqi_calculate_queue_resources()
4781 ctrl_info->max_sg_per_iu = in pqi_calculate_queue_resources()
4782 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
4799 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_raid_sg_list() argument
4824 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; in pqi_build_raid_sg_list()
4863 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_sg_list() argument
4889 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; in pqi_build_aio_sg_list()
4940 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, in pqi_raid_submit_scsi_cmd_with_io_request() argument
5012 dev_err(&ctrl_info->pci_dev->dev, in pqi_raid_submit_scsi_cmd_with_io_request()
5018 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); in pqi_raid_submit_scsi_cmd_with_io_request()
5024 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); in pqi_raid_submit_scsi_cmd_with_io_request()
5029 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_raid_submit_scsi_cmd() argument
5035 io_request = pqi_alloc_io_request(ctrl_info); in pqi_raid_submit_scsi_cmd()
5037 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, in pqi_raid_submit_scsi_cmd()
5041 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_bypass_retry() argument
5043 if (!pqi_ctrl_blocked(ctrl_info)) in pqi_schedule_bypass_retry()
5044 schedule_work(&ctrl_info->raid_bypass_retry_work); in pqi_schedule_bypass_retry()
5051 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_retry_needed() local
5066 ctrl_info = shost_to_hba(scmd->device->host); in pqi_raid_bypass_retry_needed()
5067 if (pqi_ctrl_offline(ctrl_info)) in pqi_raid_bypass_retry_needed()
5074 struct pqi_ctrl_info *ctrl_info, in pqi_add_to_raid_bypass_retry_list() argument
5079 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); in pqi_add_to_raid_bypass_retry_list()
5082 &ctrl_info->raid_bypass_retry_list); in pqi_add_to_raid_bypass_retry_list()
5085 &ctrl_info->raid_bypass_retry_list); in pqi_add_to_raid_bypass_retry_list()
5086 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); in pqi_add_to_raid_bypass_retry_list()
5102 struct pqi_ctrl_info *ctrl_info; in pqi_queue_raid_bypass_retry() local
5107 ctrl_info = shost_to_hba(scmd->device->host); in pqi_queue_raid_bypass_retry()
5109 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); in pqi_queue_raid_bypass_retry()
5110 pqi_schedule_bypass_retry(ctrl_info); in pqi_queue_raid_bypass_retry()
5117 struct pqi_ctrl_info *ctrl_info; in pqi_retry_raid_bypass() local
5129 ctrl_info = shost_to_hba(scmd->device->host); in pqi_retry_raid_bypass()
5134 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, in pqi_retry_raid_bypass()
5139 struct pqi_ctrl_info *ctrl_info) in pqi_next_queued_raid_bypass_request() argument
5144 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); in pqi_next_queued_raid_bypass_request()
5146 &ctrl_info->raid_bypass_retry_list, in pqi_next_queued_raid_bypass_request()
5150 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); in pqi_next_queued_raid_bypass_request()
5155 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) in pqi_retry_raid_bypass_requests() argument
5160 pqi_ctrl_busy(ctrl_info); in pqi_retry_raid_bypass_requests()
5163 if (pqi_ctrl_blocked(ctrl_info)) in pqi_retry_raid_bypass_requests()
5165 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); in pqi_retry_raid_bypass_requests()
5170 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, in pqi_retry_raid_bypass_requests()
5172 pqi_schedule_bypass_retry(ctrl_info); in pqi_retry_raid_bypass_requests()
5177 pqi_ctrl_unbusy(ctrl_info); in pqi_retry_raid_bypass_requests()
5182 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_retry_worker() local
5184 ctrl_info = container_of(work, struct pqi_ctrl_info, in pqi_raid_bypass_retry_worker()
5186 pqi_retry_raid_bypass_requests(ctrl_info); in pqi_raid_bypass_retry_worker()
5190 struct pqi_ctrl_info *ctrl_info) in pqi_clear_all_queued_raid_bypass_retries() argument
5194 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); in pqi_clear_all_queued_raid_bypass_retries()
5195 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); in pqi_clear_all_queued_raid_bypass_retries()
5196 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); in pqi_clear_all_queued_raid_bypass_retries()
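
All of the raid_bypass_retry references above (add, next, retry, clear) revolve around one spinlock-protected list. A minimal sketch of that list discipline, using simplified stand-in types rather than the driver's pqi_io_request:

  #include <linux/list.h>
  #include <linux/spinlock.h>

  struct demo_request {
          struct list_head entry;
  };

  struct demo_ctrl {
          struct list_head retry_list;
          spinlock_t retry_list_lock;
  };

  /* Queue a request for a later retry; at_head mirrors the head/tail choice
   * visible in the references above. */
  static void demo_add_retry(struct demo_ctrl *ctrl, struct demo_request *rq,
                             bool at_head)
  {
          unsigned long flags;

          spin_lock_irqsave(&ctrl->retry_list_lock, flags);
          if (at_head)
                  list_add(&rq->entry, &ctrl->retry_list);
          else
                  list_add_tail(&rq->entry, &ctrl->retry_list);
          spin_unlock_irqrestore(&ctrl->retry_list_lock, flags);
  }

  /* Pop the next queued request, or NULL when the list is empty. */
  static struct demo_request *demo_next_retry(struct demo_ctrl *ctrl)
  {
          struct demo_request *rq;
          unsigned long flags;

          spin_lock_irqsave(&ctrl->retry_list_lock, flags);
          rq = list_first_entry_or_null(&ctrl->retry_list,
                                        struct demo_request, entry);
          if (rq)
                  list_del(&rq->entry);
          spin_unlock_irqrestore(&ctrl->retry_list_lock, flags);

          return rq;
  }

The _irqsave lock variants are the safe default whenever a list may be reached from more than process context, which matches the locking used throughout the listing.
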
5216 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_scsi_cmd() argument
5220 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, in pqi_aio_submit_scsi_cmd()
5224 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_io() argument
5233 io_request = pqi_alloc_io_request(ctrl_info); in pqi_aio_submit_io()
5267 dev_err(&ctrl_info->pci_dev->dev, in pqi_aio_submit_io()
5283 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); in pqi_aio_submit_io()
5289 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_io()
5294 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, in pqi_get_hw_queue() argument
5300 if (hw_queue > ctrl_info->max_hw_queue_index) in pqi_get_hw_queue()
5333 struct pqi_ctrl_info *ctrl_info; in pqi_scsi_queue_command() local
5340 ctrl_info = shost_to_hba(shost); in pqi_scsi_queue_command()
5350 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info, in pqi_scsi_queue_command()
5357 pqi_ctrl_busy(ctrl_info); in pqi_scsi_queue_command()
5358 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || in pqi_scsi_queue_command()
5359 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) { in pqi_scsi_queue_command()
5370 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); in pqi_scsi_queue_command()
5371 queue_group = &ctrl_info->queue_groups[hw_queue]; in pqi_scsi_queue_command()
5377 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, in pqi_scsi_queue_command()
5385 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5388 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5390 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5394 pqi_ctrl_unbusy(ctrl_info); in pqi_scsi_queue_command()
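
pqi_scsi_queue_command() above gates every command: mark the controller busy, return a requeue status while requests are blocked, a reset is in progress, OFA is running, or the controller is shutting down, and only then pick a hardware queue and a submission path. A hedged sketch of just that gate, with a hypothetical blocked flag standing in for the pqi_ctrl_*() checks:

  #include <linux/atomic.h>
  #include <scsi/scsi.h>
  #include <scsi/scsi_cmnd.h>
  #include <scsi/scsi_host.h>

  struct demo_ctrl {
          atomic_t busy_count;    /* pqi_ctrl_busy()/unbusy() analogue */
          bool blocked;           /* stand-in for blocked/reset/OFA/shutdown */
  };

  static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
  {
          struct demo_ctrl *ctrl = shost_priv(shost);
          int rc;

          atomic_inc(&ctrl->busy_count);
          if (ctrl->blocked) {
                  /* ask the SCSI midlayer to retry the command later */
                  rc = SCSI_MLQUEUE_HOST_BUSY;
                  goto out;
          }

          rc = 0;                 /* submit on the RAID or AIO path here */
  out:
          atomic_dec(&ctrl->busy_count);
          return rc;
  }
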
5401 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, in pqi_wait_until_queued_io_drained() argument
5418 pqi_check_ctrl_health(ctrl_info); in pqi_wait_until_queued_io_drained()
5419 if (pqi_ctrl_offline(ctrl_info)) in pqi_wait_until_queued_io_drained()
5428 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_inbound_queues_empty() argument
5437 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_wait_until_inbound_queues_empty()
5438 queue_group = &ctrl_info->queue_groups[i]; in pqi_wait_until_inbound_queues_empty()
5440 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); in pqi_wait_until_inbound_queues_empty()
5451 pqi_check_ctrl_health(ctrl_info); in pqi_wait_until_inbound_queues_empty()
5452 if (pqi_ctrl_offline(ctrl_info)) in pqi_wait_until_inbound_queues_empty()
5462 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, in pqi_fail_io_queued_for_device() argument
5474 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_fail_io_queued_for_device()
5475 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_device()
5503 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info) in pqi_fail_io_queued_for_all_devices() argument
5513 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_fail_io_queued_for_all_devices()
5514 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_all_devices()
5539 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, in pqi_device_wait_for_pending_io() argument
5547 pqi_check_ctrl_health(ctrl_info); in pqi_device_wait_for_pending_io()
5548 if (pqi_ctrl_offline(ctrl_info)) in pqi_device_wait_for_pending_io()
5552 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
5563 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, in pqi_ctrl_wait_for_pending_io() argument
5575 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_ctrl_wait_for_pending_io()
5576 list_for_each_entry(device, &ctrl_info->scsi_device_list, in pqi_ctrl_wait_for_pending_io()
5583 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, in pqi_ctrl_wait_for_pending_io()
5589 pqi_check_ctrl_health(ctrl_info); in pqi_ctrl_wait_for_pending_io()
5590 if (pqi_ctrl_offline(ctrl_info)) in pqi_ctrl_wait_for_pending_io()
5595 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_for_pending_io()
5606 static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_wait_for_pending_sync_cmds() argument
5608 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) { in pqi_ctrl_wait_for_pending_sync_cmds()
5609 pqi_check_ctrl_health(ctrl_info); in pqi_ctrl_wait_for_pending_sync_cmds()
5610 if (pqi_ctrl_offline(ctrl_info)) in pqi_ctrl_wait_for_pending_sync_cmds()
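
pqi_ctrl_wait_for_pending_sync_cmds() and the other wait helpers above share one shape: poll a counter, re-check controller health on every pass, and give up if the controller has gone offline. A minimal sketch of that polling loop under assumed names:

  #include <linux/atomic.h>
  #include <linux/delay.h>
  #include <linux/errno.h>

  #define DEMO_POLL_USECS 10000   /* assumed 10 ms polling interval */

  /* offline() is a stand-in for the pqi_check_ctrl_health()/pqi_ctrl_offline()
   * pair seen in the references above. */
  static int demo_wait_for_zero(atomic_t *outstanding, bool (*offline)(void))
  {
          while (atomic_read(outstanding) > 0) {
                  if (offline())
                          return -ENXIO;
                  usleep_range(DEMO_POLL_USECS, 2 * DEMO_POLL_USECS);
          }

          return 0;
  }
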
5629 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, in pqi_wait_for_lun_reset_completion() argument
5641 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_lun_reset_completion()
5642 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_lun_reset_completion()
5651 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, in pqi_lun_reset() argument
5659 io_request = pqi_alloc_io_request(ctrl_info); in pqi_lun_reset()
5673 if (ctrl_info->tmf_iu_timeout_supported) in pqi_lun_reset()
5677 pqi_start_io(ctrl_info, in pqi_lun_reset()
5678 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_lun_reset()
5681 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); in pqi_lun_reset()
5696 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, in _pqi_device_reset() argument
5704 rc = pqi_lun_reset(ctrl_info, device); in _pqi_device_reset()
5712 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs); in _pqi_device_reset()
5717 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, in pqi_device_reset() argument
5722 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_device_reset()
5724 pqi_ctrl_block_requests(ctrl_info); in pqi_device_reset()
5725 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_device_reset()
5726 pqi_fail_io_queued_for_device(ctrl_info, device); in pqi_device_reset()
5727 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); in pqi_device_reset()
5729 pqi_ctrl_unblock_requests(ctrl_info); in pqi_device_reset()
5734 rc = _pqi_device_reset(ctrl_info, device); in pqi_device_reset()
5738 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_device_reset()
5747 struct pqi_ctrl_info *ctrl_info; in pqi_eh_device_reset_handler() local
5751 ctrl_info = shost_to_hba(shost); in pqi_eh_device_reset_handler()
5754 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_device_reset_handler()
5758 pqi_check_ctrl_health(ctrl_info); in pqi_eh_device_reset_handler()
5759 if (pqi_ctrl_offline(ctrl_info) || in pqi_eh_device_reset_handler()
5760 pqi_device_reset_blocked(ctrl_info)) { in pqi_eh_device_reset_handler()
5765 pqi_wait_until_ofa_finished(ctrl_info); in pqi_eh_device_reset_handler()
5767 atomic_inc(&ctrl_info->sync_cmds_outstanding); in pqi_eh_device_reset_handler()
5768 rc = pqi_device_reset(ctrl_info, device); in pqi_eh_device_reset_handler()
5769 atomic_dec(&ctrl_info->sync_cmds_outstanding); in pqi_eh_device_reset_handler()
5772 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_device_reset_handler()
5784 struct pqi_ctrl_info *ctrl_info; in pqi_slave_alloc() local
5788 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_alloc()
5790 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
5795 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); in pqi_slave_alloc()
5802 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), in pqi_slave_alloc()
5820 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
5827 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_map_queues() local
5830 ctrl_info->pci_dev, 0); in pqi_map_queues()
5847 struct pqi_ctrl_info *ctrl_info; in pqi_slave_destroy() local
5849 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_destroy()
5851 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
5860 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
5863 pqi_dev_info(ctrl_info, "removed", device); in pqi_slave_destroy()
5868 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) in pqi_getpciinfo_ioctl() argument
5878 pci_dev = ctrl_info->pci_dev; in pqi_getpciinfo_ioctl()
5978 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) in pqi_passthru_ioctl() argument
5989 if (pqi_ctrl_offline(ctrl_info)) in pqi_passthru_ioctl()
6060 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_passthru_ioctl()
6071 if (ctrl_info->raid_iu_timeout_supported) in pqi_passthru_ioctl()
6074 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, in pqi_passthru_ioctl()
6078 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, in pqi_passthru_ioctl()
6123 struct pqi_ctrl_info *ctrl_info; in pqi_ioctl() local
6125 ctrl_info = shost_to_hba(sdev->host); in pqi_ioctl()
6127 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) in pqi_ioctl()
6134 rc = pqi_scan_scsi_devices(ctrl_info); in pqi_ioctl()
6137 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); in pqi_ioctl()
6143 rc = pqi_passthru_ioctl(ctrl_info, arg); in pqi_ioctl()
6157 struct pqi_ctrl_info *ctrl_info; in pqi_firmware_version_show() local
6160 ctrl_info = shost_to_hba(shost); in pqi_firmware_version_show()
6162 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); in pqi_firmware_version_show()
6176 struct pqi_ctrl_info *ctrl_info; in pqi_serial_number_show() local
6179 ctrl_info = shost_to_hba(shost); in pqi_serial_number_show()
6181 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); in pqi_serial_number_show()
6188 struct pqi_ctrl_info *ctrl_info; in pqi_model_show() local
6191 ctrl_info = shost_to_hba(shost); in pqi_model_show()
6193 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); in pqi_model_show()
6200 struct pqi_ctrl_info *ctrl_info; in pqi_vendor_show() local
6203 ctrl_info = shost_to_hba(shost); in pqi_vendor_show()
6205 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); in pqi_vendor_show()
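
The firmware_version/serial_number/model/vendor attributes above are plain Scsi_Host sysfs attributes: map the struct device back to its Scsi_Host, reach the private controller structure, and print one string. A generic sketch of one such attribute; the driver itself goes through its own shost_to_hba() helper, and shost_priv() is used here only to keep the example self-contained:

  #include <linux/device.h>
  #include <scsi/scsi_host.h>

  struct demo_ctrl {
          char firmware_version[32];
  };

  static ssize_t demo_firmware_version_show(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buffer)
  {
          struct Scsi_Host *shost = class_to_shost(dev);
          struct demo_ctrl *ctrl = shost_priv(shost);

          return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl->firmware_version);
  }
  static DEVICE_ATTR(firmware_version, 0444, demo_firmware_version_show, NULL);
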
6281 struct pqi_ctrl_info *ctrl_info; in pqi_unique_id_show() local
6288 ctrl_info = shost_to_hba(sdev->host); in pqi_unique_id_show()
6290 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6294 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6305 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6318 struct pqi_ctrl_info *ctrl_info; in pqi_lunid_show() local
6325 ctrl_info = shost_to_hba(sdev->host); in pqi_lunid_show()
6327 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6331 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6337 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6347 struct pqi_ctrl_info *ctrl_info; in pqi_path_info_show() local
6360 ctrl_info = shost_to_hba(sdev->host); in pqi_path_info_show()
6362 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6366 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6383 ctrl_info->scsi_host->host_no, in pqi_path_info_show()
6422 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6430 struct pqi_ctrl_info *ctrl_info; in pqi_sas_address_show() local
6437 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_address_show()
6439 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6443 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6449 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6457 struct pqi_ctrl_info *ctrl_info; in pqi_ssd_smart_path_enabled_show() local
6463 ctrl_info = shost_to_hba(sdev->host); in pqi_ssd_smart_path_enabled_show()
6465 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6469 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6477 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6485 struct pqi_ctrl_info *ctrl_info; in pqi_raid_level_show() local
6492 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_level_show()
6494 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6498 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6507 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6515 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_cnt_show() local
6522 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_bypass_cnt_show()
6524 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
6528 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
6534 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
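
The per-device attributes above (unique_id, lunid, path_info, sas_address, ssd_smart_path_enabled, raid_level, raid_bypass_cnt) all take scsi_device_list_lock before touching the device so it cannot disappear mid-read, and report an error if it already has. A simplified sketch of that locking discipline, with demo_dev and its field assumed for illustration:

  #include <linux/device.h>
  #include <linux/errno.h>
  #include <linux/spinlock.h>
  #include <scsi/scsi_device.h>
  #include <scsi/scsi_host.h>

  struct demo_dev {
          u8 raid_level;          /* assumed field, for illustration only */
  };

  struct demo_ctrl {
          spinlock_t scsi_device_list_lock;
  };

  static ssize_t demo_raid_level_show(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buffer)
  {
          struct scsi_device *sdev = to_scsi_device(dev);
          struct demo_ctrl *ctrl = shost_priv(sdev->host);
          struct demo_dev *device;
          unsigned long flags;
          u8 raid_level;

          spin_lock_irqsave(&ctrl->scsi_device_list_lock, flags);

          device = sdev->hostdata;        /* NULL once the device is removed */
          if (!device) {
                  spin_unlock_irqrestore(&ctrl->scsi_device_list_lock, flags);
                  return -ENODEV;
          }
          raid_level = device->raid_level;

          spin_unlock_irqrestore(&ctrl->scsi_device_list_lock, flags);

          return snprintf(buffer, PAGE_SIZE, "%u\n", raid_level);
  }
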
6576 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) in pqi_register_scsi() argument
6581 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); in pqi_register_scsi()
6583 dev_err(&ctrl_info->pci_dev->dev, in pqi_register_scsi()
6585 ctrl_info->ctrl_id); in pqi_register_scsi()
6596 shost->max_sectors = ctrl_info->max_sectors; in pqi_register_scsi()
6597 shost->can_queue = ctrl_info->scsi_ml_can_queue; in pqi_register_scsi()
6599 shost->sg_tablesize = ctrl_info->sg_tablesize; in pqi_register_scsi()
6601 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); in pqi_register_scsi()
6603 shost->nr_hw_queues = ctrl_info->num_queue_groups; in pqi_register_scsi()
6604 shost->hostdata[0] = (unsigned long)ctrl_info; in pqi_register_scsi()
6606 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); in pqi_register_scsi()
6608 dev_err(&ctrl_info->pci_dev->dev, in pqi_register_scsi()
6610 ctrl_info->ctrl_id); in pqi_register_scsi()
6614 rc = pqi_add_sas_host(shost, ctrl_info); in pqi_register_scsi()
6616 dev_err(&ctrl_info->pci_dev->dev, in pqi_register_scsi()
6618 ctrl_info->ctrl_id); in pqi_register_scsi()
6622 ctrl_info->scsi_host = shost; in pqi_register_scsi()
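
pqi_register_scsi() above is the canonical host registration sequence: scsi_host_alloc(), copy the controller limits into the Scsi_Host, scsi_add_host(), then attach the transport pieces, unwinding on each failure. A hedged sketch of the core of that sequence; the template fields and limits here are placeholders, not the driver's real values:

  #include <linux/module.h>
  #include <linux/pci.h>
  #include <scsi/scsi_host.h>

  static struct scsi_host_template demo_template = {
          .module = THIS_MODULE,
          .name   = "demo",
  };

  static int demo_register_scsi(struct pci_dev *pdev, void *hba, int can_queue)
  {
          struct Scsi_Host *shost;
          int rc;

          shost = scsi_host_alloc(&demo_template, sizeof(void *));
          if (!shost)
                  return -ENOMEM;

          shost->can_queue = can_queue;               /* assumed limit */
          shost->hostdata[0] = (unsigned long)hba;    /* same trick as above */

          rc = scsi_add_host(shost, &pdev->dev);
          if (rc) {
                  scsi_host_put(shost);               /* undo the allocation */
                  return rc;
          }

          return 0;
  }
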
6634 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) in pqi_unregister_scsi() argument
6638 pqi_delete_sas_host(ctrl_info); in pqi_unregister_scsi()
6640 shost = ctrl_info->scsi_host; in pqi_unregister_scsi()
6648 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) in pqi_wait_for_pqi_reset_completion() argument
6656 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_reset_completion()
6665 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_pqi_reset_completion()
6666 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_pqi_reset_completion()
6679 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) in pqi_reset() argument
6684 if (ctrl_info->pqi_reset_quiesce_supported) { in pqi_reset()
6685 rc = sis_pqi_reset_quiesce(ctrl_info); in pqi_reset()
6687 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
6698 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); in pqi_reset()
6700 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); in pqi_reset()
6702 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
6708 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_serial_number() argument
6717 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); in pqi_get_ctrl_serial_number()
6721 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, in pqi_get_ctrl_serial_number()
6723 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; in pqi_get_ctrl_serial_number()
6731 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_product_details() argument
6740 rc = pqi_identify_controller(ctrl_info, identify); in pqi_get_ctrl_product_details()
6744 memcpy(ctrl_info->firmware_version, identify->firmware_version, in pqi_get_ctrl_product_details()
6746 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; in pqi_get_ctrl_product_details()
6747 snprintf(ctrl_info->firmware_version + in pqi_get_ctrl_product_details()
6748 strlen(ctrl_info->firmware_version), in pqi_get_ctrl_product_details()
6749 sizeof(ctrl_info->firmware_version), in pqi_get_ctrl_product_details()
6752 memcpy(ctrl_info->model, identify->product_id, in pqi_get_ctrl_product_details()
6754 ctrl_info->model[sizeof(identify->product_id)] = '\0'; in pqi_get_ctrl_product_details()
6756 memcpy(ctrl_info->vendor, identify->vendor_id, in pqi_get_ctrl_product_details()
6758 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; in pqi_get_ctrl_product_details()
6767 struct pqi_ctrl_info *ctrl_info; member
6820 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, in pqi_config_table_update() argument
6837 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, in pqi_config_table_update()
6841 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, in pqi_enable_firmware_features() argument
6857 return pqi_config_table_update(ctrl_info, in pqi_enable_firmware_features()
6867 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6871 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, in pqi_firmware_feature_status() argument
6875 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", in pqi_firmware_feature_status()
6881 dev_info(&ctrl_info->pci_dev->dev, in pqi_firmware_feature_status()
6886 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", in pqi_firmware_feature_status()
6890 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, in pqi_ctrl_update_feature_flags() argument
6895 ctrl_info->soft_reset_handshake_supported = in pqi_ctrl_update_feature_flags()
6899 ctrl_info->raid_iu_timeout_supported = in pqi_ctrl_update_feature_flags()
6903 ctrl_info->tmf_iu_timeout_supported = in pqi_ctrl_update_feature_flags()
6908 pqi_firmware_feature_status(ctrl_info, firmware_feature); in pqi_ctrl_update_feature_flags()
6911 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, in pqi_firmware_feature_update() argument
6915 firmware_feature->feature_status(ctrl_info, firmware_feature); in pqi_firmware_feature_update()
6952 struct pqi_ctrl_info *ctrl_info; in pqi_process_firmware_features() local
6958 ctrl_info = section_info->ctrl_info; in pqi_process_firmware_features()
6969 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
6984 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, in pqi_process_firmware_features()
6987 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_firmware_features()
6992 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
7006 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
7030 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) in pqi_process_config_table() argument
7039 table_length = ctrl_info->config_table_length; in pqi_process_config_table()
7045 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
7054 table_iomem_addr = ctrl_info->iomem_base + in pqi_process_config_table()
7055 ctrl_info->config_table_offset; in pqi_process_config_table()
7058 section_info.ctrl_info = ctrl_info; in pqi_process_config_table()
7076 dev_warn(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
7079 ctrl_info->heartbeat_counter = in pqi_process_config_table()
7087 ctrl_info->soft_reset_status = in pqi_process_config_table()
7106 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) in pqi_revert_to_sis_mode() argument
7110 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); in pqi_revert_to_sis_mode()
7111 rc = pqi_reset(ctrl_info); in pqi_revert_to_sis_mode()
7114 rc = sis_reenable_sis_mode(ctrl_info); in pqi_revert_to_sis_mode()
7116 dev_err(&ctrl_info->pci_dev->dev, in pqi_revert_to_sis_mode()
7120 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_revert_to_sis_mode()
7130 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) in pqi_force_sis_mode() argument
7132 if (!sis_is_firmware_running(ctrl_info)) in pqi_force_sis_mode()
7135 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) in pqi_force_sis_mode()
7138 if (sis_is_kernel_up(ctrl_info)) { in pqi_force_sis_mode()
7139 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_force_sis_mode()
7143 return pqi_revert_to_sis_mode(ctrl_info); in pqi_force_sis_mode()
7148 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_init() argument
7153 sis_soft_reset(ctrl_info); in pqi_ctrl_init()
7156 rc = pqi_force_sis_mode(ctrl_info); in pqi_ctrl_init()
7165 rc = sis_wait_for_ctrl_ready(ctrl_info); in pqi_ctrl_init()
7173 rc = sis_get_ctrl_properties(ctrl_info); in pqi_ctrl_init()
7175 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7180 rc = sis_get_pqi_capabilities(ctrl_info); in pqi_ctrl_init()
7182 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7188 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
7190 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
7193 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
7195 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
7199 pqi_calculate_io_resources(ctrl_info); in pqi_ctrl_init()
7201 rc = pqi_alloc_error_buffer(ctrl_info); in pqi_ctrl_init()
7203 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7213 rc = sis_init_base_struct_addr(ctrl_info); in pqi_ctrl_init()
7215 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7221 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); in pqi_ctrl_init()
7223 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7229 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init()
7230 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ctrl_init()
7232 rc = pqi_alloc_admin_queues(ctrl_info); in pqi_ctrl_init()
7234 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7239 rc = pqi_create_admin_queues(ctrl_info); in pqi_ctrl_init()
7241 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7246 rc = pqi_report_device_capability(ctrl_info); in pqi_ctrl_init()
7248 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7253 rc = pqi_validate_device_capability(ctrl_info); in pqi_ctrl_init()
7257 pqi_calculate_queue_resources(ctrl_info); in pqi_ctrl_init()
7259 rc = pqi_enable_msix_interrupts(ctrl_info); in pqi_ctrl_init()
7263 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { in pqi_ctrl_init()
7264 ctrl_info->max_msix_vectors = in pqi_ctrl_init()
7265 ctrl_info->num_msix_vectors_enabled; in pqi_ctrl_init()
7266 pqi_calculate_queue_resources(ctrl_info); in pqi_ctrl_init()
7269 rc = pqi_alloc_io_resources(ctrl_info); in pqi_ctrl_init()
7273 rc = pqi_alloc_operational_queues(ctrl_info); in pqi_ctrl_init()
7275 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7280 pqi_init_operational_queues(ctrl_info); in pqi_ctrl_init()
7282 rc = pqi_request_irqs(ctrl_info); in pqi_ctrl_init()
7286 rc = pqi_create_queues(ctrl_info); in pqi_ctrl_init()
7290 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); in pqi_ctrl_init()
7292 ctrl_info->controller_online = true; in pqi_ctrl_init()
7294 rc = pqi_process_config_table(ctrl_info); in pqi_ctrl_init()
7298 pqi_start_heartbeat_timer(ctrl_info); in pqi_ctrl_init()
7300 rc = pqi_enable_events(ctrl_info); in pqi_ctrl_init()
7302 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7308 rc = pqi_register_scsi(ctrl_info); in pqi_ctrl_init()
7312 rc = pqi_get_ctrl_product_details(ctrl_info); in pqi_ctrl_init()
7314 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7319 rc = pqi_get_ctrl_serial_number(ctrl_info); in pqi_ctrl_init()
7321 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7326 rc = pqi_set_diag_rescan(ctrl_info); in pqi_ctrl_init()
7328 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7333 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); in pqi_ctrl_init()
7335 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7340 pqi_schedule_update_time_worker(ctrl_info); in pqi_ctrl_init()
7342 pqi_scan_scsi_devices(ctrl_info); in pqi_ctrl_init()
7347 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) in pqi_reinit_queues() argument
7353 admin_queues = &ctrl_info->admin_queues; in pqi_reinit_queues()
7358 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_reinit_queues()
7359 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; in pqi_reinit_queues()
7360 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; in pqi_reinit_queues()
7361 ctrl_info->queue_groups[i].oq_ci_copy = 0; in pqi_reinit_queues()
7363 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); in pqi_reinit_queues()
7364 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); in pqi_reinit_queues()
7365 writel(0, ctrl_info->queue_groups[i].oq_pi); in pqi_reinit_queues()
7368 event_queue = &ctrl_info->event_queue; in pqi_reinit_queues()
7373 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_init_resume() argument
7377 rc = pqi_force_sis_mode(ctrl_info); in pqi_ctrl_init_resume()
7385 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); in pqi_ctrl_init_resume()
7393 rc = sis_get_ctrl_properties(ctrl_info); in pqi_ctrl_init_resume()
7395 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7400 rc = sis_get_pqi_capabilities(ctrl_info); in pqi_ctrl_init_resume()
7402 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7412 rc = sis_init_base_struct_addr(ctrl_info); in pqi_ctrl_init_resume()
7414 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7420 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); in pqi_ctrl_init_resume()
7422 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7428 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init_resume()
7429 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ctrl_init_resume()
7431 pqi_reinit_queues(ctrl_info); in pqi_ctrl_init_resume()
7433 rc = pqi_create_admin_queues(ctrl_info); in pqi_ctrl_init_resume()
7435 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7440 rc = pqi_create_queues(ctrl_info); in pqi_ctrl_init_resume()
7444 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); in pqi_ctrl_init_resume()
7446 ctrl_info->controller_online = true; in pqi_ctrl_init_resume()
7447 pqi_ctrl_unblock_requests(ctrl_info); in pqi_ctrl_init_resume()
7449 rc = pqi_process_config_table(ctrl_info); in pqi_ctrl_init_resume()
7453 pqi_start_heartbeat_timer(ctrl_info); in pqi_ctrl_init_resume()
7455 rc = pqi_enable_events(ctrl_info); in pqi_ctrl_init_resume()
7457 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7462 rc = pqi_get_ctrl_product_details(ctrl_info); in pqi_ctrl_init_resume()
7464 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7469 rc = pqi_set_diag_rescan(ctrl_info); in pqi_ctrl_init_resume()
7471 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7476 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); in pqi_ctrl_init_resume()
7478 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7483 pqi_schedule_update_time_worker(ctrl_info); in pqi_ctrl_init_resume()
7485 pqi_scan_scsi_devices(ctrl_info); in pqi_ctrl_init_resume()
7501 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) in pqi_pci_init() argument
7506 rc = pci_enable_device(ctrl_info->pci_dev); in pqi_pci_init()
7508 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
7518 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); in pqi_pci_init()
7520 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); in pqi_pci_init()
7524 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); in pqi_pci_init()
7526 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
7531 ctrl_info->iomem_base = ioremap(pci_resource_start( in pqi_pci_init()
7532 ctrl_info->pci_dev, 0), in pqi_pci_init()
7534 if (!ctrl_info->iomem_base) { in pqi_pci_init()
7535 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
7544 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, in pqi_pci_init()
7547 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
7553 pci_set_master(ctrl_info->pci_dev); in pqi_pci_init()
7555 ctrl_info->registers = ctrl_info->iomem_base; in pqi_pci_init()
7556 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; in pqi_pci_init()
7558 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); in pqi_pci_init()
7563 pci_release_regions(ctrl_info->pci_dev); in pqi_pci_init()
7565 pci_disable_device(ctrl_info->pci_dev); in pqi_pci_init()
7570 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) in pqi_cleanup_pci_init() argument
7572 iounmap(ctrl_info->iomem_base); in pqi_cleanup_pci_init()
7573 pci_release_regions(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
7574 if (pci_is_enabled(ctrl_info->pci_dev)) in pqi_cleanup_pci_init()
7575 pci_disable_device(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
7576 pci_set_drvdata(ctrl_info->pci_dev, NULL); in pqi_cleanup_pci_init()
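
pqi_pci_init() and pqi_cleanup_pci_init() above follow the standard PCI bring-up and teardown: enable the device, set the DMA mask, request the BAR regions, map BAR 0, enable bus mastering, and undo the same steps in reverse. A generic sketch of that pairing; the real driver maps a fixed register window and also tunes the PCIe completion timeout, which is omitted here:

  #include <linux/dma-mapping.h>
  #include <linux/io.h>
  #include <linux/pci.h>

  #define DEMO_DRV_NAME "demo"

  static int demo_pci_init(struct pci_dev *pdev, void __iomem **iomem_base)
  {
          int rc;

          rc = pci_enable_device(pdev);
          if (rc)
                  return rc;

          rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
          if (rc)
                  goto disable_device;

          rc = pci_request_regions(pdev, DEMO_DRV_NAME);
          if (rc)
                  goto disable_device;

          *iomem_base = ioremap(pci_resource_start(pdev, 0),
                                pci_resource_len(pdev, 0));
          if (!*iomem_base) {
                  rc = -ENOMEM;
                  goto release_regions;
          }

          pci_set_master(pdev);
          return 0;

  release_regions:
          pci_release_regions(pdev);
  disable_device:
          pci_disable_device(pdev);
          return rc;
  }

  static void demo_pci_cleanup(struct pci_dev *pdev, void __iomem *iomem_base)
  {
          iounmap(iomem_base);
          pci_release_regions(pdev);
          pci_disable_device(pdev);
  }
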
7581 struct pqi_ctrl_info *ctrl_info; in pqi_alloc_ctrl_info() local
7583 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), in pqi_alloc_ctrl_info()
7585 if (!ctrl_info) in pqi_alloc_ctrl_info()
7588 mutex_init(&ctrl_info->scan_mutex); in pqi_alloc_ctrl_info()
7589 mutex_init(&ctrl_info->lun_reset_mutex); in pqi_alloc_ctrl_info()
7590 mutex_init(&ctrl_info->ofa_mutex); in pqi_alloc_ctrl_info()
7592 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); in pqi_alloc_ctrl_info()
7593 spin_lock_init(&ctrl_info->scsi_device_list_lock); in pqi_alloc_ctrl_info()
7595 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); in pqi_alloc_ctrl_info()
7596 atomic_set(&ctrl_info->num_interrupts, 0); in pqi_alloc_ctrl_info()
7597 atomic_set(&ctrl_info->sync_cmds_outstanding, 0); in pqi_alloc_ctrl_info()
7599 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); in pqi_alloc_ctrl_info()
7600 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); in pqi_alloc_ctrl_info()
7602 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); in pqi_alloc_ctrl_info()
7603 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); in pqi_alloc_ctrl_info()
7605 sema_init(&ctrl_info->sync_request_sem, in pqi_alloc_ctrl_info()
7607 init_waitqueue_head(&ctrl_info->block_requests_wait); in pqi_alloc_ctrl_info()
7609 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); in pqi_alloc_ctrl_info()
7610 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); in pqi_alloc_ctrl_info()
7611 INIT_WORK(&ctrl_info->raid_bypass_retry_work, in pqi_alloc_ctrl_info()
7614 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; in pqi_alloc_ctrl_info()
7615 ctrl_info->irq_mode = IRQ_MODE_NONE; in pqi_alloc_ctrl_info()
7616 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; in pqi_alloc_ctrl_info()
7618 return ctrl_info; in pqi_alloc_ctrl_info()
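
pqi_alloc_ctrl_info() above pairs a NUMA-aware kzalloc_node() with initialization of every lock, list, work item, and timer the controller structure carries. A trimmed-down sketch of that idiom with a hypothetical structure:

  #include <linux/list.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/timer.h>
  #include <linux/workqueue.h>

  struct demo_ctrl {
          struct list_head scsi_device_list;
          spinlock_t scsi_device_list_lock;
          struct work_struct event_work;
          struct delayed_work rescan_work;
          struct timer_list heartbeat_timer;
  };

  static void demo_event_worker(struct work_struct *work) { }
  static void demo_rescan_worker(struct work_struct *work) { }
  static void demo_heartbeat(struct timer_list *t) { }

  static struct demo_ctrl *demo_alloc_ctrl(int numa_node)
  {
          struct demo_ctrl *ctrl;

          /* allocate close to the controller's NUMA node, zero-filled */
          ctrl = kzalloc_node(sizeof(*ctrl), GFP_KERNEL, numa_node);
          if (!ctrl)
                  return NULL;

          INIT_LIST_HEAD(&ctrl->scsi_device_list);
          spin_lock_init(&ctrl->scsi_device_list_lock);
          INIT_WORK(&ctrl->event_work, demo_event_worker);
          INIT_DELAYED_WORK(&ctrl->rescan_work, demo_rescan_worker);
          timer_setup(&ctrl->heartbeat_timer, demo_heartbeat, 0);

          return ctrl;
  }
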
7621 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) in pqi_free_ctrl_info() argument
7623 kfree(ctrl_info); in pqi_free_ctrl_info()
7626 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_free_interrupts() argument
7628 pqi_free_irqs(ctrl_info); in pqi_free_interrupts()
7629 pqi_disable_msix_interrupts(ctrl_info); in pqi_free_interrupts()
7632 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) in pqi_free_ctrl_resources() argument
7634 pqi_stop_heartbeat_timer(ctrl_info); in pqi_free_ctrl_resources()
7635 pqi_free_interrupts(ctrl_info); in pqi_free_ctrl_resources()
7636 if (ctrl_info->queue_memory_base) in pqi_free_ctrl_resources()
7637 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
7638 ctrl_info->queue_memory_length, in pqi_free_ctrl_resources()
7639 ctrl_info->queue_memory_base, in pqi_free_ctrl_resources()
7640 ctrl_info->queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
7641 if (ctrl_info->admin_queue_memory_base) in pqi_free_ctrl_resources()
7642 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
7643 ctrl_info->admin_queue_memory_length, in pqi_free_ctrl_resources()
7644 ctrl_info->admin_queue_memory_base, in pqi_free_ctrl_resources()
7645 ctrl_info->admin_queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
7646 pqi_free_all_io_requests(ctrl_info); in pqi_free_ctrl_resources()
7647 if (ctrl_info->error_buffer) in pqi_free_ctrl_resources()
7648 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
7649 ctrl_info->error_buffer_length, in pqi_free_ctrl_resources()
7650 ctrl_info->error_buffer, in pqi_free_ctrl_resources()
7651 ctrl_info->error_buffer_dma_handle); in pqi_free_ctrl_resources()
7652 if (ctrl_info->iomem_base) in pqi_free_ctrl_resources()
7653 pqi_cleanup_pci_init(ctrl_info); in pqi_free_ctrl_resources()
7654 pqi_free_ctrl_info(ctrl_info); in pqi_free_ctrl_resources()
7657 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) in pqi_remove_ctrl() argument
7659 pqi_cancel_rescan_worker(ctrl_info); in pqi_remove_ctrl()
7660 pqi_cancel_update_time_worker(ctrl_info); in pqi_remove_ctrl()
7661 pqi_unregister_scsi(ctrl_info); in pqi_remove_ctrl()
7662 if (ctrl_info->pqi_mode_enabled) in pqi_remove_ctrl()
7663 pqi_revert_to_sis_mode(ctrl_info); in pqi_remove_ctrl()
7664 pqi_free_ctrl_resources(ctrl_info); in pqi_remove_ctrl()
7667 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_quiesce() argument
7669 pqi_cancel_update_time_worker(ctrl_info); in pqi_ofa_ctrl_quiesce()
7670 pqi_cancel_rescan_worker(ctrl_info); in pqi_ofa_ctrl_quiesce()
7671 pqi_wait_until_lun_reset_finished(ctrl_info); in pqi_ofa_ctrl_quiesce()
7672 pqi_wait_until_scan_finished(ctrl_info); in pqi_ofa_ctrl_quiesce()
7673 pqi_ctrl_ofa_start(ctrl_info); in pqi_ofa_ctrl_quiesce()
7674 pqi_ctrl_block_requests(ctrl_info); in pqi_ofa_ctrl_quiesce()
7675 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_ofa_ctrl_quiesce()
7676 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS); in pqi_ofa_ctrl_quiesce()
7677 pqi_fail_io_queued_for_all_devices(ctrl_info); in pqi_ofa_ctrl_quiesce()
7678 pqi_wait_until_inbound_queues_empty(ctrl_info); in pqi_ofa_ctrl_quiesce()
7679 pqi_stop_heartbeat_timer(ctrl_info); in pqi_ofa_ctrl_quiesce()
7680 ctrl_info->pqi_mode_enabled = false; in pqi_ofa_ctrl_quiesce()
7681 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_ofa_ctrl_quiesce()
7684 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_unquiesce() argument
7686 pqi_ofa_free_host_buffer(ctrl_info); in pqi_ofa_ctrl_unquiesce()
7687 ctrl_info->pqi_mode_enabled = true; in pqi_ofa_ctrl_unquiesce()
7688 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ofa_ctrl_unquiesce()
7689 ctrl_info->controller_online = true; in pqi_ofa_ctrl_unquiesce()
7690 pqi_ctrl_unblock_requests(ctrl_info); in pqi_ofa_ctrl_unquiesce()
7691 pqi_start_heartbeat_timer(ctrl_info); in pqi_ofa_ctrl_unquiesce()
7692 pqi_schedule_update_time_worker(ctrl_info); in pqi_ofa_ctrl_unquiesce()
7693 pqi_clear_soft_reset_status(ctrl_info, in pqi_ofa_ctrl_unquiesce()
7695 pqi_scan_scsi_devices(ctrl_info); in pqi_ofa_ctrl_unquiesce()
7698 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_alloc_mem() argument
7708 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_alloc_mem()
7713 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_alloc_mem()
7718 ctrl_info->pqi_ofa_chunk_virt_addr = in pqi_ofa_alloc_mem()
7720 if (!ctrl_info->pqi_ofa_chunk_virt_addr) in pqi_ofa_alloc_mem()
7726 ctrl_info->pqi_ofa_chunk_virt_addr[i] = in pqi_ofa_alloc_mem()
7730 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) in pqi_ofa_alloc_mem()
7751 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_alloc_mem()
7754 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_alloc_mem()
7761 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_alloc_host_buffer() argument
7768 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated); in pqi_ofa_alloc_host_buffer()
7772 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz)) in pqi_ofa_alloc_host_buffer()
7778 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_setup_host_buffer() argument
7784 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_setup_host_buffer()
7787 &ctrl_info->pqi_ofa_mem_dma_handle, in pqi_ofa_setup_host_buffer()
7798 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory; in pqi_ofa_setup_host_buffer()
7800 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { in pqi_ofa_setup_host_buffer()
7808 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_free_host_buffer() argument
7814 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_free_host_buffer()
7826 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_ofa_free_host_buffer()
7828 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_free_host_buffer()
7831 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_free_host_buffer()
7834 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_ofa_free_host_buffer()
7836 ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_free_host_buffer()
7837 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_free_host_buffer()
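
The OFA host-buffer helpers above carve the firmware-requested buffer into DMA-coherent chunks and free them the same way on teardown or partial failure. A generic sketch of chunked dma_alloc_coherent() with unwind, under assumed sizes and structures:

  #include <linux/dma-mapping.h>
  #include <linux/errno.h>
  #include <linux/slab.h>

  struct demo_chunks {
          unsigned int count;     /* assumed: number of chunks requested */
          size_t chunk_size;      /* assumed: size of each chunk */
          void **virt;
          dma_addr_t *dma;
  };

  static int demo_alloc_chunks(struct device *dev, struct demo_chunks *c)
  {
          unsigned int i;

          c->virt = kcalloc(c->count, sizeof(*c->virt), GFP_KERNEL);
          c->dma = kcalloc(c->count, sizeof(*c->dma), GFP_KERNEL);
          if (!c->virt || !c->dma)
                  goto free_arrays;

          for (i = 0; i < c->count; i++) {
                  c->virt[i] = dma_alloc_coherent(dev, c->chunk_size,
                                                  &c->dma[i], GFP_KERNEL);
                  if (!c->virt[i])
                          goto free_chunks;
          }

          return 0;

  free_chunks:
          while (i--)             /* undo the chunks already allocated */
                  dma_free_coherent(dev, c->chunk_size, c->virt[i], c->dma[i]);
  free_arrays:
          kfree(c->virt);
          kfree(c->dma);
          return -ENOMEM;
  }

Chunking keeps each coherent allocation small, which is far more likely to succeed than a single large contiguous allocation.
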
7840 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_host_memory_update() argument
7848 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_host_memory_update()
7861 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, in pqi_ofa_host_memory_update()
7868 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, in pqi_ofa_host_memory_update()
7872 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_restart() argument
7875 return pqi_ctrl_init_resume(ctrl_info); in pqi_ofa_ctrl_restart()
7898 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) in pqi_fail_all_outstanding_requests() argument
7904 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_fail_all_outstanding_requests()
7905 io_request = &ctrl_info->io_request_pool[i]; in pqi_fail_all_outstanding_requests()
7923 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) in pqi_take_ctrl_offline_deferred() argument
7926 pqi_stop_heartbeat_timer(ctrl_info); in pqi_take_ctrl_offline_deferred()
7927 pqi_free_interrupts(ctrl_info); in pqi_take_ctrl_offline_deferred()
7928 pqi_cancel_rescan_worker(ctrl_info); in pqi_take_ctrl_offline_deferred()
7929 pqi_cancel_update_time_worker(ctrl_info); in pqi_take_ctrl_offline_deferred()
7930 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_take_ctrl_offline_deferred()
7931 pqi_fail_all_outstanding_requests(ctrl_info); in pqi_take_ctrl_offline_deferred()
7932 pqi_clear_all_queued_raid_bypass_retries(ctrl_info); in pqi_take_ctrl_offline_deferred()
7933 pqi_ctrl_unblock_requests(ctrl_info); in pqi_take_ctrl_offline_deferred()
7938 struct pqi_ctrl_info *ctrl_info; in pqi_ctrl_offline_worker() local
7940 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); in pqi_ctrl_offline_worker()
7941 pqi_take_ctrl_offline_deferred(ctrl_info); in pqi_ctrl_offline_worker()
7944 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) in pqi_take_ctrl_offline() argument
7946 if (!ctrl_info->controller_online) in pqi_take_ctrl_offline()
7949 ctrl_info->controller_online = false; in pqi_take_ctrl_offline()
7950 ctrl_info->pqi_mode_enabled = false; in pqi_take_ctrl_offline()
7951 pqi_ctrl_block_requests(ctrl_info); in pqi_take_ctrl_offline()
7953 sis_shutdown_ctrl(ctrl_info); in pqi_take_ctrl_offline()
7954 pci_disable_device(ctrl_info->pci_dev); in pqi_take_ctrl_offline()
7955 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); in pqi_take_ctrl_offline()
7956 schedule_work(&ctrl_info->ctrl_offline_work); in pqi_take_ctrl_offline()
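
pqi_take_ctrl_offline() above flips the online flag, blocks new requests, and pushes the heavy teardown into ctrl_offline_work; the worker then recovers its controller with container_of(). A minimal sketch of that defer-to-workqueue pattern:

  #include <linux/kernel.h>
  #include <linux/workqueue.h>

  struct demo_ctrl {
          bool online;
          /* must be set up with INIT_WORK(&offline_work, demo_offline_worker) */
          struct work_struct offline_work;
  };

  static void demo_offline_worker(struct work_struct *work)
  {
          struct demo_ctrl *ctrl =
                  container_of(work, struct demo_ctrl, offline_work);

          /* heavy teardown (stop timers, fail outstanding I/O, ...) runs
           * here, outside the context that detected the failure */
          (void)ctrl;
  }

  static void demo_take_offline(struct demo_ctrl *ctrl)
  {
          if (!ctrl->online)
                  return;

          ctrl->online = false;
          schedule_work(&ctrl->offline_work);
  }
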
7977 struct pqi_ctrl_info *ctrl_info; in pqi_pci_probe() local
8001 ctrl_info = pqi_alloc_ctrl_info(node); in pqi_pci_probe()
8002 if (!ctrl_info) { in pqi_pci_probe()
8008 ctrl_info->pci_dev = pci_dev; in pqi_pci_probe()
8010 rc = pqi_pci_init(ctrl_info); in pqi_pci_probe()
8014 rc = pqi_ctrl_init(ctrl_info); in pqi_pci_probe()
8021 pqi_remove_ctrl(ctrl_info); in pqi_pci_probe()
8028 struct pqi_ctrl_info *ctrl_info; in pqi_pci_remove() local
8030 ctrl_info = pci_get_drvdata(pci_dev); in pqi_pci_remove()
8031 if (!ctrl_info) in pqi_pci_remove()
8034 ctrl_info->in_shutdown = true; in pqi_pci_remove()
8036 pqi_remove_ctrl(ctrl_info); in pqi_pci_remove()
8039 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) in pqi_crash_if_pending_command() argument
8045 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_crash_if_pending_command()
8046 io_request = &ctrl_info->io_request_pool[i]; in pqi_crash_if_pending_command()
8058 struct pqi_ctrl_info *ctrl_info; in pqi_shutdown() local
8060 ctrl_info = pci_get_drvdata(pci_dev); in pqi_shutdown()
8061 if (!ctrl_info) { in pqi_shutdown()
8067 pqi_disable_events(ctrl_info); in pqi_shutdown()
8068 pqi_wait_until_ofa_finished(ctrl_info); in pqi_shutdown()
8069 pqi_cancel_update_time_worker(ctrl_info); in pqi_shutdown()
8070 pqi_cancel_rescan_worker(ctrl_info); in pqi_shutdown()
8071 pqi_cancel_event_worker(ctrl_info); in pqi_shutdown()
8073 pqi_ctrl_shutdown_start(ctrl_info); in pqi_shutdown()
8074 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_shutdown()
8076 rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); in pqi_shutdown()
8083 pqi_ctrl_block_device_reset(ctrl_info); in pqi_shutdown()
8084 pqi_wait_until_lun_reset_finished(ctrl_info); in pqi_shutdown()
8090 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); in pqi_shutdown()
8095 pqi_ctrl_block_requests(ctrl_info); in pqi_shutdown()
8097 rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info); in pqi_shutdown()
8104 pqi_crash_if_pending_command(ctrl_info); in pqi_shutdown()
8105 pqi_reset(ctrl_info); in pqi_shutdown()
8134 struct pqi_ctrl_info *ctrl_info; in pqi_suspend() local
8136 ctrl_info = pci_get_drvdata(pci_dev); in pqi_suspend()
8138 pqi_disable_events(ctrl_info); in pqi_suspend()
8139 pqi_cancel_update_time_worker(ctrl_info); in pqi_suspend()
8140 pqi_cancel_rescan_worker(ctrl_info); in pqi_suspend()
8141 pqi_wait_until_scan_finished(ctrl_info); in pqi_suspend()
8142 pqi_wait_until_lun_reset_finished(ctrl_info); in pqi_suspend()
8143 pqi_wait_until_ofa_finished(ctrl_info); in pqi_suspend()
8144 pqi_flush_cache(ctrl_info, SUSPEND); in pqi_suspend()
8145 pqi_ctrl_block_requests(ctrl_info); in pqi_suspend()
8146 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_suspend()
8147 pqi_wait_until_inbound_queues_empty(ctrl_info); in pqi_suspend()
8148 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); in pqi_suspend()
8149 pqi_stop_heartbeat_timer(ctrl_info); in pqi_suspend()
8157 ctrl_info->controller_online = false; in pqi_suspend()
8158 ctrl_info->pqi_mode_enabled = false; in pqi_suspend()
8166 struct pqi_ctrl_info *ctrl_info; in pqi_resume() local
8168 ctrl_info = pci_get_drvdata(pci_dev); in pqi_resume()
8171 ctrl_info->max_hw_queue_index = 0; in pqi_resume()
8172 pqi_free_interrupts(ctrl_info); in pqi_resume()
8173 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); in pqi_resume()
8176 &ctrl_info->queue_groups[0]); in pqi_resume()
8178 dev_err(&ctrl_info->pci_dev->dev, in pqi_resume()
8183 pqi_start_heartbeat_timer(ctrl_info); in pqi_resume()
8184 pqi_ctrl_unblock_requests(ctrl_info); in pqi_resume()
8191 return pqi_ctrl_init_resume(ctrl_info); in pqi_resume()
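
The suspend/resume references close the listing; in this generation of the driver those callbacks appear to be wired up through the legacy struct pci_driver power-management hooks rather than dev_pm_ops. A hedged sketch of callbacks with those signatures; the bodies are placeholders, not the real quiesce/restart sequences shown above:

  #include <linux/pci.h>

  static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
  {
          /* quiesce the controller, flush its cache, block new requests ... */
          pci_save_state(pdev);
          return 0;
  }

  static int demo_resume(struct pci_dev *pdev)
  {
          pci_restore_state(pdev);
          /* re-arm interrupts, restart the heartbeat, unblock requests ... */
          return 0;
  }
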