Lines matching refs: ctrl_info (identifier cross-reference output; the fragments below are from the Linux SmartPQI SCSI driver, drivers/scsi/smartpqi/smartpqi_init.c, each shown with its source line number and enclosing function)

57 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
59 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
61 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
64 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
67 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
71 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
75 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
79 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
80 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
81 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
82 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
83 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
84 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
85 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
220 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_offline() argument
222 return !ctrl_info->controller_online; in pqi_ctrl_offline()
225 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) in pqi_check_ctrl_health() argument
227 if (ctrl_info->controller_online) in pqi_check_ctrl_health()
228 if (!sis_is_firmware_running(ctrl_info)) in pqi_check_ctrl_health()
229 pqi_take_ctrl_offline(ctrl_info); in pqi_check_ctrl_health()
237 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_mode() argument
239 return sis_read_driver_scratch(ctrl_info); in pqi_get_ctrl_mode()
242 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, in pqi_save_ctrl_mode() argument
245 sis_write_driver_scratch(ctrl_info, mode); in pqi_save_ctrl_mode()
248 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_scan() argument
250 ctrl_info->scan_blocked = true; in pqi_ctrl_block_scan()
251 mutex_lock(&ctrl_info->scan_mutex); in pqi_ctrl_block_scan()
254 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_scan() argument
256 ctrl_info->scan_blocked = false; in pqi_ctrl_unblock_scan()
257 mutex_unlock(&ctrl_info->scan_mutex); in pqi_ctrl_unblock_scan()
260 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_scan_blocked() argument
262 return ctrl_info->scan_blocked; in pqi_ctrl_scan_blocked()
265 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_device_reset() argument
267 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_block_device_reset()
270 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_device_reset() argument
272 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_unblock_device_reset()
275 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) in pqi_scsi_block_requests() argument
281 shost = ctrl_info->scsi_host; in pqi_scsi_block_requests()
295 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) in pqi_scsi_unblock_requests() argument
297 scsi_unblock_requests(ctrl_info->scsi_host); in pqi_scsi_unblock_requests()
300 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_busy() argument
302 atomic_inc(&ctrl_info->num_busy_threads); in pqi_ctrl_busy()
305 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unbusy() argument
307 atomic_dec(&ctrl_info->num_busy_threads); in pqi_ctrl_unbusy()
310 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_blocked() argument
312 return ctrl_info->block_requests; in pqi_ctrl_blocked()
315 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_requests() argument
317 ctrl_info->block_requests = true; in pqi_ctrl_block_requests()
320 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_requests() argument
322 ctrl_info->block_requests = false; in pqi_ctrl_unblock_requests()
323 wake_up_all(&ctrl_info->block_requests_wait); in pqi_ctrl_unblock_requests()
326 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_wait_if_ctrl_blocked() argument
328 if (!pqi_ctrl_blocked(ctrl_info)) in pqi_wait_if_ctrl_blocked()
331 atomic_inc(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
332 wait_event(ctrl_info->block_requests_wait, in pqi_wait_if_ctrl_blocked()
333 !pqi_ctrl_blocked(ctrl_info)); in pqi_wait_if_ctrl_blocked()
334 atomic_dec(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
339 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_wait_until_quiesced() argument
349 while (atomic_read(&ctrl_info->num_busy_threads) > in pqi_ctrl_wait_until_quiesced()
350 atomic_read(&ctrl_info->num_blocked_threads)) { in pqi_ctrl_wait_until_quiesced()
352 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
362 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
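
The num_busy_threads / num_blocked_threads counters and the block_requests_wait waitqueue referenced above fit together as follows: pqi_ctrl_busy()/pqi_ctrl_unbusy() bracket in-flight driver activity, threads that find the controller blocked park in pqi_wait_if_ctrl_blocked(), pqi_ctrl_unblock_requests() wakes them with wake_up_all(), and the quiesce loop waits until every busy thread is either finished or parked. A condensed sketch assembled from those fragments (the warning/timeout reporting of the real quiesce loop is omitted and the polling interval is illustrative):

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	/*
	 * Quiesced: every thread that incremented num_busy_threads has
	 * either dropped it again or is parked in pqi_wait_if_ctrl_blocked().
	 */
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);	/* illustrative poll interval */
}
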
372 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_ofa_start() argument
374 mutex_lock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_start()
377 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_ofa_done() argument
379 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_done()
382 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_ofa_finished() argument
384 mutex_lock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
385 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
388 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_in_progress() argument
390 return mutex_is_locked(&ctrl_info->ofa_mutex); in pqi_ofa_in_progress()
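
The ofa_mutex fragments above show the mutex doubling as the "online firmware activation in progress" state: pqi_ctrl_ofa_start() takes it for the whole OFA sequence, pqi_ctrl_ofa_done() releases it, pqi_ofa_in_progress() simply asks mutex_is_locked(), and waiting for OFA to finish is a lock/unlock pair. Assembled into complete helpers with comments (bodies exactly as listed):

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);	/* held for the duration of OFA */
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	/* blocks until pqi_ctrl_ofa_done() drops the mutex */
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}
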
419 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, in pqi_schedule_rescan_worker_with_delay() argument
422 if (pqi_ctrl_offline(ctrl_info)) in pqi_schedule_rescan_worker_with_delay()
425 schedule_delayed_work(&ctrl_info->rescan_work, delay); in pqi_schedule_rescan_worker_with_delay()
428 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_rescan_worker() argument
430 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); in pqi_schedule_rescan_worker()
435 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_rescan_worker_delayed() argument
437 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); in pqi_schedule_rescan_worker_delayed()
440 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) in pqi_cancel_rescan_worker() argument
442 cancel_delayed_work_sync(&ctrl_info->rescan_work); in pqi_cancel_rescan_worker()
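
The rescan-worker helpers above wrap a single delayed work item: scheduling is skipped while the controller is offline, the immediate and delayed variants differ only in the delay they pass, and cancellation uses the _sync variant so a rescan already running completes before the caller proceeds. A sketch assembling the fragments (the delay parameter type and PQI_RESCAN_WORK_DELAY come from the driver but are not visible in this listing):

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	/* _sync: wait for a running pqi_rescan_worker() before returning */
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
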
445 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) in pqi_read_heartbeat_counter() argument
447 if (!ctrl_info->heartbeat_counter) in pqi_read_heartbeat_counter()
450 return readl(ctrl_info->heartbeat_counter); in pqi_read_heartbeat_counter()
453 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) in pqi_read_soft_reset_status() argument
455 return readb(ctrl_info->soft_reset_status); in pqi_read_soft_reset_status()
458 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) in pqi_clear_soft_reset_status() argument
462 status = pqi_read_soft_reset_status(ctrl_info); in pqi_clear_soft_reset_status()
464 writeb(status, ctrl_info->soft_reset_status); in pqi_clear_soft_reset_status()
504 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, in pqi_build_raid_path_request() argument
542 cdb[1] = ctrl_info->ciss_report_log_flags; in pqi_build_raid_path_request()
587 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); in pqi_build_raid_path_request()
606 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], in pqi_build_raid_path_request()
619 struct pqi_ctrl_info *ctrl_info) in pqi_alloc_io_request() argument
622 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ in pqi_alloc_io_request()
625 io_request = &ctrl_info->io_request_pool[i]; in pqi_alloc_io_request()
629 i = (i + 1) % ctrl_info->max_io_slots; in pqi_alloc_io_request()
633 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; in pqi_alloc_io_request()
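
pqi_alloc_io_request() scans io_request_pool round-robin starting at next_io_request_slot; the "benignly racy" comment means the starting index may be stale under concurrent allocation, which at worst costs a few extra loop iterations. A sketch of the loop built from the fragments above; the check that decides whether a slot is free is not among the ctrl_info matches, so it appears here as a hypothetical pqi_io_slot_is_free() stand-in:

static struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (pqi_io_slot_is_free(io_request))	/* hypothetical stand-in for the slot-free test */
			break;
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* remember where to start scanning on the next allocation */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	return io_request;
}
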
645 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, in pqi_send_scsi_raid_request() argument
653 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, in pqi_send_scsi_raid_request()
658 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); in pqi_send_scsi_raid_request()
660 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_send_scsi_raid_request()
667 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, in pqi_send_ctrl_raid_request() argument
670 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, in pqi_send_ctrl_raid_request()
674 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, in pqi_send_ctrl_raid_with_error() argument
678 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, in pqi_send_ctrl_raid_with_error()
682 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, in pqi_identify_controller() argument
685 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, in pqi_identify_controller()
689 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, in pqi_sense_subsystem_info() argument
692 return pqi_send_ctrl_raid_request(ctrl_info, in pqi_sense_subsystem_info()
697 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_inquiry() argument
700 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, in pqi_scsi_inquiry()
704 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, in pqi_identify_physical_device() argument
713 rc = pqi_build_raid_path_request(ctrl_info, &request, in pqi_identify_physical_device()
723 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_identify_physical_device()
725 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_identify_physical_device()
761 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) in pqi_get_advanced_raid_bypass_config() argument
772 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, in pqi_get_advanced_raid_bypass_config()
780 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_get_advanced_raid_bypass_config()
782 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_get_advanced_raid_bypass_config()
801 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_get_advanced_raid_bypass_config()
805 ctrl_info->max_transfer_encrypted_nvme = in pqi_get_advanced_raid_bypass_config()
809 ctrl_info->max_write_raid_5_6 = in pqi_get_advanced_raid_bypass_config()
813 ctrl_info->max_write_raid_1_10_2drive = in pqi_get_advanced_raid_bypass_config()
817 ctrl_info->max_write_raid_1_10_3drive = in pqi_get_advanced_raid_bypass_config()
827 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, in pqi_flush_cache() argument
839 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, in pqi_flush_cache()
847 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, in pqi_csmi_smp_passthru() argument
851 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, in pqi_csmi_smp_passthru()
857 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) in pqi_set_diag_rescan() argument
866 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, in pqi_set_diag_rescan()
873 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, in pqi_set_diag_rescan()
882 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, in pqi_write_host_wellness() argument
885 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, in pqi_write_host_wellness()
903 struct pqi_ctrl_info *ctrl_info) in pqi_write_driver_version_to_host_wellness() argument
931 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); in pqi_write_driver_version_to_host_wellness()
952 struct pqi_ctrl_info *ctrl_info) in pqi_write_current_time_to_host_wellness() argument
994 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); in pqi_write_current_time_to_host_wellness()
1006 struct pqi_ctrl_info *ctrl_info; in pqi_update_time_worker() local
1008 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, in pqi_update_time_worker()
1011 rc = pqi_write_current_time_to_host_wellness(ctrl_info); in pqi_update_time_worker()
1013 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_time_worker()
1016 schedule_delayed_work(&ctrl_info->update_time_work, in pqi_update_time_worker()
1020 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_update_time_worker() argument
1022 schedule_delayed_work(&ctrl_info->update_time_work, 0); in pqi_schedule_update_time_worker()
1025 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) in pqi_cancel_update_time_worker() argument
1027 cancel_delayed_work_sync(&ctrl_info->update_time_work); in pqi_cancel_update_time_worker()
1030 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, in pqi_report_luns() argument
1033 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); in pqi_report_luns()
1036 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) in pqi_report_phys_logical_luns() argument
1051 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); in pqi_report_phys_logical_luns()
1071 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); in pqi_report_phys_logical_luns()
1097 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) in pqi_report_phys_luns() argument
1099 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer); in pqi_report_phys_luns()
1102 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) in pqi_report_logical_luns() argument
1104 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); in pqi_report_logical_luns()
1107 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_lists() argument
1118 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); in pqi_get_device_lists()
1120 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1123 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); in pqi_get_device_lists()
1125 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1216 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_level() argument
1227 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_level()
1240 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, in pqi_validate_raid_map() argument
1280 dev_warn(&ctrl_info->pci_dev->dev, in pqi_validate_raid_map()
1288 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_map() argument
1299 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, in pqi_get_raid_map()
1314 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, in pqi_get_raid_map()
1321 dev_warn(&ctrl_info->pci_dev->dev, in pqi_get_raid_map()
1330 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); in pqi_get_raid_map()
1344 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, in pqi_set_max_transfer_encrypted() argument
1347 if (!ctrl_info->lv_drive_type_mix_valid) { in pqi_set_max_transfer_encrypted()
1361 ctrl_info->max_transfer_encrypted_sas_sata; in pqi_set_max_transfer_encrypted()
1365 ctrl_info->max_transfer_encrypted_nvme; in pqi_set_max_transfer_encrypted()
1371 min(ctrl_info->max_transfer_encrypted_sas_sata, in pqi_set_max_transfer_encrypted()
1372 ctrl_info->max_transfer_encrypted_nvme); in pqi_set_max_transfer_encrypted()
1377 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_bypass_status() argument
1388 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_bypass_status()
1402 pqi_get_raid_map(ctrl_info, device) == 0) { in pqi_get_raid_bypass_status()
1406 pqi_set_max_transfer_encrypted(ctrl_info, device); in pqi_get_raid_bypass_status()
1417 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, in pqi_get_volume_status() argument
1431 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_volume_status()
1457 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_physical_device_info() argument
1465 rc = pqi_identify_physical_device(ctrl_info, device, in pqi_get_physical_device_info()
1506 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_logical_device_info() argument
1517 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); in pqi_get_logical_device_info()
1534 pqi_get_raid_level(ctrl_info, device); in pqi_get_logical_device_info()
1535 pqi_get_raid_bypass_status(ctrl_info, device); in pqi_get_logical_device_info()
1536 pqi_get_volume_status(ctrl_info, device); in pqi_get_logical_device_info()
1546 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_info() argument
1556 rc = pqi_get_logical_device_info(ctrl_info, device); in pqi_get_device_info()
1558 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); in pqi_get_device_info()
1563 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, in pqi_show_volume_status() argument
1660 dev_info(&ctrl_info->pci_dev->dev, in pqi_show_volume_status()
1662 ctrl_info->scsi_host->host_no, in pqi_show_volume_status()
1668 struct pqi_ctrl_info *ctrl_info; in pqi_rescan_worker() local
1670 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, in pqi_rescan_worker()
1673 pqi_scan_scsi_devices(ctrl_info); in pqi_rescan_worker()
1676 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, in pqi_add_device() argument
1682 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, in pqi_add_device()
1685 rc = pqi_add_sas_device(ctrl_info->sas_host, device); in pqi_add_device()
1692 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) in pqi_remove_device() argument
1698 rc = pqi_device_wait_for_pending_io(ctrl_info, device, in pqi_remove_device()
1701 dev_err(&ctrl_info->pci_dev->dev, in pqi_remove_device()
1703 ctrl_info->scsi_host->host_no, device->bus, in pqi_remove_device()
1715 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, in pqi_find_scsi_dev() argument
1720 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_find_scsi_dev()
1744 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_find_entry() argument
1749 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_scsi_find_entry()
1774 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, in pqi_dev_info() argument
1781 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); in pqi_dev_info()
1830 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); in pqi_dev_info()
1905 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, in pqi_fixup_botched_add() argument
1910 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
1912 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
1926 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, in pqi_update_device_list() argument
1946 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
1949 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_update_device_list()
1955 find_result = pqi_scsi_find_entry(ctrl_info, device, in pqi_update_device_list()
1986 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, in pqi_update_device_list()
2002 &ctrl_info->scsi_device_list); in pqi_update_device_list()
2008 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2015 if (pqi_ofa_in_progress(ctrl_info)) { in pqi_update_device_list()
2019 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_update_device_list()
2020 pqi_scsi_unblock_requests(ctrl_info); in pqi_update_device_list()
2026 pqi_dev_info(ctrl_info, "offline", device); in pqi_update_device_list()
2027 pqi_show_volume_status(ctrl_info, device); in pqi_update_device_list()
2031 pqi_remove_device(ctrl_info, device); in pqi_update_device_list()
2034 pqi_dev_info(ctrl_info, "removed", device); in pqi_update_device_list()
2043 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_update_device_list()
2057 rc = pqi_add_device(ctrl_info, device); in pqi_update_device_list()
2059 pqi_dev_info(ctrl_info, "added", device); in pqi_update_device_list()
2061 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_device_list()
2063 ctrl_info->scsi_host->host_no, in pqi_update_device_list()
2066 pqi_fixup_botched_add(ctrl_info, device); in pqi_update_device_list()
2118 static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info, in pqi_set_physical_device_wwid() argument
2121 if (ctrl_info->unique_wwid_in_report_phys_lun_supported || in pqi_set_physical_device_wwid()
2128 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) in pqi_update_scsi_devices() argument
2152 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); in pqi_update_scsi_devices()
2179 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2199 ctrl_info->lv_drive_type_mix_valid = true; in pqi_update_scsi_devices()
2207 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); in pqi_update_scsi_devices()
2215 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2270 rc = pqi_get_device_info(ctrl_info, device, id_phys); in pqi_update_scsi_devices()
2272 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2278 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2282 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2293 pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry); in pqi_update_scsi_devices()
2312 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); in pqi_update_scsi_devices()
2331 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) in pqi_scan_scsi_devices() argument
2336 if (pqi_ctrl_offline(ctrl_info)) in pqi_scan_scsi_devices()
2339 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2342 if (pqi_ctrl_scan_blocked(ctrl_info)) in pqi_scan_scsi_devices()
2344 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_scan_scsi_devices()
2348 rc = pqi_update_scsi_devices(ctrl_info); in pqi_scan_scsi_devices()
2349 if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) in pqi_scan_scsi_devices()
2350 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_scan_scsi_devices()
2352 mutex_unlock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
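
Read together with the pqi_ctrl_block_scan()/pqi_ctrl_scan_blocked() fragments earlier in the listing, pqi_scan_scsi_devices() shows how rescans are gated: a scan is refused while scan_blocked is set, mutex_trylock(&scan_mutex) keeps concurrent scans from overlapping, and a delayed retry is scheduled when the scan cannot run or fails. A reduced sketch assembled from the fragments; the specific error codes on the early-return paths are illustrative, not taken from the listing:

static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;	/* illustrative error code */

	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
		/* another scan holds the mutex, or scans are blocked */
		if (pqi_ctrl_scan_blocked(ctrl_info))
			return -EBUSY;		/* illustrative */
		pqi_schedule_rescan_worker_delayed(ctrl_info);
		return -EINPROGRESS;	/* illustrative */
	}

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
		pqi_schedule_rescan_worker_delayed(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}

pqi_scan_finished() below relies on the same mutex: the scan is reported complete once scan_mutex is no longer held.
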
2359 struct pqi_ctrl_info *ctrl_info; in pqi_scan_start() local
2361 ctrl_info = shost_to_hba(shost); in pqi_scan_start()
2363 pqi_scan_scsi_devices(ctrl_info); in pqi_scan_start()
2371 struct pqi_ctrl_info *ctrl_info; in pqi_scan_finished() local
2373 ctrl_info = shost_priv(shost); in pqi_scan_finished()
2375 return !mutex_is_locked(&ctrl_info->scan_mutex); in pqi_scan_finished()
2402 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, in pqi_aio_raid_level_supported() argument
2411 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2412 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) in pqi_aio_raid_level_supported()
2416 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2417 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) in pqi_aio_raid_level_supported()
2421 if (rmd->is_write && (!ctrl_info->enable_r5_writes || in pqi_aio_raid_level_supported()
2422 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2426 if (rmd->is_write && (!ctrl_info->enable_r6_writes || in pqi_aio_raid_level_supported()
2427 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2486 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, in pci_get_aio_common_raid_map_values() argument
2720 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_raid_bypass_submit_scsi_cmd() argument
2738 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) in pqi_raid_bypass_submit_scsi_cmd()
2746 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); in pqi_raid_bypass_submit_scsi_cmd()
2803 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, in pqi_raid_bypass_submit_scsi_cmd()
2807 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, in pqi_raid_bypass_submit_scsi_cmd()
2812 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, in pqi_raid_bypass_submit_scsi_cmd()
2831 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) in pqi_wait_for_pqi_mode_ready() argument
2838 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_mode_ready()
2847 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2859 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2871 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2892 struct pqi_ctrl_info *ctrl_info; in pqi_take_device_offline() local
2900 ctrl_info = shost_to_hba(sdev->host); in pqi_take_device_offline()
2901 pqi_schedule_rescan_worker(ctrl_info); in pqi_take_device_offline()
2902 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", in pqi_take_device_offline()
2903 path, ctrl_info->scsi_host->host_no, device->bus, in pqi_take_device_offline()
2978 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); in pqi_process_raid_io_error() local
2985 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
2993 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3110 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, in pqi_interpret_task_management_response() argument
3129 dev_err(&ctrl_info->pci_dev->dev, in pqi_interpret_task_management_response()
3135 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) in pqi_invalid_response() argument
3137 pqi_take_ctrl_offline(ctrl_info); in pqi_invalid_response()
3140 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) in pqi_process_io_intr() argument
3154 if (oq_pi >= ctrl_info->num_elements_per_oq) { in pqi_process_io_intr()
3155 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
3156 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3158 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); in pqi_process_io_intr()
3169 if (request_id >= ctrl_info->max_io_slots) { in pqi_process_io_intr()
3170 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
3171 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3173 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); in pqi_process_io_intr()
3177 io_request = &ctrl_info->io_request_pool[request_id]; in pqi_process_io_intr()
3179 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
3180 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3200 io_request->status = pqi_interpret_task_management_response(ctrl_info, in pqi_process_io_intr()
3209 io_request->error_info = ctrl_info->error_buffer + in pqi_process_io_intr()
3215 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
3216 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3228 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; in pqi_process_io_intr()
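
pqi_process_io_intr() drains one outbound (response) queue: it validates the producer index and each response's request_id against num_elements_per_oq and max_io_slots, maps the request_id back into io_request_pool, and advances the consumer index modulo the ring size; any out-of-range value calls pqi_invalid_response(), which takes the controller offline. A skeleton of that loop built from the fragments; the parts that do not reference ctrl_info (fetching the response element, dispatching on its IU type, publishing the consumer index) are reduced to comments, and pqi_response_request_id() is a hypothetical stand-in for extracting the request_id:

static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	int num_responses = 0;
	u32 oq_pi;
	u32 oq_ci = queue_group->oq_ci_copy;
	u16 request_id;
	struct pqi_io_request *io_request;

	while (1) {
		oq_pi = readl(queue_group->oq_pi);	/* producer index written by the controller */
		if (oq_pi >= ctrl_info->num_elements_per_oq) {
			pqi_invalid_response(ctrl_info);
			return -1;	/* controller has been taken offline */
		}
		if (oq_pi == oq_ci)
			break;		/* ring drained */

		request_id = pqi_response_request_id(queue_group, oq_ci);	/* hypothetical helper */
		if (request_id >= ctrl_info->max_io_slots) {
			pqi_invalid_response(ctrl_info);
			return -1;
		}

		io_request = &ctrl_info->io_request_pool[request_id];
		/* ... dispatch on the response IU type, complete io_request ... */

		num_responses++;
		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	/* ... publish the updated consumer index back to the controller ... */
	queue_group->oq_ci_copy = oq_ci;

	return num_responses;
}
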
3252 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, in pqi_send_event_ack() argument
3261 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; in pqi_send_event_ack()
3271 ctrl_info->num_elements_per_iq)) in pqi_send_event_ack()
3277 if (pqi_ctrl_offline(ctrl_info)) in pqi_send_event_ack()
3286 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; in pqi_send_event_ack()
3298 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, in pqi_acknowledge_event() argument
3312 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); in pqi_acknowledge_event()
3319 struct pqi_ctrl_info *ctrl_info) in pqi_poll_for_soft_reset_status() argument
3327 status = pqi_read_soft_reset_status(ctrl_info); in pqi_poll_for_soft_reset_status()
3334 if (!sis_is_firmware_running(ctrl_info)) in pqi_poll_for_soft_reset_status()
3338 dev_warn(&ctrl_info->pci_dev->dev, in pqi_poll_for_soft_reset_status()
3347 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) in pqi_process_soft_reset() argument
3353 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3354 reset_status = pqi_poll_for_soft_reset_status(ctrl_info); in pqi_process_soft_reset()
3365 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3367 sis_soft_reset(ctrl_info); in pqi_process_soft_reset()
3370 ctrl_info->pqi_mode_enabled = false; in pqi_process_soft_reset()
3371 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_process_soft_reset()
3372 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); in pqi_process_soft_reset()
3373 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3374 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3375 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3380 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3382 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3383 pqi_clear_soft_reset_status(ctrl_info); in pqi_process_soft_reset()
3384 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3385 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3386 pqi_ofa_ctrl_unquiesce(ctrl_info); in pqi_process_soft_reset()
3391 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3394 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3395 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3396 pqi_ofa_ctrl_unquiesce(ctrl_info); in pqi_process_soft_reset()
3397 pqi_take_ctrl_offline(ctrl_info); in pqi_process_soft_reset()
3404 struct pqi_ctrl_info *ctrl_info; in pqi_ofa_memory_alloc_worker() local
3406 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); in pqi_ofa_memory_alloc_worker()
3408 pqi_ctrl_ofa_start(ctrl_info); in pqi_ofa_memory_alloc_worker()
3409 pqi_ofa_setup_host_buffer(ctrl_info); in pqi_ofa_memory_alloc_worker()
3410 pqi_ofa_host_memory_update(ctrl_info); in pqi_ofa_memory_alloc_worker()
3415 struct pqi_ctrl_info *ctrl_info; in pqi_ofa_quiesce_worker() local
3418 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); in pqi_ofa_quiesce_worker()
3420 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; in pqi_ofa_quiesce_worker()
3422 pqi_ofa_ctrl_quiesce(ctrl_info); in pqi_ofa_quiesce_worker()
3423 pqi_acknowledge_event(ctrl_info, event); in pqi_ofa_quiesce_worker()
3424 pqi_process_soft_reset(ctrl_info); in pqi_ofa_quiesce_worker()
3427 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_process_event() argument
3436 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3438 schedule_work(&ctrl_info->ofa_memory_alloc_work); in pqi_ofa_process_event()
3441 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3443 schedule_work(&ctrl_info->ofa_quiesce_work); in pqi_ofa_process_event()
3447 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3449 ctrl_info->ofa_cancel_reason); in pqi_ofa_process_event()
3450 pqi_ofa_free_host_buffer(ctrl_info); in pqi_ofa_process_event()
3451 pqi_ctrl_ofa_done(ctrl_info); in pqi_ofa_process_event()
3454 dev_err(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3467 struct pqi_ctrl_info *ctrl_info; in pqi_event_worker() local
3471 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); in pqi_event_worker()
3473 pqi_ctrl_busy(ctrl_info); in pqi_event_worker()
3474 pqi_wait_if_ctrl_blocked(ctrl_info); in pqi_event_worker()
3475 if (pqi_ctrl_offline(ctrl_info)) in pqi_event_worker()
3479 event = ctrl_info->events; in pqi_event_worker()
3484 ack_event = pqi_ofa_process_event(ctrl_info, event); in pqi_event_worker()
3490 pqi_acknowledge_event(ctrl_info, event); in pqi_event_worker()
3496 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_event_worker()
3499 pqi_ctrl_unbusy(ctrl_info); in pqi_event_worker()
3508 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); in pqi_heartbeat_timer_handler() local
3510 pqi_check_ctrl_health(ctrl_info); in pqi_heartbeat_timer_handler()
3511 if (pqi_ctrl_offline(ctrl_info)) in pqi_heartbeat_timer_handler()
3514 num_interrupts = atomic_read(&ctrl_info->num_interrupts); in pqi_heartbeat_timer_handler()
3515 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); in pqi_heartbeat_timer_handler()
3517 if (num_interrupts == ctrl_info->previous_num_interrupts) { in pqi_heartbeat_timer_handler()
3518 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { in pqi_heartbeat_timer_handler()
3519 dev_err(&ctrl_info->pci_dev->dev, in pqi_heartbeat_timer_handler()
3522 pqi_take_ctrl_offline(ctrl_info); in pqi_heartbeat_timer_handler()
3526 ctrl_info->previous_num_interrupts = num_interrupts; in pqi_heartbeat_timer_handler()
3529 ctrl_info->previous_heartbeat_count = heartbeat_count; in pqi_heartbeat_timer_handler()
3530 mod_timer(&ctrl_info->heartbeat_timer, in pqi_heartbeat_timer_handler()
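
The heartbeat fragments above implement lockup detection: on each timer tick the handler compares the MSI-X interrupt count and the firmware heartbeat counter with the values captured on the previous tick, and only when neither has advanced is the controller declared dead. A condensed sketch assembled from the fragments (the dev_err() message and the timer interval constant are not part of this listing; PQI_HEARTBEAT_TIMER_INTERVAL is used as an assumed name for the interval):

static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
	int num_interrupts;
	u32 heartbeat_count;
	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
			/* no interrupts and no firmware heartbeat since the last tick */
			pqi_take_ctrl_offline(ctrl_info);
			return;
		}
	} else {
		ctrl_info->previous_num_interrupts = num_interrupts;
	}

	ctrl_info->previous_heartbeat_count = heartbeat_count;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);	/* assumed interval macro */
}
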
3534 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) in pqi_start_heartbeat_timer() argument
3536 if (!ctrl_info->heartbeat_counter) in pqi_start_heartbeat_timer()
3539 ctrl_info->previous_num_interrupts = in pqi_start_heartbeat_timer()
3540 atomic_read(&ctrl_info->num_interrupts); in pqi_start_heartbeat_timer()
3541 ctrl_info->previous_heartbeat_count = in pqi_start_heartbeat_timer()
3542 pqi_read_heartbeat_counter(ctrl_info); in pqi_start_heartbeat_timer()
3544 ctrl_info->heartbeat_timer.expires = in pqi_start_heartbeat_timer()
3546 add_timer(&ctrl_info->heartbeat_timer); in pqi_start_heartbeat_timer()
3549 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) in pqi_stop_heartbeat_timer() argument
3551 del_timer_sync(&ctrl_info->heartbeat_timer); in pqi_stop_heartbeat_timer()
3554 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_capture_event_payload() argument
3559 ctrl_info->ofa_bytes_requested = in pqi_ofa_capture_event_payload()
3563 ctrl_info->ofa_cancel_reason = in pqi_ofa_capture_event_payload()
3569 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) in pqi_process_event_intr() argument
3579 event_queue = &ctrl_info->event_queue; in pqi_process_event_intr()
3586 pqi_invalid_response(ctrl_info); in pqi_process_event_intr()
3587 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_event_intr()
3602 event = &ctrl_info->events[event_index]; in pqi_process_event_intr()
3609 pqi_ofa_capture_event_payload(ctrl_info, event, response); in pqi_process_event_intr()
3618 schedule_work(&ctrl_info->event_work); in pqi_process_event_intr()
3626 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) in pqi_configure_legacy_intx() argument
3632 pqi_registers = ctrl_info->pqi_registers; in pqi_configure_legacy_intx()
3644 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, in pqi_change_irq_mode() argument
3647 switch (ctrl_info->irq_mode) { in pqi_change_irq_mode()
3653 pqi_configure_legacy_intx(ctrl_info, true); in pqi_change_irq_mode()
3654 sis_enable_intx(ctrl_info); in pqi_change_irq_mode()
3663 pqi_configure_legacy_intx(ctrl_info, false); in pqi_change_irq_mode()
3664 sis_enable_msix(ctrl_info); in pqi_change_irq_mode()
3669 pqi_configure_legacy_intx(ctrl_info, false); in pqi_change_irq_mode()
3676 sis_enable_msix(ctrl_info); in pqi_change_irq_mode()
3679 pqi_configure_legacy_intx(ctrl_info, true); in pqi_change_irq_mode()
3680 sis_enable_intx(ctrl_info); in pqi_change_irq_mode()
3688 ctrl_info->irq_mode = new_mode; in pqi_change_irq_mode()
3693 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) in pqi_is_valid_irq() argument
3698 switch (ctrl_info->irq_mode) { in pqi_is_valid_irq()
3703 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); in pqi_is_valid_irq()
3720 struct pqi_ctrl_info *ctrl_info; in pqi_irq_handler() local
3726 ctrl_info = queue_group->ctrl_info; in pqi_irq_handler()
3728 if (!pqi_is_valid_irq(ctrl_info)) in pqi_irq_handler()
3731 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); in pqi_irq_handler()
3735 if (irq == ctrl_info->event_irq) { in pqi_irq_handler()
3736 num_events_handled = pqi_process_event_intr(ctrl_info); in pqi_irq_handler()
3744 atomic_inc(&ctrl_info->num_interrupts); in pqi_irq_handler()
3746 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); in pqi_irq_handler()
3747 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); in pqi_irq_handler()
3753 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) in pqi_request_irqs() argument
3755 struct pci_dev *pci_dev = ctrl_info->pci_dev; in pqi_request_irqs()
3759 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); in pqi_request_irqs()
3761 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { in pqi_request_irqs()
3763 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); in pqi_request_irqs()
3770 ctrl_info->num_msix_vectors_initialized++; in pqi_request_irqs()
3776 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) in pqi_free_irqs() argument
3780 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) in pqi_free_irqs()
3781 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), in pqi_free_irqs()
3782 &ctrl_info->queue_groups[i]); in pqi_free_irqs()
3784 ctrl_info->num_msix_vectors_initialized = 0; in pqi_free_irqs()
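
pqi_request_irqs() registers one handler per enabled MSI-X vector, passing the matching queue group as the handler's dev_id, and records vector 0 as the event interrupt; pqi_free_irqs() tears this down with the same (vector, queue group) pairing and resets the initialized count. A sketch assembled from the fragments; the request_irq() arguments that do not reference ctrl_info (handler, flags, error reporting) are a plausible reconstruction, not copied from the listing:

static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	struct pci_dev *pci_dev = ctrl_info->pci_dev;
	int i;
	int rc;

	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
		if (rc) {
			dev_err(&pci_dev->dev,
				"irq request failed for vector %d\n", i);	/* reconstructed message */
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}

static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
			&ctrl_info->queue_groups[i]);

	ctrl_info->num_msix_vectors_initialized = 0;
}
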
3787 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_enable_msix_interrupts() argument
3791 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, in pqi_enable_msix_interrupts()
3792 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, in pqi_enable_msix_interrupts()
3795 dev_err(&ctrl_info->pci_dev->dev, in pqi_enable_msix_interrupts()
3801 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; in pqi_enable_msix_interrupts()
3802 ctrl_info->irq_mode = IRQ_MODE_MSIX; in pqi_enable_msix_interrupts()
3806 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_disable_msix_interrupts() argument
3808 if (ctrl_info->num_msix_vectors_enabled) { in pqi_disable_msix_interrupts()
3809 pci_free_irq_vectors(ctrl_info->pci_dev); in pqi_disable_msix_interrupts()
3810 ctrl_info->num_msix_vectors_enabled = 0; in pqi_disable_msix_interrupts()
3814 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_operational_queues() argument
3830 ctrl_info->num_elements_per_iq; in pqi_alloc_operational_queues()
3833 ctrl_info->num_elements_per_oq; in pqi_alloc_operational_queues()
3834 num_inbound_queues = ctrl_info->num_queue_groups * 2; in pqi_alloc_operational_queues()
3835 num_outbound_queues = ctrl_info->num_queue_groups; in pqi_alloc_operational_queues()
3836 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; in pqi_alloc_operational_queues()
3868 ctrl_info->queue_memory_base = in pqi_alloc_operational_queues()
3869 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_operational_queues()
3870 &ctrl_info->queue_memory_base_dma_handle, in pqi_alloc_operational_queues()
3873 if (!ctrl_info->queue_memory_base) in pqi_alloc_operational_queues()
3876 ctrl_info->queue_memory_length = alloc_length; in pqi_alloc_operational_queues()
3878 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, in pqi_alloc_operational_queues()
3881 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3882 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3885 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3886 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3892 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3893 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3899 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3900 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3903 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3904 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3910 ctrl_info->event_queue.oq_element_array = element_array; in pqi_alloc_operational_queues()
3911 ctrl_info->event_queue.oq_element_array_bus_addr = in pqi_alloc_operational_queues()
3912 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3913 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3920 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3921 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3924 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3926 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3932 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3934 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3940 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3942 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3948 ctrl_info->event_queue.oq_pi = next_queue_index; in pqi_alloc_operational_queues()
3949 ctrl_info->event_queue.oq_pi_bus_addr = in pqi_alloc_operational_queues()
3950 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3952 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3957 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) in pqi_init_operational_queues() argument
3967 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
3968 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; in pqi_init_operational_queues()
3975 ctrl_info->event_queue.oq_id = next_oq_id++; in pqi_init_operational_queues()
3976 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
3977 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; in pqi_init_operational_queues()
3978 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; in pqi_init_operational_queues()
3979 ctrl_info->queue_groups[i].oq_id = next_oq_id++; in pqi_init_operational_queues()
3986 ctrl_info->event_queue.int_msg_num = 0; in pqi_init_operational_queues()
3987 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
3988 ctrl_info->queue_groups[i].int_msg_num = i; in pqi_init_operational_queues()
3990 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
3991 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); in pqi_init_operational_queues()
3992 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); in pqi_init_operational_queues()
3993 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); in pqi_init_operational_queues()
3994 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); in pqi_init_operational_queues()
3998 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_admin_queues() argument
4007 ctrl_info->admin_queue_memory_base = in pqi_alloc_admin_queues()
4008 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_admin_queues()
4009 &ctrl_info->admin_queue_memory_base_dma_handle, in pqi_alloc_admin_queues()
4012 if (!ctrl_info->admin_queue_memory_base) in pqi_alloc_admin_queues()
4015 ctrl_info->admin_queue_memory_length = alloc_length; in pqi_alloc_admin_queues()
4017 admin_queues = &ctrl_info->admin_queues; in pqi_alloc_admin_queues()
4018 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, in pqi_alloc_admin_queues()
4030 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4032 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4034 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4036 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4038 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4040 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4042 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4044 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4052 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) in pqi_create_admin_queues() argument
4060 pqi_registers = ctrl_info->pqi_registers; in pqi_create_admin_queues()
4061 admin_queues = &ctrl_info->admin_queues; in pqi_create_admin_queues()
4095 admin_queues->iq_pi = ctrl_info->iomem_base + in pqi_create_admin_queues()
4098 admin_queues->oq_ci = ctrl_info->iomem_base + in pqi_create_admin_queues()
4105 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, in pqi_submit_admin_request() argument
4112 admin_queues = &ctrl_info->admin_queues; in pqi_submit_admin_request()
4132 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, in pqi_poll_for_admin_response() argument
4140 admin_queues = &ctrl_info->admin_queues; in pqi_poll_for_admin_response()
4150 dev_err(&ctrl_info->pci_dev->dev, in pqi_poll_for_admin_response()
4154 if (!sis_is_firmware_running(ctrl_info)) in pqi_poll_for_admin_response()
4169 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, in pqi_start_io() argument
4208 ctrl_info->num_elements_per_iq)) in pqi_start_io()
4218 ctrl_info->num_elements_per_iq - iq_pi; in pqi_start_io()
4232 ctrl_info->num_elements_per_iq; in pqi_start_io()
4251 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, in pqi_wait_for_completion_io() argument
4263 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_completion_io()
4264 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_completion_io()
4309 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, in pqi_submit_raid_request_synchronous() argument
4319 if (down_interruptible(&ctrl_info->sync_request_sem)) in pqi_submit_raid_request_synchronous()
4322 down(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4325 pqi_ctrl_busy(ctrl_info); in pqi_submit_raid_request_synchronous()
4331 pqi_wait_if_ctrl_blocked(ctrl_info); in pqi_submit_raid_request_synchronous()
4333 if (pqi_ctrl_offline(ctrl_info)) { in pqi_submit_raid_request_synchronous()
4338 io_request = pqi_alloc_io_request(ctrl_info); in pqi_submit_raid_request_synchronous()
4354 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_submit_raid_request_synchronous()
4357 pqi_wait_for_completion_io(ctrl_info, &wait); in pqi_submit_raid_request_synchronous()
4371 pqi_ctrl_unbusy(ctrl_info); in pqi_submit_raid_request_synchronous()
4372 up(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4397 struct pqi_ctrl_info *ctrl_info, in pqi_submit_admin_request_synchronous() argument
4403 pqi_submit_admin_request(ctrl_info, request); in pqi_submit_admin_request_synchronous()
4405 rc = pqi_poll_for_admin_response(ctrl_info, response); in pqi_submit_admin_request_synchronous()
4413 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) in pqi_report_device_capability() argument
4435 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_report_device_capability()
4442 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); in pqi_report_device_capability()
4444 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_report_device_capability()
4456 ctrl_info->max_inbound_queues = in pqi_report_device_capability()
4458 ctrl_info->max_elements_per_iq = in pqi_report_device_capability()
4460 ctrl_info->max_iq_element_length = in pqi_report_device_capability()
4463 ctrl_info->max_outbound_queues = in pqi_report_device_capability()
4465 ctrl_info->max_elements_per_oq = in pqi_report_device_capability()
4467 ctrl_info->max_oq_element_length = in pqi_report_device_capability()
4474 ctrl_info->max_inbound_iu_length_per_firmware = in pqi_report_device_capability()
4477 ctrl_info->inbound_spanning_supported = in pqi_report_device_capability()
4479 ctrl_info->outbound_spanning_supported = in pqi_report_device_capability()
4488 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) in pqi_validate_device_capability() argument
4490 if (ctrl_info->max_iq_element_length < in pqi_validate_device_capability()
4492 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4494 ctrl_info->max_iq_element_length, in pqi_validate_device_capability()
4499 if (ctrl_info->max_oq_element_length < in pqi_validate_device_capability()
4501 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4503 ctrl_info->max_oq_element_length, in pqi_validate_device_capability()
4508 if (ctrl_info->max_inbound_iu_length_per_firmware < in pqi_validate_device_capability()
4510 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4512 ctrl_info->max_inbound_iu_length_per_firmware, in pqi_validate_device_capability()
4517 if (!ctrl_info->inbound_spanning_supported) { in pqi_validate_device_capability()
4518 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4523 if (ctrl_info->outbound_spanning_supported) { in pqi_validate_device_capability()
4524 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4532 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) in pqi_create_event_queue() argument
4539 event_queue = &ctrl_info->event_queue; in pqi_create_event_queue()
4564 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_event_queue()
4569 event_queue->oq_ci = ctrl_info->iomem_base + in pqi_create_event_queue()
4577 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, in pqi_create_queue_group() argument
4585 queue_group = &ctrl_info->queue_groups[group_number]; in pqi_create_queue_group()
4603 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4609 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4612 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4617 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4638 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4644 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4647 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4652 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4672 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4675 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4694 put_unaligned_le16(ctrl_info->num_elements_per_oq, in pqi_create_queue_group()
4702 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4705 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4710 queue_group->oq_ci = ctrl_info->iomem_base + in pqi_create_queue_group()
4718 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) in pqi_create_queues() argument
4723 rc = pqi_create_event_queue(ctrl_info); in pqi_create_queues()
4725 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
4730 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_create_queues()
4731 rc = pqi_create_queue_group(ctrl_info, i); in pqi_create_queues()
4733 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
4735 i, ctrl_info->num_queue_groups); in pqi_create_queues()
4746 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, in pqi_configure_events() argument
4769 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
4776 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_configure_events()
4778 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
4789 put_unaligned_le16(ctrl_info->event_queue.oq_id, in pqi_configure_events()
4804 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
4811 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_configure_events()
4813 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
4823 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) in pqi_enable_events() argument
4825 return pqi_configure_events(ctrl_info, true); in pqi_enable_events()
4828 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) in pqi_free_all_io_requests() argument
4835 if (!ctrl_info->io_request_pool) in pqi_free_all_io_requests()
4838 dev = &ctrl_info->pci_dev->dev; in pqi_free_all_io_requests()
4839 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_free_all_io_requests()
4840 io_request = ctrl_info->io_request_pool; in pqi_free_all_io_requests()
4842 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_free_all_io_requests()
4852 kfree(ctrl_info->io_request_pool); in pqi_free_all_io_requests()
4853 ctrl_info->io_request_pool = NULL; in pqi_free_all_io_requests()
4856 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_error_buffer() argument
4858 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, in pqi_alloc_error_buffer()
4859 ctrl_info->error_buffer_length, in pqi_alloc_error_buffer()
4860 &ctrl_info->error_buffer_dma_handle, in pqi_alloc_error_buffer()
4862 if (!ctrl_info->error_buffer) in pqi_alloc_error_buffer()
4868 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_io_resources() argument
4877 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, in pqi_alloc_io_resources()
4878 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); in pqi_alloc_io_resources()
4880 if (!ctrl_info->io_request_pool) { in pqi_alloc_io_resources()
4881 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4886 dev = &ctrl_info->pci_dev->dev; in pqi_alloc_io_resources()
4887 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_alloc_io_resources()
4888 io_request = ctrl_info->io_request_pool; in pqi_alloc_io_resources()
4890 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_alloc_io_resources()
4891 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); in pqi_alloc_io_resources()
4894 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4904 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4918 pqi_free_all_io_requests(ctrl_info); in pqi_alloc_io_resources()
4928 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) in pqi_calculate_io_resources() argument
4933 ctrl_info->scsi_ml_can_queue = in pqi_calculate_io_resources()
4934 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; in pqi_calculate_io_resources()
4935 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; in pqi_calculate_io_resources()
4937 ctrl_info->error_buffer_length = in pqi_calculate_io_resources()
4938 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; in pqi_calculate_io_resources()
4941 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
4944 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
4952 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); in pqi_calculate_io_resources()
4956 ctrl_info->sg_chain_buffer_length = in pqi_calculate_io_resources()
4959 ctrl_info->sg_tablesize = max_sg_entries; in pqi_calculate_io_resources()
4960 ctrl_info->max_sectors = max_transfer_size / 512; in pqi_calculate_io_resources()
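The arithmetic in pqi_calculate_io_resources() is visible almost in full above: max_io_slots equals max_outstanding_requests, the midlayer queue depth is that value minus the reserved slots, the error buffer holds one fixed-size element per slot, and max_sectors is max_transfer_size divided by the 512-byte sector size. A small sketch of that math; the two #define values are placeholders, not the driver's real constants:

#include <stdio.h>

/* Placeholder values: the real PQI_RESERVED_IO_SLOTS and
 * PQI_ERROR_BUFFER_ELEMENT_LENGTH are defined elsewhere in the driver. */
#define RESERVED_IO_SLOTS            32
#define ERROR_BUFFER_ELEMENT_LENGTH  256

struct io_resources {
    unsigned int scsi_ml_can_queue;
    unsigned int max_io_slots;
    unsigned int error_buffer_length;
    unsigned int max_sectors;
};

/* Same relationships as the pqi_calculate_io_resources() lines above. */
static void calculate_io_resources(struct io_resources *r,
                                   unsigned int max_outstanding_requests,
                                   unsigned int max_transfer_size)
{
    r->scsi_ml_can_queue   = max_outstanding_requests - RESERVED_IO_SLOTS;
    r->max_io_slots        = max_outstanding_requests;
    r->error_buffer_length = r->max_io_slots * ERROR_BUFFER_ELEMENT_LENGTH;
    r->max_sectors         = max_transfer_size / 512;
}

int main(void)
{
    struct io_resources r;

    calculate_io_resources(&r, 1024, 1024 * 1024);
    printf("can_queue=%u slots=%u errbuf=%u max_sectors=%u\n",
           r.scsi_ml_can_queue, r.max_io_slots,
           r.error_buffer_length, r.max_sectors);
    return 0;
}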
4963 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) in pqi_calculate_queue_resources() argument
4975 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, in pqi_calculate_queue_resources()
4976 ctrl_info->max_outbound_queues - 1); in pqi_calculate_queue_resources()
4980 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); in pqi_calculate_queue_resources()
4984 ctrl_info->num_queue_groups = num_queue_groups; in pqi_calculate_queue_resources()
4985 ctrl_info->max_hw_queue_index = num_queue_groups - 1; in pqi_calculate_queue_resources()
4991 ctrl_info->max_inbound_iu_length = in pqi_calculate_queue_resources()
4992 (ctrl_info->max_inbound_iu_length_per_firmware / in pqi_calculate_queue_resources()
4997 (ctrl_info->max_inbound_iu_length / in pqi_calculate_queue_resources()
5004 ctrl_info->max_elements_per_iq); in pqi_calculate_queue_resources()
5008 ctrl_info->max_elements_per_oq); in pqi_calculate_queue_resources()
5010 ctrl_info->num_elements_per_iq = num_elements_per_iq; in pqi_calculate_queue_resources()
5011 ctrl_info->num_elements_per_oq = num_elements_per_oq; in pqi_calculate_queue_resources()
5013 ctrl_info->max_sg_per_iu = in pqi_calculate_queue_resources()
5014 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
5019 ctrl_info->max_sg_per_r56_iu = in pqi_calculate_queue_resources()
5020 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
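pqi_calculate_queue_resources() sizes the queue groups from firmware limits: an upper bound of min(max_inbound_queues / 2, max_outbound_queues - 1) and a working count of min(num_cpus, max_msix_vectors), with max_hw_queue_index set to num_queue_groups - 1. A sketch of that calculation; the final clamp against the upper bound is an assumption, since the corresponding line is not among the matches above:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* Sketch of the queue-group sizing in pqi_calculate_queue_resources().
 * Only the two min() expressions appear in the listing; clamping the
 * working count to max_queue_groups is an assumption. */
static unsigned int calc_num_queue_groups(unsigned int num_cpus,
                                          unsigned int max_msix_vectors,
                                          unsigned int max_inbound_queues,
                                          unsigned int max_outbound_queues)
{
    unsigned int max_queue_groups;
    unsigned int num_queue_groups;

    max_queue_groups = min_u(max_inbound_queues / 2, max_outbound_queues - 1);
    num_queue_groups = min_u(num_cpus, max_msix_vectors);
    num_queue_groups = min_u(num_queue_groups, max_queue_groups);

    return num_queue_groups;
}

int main(void)
{
    unsigned int n = calc_num_queue_groups(16, 64, 128, 128);

    printf("num_queue_groups=%u max_hw_queue_index=%u\n", n, n - 1);
    return 0;
}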
5075 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_raid_sg_list() argument
5100 ctrl_info->max_sg_per_iu, &chained); in pqi_build_raid_sg_list()
5111 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_r1_sg_list() argument
5137 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_r1_sg_list()
5149 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_r56_sg_list() argument
5173 ctrl_info->max_sg_per_r56_iu, &chained); in pqi_build_aio_r56_sg_list()
5185 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_sg_list() argument
5211 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_sg_list()
5235 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, in pqi_raid_submit_scsi_cmd_with_io_request() argument
5295 dev_err(&ctrl_info->pci_dev->dev, in pqi_raid_submit_scsi_cmd_with_io_request()
5301 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); in pqi_raid_submit_scsi_cmd_with_io_request()
5307 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); in pqi_raid_submit_scsi_cmd_with_io_request()
5312 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_raid_submit_scsi_cmd() argument
5318 io_request = pqi_alloc_io_request(ctrl_info); in pqi_raid_submit_scsi_cmd()
5320 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, in pqi_raid_submit_scsi_cmd()
5328 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_retry_needed() local
5343 ctrl_info = shost_to_hba(scmd->device->host); in pqi_raid_bypass_retry_needed()
5344 if (pqi_ctrl_offline(ctrl_info)) in pqi_raid_bypass_retry_needed()
5366 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_scsi_cmd() argument
5370 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, in pqi_aio_submit_scsi_cmd()
5374 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_io() argument
5383 io_request = pqi_alloc_io_request(ctrl_info); in pqi_aio_submit_io()
5416 dev_err(&ctrl_info->pci_dev->dev, in pqi_aio_submit_io()
5432 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); in pqi_aio_submit_io()
5438 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_io()
5443 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_r1_write_io() argument
5452 io_request = pqi_alloc_io_request(ctrl_info); in pqi_aio_submit_r1_write_io()
5490 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); in pqi_aio_submit_r1_write_io()
5496 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_r1_write_io()
5501 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_r56_write_io() argument
5510 io_request = pqi_alloc_io_request(ctrl_info); in pqi_aio_submit_r56_write_io()
5555 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); in pqi_aio_submit_r56_write_io()
5561 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_r56_write_io()
5566 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, in pqi_get_hw_queue() argument
5572 if (hw_queue > ctrl_info->max_hw_queue_index) in pqi_get_hw_queue()
5609 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, in pqi_is_parity_write_stream() argument
5620 if (!ctrl_info->enable_stream_detection) in pqi_is_parity_write_stream()
5641 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || in pqi_is_parity_write_stream()
5642 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) in pqi_is_parity_write_stream()
5687 struct pqi_ctrl_info *ctrl_info; in pqi_scsi_queue_command() local
5703 ctrl_info = shost_to_hba(shost); in pqi_scsi_queue_command()
5705 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { in pqi_scsi_queue_command()
5711 if (pqi_ctrl_blocked(ctrl_info)) { in pqi_scsi_queue_command()
5722 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); in pqi_scsi_queue_command()
5723 queue_group = &ctrl_info->queue_groups[hw_queue]; in pqi_scsi_queue_command()
5729 !pqi_is_parity_write_stream(ctrl_info, scmd)) { in pqi_scsi_queue_command()
5730 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5737 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5740 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5742 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
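The pqi_scsi_queue_command() matches outline the dispatch policy: an offline controller or a device in removal fails the command, a blocked controller defers it, and otherwise the command is routed to a hardware queue group and sent down the RAID bypass path (when stream detection does not flag a parity write stream), the AIO path, or the plain RAID path. A compact decision-function sketch under those assumptions; the enum and parameter names are illustrative, and the fallback from a failed bypass attempt to the RAID path is omitted:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative outcomes for the routing decision in pqi_scsi_queue_command(). */
enum dispatch {
    DISPATCH_FAIL,      /* controller offline or device being removed   */
    DISPATCH_DEFER,     /* controller temporarily blocked, retry later  */
    DISPATCH_BYPASS,    /* RAID bypass (AIO) fast path                  */
    DISPATCH_AIO,       /* regular AIO path                             */
    DISPATCH_RAID,      /* regular RAID path                            */
};

static enum dispatch route_command(bool ctrl_offline, bool device_in_remove,
                                   bool ctrl_blocked, bool bypass_enabled,
                                   bool parity_write_stream, bool aio_enabled)
{
    if (ctrl_offline || device_in_remove)
        return DISPATCH_FAIL;
    if (ctrl_blocked)
        return DISPATCH_DEFER;
    if (bypass_enabled && !parity_write_stream)
        return DISPATCH_BYPASS;
    return aio_enabled ? DISPATCH_AIO : DISPATCH_RAID;
}

int main(void)
{
    printf("%d\n", route_command(false, false, false, true, false, true));
    return 0;
}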
5752 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, in pqi_wait_until_queued_io_drained() argument
5769 pqi_check_ctrl_health(ctrl_info); in pqi_wait_until_queued_io_drained()
5770 if (pqi_ctrl_offline(ctrl_info)) in pqi_wait_until_queued_io_drained()
5779 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_inbound_queues_empty() argument
5788 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_wait_until_inbound_queues_empty()
5789 queue_group = &ctrl_info->queue_groups[i]; in pqi_wait_until_inbound_queues_empty()
5791 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); in pqi_wait_until_inbound_queues_empty()
5802 pqi_check_ctrl_health(ctrl_info); in pqi_wait_until_inbound_queues_empty()
5803 if (pqi_ctrl_offline(ctrl_info)) in pqi_wait_until_inbound_queues_empty()
5813 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, in pqi_fail_io_queued_for_device() argument
5825 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_fail_io_queued_for_device()
5826 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_device()
5859 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, in pqi_device_wait_for_pending_io() argument
5871 pqi_check_ctrl_health(ctrl_info); in pqi_device_wait_for_pending_io()
5872 if (pqi_ctrl_offline(ctrl_info)) in pqi_device_wait_for_pending_io()
5876 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
5878 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
5883 dev_warn(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
5885 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
5905 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, in pqi_wait_for_lun_reset_completion() argument
5920 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_lun_reset_completion()
5921 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_lun_reset_completion()
5928 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_for_lun_reset_completion()
5930 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, in pqi_wait_for_lun_reset_completion()
5939 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) in pqi_lun_reset() argument
5946 io_request = pqi_alloc_io_request(ctrl_info); in pqi_lun_reset()
5960 if (ctrl_info->tmf_iu_timeout_supported) in pqi_lun_reset()
5963 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_lun_reset()
5966 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); in pqi_lun_reset()
5980 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) in pqi_lun_reset_with_retries() argument
5988 reset_rc = pqi_lun_reset(ctrl_info, device); in pqi_lun_reset_with_retries()
5997 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs); in pqi_lun_reset_with_retries()
6004 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, in pqi_device_reset() argument
6009 pqi_ctrl_block_requests(ctrl_info); in pqi_device_reset()
6010 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_device_reset()
6011 pqi_fail_io_queued_for_device(ctrl_info, device); in pqi_device_reset()
6012 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); in pqi_device_reset()
6016 rc = pqi_lun_reset_with_retries(ctrl_info, device); in pqi_device_reset()
6017 pqi_ctrl_unblock_requests(ctrl_info); in pqi_device_reset()
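pqi_device_reset() fixes the ordering around the LUN reset itself: block new requests, wait for the controller to quiesce, fail I/O already queued for the device, drain the inbound queues, run the reset with retries, and unblock on every exit path. A stub-based sketch of that ordering; the stubs are placeholders, and skipping the reset when the drain fails is an assumption, since that branch is not among the matched lines:

#include <stdio.h>

/* Placeholder steps standing in for the calls made by pqi_device_reset(). */
static void block_requests(void)         { puts("block requests"); }
static void wait_until_quiesced(void)    { puts("wait until quiesced"); }
static void fail_queued_io(void)         { puts("fail I/O queued for device"); }
static int  drain_inbound_queues(void)   { puts("drain inbound queues"); return 0; }
static int  lun_reset_with_retries(void) { puts("LUN reset (with retries)"); return 0; }
static void unblock_requests(void)       { puts("unblock requests"); }

static int device_reset_sketch(void)
{
    int rc;

    block_requests();
    wait_until_quiesced();
    fail_queued_io();

    rc = drain_inbound_queues();
    if (rc == 0)
        rc = lun_reset_with_retries();

    unblock_requests();          /* requests are unblocked on every exit path */
    return rc;
}

int main(void)
{
    return device_reset_sketch();
}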
6026 struct pqi_ctrl_info *ctrl_info; in pqi_eh_device_reset_handler() local
6030 ctrl_info = shost_to_hba(shost); in pqi_eh_device_reset_handler()
6033 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_eh_device_reset_handler()
6035 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_device_reset_handler()
6041 pqi_check_ctrl_health(ctrl_info); in pqi_eh_device_reset_handler()
6042 if (pqi_ctrl_offline(ctrl_info)) in pqi_eh_device_reset_handler()
6045 rc = pqi_device_reset(ctrl_info, device); in pqi_eh_device_reset_handler()
6047 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_device_reset_handler()
6052 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_eh_device_reset_handler()
6061 struct pqi_ctrl_info *ctrl_info; in pqi_slave_alloc() local
6065 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_alloc()
6067 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6072 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); in pqi_slave_alloc()
6079 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), in pqi_slave_alloc()
6100 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6107 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_map_queues() local
6110 ctrl_info->pci_dev, 0); in pqi_map_queues()
6127 struct pqi_ctrl_info *ctrl_info; in pqi_slave_destroy() local
6129 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_destroy()
6131 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6140 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6143 pqi_dev_info(ctrl_info, "removed", device); in pqi_slave_destroy()
6148 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) in pqi_getpciinfo_ioctl() argument
6158 pci_dev = ctrl_info->pci_dev; in pqi_getpciinfo_ioctl()
6258 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) in pqi_passthru_ioctl() argument
6269 if (pqi_ctrl_offline(ctrl_info)) in pqi_passthru_ioctl()
6271 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) in pqi_passthru_ioctl()
6342 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_passthru_ioctl()
6353 if (ctrl_info->raid_iu_timeout_supported) in pqi_passthru_ioctl()
6356 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, in pqi_passthru_ioctl()
6360 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, in pqi_passthru_ioctl()
6405 struct pqi_ctrl_info *ctrl_info; in pqi_ioctl() local
6407 ctrl_info = shost_to_hba(sdev->host); in pqi_ioctl()
6413 rc = pqi_scan_scsi_devices(ctrl_info); in pqi_ioctl()
6416 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); in pqi_ioctl()
6422 rc = pqi_passthru_ioctl(ctrl_info, arg); in pqi_ioctl()
6436 struct pqi_ctrl_info *ctrl_info; in pqi_firmware_version_show() local
6439 ctrl_info = shost_to_hba(shost); in pqi_firmware_version_show()
6441 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); in pqi_firmware_version_show()
6454 struct pqi_ctrl_info *ctrl_info; in pqi_serial_number_show() local
6457 ctrl_info = shost_to_hba(shost); in pqi_serial_number_show()
6459 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); in pqi_serial_number_show()
6466 struct pqi_ctrl_info *ctrl_info; in pqi_model_show() local
6469 ctrl_info = shost_to_hba(shost); in pqi_model_show()
6471 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); in pqi_model_show()
6478 struct pqi_ctrl_info *ctrl_info; in pqi_vendor_show() local
6481 ctrl_info = shost_to_hba(shost); in pqi_vendor_show()
6483 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); in pqi_vendor_show()
6540 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_stream_detection_show() local
6543 ctrl_info->enable_stream_detection); in pqi_host_enable_stream_detection_show()
6550 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_stream_detection_store() local
6559 ctrl_info->enable_stream_detection = set_stream_detection; in pqi_host_enable_stream_detection_store()
6568 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r5_writes_show() local
6570 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); in pqi_host_enable_r5_writes_show()
6577 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r5_writes_store() local
6586 ctrl_info->enable_r5_writes = set_r5_writes; in pqi_host_enable_r5_writes_store()
6595 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r6_writes_show() local
6597 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); in pqi_host_enable_r6_writes_show()
6604 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r6_writes_store() local
6613 ctrl_info->enable_r6_writes = set_r6_writes; in pqi_host_enable_r6_writes_store()
6651 struct pqi_ctrl_info *ctrl_info; in pqi_unique_id_show() local
6658 ctrl_info = shost_to_hba(sdev->host); in pqi_unique_id_show()
6660 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6664 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6675 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6689 struct pqi_ctrl_info *ctrl_info; in pqi_lunid_show() local
6696 ctrl_info = shost_to_hba(sdev->host); in pqi_lunid_show()
6698 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6702 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6708 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6718 struct pqi_ctrl_info *ctrl_info; in pqi_path_info_show() local
6731 ctrl_info = shost_to_hba(sdev->host); in pqi_path_info_show()
6733 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6737 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6754 ctrl_info->scsi_host->host_no, in pqi_path_info_show()
6793 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6801 struct pqi_ctrl_info *ctrl_info; in pqi_sas_address_show() local
6808 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_address_show()
6810 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6814 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6820 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6828 struct pqi_ctrl_info *ctrl_info; in pqi_ssd_smart_path_enabled_show() local
6834 ctrl_info = shost_to_hba(sdev->host); in pqi_ssd_smart_path_enabled_show()
6836 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6840 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6848 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6856 struct pqi_ctrl_info *ctrl_info; in pqi_raid_level_show() local
6863 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_level_show()
6865 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6869 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6878 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6886 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_cnt_show() local
6893 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_bypass_cnt_show()
6895 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
6899 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
6905 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
6947 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) in pqi_register_scsi() argument
6952 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); in pqi_register_scsi()
6954 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); in pqi_register_scsi()
6965 shost->max_sectors = ctrl_info->max_sectors; in pqi_register_scsi()
6966 shost->can_queue = ctrl_info->scsi_ml_can_queue; in pqi_register_scsi()
6968 shost->sg_tablesize = ctrl_info->sg_tablesize; in pqi_register_scsi()
6970 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); in pqi_register_scsi()
6972 shost->nr_hw_queues = ctrl_info->num_queue_groups; in pqi_register_scsi()
6974 shost->hostdata[0] = (unsigned long)ctrl_info; in pqi_register_scsi()
6976 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); in pqi_register_scsi()
6978 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); in pqi_register_scsi()
6982 rc = pqi_add_sas_host(shost, ctrl_info); in pqi_register_scsi()
6984 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); in pqi_register_scsi()
6988 ctrl_info->scsi_host = shost; in pqi_register_scsi()
7000 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) in pqi_unregister_scsi() argument
7004 pqi_delete_sas_host(ctrl_info); in pqi_unregister_scsi()
7006 shost = ctrl_info->scsi_host; in pqi_unregister_scsi()
7014 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) in pqi_wait_for_pqi_reset_completion() argument
7022 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_reset_completion()
7031 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_pqi_reset_completion()
7032 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_pqi_reset_completion()
7045 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) in pqi_reset() argument
7050 if (ctrl_info->pqi_reset_quiesce_supported) { in pqi_reset()
7051 rc = sis_pqi_reset_quiesce(ctrl_info); in pqi_reset()
7053 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7063 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); in pqi_reset()
7065 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); in pqi_reset()
7067 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7073 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_serial_number() argument
7082 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); in pqi_get_ctrl_serial_number()
7086 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, in pqi_get_ctrl_serial_number()
7088 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; in pqi_get_ctrl_serial_number()
7096 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_product_details() argument
7105 rc = pqi_identify_controller(ctrl_info, identify); in pqi_get_ctrl_product_details()
7111 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7115 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7118 ctrl_info->firmware_version in pqi_get_ctrl_product_details()
7120 snprintf(ctrl_info->firmware_version + in pqi_get_ctrl_product_details()
7121 strlen(ctrl_info->firmware_version), in pqi_get_ctrl_product_details()
7122 sizeof(ctrl_info->firmware_version) - in pqi_get_ctrl_product_details()
7128 memcpy(ctrl_info->model, identify->product_id, in pqi_get_ctrl_product_details()
7130 ctrl_info->model[sizeof(identify->product_id)] = '\0'; in pqi_get_ctrl_product_details()
7132 memcpy(ctrl_info->vendor, identify->vendor_id, in pqi_get_ctrl_product_details()
7134 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; in pqi_get_ctrl_product_details()
7143 struct pqi_ctrl_info *ctrl_info; member
7196 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, in pqi_config_table_update() argument
7213 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_config_table_update()
7216 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, in pqi_enable_firmware_features() argument
7243 return pqi_config_table_update(ctrl_info, in pqi_enable_firmware_features()
7253 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7257 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, in pqi_firmware_feature_status() argument
7261 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", in pqi_firmware_feature_status()
7267 dev_info(&ctrl_info->pci_dev->dev, in pqi_firmware_feature_status()
7272 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", in pqi_firmware_feature_status()
7276 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, in pqi_ctrl_update_feature_flags() argument
7281 ctrl_info->enable_r1_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7284 ctrl_info->enable_r5_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7287 ctrl_info->enable_r6_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7290 ctrl_info->soft_reset_handshake_supported = in pqi_ctrl_update_feature_flags()
7292 pqi_read_soft_reset_status(ctrl_info); in pqi_ctrl_update_feature_flags()
7295 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7298 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7301 ctrl_info->unique_wwid_in_report_phys_lun_supported = in pqi_ctrl_update_feature_flags()
7306 pqi_firmware_feature_status(ctrl_info, firmware_feature); in pqi_ctrl_update_feature_flags()
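pqi_ctrl_update_feature_flags() maps individual firmware features onto ctrl_info booleans (RAID 1/5/6 write bypass, soft reset handshake, RAID and TMF IU timeouts, unique WWID reporting) before falling through to the generic status report. A sketch of that shape; the feature IDs and struct below are invented for illustration and are not the driver's constants:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative feature IDs, not the driver's real constants. */
enum feature_id {
    FEATURE_R5_WRITE_BYPASS,
    FEATURE_R6_WRITE_BYPASS,
    FEATURE_TMF_IU_TIMEOUT,
};

struct ctrl_flags {
    bool enable_r5_writes;
    bool enable_r6_writes;
    bool tmf_iu_timeout_supported;
};

/* Shape of pqi_ctrl_update_feature_flags(): each recognized feature turns
 * one capability flag on or off depending on whether firmware enabled it. */
static void update_feature_flags(struct ctrl_flags *f,
                                 enum feature_id id, bool enabled)
{
    switch (id) {
    case FEATURE_R5_WRITE_BYPASS:
        f->enable_r5_writes = enabled;
        break;
    case FEATURE_R6_WRITE_BYPASS:
        f->enable_r6_writes = enabled;
        break;
    case FEATURE_TMF_IU_TIMEOUT:
        f->tmf_iu_timeout_supported = enabled;
        break;
    }
}

int main(void)
{
    struct ctrl_flags f = { 0 };

    update_feature_flags(&f, FEATURE_R5_WRITE_BYPASS, true);
    printf("r5 writes: %d\n", f.enable_r5_writes);
    return 0;
}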
7309 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, in pqi_firmware_feature_update() argument
7313 firmware_feature->feature_status(ctrl_info, firmware_feature); in pqi_firmware_feature_update()
7405 struct pqi_ctrl_info *ctrl_info; in pqi_process_firmware_features() local
7411 ctrl_info = section_info->ctrl_info; in pqi_process_firmware_features()
7422 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
7437 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, in pqi_process_firmware_features()
7440 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_firmware_features()
7445 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
7459 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
7488 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_reset_config() argument
7490 ctrl_info->heartbeat_counter = NULL; in pqi_ctrl_reset_config()
7491 ctrl_info->soft_reset_status = NULL; in pqi_ctrl_reset_config()
7492 ctrl_info->soft_reset_handshake_supported = false; in pqi_ctrl_reset_config()
7493 ctrl_info->enable_r1_writes = false; in pqi_ctrl_reset_config()
7494 ctrl_info->enable_r5_writes = false; in pqi_ctrl_reset_config()
7495 ctrl_info->enable_r6_writes = false; in pqi_ctrl_reset_config()
7496 ctrl_info->raid_iu_timeout_supported = false; in pqi_ctrl_reset_config()
7497 ctrl_info->tmf_iu_timeout_supported = false; in pqi_ctrl_reset_config()
7498 ctrl_info->unique_wwid_in_report_phys_lun_supported = false; in pqi_ctrl_reset_config()
7501 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) in pqi_process_config_table() argument
7512 table_length = ctrl_info->config_table_length; in pqi_process_config_table()
7518 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
7527 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; in pqi_process_config_table()
7531 section_info.ctrl_info = ctrl_info; in pqi_process_config_table()
7548 dev_warn(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
7551 ctrl_info->heartbeat_counter = in pqi_process_config_table()
7558 ctrl_info->soft_reset_status = in pqi_process_config_table()
7584 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) in pqi_revert_to_sis_mode() argument
7588 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); in pqi_revert_to_sis_mode()
7589 rc = pqi_reset(ctrl_info); in pqi_revert_to_sis_mode()
7592 rc = sis_reenable_sis_mode(ctrl_info); in pqi_revert_to_sis_mode()
7594 dev_err(&ctrl_info->pci_dev->dev, in pqi_revert_to_sis_mode()
7598 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_revert_to_sis_mode()
7608 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) in pqi_force_sis_mode() argument
7610 if (!sis_is_firmware_running(ctrl_info)) in pqi_force_sis_mode()
7613 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) in pqi_force_sis_mode()
7616 if (sis_is_kernel_up(ctrl_info)) { in pqi_force_sis_mode()
7617 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_force_sis_mode()
7621 return pqi_revert_to_sis_mode(ctrl_info); in pqi_force_sis_mode()
7624 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_init() argument
7630 sis_soft_reset(ctrl_info); in pqi_ctrl_init()
7633 rc = pqi_force_sis_mode(ctrl_info); in pqi_ctrl_init()
7642 rc = sis_wait_for_ctrl_ready(ctrl_info); in pqi_ctrl_init()
7650 rc = sis_get_ctrl_properties(ctrl_info); in pqi_ctrl_init()
7652 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7657 rc = sis_get_pqi_capabilities(ctrl_info); in pqi_ctrl_init()
7659 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7664 product_id = sis_get_product_id(ctrl_info); in pqi_ctrl_init()
7665 ctrl_info->product_id = (u8)product_id; in pqi_ctrl_init()
7666 ctrl_info->product_revision = (u8)(product_id >> 8); in pqi_ctrl_init()
7669 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
7671 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
7674 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
7676 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
7680 pqi_calculate_io_resources(ctrl_info); in pqi_ctrl_init()
7682 rc = pqi_alloc_error_buffer(ctrl_info); in pqi_ctrl_init()
7684 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7694 rc = sis_init_base_struct_addr(ctrl_info); in pqi_ctrl_init()
7696 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7702 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); in pqi_ctrl_init()
7704 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7710 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init()
7711 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ctrl_init()
7713 rc = pqi_alloc_admin_queues(ctrl_info); in pqi_ctrl_init()
7715 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7720 rc = pqi_create_admin_queues(ctrl_info); in pqi_ctrl_init()
7722 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7727 rc = pqi_report_device_capability(ctrl_info); in pqi_ctrl_init()
7729 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7734 rc = pqi_validate_device_capability(ctrl_info); in pqi_ctrl_init()
7738 pqi_calculate_queue_resources(ctrl_info); in pqi_ctrl_init()
7740 rc = pqi_enable_msix_interrupts(ctrl_info); in pqi_ctrl_init()
7744 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { in pqi_ctrl_init()
7745 ctrl_info->max_msix_vectors = in pqi_ctrl_init()
7746 ctrl_info->num_msix_vectors_enabled; in pqi_ctrl_init()
7747 pqi_calculate_queue_resources(ctrl_info); in pqi_ctrl_init()
7750 rc = pqi_alloc_io_resources(ctrl_info); in pqi_ctrl_init()
7754 rc = pqi_alloc_operational_queues(ctrl_info); in pqi_ctrl_init()
7756 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7761 pqi_init_operational_queues(ctrl_info); in pqi_ctrl_init()
7763 rc = pqi_create_queues(ctrl_info); in pqi_ctrl_init()
7767 rc = pqi_request_irqs(ctrl_info); in pqi_ctrl_init()
7771 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); in pqi_ctrl_init()
7773 ctrl_info->controller_online = true; in pqi_ctrl_init()
7775 rc = pqi_process_config_table(ctrl_info); in pqi_ctrl_init()
7779 pqi_start_heartbeat_timer(ctrl_info); in pqi_ctrl_init()
7781 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init()
7782 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); in pqi_ctrl_init()
7784 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7788 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init()
7792 rc = pqi_enable_events(ctrl_info); in pqi_ctrl_init()
7794 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7800 rc = pqi_register_scsi(ctrl_info); in pqi_ctrl_init()
7804 rc = pqi_get_ctrl_product_details(ctrl_info); in pqi_ctrl_init()
7806 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7811 rc = pqi_get_ctrl_serial_number(ctrl_info); in pqi_ctrl_init()
7813 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7818 rc = pqi_set_diag_rescan(ctrl_info); in pqi_ctrl_init()
7820 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7825 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); in pqi_ctrl_init()
7827 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7832 pqi_schedule_update_time_worker(ctrl_info); in pqi_ctrl_init()
7834 pqi_scan_scsi_devices(ctrl_info); in pqi_ctrl_init()
7839 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) in pqi_reinit_queues() argument
7845 admin_queues = &ctrl_info->admin_queues; in pqi_reinit_queues()
7850 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_reinit_queues()
7851 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; in pqi_reinit_queues()
7852 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; in pqi_reinit_queues()
7853 ctrl_info->queue_groups[i].oq_ci_copy = 0; in pqi_reinit_queues()
7855 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); in pqi_reinit_queues()
7856 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); in pqi_reinit_queues()
7857 writel(0, ctrl_info->queue_groups[i].oq_pi); in pqi_reinit_queues()
7860 event_queue = &ctrl_info->event_queue; in pqi_reinit_queues()
7865 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_init_resume() argument
7869 rc = pqi_force_sis_mode(ctrl_info); in pqi_ctrl_init_resume()
7877 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); in pqi_ctrl_init_resume()
7885 rc = sis_get_ctrl_properties(ctrl_info); in pqi_ctrl_init_resume()
7887 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7892 rc = sis_get_pqi_capabilities(ctrl_info); in pqi_ctrl_init_resume()
7894 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7904 rc = sis_init_base_struct_addr(ctrl_info); in pqi_ctrl_init_resume()
7906 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7912 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); in pqi_ctrl_init_resume()
7914 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7920 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init_resume()
7921 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ctrl_init_resume()
7923 pqi_reinit_queues(ctrl_info); in pqi_ctrl_init_resume()
7925 rc = pqi_create_admin_queues(ctrl_info); in pqi_ctrl_init_resume()
7927 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7932 rc = pqi_create_queues(ctrl_info); in pqi_ctrl_init_resume()
7936 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); in pqi_ctrl_init_resume()
7938 ctrl_info->controller_online = true; in pqi_ctrl_init_resume()
7939 pqi_ctrl_unblock_requests(ctrl_info); in pqi_ctrl_init_resume()
7941 pqi_ctrl_reset_config(ctrl_info); in pqi_ctrl_init_resume()
7943 rc = pqi_process_config_table(ctrl_info); in pqi_ctrl_init_resume()
7947 pqi_start_heartbeat_timer(ctrl_info); in pqi_ctrl_init_resume()
7949 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init_resume()
7950 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); in pqi_ctrl_init_resume()
7952 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7956 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init_resume()
7960 rc = pqi_enable_events(ctrl_info); in pqi_ctrl_init_resume()
7962 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7967 rc = pqi_get_ctrl_product_details(ctrl_info); in pqi_ctrl_init_resume()
7969 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7974 rc = pqi_set_diag_rescan(ctrl_info); in pqi_ctrl_init_resume()
7976 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7981 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); in pqi_ctrl_init_resume()
7983 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7988 if (pqi_ofa_in_progress(ctrl_info)) in pqi_ctrl_init_resume()
7989 pqi_ctrl_unblock_scan(ctrl_info); in pqi_ctrl_init_resume()
7991 pqi_scan_scsi_devices(ctrl_info); in pqi_ctrl_init_resume()
8006 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) in pqi_pci_init() argument
8011 rc = pci_enable_device(ctrl_info->pci_dev); in pqi_pci_init()
8013 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8023 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); in pqi_pci_init()
8025 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); in pqi_pci_init()
8029 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); in pqi_pci_init()
8031 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8036 ctrl_info->iomem_base = ioremap(pci_resource_start( in pqi_pci_init()
8037 ctrl_info->pci_dev, 0), in pqi_pci_init()
8039 if (!ctrl_info->iomem_base) { in pqi_pci_init()
8040 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8049 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, in pqi_pci_init()
8052 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8058 pci_set_master(ctrl_info->pci_dev); in pqi_pci_init()
8060 ctrl_info->registers = ctrl_info->iomem_base; in pqi_pci_init()
8061 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; in pqi_pci_init()
8063 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); in pqi_pci_init()
8068 pci_release_regions(ctrl_info->pci_dev); in pqi_pci_init()
8070 pci_disable_device(ctrl_info->pci_dev); in pqi_pci_init()
8075 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) in pqi_cleanup_pci_init() argument
8077 iounmap(ctrl_info->iomem_base); in pqi_cleanup_pci_init()
8078 pci_release_regions(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8079 if (pci_is_enabled(ctrl_info->pci_dev)) in pqi_cleanup_pci_init()
8080 pci_disable_device(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8081 pci_set_drvdata(ctrl_info->pci_dev, NULL); in pqi_cleanup_pci_init()
8086 struct pqi_ctrl_info *ctrl_info; in pqi_alloc_ctrl_info() local
8088 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), in pqi_alloc_ctrl_info()
8090 if (!ctrl_info) in pqi_alloc_ctrl_info()
8093 mutex_init(&ctrl_info->scan_mutex); in pqi_alloc_ctrl_info()
8094 mutex_init(&ctrl_info->lun_reset_mutex); in pqi_alloc_ctrl_info()
8095 mutex_init(&ctrl_info->ofa_mutex); in pqi_alloc_ctrl_info()
8097 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); in pqi_alloc_ctrl_info()
8098 spin_lock_init(&ctrl_info->scsi_device_list_lock); in pqi_alloc_ctrl_info()
8100 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); in pqi_alloc_ctrl_info()
8101 atomic_set(&ctrl_info->num_interrupts, 0); in pqi_alloc_ctrl_info()
8103 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); in pqi_alloc_ctrl_info()
8104 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); in pqi_alloc_ctrl_info()
8106 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); in pqi_alloc_ctrl_info()
8107 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); in pqi_alloc_ctrl_info()
8109 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); in pqi_alloc_ctrl_info()
8110 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); in pqi_alloc_ctrl_info()
8112 sema_init(&ctrl_info->sync_request_sem, in pqi_alloc_ctrl_info()
8114 init_waitqueue_head(&ctrl_info->block_requests_wait); in pqi_alloc_ctrl_info()
8116 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; in pqi_alloc_ctrl_info()
8117 ctrl_info->irq_mode = IRQ_MODE_NONE; in pqi_alloc_ctrl_info()
8118 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; in pqi_alloc_ctrl_info()
8120 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; in pqi_alloc_ctrl_info()
8121 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_alloc_ctrl_info()
8123 ctrl_info->max_transfer_encrypted_nvme = in pqi_alloc_ctrl_info()
8125 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; in pqi_alloc_ctrl_info()
8126 ctrl_info->max_write_raid_1_10_2drive = ~0; in pqi_alloc_ctrl_info()
8127 ctrl_info->max_write_raid_1_10_3drive = ~0; in pqi_alloc_ctrl_info()
8129 return ctrl_info; in pqi_alloc_ctrl_info()
8132 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) in pqi_free_ctrl_info() argument
8134 kfree(ctrl_info); in pqi_free_ctrl_info()
8137 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_free_interrupts() argument
8139 pqi_free_irqs(ctrl_info); in pqi_free_interrupts()
8140 pqi_disable_msix_interrupts(ctrl_info); in pqi_free_interrupts()
8143 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) in pqi_free_ctrl_resources() argument
8145 pqi_stop_heartbeat_timer(ctrl_info); in pqi_free_ctrl_resources()
8146 pqi_free_interrupts(ctrl_info); in pqi_free_ctrl_resources()
8147 if (ctrl_info->queue_memory_base) in pqi_free_ctrl_resources()
8148 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8149 ctrl_info->queue_memory_length, in pqi_free_ctrl_resources()
8150 ctrl_info->queue_memory_base, in pqi_free_ctrl_resources()
8151 ctrl_info->queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8152 if (ctrl_info->admin_queue_memory_base) in pqi_free_ctrl_resources()
8153 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8154 ctrl_info->admin_queue_memory_length, in pqi_free_ctrl_resources()
8155 ctrl_info->admin_queue_memory_base, in pqi_free_ctrl_resources()
8156 ctrl_info->admin_queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8157 pqi_free_all_io_requests(ctrl_info); in pqi_free_ctrl_resources()
8158 if (ctrl_info->error_buffer) in pqi_free_ctrl_resources()
8159 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8160 ctrl_info->error_buffer_length, in pqi_free_ctrl_resources()
8161 ctrl_info->error_buffer, in pqi_free_ctrl_resources()
8162 ctrl_info->error_buffer_dma_handle); in pqi_free_ctrl_resources()
8163 if (ctrl_info->iomem_base) in pqi_free_ctrl_resources()
8164 pqi_cleanup_pci_init(ctrl_info); in pqi_free_ctrl_resources()
8165 pqi_free_ctrl_info(ctrl_info); in pqi_free_ctrl_resources()
8168 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) in pqi_remove_ctrl() argument
8170 pqi_cancel_rescan_worker(ctrl_info); in pqi_remove_ctrl()
8171 pqi_cancel_update_time_worker(ctrl_info); in pqi_remove_ctrl()
8172 pqi_unregister_scsi(ctrl_info); in pqi_remove_ctrl()
8173 if (ctrl_info->pqi_mode_enabled) in pqi_remove_ctrl()
8174 pqi_revert_to_sis_mode(ctrl_info); in pqi_remove_ctrl()
8175 pqi_free_ctrl_resources(ctrl_info); in pqi_remove_ctrl()
8178 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_quiesce() argument
8180 pqi_ctrl_block_scan(ctrl_info); in pqi_ofa_ctrl_quiesce()
8181 pqi_scsi_block_requests(ctrl_info); in pqi_ofa_ctrl_quiesce()
8182 pqi_ctrl_block_device_reset(ctrl_info); in pqi_ofa_ctrl_quiesce()
8183 pqi_ctrl_block_requests(ctrl_info); in pqi_ofa_ctrl_quiesce()
8184 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_ofa_ctrl_quiesce()
8185 pqi_stop_heartbeat_timer(ctrl_info); in pqi_ofa_ctrl_quiesce()
8188 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_unquiesce() argument
8190 pqi_start_heartbeat_timer(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8191 pqi_ctrl_unblock_requests(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8192 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8193 pqi_scsi_unblock_requests(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8194 pqi_ctrl_unblock_scan(ctrl_info); in pqi_ofa_ctrl_unquiesce()
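pqi_ofa_ctrl_quiesce() and pqi_ofa_ctrl_unquiesce() are mirrors: the unquiesce path releases in the reverse of the order in which the quiesce path blocked, with the heartbeat timer stopped last and restarted first. A stub-based sketch of that lock-style ordering; it folds the request-block and quiesce-wait steps into the other stubs:

#include <stdio.h>

/* Placeholder steps mirroring the quiesce/unquiesce pairs above. */
static void block_scan(void)       { puts("block scan"); }
static void unblock_scan(void)     { puts("unblock scan"); }
static void block_io(void)         { puts("block SCSI requests"); }
static void unblock_io(void)       { puts("unblock SCSI requests"); }
static void block_resets(void)     { puts("block device resets"); }
static void unblock_resets(void)   { puts("unblock device resets"); }
static void stop_heartbeat(void)   { puts("stop heartbeat timer"); }
static void start_heartbeat(void)  { puts("start heartbeat timer"); }

/* Quiesce: shut activity down from the outside in, heartbeat last. */
static void ofa_quiesce_sketch(void)
{
    block_scan();
    block_io();
    block_resets();
    stop_heartbeat();
}

/* Unquiesce: strictly the reverse order of the quiesce path. */
static void ofa_unquiesce_sketch(void)
{
    start_heartbeat();
    unblock_resets();
    unblock_io();
    unblock_scan();
}

int main(void)
{
    ofa_quiesce_sketch();
    ofa_unquiesce_sketch();
    return 0;
}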
8197 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) in pqi_ofa_alloc_mem() argument
8206 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_alloc_mem()
8212 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); in pqi_ofa_alloc_mem()
8213 if (!ctrl_info->pqi_ofa_chunk_virt_addr) in pqi_ofa_alloc_mem()
8216 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_alloc_mem()
8219 ctrl_info->pqi_ofa_chunk_virt_addr[i] = in pqi_ofa_alloc_mem()
8221 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) in pqi_ofa_alloc_mem()
8238 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_alloc_mem()
8241 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_alloc_mem()
8247 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_alloc_host_buffer() argument
8253 if (ctrl_info->ofa_bytes_requested == 0) in pqi_ofa_alloc_host_buffer()
8256 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); in pqi_ofa_alloc_host_buffer()
8261 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) in pqi_ofa_alloc_host_buffer()
8270 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_setup_host_buffer() argument
8275 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_setup_host_buffer()
8278 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); in pqi_ofa_setup_host_buffer()
8282 ctrl_info->pqi_ofa_mem_virt_addr = ofap; in pqi_ofa_setup_host_buffer()
8284 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { in pqi_ofa_setup_host_buffer()
8287 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_setup_host_buffer()
8288 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_setup_host_buffer()
8296 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_free_host_buffer() argument
8304 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_free_host_buffer()
8308 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_free_host_buffer()
8320 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_free_host_buffer()
8323 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_free_host_buffer()
8327 ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_free_host_buffer()
8328 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_free_host_buffer()
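pqi_ofa_setup_host_buffer(), pqi_ofa_alloc_mem() and pqi_ofa_free_host_buffer() manage the OFA host buffer as a descriptor plus an array of per-chunk allocations tracked through pqi_ofa_chunk_virt_addr; the free path releases each chunk, then the pointer array, then the descriptor. A malloc()-based analogue of that chunked layout, with hypothetical names and without the DMA-coherent details:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified descriptor for a chunked host buffer, loosely
 * modelled on pqi_ofa_mem_virt_addr plus pqi_ofa_chunk_virt_addr above. */
struct host_buffer {
    unsigned int nchunks;
    void **chunks;              /* one allocation per chunk */
};

static void free_host_buffer(struct host_buffer *hb)
{
    unsigned int i;

    if (!hb)
        return;
    if (hb->chunks) {
        for (i = 0; i < hb->nchunks; i++)
            free(hb->chunks[i]);    /* each chunk ...               */
        free(hb->chunks);           /* ... then the pointer array   */
    }
    free(hb);                       /* ... then the descriptor itself */
}

static struct host_buffer *alloc_host_buffer(size_t total_size, size_t chunk_size)
{
    struct host_buffer *hb;
    unsigned int i, nchunks;

    if (total_size == 0 || chunk_size == 0)
        return NULL;
    nchunks = (unsigned int)((total_size + chunk_size - 1) / chunk_size);

    hb = calloc(1, sizeof(*hb));    /* descriptor first */
    if (!hb)
        return NULL;

    hb->nchunks = nchunks;
    hb->chunks = calloc(nchunks, sizeof(void *));
    if (!hb->chunks)
        goto error;

    for (i = 0; i < nchunks; i++) {
        hb->chunks[i] = malloc(chunk_size);
        if (!hb->chunks[i])
            goto error;             /* partial allocation unwound below */
    }
    return hb;

error:
    free_host_buffer(hb);
    return NULL;
}

int main(void)
{
    struct host_buffer *hb = alloc_host_buffer(1 << 20, 64 << 10);

    printf("%s\n", hb ? "allocated" : "failed");
    free_host_buffer(hb);
    return 0;
}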
8331 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_host_memory_update() argument
8345 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_host_memory_update()
8352 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, in pqi_ofa_host_memory_update()
8358 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_ofa_host_memory_update()
8361 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) in pqi_ofa_ctrl_restart() argument
8365 return pqi_ctrl_init_resume(ctrl_info); in pqi_ofa_ctrl_restart()
8388 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) in pqi_fail_all_outstanding_requests() argument
8394 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_fail_all_outstanding_requests()
8395 io_request = &ctrl_info->io_request_pool[i]; in pqi_fail_all_outstanding_requests()
8413 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) in pqi_take_ctrl_offline_deferred() argument
8416 pqi_stop_heartbeat_timer(ctrl_info); in pqi_take_ctrl_offline_deferred()
8417 pqi_free_interrupts(ctrl_info); in pqi_take_ctrl_offline_deferred()
8418 pqi_cancel_rescan_worker(ctrl_info); in pqi_take_ctrl_offline_deferred()
8419 pqi_cancel_update_time_worker(ctrl_info); in pqi_take_ctrl_offline_deferred()
8420 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_take_ctrl_offline_deferred()
8421 pqi_fail_all_outstanding_requests(ctrl_info); in pqi_take_ctrl_offline_deferred()
8422 pqi_ctrl_unblock_requests(ctrl_info); in pqi_take_ctrl_offline_deferred()
8427 struct pqi_ctrl_info *ctrl_info; in pqi_ctrl_offline_worker() local
8429 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); in pqi_ctrl_offline_worker()
8430 pqi_take_ctrl_offline_deferred(ctrl_info); in pqi_ctrl_offline_worker()
8433 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) in pqi_take_ctrl_offline() argument
8435 if (!ctrl_info->controller_online) in pqi_take_ctrl_offline()
8438 ctrl_info->controller_online = false; in pqi_take_ctrl_offline()
8439 ctrl_info->pqi_mode_enabled = false; in pqi_take_ctrl_offline()
8440 pqi_ctrl_block_requests(ctrl_info); in pqi_take_ctrl_offline()
8442 sis_shutdown_ctrl(ctrl_info); in pqi_take_ctrl_offline()
8443 pci_disable_device(ctrl_info->pci_dev); in pqi_take_ctrl_offline()
8444 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); in pqi_take_ctrl_offline()
8445 schedule_work(&ctrl_info->ctrl_offline_work); in pqi_take_ctrl_offline()
8466 struct pqi_ctrl_info *ctrl_info; in pqi_pci_probe() local
8490 ctrl_info = pqi_alloc_ctrl_info(node); in pqi_pci_probe()
8491 if (!ctrl_info) { in pqi_pci_probe()
8497 ctrl_info->pci_dev = pci_dev; in pqi_pci_probe()
8499 rc = pqi_pci_init(ctrl_info); in pqi_pci_probe()
8503 rc = pqi_ctrl_init(ctrl_info); in pqi_pci_probe()
8510 pqi_remove_ctrl(ctrl_info); in pqi_pci_probe()
8517 struct pqi_ctrl_info *ctrl_info; in pqi_pci_remove() local
8519 ctrl_info = pci_get_drvdata(pci_dev); in pqi_pci_remove()
8520 if (!ctrl_info) in pqi_pci_remove()
8523 pqi_remove_ctrl(ctrl_info); in pqi_pci_remove()
8526 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) in pqi_crash_if_pending_command() argument
8532 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_crash_if_pending_command()
8533 io_request = &ctrl_info->io_request_pool[i]; in pqi_crash_if_pending_command()
8545 struct pqi_ctrl_info *ctrl_info; in pqi_shutdown() local
8547 ctrl_info = pci_get_drvdata(pci_dev); in pqi_shutdown()
8548 if (!ctrl_info) { in pqi_shutdown()
8554 pqi_wait_until_ofa_finished(ctrl_info); in pqi_shutdown()
8556 pqi_scsi_block_requests(ctrl_info); in pqi_shutdown()
8557 pqi_ctrl_block_device_reset(ctrl_info); in pqi_shutdown()
8558 pqi_ctrl_block_requests(ctrl_info); in pqi_shutdown()
8559 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_shutdown()
8565 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); in pqi_shutdown()
8570 pqi_crash_if_pending_command(ctrl_info); in pqi_shutdown()
8571 pqi_reset(ctrl_info); in pqi_shutdown()
8600 struct pqi_ctrl_info *ctrl_info; in pqi_suspend() local
8602 ctrl_info = pci_get_drvdata(pci_dev); in pqi_suspend()
8604 pqi_wait_until_ofa_finished(ctrl_info); in pqi_suspend()
8606 pqi_ctrl_block_scan(ctrl_info); in pqi_suspend()
8607 pqi_scsi_block_requests(ctrl_info); in pqi_suspend()
8608 pqi_ctrl_block_device_reset(ctrl_info); in pqi_suspend()
8609 pqi_ctrl_block_requests(ctrl_info); in pqi_suspend()
8610 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_suspend()
8611 pqi_flush_cache(ctrl_info, SUSPEND); in pqi_suspend()
8612 pqi_stop_heartbeat_timer(ctrl_info); in pqi_suspend()
8614 pqi_crash_if_pending_command(ctrl_info); in pqi_suspend()
8622 ctrl_info->controller_online = false; in pqi_suspend()
8623 ctrl_info->pqi_mode_enabled = false; in pqi_suspend()
8631 struct pqi_ctrl_info *ctrl_info; in pqi_resume() local
8633 ctrl_info = pci_get_drvdata(pci_dev); in pqi_resume()
8636 ctrl_info->max_hw_queue_index = 0; in pqi_resume()
8637 pqi_free_interrupts(ctrl_info); in pqi_resume()
8638 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); in pqi_resume()
8641 &ctrl_info->queue_groups[0]); in pqi_resume()
8643 dev_err(&ctrl_info->pci_dev->dev, in pqi_resume()
8648 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_resume()
8649 pqi_ctrl_unblock_requests(ctrl_info); in pqi_resume()
8650 pqi_scsi_unblock_requests(ctrl_info); in pqi_resume()
8651 pqi_ctrl_unblock_scan(ctrl_info); in pqi_resume()
8658 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_resume()
8659 pqi_ctrl_unblock_requests(ctrl_info); in pqi_resume()
8660 pqi_scsi_unblock_requests(ctrl_info); in pqi_resume()
8661 pqi_ctrl_unblock_scan(ctrl_info); in pqi_resume()
8663 return pqi_ctrl_init_resume(ctrl_info); in pqi_resume()