Lines matching refs: ioa_cfg (identifier cross-reference for the ipr SCSI driver; each entry is <source line number> <matching code> in <function>())
604 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_trc_hook() local
607 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; in ipr_trc_hook()
608 trace_entry = &ioa_cfg->trace[trace_index]; in ipr_trc_hook()
612 if (ipr_cmd->ioa_cfg->sis64) in ipr_trc_hook()
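The ipr_trc_hook() lines above show the driver's lock-free trace ring: an atomic counter masked with IPR_TRACE_INDEX_MASK selects the next slot, so writers never take a lock and the index wraps automatically. A minimal userspace sketch of that pattern follows; the buffer size and mask are illustrative stand-ins, not the driver's values.

/* Lock-free circular trace index: atomic increment masked to a
 * power-of-two buffer size. */
#include <stdatomic.h>
#include <stdio.h>

#define TRACE_ENTRIES 8                 /* must be a power of two */
#define TRACE_INDEX_MASK (TRACE_ENTRIES - 1)

static atomic_uint trace_index;

static unsigned int next_trace_slot(void)
{
        /* the kernel's atomic_add_return(1, ...) returns the new value,
         * so fetch-add plus one matches it */
        unsigned int idx = atomic_fetch_add(&trace_index, 1) + 1;
        return idx & TRACE_INDEX_MASK;  /* wraps to 0 after the last slot */
}

int main(void)
{
        for (int i = 0; i < 12; i++)
                printf("entry %d -> slot %u\n", i, next_trace_slot());
        return 0;
}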
635 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_lock_and_done() local
637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
639 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
665 if (ipr_cmd->ioa_cfg->sis64) { in ipr_reinit_ipr_cmnd()
732 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_free_ipr_cmnd() argument
735 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
751 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, in ipr_mask_and_clear_interrupts() argument
758 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_mask_and_clear_interrupts()
759 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
760 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
761 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
765 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
766 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
768 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
771 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
772 writel(~0, ioa_cfg->regs.clr_interrupt_reg); in ipr_mask_and_clear_interrupts()
773 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); in ipr_mask_and_clear_interrupts()
774 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_mask_and_clear_interrupts()
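ipr_mask_and_clear_interrupts() first flips allow_interrupts off under each HRRQ lock, then masks everything (a 64-bit writeq on sis64 hardware, a 32-bit writel otherwise), clears the requested status bits, and finishes with a readl of the sense register so the posted MMIO writes are flushed before the caller proceeds. A hedged userspace model of that sequence, with the registers reduced to plain variables:

/* Model of mask-then-flush; real MMIO semantics are only approximated. */
#include <stdio.h>
#include <stdint.h>

struct regs {
        uint64_t set_interrupt_mask;    /* write-to-set mask bits */
        uint32_t clr_interrupt;         /* write-1-to-clear status bits */
        uint32_t sense_interrupt;       /* read-only status */
};

static uint32_t mask_and_clear(struct regs *r, int sis64, uint32_t clr_ints)
{
        if (sis64)
                r->set_interrupt_mask = ~0ULL;  /* writeq(~0, ...) */
        else
                r->set_interrupt_mask = ~0U;    /* writel(~0, ...) */

        r->clr_interrupt &= ~clr_ints;          /* crude write-1-to-clear model */

        /* on real hardware this readl() flushes the posted writes */
        return r->sense_interrupt;
}

int main(void)
{
        struct regs r = { .clr_interrupt = 0xf0f0, .sense_interrupt = 0x1 };
        uint32_t sense = mask_and_clear(&r, 1, 0x00f0);

        printf("sense=0x%x mask=0x%llx\n", sense,
               (unsigned long long)r.set_interrupt_mask);
        return 0;
}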
784 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) in ipr_save_pcix_cmd_reg() argument
786 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_save_pcix_cmd_reg()
791 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_save_pcix_cmd_reg()
792 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_save_pcix_cmd_reg()
793 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); in ipr_save_pcix_cmd_reg()
797 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; in ipr_save_pcix_cmd_reg()
808 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) in ipr_set_pcix_cmd_reg() argument
810 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_set_pcix_cmd_reg()
813 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_set_pcix_cmd_reg()
814 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_set_pcix_cmd_reg()
815 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); in ipr_set_pcix_cmd_reg()
918 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) in ipr_fail_all_ops() argument
924 for_each_hrrq(hrrq, ioa_cfg) { in ipr_fail_all_ops()
963 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_command() local
966 if (ioa_cfg->sis64) { in ipr_send_command()
974 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
976 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
1049 if (ipr_cmd->ioa_cfg->sis64) { in ipr_init_ioadl()
1086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_blocking_cmd() local
1091 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1093 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1096 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_hrrq_index() argument
1100 if (ioa_cfg->hrrq_num == 1) in ipr_get_hrrq_index()
1103 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1104 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
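ipr_get_hrrq_index() reserves HRRQ 0 for internal commands and spreads I/O round-robin across queues 1..hrrq_num-1, which is what the modulo-plus-one arithmetic above achieves. A small standalone sketch of that distribution (plain counter here, unlike the driver's atomic one):

#include <stdio.h>

static unsigned int hrrq_index;         /* stand-in for the atomic counter */

static unsigned int get_hrrq_index(unsigned int hrrq_num)
{
        unsigned int hrrq;

        if (hrrq_num == 1)
                return 0;               /* single queue: everything on 0 */
        hrrq = ++hrrq_index;            /* atomic_add_return(1, ...) in the driver */
        return (hrrq % (hrrq_num - 1)) + 1;  /* cycles through 1..hrrq_num-1 */
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("cmd %d -> hrrq %u\n", i, get_hrrq_index(4));
        return 0;
}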
1122 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, in ipr_send_hcam() argument
1128 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1129 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_send_hcam()
1131 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); in ipr_send_hcam()
1155 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_send_hcam()
1197 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_init_res_entry() local
1209 if (ioa_cfg->sis64) { in ipr_init_res_entry()
1225 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { in ipr_init_res_entry()
1233 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1234 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1235 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1242 res->target = find_first_zero_bit(ioa_cfg->array_ids, in ipr_init_res_entry()
1243 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1244 set_bit(res->target, ioa_cfg->array_ids); in ipr_init_res_entry()
1247 res->target = find_first_zero_bit(ioa_cfg->vset_ids, in ipr_init_res_entry()
1248 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1249 set_bit(res->target, ioa_cfg->vset_ids); in ipr_init_res_entry()
1251 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1252 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1253 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
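ipr_init_res_entry() hands out target numbers with find_first_zero_bit()/set_bit() over per-type bitmaps (target_ids, array_ids, vset_ids), and ipr_clear_res_target() later returns them with clear_bit(). A simplified single-word userspace analogue of that allocator:

#include <stdio.h>
#include <limits.h>

static unsigned long target_ids;        /* one bit per target id */

static int alloc_target_id(void)
{
        for (int bit = 0; bit < (int)(sizeof(target_ids) * CHAR_BIT); bit++) {
                if (!(target_ids & (1UL << bit))) {
                        target_ids |= 1UL << bit;   /* set_bit() */
                        return bit;                 /* first zero bit */
                }
        }
        return -1;                                  /* bitmap full */
}

static void free_target_id(int bit)
{
        target_ids &= ~(1UL << bit);                /* clear_bit() */
}

int main(void)
{
        int a = alloc_target_id(), b = alloc_target_id(), c = alloc_target_id();

        printf("%d %d %d\n", a, b, c);              /* 0 1 2 */
        free_target_id(1);
        printf("reused: %d\n", alloc_target_id());  /* lowest free id: 1 */
        return 0;
}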
1284 if (res->ioa_cfg->sis64) { in ipr_is_same_device()
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg, in ipr_format_res_path() argument
1339 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); in ipr_format_res_path()
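ipr_format_res_path() builds the resource-path string with the incremental-snprintf idiom: each call appends at p and the remaining space is recomputed as buffer + len - p. A standalone sketch with hypothetical host number and path bytes; note the idiom assumes no truncation ever happens, since a truncating snprintf would push p past the buffer end.

#include <stdio.h>

int main(void)
{
        char buffer[32];
        int len = sizeof(buffer);
        char *p = buffer;
        int res_path[] = { 0, 2, 0xff };            /* 0xff terminates the path */

        p += snprintf(p, buffer + len - p, "%d/", 3);   /* host number */
        for (int i = 0; i < 3 && res_path[i] != 0xff; i++)
                p += snprintf(p, buffer + len - p, "%02x-", res_path[i]);
        printf("%s\n", buffer);                     /* "3/00-02-" */
        return 0;
}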
1359 if (res->ioa_cfg->sis64) { in ipr_update_res_entry()
1384 ipr_format_res_path(res->ioa_cfg, in ipr_update_res_entry()
1416 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_clear_res_target() local
1418 if (!ioa_cfg->sis64) in ipr_clear_res_target()
1422 clear_bit(res->target, ioa_cfg->array_ids); in ipr_clear_res_target()
1424 clear_bit(res->target, ioa_cfg->vset_ids); in ipr_clear_res_target()
1426 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) in ipr_clear_res_target()
1429 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1432 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_config_change() argument
1452 if (ioa_cfg->sis64) { in ipr_handle_config_change()
1460 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_handle_config_change()
1468 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_handle_config_change()
1469 ipr_send_hcam(ioa_cfg, in ipr_handle_config_change()
1475 res = list_entry(ioa_cfg->free_res_q.next, in ipr_handle_config_change()
1480 list_add_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_handle_config_change()
1489 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1492 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_handle_config_change()
1496 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1499 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); in ipr_handle_config_change()
1514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_ccn() local
1524 dev_err(&ioa_cfg->pdev->dev, in ipr_process_ccn()
1527 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); in ipr_process_ccn()
1529 ipr_handle_config_change(ioa_cfg, hostrcb); in ipr_process_ccn()
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_cache_error() argument
1647 if (ioa_cfg->sis64) in ipr_log_enhanced_cache_error()
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_cache_error() argument
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_config_error() argument
1728 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_enhanced_config_error()
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_config_error() argument
1793 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_config_error() argument
1811 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_config_error()
1840 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_array_error() argument
1854 ioa_cfg->host->host_no, in ipr_log_enhanced_array_error()
1875 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_enhanced_array_error()
1876 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_enhanced_array_error()
1891 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_array_error() argument
1905 ioa_cfg->host->host_no, in ipr_log_array_error()
1925 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_array_error()
1926 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_array_error()
1947 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len) in ipr_log_hex_data() argument
1954 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_log_hex_data()
1974 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_dual_ioa_error() argument
1979 if (ioa_cfg->sis64) in ipr_log_enhanced_dual_ioa_error()
1990 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_enhanced_dual_ioa_error()
2004 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_dual_ioa_error() argument
2016 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_dual_ioa_error()
2117 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_fabric_path()
2125 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, in ipr_log64_fabric_path()
2267 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2277 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_fabric_error() argument
2317 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len); in ipr_log_fabric_error()
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_array_error() argument
2343 ipr_format_res_path(ioa_cfg, error->last_res_path, in ipr_log_sis64_array_error()
2365 ipr_format_res_path(ioa_cfg, array_entry->res_path, in ipr_log_sis64_array_error()
2368 ipr_format_res_path(ioa_cfg, in ipr_log_sis64_array_error()
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_fabric_error() argument
2411 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len); in ipr_log_sis64_fabric_error()
2422 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_service_required_error() argument
2431 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_sis64_service_required_error()
2444 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_generic_error() argument
2447 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, in ipr_log_generic_error()
2459 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_device_error() argument
2479 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); in ipr_log_sis64_device_error()
2481 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); in ipr_log_sis64_device_error()
2484 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); in ipr_log_sis64_device_error()
2519 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_log_data() argument
2530 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); in ipr_handle_log_data()
2532 if (ioa_cfg->sis64) in ipr_handle_log_data()
2537 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || in ipr_handle_log_data()
2540 scsi_report_bus_reset(ioa_cfg->host, in ipr_handle_log_data()
2554 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_handle_log_data()
2561 ioa_cfg->errors_logged++; in ipr_handle_log_data()
2563 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) in ipr_handle_log_data()
2570 ipr_log_cache_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2573 ipr_log_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2577 ipr_log_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2580 ipr_log_dual_ioa_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2583 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2586 ipr_log_enhanced_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2590 ipr_log_enhanced_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2593 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2596 ipr_log_fabric_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2599 ipr_log_sis64_device_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2602 ipr_log_sis64_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2606 ipr_log_sis64_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2609 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2612 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2617 ipr_log_generic_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2652 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_error() local
2657 if (ioa_cfg->sis64) in ipr_process_error()
2666 ipr_handle_log_data(ioa_cfg, hostrcb); in ipr_process_error()
2668 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); in ipr_process_error()
2671 dev_err(&ioa_cfg->pdev->dev, in ipr_process_error()
2675 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); in ipr_process_error()
2676 schedule_work(&ioa_cfg->work_q); in ipr_process_error()
2677 hostrcb = ipr_get_free_hostrcb(ioa_cfg); in ipr_process_error()
2679 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); in ipr_process_error()
2696 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_timeout() local
2699 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2701 ioa_cfg->errors_logged++; in ipr_timeout()
2702 dev_err(&ioa_cfg->pdev->dev, in ipr_timeout()
2705 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_timeout()
2706 ioa_cfg->sdt_state = GET_DUMP; in ipr_timeout()
2708 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) in ipr_timeout()
2709 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_timeout()
2711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2729 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_oper_timeout() local
2732 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2734 ioa_cfg->errors_logged++; in ipr_oper_timeout()
2735 dev_err(&ioa_cfg->pdev->dev, in ipr_oper_timeout()
2738 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_oper_timeout()
2739 ioa_cfg->sdt_state = GET_DUMP; in ipr_oper_timeout()
2741 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { in ipr_oper_timeout()
2743 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_oper_timeout()
2744 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_oper_timeout()
2747 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2796 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) in ipr_get_max_scsi_speed() argument
2803 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_get_max_scsi_speed()
2829 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) in ipr_wait_iodbg_ack() argument
2836 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_wait_iodbg_ack()
2862 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, in ipr_get_sis64_dump_data_section() argument
2869 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); in ipr_get_sis64_dump_data_section()
2870 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); in ipr_get_sis64_dump_data_section()
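ipr_get_sis64_dump_data_section() reads IOA dump memory through an address/data register pair: write the word address to dump_addr_reg, read the word back through dump_data_reg, four bytes per iteration. A userspace model with the two registers and the IOA memory reduced to plain variables:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_ioa_mem[16];       /* stands in for IOA-side memory */
static uint32_t dump_addr_reg;          /* "address" register */

static uint32_t read_dump_data_reg(void)
{
        return fake_ioa_mem[dump_addr_reg / 4];     /* "data" register read */
}

static void get_dump_section(uint32_t start_addr, uint32_t *dest, int words)
{
        for (int i = 0; i < words; i++) {
                dump_addr_reg = start_addr + i * 4; /* writel(start_addr+(i*4), ...) */
                dest[i] = read_dump_data_reg();     /* readl(dump_data_reg) */
        }
}

int main(void)
{
        uint32_t buf[4];

        for (int i = 0; i < 16; i++)
                fake_ioa_mem[i] = 0x1000 + i;
        get_dump_section(8, buf, 4);
        for (int i = 0; i < 4; i++)
                printf("0x%x\n", buf[i]);
        return 0;
}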
2887 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, in ipr_get_ldump_data_section() argument
2894 if (ioa_cfg->sis64) in ipr_get_ldump_data_section()
2895 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, in ipr_get_ldump_data_section()
2900 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2903 if (ipr_wait_iodbg_ack(ioa_cfg, in ipr_get_ldump_data_section()
2905 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2912 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2915 writel(start_addr, ioa_cfg->ioa_mailbox); in ipr_get_ldump_data_section()
2919 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2923 if (ipr_wait_iodbg_ack(ioa_cfg, in ipr_get_ldump_data_section()
2925 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2931 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); in ipr_get_ldump_data_section()
2938 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2944 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2947 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2951 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2956 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2980 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, in ipr_sdt_copy() argument
2987 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; in ipr_sdt_copy()
2989 if (ioa_cfg->sis64) in ipr_sdt_copy()
3015 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
3016 if (ioa_cfg->sdt_state == ABORT_DUMP) { in ipr_sdt_copy()
3019 rc = ipr_get_ldump_data_section(ioa_cfg, in ipr_sdt_copy()
3024 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
3062 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_ioa_type_data() argument
3065 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_dump_ioa_type_data()
3073 driver_dump->ioa_type_entry.type = ioa_cfg->type; in ipr_dump_ioa_type_data()
3088 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_version_data() argument
3109 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_trace_data() argument
3118 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); in ipr_dump_trace_data()
3130 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_location_data() argument
3139 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); in ipr_dump_location_data()
3151 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) in ipr_get_ioa_dump() argument
3165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3167 if (ioa_cfg->sdt_state != READ_DUMP) { in ipr_get_ioa_dump()
3168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3172 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3175 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3178 start_addr = readl(ioa_cfg->ioa_mailbox); in ipr_get_ioa_dump()
3180 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { in ipr_get_ioa_dump()
3181 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3187 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); in ipr_get_ioa_dump()
3199 ipr_dump_version_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3200 ipr_dump_location_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3201 ipr_dump_ioa_type_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3202 ipr_dump_trace_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3219 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3229 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, in ipr_get_ioa_dump()
3235 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3239 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3251 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3266 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3284 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, in ipr_get_ioa_dump()
3297 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); in ipr_get_ioa_dump()
3302 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3307 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) argument
3320 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; in ipr_release_dump() local
3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3326 ioa_cfg->dump = NULL; in ipr_release_dump()
3327 ioa_cfg->sdt_state = INACTIVE; in ipr_release_dump()
3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3343 struct ipr_ioa_cfg *ioa_cfg = in ipr_add_remove_thread() local
3349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3354 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_add_remove_thread()
3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3359 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3365 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_add_remove_thread()
3368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3378 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3385 scsi_add_device(ioa_cfg->host, bus, target, lun); in ipr_add_remove_thread()
3386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3391 ioa_cfg->scan_done = 1; in ipr_add_remove_thread()
3392 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3393 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); in ipr_add_remove_thread()
3412 struct ipr_ioa_cfg *ioa_cfg = in ipr_worker_thread() local
3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3418 if (ioa_cfg->sdt_state == READ_DUMP) { in ipr_worker_thread()
3419 dump = ioa_cfg->dump; in ipr_worker_thread()
3421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3426 ipr_get_ioa_dump(ioa_cfg, dump); in ipr_worker_thread()
3429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3430 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) in ipr_worker_thread()
3431 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_worker_thread()
3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3436 if (ioa_cfg->scsi_unblock) { in ipr_worker_thread()
3437 ioa_cfg->scsi_unblock = 0; in ipr_worker_thread()
3438 ioa_cfg->scsi_blocked = 0; in ipr_worker_thread()
3439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3440 scsi_unblock_requests(ioa_cfg->host); in ipr_worker_thread()
3441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3442 if (ioa_cfg->scsi_blocked) in ipr_worker_thread()
3443 scsi_block_requests(ioa_cfg->host); in ipr_worker_thread()
3446 if (!ioa_cfg->scan_enabled) { in ipr_worker_thread()
3447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3451 schedule_work(&ioa_cfg->scsi_add_work_q); in ipr_worker_thread()
3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3476 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_trace() local
3480 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3481 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, in ipr_read_trace()
3483 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3510 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_version() local
3511 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_show_fw_version()
3515 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3520 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3544 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_log_level() local
3548 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3549 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); in ipr_show_log_level()
3550 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3567 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_log_level() local
3570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3571 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); in ipr_store_log_level()
3572 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_diagnostics() local
3609 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3610 while (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3612 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3613 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3616 ioa_cfg->errors_logged = 0; in ipr_store_diagnostics()
3617 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_store_diagnostics()
3619 if (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3621 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3631 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) in ipr_store_diagnostics()
3633 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
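Several of the sysfs store routines in this listing (ipr_store_diagnostics here; ipr_update_ioa_ucode, ipr_sata_reset, and ipr_ata_phy_reset later) repeat the same drop-lock-and-wait dance: release host_lock, wait_event() until in_reset_reload clears, reacquire the lock. A userspace pthread analogue, where pthread_cond_wait() folds the unlock/sleep/relock into a single call:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_wait_q = PTHREAD_COND_INITIALIZER;
static bool in_reset_reload = true;

static void wait_for_reset_reload(void)
{
        pthread_mutex_lock(&host_lock);
        while (in_reset_reload)                 /* wait_event(..., !in_reset_reload) */
                pthread_cond_wait(&reset_wait_q, &host_lock);
        /* lock held again here: safe to touch adapter state */
        pthread_mutex_unlock(&host_lock);
}

static void *reset_thread(void *arg)
{
        (void)arg;
        sleep(1);                               /* pretend the reset takes a while */
        pthread_mutex_lock(&host_lock);
        in_reset_reload = false;
        pthread_cond_broadcast(&reset_wait_q);  /* wake_up_all(&reset_wait_q) */
        pthread_mutex_unlock(&host_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reset_thread, NULL);
        wait_for_reset_reload();
        puts("reset/reload finished");
        pthread_join(t, NULL);
        return 0;
}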
3658 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_adapter_state() local
3662 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3663 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3667 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3687 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_adapter_state() local
3694 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3695 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3697 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_store_adapter_state()
3698 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3699 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3700 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3703 ioa_cfg->reset_retries = 0; in ipr_store_adapter_state()
3704 ioa_cfg->in_ioa_bringdown = 0; in ipr_store_adapter_state()
3705 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_store_adapter_state()
3707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3708 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_adapter_state()
3738 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_reset_adapter() local
3745 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3746 if (!ioa_cfg->in_reset_reload) in ipr_store_reset_adapter()
3747 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_store_reset_adapter()
3748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3749 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_reset_adapter()
3775 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_iopoll_weight() local
3780 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); in ipr_show_iopoll_weight()
3799 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_iopoll_weight() local
3804 if (!ioa_cfg->sis64) { in ipr_store_iopoll_weight()
3805 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); in ipr_store_iopoll_weight()
3812 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); in ipr_store_iopoll_weight()
3816 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { in ipr_store_iopoll_weight()
3817 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n"); in ipr_store_iopoll_weight()
3821 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3822 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_store_iopoll_weight()
3823 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3827 ioa_cfg->iopoll_weight = user_iopoll_weight; in ipr_store_iopoll_weight()
3828 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3829 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_store_iopoll_weight()
3830 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3831 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_store_iopoll_weight()
4030 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, in ipr_update_ioa_ucode() argument
4035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4036 while (ioa_cfg->in_reset_reload) { in ipr_update_ioa_ucode()
4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4038 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
4039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4042 if (ioa_cfg->ucode_sglist) { in ipr_update_ioa_ucode()
4043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4044 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4049 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4055 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4060 ioa_cfg->ucode_sglist = sglist; in ipr_update_ioa_ucode()
4061 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_update_ioa_ucode()
4062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
4065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4066 ioa_cfg->ucode_sglist = NULL; in ipr_update_ioa_ucode()
4067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_update_fw() local
4105 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { in ipr_store_update_fw()
4106 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); in ipr_store_update_fw()
4117 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); in ipr_store_update_fw()
4125 dev_err(&ioa_cfg->pdev->dev, in ipr_store_update_fw()
4132 result = ipr_update_ioa_ucode(ioa_cfg, sglist); in ipr_store_update_fw()
4162 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_type() local
4166 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4167 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); in ipr_show_fw_type()
4168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4186 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_async_err_log() local
4191 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4192 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_read_async_err_log()
4195 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4210 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_next_async_err_log() local
4214 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4215 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_next_async_err_log()
4218 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4223 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_next_async_err_log()
4224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4269 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_dump() local
4279 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4280 dump = ioa_cfg->dump; in ipr_read_dump()
4282 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { in ipr_read_dump()
4283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4313 if (ioa_cfg->sis64) in ipr_read_dump()
4359 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_dump() argument
4372 if (ioa_cfg->sis64) in ipr_alloc_dump()
4388 dump->ioa_cfg = ioa_cfg; in ipr_alloc_dump()
4390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4392 if (INACTIVE != ioa_cfg->sdt_state) { in ipr_alloc_dump()
4393 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4399 ioa_cfg->dump = dump; in ipr_alloc_dump()
4400 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_alloc_dump()
4401 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4402 ioa_cfg->dump_taken = 1; in ipr_alloc_dump()
4403 schedule_work(&ioa_cfg->work_q); in ipr_alloc_dump()
4405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4417 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_dump() argument
4424 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4425 dump = ioa_cfg->dump; in ipr_free_dump()
4427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4431 ioa_cfg->dump = NULL; in ipr_free_dump()
4432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4458 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_write_dump() local
4465 rc = ipr_alloc_dump(ioa_cfg); in ipr_write_dump()
4467 rc = ipr_free_dump(ioa_cfg); in ipr_write_dump()
4487 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; in ipr_free_dump() argument
4501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_change_queue_depth() local
4505 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_adapter_handle() local
4533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4562 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_path() local
4568 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4570 if (res && ioa_cfg->sis64) in ipr_show_resource_path()
4575 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, in ipr_show_resource_path()
4578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_device_id() local
4607 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4609 if (res && ioa_cfg->sis64) in ipr_show_device_id()
4614 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4638 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_type() local
4643 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4649 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4673 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_raw_mode() local
4678 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4684 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4701 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_store_raw_mode() local
4706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4719 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4786 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_find_starget() local
4789 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_starget()
4814 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_alloc() local
4820 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4830 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); in ipr_target_alloc()
4832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4833 sata_port->ioa_cfg = ioa_cfg; in ipr_target_alloc()
4845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4862 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_destroy() local
4864 if (ioa_cfg->sis64) { in ipr_target_destroy()
4867 clear_bit(starget->id, ioa_cfg->array_ids); in ipr_target_destroy()
4869 clear_bit(starget->id, ioa_cfg->vset_ids); in ipr_target_destroy()
4871 clear_bit(starget->id, ioa_cfg->target_ids); in ipr_target_destroy()
4891 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_find_sdev() local
4894 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_sdev()
4914 struct ipr_ioa_cfg *ioa_cfg; in ipr_slave_destroy() local
4917 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_destroy()
4919 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4928 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4942 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_configure() local
4948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4973 if (ioa_cfg->sis64) in ipr_slave_configure()
4975 ipr_format_res_path(ioa_cfg, in ipr_slave_configure()
4979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
5028 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_alloc() local
5035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5052 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5117 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, in ipr_wait_for_ops() argument
5131 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
5134 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
5151 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
5154 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
5166 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); in ipr_wait_for_ops()
5179 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_host_reset() local
5184 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_host_reset()
5185 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5187 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5188 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); in ipr_eh_host_reset()
5189 dev_err(&ioa_cfg->pdev->dev, in ipr_eh_host_reset()
5192 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_eh_host_reset()
5193 ioa_cfg->sdt_state = GET_DUMP; in ipr_eh_host_reset()
5196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5197 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_eh_host_reset()
5198 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5202 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5207 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5226 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, in ipr_device_reset() argument
5236 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_device_reset()
5240 if (ipr_cmd->ioa_cfg->sis64) { in ipr_device_reset()
5259 if (ipr_cmd->ioa_cfg->sis64) in ipr_device_reset()
5285 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_sata_reset() local
5291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5292 while (ioa_cfg->in_reset_reload) { in ipr_sata_reset()
5293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5294 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5295 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5300 rc = ipr_device_reset(ioa_cfg, res); in ipr_sata_reset()
5302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5304 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res); in ipr_sata_reset()
5306 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5307 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); in ipr_sata_reset()
5308 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5310 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5333 struct ipr_ioa_cfg *ioa_cfg; in __ipr_eh_dev_reset() local
5340 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in __ipr_eh_dev_reset()
5348 if (ioa_cfg->in_reset_reload) in __ipr_eh_dev_reset()
5350 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5353 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5356 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in __ipr_eh_dev_reset()
5382 rc = ipr_device_reset(ioa_cfg, res); in __ipr_eh_dev_reset()
5393 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_dev_reset() local
5396 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_dev_reset()
5408 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res); in ipr_eh_dev_reset()
5410 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); in ipr_eh_dev_reset()
5427 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_bus_reset_done() local
5431 if (!ioa_cfg->sis64) in ipr_bus_reset_done()
5432 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_bus_reset_done()
5434 scsi_report_bus_reset(ioa_cfg->host, res->bus); in ipr_bus_reset_done()
5467 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_abort_timeout() local
5472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5473 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { in ipr_abort_timeout()
5474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5479 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_abort_timeout()
5489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5505 struct ipr_ioa_cfg *ioa_cfg; in ipr_cancel_op() local
5513 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; in ipr_cancel_op()
5520 if (ioa_cfg->in_reset_reload || in ipr_cancel_op()
5521 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5531 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_cancel_op()
5536 for_each_hrrq(hrrq, ioa_cfg) { in ipr_cancel_op()
5539 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { in ipr_cancel_op()
5540 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { in ipr_cancel_op()
5552 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_cancel_op()
5591 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_scan_finished() local
5595 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5597 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) in ipr_scan_finished()
5614 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_abort() local
5618 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in ipr_eh_abort()
5625 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); in ipr_eh_abort()
5638 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_other_interrupt() argument
5644 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_handle_other_interrupt()
5651 if (ioa_cfg->sis64) { in ipr_handle_other_interrupt()
5652 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_handle_other_interrupt()
5653 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5657 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); in ipr_handle_other_interrupt()
5658 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5659 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5660 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5661 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5671 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_handle_other_interrupt()
5672 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_handle_other_interrupt()
5674 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5675 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5676 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5678 if (ioa_cfg->clear_isr) { in ipr_handle_other_interrupt()
5680 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5682 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); in ipr_handle_other_interrupt()
5683 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_handle_other_interrupt()
5688 ioa_cfg->ioa_unit_checked = 1; in ipr_handle_other_interrupt()
5690 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5693 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5696 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_handle_other_interrupt()
5697 ioa_cfg->sdt_state = GET_DUMP; in ipr_handle_other_interrupt()
5699 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); in ipr_handle_other_interrupt()
5700 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_handle_other_interrupt()
5714 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) in ipr_isr_eh() argument
5716 ioa_cfg->errors_logged++; in ipr_isr_eh()
5717 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); in ipr_isr_eh()
5719 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_isr_eh()
5720 ioa_cfg->sdt_state = GET_DUMP; in ipr_isr_eh()
5722 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_isr_eh()
5731 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; in ipr_process_hrrq() local
5747 ipr_isr_eh(ioa_cfg, in ipr_process_hrrq()
5753 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; in ipr_process_hrrq()
5776 struct ipr_ioa_cfg *ioa_cfg; in ipr_iopoll() local
5784 ioa_cfg = hrrq->ioa_cfg; in ipr_iopoll()
5813 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr() local
5833 if (!ioa_cfg->clear_isr) in ipr_isr()
5840 ioa_cfg->regs.clr_interrupt_reg32); in ipr_isr()
5841 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5846 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5850 ipr_isr_eh(ioa_cfg, in ipr_isr()
5859 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); in ipr_isr()
5881 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq() local
5895 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_isr_mhrrq()
5928 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, in ipr_build_ioadl64() argument
5946 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl64()
5980 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, in ipr_build_ioadl() argument
5997 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl()
6111 if (ipr_cmd->ioa_cfg->sis64) in ipr_reinit_ipr_cmnd_for_erp()
6225 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_ioasa() argument
6241 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) in ipr_dump_ioasa()
6249 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { in ipr_dump_ioasa()
6261 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); in ipr_dump_ioasa()
6264 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) in ipr_dump_ioasa()
6266 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) in ipr_dump_ioasa()
6389 if (ipr_cmd->ioa_cfg->sis64) in ipr_get_autosense()
6411 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, in ipr_erp_start() argument
6427 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); in ipr_erp_start()
6466 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); in ipr_erp_start()
6520 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_scsi_done() local
6537 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6539 ipr_erp_start(ioa_cfg, ipr_cmd); in ipr_scsi_done()
6541 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6560 struct ipr_ioa_cfg *ioa_cfg; in ipr_queuecommand() local
6569 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_queuecommand()
6575 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6577 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6581 hrrq_id = ipr_get_hrrq_index(ioa_cfg); in ipr_queuecommand()
6582 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6649 if (ioa_cfg->sis64) in ipr_queuecommand()
6650 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); in ipr_queuecommand()
6652 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); in ipr_queuecommand()
6723 struct ipr_ioa_cfg *ioa_cfg; in ipr_ioa_info() local
6726 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; in ipr_ioa_info()
6729 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); in ipr_ioa_info()
6773 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_phy_reset() local
6777 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6778 while (ioa_cfg->in_reset_reload) { in ipr_ata_phy_reset()
6779 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6780 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_phy_reset()
6781 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6784 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ata_phy_reset()
6787 rc = ipr_device_reset(ioa_cfg, res); in ipr_ata_phy_reset()
6799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6813 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_post_internal() local
6818 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6819 while (ioa_cfg->in_reset_reload) { in ipr_ata_post_internal()
6820 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6821 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_post_internal()
6822 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6825 for_each_hrrq(hrrq, ioa_cfg) { in ipr_ata_post_internal()
6829 ipr_device_reset(ioa_cfg, sata_port->res); in ipr_ata_post_internal()
6835 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6876 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_sata_done() local
6883 if (ipr_cmd->ioa_cfg->sis64) in ipr_sata_done()
6889 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); in ipr_sata_done()
6892 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); in ipr_sata_done()
7005 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_defer() local
7010 hrrq_id = ipr_get_hrrq_index(ioa_cfg); in ipr_qc_defer()
7011 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_qc_defer()
7048 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_issue() local
7072 if (ioa_cfg->sis64) { in ipr_qc_issue()
7090 if (ioa_cfg->sis64) in ipr_qc_issue()
7203 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) in ipr_invalid_adapter() argument
7207 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { in ipr_invalid_adapter()
7216 #define ipr_invalid_adapter(ioa_cfg) 0 argument
7231 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_bringdown_done() local
7235 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
7237 ioa_cfg->scsi_unblock = 1; in ipr_ioa_bringdown_done()
7238 schedule_work(&ioa_cfg->work_q); in ipr_ioa_bringdown_done()
7241 ioa_cfg->in_reset_reload = 0; in ipr_ioa_bringdown_done()
7242 ioa_cfg->reset_retries = 0; in ipr_ioa_bringdown_done()
7243 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_ioa_bringdown_done()
7244 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7245 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
7246 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7251 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_bringdown_done()
7270 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_reset_done() local
7275 ioa_cfg->in_reset_reload = 0; in ipr_ioa_reset_done()
7276 for (j = 0; j < ioa_cfg->hrrq_num; j++) { in ipr_ioa_reset_done()
7277 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7278 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
7279 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7282 ioa_cfg->reset_cmd = NULL; in ipr_ioa_reset_done()
7283 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; in ipr_ioa_reset_done()
7285 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_ioa_reset_done()
7291 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7294 list_del_init(&ioa_cfg->hostrcb[j]->queue); in ipr_ioa_reset_done()
7296 ipr_send_hcam(ioa_cfg, in ipr_ioa_reset_done()
7298 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
7300 ipr_send_hcam(ioa_cfg, in ipr_ioa_reset_done()
7302 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
7305 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); in ipr_ioa_reset_done()
7306 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); in ipr_ioa_reset_done()
7308 ioa_cfg->reset_retries = 0; in ipr_ioa_reset_done()
7310 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_reset_done()
7312 ioa_cfg->scsi_unblock = 1; in ipr_ioa_reset_done()
7313 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7348 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_set_supported_devs() local
7349 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; in ipr_set_supported_devs()
7355 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { in ipr_set_supported_devs()
7372 ioa_cfg->vpd_cbs_dma + in ipr_set_supported_devs()
7380 if (!ioa_cfg->sis64) in ipr_set_supported_devs()
7439 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, in ipr_check_term_power() argument
7456 dev_err(&ioa_cfg->pdev->dev, in ipr_check_term_power()
7476 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) in ipr_scsi_bus_speed_limit() argument
7482 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, in ipr_scsi_bus_speed_limit()
7483 ioa_cfg->bus_attr[i].bus_width); in ipr_scsi_bus_speed_limit()
7485 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) in ipr_scsi_bus_speed_limit()
7486 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; in ipr_scsi_bus_speed_limit()
7500 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, in ipr_modify_ioafp_mode_page_28() argument
7518 dev_err(&ioa_cfg->pdev->dev, in ipr_modify_ioafp_mode_page_28()
7524 bus_attr = &ioa_cfg->bus_attr[i]; in ipr_modify_ioafp_mode_page_28()
7575 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page28() local
7576 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page28()
7580 ipr_scsi_bus_speed_limit(ioa_cfg); in ipr_ioafp_mode_select_page28()
7581 ipr_check_term_power(ioa_cfg, mode_pages); in ipr_ioafp_mode_select_page28()
7582 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); in ipr_ioafp_mode_select_page28()
7587 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page28()
7591 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_ioafp_mode_select_page28()
7636 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cmd_failed() local
7639 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_cmd_failed()
7643 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_reset_cmd_failed()
7660 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_mode_sense_failed() local
7665 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_reset_mode_sense_failed()
7685 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page28() local
7689 0x28, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page28()
7713 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page24() local
7714 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page24()
7729 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page24()
7773 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page24() local
7777 0x24, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page24()
7804 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_init_res_table() local
7811 if (ioa_cfg->sis64) in ipr_init_res_table()
7812 flag = ioa_cfg->u.cfg_table64->hdr64.flags; in ipr_init_res_table()
7814 flag = ioa_cfg->u.cfg_table->hdr.flags; in ipr_init_res_table()
7817 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); in ipr_init_res_table()
7819 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) in ipr_init_res_table()
7822 if (ioa_cfg->sis64) in ipr_init_res_table()
7823 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); in ipr_init_res_table()
7825 entries = ioa_cfg->u.cfg_table->hdr.num_entries; in ipr_init_res_table()
7828 if (ioa_cfg->sis64) in ipr_init_res_table()
7829 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; in ipr_init_res_table()
7831 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; in ipr_init_res_table()
7836 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7843 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_init_res_table()
7844 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); in ipr_init_res_table()
7849 res = list_entry(ioa_cfg->free_res_q.next, in ipr_init_res_table()
7851 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7865 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7871 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_init_res_table()
7874 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_init_res_table()
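
ipr_init_res_table() reconciles the freshly fetched config table against the driver's resource lists: entries the IOA reports move to used_res_q, and leftovers return to free_res_q. A runnable sketch of that shuffle, using <sys/queue.h> tail queues in place of the kernel's list_head; the 32-bit handles standing in for config-table entries are made up.

/* Free/used resource-queue shuffle, modeled on ipr_init_res_table(). */
#include <sys/queue.h>
#include <stdio.h>

struct res_entry {
    unsigned int handle;
    TAILQ_ENTRY(res_entry) queue;
};

TAILQ_HEAD(res_q, res_entry);

int main(void)
{
    struct res_q free_q, used_q;
    struct res_entry pool[4] = { {1}, {2}, {3}, {4} };
    unsigned int cfg_table[] = { 2, 4 };     /* handles reported by the IOA */

    TAILQ_INIT(&free_q);
    TAILQ_INIT(&used_q);
    for (int i = 0; i < 4; i++)
        TAILQ_INSERT_TAIL(&free_q, &pool[i], queue);

    for (int i = 0; i < 2; i++) {            /* one pass per table entry */
        struct res_entry *res, *found = NULL;
        TAILQ_FOREACH(res, &free_q, queue)
            if (res->handle == cfg_table[i]) {
                found = res;
                break;
            }
        if (found) {                         /* kernel analogue: list_move_tail() */
            TAILQ_REMOVE(&free_q, found, queue);
            TAILQ_INSERT_TAIL(&used_q, found, queue);
        }
    }

    struct res_entry *res;
    TAILQ_FOREACH(res, &used_q, queue)
        printf("in use: handle %u\n", res->handle);
    return 0;
}
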
7895 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_query_ioa_cfg() local
7897 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_ioafp_query_ioa_cfg()
7898 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_query_ioa_cfg()
7902 ioa_cfg->dual_raid = 1; in ipr_ioafp_query_ioa_cfg()
7903 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", in ipr_ioafp_query_ioa_cfg()
7910 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; in ipr_ioafp_query_ioa_cfg()
7911 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; in ipr_ioafp_query_ioa_cfg()
7912 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; in ipr_ioafp_query_ioa_cfg()
7914 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, in ipr_ioafp_query_ioa_cfg()
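
Lines 7910..7912 above show cfg_table_size being packed big-endian into CDB bytes 6..8. A runnable sketch of that 24-bit length encoding:

#include <stdio.h>
#include <stdint.h>

/* Pack a 24-bit big-endian length into CDB bytes 6..8, as
 * ipr_ioafp_query_ioa_cfg() does for the config-table size. */
static void encode_len24(uint8_t *cdb, uint32_t len)
{
    cdb[6] = (len >> 16) & 0xff;
    cdb[7] = (len >> 8) & 0xff;
    cdb[8] = len & 0xff;
}

int main(void)
{
    uint8_t cdb[16] = { 0 };
    encode_len24(cdb, 0x012345);
    printf("cdb[6..8] = %02x %02x %02x\n", cdb[6], cdb[7], cdb[8]);
    return 0;
}
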
7956 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_set_caching_parameters() local
7957 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_set_caching_parameters()
8044 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_pageC4_inquiry() local
8045 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_pageC4_inquiry()
8046 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_pageC4_inquiry()
8054 (ioa_cfg->vpd_cbs_dma in ipr_ioafp_pageC4_inquiry()
8077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_cap_inquiry() local
8078 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_cap_inquiry()
8079 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_cap_inquiry()
8087 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), in ipr_ioafp_cap_inquiry()
8108 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page3_inquiry() local
8115 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), in ipr_ioafp_page3_inquiry()
8134 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page0_inquiry() local
8140 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); in ipr_ioafp_page0_inquiry()
8142 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); in ipr_ioafp_page0_inquiry()
8144 if (ipr_invalid_adapter(ioa_cfg)) { in ipr_ioafp_page0_inquiry()
8145 dev_err(&ioa_cfg->pdev->dev, in ipr_ioafp_page0_inquiry()
8149 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_ioafp_page0_inquiry()
8150 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_ioafp_page0_inquiry()
8152 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
8160 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), in ipr_ioafp_page0_inquiry()
8178 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_std_inquiry() local
8184 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), in ipr_ioafp_std_inquiry()
8203 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_identify_hrrq() local
8209 if (ioa_cfg->identify_hrrq_index == 0) in ipr_ioafp_identify_hrrq()
8210 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); in ipr_ioafp_identify_hrrq()
8212 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { in ipr_ioafp_identify_hrrq()
8213 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
8219 if (ioa_cfg->sis64) in ipr_ioafp_identify_hrrq()
8222 if (ioa_cfg->nvectors == 1) in ipr_ioafp_identify_hrrq()
8242 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8244 if (ioa_cfg->sis64) { in ipr_ioafp_identify_hrrq()
8257 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8262 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) in ipr_ioafp_identify_hrrq()
8289 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_timer_done() local
8292 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8294 if (ioa_cfg->reset_cmd == ipr_cmd) { in ipr_reset_timer_done()
8299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8336 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_init_ioa_mem() argument
8340 for_each_hrrq(hrrq, ioa_cfg) { in ipr_init_ioa_mem()
8353 ioa_cfg->identify_hrrq_index = 0; in ipr_init_ioa_mem()
8354 if (ioa_cfg->hrrq_num == 1) in ipr_init_ioa_mem()
8355 atomic_set(&ioa_cfg->hrrq_index, 0); in ipr_init_ioa_mem()
8357 atomic_set(&ioa_cfg->hrrq_index, 1); in ipr_init_ioa_mem()
8360 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); in ipr_init_ioa_mem()
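
ipr_init_ioa_mem() zeroes every host RRQ and resets the identify/index state before the adapter is re-enabled. Host response rings of this kind are typically consumed with a toggle bit that flips on each wrap, so entries left over from before the memset are never mistaken for fresh completions. The sketch below models that general convention; its entry layout and toggle polarity are assumptions, not the driver's actual wire format.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RRQ_SIZE   4
#define TOGGLE_BIT 0x1u

int main(void)
{
    uint32_t rrq[RRQ_SIZE];
    unsigned int curr = 0, expect = TOGGLE_BIT;

    memset(rrq, 0, sizeof(rrq));      /* fresh ring: nothing consumable */

    /* producer posts two entries with the current toggle polarity */
    rrq[0] = (100u << 1) | TOGGLE_BIT;
    rrq[1] = (101u << 1) | TOGGLE_BIT;

    /* consume entries whose toggle bit matches the host's expectation */
    while ((rrq[curr] & TOGGLE_BIT) == expect) {
        printf("completed cmd %u\n", rrq[curr] >> 1);
        if (++curr == RRQ_SIZE) {
            curr = 0;
            expect ^= TOGGLE_BIT;     /* wrapped: flip expected polarity */
        }
    }
    return 0;
}
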
8375 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_next_stage() local
8378 feedback = readl(ioa_cfg->regs.init_feedback_reg); in ipr_reset_next_stage()
8393 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8394 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8395 stage_time = ioa_cfg->transop_timeout; in ipr_reset_next_stage()
8398 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_next_stage()
8403 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8404 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_enable_ioa() local
8438 ipr_init_ioa_mem(ioa_cfg); in ipr_reset_enable_ioa()
8440 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_enable_ioa()
8441 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8442 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
8443 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8445 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8447 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8448 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8451 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_enable_ioa()
8455 ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8456 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8461 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_enable_ioa()
8463 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8466 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); in ipr_reset_enable_ioa()
8468 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8470 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8472 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); in ipr_reset_enable_ioa()
8474 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8479 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); in ipr_reset_enable_ioa()
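
ipr_reset_enable_ioa() writes IPR_ENDIAN_SWAP_KEY and immediately reads endian_swap_reg back; the readback forces the posted MMIO write to reach the adapter before initialization proceeds. A kernel-context sketch of that idiom (only meaningful inside a driver; 'reg' is assumed to point into an ioremap()ed BAR):

#include <linux/io.h>

/* Flush a posted MMIO write by reading the same register back. */
static void write_and_flush(void __iomem *reg, u32 val)
{
    writel(val, reg);
    (void)readl(reg);    /* readback orders the write at the device */
}
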
8501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_for_dump() local
8503 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_reset_wait_for_dump()
8504 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_reset_wait_for_dump()
8505 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_reset_wait_for_dump()
8506 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_reset_wait_for_dump()
8508 ioa_cfg->dump_timeout = 1; in ipr_reset_wait_for_dump()
8524 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) in ipr_unit_check_no_data() argument
8526 ioa_cfg->errors_logged++; in ipr_unit_check_no_data()
8527 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); in ipr_unit_check_no_data()
8540 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_unit_check_buffer() argument
8548 mailbox = readl(ioa_cfg->ioa_mailbox); in ipr_get_unit_check_buffer()
8550 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { in ipr_get_unit_check_buffer()
8551 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8556 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, in ipr_get_unit_check_buffer()
8562 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8574 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, in ipr_get_unit_check_buffer()
8579 rc = ipr_get_ldump_data_section(ioa_cfg, in ipr_get_unit_check_buffer()
8585 ipr_handle_log_data(ioa_cfg, hostrcb); in ipr_get_unit_check_buffer()
8588 ioa_cfg->sdt_state == GET_DUMP) in ipr_get_unit_check_buffer()
8589 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_get_unit_check_buffer()
8591 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8593 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_get_unit_check_buffer()
8607 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_get_unit_check_job() local
8610 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_get_unit_check_job()
8611 ipr_get_unit_check_buffer(ioa_cfg); in ipr_reset_get_unit_check_job()
8621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_dump_mailbox_wait() local
8625 if (ioa_cfg->sdt_state != GET_DUMP) in ipr_dump_mailbox_wait()
8628 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || in ipr_dump_mailbox_wait()
8629 (readl(ioa_cfg->regs.sense_interrupt_reg) & in ipr_dump_mailbox_wait()
8633 dev_err(&ioa_cfg->pdev->dev, in ipr_dump_mailbox_wait()
8636 ioa_cfg->sdt_state = READ_DUMP; in ipr_dump_mailbox_wait()
8637 ioa_cfg->dump_timeout = 0; in ipr_dump_mailbox_wait()
8638 if (ioa_cfg->sis64) in ipr_dump_mailbox_wait()
8643 schedule_work(&ioa_cfg->work_q); in ipr_dump_mailbox_wait()
8668 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_restore_cfg_space() local
8672 ioa_cfg->pdev->state_saved = true; in ipr_reset_restore_cfg_space()
8673 pci_restore_state(ioa_cfg->pdev); in ipr_reset_restore_cfg_space()
8675 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { in ipr_reset_restore_cfg_space()
8680 ipr_fail_all_ops(ioa_cfg); in ipr_reset_restore_cfg_space()
8682 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8684 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8685 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8688 if (ioa_cfg->ioa_unit_checked) { in ipr_reset_restore_cfg_space()
8689 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8694 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_restore_cfg_space()
8695 ipr_get_unit_check_buffer(ioa_cfg); in ipr_reset_restore_cfg_space()
8702 if (ioa_cfg->in_ioa_bringdown) { in ipr_reset_restore_cfg_space()
8704 } else if (ioa_cfg->sdt_state == GET_DUMP) { in ipr_reset_restore_cfg_space()
8726 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_bist_done() local
8729 if (ioa_cfg->cfg_locked) in ipr_reset_bist_done()
8730 pci_cfg_access_unlock(ioa_cfg->pdev); in ipr_reset_bist_done()
8731 ioa_cfg->cfg_locked = 0; in ipr_reset_bist_done()
8748 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_start_bist() local
8752 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) in ipr_reset_start_bist()
8754 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_start_bist()
8756 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); in ipr_reset_start_bist()
8763 if (ioa_cfg->cfg_locked) in ipr_reset_start_bist()
8764 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); in ipr_reset_start_bist()
8765 ioa_cfg->cfg_locked = 0; in ipr_reset_start_bist()
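
ipr_reset_start_bist() starts adapter self-test either through an MMIO doorbell (IPR_MMIO chips) or through the standard PCI BIST register, as on line 8756 above. A kernel-context sketch of the config-space path, using only stock PCI helpers:

#include <linux/pci.h>

/* Kick off PCI built-in self-test on 'pdev'. PCI_BIST and
 * PCI_BIST_START come from <uapi/linux/pci_regs.h>. */
static int start_bist(struct pci_dev *pdev)
{
    return pci_write_config_byte(pdev, PCI_BIST, PCI_BIST_START);
}
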
8802 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_reset_work() local
8803 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_reset_reset_work()
8811 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8812 if (ioa_cfg->reset_cmd == ipr_cmd) in ipr_reset_reset_work()
8814 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8829 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_slot_reset() local
8833 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); in ipr_reset_slot_reset()
8850 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_block_config_access_wait() local
8853 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { in ipr_reset_block_config_access_wait()
8854 ioa_cfg->cfg_locked = 1; in ipr_reset_block_config_access_wait()
8855 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8863 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8864 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_block_config_access_wait()
8883 ipr_cmd->ioa_cfg->cfg_locked = 0; in ipr_reset_block_config_access()
8896 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) in ipr_reset_allowed() argument
8900 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_reset_allowed()
8921 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_to_start_bist() local
8924 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { in ipr_reset_wait_to_start_bist()
8949 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_alert() local
8954 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); in ipr_reset_alert()
8957 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); in ipr_reset_alert()
8958 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_alert()
8982 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_quiesce_done() local
8986 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_reset_quiesce_done()
9003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam_done() local
9012 for_each_hrrq(hrrq, ioa_cfg) { in ipr_reset_cancel_hcam_done()
9016 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_reset_cancel_hcam_done()
9042 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam() local
9046 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam()
9052 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { in ipr_reset_cancel_hcam()
9098 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download_done() local
9099 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download_done()
9101 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, in ipr_reset_ucode_download_done()
9120 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download() local
9121 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download()
9137 if (ioa_cfg->sis64) in ipr_reset_ucode_download()
9163 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_shutdown_ioa() local
9172 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
9182 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_reset_shutdown_ioa()
9210 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ioa_job() local
9215 if (ioa_cfg->reset_cmd != ipr_cmd) { in ipr_reset_ioa_job()
9251 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, in _ipr_initiate_ioa_reset() argument
9258 ioa_cfg->in_reset_reload = 1; in _ipr_initiate_ioa_reset()
9259 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in _ipr_initiate_ioa_reset()
9260 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9261 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
9262 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9265 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in _ipr_initiate_ioa_reset()
9266 ioa_cfg->scsi_unblock = 0; in _ipr_initiate_ioa_reset()
9267 ioa_cfg->scsi_blocked = 1; in _ipr_initiate_ioa_reset()
9268 scsi_block_requests(ioa_cfg->host); in _ipr_initiate_ioa_reset()
9271 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in _ipr_initiate_ioa_reset()
9272 ioa_cfg->reset_cmd = ipr_cmd; in _ipr_initiate_ioa_reset()
9291 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, in ipr_initiate_ioa_reset() argument
9296 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
9299 if (ioa_cfg->in_reset_reload) { in ipr_initiate_ioa_reset()
9300 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_initiate_ioa_reset()
9301 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_initiate_ioa_reset()
9302 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_initiate_ioa_reset()
9303 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_reset()
9306 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { in ipr_initiate_ioa_reset()
9307 dev_err(&ioa_cfg->pdev->dev, in ipr_initiate_ioa_reset()
9310 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_reset()
9311 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_initiate_ioa_reset()
9312 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9313 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
9314 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9318 if (ioa_cfg->in_ioa_bringdown) { in ipr_initiate_ioa_reset()
9319 ioa_cfg->reset_cmd = NULL; in ipr_initiate_ioa_reset()
9320 ioa_cfg->in_reset_reload = 0; in ipr_initiate_ioa_reset()
9321 ipr_fail_all_ops(ioa_cfg); in ipr_initiate_ioa_reset()
9322 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_initiate_ioa_reset()
9324 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
9325 ioa_cfg->scsi_unblock = 1; in ipr_initiate_ioa_reset()
9326 schedule_work(&ioa_cfg->work_q); in ipr_initiate_ioa_reset()
9330 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_reset()
9335 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, in ipr_initiate_ioa_reset()
9349 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_freeze() local
9353 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_freeze()
9354 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9355 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
9356 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9374 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_mmio_enabled() local
9376 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9377 if (!ioa_cfg->probe_done) in ipr_pci_mmio_enabled()
9379 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9394 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_frozen() local
9396 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9397 if (ioa_cfg->probe_done) in ipr_pci_frozen()
9398 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); in ipr_pci_frozen()
9399 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9413 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_slot_reset() local
9415 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9416 if (ioa_cfg->probe_done) { in ipr_pci_slot_reset()
9417 if (ioa_cfg->needs_warm_reset) in ipr_pci_slot_reset()
9418 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_pci_slot_reset()
9420 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, in ipr_pci_slot_reset()
9423 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_slot_reset()
9424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9438 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_perm_failure() local
9441 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9442 if (ioa_cfg->probe_done) { in ipr_pci_perm_failure()
9443 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_pci_perm_failure()
9444 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_pci_perm_failure()
9445 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; in ipr_pci_perm_failure()
9446 ioa_cfg->in_ioa_bringdown = 1; in ipr_pci_perm_failure()
9447 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_pci_perm_failure()
9448 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9449 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
9450 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9453 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_pci_perm_failure()
9455 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_perm_failure()
9456 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9497 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) in ipr_probe_ioa_part2() argument
9503 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9504 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); in ipr_probe_ioa_part2()
9505 ioa_cfg->probe_done = 1; in ipr_probe_ioa_part2()
9506 if (ioa_cfg->needs_hard_reset) { in ipr_probe_ioa_part2()
9507 ioa_cfg->needs_hard_reset = 0; in ipr_probe_ioa_part2()
9508 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_probe_ioa_part2()
9510 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, in ipr_probe_ioa_part2()
9512 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9525 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_cmd_blks() argument
9529 if (ioa_cfg->ipr_cmnd_list) { in ipr_free_cmd_blks()
9531 if (ioa_cfg->ipr_cmnd_list[i]) in ipr_free_cmd_blks()
9532 dma_pool_free(ioa_cfg->ipr_cmd_pool, in ipr_free_cmd_blks()
9533 ioa_cfg->ipr_cmnd_list[i], in ipr_free_cmd_blks()
9534 ioa_cfg->ipr_cmnd_list_dma[i]); in ipr_free_cmd_blks()
9536 ioa_cfg->ipr_cmnd_list[i] = NULL; in ipr_free_cmd_blks()
9540 if (ioa_cfg->ipr_cmd_pool) in ipr_free_cmd_blks()
9541 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); in ipr_free_cmd_blks()
9543 kfree(ioa_cfg->ipr_cmnd_list); in ipr_free_cmd_blks()
9544 kfree(ioa_cfg->ipr_cmnd_list_dma); in ipr_free_cmd_blks()
9545 ioa_cfg->ipr_cmnd_list = NULL; in ipr_free_cmd_blks()
9546 ioa_cfg->ipr_cmnd_list_dma = NULL; in ipr_free_cmd_blks()
9547 ioa_cfg->ipr_cmd_pool = NULL; in ipr_free_cmd_blks()
9557 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_mem() argument
9561 kfree(ioa_cfg->res_entries); in ipr_free_mem()
9562 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_free_mem()
9563 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_free_mem()
9564 ipr_free_cmd_blks(ioa_cfg); in ipr_free_mem()
9566 for (i = 0; i < ioa_cfg->hrrq_num; i++) in ipr_free_mem()
9567 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9568 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
9569 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
9570 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
9572 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, in ipr_free_mem()
9573 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_free_mem()
9576 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9578 ioa_cfg->hostrcb[i], in ipr_free_mem()
9579 ioa_cfg->hostrcb_dma[i]); in ipr_free_mem()
9582 ipr_free_dump(ioa_cfg); in ipr_free_mem()
9583 kfree(ioa_cfg->trace); in ipr_free_mem()
9596 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_irqs() argument
9598 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_irqs()
9601 for (i = 0; i < ioa_cfg->nvectors; i++) in ipr_free_irqs()
9602 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); in ipr_free_irqs()
9616 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_all_resources() argument
9618 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_all_resources()
9621 ipr_free_irqs(ioa_cfg); in ipr_free_all_resources()
9622 if (ioa_cfg->reset_work_q) in ipr_free_all_resources()
9623 destroy_workqueue(ioa_cfg->reset_work_q); in ipr_free_all_resources()
9624 iounmap(ioa_cfg->hdw_dma_regs); in ipr_free_all_resources()
9626 ipr_free_mem(ioa_cfg); in ipr_free_all_resources()
9627 scsi_host_put(ioa_cfg->host); in ipr_free_all_resources()
9639 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_cmd_blks() argument
9646 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, in ipr_alloc_cmd_blks()
9649 if (!ioa_cfg->ipr_cmd_pool) in ipr_alloc_cmd_blks()
9652 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); in ipr_alloc_cmd_blks()
9653 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); in ipr_alloc_cmd_blks()
9655 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { in ipr_alloc_cmd_blks()
9656 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_cmd_blks()
9660 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_cmd_blks()
9661 if (ioa_cfg->hrrq_num > 1) { in ipr_alloc_cmd_blks()
9664 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9665 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9670 (ioa_cfg->hrrq_num - 1); in ipr_alloc_cmd_blks()
9671 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
9674 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9680 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9681 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9683 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
9686 BUG_ON(ioa_cfg->hrrq_num == 0); in ipr_alloc_cmd_blks()
9689 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
9691 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
9692 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
9696 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, in ipr_alloc_cmd_blks()
9700 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_cmd_blks()
9704 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; in ipr_alloc_cmd_blks()
9705 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; in ipr_alloc_cmd_blks()
9709 if (ioa_cfg->sis64) in ipr_alloc_cmd_blks()
9715 if (ioa_cfg->sis64) { in ipr_alloc_cmd_blks()
9729 ipr_cmd->ioa_cfg = ioa_cfg; in ipr_alloc_cmd_blks()
9734 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
9736 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
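
ipr_alloc_cmd_blks() carves the command-block pool into per-HRRQ ID ranges: queue 0 keeps a small reservation for internal commands, the remainder is split evenly across the I/O queues, and lines 9689..9692 fold any division remainder into the last queue. A runnable model of that arithmetic; the block counts are hypothetical stand-ins for the driver's constants.

#include <stdio.h>

#define NUM_CMD_BLKS      100   /* hypothetical total */
#define NUM_INTERNAL_BLKS 4     /* hypothetical queue-0 reservation */

int main(void)
{
    int hrrq_num = 4, min_id = 0;
    int per_q = (NUM_CMD_BLKS - NUM_INTERNAL_BLKS) / (hrrq_num - 1);

    for (int i = 0; i < hrrq_num; i++) {
        int size = (i == 0) ? NUM_INTERNAL_BLKS : per_q;

        /* fold the division remainder into the last queue */
        if (i == hrrq_num - 1)
            size += (NUM_CMD_BLKS - NUM_INTERNAL_BLKS) % (hrrq_num - 1);

        int max_id = min_id + size - 1;
        printf("hrrq[%d]: cmd ids %d..%d (%d blocks)\n", i, min_id, max_id, size);
        min_id = max_id + 1;
    }
    return 0;
}
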
9750 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_mem() argument
9752 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_alloc_mem()
9756 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, in ipr_alloc_mem()
9760 if (!ioa_cfg->res_entries) in ipr_alloc_mem()
9763 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { in ipr_alloc_mem()
9764 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); in ipr_alloc_mem()
9765 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9768 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9770 &ioa_cfg->vpd_cbs_dma, in ipr_alloc_mem()
9773 if (!ioa_cfg->vpd_cbs) in ipr_alloc_mem()
9776 if (ipr_alloc_cmd_blks(ioa_cfg)) in ipr_alloc_mem()
9779 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9780 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9781 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9782 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
9785 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
9788 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9789 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9790 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9793 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9796 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9797 ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9798 &ioa_cfg->cfg_table_dma, in ipr_alloc_mem()
9801 if (!ioa_cfg->u.cfg_table) in ipr_alloc_mem()
9805 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9807 &ioa_cfg->hostrcb_dma[i], in ipr_alloc_mem()
9810 if (!ioa_cfg->hostrcb[i]) in ipr_alloc_mem()
9813 ioa_cfg->hostrcb[i]->hostrcb_dma = in ipr_alloc_mem()
9814 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); in ipr_alloc_mem()
9815 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9816 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); in ipr_alloc_mem()
9819 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, in ipr_alloc_mem()
9823 if (!ioa_cfg->trace) in ipr_alloc_mem()
9834 ioa_cfg->hostrcb[i], in ipr_alloc_mem()
9835 ioa_cfg->hostrcb_dma[i]); in ipr_alloc_mem()
9837 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9838 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_alloc_mem()
9840 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9842 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9843 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9844 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9847 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_mem()
9850 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_alloc_mem()
9852 kfree(ioa_cfg->res_entries); in ipr_alloc_mem()
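
ipr_alloc_mem() allocates in layers (res_entries, vpd_cbs, command blocks, host RRQs, config table, hostrcbs, trace) and unwinds in reverse order through a ladder of cleanup labels on failure. A self-contained sketch of the idiom, with malloc/free standing in for dma_alloc_coherent/dma_free_coherent:

#include <stdlib.h>

/* Layered allocation with reverse-order cleanup, in the style of
 * ipr_alloc_mem(). */
static int alloc_all(void)
{
    void *res_entries, *vpd_cbs, *cfg_table;

    res_entries = malloc(64);
    if (!res_entries)
        goto out;
    vpd_cbs = malloc(64);
    if (!vpd_cbs)
        goto out_free_res_entries;
    cfg_table = malloc(64);
    if (!cfg_table)
        goto out_free_vpd_cbs;

    /* success: in the real driver ownership passes to the caller;
     * freed here only to keep the sketch leak-free */
    free(cfg_table);
    free(vpd_cbs);
    free(res_entries);
    return 0;

out_free_vpd_cbs:
    free(vpd_cbs);
out_free_res_entries:
    free(res_entries);
out:
    return -1;    /* -ENOMEM in kernel code */
}

int main(void) { return alloc_all(); }
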
9863 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) in ipr_initialize_bus_attr() argument
9868 ioa_cfg->bus_attr[i].bus = i; in ipr_initialize_bus_attr()
9869 ioa_cfg->bus_attr[i].qas_enabled = 0; in ipr_initialize_bus_attr()
9870 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; in ipr_initialize_bus_attr()
9872 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; in ipr_initialize_bus_attr()
9874 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; in ipr_initialize_bus_attr()
9885 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) in ipr_init_regs() argument
9891 p = &ioa_cfg->chip_cfg->regs; in ipr_init_regs()
9892 t = &ioa_cfg->regs; in ipr_init_regs()
9893 base = ioa_cfg->hdw_dma_regs; in ipr_init_regs()
9912 if (ioa_cfg->sis64) { in ipr_init_regs()
9929 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, in ipr_init_ioa_cfg() argument
9934 ioa_cfg->host = host; in ipr_init_ioa_cfg()
9935 ioa_cfg->pdev = pdev; in ipr_init_ioa_cfg()
9936 ioa_cfg->log_level = ipr_log_level; in ipr_init_ioa_cfg()
9937 ioa_cfg->doorbell = IPR_DOORBELL; in ipr_init_ioa_cfg()
9938 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); in ipr_init_ioa_cfg()
9939 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); in ipr_init_ioa_cfg()
9940 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); in ipr_init_ioa_cfg()
9941 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); in ipr_init_ioa_cfg()
9942 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); in ipr_init_ioa_cfg()
9943 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); in ipr_init_ioa_cfg()
9945 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); in ipr_init_ioa_cfg()
9946 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); in ipr_init_ioa_cfg()
9947 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); in ipr_init_ioa_cfg()
9948 INIT_LIST_HEAD(&ioa_cfg->free_res_q); in ipr_init_ioa_cfg()
9949 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in ipr_init_ioa_cfg()
9950 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); in ipr_init_ioa_cfg()
9951 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); in ipr_init_ioa_cfg()
9952 init_waitqueue_head(&ioa_cfg->reset_wait_q); in ipr_init_ioa_cfg()
9953 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_init_ioa_cfg()
9954 init_waitqueue_head(&ioa_cfg->eeh_wait_q); in ipr_init_ioa_cfg()
9955 ioa_cfg->sdt_state = INACTIVE; in ipr_init_ioa_cfg()
9957 ipr_initialize_bus_attr(ioa_cfg); in ipr_init_ioa_cfg()
9958 ioa_cfg->max_devs_supported = ipr_max_devs; in ipr_init_ioa_cfg()
9960 if (ioa_cfg->sis64) { in ipr_init_ioa_cfg()
9964 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; in ipr_init_ioa_cfg()
9965 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) in ipr_init_ioa_cfg()
9967 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9972 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; in ipr_init_ioa_cfg()
9973 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) in ipr_init_ioa_cfg()
9975 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9981 host->can_queue = ioa_cfg->max_cmds; in ipr_init_ioa_cfg()
9982 pci_set_drvdata(pdev, ioa_cfg); in ipr_init_ioa_cfg()
9984 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9985 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9986 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
9987 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
9989 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
9991 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
10022 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) in ipr_wait_for_pci_err_recovery() argument
10024 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_wait_for_pci_err_recovery()
10027 wait_event_timeout(ioa_cfg->eeh_wait_q, in ipr_wait_for_pci_err_recovery()
10034 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) in name_msi_vectors() argument
10036 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; in name_msi_vectors()
10038 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { in name_msi_vectors()
10039 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, in name_msi_vectors()
10040 "host%d-%d", ioa_cfg->host->host_no, vec_idx); in name_msi_vectors()
10041 ioa_cfg->vectors_info[vec_idx]. in name_msi_vectors()
10042 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; in name_msi_vectors()
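
name_msi_vectors() stamps each vector's description as "host<no>-<vector>" so every MSI shows up in /proc/interrupts attributed to its HRRQ. A runnable sketch of the naming:

#include <stdio.h>

int main(void)
{
    char desc[4][16];
    int host_no = 2, nvectors = 4;   /* hypothetical host number/vector count */

    for (int v = 0; v < nvectors; v++) {
        snprintf(desc[v], sizeof(desc[v]), "host%d-%d", host_no, v);
        printf("%s\n", desc[v]);     /* e.g. host2-0, host2-1, ... */
    }
    return 0;
}
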
10046 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, in ipr_request_other_msi_irqs() argument
10051 for (i = 1; i < ioa_cfg->nvectors; i++) { in ipr_request_other_msi_irqs()
10055 ioa_cfg->vectors_info[i].desc, in ipr_request_other_msi_irqs()
10056 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10060 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10079 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; in ipr_test_intr() local
10083 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); in ipr_test_intr()
10084 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
10086 ioa_cfg->msi_received = 1; in ipr_test_intr()
10087 wake_up(&ioa_cfg->msi_wait_q); in ipr_test_intr()
10089 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
10104 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) in ipr_test_msi() argument
10113 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10114 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_test_msi()
10115 ioa_cfg->msi_received = 0; in ipr_test_msi()
10116 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_test_msi()
10117 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_test_msi()
10118 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_test_msi()
10119 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10121 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); in ipr_test_msi()
10128 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); in ipr_test_msi()
10129 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_test_msi()
10130 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); in ipr_test_msi()
10131 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10132 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_test_msi()
10134 if (!ioa_cfg->msi_received) { in ipr_test_msi()
10141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10143 free_irq(irq, ioa_cfg); in ipr_test_msi()
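
ipr_test_msi() proves MSI delivery end to end: install a throwaway handler, poke the adapter's debug-acknowledge bit, then sleep on msi_wait_q for up to a second and check msi_received. A condensed kernel-context sketch of that handshake; the adapter register pokes are elided and the error code is a placeholder:

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(msi_wait_q);
static int msi_received;

static irqreturn_t test_intr(int irq, void *devp)
{
    msi_received = 1;
    wake_up(&msi_wait_q);
    return IRQ_HANDLED;
}

/* Returns 0 if the test interrupt arrived within a second. */
static int test_msi(int irq)
{
    int rc = request_irq(irq, test_intr, 0, "msi-test", NULL);

    if (rc)
        return rc;
    /* ... trigger the adapter's test/debug interrupt here ... */
    wait_event_timeout(msi_wait_q, msi_received, HZ);
    free_irq(irq, NULL);
    return msi_received ? 0 : -EOPNOTSUPP;
}
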
10160 struct ipr_ioa_cfg *ioa_cfg; in ipr_probe_ioa() local
10172 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); in ipr_probe_ioa()
10180 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; in ipr_probe_ioa()
10181 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); in ipr_probe_ioa()
10182 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); in ipr_probe_ioa()
10184 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); in ipr_probe_ioa()
10186 if (!ioa_cfg->ipr_chip) { in ipr_probe_ioa()
10193 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; in ipr_probe_ioa()
10194 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; in ipr_probe_ioa()
10195 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; in ipr_probe_ioa()
10196 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; in ipr_probe_ioa()
10199 ioa_cfg->transop_timeout = ipr_transop_timeout; in ipr_probe_ioa()
10201 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10203 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10205 ioa_cfg->revid = pdev->revision; in ipr_probe_ioa()
10207 ipr_init_ioa_cfg(ioa_cfg, host, pdev); in ipr_probe_ioa()
10222 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10228 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10242 ioa_cfg->hdw_dma_regs = ipr_regs; in ipr_probe_ioa()
10243 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; in ipr_probe_ioa()
10244 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; in ipr_probe_ioa()
10246 ipr_init_regs(ioa_cfg); in ipr_probe_ioa()
10248 if (ioa_cfg->sis64) { in ipr_probe_ioa()
10264 ioa_cfg->chip_cfg->cache_line_size); in ipr_probe_ioa()
10268 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10274 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_probe_ioa()
10275 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10284 if (ioa_cfg->ipr_chip->has_msi) in ipr_probe_ioa()
10288 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10291 ioa_cfg->nvectors = rc; in ipr_probe_ioa()
10294 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
10299 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10308 rc = ipr_test_msi(ioa_cfg, pdev); in ipr_probe_ioa()
10312 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, in ipr_probe_ioa()
10316 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10319 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
10320 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
10327 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, in ipr_probe_ioa()
10331 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) in ipr_probe_ioa()
10334 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) in ipr_probe_ioa()
10337 rc = ipr_alloc_mem(ioa_cfg); in ipr_probe_ioa()
10357 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_probe_ioa()
10358 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_probe_ioa()
10359 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_probe_ioa()
10361 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10363 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10365 ioa_cfg->ioa_unit_checked = 1; in ipr_probe_ioa()
10367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10368 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_probe_ioa()
10369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10372 name_msi_vectors(ioa_cfg); in ipr_probe_ioa()
10374 ioa_cfg->vectors_info[0].desc, in ipr_probe_ioa()
10375 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10377 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev); in ipr_probe_ioa()
10381 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10390 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { in ipr_probe_ioa()
10391 ioa_cfg->needs_warm_reset = 1; in ipr_probe_ioa()
10392 ioa_cfg->reset = ipr_reset_slot_reset; in ipr_probe_ioa()
10394 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", in ipr_probe_ioa()
10397 if (!ioa_cfg->reset_work_q) { in ipr_probe_ioa()
10403 ioa_cfg->reset = ipr_reset_start_bist; in ipr_probe_ioa()
10406 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); in ipr_probe_ioa()
10414 ipr_free_irqs(ioa_cfg); in ipr_probe_ioa()
10416 ipr_free_mem(ioa_cfg); in ipr_probe_ioa()
10418 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10445 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, in ipr_initiate_ioa_bringdown() argument
10449 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_initiate_ioa_bringdown()
10450 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_bringdown()
10451 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_bringdown()
10452 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_bringdown()
10453 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); in ipr_initiate_ioa_bringdown()
10469 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in __ipr_remove() local
10474 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10475 while (ioa_cfg->in_reset_reload) { in __ipr_remove()
10476 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10477 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10478 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10481 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in __ipr_remove()
10482 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10483 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
10484 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10487 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); in __ipr_remove()
10489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10490 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10491 flush_work(&ioa_cfg->work_q); in __ipr_remove()
10492 if (ioa_cfg->reset_work_q) in __ipr_remove()
10493 flush_workqueue(ioa_cfg->reset_work_q); in __ipr_remove()
10494 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in __ipr_remove()
10495 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10498 list_del(&ioa_cfg->queue); in __ipr_remove()
10501 if (ioa_cfg->sdt_state == ABORT_DUMP) in __ipr_remove()
10502 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in __ipr_remove()
10503 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10505 ipr_free_all_resources(ioa_cfg); in __ipr_remove()
10521 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_remove() local
10525 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10527 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10529 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10531 scsi_remove_host(ioa_cfg->host); in ipr_remove()
10546 struct ipr_ioa_cfg *ioa_cfg; in ipr_probe() local
10555 ioa_cfg = pci_get_drvdata(pdev); in ipr_probe()
10556 rc = ipr_probe_ioa_part2(ioa_cfg); in ipr_probe()
10563 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); in ipr_probe()
10570 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10574 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10579 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10583 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10585 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10587 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10592 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10596 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10598 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10600 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10604 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_probe()
10605 ioa_cfg->scan_enabled = 1; in ipr_probe()
10606 schedule_work(&ioa_cfg->work_q); in ipr_probe()
10607 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_probe()
10609 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; in ipr_probe()
10611 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_probe()
10612 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_probe()
10613 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
10614 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_probe()
10618 scsi_scan_host(ioa_cfg->host); in ipr_probe()
10635 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_shutdown() local
10640 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10641 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_shutdown()
10642 ioa_cfg->iopoll_weight = 0; in ipr_shutdown()
10643 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_shutdown()
10644 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
10647 while (ioa_cfg->in_reset_reload) { in ipr_shutdown()
10648 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10649 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10650 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10653 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) in ipr_shutdown()
10656 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); in ipr_shutdown()
10657 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10658 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10659 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { in ipr_shutdown()
10660 ipr_free_irqs(ioa_cfg); in ipr_shutdown()
10661 pci_disable_device(ioa_cfg->pdev); in ipr_shutdown()
10814 struct ipr_ioa_cfg *ioa_cfg; in ipr_halt() local
10822 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { in ipr_halt()
10823 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_halt()
10824 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()
10825 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { in ipr_halt()
10826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
10830 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_halt()
10837 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
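
ipr_halt() runs from a reboot notifier: at restart/halt it walks ipr_ioa_head and issues a shutdown to each adapter still accepting commands. A minimal kernel-context sketch of registering such a hook; the callback body is a placeholder:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
    if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
        return NOTIFY_DONE;
    /* ... quiesce each adapter here ... */
    return NOTIFY_OK;
}

static struct notifier_block my_notifier = {
    .notifier_call = my_halt,
};

/* called from module init */
static int __init my_init(void)
{
    return register_reboot_notifier(&my_notifier);
}
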