Lines Matching +full:ld +full:- +full:pulse +full:- +full:delay +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * ipr.c -- driver for IBM Power Linux RAID adapters
5 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
17 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
18 * PCI-X Dual Channel Ultra 320 SCSI Adapter
19 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
23 * - Ultra 320 SCSI controller
24 * - PCI-X host interface
25 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
26 * - Non-Volatile Write Cache
27 * - Supports attachment of non-RAID disks, tape, and optical devices
28 * - RAID Levels 0, 5, 10
29 * - Hot spare
30 * - Background Parity Checking
31 * - Background Data Scrubbing
32 * - Ability to increase the capacity of an existing RAID 5 disk array
36 * - Tagged command queuing
37 * - Adapter microcode download
38 * - PCI hot plug
39 * - SCSI device hot plug
51 #include <linux/delay.h>
93 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
191 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
194 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2…
196 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
211 …_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
379 "9073: Invalid multi-adapter configuration"},
401 "Illegal request, command not allowed to a non-optimized resource"},
551 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
552 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
557 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
558 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
578 * ipr_trc_hook - Add a trace entry to the driver trace
590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_trc_hook()
593 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; in ipr_trc_hook()
594 trace_entry = &ioa_cfg->trace[trace_index]; in ipr_trc_hook()
595 trace_entry->time = jiffies; in ipr_trc_hook()
596 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; in ipr_trc_hook()
597 trace_entry->type = type; in ipr_trc_hook()
598 if (ipr_cmd->ioa_cfg->sis64) in ipr_trc_hook()
599 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; in ipr_trc_hook()
601 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; in ipr_trc_hook()
602 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; in ipr_trc_hook()
603 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; in ipr_trc_hook()
604 trace_entry->u.add_data = add_data; in ipr_trc_hook()
612 * ipr_lock_and_done - Acquire lock and complete command
621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_lock_and_done()
623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
624 ipr_cmd->done(ipr_cmd); in ipr_lock_and_done()
625 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
629 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
637 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_reinit_ipr_cmnd()
638 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_reinit_ipr_cmnd()
639 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; in ipr_reinit_ipr_cmnd()
640 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_reinit_ipr_cmnd()
643 hrrq_id = ioarcb->cmd_pkt.hrrq_id; in ipr_reinit_ipr_cmnd()
644 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); in ipr_reinit_ipr_cmnd()
645 ioarcb->cmd_pkt.hrrq_id = hrrq_id; in ipr_reinit_ipr_cmnd()
646 ioarcb->data_transfer_length = 0; in ipr_reinit_ipr_cmnd()
647 ioarcb->read_data_transfer_length = 0; in ipr_reinit_ipr_cmnd()
648 ioarcb->ioadl_len = 0; in ipr_reinit_ipr_cmnd()
649 ioarcb->read_ioadl_len = 0; in ipr_reinit_ipr_cmnd()
651 if (ipr_cmd->ioa_cfg->sis64) { in ipr_reinit_ipr_cmnd()
652 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_reinit_ipr_cmnd()
654 ioasa64->u.gata.status = 0; in ipr_reinit_ipr_cmnd()
656 ioarcb->write_ioadl_addr = in ipr_reinit_ipr_cmnd()
658 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_reinit_ipr_cmnd()
659 ioasa->u.gata.status = 0; in ipr_reinit_ipr_cmnd()
662 ioasa->hdr.ioasc = 0; in ipr_reinit_ipr_cmnd()
663 ioasa->hdr.residual_data_len = 0; in ipr_reinit_ipr_cmnd()
664 ipr_cmd->scsi_cmd = NULL; in ipr_reinit_ipr_cmnd()
665 ipr_cmd->qc = NULL; in ipr_reinit_ipr_cmnd()
666 ipr_cmd->sense_buffer[0] = 0; in ipr_reinit_ipr_cmnd()
667 ipr_cmd->dma_use_sg = 0; in ipr_reinit_ipr_cmnd()
671 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
673 * @fast_done: fast done function call-back
682 ipr_cmd->u.scratch = 0; in ipr_init_ipr_cmnd()
683 ipr_cmd->sibling = NULL; in ipr_init_ipr_cmnd()
684 ipr_cmd->eh_comp = NULL; in ipr_init_ipr_cmnd()
685 ipr_cmd->fast_done = fast_done; in ipr_init_ipr_cmnd()
686 timer_setup(&ipr_cmd->timer, NULL, 0); in ipr_init_ipr_cmnd()
690 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
701 if (likely(!list_empty(&hrrq->hrrq_free_q))) { in __ipr_get_free_ipr_cmnd()
702 ipr_cmd = list_entry(hrrq->hrrq_free_q.next, in __ipr_get_free_ipr_cmnd()
704 list_del(&ipr_cmd->queue); in __ipr_get_free_ipr_cmnd()
712 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
722 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
728 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
744 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_mask_and_clear_interrupts()
745 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
746 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
747 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
751 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
752 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
754 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
757 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
758 writel(~0, ioa_cfg->regs.clr_interrupt_reg); in ipr_mask_and_clear_interrupts()
759 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); in ipr_mask_and_clear_interrupts()
760 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_mask_and_clear_interrupts()
764 * ipr_save_pcix_cmd_reg - Save PCI-X command register
768 * 0 on success / -EIO on failure
772 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_save_pcix_cmd_reg()
777 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_save_pcix_cmd_reg()
778 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_save_pcix_cmd_reg()
779 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); in ipr_save_pcix_cmd_reg()
780 return -EIO; in ipr_save_pcix_cmd_reg()
783 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; in ipr_save_pcix_cmd_reg()
788 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792 * 0 on success / -EIO on failure
796 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_set_pcix_cmd_reg()
799 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_set_pcix_cmd_reg()
800 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_set_pcix_cmd_reg()
801 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); in ipr_set_pcix_cmd_reg()
802 return -EIO; in ipr_set_pcix_cmd_reg()
810 * __ipr_sata_eh_done - done function for aborted SATA commands
821 struct ata_queued_cmd *qc = ipr_cmd->qc; in __ipr_sata_eh_done()
822 struct ipr_sata_port *sata_port = qc->ap->private_data; in __ipr_sata_eh_done()
824 qc->err_mask |= AC_ERR_OTHER; in __ipr_sata_eh_done()
825 sata_port->ioasa.status |= ATA_BUSY; in __ipr_sata_eh_done()
827 if (ipr_cmd->eh_comp) in __ipr_sata_eh_done()
828 complete(ipr_cmd->eh_comp); in __ipr_sata_eh_done()
829 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_sata_eh_done()
833 * ipr_sata_eh_done - done function for aborted SATA commands
844 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_sata_eh_done()
847 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_sata_eh_done()
849 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_sata_eh_done()
853 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
857 * ops generated by the SCSI mid-layer which are being aborted.
864 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in __ipr_scsi_eh_done()
866 scsi_cmd->result |= (DID_ERROR << 16); in __ipr_scsi_eh_done()
868 scsi_dma_unmap(ipr_cmd->scsi_cmd); in __ipr_scsi_eh_done()
870 if (ipr_cmd->eh_comp) in __ipr_scsi_eh_done()
871 complete(ipr_cmd->eh_comp); in __ipr_scsi_eh_done()
872 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_scsi_eh_done()
876 * ipr_scsi_eh_done - mid-layer done function for aborted ops
880 * ops generated by the SCSI mid-layer which are being aborted.
888 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_scsi_eh_done()
890 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_scsi_eh_done()
892 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_scsi_eh_done()
896 * ipr_fail_all_ops - Fails all outstanding ops.
911 spin_lock(&hrrq->_lock); in ipr_fail_all_ops()
913 temp, &hrrq->hrrq_pending_q, queue) { in ipr_fail_all_ops()
914 list_del(&ipr_cmd->queue); in ipr_fail_all_ops()
916 ipr_cmd->s.ioasa.hdr.ioasc = in ipr_fail_all_ops()
918 ipr_cmd->s.ioasa.hdr.ilid = in ipr_fail_all_ops()
921 if (ipr_cmd->scsi_cmd) in ipr_fail_all_ops()
922 ipr_cmd->done = __ipr_scsi_eh_done; in ipr_fail_all_ops()
923 else if (ipr_cmd->qc) in ipr_fail_all_ops()
924 ipr_cmd->done = __ipr_sata_eh_done; in ipr_fail_all_ops()
928 del_timer(&ipr_cmd->timer); in ipr_fail_all_ops()
929 ipr_cmd->done(ipr_cmd); in ipr_fail_all_ops()
931 spin_unlock(&hrrq->_lock); in ipr_fail_all_ops()
937 * ipr_send_command - Send driver initiated requests.
949 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_command()
950 dma_addr_t send_dma_addr = ipr_cmd->dma_addr; in ipr_send_command()
952 if (ioa_cfg->sis64) { in ipr_send_command()
958 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) in ipr_send_command()
960 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
962 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
966 * ipr_do_req - Send driver initiated requests.
982 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_do_req()
984 ipr_cmd->done = done; in ipr_do_req()
986 ipr_cmd->timer.expires = jiffies + timeout; in ipr_do_req()
987 ipr_cmd->timer.function = timeout_func; in ipr_do_req()
989 add_timer(&ipr_cmd->timer); in ipr_do_req()
997 * ipr_internal_cmd_done - Op done function for an internally generated op.
1008 if (ipr_cmd->sibling) in ipr_internal_cmd_done()
1009 ipr_cmd->sibling = NULL; in ipr_internal_cmd_done()
1011 complete(&ipr_cmd->completion); in ipr_internal_cmd_done()
1015 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_init_ioadl()
1031 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_init_ioadl()
1033 ipr_cmd->dma_use_sg = 1; in ipr_init_ioadl()
1035 if (ipr_cmd->ioa_cfg->sis64) { in ipr_init_ioadl()
1036 ioadl64->flags = cpu_to_be32(flags); in ipr_init_ioadl()
1037 ioadl64->data_len = cpu_to_be32(len); in ipr_init_ioadl()
1038 ioadl64->address = cpu_to_be64(dma_addr); in ipr_init_ioadl()
1040 ipr_cmd->ioarcb.ioadl_len = in ipr_init_ioadl()
1042 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1044 ioadl->flags_and_data_len = cpu_to_be32(flags | len); in ipr_init_ioadl()
1045 ioadl->address = cpu_to_be32(dma_addr); in ipr_init_ioadl()
1048 ipr_cmd->ioarcb.read_ioadl_len = in ipr_init_ioadl()
1050 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1052 ipr_cmd->ioarcb.ioadl_len = in ipr_init_ioadl()
1054 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1060 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1072 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_blocking_cmd()
1074 init_completion(&ipr_cmd->completion); in ipr_send_blocking_cmd()
1077 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1078 wait_for_completion(&ipr_cmd->completion); in ipr_send_blocking_cmd()
1079 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1086 if (ioa_cfg->hrrq_num == 1) in ipr_get_hrrq_index()
1089 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1090 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
1096 * ipr_send_hcam - Send an HCAM to the adapter.
1114 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1116 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_send_hcam()
1117 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); in ipr_send_hcam()
1119 ipr_cmd->u.hostrcb = hostrcb; in ipr_send_hcam()
1120 ioarcb = &ipr_cmd->ioarcb; in ipr_send_hcam()
1122 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_send_hcam()
1123 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; in ipr_send_hcam()
1124 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; in ipr_send_hcam()
1125 ioarcb->cmd_pkt.cdb[1] = type; in ipr_send_hcam()
1126 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; in ipr_send_hcam()
1127 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; in ipr_send_hcam()
1129 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, in ipr_send_hcam()
1130 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); in ipr_send_hcam()
1133 ipr_cmd->done = ipr_process_ccn; in ipr_send_hcam()
1135 ipr_cmd->done = ipr_process_error; in ipr_send_hcam()
1141 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_send_hcam()
1146 * ipr_update_ata_class - Update the ata class in the resource entry
1158 res->ata_class = ATA_DEV_ATA; in ipr_update_ata_class()
1162 res->ata_class = ATA_DEV_ATAPI; in ipr_update_ata_class()
1165 res->ata_class = ATA_DEV_UNKNOWN; in ipr_update_ata_class()
1171 * ipr_init_res_entry - Initialize a resource entry struct.
1183 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_init_res_entry()
1186 res->needs_sync_complete = 0; in ipr_init_res_entry()
1187 res->in_erp = 0; in ipr_init_res_entry()
1188 res->add_to_ml = 0; in ipr_init_res_entry()
1189 res->del_from_ml = 0; in ipr_init_res_entry()
1190 res->resetting_device = 0; in ipr_init_res_entry()
1191 res->reset_occurred = 0; in ipr_init_res_entry()
1192 res->sdev = NULL; in ipr_init_res_entry()
1193 res->sata_port = NULL; in ipr_init_res_entry()
1195 if (ioa_cfg->sis64) { in ipr_init_res_entry()
1196 proto = cfgtew->u.cfgte64->proto; in ipr_init_res_entry()
1197 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); in ipr_init_res_entry()
1198 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); in ipr_init_res_entry()
1199 res->qmodel = IPR_QUEUEING_MODEL64(res); in ipr_init_res_entry()
1200 res->type = cfgtew->u.cfgte64->res_type; in ipr_init_res_entry()
1202 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_init_res_entry()
1203 sizeof(res->res_path)); in ipr_init_res_entry()
1205 res->bus = 0; in ipr_init_res_entry()
1206 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_init_res_entry()
1207 sizeof(res->dev_lun.scsi_lun)); in ipr_init_res_entry()
1208 res->lun = scsilun_to_int(&res->dev_lun); in ipr_init_res_entry()
1210 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { in ipr_init_res_entry()
1211 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { in ipr_init_res_entry()
1212 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { in ipr_init_res_entry()
1214 res->target = gscsi_res->target; in ipr_init_res_entry()
1219 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1220 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1221 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1223 } else if (res->type == IPR_RES_TYPE_IOAFP) { in ipr_init_res_entry()
1224 res->bus = IPR_IOAFP_VIRTUAL_BUS; in ipr_init_res_entry()
1225 res->target = 0; in ipr_init_res_entry()
1226 } else if (res->type == IPR_RES_TYPE_ARRAY) { in ipr_init_res_entry()
1227 res->bus = IPR_ARRAY_VIRTUAL_BUS; in ipr_init_res_entry()
1228 res->target = find_first_zero_bit(ioa_cfg->array_ids, in ipr_init_res_entry()
1229 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1230 set_bit(res->target, ioa_cfg->array_ids); in ipr_init_res_entry()
1231 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { in ipr_init_res_entry()
1232 res->bus = IPR_VSET_VIRTUAL_BUS; in ipr_init_res_entry()
1233 res->target = find_first_zero_bit(ioa_cfg->vset_ids, in ipr_init_res_entry()
1234 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1235 set_bit(res->target, ioa_cfg->vset_ids); in ipr_init_res_entry()
1237 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1238 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1239 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1242 proto = cfgtew->u.cfgte->proto; in ipr_init_res_entry()
1243 res->qmodel = IPR_QUEUEING_MODEL(res); in ipr_init_res_entry()
1244 res->flags = cfgtew->u.cfgte->flags; in ipr_init_res_entry()
1245 if (res->flags & IPR_IS_IOA_RESOURCE) in ipr_init_res_entry()
1246 res->type = IPR_RES_TYPE_IOAFP; in ipr_init_res_entry()
1248 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; in ipr_init_res_entry()
1250 res->bus = cfgtew->u.cfgte->res_addr.bus; in ipr_init_res_entry()
1251 res->target = cfgtew->u.cfgte->res_addr.target; in ipr_init_res_entry()
1252 res->lun = cfgtew->u.cfgte->res_addr.lun; in ipr_init_res_entry()
1253 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); in ipr_init_res_entry()
1260 * ipr_is_same_device - Determine if two devices are the same.
1270 if (res->ioa_cfg->sis64) { in ipr_is_same_device()
1271 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, in ipr_is_same_device()
1272 sizeof(cfgtew->u.cfgte64->dev_id)) && in ipr_is_same_device()
1273 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_is_same_device()
1274 sizeof(cfgtew->u.cfgte64->lun))) { in ipr_is_same_device()
1278 if (res->bus == cfgtew->u.cfgte->res_addr.bus && in ipr_is_same_device()
1279 res->target == cfgtew->u.cfgte->res_addr.target && in ipr_is_same_device()
1280 res->lun == cfgtew->u.cfgte->res_addr.lun) in ipr_is_same_device()
1288 * __ipr_format_res_path - Format the resource path for printing.
1302 p += scnprintf(p, buffer + len - p, "%02X", res_path[0]); in __ipr_format_res_path()
1304 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]); in __ipr_format_res_path()
1310 * ipr_format_res_path - Format the resource path for printing.
1325 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); in ipr_format_res_path()
1326 __ipr_format_res_path(res_path, p, len - (p - buffer)); in ipr_format_res_path()
1331 * ipr_update_res_entry - Update the resource entry.
1345 if (res->ioa_cfg->sis64) { in ipr_update_res_entry()
1346 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); in ipr_update_res_entry()
1347 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); in ipr_update_res_entry()
1348 res->type = cfgtew->u.cfgte64->res_type; in ipr_update_res_entry()
1350 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, in ipr_update_res_entry()
1353 res->qmodel = IPR_QUEUEING_MODEL64(res); in ipr_update_res_entry()
1354 proto = cfgtew->u.cfgte64->proto; in ipr_update_res_entry()
1355 res->res_handle = cfgtew->u.cfgte64->res_handle; in ipr_update_res_entry()
1356 res->dev_id = cfgtew->u.cfgte64->dev_id; in ipr_update_res_entry()
1358 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_update_res_entry()
1359 sizeof(res->dev_lun.scsi_lun)); in ipr_update_res_entry()
1361 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_update_res_entry()
1362 sizeof(res->res_path))) { in ipr_update_res_entry()
1363 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_update_res_entry()
1364 sizeof(res->res_path)); in ipr_update_res_entry()
1368 if (res->sdev && new_path) in ipr_update_res_entry()
1369 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", in ipr_update_res_entry()
1370 ipr_format_res_path(res->ioa_cfg, in ipr_update_res_entry()
1371 res->res_path, buffer, sizeof(buffer))); in ipr_update_res_entry()
1373 res->flags = cfgtew->u.cfgte->flags; in ipr_update_res_entry()
1374 if (res->flags & IPR_IS_IOA_RESOURCE) in ipr_update_res_entry()
1375 res->type = IPR_RES_TYPE_IOAFP; in ipr_update_res_entry()
1377 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; in ipr_update_res_entry()
1379 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, in ipr_update_res_entry()
1382 res->qmodel = IPR_QUEUEING_MODEL(res); in ipr_update_res_entry()
1383 proto = cfgtew->u.cfgte->proto; in ipr_update_res_entry()
1384 res->res_handle = cfgtew->u.cfgte->res_handle; in ipr_update_res_entry()
1391 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1401 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_clear_res_target()
1403 if (!ioa_cfg->sis64) in ipr_clear_res_target()
1406 if (res->bus == IPR_ARRAY_VIRTUAL_BUS) in ipr_clear_res_target()
1407 clear_bit(res->target, ioa_cfg->array_ids); in ipr_clear_res_target()
1408 else if (res->bus == IPR_VSET_VIRTUAL_BUS) in ipr_clear_res_target()
1409 clear_bit(res->target, ioa_cfg->vset_ids); in ipr_clear_res_target()
1410 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { in ipr_clear_res_target()
1411 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) in ipr_clear_res_target()
1412 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) in ipr_clear_res_target()
1414 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1416 } else if (res->bus == 0) in ipr_clear_res_target()
1417 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1421 * ipr_handle_config_change - Handle a config change from the adapter
1437 if (ioa_cfg->sis64) { in ipr_handle_config_change()
1438 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; in ipr_handle_config_change()
1439 cc_res_handle = cfgtew.u.cfgte64->res_handle; in ipr_handle_config_change()
1441 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; in ipr_handle_config_change()
1442 cc_res_handle = cfgtew.u.cfgte->res_handle; in ipr_handle_config_change()
1445 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_handle_config_change()
1446 if (res->res_handle == cc_res_handle) { in ipr_handle_config_change()
1453 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_handle_config_change()
1460 res = list_entry(ioa_cfg->free_res_q.next, in ipr_handle_config_change()
1463 list_del(&res->queue); in ipr_handle_config_change()
1465 list_add_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_handle_config_change()
1470 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { in ipr_handle_config_change()
1471 if (res->sdev) { in ipr_handle_config_change()
1472 res->del_from_ml = 1; in ipr_handle_config_change()
1473 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_handle_config_change()
1474 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1477 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_handle_config_change()
1479 } else if (!res->sdev || res->del_from_ml) { in ipr_handle_config_change()
1480 res->add_to_ml = 1; in ipr_handle_config_change()
1481 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1488 * ipr_process_ccn - Op done function for a CCN.
1499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_ccn()
1500 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; in ipr_process_ccn()
1501 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_ccn()
1503 list_del_init(&hostrcb->queue); in ipr_process_ccn()
1504 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_ccn()
1509 dev_err(&ioa_cfg->pdev->dev, in ipr_process_ccn()
1519 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1532 i--; in strip_and_pad_whitespace()
1539 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1553 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); in ipr_log_vpd_compact()
1554 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer); in ipr_log_vpd_compact()
1556 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN); in ipr_log_vpd_compact()
1557 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer); in ipr_log_vpd_compact()
1559 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN); in ipr_log_vpd_compact()
1566 * ipr_log_vpd - Log the passed VPD to the error log.
1577 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); in ipr_log_vpd()
1578 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id, in ipr_log_vpd()
1583 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN); in ipr_log_vpd()
1589 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1600 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd); in ipr_log_ext_vpd_compact()
1602 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); in ipr_log_ext_vpd_compact()
1606 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1614 ipr_log_vpd(&vpd->vpd); in ipr_log_ext_vpd()
1615 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]), in ipr_log_ext_vpd()
1616 be32_to_cpu(vpd->wwid[1])); in ipr_log_ext_vpd()
1620 * ipr_log_enhanced_cache_error - Log a cache error.
1632 if (ioa_cfg->sis64) in ipr_log_enhanced_cache_error()
1633 error = &hostrcb->hcam.u.error64.u.type_12_error; in ipr_log_enhanced_cache_error()
1635 error = &hostrcb->hcam.u.error.u.type_12_error; in ipr_log_enhanced_cache_error()
1637 ipr_err("-----Current Configuration-----\n"); in ipr_log_enhanced_cache_error()
1639 ipr_log_ext_vpd(&error->ioa_vpd); in ipr_log_enhanced_cache_error()
1641 ipr_log_ext_vpd(&error->cfc_vpd); in ipr_log_enhanced_cache_error()
1643 ipr_err("-----Expected Configuration-----\n"); in ipr_log_enhanced_cache_error()
1645 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd); in ipr_log_enhanced_cache_error()
1647 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd); in ipr_log_enhanced_cache_error()
1650 be32_to_cpu(error->ioa_data[0]), in ipr_log_enhanced_cache_error()
1651 be32_to_cpu(error->ioa_data[1]), in ipr_log_enhanced_cache_error()
1652 be32_to_cpu(error->ioa_data[2])); in ipr_log_enhanced_cache_error()
1656 * ipr_log_cache_error - Log a cache error.
1667 &hostrcb->hcam.u.error.u.type_02_error; in ipr_log_cache_error()
1669 ipr_err("-----Current Configuration-----\n"); in ipr_log_cache_error()
1671 ipr_log_vpd(&error->ioa_vpd); in ipr_log_cache_error()
1673 ipr_log_vpd(&error->cfc_vpd); in ipr_log_cache_error()
1675 ipr_err("-----Expected Configuration-----\n"); in ipr_log_cache_error()
1677 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd); in ipr_log_cache_error()
1679 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd); in ipr_log_cache_error()
1682 be32_to_cpu(error->ioa_data[0]), in ipr_log_cache_error()
1683 be32_to_cpu(error->ioa_data[1]), in ipr_log_cache_error()
1684 be32_to_cpu(error->ioa_data[2])); in ipr_log_cache_error()
1688 * ipr_log_enhanced_config_error - Log a configuration error.
1702 error = &hostrcb->hcam.u.error.u.type_13_error; in ipr_log_enhanced_config_error()
1703 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_enhanced_config_error()
1706 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_enhanced_config_error()
1708 dev_entry = error->dev; in ipr_log_enhanced_config_error()
1713 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_enhanced_config_error()
1714 ipr_log_ext_vpd(&dev_entry->vpd); in ipr_log_enhanced_config_error()
1716 ipr_err("-----New Device Information-----\n"); in ipr_log_enhanced_config_error()
1717 ipr_log_ext_vpd(&dev_entry->new_vpd); in ipr_log_enhanced_config_error()
1720 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_enhanced_config_error()
1723 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_enhanced_config_error()
1728 * ipr_log_sis64_config_error - Log a device error.
1743 error = &hostrcb->hcam.u.error64.u.type_23_error; in ipr_log_sis64_config_error()
1744 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_sis64_config_error()
1747 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_sis64_config_error()
1749 dev_entry = error->dev; in ipr_log_sis64_config_error()
1755 __ipr_format_res_path(dev_entry->res_path, in ipr_log_sis64_config_error()
1757 ipr_log_ext_vpd(&dev_entry->vpd); in ipr_log_sis64_config_error()
1759 ipr_err("-----New Device Information-----\n"); in ipr_log_sis64_config_error()
1760 ipr_log_ext_vpd(&dev_entry->new_vpd); in ipr_log_sis64_config_error()
1763 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_sis64_config_error()
1766 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_sis64_config_error()
1771 * ipr_log_config_error - Log a configuration error.
1785 error = &hostrcb->hcam.u.error.u.type_03_error; in ipr_log_config_error()
1786 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_config_error()
1789 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_config_error()
1791 dev_entry = error->dev; in ipr_log_config_error()
1796 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_config_error()
1797 ipr_log_vpd(&dev_entry->vpd); in ipr_log_config_error()
1799 ipr_err("-----New Device Information-----\n"); in ipr_log_config_error()
1800 ipr_log_vpd(&dev_entry->new_vpd); in ipr_log_config_error()
1803 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_config_error()
1806 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_config_error()
1809 be32_to_cpu(dev_entry->ioa_data[0]), in ipr_log_config_error()
1810 be32_to_cpu(dev_entry->ioa_data[1]), in ipr_log_config_error()
1811 be32_to_cpu(dev_entry->ioa_data[2]), in ipr_log_config_error()
1812 be32_to_cpu(dev_entry->ioa_data[3]), in ipr_log_config_error()
1813 be32_to_cpu(dev_entry->ioa_data[4])); in ipr_log_config_error()
1818 * ipr_log_enhanced_array_error - Log an array configuration error.
1831 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_enhanced_array_error()
1833 error = &hostrcb->hcam.u.error.u.type_14_error; in ipr_log_enhanced_array_error()
1838 error->protection_level, in ipr_log_enhanced_array_error()
1839 ioa_cfg->host->host_no, in ipr_log_enhanced_array_error()
1840 error->last_func_vset_res_addr.bus, in ipr_log_enhanced_array_error()
1841 error->last_func_vset_res_addr.target, in ipr_log_enhanced_array_error()
1842 error->last_func_vset_res_addr.lun); in ipr_log_enhanced_array_error()
1846 array_entry = error->array_member; in ipr_log_enhanced_array_error()
1847 num_entries = min_t(u32, be32_to_cpu(error->num_entries), in ipr_log_enhanced_array_error()
1848 ARRAY_SIZE(error->array_member)); in ipr_log_enhanced_array_error()
1851 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_enhanced_array_error()
1854 if (be32_to_cpu(error->exposed_mode_adn) == i) in ipr_log_enhanced_array_error()
1859 ipr_log_ext_vpd(&array_entry->vpd); in ipr_log_enhanced_array_error()
1860 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_enhanced_array_error()
1861 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_enhanced_array_error()
1869 * ipr_log_array_error - Log an array configuration error.
1882 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_array_error()
1884 error = &hostrcb->hcam.u.error.u.type_04_error; in ipr_log_array_error()
1889 error->protection_level, in ipr_log_array_error()
1890 ioa_cfg->host->host_no, in ipr_log_array_error()
1891 error->last_func_vset_res_addr.bus, in ipr_log_array_error()
1892 error->last_func_vset_res_addr.target, in ipr_log_array_error()
1893 error->last_func_vset_res_addr.lun); in ipr_log_array_error()
1897 array_entry = error->array_member; in ipr_log_array_error()
1900 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_array_error()
1903 if (be32_to_cpu(error->exposed_mode_adn) == i) in ipr_log_array_error()
1908 ipr_log_vpd(&array_entry->vpd); in ipr_log_array_error()
1910 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_array_error()
1911 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_array_error()
1917 array_entry = error->array_member2; in ipr_log_array_error()
1924 * ipr_log_hex_data - Log additional hex IOA error data.
1939 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_log_hex_data()
1952 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1964 if (ioa_cfg->sis64) in ipr_log_enhanced_dual_ioa_error()
1965 error = &hostrcb->hcam.u.error64.u.type_17_error; in ipr_log_enhanced_dual_ioa_error()
1967 error = &hostrcb->hcam.u.error.u.type_17_error; in ipr_log_enhanced_dual_ioa_error()
1969 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_enhanced_dual_ioa_error()
1970 strim(error->failure_reason); in ipr_log_enhanced_dual_ioa_error()
1972 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, in ipr_log_enhanced_dual_ioa_error()
1973 be32_to_cpu(hostrcb->hcam.u.error.prc)); in ipr_log_enhanced_dual_ioa_error()
1974 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); in ipr_log_enhanced_dual_ioa_error()
1975 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_enhanced_dual_ioa_error()
1976 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_enhanced_dual_ioa_error()
1982 * ipr_log_dual_ioa_error - Log a dual adapter error.
1994 error = &hostrcb->hcam.u.error.u.type_07_error; in ipr_log_dual_ioa_error()
1995 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_dual_ioa_error()
1996 strim(error->failure_reason); in ipr_log_dual_ioa_error()
1998 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, in ipr_log_dual_ioa_error()
1999 be32_to_cpu(hostrcb->hcam.u.error.prc)); in ipr_log_dual_ioa_error()
2000 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); in ipr_log_dual_ioa_error()
2001 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_dual_ioa_error()
2002 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_dual_ioa_error()
2027 * ipr_log_fabric_path - Log a fabric path error
2038 u8 path_state = fabric->path_state; in ipr_log_fabric_path()
2050 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { in ipr_log_fabric_path()
2053 fabric->ioa_port); in ipr_log_fabric_path()
2054 } else if (fabric->cascaded_expander == 0xff) { in ipr_log_fabric_path()
2057 fabric->ioa_port, fabric->phy); in ipr_log_fabric_path()
2058 } else if (fabric->phy == 0xff) { in ipr_log_fabric_path()
2061 fabric->ioa_port, fabric->cascaded_expander); in ipr_log_fabric_path()
2065 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); in ipr_log_fabric_path()
2072 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); in ipr_log_fabric_path()
2076 * ipr_log64_fabric_path - Log a fabric path error
2087 u8 path_state = fabric->path_state; in ipr_log64_fabric_path()
2102 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_fabric_path()
2103 fabric->res_path, in ipr_log64_fabric_path()
2110 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, in ipr_log64_fabric_path()
2156 * ipr_log_path_elem - Log a fabric path element.
2167 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; in ipr_log_path_elem()
2168 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; in ipr_log_path_elem()
2184 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2185 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2187 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { in ipr_log_path_elem()
2190 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2191 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2192 } else if (cfg->cascaded_expander == 0xff) { in ipr_log_path_elem()
2195 path_type_desc[i].desc, cfg->phy, in ipr_log_path_elem()
2196 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2197 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2198 } else if (cfg->phy == 0xff) { in ipr_log_path_elem()
2201 path_type_desc[i].desc, cfg->cascaded_expander, in ipr_log_path_elem()
2202 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2203 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2207 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, in ipr_log_path_elem()
2208 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2209 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2217 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, in ipr_log_path_elem()
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2223 * ipr_log64_path_elem - Log a fabric path element.
2234 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; in ipr_log64_path_elem()
2235 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; in ipr_log64_path_elem()
2236 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; in ipr_log64_path_elem()
2252 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2253 cfg->res_path, buffer, sizeof(buffer)), in ipr_log64_path_elem()
2254 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log64_path_elem()
2255 be32_to_cpu(cfg->wwid[0]), in ipr_log64_path_elem()
2256 be32_to_cpu(cfg->wwid[1])); in ipr_log64_path_elem()
2261 "WWN=%08X%08X\n", cfg->type_status, in ipr_log64_path_elem()
2262 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2263 cfg->res_path, buffer, sizeof(buffer)), in ipr_log64_path_elem()
2264 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log64_path_elem()
2265 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log64_path_elem()
2269 * ipr_log_fabric_error - Log a fabric error.
2284 error = &hostrcb->hcam.u.error.u.type_20_error; in ipr_log_fabric_error()
2285 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_fabric_error()
2286 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); in ipr_log_fabric_error()
2288 add_len = be32_to_cpu(hostrcb->hcam.length) - in ipr_log_fabric_error()
2292 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { in ipr_log_fabric_error()
2297 add_len -= be16_to_cpu(fabric->length); in ipr_log_fabric_error()
2299 ((unsigned long)fabric + be16_to_cpu(fabric->length)); in ipr_log_fabric_error()
2306 * ipr_log_sis64_array_error - Log a sis64 array error.
2320 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_sis64_array_error()
2322 error = &hostrcb->hcam.u.error64.u.type_24_error; in ipr_log_sis64_array_error()
2327 error->protection_level, in ipr_log_sis64_array_error()
2328 ipr_format_res_path(ioa_cfg, error->last_res_path, in ipr_log_sis64_array_error()
2333 array_entry = error->array_member; in ipr_log_sis64_array_error()
2334 num_entries = min_t(u32, error->num_entries, in ipr_log_sis64_array_error()
2335 ARRAY_SIZE(error->array_member)); in ipr_log_sis64_array_error()
2339 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_sis64_array_error()
2342 if (error->exposed_mode_adn == i) in ipr_log_sis64_array_error()
2348 ipr_log_ext_vpd(&array_entry->vpd); in ipr_log_sis64_array_error()
2350 ipr_format_res_path(ioa_cfg, array_entry->res_path, in ipr_log_sis64_array_error()
2354 array_entry->expected_res_path, in ipr_log_sis64_array_error()
2362 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2377 error = &hostrcb->hcam.u.error64.u.type_30_error; in ipr_log_sis64_fabric_error()
2379 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_sis64_fabric_error()
2380 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); in ipr_log_sis64_fabric_error()
2382 add_len = be32_to_cpu(hostrcb->hcam.length) - in ipr_log_sis64_fabric_error()
2386 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { in ipr_log_sis64_fabric_error()
2391 add_len -= be16_to_cpu(fabric->length); in ipr_log_sis64_fabric_error()
2393 ((unsigned long)fabric + be16_to_cpu(fabric->length)); in ipr_log_sis64_fabric_error()
2400 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2412 error = &hostrcb->hcam.u.error64.u.type_41_error; in ipr_log_sis64_service_required_error()
2414 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_sis64_service_required_error()
2415 ipr_err("Primary Failure Reason: %s\n", error->failure_reason); in ipr_log_sis64_service_required_error()
2416 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_sis64_service_required_error()
2417 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_sis64_service_required_error()
2422 * ipr_log_generic_error - Log an adapter error.
2432 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, in ipr_log_generic_error()
2433 be32_to_cpu(hostrcb->hcam.length)); in ipr_log_generic_error()
2437 * ipr_log_sis64_device_error - Log a cache error.
2450 error = &hostrcb->hcam.u.error64.u.type_21_error; in ipr_log_sis64_device_error()
2452 ipr_err("-----Failing Device Information-----\n"); in ipr_log_sis64_device_error()
2454 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), in ipr_log_sis64_device_error()
2455 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); in ipr_log_sis64_device_error()
2457 __ipr_format_res_path(error->res_path, in ipr_log_sis64_device_error()
2459 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; in ipr_log_sis64_device_error()
2460 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; in ipr_log_sis64_device_error()
2461 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); in ipr_log_sis64_device_error()
2462 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc); in ipr_log_sis64_device_error()
2464 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); in ipr_log_sis64_device_error()
2466 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); in ipr_log_sis64_device_error()
2469 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); in ipr_log_sis64_device_error()
2473 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2495 * ipr_handle_log_data - Log an adapter error.
2511 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) in ipr_handle_log_data()
2514 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) in ipr_handle_log_data()
2515 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); in ipr_handle_log_data()
2517 if (ioa_cfg->sis64) in ipr_handle_log_data()
2518 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); in ipr_handle_log_data()
2520 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_handle_log_data()
2522 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || in ipr_handle_log_data()
2525 scsi_report_bus_reset(ioa_cfg->host, in ipr_handle_log_data()
2526 hostrcb->hcam.u.error.fd_res_addr.bus); in ipr_handle_log_data()
2535 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { in ipr_handle_log_data()
2536 error = &hostrcb->hcam.u.error64.u.type_21_error; in ipr_handle_log_data()
2538 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && in ipr_handle_log_data()
2539 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_handle_log_data()
2546 ioa_cfg->errors_logged++; in ipr_handle_log_data()
2548 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) in ipr_handle_log_data()
2550 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) in ipr_handle_log_data()
2551 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); in ipr_handle_log_data()
2553 switch (hostrcb->hcam.overlay_id) { in ipr_handle_log_data()
2611 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q, in ipr_get_free_hostrcb()
2615 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers."); in ipr_get_free_hostrcb()
2616 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q, in ipr_get_free_hostrcb()
2620 list_del_init(&hostrcb->queue); in ipr_get_free_hostrcb()
2625 * ipr_process_error - Op done function for an adapter error log.
2637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_error()
2638 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; in ipr_process_error()
2639 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_error()
2642 if (ioa_cfg->sis64) in ipr_process_error()
2643 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); in ipr_process_error()
2645 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_process_error()
2647 list_del_init(&hostrcb->queue); in ipr_process_error()
2648 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_error()
2656 dev_err(&ioa_cfg->pdev->dev, in ipr_process_error()
2660 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); in ipr_process_error()
2661 schedule_work(&ioa_cfg->work_q); in ipr_process_error()
2668 * ipr_timeout - An internally generated op has timed out.
2681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_timeout()
2684 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2686 ioa_cfg->errors_logged++; in ipr_timeout()
2687 dev_err(&ioa_cfg->pdev->dev, in ipr_timeout()
2690 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_timeout()
2691 ioa_cfg->sdt_state = GET_DUMP; in ipr_timeout()
2693 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) in ipr_timeout()
2696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2701 * ipr_oper_timeout - Adapter timed out transitioning to operational
2714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_oper_timeout()
2717 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2719 ioa_cfg->errors_logged++; in ipr_oper_timeout()
2720 dev_err(&ioa_cfg->pdev->dev, in ipr_oper_timeout()
2723 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_oper_timeout()
2724 ioa_cfg->sdt_state = GET_DUMP; in ipr_oper_timeout()
2726 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { in ipr_oper_timeout()
2728 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_oper_timeout()
2732 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2737 * ipr_find_ses_entry - Find matching SES in SES table
2752 if (ste->compare_product_id_byte[j] == 'X') { in ipr_find_ses_entry()
2753 vpids = &res->std_inq_data.vpids; in ipr_find_ses_entry()
2754 if (vpids->product_id[j] == ste->product_id[j]) in ipr_find_ses_entry()
2770 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2777 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2788 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_get_max_scsi_speed()
2789 if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) in ipr_get_max_scsi_speed()
2792 if (bus != res->bus) in ipr_get_max_scsi_speed()
2798 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); in ipr_get_max_scsi_speed()
2805 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2807 * @max_delay: max delay in micro-seconds to wait
2817 int delay = 1; in ipr_wait_iodbg_ack() local
2820 while (delay < max_delay) { in ipr_wait_iodbg_ack()
2821 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_wait_iodbg_ack()
2826 /* udelay cannot be used if delay is more than a few milliseconds */ in ipr_wait_iodbg_ack()
2827 if ((delay / 1000) > MAX_UDELAY_MS) in ipr_wait_iodbg_ack()
2828 mdelay(delay / 1000); in ipr_wait_iodbg_ack()
2830 udelay(delay); in ipr_wait_iodbg_ack()
2832 delay += delay; in ipr_wait_iodbg_ack()
2834 return -EIO; in ipr_wait_iodbg_ack()
2838 * ipr_get_sis64_dump_data_section - Dump IOA memory
2854 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); in ipr_get_sis64_dump_data_section()
2855 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); in ipr_get_sis64_dump_data_section()
2863 * ipr_get_ldump_data_section - Dump IOA memory
2870 * 0 on success / -EIO on failure
2877 int i, delay = 0; in ipr_get_ldump_data_section() local
2879 if (ioa_cfg->sis64) in ipr_get_ldump_data_section()
2885 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2890 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2892 return -EIO; in ipr_get_ldump_data_section()
2895 /* Signal LDUMP interlocked - clear IO debug ack */ in ipr_get_ldump_data_section()
2897 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2900 writel(start_addr, ioa_cfg->ioa_mailbox); in ipr_get_ldump_data_section()
2902 /* Signal address valid - clear IOA Reset alert */ in ipr_get_ldump_data_section()
2904 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2910 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2912 return -EIO; in ipr_get_ldump_data_section()
2916 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); in ipr_get_ldump_data_section()
2920 if (i < (length_in_words - 1)) { in ipr_get_ldump_data_section()
2921 /* Signal dump data received - Clear IO debug Ack */ in ipr_get_ldump_data_section()
2923 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2929 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2932 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2934 /* Signal dump data received - Clear IO debug Ack */ in ipr_get_ldump_data_section()
2936 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2938 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ in ipr_get_ldump_data_section()
2939 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { in ipr_get_ldump_data_section()
2941 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2947 delay += 10; in ipr_get_ldump_data_section()
2955 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2972 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; in ipr_sdt_copy()
2974 if (ioa_cfg->sis64) in ipr_sdt_copy()
2980 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { in ipr_sdt_copy()
2981 if (ioa_dump->page_offset >= PAGE_SIZE || in ipr_sdt_copy()
2982 ioa_dump->page_offset == 0) { in ipr_sdt_copy()
2990 ioa_dump->page_offset = 0; in ipr_sdt_copy()
2991 ioa_dump->ioa_data[ioa_dump->next_page_index] = page; in ipr_sdt_copy()
2992 ioa_dump->next_page_index++; in ipr_sdt_copy()
2994 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; in ipr_sdt_copy()
2996 rem_len = length - bytes_copied; in ipr_sdt_copy()
2997 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; in ipr_sdt_copy()
3000 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
3001 if (ioa_cfg->sdt_state == ABORT_DUMP) { in ipr_sdt_copy()
3002 rc = -EIO; in ipr_sdt_copy()
3006 &page[ioa_dump->page_offset / 4], in ipr_sdt_copy()
3009 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
3012 ioa_dump->page_offset += cur_len; in ipr_sdt_copy()
3025 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3033 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; in ipr_init_dump_entry_hdr()
3034 hdr->num_elems = 1; in ipr_init_dump_entry_hdr()
3035 hdr->offset = sizeof(*hdr); in ipr_init_dump_entry_hdr()
3036 hdr->status = IPR_DUMP_STATUS_SUCCESS; in ipr_init_dump_entry_hdr()
3040 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3050 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_dump_ioa_type_data()
3052 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); in ipr_dump_ioa_type_data()
3053 driver_dump->ioa_type_entry.hdr.len = in ipr_dump_ioa_type_data()
3054 sizeof(struct ipr_dump_ioa_type_entry) - in ipr_dump_ioa_type_data()
3056 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_dump_ioa_type_data()
3057 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; in ipr_dump_ioa_type_data()
3058 driver_dump->ioa_type_entry.type = ioa_cfg->type; in ipr_dump_ioa_type_data()
3059 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | in ipr_dump_ioa_type_data()
3060 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | in ipr_dump_ioa_type_data()
3061 ucode_vpd->minor_release[1]; in ipr_dump_ioa_type_data()
3062 driver_dump->hdr.num_entries++; in ipr_dump_ioa_type_data()
3066 * ipr_dump_version_data - Fill in the driver version in the dump.
3076 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); in ipr_dump_version_data()
3077 driver_dump->version_entry.hdr.len = in ipr_dump_version_data()
3078 sizeof(struct ipr_dump_version_entry) - in ipr_dump_version_data()
3080 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; in ipr_dump_version_data()
3081 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; in ipr_dump_version_data()
3082 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); in ipr_dump_version_data()
3083 driver_dump->hdr.num_entries++; in ipr_dump_version_data()
3087 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3097 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); in ipr_dump_trace_data()
3098 driver_dump->trace_entry.hdr.len = in ipr_dump_trace_data()
3099 sizeof(struct ipr_dump_trace_entry) - in ipr_dump_trace_data()
3101 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_dump_trace_data()
3102 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; in ipr_dump_trace_data()
3103 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); in ipr_dump_trace_data()
3104 driver_dump->hdr.num_entries++; in ipr_dump_trace_data()
3108 * ipr_dump_location_data - Fill in the IOA location in the dump.
3118 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); in ipr_dump_location_data()
3119 driver_dump->location_entry.hdr.len = in ipr_dump_location_data()
3120 sizeof(struct ipr_dump_location_entry) - in ipr_dump_location_data()
3122 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; in ipr_dump_location_data()
3123 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; in ipr_dump_location_data()
3124 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); in ipr_dump_location_data()
3125 driver_dump->hdr.num_entries++; in ipr_dump_location_data()
3129 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3140 struct ipr_driver_dump *driver_dump = &dump->driver_dump; in ipr_get_ioa_dump()
3141 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; in ipr_get_ioa_dump()
3150 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3152 if (ioa_cfg->sdt_state != READ_DUMP) { in ipr_get_ioa_dump()
3153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3157 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3158 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3160 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3163 start_addr = readl(ioa_cfg->ioa_mailbox); in ipr_get_ioa_dump()
3165 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { in ipr_get_ioa_dump()
3166 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3172 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); in ipr_get_ioa_dump()
3174 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; in ipr_get_ioa_dump()
3177 driver_dump->hdr.len = sizeof(struct ipr_driver_dump); in ipr_get_ioa_dump()
3178 driver_dump->hdr.num_entries = 1; in ipr_get_ioa_dump()
3179 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); in ipr_get_ioa_dump()
3180 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; in ipr_get_ioa_dump()
3181 driver_dump->hdr.os = IPR_DUMP_OS_LINUX; in ipr_get_ioa_dump()
3182 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; in ipr_get_ioa_dump()
3190 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); in ipr_get_ioa_dump()
3193 ipr_init_dump_entry_hdr(&ioa_dump->hdr); in ipr_get_ioa_dump()
3194 ioa_dump->hdr.len = 0; in ipr_get_ioa_dump()
3195 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_get_ioa_dump()
3196 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; in ipr_get_ioa_dump()
3202 sdt = &ioa_dump->sdt; in ipr_get_ioa_dump()
3204 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3218 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && in ipr_get_ioa_dump()
3219 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { in ipr_get_ioa_dump()
3220 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3222 rc, be32_to_cpu(sdt->hdr.state)); in ipr_get_ioa_dump()
3223 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; in ipr_get_ioa_dump()
3224 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3229 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); in ipr_get_ioa_dump()
3235 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); in ipr_get_ioa_dump()
3236 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3237 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); in ipr_get_ioa_dump()
3239 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); in ipr_get_ioa_dump()
3241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3244 if (ioa_dump->hdr.len > max_dump_size) { in ipr_get_ioa_dump()
3245 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; in ipr_get_ioa_dump()
3249 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { in ipr_get_ioa_dump()
3250 sdt_word = be32_to_cpu(sdt->entry[i].start_token); in ipr_get_ioa_dump()
3251 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3252 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); in ipr_get_ioa_dump()
3255 end_off = be32_to_cpu(sdt->entry[i].end_token); in ipr_get_ioa_dump()
3258 bytes_to_copy = end_off - start_off; in ipr_get_ioa_dump()
3264 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; in ipr_get_ioa_dump()
3272 ioa_dump->hdr.len += bytes_copied; in ipr_get_ioa_dump()
3275 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; in ipr_get_ioa_dump()
3282 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); in ipr_get_ioa_dump()
3285 driver_dump->hdr.len += ioa_dump->hdr.len; in ipr_get_ioa_dump()
3287 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3296 * ipr_release_dump - Free adapter dump memory
3305 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; in ipr_release_dump()
3310 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3311 ioa_cfg->dump = NULL; in ipr_release_dump()
3312 ioa_cfg->sdt_state = INACTIVE; in ipr_release_dump()
3313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3315 for (i = 0; i < dump->ioa_dump.next_page_index; i++) in ipr_release_dump()
3316 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); in ipr_release_dump()
3318 vfree(dump->ioa_dump.ioa_data); in ipr_release_dump()
3334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3339 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_add_remove_thread()
3340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3344 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3345 if (res->del_from_ml && res->sdev) { in ipr_add_remove_thread()
3347 sdev = res->sdev; in ipr_add_remove_thread()
3349 if (!res->add_to_ml) in ipr_add_remove_thread()
3350 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_add_remove_thread()
3352 res->del_from_ml = 0; in ipr_add_remove_thread()
3353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3356 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3363 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3364 if (res->add_to_ml) { in ipr_add_remove_thread()
3365 bus = res->bus; in ipr_add_remove_thread()
3366 target = res->target; in ipr_add_remove_thread()
3367 lun = res->lun; in ipr_add_remove_thread()
3368 res->add_to_ml = 0; in ipr_add_remove_thread()
3369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3370 scsi_add_device(ioa_cfg->host, bus, target, lun); in ipr_add_remove_thread()
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3376 ioa_cfg->scan_done = 1; in ipr_add_remove_thread()
3377 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3378 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); in ipr_add_remove_thread()
3383 * ipr_worker_thread - Worker thread
3387 * of adding and removing device from the mid-layer as configuration
3401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3403 if (ioa_cfg->sdt_state == READ_DUMP) { in ipr_worker_thread()
3404 dump = ioa_cfg->dump; in ipr_worker_thread()
3406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3409 kref_get(&dump->kref); in ipr_worker_thread()
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3412 kref_put(&dump->kref, ipr_release_dump); in ipr_worker_thread()
3414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3415 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) in ipr_worker_thread()
3417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3421 if (ioa_cfg->scsi_unblock) { in ipr_worker_thread()
3422 ioa_cfg->scsi_unblock = 0; in ipr_worker_thread()
3423 ioa_cfg->scsi_blocked = 0; in ipr_worker_thread()
3424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3425 scsi_unblock_requests(ioa_cfg->host); in ipr_worker_thread()
3426 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3427 if (ioa_cfg->scsi_blocked) in ipr_worker_thread()
3428 scsi_block_requests(ioa_cfg->host); in ipr_worker_thread()
3431 if (!ioa_cfg->scan_enabled) { in ipr_worker_thread()
3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3436 schedule_work(&ioa_cfg->scsi_add_work_q); in ipr_worker_thread()
3438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3444 * ipr_read_trace - Dump the adapter trace
3461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_trace()
3465 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3466 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, in ipr_read_trace()
3468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3484 * ipr_show_fw_version - Show the firmware version
3496 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_version()
3497 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_show_fw_version()
3501 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3503 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_show_fw_version()
3504 ucode_vpd->minor_release[0], in ipr_show_fw_version()
3505 ucode_vpd->minor_release[1]); in ipr_show_fw_version()
3506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3519 * ipr_show_log_level - Show the adapter's error logging level
3531 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_log_level()
3535 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3536 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); in ipr_show_log_level()
3537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3542 * ipr_store_log_level - Change the adapter's error logging level
3556 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_log_level()
3559 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3560 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); in ipr_store_log_level()
3561 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3575 * ipr_store_diagnostics - IOA Diagnostics interface
3592 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_diagnostics()
3597 return -EACCES; in ipr_store_diagnostics()
3599 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3600 while (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3601 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3602 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3603 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3606 ioa_cfg->errors_logged = 0; in ipr_store_diagnostics()
3609 if (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3611 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3617 return -EIO; in ipr_store_diagnostics()
3620 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3621 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) in ipr_store_diagnostics()
3622 rc = -EIO; in ipr_store_diagnostics()
3623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3637 * ipr_show_adapter_state - Show the adapter's state
3649 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_adapter_state()
3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3654 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3663 * ipr_store_adapter_state - Change adapter state
3679 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_adapter_state()
3684 return -EACCES; in ipr_store_adapter_state()
3686 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3687 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3689 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_store_adapter_state()
3690 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3691 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3692 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3695 ioa_cfg->reset_retries = 0; in ipr_store_adapter_state()
3696 ioa_cfg->in_ioa_bringdown = 0; in ipr_store_adapter_state()
3699 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3700 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_adapter_state()
3715 * ipr_store_reset_adapter - Reset the adapter
3731 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_reset_adapter()
3736 return -EACCES; in ipr_store_reset_adapter()
3738 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3739 if (!ioa_cfg->in_reset_reload) in ipr_store_reset_adapter()
3741 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3742 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_reset_adapter()
3757 * ipr_show_iopoll_weight - Show ipr polling mode
3769 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_iopoll_weight()
3773 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_show_iopoll_weight()
3774 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); in ipr_show_iopoll_weight()
3775 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_show_iopoll_weight()
3781 * ipr_store_iopoll_weight - Change the adapter's polling mode
3795 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_iopoll_weight()
3800 if (!ioa_cfg->sis64) { in ipr_store_iopoll_weight()
3801 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); in ipr_store_iopoll_weight()
3802 return -EINVAL; in ipr_store_iopoll_weight()
3805 return -EINVAL; in ipr_store_iopoll_weight()
3808 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); in ipr_store_iopoll_weight()
3809 return -EINVAL; in ipr_store_iopoll_weight()
3812 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { in ipr_store_iopoll_weight()
3813 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n"); in ipr_store_iopoll_weight()
3817 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3818 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_store_iopoll_weight()
3819 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3822 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_store_iopoll_weight()
3823 ioa_cfg->iopoll_weight = user_iopoll_weight; in ipr_store_iopoll_weight()
3824 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3825 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_store_iopoll_weight()
3826 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3827 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_store_iopoll_weight()
3830 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_store_iopoll_weight()
3845 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3860 sg_size = buf_len / (IPR_MAX_SGLIST - 1); in ipr_alloc_ucode_buffer()
3871 sglist->order = order; in ipr_alloc_ucode_buffer()
3872 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL, in ipr_alloc_ucode_buffer()
3873 &sglist->num_sg); in ipr_alloc_ucode_buffer()
3874 if (!sglist->scatterlist) { in ipr_alloc_ucode_buffer()
3883 * ipr_free_ucode_buffer - Frees a microcode download buffer
3894 sgl_free_order(sglist->scatterlist, sglist->order); in ipr_free_ucode_buffer()
3899 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3918 bsize_elem = PAGE_SIZE * (1 << sglist->order); in ipr_copy_ucode_buffer()
3920 sg = sglist->scatterlist; in ipr_copy_ucode_buffer()
3930 sg->length = bsize_elem; in ipr_copy_ucode_buffer()
3945 sg->length = len % bsize_elem; in ipr_copy_ucode_buffer()
3948 sglist->buffer_len = len; in ipr_copy_ucode_buffer()
3953 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3963 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ucode_ioadl64()
3964 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_build_ucode_ioadl64()
3965 struct scatterlist *scatterlist = sglist->scatterlist; in ipr_build_ucode_ioadl64()
3969 ipr_cmd->dma_use_sg = sglist->num_dma_sg; in ipr_build_ucode_ioadl64()
3970 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ucode_ioadl64()
3971 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); in ipr_build_ucode_ioadl64()
3973 ioarcb->ioadl_len = in ipr_build_ucode_ioadl64()
3974 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ucode_ioadl64()
3975 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ucode_ioadl64()
3981 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ucode_ioadl64()
3985 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3995 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ucode_ioadl()
3996 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ucode_ioadl()
3997 struct scatterlist *scatterlist = sglist->scatterlist; in ipr_build_ucode_ioadl()
4001 ipr_cmd->dma_use_sg = sglist->num_dma_sg; in ipr_build_ucode_ioadl()
4002 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ucode_ioadl()
4003 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); in ipr_build_ucode_ioadl()
4005 ioarcb->ioadl_len = in ipr_build_ucode_ioadl()
4006 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ucode_ioadl()
4008 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ucode_ioadl()
4015 ioadl[i-1].flags_and_data_len |= in ipr_build_ucode_ioadl()
4020 * ipr_update_ioa_ucode - Update IOA's microcode
4027 * 0 on success / -EIO on failure
4034 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4035 while (ioa_cfg->in_reset_reload) { in ipr_update_ioa_ucode()
4036 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4037 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
4038 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4041 if (ioa_cfg->ucode_sglist) { in ipr_update_ioa_ucode()
4042 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4043 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4045 return -EIO; in ipr_update_ioa_ucode()
4048 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4049 sglist->scatterlist, sglist->num_sg, in ipr_update_ioa_ucode()
4052 if (!sglist->num_dma_sg) { in ipr_update_ioa_ucode()
4053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4054 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4056 return -EIO; in ipr_update_ioa_ucode()
4059 ioa_cfg->ucode_sglist = sglist; in ipr_update_ioa_ucode()
4061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4062 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
4064 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4065 ioa_cfg->ucode_sglist = NULL; in ipr_update_ioa_ucode()
4066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4071 * ipr_store_update_fw - Update the firmware on the adapter
4087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_update_fw()
4097 return -EACCES; in ipr_store_update_fw()
4105 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { in ipr_store_update_fw()
4106 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); in ipr_store_update_fw()
4107 return -EIO; in ipr_store_update_fw()
4110 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; in ipr_store_update_fw()
4112 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); in ipr_store_update_fw()
4113 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); in ipr_store_update_fw()
4117 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); in ipr_store_update_fw()
4119 return -ENOMEM; in ipr_store_update_fw()
4125 dev_err(&ioa_cfg->pdev->dev, in ipr_store_update_fw()
4151 * ipr_show_fw_type - Show the adapter's firmware type.
4163 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_type()
4167 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4168 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); in ipr_show_fw_type()
4169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4187 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_async_err_log()
4192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4193 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_read_async_err_log()
4196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4199 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam, in ipr_read_async_err_log()
4200 sizeof(hostrcb->hcam)); in ipr_read_async_err_log()
4201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4211 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_next_async_err_log()
4215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4216 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_next_async_err_log()
4219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4224 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_next_async_err_log()
4225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4255 * ipr_read_dump - Dump the adapter
4272 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_dump()
4280 return -EACCES; in ipr_read_dump()
4282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4283 dump = ioa_cfg->dump; in ipr_read_dump()
4285 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { in ipr_read_dump()
4286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4289 kref_get(&dump->kref); in ipr_read_dump()
4290 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4292 if (off > dump->driver_dump.hdr.len) { in ipr_read_dump()
4293 kref_put(&dump->kref, ipr_release_dump); in ipr_read_dump()
4297 if (off + count > dump->driver_dump.hdr.len) { in ipr_read_dump()
4298 count = dump->driver_dump.hdr.len - off; in ipr_read_dump()
4302 if (count && off < sizeof(dump->driver_dump)) { in ipr_read_dump()
4303 if (off + count > sizeof(dump->driver_dump)) in ipr_read_dump()
4304 len = sizeof(dump->driver_dump) - off; in ipr_read_dump()
4307 src = (u8 *)&dump->driver_dump + off; in ipr_read_dump()
4311 count -= len; in ipr_read_dump()
4314 off -= sizeof(dump->driver_dump); in ipr_read_dump()
4316 if (ioa_cfg->sis64) in ipr_read_dump()
4318 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * in ipr_read_dump()
4326 len = sdt_end - off; in ipr_read_dump()
4329 src = (u8 *)&dump->ioa_dump + off; in ipr_read_dump()
4333 count -= len; in ipr_read_dump()
4336 off -= sdt_end; in ipr_read_dump()
4340 len = PAGE_ALIGN(off) - off; in ipr_read_dump()
4343 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; in ipr_read_dump()
4348 count -= len; in ipr_read_dump()
4351 kref_put(&dump->kref, ipr_release_dump); in ipr_read_dump()
4356 * ipr_alloc_dump - Prepare for adapter dump
4372 return -ENOMEM; in ipr_alloc_dump()
4375 if (ioa_cfg->sis64) in ipr_alloc_dump()
4385 return -ENOMEM; in ipr_alloc_dump()
4388 dump->ioa_dump.ioa_data = ioa_data; in ipr_alloc_dump()
4390 kref_init(&dump->kref); in ipr_alloc_dump()
4391 dump->ioa_cfg = ioa_cfg; in ipr_alloc_dump()
4393 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4395 if (INACTIVE != ioa_cfg->sdt_state) { in ipr_alloc_dump()
4396 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4397 vfree(dump->ioa_dump.ioa_data); in ipr_alloc_dump()
4402 ioa_cfg->dump = dump; in ipr_alloc_dump()
4403 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_alloc_dump()
4404 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4405 ioa_cfg->dump_taken = 1; in ipr_alloc_dump()
4406 schedule_work(&ioa_cfg->work_q); in ipr_alloc_dump()
4408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4414 * ipr_free_dump - Free adapter dump memory
4427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4428 dump = ioa_cfg->dump; in ipr_free_dump()
4430 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4434 ioa_cfg->dump = NULL; in ipr_free_dump()
4435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4437 kref_put(&dump->kref, ipr_release_dump); in ipr_free_dump()
4444 * ipr_write_dump - Setup dump state of adapter
4461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_write_dump()
4465 return -EACCES; in ipr_write_dump()
4472 return -EINVAL; in ipr_write_dump()
4494 * ipr_change_queue_depth - Change the device's queue depth
4503 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_change_queue_depth()
4507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4508 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_change_queue_depth()
4512 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4515 return sdev->queue_depth; in ipr_change_queue_depth()
4519 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4530 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_adapter_handle()
4533 ssize_t len = -ENXIO; in ipr_show_adapter_handle()
4535 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4536 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_adapter_handle()
4538 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); in ipr_show_adapter_handle()
4539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4552 * ipr_show_resource_path - Show the resource path or the resource address for
4564 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_path()
4567 ssize_t len = -ENXIO; in ipr_show_resource_path()
4570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4571 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_resource_path()
4572 if (res && ioa_cfg->sis64) in ipr_show_resource_path()
4574 __ipr_format_res_path(res->res_path, buffer, in ipr_show_resource_path()
4577 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, in ipr_show_resource_path()
4578 res->bus, res->target, res->lun); in ipr_show_resource_path()
4580 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4593 * ipr_show_device_id - Show the device_id for this device.
4604 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_device_id()
4607 ssize_t len = -ENXIO; in ipr_show_device_id()
4609 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4610 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_device_id()
4611 if (res && ioa_cfg->sis64) in ipr_show_device_id()
4612 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); in ipr_show_device_id()
4614 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); in ipr_show_device_id()
4616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4629 * ipr_show_resource_type - Show the resource type for this device.
4640 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_type()
4643 ssize_t len = -ENXIO; in ipr_show_resource_type()
4645 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4646 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_resource_type()
4649 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); in ipr_show_resource_type()
4651 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4664 * ipr_show_raw_mode - Show the adapter's raw mode
4676 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_raw_mode()
4681 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4682 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_raw_mode()
4684 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); in ipr_show_raw_mode()
4686 len = -ENXIO; in ipr_show_raw_mode()
4687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4692 * ipr_store_raw_mode - Change the adapter's raw mode
4706 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_store_raw_mode()
4711 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4712 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_store_raw_mode()
4715 res->raw_mode = simple_strtoul(buf, NULL, 10); in ipr_store_raw_mode()
4717 if (res->sdev) in ipr_store_raw_mode()
4718 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", in ipr_store_raw_mode()
4719 res->raw_mode ? "enabled" : "disabled"); in ipr_store_raw_mode()
4721 len = -EINVAL; in ipr_store_raw_mode()
4723 len = -ENXIO; in ipr_store_raw_mode()
4724 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4749 * ipr_biosparam - Return the HSC mapping
4784 * ipr_find_starget - Find target based on bus/target.
4792 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_find_starget()
4793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_find_starget()
4796 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_starget()
4797 if ((res->bus == starget->channel) && in ipr_find_starget()
4798 (res->target == starget->id)) { in ipr_find_starget()
4809 * ipr_target_alloc - Prepare for commands to a SCSI target
4816 * 0 on success / non-0 on failure
4820 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_target_alloc()
4821 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_alloc()
4827 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4829 starget->hostdata = NULL; in ipr_target_alloc()
4832 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4835 return -ENOMEM; in ipr_target_alloc()
4837 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); in ipr_target_alloc()
4839 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4840 sata_port->ioa_cfg = ioa_cfg; in ipr_target_alloc()
4841 sata_port->ap = ap; in ipr_target_alloc()
4842 sata_port->res = res; in ipr_target_alloc()
4844 res->sata_port = sata_port; in ipr_target_alloc()
4845 ap->private_data = sata_port; in ipr_target_alloc()
4846 starget->hostdata = sata_port; in ipr_target_alloc()
4849 return -ENOMEM; in ipr_target_alloc()
4852 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4858 * ipr_target_destroy - Destroy a SCSI target
4867 struct ipr_sata_port *sata_port = starget->hostdata; in ipr_target_destroy()
4868 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_target_destroy()
4869 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_destroy()
4871 if (ioa_cfg->sis64) { in ipr_target_destroy()
4873 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) in ipr_target_destroy()
4874 clear_bit(starget->id, ioa_cfg->array_ids); in ipr_target_destroy()
4875 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) in ipr_target_destroy()
4876 clear_bit(starget->id, ioa_cfg->vset_ids); in ipr_target_destroy()
4877 else if (starget->channel == 0) in ipr_target_destroy()
4878 clear_bit(starget->id, ioa_cfg->target_ids); in ipr_target_destroy()
4883 starget->hostdata = NULL; in ipr_target_destroy()
4884 ata_sas_port_destroy(sata_port->ap); in ipr_target_destroy()
4890 * ipr_find_sdev - Find device based on bus/target/lun.
4898 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_find_sdev()
4901 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_sdev()
4902 if ((res->bus == sdev->channel) && in ipr_find_sdev()
4903 (res->target == sdev->id) && in ipr_find_sdev()
4904 (res->lun == sdev->lun)) in ipr_find_sdev()
4912 * ipr_slave_destroy - Unconfigure a SCSI device
4924 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_destroy()
4926 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4927 res = (struct ipr_resource_entry *) sdev->hostdata; in ipr_slave_destroy()
4929 if (res->sata_port) in ipr_slave_destroy()
4930 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE; in ipr_slave_destroy()
4931 sdev->hostdata = NULL; in ipr_slave_destroy()
4932 res->sdev = NULL; in ipr_slave_destroy()
4933 res->sata_port = NULL; in ipr_slave_destroy()
4935 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4939 * ipr_slave_configure - Configure a SCSI device
4949 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_configure()
4955 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4956 res = sdev->hostdata; in ipr_slave_configure()
4959 sdev->type = TYPE_RAID; in ipr_slave_configure()
4961 sdev->scsi_level = 4; in ipr_slave_configure()
4962 sdev->no_uld_attach = 1; in ipr_slave_configure()
4965 sdev->scsi_level = SCSI_SPC_3; in ipr_slave_configure()
4966 sdev->no_report_opcodes = 1; in ipr_slave_configure()
4967 blk_queue_rq_timeout(sdev->request_queue, in ipr_slave_configure()
4969 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); in ipr_slave_configure()
4971 if (ipr_is_gata(res) && res->sata_port) in ipr_slave_configure()
4972 ap = res->sata_port->ap; in ipr_slave_configure()
4973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4980 if (ioa_cfg->sis64) in ipr_slave_configure()
4983 res->res_path, buffer, sizeof(buffer))); in ipr_slave_configure()
4986 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4991 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
5003 int rc = -ENXIO; in ipr_ata_slave_alloc()
5006 if (sdev->sdev_target) in ipr_ata_slave_alloc()
5007 sata_port = sdev->sdev_target->hostdata; in ipr_ata_slave_alloc()
5009 rc = ata_sas_port_init(sata_port->ap); in ipr_ata_slave_alloc()
5011 rc = ata_sas_sync_probe(sata_port->ap); in ipr_ata_slave_alloc()
5022 * ipr_slave_alloc - Prepare for commands to a device.
5031 * 0 on success / -ENXIO if device does not exist
5035 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_alloc()
5038 int rc = -ENXIO; in ipr_slave_alloc()
5040 sdev->hostdata = NULL; in ipr_slave_alloc()
5042 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5046 res->sdev = sdev; in ipr_slave_alloc()
5047 res->add_to_ml = 0; in ipr_slave_alloc()
5048 res->in_erp = 0; in ipr_slave_alloc()
5049 sdev->hostdata = res; in ipr_slave_alloc()
5051 res->needs_sync_complete = 1; in ipr_slave_alloc()
5054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5059 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5065 * ipr_match_lun - Match function for specified LUN
5074 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) in ipr_match_lun()
5080 * ipr_cmnd_is_free - Check if a command is free or not
5090 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { in ipr_cmnd_is_free()
5099 * ipr_match_res - Match function for specified resource entry
5110 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle) in ipr_match_res()
5116 * ipr_wait_for_ops - Wait for matching commands to complete
5139 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
5140 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_wait_for_ops()
5141 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
5144 ipr_cmd->eh_comp = &comp; in ipr_wait_for_ops()
5149 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
5159 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
5160 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_wait_for_ops()
5161 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
5164 ipr_cmd->eh_comp = NULL; in ipr_wait_for_ops()
5169 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
5173 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); in ipr_wait_for_ops()
5191 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_host_reset()
5192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5194 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5196 dev_err(&ioa_cfg->pdev->dev, in ipr_eh_host_reset()
5199 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_eh_host_reset()
5200 ioa_cfg->sdt_state = GET_DUMP; in ipr_eh_host_reset()
5203 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5204 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_eh_host_reset()
5205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5209 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5214 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5220 * ipr_device_reset - Reset the device
5231 * 0 on success / non-zero on failure
5244 ioarcb = &ipr_cmd->ioarcb; in ipr_device_reset()
5245 cmd_pkt = &ioarcb->cmd_pkt; in ipr_device_reset()
5247 if (ipr_cmd->ioa_cfg->sis64) { in ipr_device_reset()
5248 regs = &ipr_cmd->i.ata_ioadl.regs; in ipr_device_reset()
5249 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); in ipr_device_reset()
5251 regs = &ioarcb->u.add_data.u.regs; in ipr_device_reset()
5253 ioarcb->res_handle = res->res_handle; in ipr_device_reset()
5254 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_device_reset()
5255 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; in ipr_device_reset()
5257 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; in ipr_device_reset()
5258 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); in ipr_device_reset()
5259 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; in ipr_device_reset()
5263 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_device_reset()
5264 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_device_reset()
5265 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { in ipr_device_reset()
5266 if (ipr_cmd->ioa_cfg->sis64) in ipr_device_reset()
5267 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, in ipr_device_reset()
5270 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, in ipr_device_reset()
5275 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; in ipr_device_reset()
5279 * ipr_sata_reset - Reset the SATA port
5287 * 0 on success / non-zero on failure
5292 struct ipr_sata_port *sata_port = link->ap->private_data; in ipr_sata_reset()
5293 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_sata_reset()
5296 int rc = -ENXIO, ret; in ipr_sata_reset()
5299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5300 while (ioa_cfg->in_reset_reload) { in ipr_sata_reset()
5301 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5302 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5303 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5306 res = sata_port->res; in ipr_sata_reset()
5309 *classes = res->ata_class; in ipr_sata_reset()
5310 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5318 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5321 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5328 * __ipr_eh_dev_reset - Reset the device
5348 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in __ipr_eh_dev_reset()
5349 res = scsi_cmd->device->hostdata; in __ipr_eh_dev_reset()
5353 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the in __ipr_eh_dev_reset()
5356 if (ioa_cfg->in_reset_reload) in __ipr_eh_dev_reset()
5358 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5362 spin_lock(&hrrq->_lock); in __ipr_eh_dev_reset()
5363 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in __ipr_eh_dev_reset()
5364 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in __ipr_eh_dev_reset()
5366 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { in __ipr_eh_dev_reset()
5367 if (!ipr_cmd->qc) in __ipr_eh_dev_reset()
5372 ipr_cmd->done = ipr_sata_eh_done; in __ipr_eh_dev_reset()
5373 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { in __ipr_eh_dev_reset()
5374 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; in __ipr_eh_dev_reset()
5375 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; in __ipr_eh_dev_reset()
5379 spin_unlock(&hrrq->_lock); in __ipr_eh_dev_reset()
5381 res->resetting_device = 1; in __ipr_eh_dev_reset()
5384 if (ipr_is_gata(res) && res->sata_port) { in __ipr_eh_dev_reset()
5385 ap = res->sata_port->ap; in __ipr_eh_dev_reset()
5386 spin_unlock_irq(scsi_cmd->device->host->host_lock); in __ipr_eh_dev_reset()
5388 spin_lock_irq(scsi_cmd->device->host->host_lock); in __ipr_eh_dev_reset()
5391 res->resetting_device = 0; in __ipr_eh_dev_reset()
5392 res->reset_occurred = 1; in __ipr_eh_dev_reset()
5404 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_dev_reset()
5405 res = cmd->device->hostdata; in ipr_eh_dev_reset()
5410 spin_lock_irq(cmd->device->host->host_lock); in ipr_eh_dev_reset()
5412 spin_unlock_irq(cmd->device->host->host_lock); in ipr_eh_dev_reset()
5415 if (ipr_is_gata(res) && res->sata_port) in ipr_eh_dev_reset()
5418 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); in ipr_eh_dev_reset()
5425 * ipr_bus_reset_done - Op done function for bus reset.
5435 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_bus_reset_done()
5439 if (!ioa_cfg->sis64) in ipr_bus_reset_done()
5440 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_bus_reset_done()
5441 if (res->res_handle == ipr_cmd->ioarcb.res_handle) { in ipr_bus_reset_done()
5442 scsi_report_bus_reset(ioa_cfg->host, res->bus); in ipr_bus_reset_done()
5451 if (ipr_cmd->sibling->sibling) in ipr_bus_reset_done()
5452 ipr_cmd->sibling->sibling = NULL; in ipr_bus_reset_done()
5454 ipr_cmd->sibling->done(ipr_cmd->sibling); in ipr_bus_reset_done()
5456 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_bus_reset_done()
5461 * ipr_abort_timeout - An abort task has timed out
5475 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_abort_timeout()
5480 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5481 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { in ipr_abort_timeout()
5482 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5486 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); in ipr_abort_timeout()
5488 ipr_cmd->sibling = reset_cmd; in ipr_abort_timeout()
5489 reset_cmd->sibling = ipr_cmd; in ipr_abort_timeout()
5490 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; in ipr_abort_timeout()
5491 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; in ipr_abort_timeout()
5492 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_abort_timeout()
5493 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; in ipr_abort_timeout()
5494 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; in ipr_abort_timeout()
5497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5502 * ipr_cancel_op - Cancel specified op
5521 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; in ipr_cancel_op()
5522 res = scsi_cmd->device->hostdata; in ipr_cancel_op()
5525 * This will force the mid-layer to call ipr_eh_host_reset, in ipr_cancel_op()
5528 if (ioa_cfg->in_reset_reload || in ipr_cancel_op()
5529 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5539 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_cancel_op()
5545 spin_lock(&hrrq->_lock); in ipr_cancel_op()
5546 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_cancel_op()
5547 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { in ipr_cancel_op()
5548 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { in ipr_cancel_op()
5554 spin_unlock(&hrrq->_lock); in ipr_cancel_op()
5561 ipr_cmd->ioarcb.res_handle = res->res_handle; in ipr_cancel_op()
5562 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_cancel_op()
5563 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_cancel_op()
5564 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; in ipr_cancel_op()
5565 ipr_cmd->u.sdev = scsi_cmd->device; in ipr_cancel_op()
5568 scsi_cmd->cmnd[0]); in ipr_cancel_op()
5570 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_cancel_op()
5581 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_cancel_op()
5583 res->needs_sync_complete = 1; in ipr_cancel_op()
5590 * ipr_scan_finished - Report whether scan is done
5600 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_scan_finished()
5603 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_scan_finished()
5604 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5606 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) in ipr_scan_finished()
5608 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_scan_finished()
5613 * ipr_eh_abort - Reset the host adapter
5627 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in ipr_eh_abort()
5629 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); in ipr_eh_abort()
5631 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); in ipr_eh_abort()
5634 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); in ipr_eh_abort()
5640 * ipr_handle_other_interrupt - Handle "other" interrupts
5653 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_handle_other_interrupt()
5660 if (ioa_cfg->sis64) { in ipr_handle_other_interrupt()
5661 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_handle_other_interrupt()
5662 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5666 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); in ipr_handle_other_interrupt()
5667 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5668 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5669 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5670 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5680 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_handle_other_interrupt()
5681 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_handle_other_interrupt()
5683 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5684 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5685 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5687 if (ioa_cfg->clear_isr) { in ipr_handle_other_interrupt()
5689 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5691 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); in ipr_handle_other_interrupt()
5692 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_handle_other_interrupt()
5697 ioa_cfg->ioa_unit_checked = 1; in ipr_handle_other_interrupt()
5699 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5702 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5705 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_handle_other_interrupt()
5706 ioa_cfg->sdt_state = GET_DUMP; in ipr_handle_other_interrupt()
5716 * ipr_isr_eh - Interrupt service routine error handler
5726 ioa_cfg->errors_logged++; in ipr_isr_eh()
5727 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); in ipr_isr_eh()
5729 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_isr_eh()
5730 ioa_cfg->sdt_state = GET_DUMP; in ipr_isr_eh()
5741 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; in ipr_process_hrrq()
5745 if (!hrr_queue->allow_interrupts) in ipr_process_hrrq()
5748 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_process_hrrq()
5749 hrr_queue->toggle_bit) { in ipr_process_hrrq()
5751 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & in ipr_process_hrrq()
5755 if (unlikely(cmd_index > hrr_queue->max_cmd_id || in ipr_process_hrrq()
5756 cmd_index < hrr_queue->min_cmd_id)) { in ipr_process_hrrq()
5763 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; in ipr_process_hrrq()
5764 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_hrrq()
5768 list_move_tail(&ipr_cmd->queue, doneq); in ipr_process_hrrq()
5770 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { in ipr_process_hrrq()
5771 hrr_queue->hrrq_curr++; in ipr_process_hrrq()
5773 hrr_queue->hrrq_curr = hrr_queue->hrrq_start; in ipr_process_hrrq()
5774 hrr_queue->toggle_bit ^= 1u; in ipr_process_hrrq()
5794 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_iopoll()
5799 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_iopoll()
5802 list_del(&ipr_cmd->queue); in ipr_iopoll()
5803 del_timer(&ipr_cmd->timer); in ipr_iopoll()
5804 ipr_cmd->fast_done(ipr_cmd); in ipr_iopoll()
5811 * ipr_isr - Interrupt service routine
5821 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr()
5830 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr()
5832 if (!hrrq->allow_interrupts) { in ipr_isr()
5833 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5838 if (ipr_process_hrrq(hrrq, -1, &doneq)) { in ipr_isr()
5841 if (!ioa_cfg->clear_isr) in ipr_isr()
5848 ioa_cfg->regs.clr_interrupt_reg32); in ipr_isr()
5849 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5854 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5869 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5871 list_del(&ipr_cmd->queue); in ipr_isr()
5872 del_timer(&ipr_cmd->timer); in ipr_isr()
5873 ipr_cmd->fast_done(ipr_cmd); in ipr_isr()
5879 * ipr_isr_mhrrq - Interrupt service routine
5889 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq()
5895 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5898 if (!hrrq->allow_interrupts) { in ipr_isr_mhrrq()
5899 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5903 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_isr_mhrrq()
5904 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5905 hrrq->toggle_bit) { in ipr_isr_mhrrq()
5906 irq_poll_sched(&hrrq->iopoll); in ipr_isr_mhrrq()
5907 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5911 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5912 hrrq->toggle_bit) in ipr_isr_mhrrq()
5914 if (ipr_process_hrrq(hrrq, -1, &doneq)) in ipr_isr_mhrrq()
5918 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5921 list_del(&ipr_cmd->queue); in ipr_isr_mhrrq()
5922 del_timer(&ipr_cmd->timer); in ipr_isr_mhrrq()
5923 ipr_cmd->fast_done(ipr_cmd); in ipr_isr_mhrrq()
5929 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5934 * 0 on success / -1 on failure
5943 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_build_ioadl64()
5944 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioadl64()
5945 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_build_ioadl64()
5954 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl64()
5955 return -1; in ipr_build_ioadl64()
5958 ipr_cmd->dma_use_sg = nseg; in ipr_build_ioadl64()
5960 ioarcb->data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl64()
5961 ioarcb->ioadl_len = in ipr_build_ioadl64()
5962 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl64()
5964 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { in ipr_build_ioadl64()
5966 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ioadl64()
5967 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) in ipr_build_ioadl64()
5970 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ioadl64()
5976 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ioadl64()
5981 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5986 * 0 on success / -1 on failure
5995 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_build_ioadl()
5996 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioadl()
5997 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ioadl()
6005 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl()
6006 return -1; in ipr_build_ioadl()
6009 ipr_cmd->dma_use_sg = nseg; in ipr_build_ioadl()
6011 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { in ipr_build_ioadl()
6013 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ioadl()
6014 ioarcb->data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl()
6015 ioarcb->ioadl_len = in ipr_build_ioadl()
6016 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl()
6017 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { in ipr_build_ioadl()
6019 ioarcb->read_data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl()
6020 ioarcb->read_ioadl_len = in ipr_build_ioadl()
6021 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl()
6024 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { in ipr_build_ioadl()
6025 ioadl = ioarcb->u.add_data.u.ioadl; in ipr_build_ioadl()
6026 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + in ipr_build_ioadl()
6028 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_build_ioadl()
6031 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ioadl()
6037 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ioadl()
6042 * __ipr_erp_done - Process completion of ERP for a device
6053 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in __ipr_erp_done()
6054 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in __ipr_erp_done()
6055 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in __ipr_erp_done()
6058 scsi_cmd->result |= (DID_ERROR << 16); in __ipr_erp_done()
6062 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, in __ipr_erp_done()
6068 res->needs_sync_complete = 1; in __ipr_erp_done()
6069 res->in_erp = 0; in __ipr_erp_done()
6071 scsi_dma_unmap(ipr_cmd->scsi_cmd); in __ipr_erp_done()
6073 if (ipr_cmd->eh_comp) in __ipr_erp_done()
6074 complete(ipr_cmd->eh_comp); in __ipr_erp_done()
6075 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_erp_done()
6079 * ipr_erp_done - Process completion of ERP for a device
6090 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_erp_done()
6093 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_erp_done()
6095 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_erp_done()
6099 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6107 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_reinit_ipr_cmnd_for_erp()
6108 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_reinit_ipr_cmnd_for_erp()
6109 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_reinit_ipr_cmnd_for_erp()
6111 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); in ipr_reinit_ipr_cmnd_for_erp()
6112 ioarcb->data_transfer_length = 0; in ipr_reinit_ipr_cmnd_for_erp()
6113 ioarcb->read_data_transfer_length = 0; in ipr_reinit_ipr_cmnd_for_erp()
6114 ioarcb->ioadl_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6115 ioarcb->read_ioadl_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6116 ioasa->hdr.ioasc = 0; in ipr_reinit_ipr_cmnd_for_erp()
6117 ioasa->hdr.residual_data_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6119 if (ipr_cmd->ioa_cfg->sis64) in ipr_reinit_ipr_cmnd_for_erp()
6120 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_reinit_ipr_cmnd_for_erp()
6123 ioarcb->write_ioadl_addr = in ipr_reinit_ipr_cmnd_for_erp()
6125 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_reinit_ipr_cmnd_for_erp()
6130 * __ipr_erp_request_sense - Send request sense to a device
6141 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in __ipr_erp_request_sense()
6142 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in __ipr_erp_request_sense()
6151 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; in __ipr_erp_request_sense()
6152 cmd_pkt->cdb[0] = REQUEST_SENSE; in __ipr_erp_request_sense()
6153 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; in __ipr_erp_request_sense()
6154 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; in __ipr_erp_request_sense()
6155 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in __ipr_erp_request_sense()
6156 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); in __ipr_erp_request_sense()
6158 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, in __ipr_erp_request_sense()
6166 * ipr_erp_request_sense - Send request sense to a device
6177 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_erp_request_sense()
6180 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_erp_request_sense()
6182 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_erp_request_sense()
6186 * ipr_erp_cancel_all - Send cancel all to a device
6192 * Cancel all will return them to us.
6199 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_erp_cancel_all()
6200 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in ipr_erp_cancel_all()
6203 res->in_erp = 1; in ipr_erp_cancel_all()
6207 if (!scsi_cmd->device->simple_tags) { in ipr_erp_cancel_all()
6212 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_erp_cancel_all()
6213 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_erp_cancel_all()
6214 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; in ipr_erp_cancel_all()
6221 * ipr_dump_ioasa - Dump contents of IOASA
6239 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_dump_ioasa()
6243 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; in ipr_dump_ioasa()
6244 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; in ipr_dump_ioasa()
6249 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) in ipr_dump_ioasa()
6257 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { in ipr_dump_ioasa()
6259 if (ioasa->hdr.ilid != 0) in ipr_dump_ioasa()
6271 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); in ipr_dump_ioasa()
6272 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) in ipr_dump_ioasa()
6274 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) in ipr_dump_ioasa()
6289 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6298 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; in ipr_gen_sense()
6299 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; in ipr_gen_sense()
6300 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_gen_sense()
6301 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); in ipr_gen_sense()
6308 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; in ipr_gen_sense()
6312 ioasa->u.vset.failing_lba_hi != 0) { in ipr_gen_sense()
6323 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); in ipr_gen_sense()
6330 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); in ipr_gen_sense()
6344 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { in ipr_gen_sense()
6355 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; in ipr_gen_sense()
6358 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; in ipr_gen_sense()
6362 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); in ipr_gen_sense()
6364 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); in ipr_gen_sense()
6379 * ipr_get_autosense - Copy autosense data to sense buffer
6390 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_get_autosense()
6391 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; in ipr_get_autosense()
6393 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) in ipr_get_autosense()
6396 if (ipr_cmd->ioa_cfg->sis64) in ipr_get_autosense()
6397 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, in ipr_get_autosense()
6398 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), in ipr_get_autosense()
6401 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, in ipr_get_autosense()
6402 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), in ipr_get_autosense()
6408 * ipr_erp_start - Process an error response for a SCSI op
6421 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_erp_start()
6422 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in ipr_erp_start()
6423 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_erp_start()
6439 scsi_cmd->result |= (DID_ABORT << 16); in ipr_erp_start()
6441 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6445 scsi_cmd->result |= (DID_NO_CONNECT << 16); in ipr_erp_start()
6448 scsi_cmd->result |= (DID_NO_CONNECT << 16); in ipr_erp_start()
6450 res->needs_sync_complete = 1; in ipr_erp_start()
6453 if (!res->in_erp) in ipr_erp_start()
6454 res->needs_sync_complete = 1; in ipr_erp_start()
6455 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6461 * so SCSI mid-layer and upper layers handle it accordingly. in ipr_erp_start()
6463 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) in ipr_erp_start()
6464 scsi_cmd->result |= (DID_PASSTHROUGH << 16); in ipr_erp_start()
6472 if (!res->resetting_device) in ipr_erp_start()
6473 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); in ipr_erp_start()
6474 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6476 res->needs_sync_complete = 1; in ipr_erp_start()
6479 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); in ipr_erp_start()
6489 res->needs_sync_complete = 1; in ipr_erp_start()
6494 if (res->raw_mode) { in ipr_erp_start()
6495 res->raw_mode = 0; in ipr_erp_start()
6496 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6498 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6502 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6504 res->needs_sync_complete = 1; in ipr_erp_start()
6508 scsi_dma_unmap(ipr_cmd->scsi_cmd); in ipr_erp_start()
6510 if (ipr_cmd->eh_comp) in ipr_erp_start()
6511 complete(ipr_cmd->eh_comp); in ipr_erp_start()
6512 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_erp_start()
6516 * ipr_scsi_done - mid-layer done function
6520 * ops generated by the SCSI mid-layer
6527 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_scsi_done()
6528 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_scsi_done()
6529 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_scsi_done()
6532 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); in ipr_scsi_done()
6537 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6539 if (ipr_cmd->eh_comp) in ipr_scsi_done()
6540 complete(ipr_cmd->eh_comp); in ipr_scsi_done()
6541 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_scsi_done()
6542 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6544 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6545 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6547 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6548 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6553 * ipr_queuecommand - Queue a mid-layer request
6557 * This function queues a request generated by the mid-layer.
6576 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_queuecommand()
6578 scsi_cmd->result = (DID_OK << 16); in ipr_queuecommand()
6579 res = scsi_cmd->device->hostdata; in ipr_queuecommand()
6581 if (ipr_is_gata(res) && res->sata_port) { in ipr_queuecommand()
6582 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6583 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); in ipr_queuecommand()
6584 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6589 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6591 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6594 * We have told the host to stop giving us new requests, but in ipr_queuecommand()
6597 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { in ipr_queuecommand()
6598 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6603 * FIXME - Create scsi_set_host_offline interface in ipr_queuecommand()
6606 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { in ipr_queuecommand()
6607 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6613 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6616 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6619 ioarcb = &ipr_cmd->ioarcb; in ipr_queuecommand()
6621 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); in ipr_queuecommand()
6622 ipr_cmd->scsi_cmd = scsi_cmd; in ipr_queuecommand()
6623 ipr_cmd->done = ipr_scsi_eh_done; in ipr_queuecommand()
6626 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6627 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6629 if (res->reset_occurred) { in ipr_queuecommand()
6630 res->reset_occurred = 0; in ipr_queuecommand()
6631 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; in ipr_queuecommand()
6636 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; in ipr_queuecommand()
6638 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; in ipr_queuecommand()
6639 if (scsi_cmd->flags & SCMD_TAGGED) in ipr_queuecommand()
6640 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; in ipr_queuecommand()
6642 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; in ipr_queuecommand()
6645 if (scsi_cmd->cmnd[0] >= 0xC0 && in ipr_queuecommand()
6646 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { in ipr_queuecommand()
6647 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_queuecommand()
6649 if (res->raw_mode && ipr_is_af_dasd_device(res)) { in ipr_queuecommand()
6650 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; in ipr_queuecommand()
6652 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6656 if (ioa_cfg->sis64) in ipr_queuecommand()
6661 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6662 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { in ipr_queuecommand()
6663 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6664 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6670 if (unlikely(hrrq->ioa_is_dead)) { in ipr_queuecommand()
6671 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6672 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6677 ioarcb->res_handle = res->res_handle; in ipr_queuecommand()
6678 if (res->needs_sync_complete) { in ipr_queuecommand()
6679 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; in ipr_queuecommand()
6680 res->needs_sync_complete = 0; in ipr_queuecommand()
6682 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); in ipr_queuecommand()
6685 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6689 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6690 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in ipr_queuecommand()
6691 scsi_cmd->result = (DID_NO_CONNECT << 16); in ipr_queuecommand()
6693 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6698 * ipr_ioctl - IOCTL handler
6711 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_ioctl()
6714 return -ENOTTY; in ipr_ioctl()
6715 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg); in ipr_ioctl()
6718 return -EINVAL; in ipr_ioctl()
6722 * ipr_ioa_info - Get information about the card/driver
6734 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; in ipr_ioa_info()
6736 spin_lock_irqsave(host->host_lock, lock_flags); in ipr_ioa_info()
6737 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); in ipr_ioa_info()
6738 spin_unlock_irqrestore(host->host_lock, lock_flags); in ipr_ioa_info()
6765 .this_id = -1,
6775 * ipr_ata_phy_reset - libata phy_reset handler
6782 struct ipr_sata_port *sata_port = ap->private_data; in ipr_ata_phy_reset()
6783 struct ipr_resource_entry *res = sata_port->res; in ipr_ata_phy_reset()
6784 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_phy_reset()
6788 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6789 while (ioa_cfg->in_reset_reload) { in ipr_ata_phy_reset()
6790 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6791 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_phy_reset()
6792 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6795 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ata_phy_reset()
6801 ap->link.device[0].class = ATA_DEV_NONE; in ipr_ata_phy_reset()
6805 ap->link.device[0].class = res->ata_class; in ipr_ata_phy_reset()
6806 if (ap->link.device[0].class == ATA_DEV_UNKNOWN) in ipr_ata_phy_reset()
6807 ap->link.device[0].class = ATA_DEV_NONE; in ipr_ata_phy_reset()
6810 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6815 * ipr_ata_post_internal - Cleanup after an internal command
6823 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_ata_post_internal()
6824 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_post_internal()
6829 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6830 while (ioa_cfg->in_reset_reload) { in ipr_ata_post_internal()
6831 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6832 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_post_internal()
6833 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6837 spin_lock(&hrrq->_lock); in ipr_ata_post_internal()
6838 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_ata_post_internal()
6839 if (ipr_cmd->qc == qc) { in ipr_ata_post_internal()
6840 ipr_device_reset(ioa_cfg, sata_port->res); in ipr_ata_post_internal()
6844 spin_unlock(&hrrq->_lock); in ipr_ata_post_internal()
6846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6850 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6860 regs->feature = tf->feature; in ipr_copy_sata_tf()
6861 regs->nsect = tf->nsect; in ipr_copy_sata_tf()
6862 regs->lbal = tf->lbal; in ipr_copy_sata_tf()
6863 regs->lbam = tf->lbam; in ipr_copy_sata_tf()
6864 regs->lbah = tf->lbah; in ipr_copy_sata_tf()
6865 regs->device = tf->device; in ipr_copy_sata_tf()
6866 regs->command = tf->command; in ipr_copy_sata_tf()
6867 regs->hob_feature = tf->hob_feature; in ipr_copy_sata_tf()
6868 regs->hob_nsect = tf->hob_nsect; in ipr_copy_sata_tf()
6869 regs->hob_lbal = tf->hob_lbal; in ipr_copy_sata_tf()
6870 regs->hob_lbam = tf->hob_lbam; in ipr_copy_sata_tf()
6871 regs->hob_lbah = tf->hob_lbah; in ipr_copy_sata_tf()
6872 regs->ctl = tf->ctl; in ipr_copy_sata_tf()
6876 * ipr_sata_done - done function for SATA commands
6880 * ops generated by the SCSI mid-layer to SATA devices
6887 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_sata_done()
6888 struct ata_queued_cmd *qc = ipr_cmd->qc; in ipr_sata_done()
6889 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_sata_done()
6890 struct ipr_resource_entry *res = sata_port->res; in ipr_sata_done()
6891 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_sata_done()
6893 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6894 if (ipr_cmd->ioa_cfg->sis64) in ipr_sata_done()
6895 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, in ipr_sata_done()
6898 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, in ipr_sata_done()
6902 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) in ipr_sata_done()
6903 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); in ipr_sata_done()
6906 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); in ipr_sata_done()
6908 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); in ipr_sata_done()
6909 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_sata_done()
6910 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6915 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6924 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ata_ioadl64()
6925 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; in ipr_build_ata_ioadl64()
6927 int len = qc->nbytes; in ipr_build_ata_ioadl64()
6930 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_build_ata_ioadl64()
6935 if (qc->dma_dir == DMA_TO_DEVICE) { in ipr_build_ata_ioadl64()
6937 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ata_ioadl64()
6938 } else if (qc->dma_dir == DMA_FROM_DEVICE) in ipr_build_ata_ioadl64()
6941 ioarcb->data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl64()
6942 ioarcb->ioadl_len = in ipr_build_ata_ioadl64()
6943 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl64()
6944 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_build_ata_ioadl64()
6947 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ipr_build_ata_ioadl64()
6948 ioadl64->flags = cpu_to_be32(ioadl_flags); in ipr_build_ata_ioadl64()
6949 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); in ipr_build_ata_ioadl64()
6950 ioadl64->address = cpu_to_be64(sg_dma_address(sg)); in ipr_build_ata_ioadl64()
6957 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ata_ioadl64()
6961 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6970 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ata_ioadl()
6971 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ata_ioadl()
6973 int len = qc->nbytes; in ipr_build_ata_ioadl()
6980 if (qc->dma_dir == DMA_TO_DEVICE) { in ipr_build_ata_ioadl()
6982 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ata_ioadl()
6983 ioarcb->data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl()
6984 ioarcb->ioadl_len = in ipr_build_ata_ioadl()
6985 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl()
6986 } else if (qc->dma_dir == DMA_FROM_DEVICE) { in ipr_build_ata_ioadl()
6988 ioarcb->read_data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl()
6989 ioarcb->read_ioadl_len = in ipr_build_ata_ioadl()
6990 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl()
6993 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ipr_build_ata_ioadl()
6994 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); in ipr_build_ata_ioadl()
6995 ioadl->address = cpu_to_be32(sg_dma_address(sg)); in ipr_build_ata_ioadl()
7002 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ata_ioadl()
7006 * ipr_qc_defer - Get a free ipr_cmd
7014 struct ata_port *ap = qc->ap; in ipr_qc_defer()
7015 struct ipr_sata_port *sata_port = ap->private_data; in ipr_qc_defer()
7016 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_defer()
7022 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_qc_defer()
7024 qc->lldd_task = NULL; in ipr_qc_defer()
7025 spin_lock(&hrrq->_lock); in ipr_qc_defer()
7026 if (unlikely(hrrq->ioa_is_dead)) { in ipr_qc_defer()
7027 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7031 if (unlikely(!hrrq->allow_cmds)) { in ipr_qc_defer()
7032 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7038 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7042 qc->lldd_task = ipr_cmd; in ipr_qc_defer()
7043 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7048 * ipr_qc_issue - Issue a SATA qc to a device
7056 struct ata_port *ap = qc->ap; in ipr_qc_issue()
7057 struct ipr_sata_port *sata_port = ap->private_data; in ipr_qc_issue()
7058 struct ipr_resource_entry *res = sata_port->res; in ipr_qc_issue()
7059 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_issue()
7064 if (qc->lldd_task == NULL) in ipr_qc_issue()
7067 ipr_cmd = qc->lldd_task; in ipr_qc_issue()
7071 qc->lldd_task = NULL; in ipr_qc_issue()
7072 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7073 if (unlikely(!ipr_cmd->hrrq->allow_cmds || in ipr_qc_issue()
7074 ipr_cmd->hrrq->ioa_is_dead)) { in ipr_qc_issue()
7075 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_qc_issue()
7076 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7081 ioarcb = &ipr_cmd->ioarcb; in ipr_qc_issue()
7083 if (ioa_cfg->sis64) { in ipr_qc_issue()
7084 regs = &ipr_cmd->i.ata_ioadl.regs; in ipr_qc_issue()
7085 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); in ipr_qc_issue()
7087 regs = &ioarcb->u.add_data.u.regs; in ipr_qc_issue()
7090 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); in ipr_qc_issue()
7092 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_qc_issue()
7093 ipr_cmd->qc = qc; in ipr_qc_issue()
7094 ipr_cmd->done = ipr_sata_done; in ipr_qc_issue()
7095 ipr_cmd->ioarcb.res_handle = res->res_handle; in ipr_qc_issue()
7096 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; in ipr_qc_issue()
7097 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; in ipr_qc_issue()
7098 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_qc_issue()
7099 ipr_cmd->dma_use_sg = qc->n_elem; in ipr_qc_issue()
7101 if (ioa_cfg->sis64) in ipr_qc_issue()
7106 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; in ipr_qc_issue()
7107 ipr_copy_sata_tf(regs, &qc->tf); in ipr_qc_issue()
7108 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); in ipr_qc_issue()
7111 switch (qc->tf.protocol) { in ipr_qc_issue()
7117 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; in ipr_qc_issue()
7122 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; in ipr_qc_issue()
7126 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; in ipr_qc_issue()
7127 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; in ipr_qc_issue()
7132 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7137 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7143 * ipr_qc_fill_rtf - Read result TF
7151 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_qc_fill_rtf()
7152 struct ipr_ioasa_gata *g = &sata_port->ioasa; in ipr_qc_fill_rtf()
7153 struct ata_taskfile *tf = &qc->result_tf; in ipr_qc_fill_rtf()
7155 tf->feature = g->error; in ipr_qc_fill_rtf()
7156 tf->nsect = g->nsect; in ipr_qc_fill_rtf()
7157 tf->lbal = g->lbal; in ipr_qc_fill_rtf()
7158 tf->lbam = g->lbam; in ipr_qc_fill_rtf()
7159 tf->lbah = g->lbah; in ipr_qc_fill_rtf()
7160 tf->device = g->device; in ipr_qc_fill_rtf()
7161 tf->command = g->status; in ipr_qc_fill_rtf()
7162 tf->hob_nsect = g->hob_nsect; in ipr_qc_fill_rtf()
7163 tf->hob_lbal = g->hob_lbal; in ipr_qc_fill_rtf()
7164 tf->hob_lbam = g->hob_lbam; in ipr_qc_fill_rtf()
7165 tf->hob_lbah = g->hob_lbah; in ipr_qc_fill_rtf()
7204 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7218 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { in ipr_invalid_adapter()
7231 * ipr_ioa_bringdown_done - IOA bring down completion.
7242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_bringdown_done()
7246 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
7248 ioa_cfg->scsi_unblock = 1; in ipr_ioa_bringdown_done()
7249 schedule_work(&ioa_cfg->work_q); in ipr_ioa_bringdown_done()
7252 ioa_cfg->in_reset_reload = 0; in ipr_ioa_bringdown_done()
7253 ioa_cfg->reset_retries = 0; in ipr_ioa_bringdown_done()
7254 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_ioa_bringdown_done()
7255 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7256 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
7257 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7261 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_bringdown_done()
7262 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_bringdown_done()
7269 * ipr_ioa_reset_done - IOA reset completion.
7273 * It schedules any necessary mid-layer add/removes and
7281 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_reset_done()
7286 ioa_cfg->in_reset_reload = 0; in ipr_ioa_reset_done()
7287 for (j = 0; j < ioa_cfg->hrrq_num; j++) { in ipr_ioa_reset_done()
7288 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7289 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
7290 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7293 ioa_cfg->reset_cmd = NULL; in ipr_ioa_reset_done()
7294 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; in ipr_ioa_reset_done()
7296 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_ioa_reset_done()
7297 if (res->add_to_ml || res->del_from_ml) { in ipr_ioa_reset_done()
7302 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7305 list_del_init(&ioa_cfg->hostrcb[j]->queue); in ipr_ioa_reset_done()
7309 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
7313 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
7316 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); in ipr_ioa_reset_done()
7317 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); in ipr_ioa_reset_done()
7319 ioa_cfg->reset_retries = 0; in ipr_ioa_reset_done()
7320 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_reset_done()
7321 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_reset_done()
7323 ioa_cfg->scsi_unblock = 1; in ipr_ioa_reset_done()
7324 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7330 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7341 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); in ipr_set_sup_dev_dflt()
7342 supported_dev->num_records = 1; in ipr_set_sup_dev_dflt()
7343 supported_dev->data_length = in ipr_set_sup_dev_dflt()
7345 supported_dev->reserved = 0; in ipr_set_sup_dev_dflt()
7349 * ipr_set_supported_devs - Send Set Supported Devices for a device
7359 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_set_supported_devs()
7360 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; in ipr_set_supported_devs()
7361 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_set_supported_devs()
7362 struct ipr_resource_entry *res = ipr_cmd->u.res; in ipr_set_supported_devs()
7364 ipr_cmd->job_step = ipr_ioa_reset_done; in ipr_set_supported_devs()
7366 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { in ipr_set_supported_devs()
7370 ipr_cmd->u.res = res; in ipr_set_supported_devs()
7371 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); in ipr_set_supported_devs()
7373 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_set_supported_devs()
7374 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_set_supported_devs()
7375 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_set_supported_devs()
7377 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; in ipr_set_supported_devs()
7378 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; in ipr_set_supported_devs()
7379 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; in ipr_set_supported_devs()
7380 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; in ipr_set_supported_devs()
7383 ioa_cfg->vpd_cbs_dma + in ipr_set_supported_devs()
7391 if (!ioa_cfg->sis64) in ipr_set_supported_devs()
7392 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_set_supported_devs()
7402 * ipr_get_mode_page - Locate specified mode page
7417 if (!mode_pages || (mode_pages->hdr.length == 0)) in ipr_get_mode_page()
7420 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; in ipr_get_mode_page()
7422 (mode_pages->data + mode_pages->hdr.block_desc_len); in ipr_get_mode_page()
7426 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) in ipr_get_mode_page()
7431 mode_hdr->page_length); in ipr_get_mode_page()
7432 length -= page_length; in ipr_get_mode_page()
7441 * ipr_check_term_power - Check for term power errors
7461 entry_length = mode_page->entry_length; in ipr_check_term_power()
7463 bus = mode_page->bus; in ipr_check_term_power()
7465 for (i = 0; i < mode_page->num_entries; i++) { in ipr_check_term_power()
7466 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { in ipr_check_term_power()
7467 dev_err(&ioa_cfg->pdev->dev, in ipr_check_term_power()
7469 bus->res_addr.bus); in ipr_check_term_power()
7477 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7494 ioa_cfg->bus_attr[i].bus_width); in ipr_scsi_bus_speed_limit()
7496 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) in ipr_scsi_bus_speed_limit()
7497 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; in ipr_scsi_bus_speed_limit()
7502 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7522 entry_length = mode_page->entry_length; in ipr_modify_ioafp_mode_page_28()
7525 for (i = 0, bus = mode_page->bus; in ipr_modify_ioafp_mode_page_28()
7526 i < mode_page->num_entries; in ipr_modify_ioafp_mode_page_28()
7528 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { in ipr_modify_ioafp_mode_page_28()
7529 dev_err(&ioa_cfg->pdev->dev, in ipr_modify_ioafp_mode_page_28()
7531 IPR_GET_PHYS_LOC(bus->res_addr)); in ipr_modify_ioafp_mode_page_28()
7535 bus_attr = &ioa_cfg->bus_attr[i]; in ipr_modify_ioafp_mode_page_28()
7536 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; in ipr_modify_ioafp_mode_page_28()
7537 bus->bus_width = bus_attr->bus_width; in ipr_modify_ioafp_mode_page_28()
7538 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); in ipr_modify_ioafp_mode_page_28()
7539 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; in ipr_modify_ioafp_mode_page_28()
7540 if (bus_attr->qas_enabled) in ipr_modify_ioafp_mode_page_28()
7541 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; in ipr_modify_ioafp_mode_page_28()
7543 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; in ipr_modify_ioafp_mode_page_28()
7548 * ipr_build_mode_select - Build a mode select command
7562 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_select()
7564 ioarcb->res_handle = res_handle; in ipr_build_mode_select()
7565 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_select()
7566 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_mode_select()
7567 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; in ipr_build_mode_select()
7568 ioarcb->cmd_pkt.cdb[1] = parm; in ipr_build_mode_select()
7569 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_select()
7575 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7586 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page28()
7587 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page28()
7594 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page28()
7595 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page28()
7598 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page28()
7601 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_ioafp_mode_select_page28()
7602 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_ioafp_mode_select_page28()
7611 * ipr_build_mode_sense - Builds a mode sense command
7625 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_sense()
7627 ioarcb->res_handle = res_handle; in ipr_build_mode_sense()
7628 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; in ipr_build_mode_sense()
7629 ioarcb->cmd_pkt.cdb[2] = parm; in ipr_build_mode_sense()
7630 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_sense()
7631 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_sense()
7637 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7647 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cmd_failed()
7648 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_cmd_failed()
7650 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_cmd_failed()
7652 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); in ipr_reset_cmd_failed()
7655 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cmd_failed()
7660 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_mode_sense_failed()
7672 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_failed()
7675 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_reset_mode_sense_failed()
7676 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_reset_mode_sense_failed()
7685 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7696 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page28()
7700 0x28, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page28()
7704 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; in ipr_ioafp_mode_sense_page28()
7705 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; in ipr_ioafp_mode_sense_page28()
7714 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7724 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page24()
7725 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page24()
7734 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; in ipr_ioafp_mode_select_page24()
7736 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page24()
7737 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page24()
7740 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page24()
7743 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_ioafp_mode_select_page24()
7751 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7762 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_page24_failed()
7765 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_reset_mode_sense_page24_failed()
7773 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7784 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page24()
7788 0x24, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page24()
7792 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; in ipr_ioafp_mode_sense_page24()
7793 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; in ipr_ioafp_mode_sense_page24()
7802 * ipr_init_res_table - Initialize the resource table
7807 * devices and schedule adding/removing them from the mid-layer
7815 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_init_res_table()
7822 if (ioa_cfg->sis64) in ipr_init_res_table()
7823 flag = ioa_cfg->u.cfg_table64->hdr64.flags; in ipr_init_res_table()
7825 flag = ioa_cfg->u.cfg_table->hdr.flags; in ipr_init_res_table()
7828 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); in ipr_init_res_table()
7830 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) in ipr_init_res_table()
7831 list_move_tail(&res->queue, &old_res); in ipr_init_res_table()
7833 if (ioa_cfg->sis64) in ipr_init_res_table()
7834 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); in ipr_init_res_table()
7836 entries = ioa_cfg->u.cfg_table->hdr.num_entries; in ipr_init_res_table()
7839 if (ioa_cfg->sis64) in ipr_init_res_table()
7840 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; in ipr_init_res_table()
7842 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; in ipr_init_res_table()
7847 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7854 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_init_res_table()
7855 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); in ipr_init_res_table()
7860 res = list_entry(ioa_cfg->free_res_q.next, in ipr_init_res_table()
7862 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7864 res->add_to_ml = 1; in ipr_init_res_table()
7865 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) in ipr_init_res_table()
7866 res->sdev->allow_restart = 1; in ipr_init_res_table()
7873 if (res->sdev) { in ipr_init_res_table()
7874 res->del_from_ml = 1; in ipr_init_res_table()
7875 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_init_res_table()
7876 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7882 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_init_res_table()
7885 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_init_res_table()
7886 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; in ipr_init_res_table()
7888 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_init_res_table()
7895 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7906 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_query_ioa_cfg()
7907 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_query_ioa_cfg()
7908 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_ioafp_query_ioa_cfg()
7909 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_query_ioa_cfg()
7912 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) in ipr_ioafp_query_ioa_cfg()
7913 ioa_cfg->dual_raid = 1; in ipr_ioafp_query_ioa_cfg()
7914 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", in ipr_ioafp_query_ioa_cfg()
7915 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_ioafp_query_ioa_cfg()
7916 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); in ipr_ioafp_query_ioa_cfg()
7917 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_query_ioa_cfg()
7918 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_query_ioa_cfg()
7920 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; in ipr_ioafp_query_ioa_cfg()
7921 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; in ipr_ioafp_query_ioa_cfg()
7922 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; in ipr_ioafp_query_ioa_cfg()
7923 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; in ipr_ioafp_query_ioa_cfg()
7925 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, in ipr_ioafp_query_ioa_cfg()
7928 ipr_cmd->job_step = ipr_init_res_table; in ipr_ioafp_query_ioa_cfg()
7938 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_ioa_service_action_failed()
7949 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioa_service_action()
7951 ioarcb->res_handle = res_handle; in ipr_build_ioa_service_action()
7952 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; in ipr_build_ioa_service_action()
7953 ioarcb->cmd_pkt.cdb[1] = sa_code; in ipr_build_ioa_service_action()
7954 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_build_ioa_service_action()
7958 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7967 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_set_caching_parameters()
7968 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_set_caching_parameters()
7969 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_set_caching_parameters()
7973 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; in ipr_ioafp_set_caching_parameters()
7975 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { in ipr_ioafp_set_caching_parameters()
7980 ioarcb->cmd_pkt.cdb[2] = 0x40; in ipr_ioafp_set_caching_parameters()
7982 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; in ipr_ioafp_set_caching_parameters()
7995 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
8010 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_inquiry()
8013 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_ioafp_inquiry()
8014 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_inquiry()
8016 ioarcb->cmd_pkt.cdb[0] = INQUIRY; in ipr_ioafp_inquiry()
8017 ioarcb->cmd_pkt.cdb[1] = flags; in ipr_ioafp_inquiry()
8018 ioarcb->cmd_pkt.cdb[2] = page; in ipr_ioafp_inquiry()
8019 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_ioafp_inquiry()
8028 * ipr_inquiry_page_supported - Is the given inquiry page supported
8041 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) in ipr_inquiry_page_supported()
8042 if (page0->page[i] == page) in ipr_inquiry_page_supported()
8049 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8060 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_pageC4_inquiry()
8061 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_pageC4_inquiry()
8062 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_pageC4_inquiry()
8065 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; in ipr_ioafp_pageC4_inquiry()
8070 (ioa_cfg->vpd_cbs_dma in ipr_ioafp_pageC4_inquiry()
8082 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8093 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_cap_inquiry()
8094 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_cap_inquiry()
8095 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_cap_inquiry()
8098 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; in ipr_ioafp_cap_inquiry()
8103 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), in ipr_ioafp_cap_inquiry()
8113 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8124 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page3_inquiry()
8128 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; in ipr_ioafp_page3_inquiry()
8131 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), in ipr_ioafp_page3_inquiry()
8139 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8150 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page0_inquiry()
8156 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); in ipr_ioafp_page0_inquiry()
8158 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); in ipr_ioafp_page0_inquiry()
8161 dev_err(&ioa_cfg->pdev->dev, in ipr_ioafp_page0_inquiry()
8165 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_ioafp_page0_inquiry()
8167 list_add_tail(&ipr_cmd->queue, in ipr_ioafp_page0_inquiry()
8168 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
8173 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; in ipr_ioafp_page0_inquiry()
8176 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), in ipr_ioafp_page0_inquiry()
8184 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_std_inquiry()
8197 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; in ipr_ioafp_std_inquiry()
8200 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), in ipr_ioafp_std_inquiry()
8208 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8219 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_identify_hrrq()
8220 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_identify_hrrq()
8224 ipr_cmd->job_step = ipr_ioafp_std_inquiry; in ipr_ioafp_identify_hrrq()
8225 if (ioa_cfg->identify_hrrq_index == 0) in ipr_ioafp_identify_hrrq()
8226 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); in ipr_ioafp_identify_hrrq()
8228 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { in ipr_ioafp_identify_hrrq()
8229 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
8231 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; in ipr_ioafp_identify_hrrq()
8232 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_identify_hrrq()
8234 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_identify_hrrq()
8235 if (ioa_cfg->sis64) in ipr_ioafp_identify_hrrq()
8236 ioarcb->cmd_pkt.cdb[1] = 0x1; in ipr_ioafp_identify_hrrq()
8238 if (ioa_cfg->nvectors == 1) in ipr_ioafp_identify_hrrq()
8239 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
8241 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
8243 ioarcb->cmd_pkt.cdb[2] = in ipr_ioafp_identify_hrrq()
8244 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; in ipr_ioafp_identify_hrrq()
8245 ioarcb->cmd_pkt.cdb[3] = in ipr_ioafp_identify_hrrq()
8246 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; in ipr_ioafp_identify_hrrq()
8247 ioarcb->cmd_pkt.cdb[4] = in ipr_ioafp_identify_hrrq()
8248 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
8249 ioarcb->cmd_pkt.cdb[5] = in ipr_ioafp_identify_hrrq()
8250 ((u64) hrrq->host_rrq_dma) & 0xff; in ipr_ioafp_identify_hrrq()
8251 ioarcb->cmd_pkt.cdb[7] = in ipr_ioafp_identify_hrrq()
8252 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
8253 ioarcb->cmd_pkt.cdb[8] = in ipr_ioafp_identify_hrrq()
8254 (sizeof(u32) * hrrq->size) & 0xff; in ipr_ioafp_identify_hrrq()
8256 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
8257 ioarcb->cmd_pkt.cdb[9] = in ipr_ioafp_identify_hrrq()
8258 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8260 if (ioa_cfg->sis64) { in ipr_ioafp_identify_hrrq()
8261 ioarcb->cmd_pkt.cdb[10] = in ipr_ioafp_identify_hrrq()
8262 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; in ipr_ioafp_identify_hrrq()
8263 ioarcb->cmd_pkt.cdb[11] = in ipr_ioafp_identify_hrrq()
8264 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; in ipr_ioafp_identify_hrrq()
8265 ioarcb->cmd_pkt.cdb[12] = in ipr_ioafp_identify_hrrq()
8266 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; in ipr_ioafp_identify_hrrq()
8267 ioarcb->cmd_pkt.cdb[13] = in ipr_ioafp_identify_hrrq()
8268 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; in ipr_ioafp_identify_hrrq()
8271 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
8272 ioarcb->cmd_pkt.cdb[14] = in ipr_ioafp_identify_hrrq()
8273 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8278 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) in ipr_ioafp_identify_hrrq()
8279 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_ioafp_identify_hrrq()
8290 * ipr_reset_timer_done - Adapter reset timer function
8305 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_timer_done()
8308 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8310 if (ioa_cfg->reset_cmd == ipr_cmd) { in ipr_reset_timer_done()
8311 list_del(&ipr_cmd->queue); in ipr_reset_timer_done()
8312 ipr_cmd->done(ipr_cmd); in ipr_reset_timer_done()
8315 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8319 * ipr_reset_start_timer - Start a timer for adapter reset job
8337 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_start_timer()
8338 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_start_timer()
8340 ipr_cmd->timer.expires = jiffies + timeout; in ipr_reset_start_timer()
8341 ipr_cmd->timer.function = ipr_reset_timer_done; in ipr_reset_start_timer()
8342 add_timer(&ipr_cmd->timer); in ipr_reset_start_timer()
8346 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8357 spin_lock(&hrrq->_lock); in ipr_init_ioa_mem()
8358 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); in ipr_init_ioa_mem()
8361 hrrq->hrrq_start = hrrq->host_rrq; in ipr_init_ioa_mem()
8362 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; in ipr_init_ioa_mem()
8363 hrrq->hrrq_curr = hrrq->hrrq_start; in ipr_init_ioa_mem()
8364 hrrq->toggle_bit = 1; in ipr_init_ioa_mem()
8365 spin_unlock(&hrrq->_lock); in ipr_init_ioa_mem()
8369 ioa_cfg->identify_hrrq_index = 0; in ipr_init_ioa_mem()
8370 if (ioa_cfg->hrrq_num == 1) in ipr_init_ioa_mem()
8371 atomic_set(&ioa_cfg->hrrq_index, 0); in ipr_init_ioa_mem()
8373 atomic_set(&ioa_cfg->hrrq_index, 1); in ipr_init_ioa_mem()
8376 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); in ipr_init_ioa_mem()
8380 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8391 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_next_stage()
8394 feedback = readl(ioa_cfg->regs.init_feedback_reg); in ipr_reset_next_stage()
8398 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); in ipr_reset_next_stage()
8409 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8410 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8411 stage_time = ioa_cfg->transop_timeout; in ipr_reset_next_stage()
8412 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
8414 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_next_stage()
8416 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
8419 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8420 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8425 ipr_cmd->timer.expires = jiffies + stage_time * HZ; in ipr_reset_next_stage()
8426 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_next_stage()
8427 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_next_stage()
8428 add_timer(&ipr_cmd->timer); in ipr_reset_next_stage()
8430 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_next_stage()
8436 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8447 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_enable_ioa()
8453 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_enable_ioa()
8456 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_enable_ioa()
8457 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8458 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
8459 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8461 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8463 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8464 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8467 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_enable_ioa()
8471 ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8472 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8477 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_enable_ioa()
8479 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8482 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); in ipr_reset_enable_ioa()
8484 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8486 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8488 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); in ipr_reset_enable_ioa()
8490 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8491 ipr_cmd->job_step = ipr_reset_next_stage; in ipr_reset_enable_ioa()
8495 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); in ipr_reset_enable_ioa()
8496 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_enable_ioa()
8497 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_enable_ioa()
8498 add_timer(&ipr_cmd->timer); in ipr_reset_enable_ioa()
8499 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_enable_ioa()
8506 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8517 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_for_dump()
8519 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_reset_wait_for_dump()
8520 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_reset_wait_for_dump()
8521 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_reset_wait_for_dump()
8522 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_reset_wait_for_dump()
8524 ioa_cfg->dump_timeout = 1; in ipr_reset_wait_for_dump()
8525 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_wait_for_dump()
8531 * ipr_unit_check_no_data - Log a unit check/no data error log
8542 ioa_cfg->errors_logged++; in ipr_unit_check_no_data()
8543 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); in ipr_unit_check_no_data()
8547 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8564 mailbox = readl(ioa_cfg->ioa_mailbox); in ipr_get_unit_check_buffer()
8566 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { in ipr_get_unit_check_buffer()
8586 length = (be32_to_cpu(sdt.entry[0].end_token) - in ipr_get_unit_check_buffer()
8590 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, in ipr_get_unit_check_buffer()
8592 list_del_init(&hostrcb->queue); in ipr_get_unit_check_buffer()
8593 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); in ipr_get_unit_check_buffer()
8597 (__be32 *)&hostrcb->hcam, in ipr_get_unit_check_buffer()
8598 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); in ipr_get_unit_check_buffer()
8602 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_get_unit_check_buffer()
8604 ioa_cfg->sdt_state == GET_DUMP) in ipr_get_unit_check_buffer()
8605 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_get_unit_check_buffer()
8609 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_get_unit_check_buffer()
8613 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8623 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_get_unit_check_job()
8626 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_get_unit_check_job()
8628 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_get_unit_check_job()
8637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_dump_mailbox_wait()
8641 if (ioa_cfg->sdt_state != GET_DUMP) in ipr_dump_mailbox_wait()
8644 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || in ipr_dump_mailbox_wait()
8645 (readl(ioa_cfg->regs.sense_interrupt_reg) & in ipr_dump_mailbox_wait()
8648 if (!ipr_cmd->u.time_left) in ipr_dump_mailbox_wait()
8649 dev_err(&ioa_cfg->pdev->dev, in ipr_dump_mailbox_wait()
8652 ioa_cfg->sdt_state = READ_DUMP; in ipr_dump_mailbox_wait()
8653 ioa_cfg->dump_timeout = 0; in ipr_dump_mailbox_wait()
8654 if (ioa_cfg->sis64) in ipr_dump_mailbox_wait()
8658 ipr_cmd->job_step = ipr_reset_wait_for_dump; in ipr_dump_mailbox_wait()
8659 schedule_work(&ioa_cfg->work_q); in ipr_dump_mailbox_wait()
8662 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_dump_mailbox_wait()
8672 * ipr_reset_restore_cfg_space - Restore PCI config space.
8684 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_restore_cfg_space()
8687 ioa_cfg->pdev->state_saved = true; in ipr_reset_restore_cfg_space()
8688 pci_restore_state(ioa_cfg->pdev); in ipr_reset_restore_cfg_space()
8691 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_restore_cfg_space()
8697 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8699 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8700 readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8703 if (ioa_cfg->ioa_unit_checked) { in ipr_reset_restore_cfg_space()
8704 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8705 ipr_cmd->job_step = ipr_reset_get_unit_check_job; in ipr_reset_restore_cfg_space()
8709 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_restore_cfg_space()
8711 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_restore_cfg_space()
8717 if (ioa_cfg->in_ioa_bringdown) { in ipr_reset_restore_cfg_space()
8718 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_restore_cfg_space()
8719 } else if (ioa_cfg->sdt_state == GET_DUMP) { in ipr_reset_restore_cfg_space()
8720 ipr_cmd->job_step = ipr_dump_mailbox_wait; in ipr_reset_restore_cfg_space()
8721 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; in ipr_reset_restore_cfg_space()
8723 ipr_cmd->job_step = ipr_reset_enable_ioa; in ipr_reset_restore_cfg_space()
8731 * ipr_reset_bist_done - BIST has completed on the adapter.
8741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_bist_done()
8744 if (ioa_cfg->cfg_locked) in ipr_reset_bist_done()
8745 pci_cfg_access_unlock(ioa_cfg->pdev); in ipr_reset_bist_done()
8746 ioa_cfg->cfg_locked = 0; in ipr_reset_bist_done()
8747 ipr_cmd->job_step = ipr_reset_restore_cfg_space; in ipr_reset_bist_done()
8753 * ipr_reset_start_bist - Run BIST on the adapter.
8763 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_start_bist()
8767 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) in ipr_reset_start_bist()
8769 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_start_bist()
8771 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); in ipr_reset_start_bist()
8774 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_start_bist()
8778 if (ioa_cfg->cfg_locked) in ipr_reset_start_bist()
8779 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); in ipr_reset_start_bist()
8780 ioa_cfg->cfg_locked = 0; in ipr_reset_start_bist()
8781 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_start_bist()
8790 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8801 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_slot_reset_done()
8808 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8817 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_reset_work()
8818 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_reset_reset_work()
8826 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8827 if (ioa_cfg->reset_cmd == ipr_cmd) in ipr_reset_reset_work()
8829 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8834 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8844 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_slot_reset()
8847 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); in ipr_reset_slot_reset()
8848 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); in ipr_reset_slot_reset()
8849 ipr_cmd->job_step = ipr_reset_slot_reset_done; in ipr_reset_slot_reset()
8855 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8865 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_block_config_access_wait()
8868 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { in ipr_reset_block_config_access_wait()
8869 ioa_cfg->cfg_locked = 1; in ipr_reset_block_config_access_wait()
8870 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8872 if (ipr_cmd->u.time_left) { in ipr_reset_block_config_access_wait()
8874 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access_wait()
8878 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8879 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_block_config_access_wait()
8888 * ipr_reset_block_config_access - Block config access to the IOA
8898 ipr_cmd->ioa_cfg->cfg_locked = 0; in ipr_reset_block_config_access()
8899 ipr_cmd->job_step = ipr_reset_block_config_access_wait; in ipr_reset_block_config_access()
8900 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access()
8905 * ipr_reset_allowed - Query whether or not IOA can be reset
8909 * 0 if reset not allowed / non-zero if reset is allowed
8915 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_reset_allowed()
8920 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8936 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_to_start_bist()
8939 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { in ipr_reset_wait_to_start_bist()
8940 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_wait_to_start_bist()
8943 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_wait_to_start_bist()
8951 * ipr_reset_alert - Alert the adapter of a pending reset
8964 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_alert()
8969 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); in ipr_reset_alert()
8973 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_alert()
8974 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; in ipr_reset_alert()
8976 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_alert()
8979 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_alert()
8987 * ipr_reset_quiesce_done - Complete IOA disconnect
8997 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_quiesce_done()
9000 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_quiesce_done()
9007 * ipr_reset_cancel_hcam_done - Check for outstanding commands
9018 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam_done()
9025 ipr_cmd->job_step = ipr_reset_quiesce_done; in ipr_reset_cancel_hcam_done()
9028 spin_lock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
9029 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam_done()
9032 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cancel_hcam_done()
9036 spin_unlock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
9047 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9057 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam()
9061 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam()
9064 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; in ipr_reset_cancel_hcam()
9066 if (!hrrq->ioa_is_dead) { in ipr_reset_cancel_hcam()
9067 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { in ipr_reset_cancel_hcam()
9068 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam()
9069 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) in ipr_reset_cancel_hcam()
9072 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_cancel_hcam()
9073 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
9074 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_reset_cancel_hcam()
9075 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
9076 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; in ipr_reset_cancel_hcam()
9077 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; in ipr_reset_cancel_hcam()
9078 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; in ipr_reset_cancel_hcam()
9079 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; in ipr_reset_cancel_hcam()
9080 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; in ipr_reset_cancel_hcam()
9081 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; in ipr_reset_cancel_hcam()
9082 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; in ipr_reset_cancel_hcam()
9083 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; in ipr_reset_cancel_hcam()
9084 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; in ipr_reset_cancel_hcam()
9085 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; in ipr_reset_cancel_hcam()
9091 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_cancel_hcam()
9096 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_cancel_hcam()
9103 * ipr_reset_ucode_download_done - Microcode download completion
9113 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download_done()
9114 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download_done()
9116 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, in ipr_reset_ucode_download_done()
9117 sglist->num_sg, DMA_TO_DEVICE); in ipr_reset_ucode_download_done()
9119 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download_done()
9124 * ipr_reset_ucode_download - Download microcode to the adapter
9135 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download()
9136 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download()
9139 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download()
9144 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_ucode_download()
9145 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_reset_ucode_download()
9146 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; in ipr_reset_ucode_download()
9147 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; in ipr_reset_ucode_download()
9148 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; in ipr_reset_ucode_download()
9149 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; in ipr_reset_ucode_download()
9150 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; in ipr_reset_ucode_download()
9152 if (ioa_cfg->sis64) in ipr_reset_ucode_download()
9156 ipr_cmd->job_step = ipr_reset_ucode_download_done; in ipr_reset_ucode_download()
9166 * ipr_reset_shutdown_ioa - Shutdown the adapter
9178 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_shutdown_ioa()
9179 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; in ipr_reset_shutdown_ioa()
9185 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_shutdown_ioa()
9187 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
9188 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_shutdown_ioa()
9189 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_shutdown_ioa()
9190 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_reset_shutdown_ioa()
9191 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; in ipr_reset_shutdown_ioa()
9197 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_reset_shutdown_ioa()
9205 ipr_cmd->job_step = ipr_reset_ucode_download; in ipr_reset_shutdown_ioa()
9207 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_shutdown_ioa()
9214 * ipr_reset_ioa_job - Adapter reset job
9225 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ioa_job()
9228 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_ioa_job()
9230 if (ioa_cfg->reset_cmd != ipr_cmd) { in ipr_reset_ioa_job()
9235 list_add_tail(&ipr_cmd->queue, in ipr_reset_ioa_job()
9236 &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_ioa_job()
9241 rc = ipr_cmd->job_step_failed(ipr_cmd); in ipr_reset_ioa_job()
9247 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; in ipr_reset_ioa_job()
9248 rc = ipr_cmd->job_step(ipr_cmd); in ipr_reset_ioa_job()
9253 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9273 ioa_cfg->in_reset_reload = 1; in _ipr_initiate_ioa_reset()
9274 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in _ipr_initiate_ioa_reset()
9275 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9276 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
9277 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9280 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in _ipr_initiate_ioa_reset()
9281 ioa_cfg->scsi_unblock = 0; in _ipr_initiate_ioa_reset()
9282 ioa_cfg->scsi_blocked = 1; in _ipr_initiate_ioa_reset()
9283 scsi_block_requests(ioa_cfg->host); in _ipr_initiate_ioa_reset()
9287 ioa_cfg->reset_cmd = ipr_cmd; in _ipr_initiate_ioa_reset()
9288 ipr_cmd->job_step = job_step; in _ipr_initiate_ioa_reset()
9289 ipr_cmd->u.shutdown_type = shutdown_type; in _ipr_initiate_ioa_reset()
9295 * ipr_initiate_ioa_reset - Initiate an adapter reset
9311 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
9314 if (ioa_cfg->in_reset_reload) { in ipr_initiate_ioa_reset()
9315 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_initiate_ioa_reset()
9316 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_initiate_ioa_reset()
9317 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_initiate_ioa_reset()
9318 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_reset()
9321 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { in ipr_initiate_ioa_reset()
9322 dev_err(&ioa_cfg->pdev->dev, in ipr_initiate_ioa_reset()
9323 "IOA taken offline - error recovery failed\n"); in ipr_initiate_ioa_reset()
9325 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_reset()
9326 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_initiate_ioa_reset()
9327 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9328 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
9329 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9333 if (ioa_cfg->in_ioa_bringdown) { in ipr_initiate_ioa_reset()
9334 ioa_cfg->reset_cmd = NULL; in ipr_initiate_ioa_reset()
9335 ioa_cfg->in_reset_reload = 0; in ipr_initiate_ioa_reset()
9337 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_initiate_ioa_reset()
9339 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
9340 ioa_cfg->scsi_unblock = 1; in ipr_initiate_ioa_reset()
9341 schedule_work(&ioa_cfg->work_q); in ipr_initiate_ioa_reset()
9345 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_reset()
9355 * ipr_reset_freeze - Hold off all I/O activity
9364 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_freeze()
9368 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_freeze()
9369 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9370 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
9371 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9374 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_freeze()
9375 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_freeze()
9380 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9383 * Description: This routine is called to tell us that the MMIO
9391 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9392 if (!ioa_cfg->probe_done) in ipr_pci_mmio_enabled()
9394 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9399 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9402 * Description: This routine is called to tell us that the PCI bus
9411 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9412 if (ioa_cfg->probe_done) in ipr_pci_frozen()
9414 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9418 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9430 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9431 if (ioa_cfg->probe_done) { in ipr_pci_slot_reset()
9432 if (ioa_cfg->needs_warm_reset) in ipr_pci_slot_reset()
9438 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_slot_reset()
9439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9444 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9456 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9457 if (ioa_cfg->probe_done) { in ipr_pci_perm_failure()
9458 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_pci_perm_failure()
9459 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_pci_perm_failure()
9460 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; in ipr_pci_perm_failure()
9461 ioa_cfg->in_ioa_bringdown = 1; in ipr_pci_perm_failure()
9462 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_pci_perm_failure()
9463 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9464 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
9465 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9470 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_perm_failure()
9471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9475 * ipr_pci_error_detected - Called when a PCI error is detected.
9501 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9508 * 0 on success / -EIO on failure
9516 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9517 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); in ipr_probe_ioa_part2()
9518 ioa_cfg->probe_done = 1; in ipr_probe_ioa_part2()
9519 if (ioa_cfg->needs_hard_reset) { in ipr_probe_ioa_part2()
9520 ioa_cfg->needs_hard_reset = 0; in ipr_probe_ioa_part2()
9525 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9532 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9542 if (ioa_cfg->ipr_cmnd_list) { in ipr_free_cmd_blks()
9544 if (ioa_cfg->ipr_cmnd_list[i]) in ipr_free_cmd_blks()
9545 dma_pool_free(ioa_cfg->ipr_cmd_pool, in ipr_free_cmd_blks()
9546 ioa_cfg->ipr_cmnd_list[i], in ipr_free_cmd_blks()
9547 ioa_cfg->ipr_cmnd_list_dma[i]); in ipr_free_cmd_blks()
9549 ioa_cfg->ipr_cmnd_list[i] = NULL; in ipr_free_cmd_blks()
9553 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); in ipr_free_cmd_blks()
9555 kfree(ioa_cfg->ipr_cmnd_list); in ipr_free_cmd_blks()
9556 kfree(ioa_cfg->ipr_cmnd_list_dma); in ipr_free_cmd_blks()
9557 ioa_cfg->ipr_cmnd_list = NULL; in ipr_free_cmd_blks()
9558 ioa_cfg->ipr_cmnd_list_dma = NULL; in ipr_free_cmd_blks()
9559 ioa_cfg->ipr_cmd_pool = NULL; in ipr_free_cmd_blks()
9563 * ipr_free_mem - Frees memory allocated for an adapter
9573 kfree(ioa_cfg->res_entries); in ipr_free_mem()
9574 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_free_mem()
9575 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_free_mem()
9578 for (i = 0; i < ioa_cfg->hrrq_num; i++) in ipr_free_mem()
9579 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9580 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
9581 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
9582 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
9584 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, in ipr_free_mem()
9585 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_free_mem()
9588 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9590 ioa_cfg->hostrcb[i], in ipr_free_mem()
9591 ioa_cfg->hostrcb_dma[i]); in ipr_free_mem()
9595 kfree(ioa_cfg->trace); in ipr_free_mem()
9599 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9610 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_irqs()
9613 for (i = 0; i < ioa_cfg->nvectors; i++) in ipr_free_irqs()
9614 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); in ipr_free_irqs()
9619 * ipr_free_all_resources - Free all allocated resources for an adapter.
9630 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_all_resources()
9634 if (ioa_cfg->reset_work_q) in ipr_free_all_resources()
9635 destroy_workqueue(ioa_cfg->reset_work_q); in ipr_free_all_resources()
9636 iounmap(ioa_cfg->hdw_dma_regs); in ipr_free_all_resources()
9639 scsi_host_put(ioa_cfg->host); in ipr_free_all_resources()
9645 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9649 * 0 on success / -ENOMEM on allocation failure
9658 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, in ipr_alloc_cmd_blks()
9661 if (!ioa_cfg->ipr_cmd_pool) in ipr_alloc_cmd_blks()
9662 return -ENOMEM; in ipr_alloc_cmd_blks()
9664 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); in ipr_alloc_cmd_blks()
9665 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); in ipr_alloc_cmd_blks()
9667 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { in ipr_alloc_cmd_blks()
9669 return -ENOMEM; in ipr_alloc_cmd_blks()
9672 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_cmd_blks()
9673 if (ioa_cfg->hrrq_num > 1) { in ipr_alloc_cmd_blks()
9676 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9677 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9678 (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9682 (ioa_cfg->hrrq_num - 1); in ipr_alloc_cmd_blks()
9683 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
9685 (i - 1) * entries_each_hrrq; in ipr_alloc_cmd_blks()
9686 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9688 i * entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9692 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9693 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9695 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
9698 BUG_ON(ioa_cfg->hrrq_num == 0); in ipr_alloc_cmd_blks()
9700 i = IPR_NUM_CMD_BLKS - in ipr_alloc_cmd_blks()
9701 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
9703 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
9704 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
9708 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, in ipr_alloc_cmd_blks()
9713 return -ENOMEM; in ipr_alloc_cmd_blks()
9716 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; in ipr_alloc_cmd_blks()
9717 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; in ipr_alloc_cmd_blks()
9719 ioarcb = &ipr_cmd->ioarcb; in ipr_alloc_cmd_blks()
9720 ipr_cmd->dma_addr = dma_addr; in ipr_alloc_cmd_blks()
9721 if (ioa_cfg->sis64) in ipr_alloc_cmd_blks()
9722 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); in ipr_alloc_cmd_blks()
9724 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); in ipr_alloc_cmd_blks()
9726 ioarcb->host_response_handle = cpu_to_be32(i << 2); in ipr_alloc_cmd_blks()
9727 if (ioa_cfg->sis64) { in ipr_alloc_cmd_blks()
9728 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_alloc_cmd_blks()
9730 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
9733 ioarcb->write_ioadl_addr = in ipr_alloc_cmd_blks()
9735 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_alloc_cmd_blks()
9736 ioarcb->ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
9739 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); in ipr_alloc_cmd_blks()
9740 ipr_cmd->cmd_index = i; in ipr_alloc_cmd_blks()
9741 ipr_cmd->ioa_cfg = ioa_cfg; in ipr_alloc_cmd_blks()
9742 ipr_cmd->sense_buffer_dma = dma_addr + in ipr_alloc_cmd_blks()
9745 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; in ipr_alloc_cmd_blks()
9746 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
9747 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_alloc_cmd_blks()
9748 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
9756 * ipr_alloc_mem - Allocate memory for an adapter
9760 * 0 on success / non-zero for error
9764 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_alloc_mem()
9765 int i, rc = -ENOMEM; in ipr_alloc_mem()
9768 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, in ipr_alloc_mem()
9772 if (!ioa_cfg->res_entries) in ipr_alloc_mem()
9775 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { in ipr_alloc_mem()
9776 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); in ipr_alloc_mem()
9777 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9780 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9782 &ioa_cfg->vpd_cbs_dma, in ipr_alloc_mem()
9785 if (!ioa_cfg->vpd_cbs) in ipr_alloc_mem()
9791 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9792 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9793 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9794 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
9797 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
9798 while (--i >= 0) in ipr_alloc_mem()
9799 dma_free_coherent(&pdev->dev, in ipr_alloc_mem()
9800 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9801 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9802 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9805 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9808 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9809 ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9810 &ioa_cfg->cfg_table_dma, in ipr_alloc_mem()
9813 if (!ioa_cfg->u.cfg_table) in ipr_alloc_mem()
9817 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9819 &ioa_cfg->hostrcb_dma[i], in ipr_alloc_mem()
9822 if (!ioa_cfg->hostrcb[i]) in ipr_alloc_mem()
9825 ioa_cfg->hostrcb[i]->hostrcb_dma = in ipr_alloc_mem()
9826 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); in ipr_alloc_mem()
9827 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9828 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); in ipr_alloc_mem()
9831 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, in ipr_alloc_mem()
9835 if (!ioa_cfg->trace) in ipr_alloc_mem()
9844 while (i-- > 0) { in ipr_alloc_mem()
9845 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), in ipr_alloc_mem()
9846 ioa_cfg->hostrcb[i], in ipr_alloc_mem()
9847 ioa_cfg->hostrcb_dma[i]); in ipr_alloc_mem()
9849 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9850 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_alloc_mem()
9852 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9853 dma_free_coherent(&pdev->dev, in ipr_alloc_mem()
9854 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9855 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9856 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9861 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_alloc_mem()
9862 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_alloc_mem()
9864 kfree(ioa_cfg->res_entries); in ipr_alloc_mem()
9869 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9880 ioa_cfg->bus_attr[i].bus = i; in ipr_initialize_bus_attr()
9881 ioa_cfg->bus_attr[i].qas_enabled = 0; in ipr_initialize_bus_attr()
9882 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; in ipr_initialize_bus_attr()
9884 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; in ipr_initialize_bus_attr()
9886 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; in ipr_initialize_bus_attr()
9891 * ipr_init_regs - Initialize IOA registers
9903 p = &ioa_cfg->chip_cfg->regs; in ipr_init_regs()
9904 t = &ioa_cfg->regs; in ipr_init_regs()
9905 base = ioa_cfg->hdw_dma_regs; in ipr_init_regs()
9907 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; in ipr_init_regs()
9908 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; in ipr_init_regs()
9909 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; in ipr_init_regs()
9910 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; in ipr_init_regs()
9911 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; in ipr_init_regs()
9912 t->clr_interrupt_reg = base + p->clr_interrupt_reg; in ipr_init_regs()
9913 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; in ipr_init_regs()
9914 t->sense_interrupt_reg = base + p->sense_interrupt_reg; in ipr_init_regs()
9915 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; in ipr_init_regs()
9916 t->ioarrin_reg = base + p->ioarrin_reg; in ipr_init_regs()
9917 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; in ipr_init_regs()
9918 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; in ipr_init_regs()
9919 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; in ipr_init_regs()
9920 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; in ipr_init_regs()
9921 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; in ipr_init_regs()
9922 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; in ipr_init_regs()
9924 if (ioa_cfg->sis64) { in ipr_init_regs()
9925 t->init_feedback_reg = base + p->init_feedback_reg; in ipr_init_regs()
9926 t->dump_addr_reg = base + p->dump_addr_reg; in ipr_init_regs()
9927 t->dump_data_reg = base + p->dump_data_reg; in ipr_init_regs()
9928 t->endian_swap_reg = base + p->endian_swap_reg; in ipr_init_regs()
9933 * ipr_init_ioa_cfg - Initialize IOA config struct
9946 ioa_cfg->host = host; in ipr_init_ioa_cfg()
9947 ioa_cfg->pdev = pdev; in ipr_init_ioa_cfg()
9948 ioa_cfg->log_level = ipr_log_level; in ipr_init_ioa_cfg()
9949 ioa_cfg->doorbell = IPR_DOORBELL; in ipr_init_ioa_cfg()
9950 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); in ipr_init_ioa_cfg()
9951 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); in ipr_init_ioa_cfg()
9952 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); in ipr_init_ioa_cfg()
9953 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); in ipr_init_ioa_cfg()
9954 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); in ipr_init_ioa_cfg()
9955 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); in ipr_init_ioa_cfg()
9957 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); in ipr_init_ioa_cfg()
9958 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); in ipr_init_ioa_cfg()
9959 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); in ipr_init_ioa_cfg()
9960 INIT_LIST_HEAD(&ioa_cfg->free_res_q); in ipr_init_ioa_cfg()
9961 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in ipr_init_ioa_cfg()
9962 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); in ipr_init_ioa_cfg()
9963 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); in ipr_init_ioa_cfg()
9964 init_waitqueue_head(&ioa_cfg->reset_wait_q); in ipr_init_ioa_cfg()
9965 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_init_ioa_cfg()
9966 init_waitqueue_head(&ioa_cfg->eeh_wait_q); in ipr_init_ioa_cfg()
9967 ioa_cfg->sdt_state = INACTIVE; in ipr_init_ioa_cfg()
9970 ioa_cfg->max_devs_supported = ipr_max_devs; in ipr_init_ioa_cfg()
9972 if (ioa_cfg->sis64) { in ipr_init_ioa_cfg()
9973 host->max_channel = IPR_MAX_SIS64_BUSES; in ipr_init_ioa_cfg()
9974 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; in ipr_init_ioa_cfg()
9975 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; in ipr_init_ioa_cfg()
9977 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; in ipr_init_ioa_cfg()
9978 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) in ipr_init_ioa_cfg()
9980 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9982 host->max_channel = IPR_VSET_BUS; in ipr_init_ioa_cfg()
9983 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; in ipr_init_ioa_cfg()
9984 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; in ipr_init_ioa_cfg()
9986 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; in ipr_init_ioa_cfg()
9987 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) in ipr_init_ioa_cfg()
9989 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9992 host->unique_id = host->host_no; in ipr_init_ioa_cfg()
9993 host->max_cmd_len = IPR_MAX_CDB_LEN; in ipr_init_ioa_cfg()
9994 host->can_queue = ioa_cfg->max_cmds; in ipr_init_ioa_cfg()
9997 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9998 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9999 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
10000 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
10002 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
10004 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
10009 * ipr_get_chip_info - Find adapter chip information
10021 if (ipr_chip[i].vendor == dev_id->vendor && in ipr_get_chip_info()
10022 ipr_chip[i].device == dev_id->device) in ipr_get_chip_info()
10028 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10037 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_wait_for_pci_err_recovery()
10040 wait_event_timeout(ioa_cfg->eeh_wait_q, in ipr_wait_for_pci_err_recovery()
10049 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; in name_msi_vectors()
10051 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { in name_msi_vectors()
10052 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, in name_msi_vectors()
10053 "host%d-%d", ioa_cfg->host->host_no, vec_idx); in name_msi_vectors()
10054 ioa_cfg->vectors_info[vec_idx]. in name_msi_vectors()
10055 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; in name_msi_vectors()
10064 for (i = 1; i < ioa_cfg->nvectors; i++) { in ipr_request_other_msi_irqs()
10068 ioa_cfg->vectors_info[i].desc, in ipr_request_other_msi_irqs()
10069 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10071 while (--i > 0) in ipr_request_other_msi_irqs()
10073 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10081 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10089 * 0 on success / non-zero on failure
10096 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); in ipr_test_intr()
10097 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
10099 ioa_cfg->msi_received = 1; in ipr_test_intr()
10100 wake_up(&ioa_cfg->msi_wait_q); in ipr_test_intr()
10102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
10107 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10116 * 0 on success / non-zero on failure
10126 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10127 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_test_msi()
10128 ioa_cfg->msi_received = 0; in ipr_test_msi()
10130 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_test_msi()
10131 readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_test_msi()
10132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10136 dev_err(&pdev->dev, "Can not assign irq %d\n", irq); in ipr_test_msi()
10139 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); in ipr_test_msi()
10141 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); in ipr_test_msi()
10142 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_test_msi()
10143 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); in ipr_test_msi()
10144 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10147 if (!ioa_cfg->msi_received) { in ipr_test_msi()
10149 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); in ipr_test_msi()
10150 rc = -EOPNOTSUPP; in ipr_test_msi()
10152 dev_info(&pdev->dev, "MSI test succeeded.\n"); in ipr_test_msi()
10154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10163 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
10168 * 0 on success / non-zero on failure
10184 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); in ipr_probe_ioa()
10188 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); in ipr_probe_ioa()
10189 rc = -ENOMEM; in ipr_probe_ioa()
10193 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; in ipr_probe_ioa()
10195 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); in ipr_probe_ioa()
10197 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); in ipr_probe_ioa()
10199 if (!ioa_cfg->ipr_chip) { in ipr_probe_ioa()
10200 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", in ipr_probe_ioa()
10201 dev_id->vendor, dev_id->device); in ipr_probe_ioa()
10206 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; in ipr_probe_ioa()
10207 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; in ipr_probe_ioa()
10208 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; in ipr_probe_ioa()
10209 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; in ipr_probe_ioa()
10212 ioa_cfg->transop_timeout = ipr_transop_timeout; in ipr_probe_ioa()
10213 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) in ipr_probe_ioa()
10214 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10216 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10218 ioa_cfg->revid = pdev->revision; in ipr_probe_ioa()
10226 dev_err(&pdev->dev, in ipr_probe_ioa()
10240 dev_err(&pdev->dev, "Cannot enable adapter\n"); in ipr_probe_ioa()
10249 dev_err(&pdev->dev, in ipr_probe_ioa()
10251 rc = -ENOMEM; in ipr_probe_ioa()
10255 ioa_cfg->hdw_dma_regs = ipr_regs; in ipr_probe_ioa()
10256 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; in ipr_probe_ioa()
10257 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; in ipr_probe_ioa()
10261 if (ioa_cfg->sis64) { in ipr_probe_ioa()
10262 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in ipr_probe_ioa()
10264 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); in ipr_probe_ioa()
10265 rc = dma_set_mask_and_coherent(&pdev->dev, in ipr_probe_ioa()
10269 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in ipr_probe_ioa()
10272 dev_err(&pdev->dev, "Failed to set DMA mask\n"); in ipr_probe_ioa()
10277 ioa_cfg->chip_cfg->cache_line_size); in ipr_probe_ioa()
10280 dev_err(&pdev->dev, "Write of cache line size failed\n"); in ipr_probe_ioa()
10282 rc = -EIO; in ipr_probe_ioa()
10287 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_probe_ioa()
10291 dev_err(&pdev->dev, "The max number of MSIX is %d\n", in ipr_probe_ioa()
10297 if (ioa_cfg->ipr_chip->has_msi) in ipr_probe_ioa()
10304 ioa_cfg->nvectors = rc; in ipr_probe_ioa()
10306 if (!pdev->msi_enabled && !pdev->msix_enabled) in ipr_probe_ioa()
10307 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
10315 rc = -EIO; in ipr_probe_ioa()
10320 if (pdev->msi_enabled || pdev->msix_enabled) { in ipr_probe_ioa()
10324 dev_info(&pdev->dev, in ipr_probe_ioa()
10325 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, in ipr_probe_ioa()
10326 pdev->msix_enabled ? "-X" : ""); in ipr_probe_ioa()
10328 case -EOPNOTSUPP: in ipr_probe_ioa()
10332 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
10333 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
10340 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, in ipr_probe_ioa()
10352 dev_err(&pdev->dev, in ipr_probe_ioa()
10361 dev_err(&pdev->dev, "Failed to save PCI config space\n"); in ipr_probe_ioa()
10362 rc = -EIO; in ipr_probe_ioa()
10370 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_probe_ioa()
10371 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_probe_ioa()
10372 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_probe_ioa()
10374 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10376 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10378 ioa_cfg->ioa_unit_checked = 1; in ipr_probe_ioa()
10380 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10384 if (pdev->msi_enabled || pdev->msix_enabled) { in ipr_probe_ioa()
10387 ioa_cfg->vectors_info[0].desc, in ipr_probe_ioa()
10388 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10392 rc = request_irq(pdev->irq, ipr_isr, in ipr_probe_ioa()
10394 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10397 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", in ipr_probe_ioa()
10398 pdev->irq, rc); in ipr_probe_ioa()
10402 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || in ipr_probe_ioa()
10403 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { in ipr_probe_ioa()
10404 ioa_cfg->needs_warm_reset = 1; in ipr_probe_ioa()
10405 ioa_cfg->reset = ipr_reset_slot_reset; in ipr_probe_ioa()
10407 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", in ipr_probe_ioa()
10408 WQ_MEM_RECLAIM, host->host_no); in ipr_probe_ioa()
10410 if (!ioa_cfg->reset_work_q) { in ipr_probe_ioa()
10411 dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); in ipr_probe_ioa()
10412 rc = -ENOMEM; in ipr_probe_ioa()
10416 ioa_cfg->reset = ipr_reset_start_bist; in ipr_probe_ioa()
10419 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); in ipr_probe_ioa()
10445 * ipr_initiate_ioa_bringdown - Bring down an adapter
10462 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_initiate_ioa_bringdown()
10463 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_bringdown()
10464 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_bringdown()
10465 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_bringdown()
10471 * __ipr_remove - Remove a single adapter
10487 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10488 while (ioa_cfg->in_reset_reload) { in __ipr_remove()
10489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10490 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10491 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10494 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in __ipr_remove()
10495 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10496 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
10497 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10502 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10503 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10504 flush_work(&ioa_cfg->work_q); in __ipr_remove()
10505 if (ioa_cfg->reset_work_q) in __ipr_remove()
10506 flush_workqueue(ioa_cfg->reset_work_q); in __ipr_remove()
10507 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in __ipr_remove()
10508 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10511 list_del(&ioa_cfg->queue); in __ipr_remove()
10514 if (ioa_cfg->sdt_state == ABORT_DUMP) in __ipr_remove()
10515 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in __ipr_remove()
10516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10524 * ipr_remove - IOA hot plug remove entry point
10538 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10540 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10542 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10544 scsi_remove_host(ioa_cfg->host); in ipr_remove()
10552 * ipr_probe - Adapter hot plug add entry point
10557 * 0 on success / non-zero on failure
10578 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); in ipr_probe()
10585 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10589 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10594 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10598 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10600 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10602 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10607 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10611 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10613 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10615 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10619 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_probe()
10620 ioa_cfg->scan_enabled = 1; in ipr_probe()
10621 schedule_work(&ioa_cfg->work_q); in ipr_probe()
10622 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_probe()
10624 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; in ipr_probe()
10626 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_probe()
10627 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_probe()
10628 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
10629 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_probe()
10633 scsi_scan_host(ioa_cfg->host); in ipr_probe()
10639 * ipr_shutdown - Shutdown handler.
10655 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10656 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_shutdown()
10657 ioa_cfg->iopoll_weight = 0; in ipr_shutdown()
10658 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_shutdown()
10659 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
10662 while (ioa_cfg->in_reset_reload) { in ipr_shutdown()
10663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10664 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10665 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10668 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) in ipr_shutdown()
10672 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10673 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10674 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { in ipr_shutdown()
10676 pci_disable_device(ioa_cfg->pdev); in ipr_shutdown()
10810 * ipr_halt_done - Shutdown prepare completion
10818 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_halt_done()
10822 * ipr_halt - Issue shutdown prepare to all adapters
10842 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_halt()
10843 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()
10844 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { in ipr_halt()
10845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
10850 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_halt()
10851 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_halt()
10852 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_halt()
10853 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; in ipr_halt()
10856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
10868 * ipr_init - Module entry point
10883 * ipr_exit - Module unload