Lines Matching +full:ld +full:- +full:pulse +full:- +full:delay +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * ipr.c -- driver for IBM Power Linux RAID adapters
5 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
17 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
18 * PCI-X Dual Channel Ultra 320 SCSI Adapter
19 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
23 * - Ultra 320 SCSI controller
24 * - PCI-X host interface
25 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
26 * - Non-Volatile Write Cache
27 * - Supports attachment of non-RAID disks, tape, and optical devices
28 * - RAID Levels 0, 5, 10
29 * - Hot spare
30 * - Background Parity Checking
31 * - Background Data Scrubbing
32 * - Ability to increase the capacity of an existing RAID 5 disk array
36 * - Tagged command queuing
37 * - Adapter microcode download
38 * - PCI hot plug
39 * - SCSI device hot plug
51 #include <linux/delay.h>
93 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
191 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
194 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2…
196 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
211 …_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
379 "9073: Invalid multi-adapter configuration"},
401 "Illegal request, command not allowed to a non-optimized resource"},
551 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
552 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
557 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
558 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
578 * ipr_trc_hook - Add a trace entry to the driver trace
590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_trc_hook()
593 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; in ipr_trc_hook()
594 trace_entry = &ioa_cfg->trace[trace_index]; in ipr_trc_hook()
595 trace_entry->time = jiffies; in ipr_trc_hook()
596 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; in ipr_trc_hook()
597 trace_entry->type = type; in ipr_trc_hook()
598 if (ipr_cmd->ioa_cfg->sis64) in ipr_trc_hook()
599 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; in ipr_trc_hook()
601 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; in ipr_trc_hook()
602 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; in ipr_trc_hook()
603 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; in ipr_trc_hook()
604 trace_entry->u.add_data = add_data; in ipr_trc_hook()
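The fragment above is the driver's wrap-around trace ring: atomic_add_return() hands each caller a unique, monotonically increasing slot number and the power-of-two mask folds it back into the buffer. A minimal userspace sketch of the same pattern with C11 atomics (all names here are illustrative, not from ipr.c):

#include <stdatomic.h>
#include <stdio.h>

#define TRACE_SIZE 8                      /* must be a power of two */
#define TRACE_INDEX_MASK (TRACE_SIZE - 1)

struct trace_entry { unsigned long seq; unsigned char op_code; };

static struct trace_entry trace[TRACE_SIZE];
static atomic_int trace_index = -1;       /* first increment yields slot 0 */

/* Claim the next slot; concurrent callers never receive the same one. */
static struct trace_entry *trc_hook(unsigned char op_code)
{
	/* fetch_add returns the old value; +1 mimics atomic_add_return() */
	int idx = atomic_fetch_add(&trace_index, 1) + 1;
	struct trace_entry *e = &trace[idx & TRACE_INDEX_MASK];

	e->seq = (unsigned long)idx;
	e->op_code = op_code;
	return e;
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		trc_hook((unsigned char)i);           /* wraps 2.5 times */
	for (int i = 0; i < TRACE_SIZE; i++)
		printf("slot %d: seq=%lu op=%u\n", i,
		       trace[i].seq, (unsigned)trace[i].op_code);
	return 0;
}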
612 * ipr_lock_and_done - Acquire lock and complete command
621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_lock_and_done()
623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
624 ipr_cmd->done(ipr_cmd); in ipr_lock_and_done()
625 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
629 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
637 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_reinit_ipr_cmnd()
638 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_reinit_ipr_cmnd()
639 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; in ipr_reinit_ipr_cmnd()
640 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_reinit_ipr_cmnd()
643 hrrq_id = ioarcb->cmd_pkt.hrrq_id; in ipr_reinit_ipr_cmnd()
644 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); in ipr_reinit_ipr_cmnd()
645 ioarcb->cmd_pkt.hrrq_id = hrrq_id; in ipr_reinit_ipr_cmnd()
646 ioarcb->data_transfer_length = 0; in ipr_reinit_ipr_cmnd()
647 ioarcb->read_data_transfer_length = 0; in ipr_reinit_ipr_cmnd()
648 ioarcb->ioadl_len = 0; in ipr_reinit_ipr_cmnd()
649 ioarcb->read_ioadl_len = 0; in ipr_reinit_ipr_cmnd()
651 if (ipr_cmd->ioa_cfg->sis64) { in ipr_reinit_ipr_cmnd()
652 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_reinit_ipr_cmnd()
654 ioasa64->u.gata.status = 0; in ipr_reinit_ipr_cmnd()
656 ioarcb->write_ioadl_addr = in ipr_reinit_ipr_cmnd()
658 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_reinit_ipr_cmnd()
659 ioasa->u.gata.status = 0; in ipr_reinit_ipr_cmnd()
662 ioasa->hdr.ioasc = 0; in ipr_reinit_ipr_cmnd()
663 ioasa->hdr.residual_data_len = 0; in ipr_reinit_ipr_cmnd()
664 ipr_cmd->scsi_cmd = NULL; in ipr_reinit_ipr_cmnd()
665 ipr_cmd->qc = NULL; in ipr_reinit_ipr_cmnd()
666 ipr_cmd->sense_buffer[0] = 0; in ipr_reinit_ipr_cmnd()
667 ipr_cmd->dma_use_sg = 0; in ipr_reinit_ipr_cmnd()
671 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
673 * @fast_done: fast done function call-back
682 ipr_cmd->u.scratch = 0; in ipr_init_ipr_cmnd()
683 ipr_cmd->sibling = NULL; in ipr_init_ipr_cmnd()
684 ipr_cmd->eh_comp = NULL; in ipr_init_ipr_cmnd()
685 ipr_cmd->fast_done = fast_done; in ipr_init_ipr_cmnd()
686 timer_setup(&ipr_cmd->timer, NULL, 0); in ipr_init_ipr_cmnd()
690 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
701 if (likely(!list_empty(&hrrq->hrrq_free_q))) { in __ipr_get_free_ipr_cmnd()
702 ipr_cmd = list_entry(hrrq->hrrq_free_q.next, in __ipr_get_free_ipr_cmnd()
704 list_del(&ipr_cmd->queue); in __ipr_get_free_ipr_cmnd()
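__ipr_get_free_ipr_cmnd() is a plain intrusive free-list pop: check for empty, convert the first list node back to its containing command block, unlink it. A self-contained userspace sketch of that pop, with minimal stand-ins for the kernel's list_head machinery (hypothetical types, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Minimal circular doubly linked list, after the kernel's list_head. */
struct list_head { struct list_head *next, *prev; };

#define LIST_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

struct cmd { int index; struct list_head queue; };

/* Pop the first free command block, as __ipr_get_free_ipr_cmnd() does. */
static struct cmd *get_free_cmd(struct list_head *free_q)
{
	struct cmd *c;

	if (free_q->next == free_q)       /* list_empty() */
		return NULL;
	c = container_of(free_q->next, struct cmd, queue);
	list_del(&c->queue);
	return c;
}

int main(void)
{
	struct list_head free_q = LIST_INIT(free_q);
	struct cmd cmds[2] = { { .index = 0 }, { .index = 1 } };
	struct cmd *c;

	list_add_tail(&cmds[0].queue, &free_q);
	list_add_tail(&cmds[1].queue, &free_q);
	while ((c = get_free_cmd(&free_q)))
		printf("got cmd %d\n", c->index);
	return 0;
}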
712 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
722 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
728 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
744 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_mask_and_clear_interrupts()
745 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
746 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
747 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
751 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
752 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
754 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
757 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
758 writel(~0, ioa_cfg->regs.clr_interrupt_reg); in ipr_mask_and_clear_interrupts()
759 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); in ipr_mask_and_clear_interrupts()
760 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_mask_and_clear_interrupts()
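The trailing readl() of sense_interrupt_reg is the usual posted-write flush: PCI writes can linger in bridge buffers, and a read from the same device forces them out before the function returns. A schematic userspace model of the sequence (a fake register file stands in for the real ioremapped MMIO and writel()/readl() accessors):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t regs[3];
enum { SET_INTERRUPT_MASK, CLR_INTERRUPT, SENSE_INTERRUPT };

static void writel(uint32_t val, volatile uint32_t *addr) { *addr = val; }
static uint32_t readl(volatile uint32_t *addr) { return *addr; }

/* Mask everything, clear the requested bits, then read back: on real
 * hardware the read forces the posted writes out to the adapter before
 * we return — the pattern ipr_mask_and_clear_interrupts() follows. */
static void mask_and_clear(uint32_t clr_ints)
{
	writel(~0u, &regs[SET_INTERRUPT_MASK]);
	writel(clr_ints, &regs[CLR_INTERRUPT]);
	(void)readl(&regs[SENSE_INTERRUPT]);   /* flush posted writes */
}

int main(void)
{
	mask_and_clear(0x00ff00ffu);
	printf("mask=%08x clr=%08x\n",
	       (unsigned)regs[SET_INTERRUPT_MASK],
	       (unsigned)regs[CLR_INTERRUPT]);
	return 0;
}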
764 * ipr_save_pcix_cmd_reg - Save PCI-X command register
768 * 0 on success / -EIO on failure
772 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_save_pcix_cmd_reg()
777 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_save_pcix_cmd_reg()
778 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_save_pcix_cmd_reg()
779 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); in ipr_save_pcix_cmd_reg()
780 return -EIO; in ipr_save_pcix_cmd_reg()
783 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; in ipr_save_pcix_cmd_reg()
788 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792 * 0 on success / -EIO on failure
796 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_set_pcix_cmd_reg()
799 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_set_pcix_cmd_reg()
800 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_set_pcix_cmd_reg()
801 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); in ipr_set_pcix_cmd_reg()
802 return -EIO; in ipr_set_pcix_cmd_reg()
810 * __ipr_sata_eh_done - done function for aborted SATA commands
821 struct ata_queued_cmd *qc = ipr_cmd->qc; in __ipr_sata_eh_done()
822 struct ipr_sata_port *sata_port = qc->ap->private_data; in __ipr_sata_eh_done()
824 qc->err_mask |= AC_ERR_OTHER; in __ipr_sata_eh_done()
825 sata_port->ioasa.status |= ATA_BUSY; in __ipr_sata_eh_done()
827 if (ipr_cmd->eh_comp) in __ipr_sata_eh_done()
828 complete(ipr_cmd->eh_comp); in __ipr_sata_eh_done()
829 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_sata_eh_done()
833 * ipr_sata_eh_done - done function for aborted SATA commands
844 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_sata_eh_done()
847 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_sata_eh_done()
849 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_sata_eh_done()
853 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
857 * ops generated by the SCSI mid-layer which are being aborted.
864 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in __ipr_scsi_eh_done()
866 scsi_cmd->result |= (DID_ERROR << 16); in __ipr_scsi_eh_done()
868 scsi_dma_unmap(ipr_cmd->scsi_cmd); in __ipr_scsi_eh_done()
869 scsi_cmd->scsi_done(scsi_cmd); in __ipr_scsi_eh_done()
870 if (ipr_cmd->eh_comp) in __ipr_scsi_eh_done()
871 complete(ipr_cmd->eh_comp); in __ipr_scsi_eh_done()
872 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_scsi_eh_done()
876 * ipr_scsi_eh_done - mid-layer done function for aborted ops
880 * ops generated by the SCSI mid-layer which are being aborted.
888 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_scsi_eh_done()
890 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_scsi_eh_done()
892 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_scsi_eh_done()
896 * ipr_fail_all_ops - Fails all outstanding ops.
911 spin_lock(&hrrq->_lock); in ipr_fail_all_ops()
913 temp, &hrrq->hrrq_pending_q, queue) { in ipr_fail_all_ops()
914 list_del(&ipr_cmd->queue); in ipr_fail_all_ops()
916 ipr_cmd->s.ioasa.hdr.ioasc = in ipr_fail_all_ops()
918 ipr_cmd->s.ioasa.hdr.ilid = in ipr_fail_all_ops()
921 if (ipr_cmd->scsi_cmd) in ipr_fail_all_ops()
922 ipr_cmd->done = __ipr_scsi_eh_done; in ipr_fail_all_ops()
923 else if (ipr_cmd->qc) in ipr_fail_all_ops()
924 ipr_cmd->done = __ipr_sata_eh_done; in ipr_fail_all_ops()
928 del_timer(&ipr_cmd->timer); in ipr_fail_all_ops()
929 ipr_cmd->done(ipr_cmd); in ipr_fail_all_ops()
931 spin_unlock(&hrrq->_lock); in ipr_fail_all_ops()
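Note the 'temp' cursor in the loop header above: every failed op is deleted from the pending queue and handed to its done routine, so the iterator must save the next node before touching the current one (the kernel's list_for_each_entry_safe). The same idea on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct op { int id; struct op *next; };

/* Fail every queued op: save 'next' before the node is destroyed, the
 * same reason ipr_fail_all_ops() iterates with list_for_each_entry_safe. */
static void fail_all_ops(struct op **pending)
{
	struct op *op = *pending, *temp;

	while (op) {
		temp = op->next;            /* grabbed before 'op' goes away */
		printf("failing op %d (IOA was reset)\n", op->id);
		free(op);
		op = temp;
	}
	*pending = NULL;
}

int main(void)
{
	struct op *pending = NULL;

	for (int i = 3; i > 0; i--) {
		struct op *o = malloc(sizeof(*o));
		o->id = i;
		o->next = pending;
		pending = o;
	}
	fail_all_ops(&pending);
	return 0;
}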
937 * ipr_send_command - Send driver initiated requests.
949 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_command()
950 dma_addr_t send_dma_addr = ipr_cmd->dma_addr; in ipr_send_command()
952 if (ioa_cfg->sis64) { in ipr_send_command()
958 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128) in ipr_send_command()
960 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
962 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
966 * ipr_do_req - Send driver initiated requests.
982 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_do_req()
984 ipr_cmd->done = done; in ipr_do_req()
986 ipr_cmd->timer.expires = jiffies + timeout; in ipr_do_req()
987 ipr_cmd->timer.function = timeout_func; in ipr_do_req()
989 add_timer(&ipr_cmd->timer); in ipr_do_req()
997 * ipr_internal_cmd_done - Op done function for an internally generated op.
1008 if (ipr_cmd->sibling) in ipr_internal_cmd_done()
1009 ipr_cmd->sibling = NULL; in ipr_internal_cmd_done()
1011 complete(&ipr_cmd->completion); in ipr_internal_cmd_done()
1015 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_init_ioadl()
1031 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_init_ioadl()
1033 ipr_cmd->dma_use_sg = 1; in ipr_init_ioadl()
1035 if (ipr_cmd->ioa_cfg->sis64) { in ipr_init_ioadl()
1036 ioadl64->flags = cpu_to_be32(flags); in ipr_init_ioadl()
1037 ioadl64->data_len = cpu_to_be32(len); in ipr_init_ioadl()
1038 ioadl64->address = cpu_to_be64(dma_addr); in ipr_init_ioadl()
1040 ipr_cmd->ioarcb.ioadl_len = in ipr_init_ioadl()
1042 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1044 ioadl->flags_and_data_len = cpu_to_be32(flags | len); in ipr_init_ioadl()
1045 ioadl->address = cpu_to_be32(dma_addr); in ipr_init_ioadl()
1048 ipr_cmd->ioarcb.read_ioadl_len = in ipr_init_ioadl()
1050 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1052 ipr_cmd->ioarcb.ioadl_len = in ipr_init_ioadl()
1054 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1060 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1072 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_blocking_cmd()
1074 init_completion(&ipr_cmd->completion); in ipr_send_blocking_cmd()
1077 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1078 wait_for_completion(&ipr_cmd->completion); in ipr_send_blocking_cmd()
1079 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
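ipr_send_blocking_cmd() drops the host lock, sleeps on a struct completion, and retakes the lock once the interrupt path calls complete(). A userspace analog of that rendezvous built on a pthread condition variable (a sketch of the idea, not the kernel primitive; all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Poor man's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static struct completion cmd_done;

/* Stands in for the adapter interrupt calling ipr_internal_cmd_done(). */
static void *irq_thread(void *arg)
{
	(void)arg;
	usleep(100 * 1000);               /* the command "executes" */
	complete(&cmd_done);
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&cmd_done);
	pthread_create(&t, NULL, irq_thread, NULL);
	wait_for_completion(&cmd_done);   /* blocks until the IRQ side completes */
	puts("command completed");
	pthread_join(t, NULL);
	return 0;
}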
1086 if (ioa_cfg->hrrq_num == 1) in ipr_get_hrrq_index()
1089 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1090 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
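This selector reserves queue 0 (IPR_INIT_HRRQ) for internal commands and round-robins normal I/O over queues 1..hrrq_num-1; the modulo-of-(n-1)-plus-1 arithmetic is what keeps index 0 out of rotation. A compact sketch (HRRQ_NUM is an assumed value):

#include <stdatomic.h>
#include <stdio.h>

#define HRRQ_NUM 4                /* assumed; queue 0 is IPR_INIT_HRRQ */

static atomic_int hrrq_index;

/* Round-robin over queues 1..HRRQ_NUM-1, never handing out queue 0,
 * mirroring ipr_get_hrrq_index(). */
static int get_hrrq_index(void)
{
	int h;

	if (HRRQ_NUM == 1)
		return 0;
	h = atomic_fetch_add(&hrrq_index, 1) + 1;
	return (h % (HRRQ_NUM - 1)) + 1;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("%d ", get_hrrq_index());   /* values stay in 1..3 */
	putchar('\n');
	return 0;
}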
1096 * ipr_send_hcam - Send an HCAM to the adapter.
1114 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1116 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_send_hcam()
1117 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); in ipr_send_hcam()
1119 ipr_cmd->u.hostrcb = hostrcb; in ipr_send_hcam()
1120 ioarcb = &ipr_cmd->ioarcb; in ipr_send_hcam()
1122 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_send_hcam()
1123 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; in ipr_send_hcam()
1124 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; in ipr_send_hcam()
1125 ioarcb->cmd_pkt.cdb[1] = type; in ipr_send_hcam()
1126 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; in ipr_send_hcam()
1127 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; in ipr_send_hcam()
1129 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, in ipr_send_hcam()
1130 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); in ipr_send_hcam()
1133 ipr_cmd->done = ipr_process_ccn; in ipr_send_hcam()
1135 ipr_cmd->done = ipr_process_error; in ipr_send_hcam()
1141 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_send_hcam()
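The cdb[7]/cdb[8] assignments above store the HCAM buffer size big-endian, one byte at a time, since a SCSI CDB is just a byte stream. The packing in isolation (example length value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cdb[10] = { 0 };
	unsigned int xfer_len = 0x0fc8;      /* example allocation length */

	cdb[7] = (xfer_len >> 8) & 0xff;     /* MSB */
	cdb[8] = xfer_len & 0xff;            /* LSB */

	printf("cdb[7]=%02x cdb[8]=%02x\n",
	       (unsigned)cdb[7], (unsigned)cdb[8]);
	return 0;
}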
1146 * ipr_update_ata_class - Update the ata class in the resource entry
1158 res->ata_class = ATA_DEV_ATA; in ipr_update_ata_class()
1162 res->ata_class = ATA_DEV_ATAPI; in ipr_update_ata_class()
1165 res->ata_class = ATA_DEV_UNKNOWN; in ipr_update_ata_class()
1171 * ipr_init_res_entry - Initialize a resource entry struct.
1183 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_init_res_entry()
1186 res->needs_sync_complete = 0; in ipr_init_res_entry()
1187 res->in_erp = 0; in ipr_init_res_entry()
1188 res->add_to_ml = 0; in ipr_init_res_entry()
1189 res->del_from_ml = 0; in ipr_init_res_entry()
1190 res->resetting_device = 0; in ipr_init_res_entry()
1191 res->reset_occurred = 0; in ipr_init_res_entry()
1192 res->sdev = NULL; in ipr_init_res_entry()
1193 res->sata_port = NULL; in ipr_init_res_entry()
1195 if (ioa_cfg->sis64) { in ipr_init_res_entry()
1196 proto = cfgtew->u.cfgte64->proto; in ipr_init_res_entry()
1197 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); in ipr_init_res_entry()
1198 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); in ipr_init_res_entry()
1199 res->qmodel = IPR_QUEUEING_MODEL64(res); in ipr_init_res_entry()
1200 res->type = cfgtew->u.cfgte64->res_type; in ipr_init_res_entry()
1202 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_init_res_entry()
1203 sizeof(res->res_path)); in ipr_init_res_entry()
1205 res->bus = 0; in ipr_init_res_entry()
1206 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_init_res_entry()
1207 sizeof(res->dev_lun.scsi_lun)); in ipr_init_res_entry()
1208 res->lun = scsilun_to_int(&res->dev_lun); in ipr_init_res_entry()
1210 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { in ipr_init_res_entry()
1211 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { in ipr_init_res_entry()
1212 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { in ipr_init_res_entry()
1214 res->target = gscsi_res->target; in ipr_init_res_entry()
1219 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1220 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1221 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1223 } else if (res->type == IPR_RES_TYPE_IOAFP) { in ipr_init_res_entry()
1224 res->bus = IPR_IOAFP_VIRTUAL_BUS; in ipr_init_res_entry()
1225 res->target = 0; in ipr_init_res_entry()
1226 } else if (res->type == IPR_RES_TYPE_ARRAY) { in ipr_init_res_entry()
1227 res->bus = IPR_ARRAY_VIRTUAL_BUS; in ipr_init_res_entry()
1228 res->target = find_first_zero_bit(ioa_cfg->array_ids, in ipr_init_res_entry()
1229 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1230 set_bit(res->target, ioa_cfg->array_ids); in ipr_init_res_entry()
1231 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { in ipr_init_res_entry()
1232 res->bus = IPR_VSET_VIRTUAL_BUS; in ipr_init_res_entry()
1233 res->target = find_first_zero_bit(ioa_cfg->vset_ids, in ipr_init_res_entry()
1234 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1235 set_bit(res->target, ioa_cfg->vset_ids); in ipr_init_res_entry()
1237 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1238 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1239 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1242 proto = cfgtew->u.cfgte->proto; in ipr_init_res_entry()
1243 res->qmodel = IPR_QUEUEING_MODEL(res); in ipr_init_res_entry()
1244 res->flags = cfgtew->u.cfgte->flags; in ipr_init_res_entry()
1245 if (res->flags & IPR_IS_IOA_RESOURCE) in ipr_init_res_entry()
1246 res->type = IPR_RES_TYPE_IOAFP; in ipr_init_res_entry()
1248 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; in ipr_init_res_entry()
1250 res->bus = cfgtew->u.cfgte->res_addr.bus; in ipr_init_res_entry()
1251 res->target = cfgtew->u.cfgte->res_addr.target; in ipr_init_res_entry()
1252 res->lun = cfgtew->u.cfgte->res_addr.lun; in ipr_init_res_entry()
1253 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); in ipr_init_res_entry()
1260 * ipr_is_same_device - Determine if two devices are the same.
1270 if (res->ioa_cfg->sis64) { in ipr_is_same_device()
1271 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, in ipr_is_same_device()
1272 sizeof(cfgtew->u.cfgte64->dev_id)) && in ipr_is_same_device()
1273 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_is_same_device()
1274 sizeof(cfgtew->u.cfgte64->lun))) { in ipr_is_same_device()
1278 if (res->bus == cfgtew->u.cfgte->res_addr.bus && in ipr_is_same_device()
1279 res->target == cfgtew->u.cfgte->res_addr.target && in ipr_is_same_device()
1280 res->lun == cfgtew->u.cfgte->res_addr.lun) in ipr_is_same_device()
1288 * __ipr_format_res_path - Format the resource path for printing.
1302 p += scnprintf(p, buffer + len - p, "%02X", res_path[0]); in __ipr_format_res_path()
1304 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]); in __ipr_format_res_path()
1310 * ipr_format_res_path - Format the resource path for printing.
1325 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); in ipr_format_res_path()
1326 __ipr_format_res_path(res_path, p, len - (p - buffer)); in ipr_format_res_path()
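ipr_format_res_path() chains scnprintf() calls: each returns the number of characters written, p advances by that much, buffer + len - p is the space still available, and len - (p - buffer) is what remains for the helper. A userspace equivalent with snprintf() (caveat in the comment: unlike the kernel's scnprintf(), snprintf() returns the would-be length on truncation, so this sketch assumes the buffer is big enough):

#include <stdio.h>

int main(void)
{
	unsigned char res_path[] = { 0x00, 0x01, 0x1c };
	char buffer[32];
	char *p = buffer;
	int len = sizeof(buffer);

	/* Each call writes into the space that remains; p advances by the
	 * amount written. */
	p += snprintf(p, buffer + len - p, "%d/", 2);   /* host number */
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (int i = 1; i < 3; i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	printf("%s\n", buffer);   /* 2/00-01-1C */
	return 0;
}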
1331 * ipr_update_res_entry - Update the resource entry.
1345 if (res->ioa_cfg->sis64) { in ipr_update_res_entry()
1346 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); in ipr_update_res_entry()
1347 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); in ipr_update_res_entry()
1348 res->type = cfgtew->u.cfgte64->res_type; in ipr_update_res_entry()
1350 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, in ipr_update_res_entry()
1353 res->qmodel = IPR_QUEUEING_MODEL64(res); in ipr_update_res_entry()
1354 proto = cfgtew->u.cfgte64->proto; in ipr_update_res_entry()
1355 res->res_handle = cfgtew->u.cfgte64->res_handle; in ipr_update_res_entry()
1356 res->dev_id = cfgtew->u.cfgte64->dev_id; in ipr_update_res_entry()
1358 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_update_res_entry()
1359 sizeof(res->dev_lun.scsi_lun)); in ipr_update_res_entry()
1361 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_update_res_entry()
1362 sizeof(res->res_path))) { in ipr_update_res_entry()
1363 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_update_res_entry()
1364 sizeof(res->res_path)); in ipr_update_res_entry()
1368 if (res->sdev && new_path) in ipr_update_res_entry()
1369 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", in ipr_update_res_entry()
1370 ipr_format_res_path(res->ioa_cfg, in ipr_update_res_entry()
1371 res->res_path, buffer, sizeof(buffer))); in ipr_update_res_entry()
1373 res->flags = cfgtew->u.cfgte->flags; in ipr_update_res_entry()
1374 if (res->flags & IPR_IS_IOA_RESOURCE) in ipr_update_res_entry()
1375 res->type = IPR_RES_TYPE_IOAFP; in ipr_update_res_entry()
1377 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; in ipr_update_res_entry()
1379 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, in ipr_update_res_entry()
1382 res->qmodel = IPR_QUEUEING_MODEL(res); in ipr_update_res_entry()
1383 proto = cfgtew->u.cfgte->proto; in ipr_update_res_entry()
1384 res->res_handle = cfgtew->u.cfgte->res_handle; in ipr_update_res_entry()
1391 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1401 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_clear_res_target()
1403 if (!ioa_cfg->sis64) in ipr_clear_res_target()
1406 if (res->bus == IPR_ARRAY_VIRTUAL_BUS) in ipr_clear_res_target()
1407 clear_bit(res->target, ioa_cfg->array_ids); in ipr_clear_res_target()
1408 else if (res->bus == IPR_VSET_VIRTUAL_BUS) in ipr_clear_res_target()
1409 clear_bit(res->target, ioa_cfg->vset_ids); in ipr_clear_res_target()
1410 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { in ipr_clear_res_target()
1411 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) in ipr_clear_res_target()
1412 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) in ipr_clear_res_target()
1414 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1416 } else if (res->bus == 0) in ipr_clear_res_target()
1417 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1421 * ipr_handle_config_change - Handle a config change from the adapter
1437 if (ioa_cfg->sis64) { in ipr_handle_config_change()
1438 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; in ipr_handle_config_change()
1439 cc_res_handle = cfgtew.u.cfgte64->res_handle; in ipr_handle_config_change()
1441 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; in ipr_handle_config_change()
1442 cc_res_handle = cfgtew.u.cfgte->res_handle; in ipr_handle_config_change()
1445 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_handle_config_change()
1446 if (res->res_handle == cc_res_handle) { in ipr_handle_config_change()
1453 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_handle_config_change()
1460 res = list_entry(ioa_cfg->free_res_q.next, in ipr_handle_config_change()
1463 list_del(&res->queue); in ipr_handle_config_change()
1465 list_add_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_handle_config_change()
1470 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { in ipr_handle_config_change()
1471 if (res->sdev) { in ipr_handle_config_change()
1472 res->del_from_ml = 1; in ipr_handle_config_change()
1473 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_handle_config_change()
1474 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1477 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_handle_config_change()
1479 } else if (!res->sdev || res->del_from_ml) { in ipr_handle_config_change()
1480 res->add_to_ml = 1; in ipr_handle_config_change()
1481 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1488 * ipr_process_ccn - Op done function for a CCN.
1499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_ccn()
1500 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; in ipr_process_ccn()
1501 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_ccn()
1503 list_del_init(&hostrcb->queue); in ipr_process_ccn()
1504 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_ccn()
1509 dev_err(&ioa_cfg->pdev->dev, in ipr_process_ccn()
1519 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1532 i--; in strip_and_pad_whitespace()
1539 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1553 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); in ipr_log_vpd_compact()
1554 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer); in ipr_log_vpd_compact()
1556 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN); in ipr_log_vpd_compact()
1557 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer); in ipr_log_vpd_compact()
1559 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN); in ipr_log_vpd_compact()
1566 * ipr_log_vpd - Log the passed VPD to the error log.
1577 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); in ipr_log_vpd()
1578 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id, in ipr_log_vpd()
1583 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN); in ipr_log_vpd()
1589 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1600 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd); in ipr_log_ext_vpd_compact()
1602 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); in ipr_log_ext_vpd_compact()
1606 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1614 ipr_log_vpd(&vpd->vpd); in ipr_log_ext_vpd()
1615 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]), in ipr_log_ext_vpd()
1616 be32_to_cpu(vpd->wwid[1])); in ipr_log_ext_vpd()
1620 * ipr_log_enhanced_cache_error - Log a cache error.
1632 if (ioa_cfg->sis64) in ipr_log_enhanced_cache_error()
1633 error = &hostrcb->hcam.u.error64.u.type_12_error; in ipr_log_enhanced_cache_error()
1635 error = &hostrcb->hcam.u.error.u.type_12_error; in ipr_log_enhanced_cache_error()
1637 ipr_err("-----Current Configuration-----\n"); in ipr_log_enhanced_cache_error()
1639 ipr_log_ext_vpd(&error->ioa_vpd); in ipr_log_enhanced_cache_error()
1641 ipr_log_ext_vpd(&error->cfc_vpd); in ipr_log_enhanced_cache_error()
1643 ipr_err("-----Expected Configuration-----\n"); in ipr_log_enhanced_cache_error()
1645 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd); in ipr_log_enhanced_cache_error()
1647 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd); in ipr_log_enhanced_cache_error()
1650 be32_to_cpu(error->ioa_data[0]), in ipr_log_enhanced_cache_error()
1651 be32_to_cpu(error->ioa_data[1]), in ipr_log_enhanced_cache_error()
1652 be32_to_cpu(error->ioa_data[2])); in ipr_log_enhanced_cache_error()
1656 * ipr_log_cache_error - Log a cache error.
1667 &hostrcb->hcam.u.error.u.type_02_error; in ipr_log_cache_error()
1669 ipr_err("-----Current Configuration-----\n"); in ipr_log_cache_error()
1671 ipr_log_vpd(&error->ioa_vpd); in ipr_log_cache_error()
1673 ipr_log_vpd(&error->cfc_vpd); in ipr_log_cache_error()
1675 ipr_err("-----Expected Configuration-----\n"); in ipr_log_cache_error()
1677 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd); in ipr_log_cache_error()
1679 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd); in ipr_log_cache_error()
1682 be32_to_cpu(error->ioa_data[0]), in ipr_log_cache_error()
1683 be32_to_cpu(error->ioa_data[1]), in ipr_log_cache_error()
1684 be32_to_cpu(error->ioa_data[2])); in ipr_log_cache_error()
1688 * ipr_log_enhanced_config_error - Log a configuration error.
1702 error = &hostrcb->hcam.u.error.u.type_13_error; in ipr_log_enhanced_config_error()
1703 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_enhanced_config_error()
1706 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_enhanced_config_error()
1708 dev_entry = error->dev; in ipr_log_enhanced_config_error()
1713 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_enhanced_config_error()
1714 ipr_log_ext_vpd(&dev_entry->vpd); in ipr_log_enhanced_config_error()
1716 ipr_err("-----New Device Information-----\n"); in ipr_log_enhanced_config_error()
1717 ipr_log_ext_vpd(&dev_entry->new_vpd); in ipr_log_enhanced_config_error()
1720 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_enhanced_config_error()
1723 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_enhanced_config_error()
1728 * ipr_log_sis64_config_error - Log a configuration error.
1743 error = &hostrcb->hcam.u.error64.u.type_23_error; in ipr_log_sis64_config_error()
1744 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_sis64_config_error()
1747 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_sis64_config_error()
1749 dev_entry = error->dev; in ipr_log_sis64_config_error()
1755 __ipr_format_res_path(dev_entry->res_path, in ipr_log_sis64_config_error()
1757 ipr_log_ext_vpd(&dev_entry->vpd); in ipr_log_sis64_config_error()
1759 ipr_err("-----New Device Information-----\n"); in ipr_log_sis64_config_error()
1760 ipr_log_ext_vpd(&dev_entry->new_vpd); in ipr_log_sis64_config_error()
1763 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_sis64_config_error()
1766 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_sis64_config_error()
1771 * ipr_log_config_error - Log a configuration error.
1785 error = &hostrcb->hcam.u.error.u.type_03_error; in ipr_log_config_error()
1786 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_config_error()
1789 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_config_error()
1791 dev_entry = error->dev; in ipr_log_config_error()
1796 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_config_error()
1797 ipr_log_vpd(&dev_entry->vpd); in ipr_log_config_error()
1799 ipr_err("-----New Device Information-----\n"); in ipr_log_config_error()
1800 ipr_log_vpd(&dev_entry->new_vpd); in ipr_log_config_error()
1803 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_config_error()
1806 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_config_error()
1809 be32_to_cpu(dev_entry->ioa_data[0]), in ipr_log_config_error()
1810 be32_to_cpu(dev_entry->ioa_data[1]), in ipr_log_config_error()
1811 be32_to_cpu(dev_entry->ioa_data[2]), in ipr_log_config_error()
1812 be32_to_cpu(dev_entry->ioa_data[3]), in ipr_log_config_error()
1813 be32_to_cpu(dev_entry->ioa_data[4])); in ipr_log_config_error()
1818 * ipr_log_enhanced_array_error - Log an array configuration error.
1831 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_enhanced_array_error()
1833 error = &hostrcb->hcam.u.error.u.type_14_error; in ipr_log_enhanced_array_error()
1838 error->protection_level, in ipr_log_enhanced_array_error()
1839 ioa_cfg->host->host_no, in ipr_log_enhanced_array_error()
1840 error->last_func_vset_res_addr.bus, in ipr_log_enhanced_array_error()
1841 error->last_func_vset_res_addr.target, in ipr_log_enhanced_array_error()
1842 error->last_func_vset_res_addr.lun); in ipr_log_enhanced_array_error()
1846 array_entry = error->array_member; in ipr_log_enhanced_array_error()
1847 num_entries = min_t(u32, be32_to_cpu(error->num_entries), in ipr_log_enhanced_array_error()
1848 ARRAY_SIZE(error->array_member)); in ipr_log_enhanced_array_error()
1851 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_enhanced_array_error()
1854 if (be32_to_cpu(error->exposed_mode_adn) == i) in ipr_log_enhanced_array_error()
1859 ipr_log_ext_vpd(&array_entry->vpd); in ipr_log_enhanced_array_error()
1860 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_enhanced_array_error()
1861 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_enhanced_array_error()
1869 * ipr_log_array_error - Log an array configuration error.
1882 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_array_error()
1884 error = &hostrcb->hcam.u.error.u.type_04_error; in ipr_log_array_error()
1889 error->protection_level, in ipr_log_array_error()
1890 ioa_cfg->host->host_no, in ipr_log_array_error()
1891 error->last_func_vset_res_addr.bus, in ipr_log_array_error()
1892 error->last_func_vset_res_addr.target, in ipr_log_array_error()
1893 error->last_func_vset_res_addr.lun); in ipr_log_array_error()
1897 array_entry = error->array_member; in ipr_log_array_error()
1900 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_array_error()
1903 if (be32_to_cpu(error->exposed_mode_adn) == i) in ipr_log_array_error()
1908 ipr_log_vpd(&array_entry->vpd); in ipr_log_array_error()
1910 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_array_error()
1911 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_array_error()
1917 array_entry = error->array_member2; in ipr_log_array_error()
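The zero_sn arrays in these loggers use GCC's range designator, [first ... last], to fill every byte with ASCII '0'; array entries whose serial number matches it are empty slots and get skipped. A standalone demonstration (GNU C extension; IPR_SERIAL_NUM_LEN assumed to be 8 here):

#include <stdio.h>
#include <string.h>

#define IPR_SERIAL_NUM_LEN 8   /* assumed value for the sketch */

int main(void)
{
	/* GNU range designator: every element from 0 through LEN-1 is '0',
	 * the all-zeroes serial the loggers treat as an empty array slot. */
	const char zero_sn[IPR_SERIAL_NUM_LEN] = {
		[0 ... IPR_SERIAL_NUM_LEN - 1] = '0'
	};
	char sn[IPR_SERIAL_NUM_LEN];

	memset(sn, '0', sizeof(sn));
	printf("empty slot? %s\n",
	       memcmp(sn, zero_sn, IPR_SERIAL_NUM_LEN) ? "no" : "yes");
	return 0;
}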
1924 * ipr_log_hex_data - Log additional hex IOA error data.
1939 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_log_hex_data()
1952 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1964 if (ioa_cfg->sis64) in ipr_log_enhanced_dual_ioa_error()
1965 error = &hostrcb->hcam.u.error64.u.type_17_error; in ipr_log_enhanced_dual_ioa_error()
1967 error = &hostrcb->hcam.u.error.u.type_17_error; in ipr_log_enhanced_dual_ioa_error()
1969 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_enhanced_dual_ioa_error()
1970 strim(error->failure_reason); in ipr_log_enhanced_dual_ioa_error()
1972 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, in ipr_log_enhanced_dual_ioa_error()
1973 be32_to_cpu(hostrcb->hcam.u.error.prc)); in ipr_log_enhanced_dual_ioa_error()
1974 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); in ipr_log_enhanced_dual_ioa_error()
1975 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_enhanced_dual_ioa_error()
1976 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_enhanced_dual_ioa_error()
1982 * ipr_log_dual_ioa_error - Log a dual adapter error.
1994 error = &hostrcb->hcam.u.error.u.type_07_error; in ipr_log_dual_ioa_error()
1995 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_dual_ioa_error()
1996 strim(error->failure_reason); in ipr_log_dual_ioa_error()
1998 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, in ipr_log_dual_ioa_error()
1999 be32_to_cpu(hostrcb->hcam.u.error.prc)); in ipr_log_dual_ioa_error()
2000 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); in ipr_log_dual_ioa_error()
2001 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_dual_ioa_error()
2002 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_dual_ioa_error()
2027 * ipr_log_fabric_path - Log a fabric path error
2038 u8 path_state = fabric->path_state; in ipr_log_fabric_path()
2050 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { in ipr_log_fabric_path()
2053 fabric->ioa_port); in ipr_log_fabric_path()
2054 } else if (fabric->cascaded_expander == 0xff) { in ipr_log_fabric_path()
2057 fabric->ioa_port, fabric->phy); in ipr_log_fabric_path()
2058 } else if (fabric->phy == 0xff) { in ipr_log_fabric_path()
2061 fabric->ioa_port, fabric->cascaded_expander); in ipr_log_fabric_path()
2065 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); in ipr_log_fabric_path()
2072 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); in ipr_log_fabric_path()
2076 * ipr_log64_fabric_path - Log a fabric path error
2087 u8 path_state = fabric->path_state; in ipr_log64_fabric_path()
2102 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_fabric_path()
2103 fabric->res_path, in ipr_log64_fabric_path()
2110 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, in ipr_log64_fabric_path()
2156 * ipr_log_path_elem - Log a fabric path element.
2167 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; in ipr_log_path_elem()
2168 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; in ipr_log_path_elem()
2184 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2185 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2187 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { in ipr_log_path_elem()
2190 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2191 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2192 } else if (cfg->cascaded_expander == 0xff) { in ipr_log_path_elem()
2195 path_type_desc[i].desc, cfg->phy, in ipr_log_path_elem()
2196 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2197 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2198 } else if (cfg->phy == 0xff) { in ipr_log_path_elem()
2201 path_type_desc[i].desc, cfg->cascaded_expander, in ipr_log_path_elem()
2202 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2203 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2207 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, in ipr_log_path_elem()
2208 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2209 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2217 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, in ipr_log_path_elem()
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2223 * ipr_log64_path_elem - Log a fabric path element.
2234 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; in ipr_log64_path_elem()
2235 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; in ipr_log64_path_elem()
2236 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; in ipr_log64_path_elem()
2252 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2253 cfg->res_path, buffer, sizeof(buffer)), in ipr_log64_path_elem()
2254 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log64_path_elem()
2255 be32_to_cpu(cfg->wwid[0]), in ipr_log64_path_elem()
2256 be32_to_cpu(cfg->wwid[1])); in ipr_log64_path_elem()
2261 "WWN=%08X%08X\n", cfg->type_status, in ipr_log64_path_elem()
2262 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2263 cfg->res_path, buffer, sizeof(buffer)), in ipr_log64_path_elem()
2264 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log64_path_elem()
2265 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log64_path_elem()
2269 * ipr_log_fabric_error - Log a fabric error.
2284 error = &hostrcb->hcam.u.error.u.type_20_error; in ipr_log_fabric_error()
2285 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_fabric_error()
2286 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); in ipr_log_fabric_error()
2288 add_len = be32_to_cpu(hostrcb->hcam.length) - in ipr_log_fabric_error()
2292 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { in ipr_log_fabric_error()
2297 add_len -= be16_to_cpu(fabric->length); in ipr_log_fabric_error()
2299 ((unsigned long)fabric + be16_to_cpu(fabric->length)); in ipr_log_fabric_error()
2306 * ipr_log_sis64_array_error - Log a sis64 array error.
2320 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_sis64_array_error()
2322 error = &hostrcb->hcam.u.error64.u.type_24_error; in ipr_log_sis64_array_error()
2327 error->protection_level, in ipr_log_sis64_array_error()
2328 ipr_format_res_path(ioa_cfg, error->last_res_path, in ipr_log_sis64_array_error()
2333 array_entry = error->array_member; in ipr_log_sis64_array_error()
2334 num_entries = min_t(u32, error->num_entries, in ipr_log_sis64_array_error()
2335 ARRAY_SIZE(error->array_member)); in ipr_log_sis64_array_error()
2339 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_sis64_array_error()
2342 if (error->exposed_mode_adn == i) in ipr_log_sis64_array_error()
2348 ipr_log_ext_vpd(&array_entry->vpd); in ipr_log_sis64_array_error()
2350 ipr_format_res_path(ioa_cfg, array_entry->res_path, in ipr_log_sis64_array_error()
2354 array_entry->expected_res_path, in ipr_log_sis64_array_error()
2362 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2377 error = &hostrcb->hcam.u.error64.u.type_30_error; in ipr_log_sis64_fabric_error()
2379 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_sis64_fabric_error()
2380 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); in ipr_log_sis64_fabric_error()
2382 add_len = be32_to_cpu(hostrcb->hcam.length) - in ipr_log_sis64_fabric_error()
2386 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { in ipr_log_sis64_fabric_error()
2391 add_len -= be16_to_cpu(fabric->length); in ipr_log_sis64_fabric_error()
2393 ((unsigned long)fabric + be16_to_cpu(fabric->length)); in ipr_log_sis64_fabric_error()
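Both fabric-error loggers walk a buffer of variable-length descriptors: each record carries its own big-endian length, the remaining byte count shrinks by that amount, and the cursor advances the same number of bytes. A self-contained sketch of that walk (hypothetical record layout, not the driver's ipr_hostrcb_fabric_desc):

#include <stdint.h>
#include <stdio.h>

/* Read a 16-bit big-endian length, as be16_to_cpu() does on wire data. */
static uint16_t be16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }

int main(void)
{
	/* Two records, each prefixed with its own big-endian length. */
	const uint8_t buf[] = {
		0x00, 0x04, 1, 0,             /* length 4, type 1 */
		0x00, 0x06, 2, 0, 0, 0,       /* length 6, type 2 */
	};
	size_t add_len = sizeof(buf);
	const uint8_t *rec = buf;

	while (add_len >= 4) {
		uint16_t length = be16(rec);

		printf("type %u, length %u\n", rec[2], length);
		if (length < 4 || length > add_len)
			break;                /* malformed descriptor */
		add_len -= length;            /* add_len -= be16_to_cpu(...) */
		rec += length;                /* advance by the record's length */
	}
	return 0;
}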
2400 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2412 error = &hostrcb->hcam.u.error64.u.type_41_error; in ipr_log_sis64_service_required_error()
2414 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_sis64_service_required_error()
2415 ipr_err("Primary Failure Reason: %s\n", error->failure_reason); in ipr_log_sis64_service_required_error()
2416 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_sis64_service_required_error()
2417 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_sis64_service_required_error()
2422 * ipr_log_generic_error - Log an adapter error.
2432 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, in ipr_log_generic_error()
2433 be32_to_cpu(hostrcb->hcam.length)); in ipr_log_generic_error()
2437 * ipr_log_sis64_device_error - Log a device error.
2450 error = &hostrcb->hcam.u.error64.u.type_21_error; in ipr_log_sis64_device_error()
2452 ipr_err("-----Failing Device Information-----\n"); in ipr_log_sis64_device_error()
2454 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), in ipr_log_sis64_device_error()
2455 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); in ipr_log_sis64_device_error()
2457 __ipr_format_res_path(error->res_path, in ipr_log_sis64_device_error()
2459 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; in ipr_log_sis64_device_error()
2460 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; in ipr_log_sis64_device_error()
2461 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); in ipr_log_sis64_device_error()
2462 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc); in ipr_log_sis64_device_error()
2464 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); in ipr_log_sis64_device_error()
2466 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); in ipr_log_sis64_device_error()
2469 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); in ipr_log_sis64_device_error()
2473 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2495 * ipr_handle_log_data - Log an adapter error.
2511 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) in ipr_handle_log_data()
2514 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) in ipr_handle_log_data()
2515 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); in ipr_handle_log_data()
2517 if (ioa_cfg->sis64) in ipr_handle_log_data()
2518 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); in ipr_handle_log_data()
2520 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_handle_log_data()
2522 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || in ipr_handle_log_data()
2525 scsi_report_bus_reset(ioa_cfg->host, in ipr_handle_log_data()
2526 hostrcb->hcam.u.error.fd_res_addr.bus); in ipr_handle_log_data()
2535 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { in ipr_handle_log_data()
2536 error = &hostrcb->hcam.u.error64.u.type_21_error; in ipr_handle_log_data()
2538 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && in ipr_handle_log_data()
2539 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_handle_log_data()
2546 ioa_cfg->errors_logged++; in ipr_handle_log_data()
2548 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) in ipr_handle_log_data()
2550 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) in ipr_handle_log_data()
2551 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); in ipr_handle_log_data()
2553 switch (hostrcb->hcam.overlay_id) { in ipr_handle_log_data()
2611 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q, in ipr_get_free_hostrcb()
2615 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers."); in ipr_get_free_hostrcb()
2616 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q, in ipr_get_free_hostrcb()
2620 list_del_init(&hostrcb->queue); in ipr_get_free_hostrcb()
2625 * ipr_process_error - Op done function for an adapter error log.
2637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_error()
2638 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; in ipr_process_error()
2639 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_error()
2642 if (ioa_cfg->sis64) in ipr_process_error()
2643 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); in ipr_process_error()
2645 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_process_error()
2647 list_del_init(&hostrcb->queue); in ipr_process_error()
2648 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_error()
2656 dev_err(&ioa_cfg->pdev->dev, in ipr_process_error()
2660 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); in ipr_process_error()
2661 schedule_work(&ioa_cfg->work_q); in ipr_process_error()
2668 * ipr_timeout - An internally generated op has timed out.
2681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_timeout()
2684 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2686 ioa_cfg->errors_logged++; in ipr_timeout()
2687 dev_err(&ioa_cfg->pdev->dev, in ipr_timeout()
2690 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_timeout()
2691 ioa_cfg->sdt_state = GET_DUMP; in ipr_timeout()
2693 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) in ipr_timeout()
2696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2701 * ipr_oper_timeout - Adapter timed out transitioning to operational
2714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_oper_timeout()
2717 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2719 ioa_cfg->errors_logged++; in ipr_oper_timeout()
2720 dev_err(&ioa_cfg->pdev->dev, in ipr_oper_timeout()
2723 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_oper_timeout()
2724 ioa_cfg->sdt_state = GET_DUMP; in ipr_oper_timeout()
2726 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { in ipr_oper_timeout()
2728 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_oper_timeout()
2732 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2737 * ipr_find_ses_entry - Find matching SES in SES table
2752 if (ste->compare_product_id_byte[j] == 'X') { in ipr_find_ses_entry()
2753 vpids = &res->std_inq_data.vpids; in ipr_find_ses_entry()
2754 if (vpids->product_id[j] == ste->product_id[j]) in ipr_find_ses_entry()
2770 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2777 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2788 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_get_max_scsi_speed()
2789 if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) in ipr_get_max_scsi_speed()
2792 if (bus != res->bus) in ipr_get_max_scsi_speed()
2798 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); in ipr_get_max_scsi_speed()
2805 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2807 * @max_delay: max delay in microseconds to wait
2817 int delay = 1; in ipr_wait_iodbg_ack() local
2820 while (delay < max_delay) { in ipr_wait_iodbg_ack()
2821 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_wait_iodbg_ack()
2826 /* udelay cannot be used if delay is more than a few milliseconds */ in ipr_wait_iodbg_ack()
2827 if ((delay / 1000) > MAX_UDELAY_MS) in ipr_wait_iodbg_ack()
2828 mdelay(delay / 1000); in ipr_wait_iodbg_ack()
2830 udelay(delay); in ipr_wait_iodbg_ack()
2832 delay += delay; in ipr_wait_iodbg_ack()
2834 return -EIO; in ipr_wait_iodbg_ack()
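delay += delay doubles the wait on every pass, so ipr_wait_iodbg_ack() polls with exponential backoff — 1 us, 2 us, 4 us, ... up to max_delay — switching from udelay() to mdelay() once a single wait exceeds what udelay() can handle. A userspace sketch of the loop (usleep() standing in for both):

#include <stdio.h>
#include <unistd.h>

#define MAX_DELAY_US 100000   /* total backoff budget, ~100 ms */

/* Poll with exponentially growing sleeps, like ipr_wait_iodbg_ack();
 * the driver splits udelay()/mdelay() by wait length. */
static int wait_for_ack(volatile int *ack_reg)
{
	int delay = 1;

	while (delay < MAX_DELAY_US) {
		if (*ack_reg)
			return 0;
		usleep(delay);
		delay += delay;       /* 1, 2, 4, 8, ... microseconds */
	}
	return -1;                    /* -EIO in the driver */
}

int main(void)
{
	int ack = 1;                  /* pretend the adapter already acked */

	printf("wait_for_ack: %d\n", wait_for_ack(&ack));
	return 0;
}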
2838 * ipr_get_sis64_dump_data_section - Dump IOA memory
2854 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); in ipr_get_sis64_dump_data_section()
2855 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); in ipr_get_sis64_dump_data_section()
2863 * ipr_get_ldump_data_section - Dump IOA memory
2870 * 0 on success / -EIO on failure
2877 int i, delay = 0; in ipr_get_ldump_data_section() local
2879 if (ioa_cfg->sis64) in ipr_get_ldump_data_section()
2885 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2890 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2892 return -EIO; in ipr_get_ldump_data_section()
2895 /* Signal LDUMP interlocked - clear IO debug ack */ in ipr_get_ldump_data_section()
2897 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2900 writel(start_addr, ioa_cfg->ioa_mailbox); in ipr_get_ldump_data_section()
2902 /* Signal address valid - clear IOA Reset alert */ in ipr_get_ldump_data_section()
2904 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2910 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2912 return -EIO; in ipr_get_ldump_data_section()
2916 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); in ipr_get_ldump_data_section()
2920 if (i < (length_in_words - 1)) { in ipr_get_ldump_data_section()
2921 /* Signal dump data received - Clear IO debug Ack */ in ipr_get_ldump_data_section()
2923 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2929 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2932 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2934 /* Signal dump data received - Clear IO debug Ack */ in ipr_get_ldump_data_section()
2936 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2938 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ in ipr_get_ldump_data_section()
2939 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { in ipr_get_ldump_data_section()
2941 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2947 delay += 10; in ipr_get_ldump_data_section()
2955 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2972 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; in ipr_sdt_copy()
2974 if (ioa_cfg->sis64) in ipr_sdt_copy()
2980 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { in ipr_sdt_copy()
2981 if (ioa_dump->page_offset >= PAGE_SIZE || in ipr_sdt_copy()
2982 ioa_dump->page_offset == 0) { in ipr_sdt_copy()
2990 ioa_dump->page_offset = 0; in ipr_sdt_copy()
2991 ioa_dump->ioa_data[ioa_dump->next_page_index] = page; in ipr_sdt_copy()
2992 ioa_dump->next_page_index++; in ipr_sdt_copy()
2994 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; in ipr_sdt_copy()
2996 rem_len = length - bytes_copied; in ipr_sdt_copy()
2997 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; in ipr_sdt_copy()
3000 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
3001 if (ioa_cfg->sdt_state == ABORT_DUMP) { in ipr_sdt_copy()
3002 rc = -EIO; in ipr_sdt_copy()
3006 &page[ioa_dump->page_offset / 4], in ipr_sdt_copy()
3009 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
3012 ioa_dump->page_offset += cur_len; in ipr_sdt_copy()
3025 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3033 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; in ipr_init_dump_entry_hdr()
3034 hdr->num_elems = 1; in ipr_init_dump_entry_hdr()
3035 hdr->offset = sizeof(*hdr); in ipr_init_dump_entry_hdr()
3036 hdr->status = IPR_DUMP_STATUS_SUCCESS; in ipr_init_dump_entry_hdr()
3040 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3050 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_dump_ioa_type_data()
3052 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); in ipr_dump_ioa_type_data()
3053 driver_dump->ioa_type_entry.hdr.len = in ipr_dump_ioa_type_data()
3054 sizeof(struct ipr_dump_ioa_type_entry) - in ipr_dump_ioa_type_data()
3056 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_dump_ioa_type_data()
3057 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; in ipr_dump_ioa_type_data()
3058 driver_dump->ioa_type_entry.type = ioa_cfg->type; in ipr_dump_ioa_type_data()
3059 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | in ipr_dump_ioa_type_data()
3060 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | in ipr_dump_ioa_type_data()
3061 ucode_vpd->minor_release[1]; in ipr_dump_ioa_type_data()
3062 driver_dump->hdr.num_entries++; in ipr_dump_ioa_type_data()
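fw_version above is four microcode VPD bytes packed into one 32-bit word: major release, card type, and the two minor-release bytes. The shift arithmetic in isolation (example byte values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t major_release = 0x02, card_type = 0x5a;   /* example bytes */
	uint8_t minor_release[2] = { 0x10, 0x01 };

	uint32_t fw_version = (uint32_t)major_release << 24 |
			      (uint32_t)card_type << 16 |
			      (uint32_t)minor_release[0] << 8 |
			      minor_release[1];

	printf("fw_version = %08x\n", (unsigned)fw_version);  /* 025a1001 */
	return 0;
}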
3066 * ipr_dump_version_data - Fill in the driver version in the dump.
3076 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); in ipr_dump_version_data()
3077 driver_dump->version_entry.hdr.len = in ipr_dump_version_data()
3078 sizeof(struct ipr_dump_version_entry) - in ipr_dump_version_data()
3080 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; in ipr_dump_version_data()
3081 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; in ipr_dump_version_data()
3082 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); in ipr_dump_version_data()
3083 driver_dump->hdr.num_entries++; in ipr_dump_version_data()
3087 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3097 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); in ipr_dump_trace_data()
3098 driver_dump->trace_entry.hdr.len = in ipr_dump_trace_data()
3099 sizeof(struct ipr_dump_trace_entry) - in ipr_dump_trace_data()
3101 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_dump_trace_data()
3102 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; in ipr_dump_trace_data()
3103 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); in ipr_dump_trace_data()
3104 driver_dump->hdr.num_entries++; in ipr_dump_trace_data()
3108 * ipr_dump_location_data - Fill in the IOA location in the dump.
3118 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); in ipr_dump_location_data()
3119 driver_dump->location_entry.hdr.len = in ipr_dump_location_data()
3120 sizeof(struct ipr_dump_location_entry) - in ipr_dump_location_data()
3122 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; in ipr_dump_location_data()
3123 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; in ipr_dump_location_data()
3124 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); in ipr_dump_location_data()
3125 driver_dump->hdr.num_entries++; in ipr_dump_location_data()
3129 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3140 struct ipr_driver_dump *driver_dump = &dump->driver_dump; in ipr_get_ioa_dump()
3141 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; in ipr_get_ioa_dump()
3150 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3152 if (ioa_cfg->sdt_state != READ_DUMP) { in ipr_get_ioa_dump()
3153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3157 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3158 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3160 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3163 start_addr = readl(ioa_cfg->ioa_mailbox); in ipr_get_ioa_dump()
3165 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { in ipr_get_ioa_dump()
3166 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3172 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); in ipr_get_ioa_dump()
3174 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; in ipr_get_ioa_dump()
3177 driver_dump->hdr.len = sizeof(struct ipr_driver_dump); in ipr_get_ioa_dump()
3178 driver_dump->hdr.num_entries = 1; in ipr_get_ioa_dump()
3179 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); in ipr_get_ioa_dump()
3180 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; in ipr_get_ioa_dump()
3181 driver_dump->hdr.os = IPR_DUMP_OS_LINUX; in ipr_get_ioa_dump()
3182 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; in ipr_get_ioa_dump()
3190 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); in ipr_get_ioa_dump()
3193 ipr_init_dump_entry_hdr(&ioa_dump->hdr); in ipr_get_ioa_dump()
3194 ioa_dump->hdr.len = 0; in ipr_get_ioa_dump()
3195 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_get_ioa_dump()
3196 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; in ipr_get_ioa_dump()
3202 sdt = &ioa_dump->sdt; in ipr_get_ioa_dump()
3204 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3218 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && in ipr_get_ioa_dump()
3219 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { in ipr_get_ioa_dump()
3220 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3222 rc, be32_to_cpu(sdt->hdr.state)); in ipr_get_ioa_dump()
3223 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; in ipr_get_ioa_dump()
3224 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3229 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); in ipr_get_ioa_dump()
3235 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); in ipr_get_ioa_dump()
3236 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3237 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); in ipr_get_ioa_dump()
3239 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); in ipr_get_ioa_dump()
3241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3244 if (ioa_dump->hdr.len > max_dump_size) { in ipr_get_ioa_dump()
3245 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; in ipr_get_ioa_dump()
3249 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { in ipr_get_ioa_dump()
3250 sdt_word = be32_to_cpu(sdt->entry[i].start_token); in ipr_get_ioa_dump()
3251 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3252 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); in ipr_get_ioa_dump()
3255 end_off = be32_to_cpu(sdt->entry[i].end_token); in ipr_get_ioa_dump()
3258 bytes_to_copy = end_off - start_off; in ipr_get_ioa_dump()
3264 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; in ipr_get_ioa_dump()
3272 ioa_dump->hdr.len += bytes_copied; in ipr_get_ioa_dump()
3275 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; in ipr_get_ioa_dump()
3282 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); in ipr_get_ioa_dump()
3285 driver_dump->hdr.len += ioa_dump->hdr.len; in ipr_get_ioa_dump()
3287 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3296 * ipr_release_dump - Free adapter dump memory
3305 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; in ipr_release_dump()
3310 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3311 ioa_cfg->dump = NULL; in ipr_release_dump()
3312 ioa_cfg->sdt_state = INACTIVE; in ipr_release_dump()
3313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3315 for (i = 0; i < dump->ioa_dump.next_page_index; i++) in ipr_release_dump()
3316 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); in ipr_release_dump()
3318 vfree(dump->ioa_dump.ioa_data); in ipr_release_dump()
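/*
 * Editor's sketch (assumed layout, mirroring ipr_release_dump above):
 * kref release callbacks receive the embedded kref, so the dump is
 * recovered with container_of() before its page array and the object
 * itself are freed.
 */
struct example_dump {
	struct kref kref;		/* reference count */
	struct ipr_ioa_dump ioa_dump;	/* holds the ioa_data page array */
};

static void example_release(struct kref *kref)
{
	struct example_dump *dump = container_of(kref, struct example_dump, kref);
	int i;

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long)dump->ioa_dump.ioa_data[i]);
	vfree(dump->ioa_dump.ioa_data);	/* the pointer array */
	kfree(dump);			/* finally the object itself */
}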
3334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3339 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_add_remove_thread()
3340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3344 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3345 if (res->del_from_ml && res->sdev) { in ipr_add_remove_thread()
3347 sdev = res->sdev; in ipr_add_remove_thread()
3349 if (!res->add_to_ml) in ipr_add_remove_thread()
3350 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_add_remove_thread()
3352 res->del_from_ml = 0; in ipr_add_remove_thread()
3353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3356 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3363 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3364 if (res->add_to_ml) { in ipr_add_remove_thread()
3365 bus = res->bus; in ipr_add_remove_thread()
3366 target = res->target; in ipr_add_remove_thread()
3367 lun = res->lun; in ipr_add_remove_thread()
3368 res->add_to_ml = 0; in ipr_add_remove_thread()
3369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3370 scsi_add_device(ioa_cfg->host, bus, target, lun); in ipr_add_remove_thread()
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3376 ioa_cfg->scan_done = 1; in ipr_add_remove_thread()
3377 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3378 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); in ipr_add_remove_thread()
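/*
 * Editor's sketch of the drop-lock-and-restart idiom used above:
 * scsi_add_device() and scsi_remove_device() may sleep, so they must
 * not be called under host_lock.  The identifiers are cached first,
 * because the resource entry may be recycled once the lock is
 * dropped, and the walk restarts after re-locking since the queue may
 * have changed ("restart" label assumed):
 */
restart:
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;	/* list may have mutated while unlocked */
		}
	}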
3383 * ipr_worker_thread - Worker thread
3387 * of adding and removing devices from the mid-layer as configuration
3401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3403 if (ioa_cfg->sdt_state == READ_DUMP) { in ipr_worker_thread()
3404 dump = ioa_cfg->dump; in ipr_worker_thread()
3406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3409 kref_get(&dump->kref); in ipr_worker_thread()
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3412 kref_put(&dump->kref, ipr_release_dump); in ipr_worker_thread()
3414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3415 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) in ipr_worker_thread()
3417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3421 if (ioa_cfg->scsi_unblock) { in ipr_worker_thread()
3422 ioa_cfg->scsi_unblock = 0; in ipr_worker_thread()
3423 ioa_cfg->scsi_blocked = 0; in ipr_worker_thread()
3424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3425 scsi_unblock_requests(ioa_cfg->host); in ipr_worker_thread()
3426 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3427 if (ioa_cfg->scsi_blocked) in ipr_worker_thread()
3428 scsi_block_requests(ioa_cfg->host); in ipr_worker_thread()
3431 if (!ioa_cfg->scan_enabled) { in ipr_worker_thread()
3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3436 schedule_work(&ioa_cfg->scsi_add_work_q); in ipr_worker_thread()
3438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3444 * ipr_read_trace - Dump the adapter trace
3461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_trace()
3465 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3466 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, in ipr_read_trace()
3468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3484 * ipr_show_fw_version - Show the firmware version
3496 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_version()
3497 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_show_fw_version()
3501 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3503 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_show_fw_version()
3504 ucode_vpd->minor_release[0], in ipr_show_fw_version()
3505 ucode_vpd->minor_release[1]); in ipr_show_fw_version()
3506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3519 * ipr_show_log_level - Show the adapter's error logging level
3531 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_log_level()
3535 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3536 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); in ipr_show_log_level()
3537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3542 * ipr_store_log_level - Change the adapter's error logging level
3556 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_log_level()
3559 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3560 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); in ipr_store_log_level()
3561 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
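/*
 * Editor's note: the show/store pair above reaches sysfs through a
 * struct device_attribute; the wiring below mirrors the driver's
 * attribute table (exact declaration assumed).  Newer code would
 * parse the value with kstrtoul() instead of simple_strtoul().
 */
static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name =	"log_level",
		.mode =	S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};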
3575 * ipr_store_diagnostics - IOA Diagnostics interface
3592 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_diagnostics()
3597 return -EACCES; in ipr_store_diagnostics()
3599 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3600 while (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3601 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3602 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3603 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3606 ioa_cfg->errors_logged = 0; in ipr_store_diagnostics()
3609 if (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3611 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3617 return -EIO; in ipr_store_diagnostics()
3620 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3621 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) in ipr_store_diagnostics()
3622 rc = -EIO; in ipr_store_diagnostics()
3623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3637 * ipr_show_adapter_state - Show the adapter's state
3649 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_adapter_state()
3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3654 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3663 * ipr_store_adapter_state - Change adapter state
3679 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_adapter_state()
3684 return -EACCES; in ipr_store_adapter_state()
3686 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3687 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3689 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_store_adapter_state()
3690 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3691 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3692 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3695 ioa_cfg->reset_retries = 0; in ipr_store_adapter_state()
3696 ioa_cfg->in_ioa_bringdown = 0; in ipr_store_adapter_state()
3699 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3700 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_adapter_state()
3715 * ipr_store_reset_adapter - Reset the adapter
3731 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_reset_adapter()
3736 return -EACCES; in ipr_store_reset_adapter()
3738 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3739 if (!ioa_cfg->in_reset_reload) in ipr_store_reset_adapter()
3741 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3742 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_reset_adapter()
3757 * ipr_show_iopoll_weight - Show ipr polling mode
3769 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_iopoll_weight()
3773 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_show_iopoll_weight()
3774 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); in ipr_show_iopoll_weight()
3775 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_show_iopoll_weight()
3781 * ipr_store_iopoll_weight - Change the adapter's polling mode
3795 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_iopoll_weight()
3800 if (!ioa_cfg->sis64) { in ipr_store_iopoll_weight()
3801 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); in ipr_store_iopoll_weight()
3802 return -EINVAL; in ipr_store_iopoll_weight()
3805 return -EINVAL; in ipr_store_iopoll_weight()
3808 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); in ipr_store_iopoll_weight()
3809 return -EINVAL; in ipr_store_iopoll_weight()
3812 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { in ipr_store_iopoll_weight()
3813 dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight equals the current weight\n"); in ipr_store_iopoll_weight()
3817 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3818 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_store_iopoll_weight()
3819 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3822 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_store_iopoll_weight()
3823 ioa_cfg->iopoll_weight = user_iopoll_weight; in ipr_store_iopoll_weight()
3824 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3825 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_store_iopoll_weight()
3826 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3827 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_store_iopoll_weight()
3830 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_store_iopoll_weight()
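/*
 * Editor's sketch of the irq_poll contract registered above: a poll
 * handler consumes at most "budget" completions and calls
 * irq_poll_complete() before returning less than budget, which flips
 * the queue back to interrupt-driven mode.  Skeleton only:
 */
static int example_iopoll(struct irq_poll *iop, int budget)
{
	int done = 0;

	/* ... process up to "budget" completed commands here ... */

	if (done < budget)
		irq_poll_complete(iop);	/* re-arm the interrupt path */
	return done;
}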
3845 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3860 sg_size = buf_len / (IPR_MAX_SGLIST - 1); in ipr_alloc_ucode_buffer()
3871 sglist->order = order; in ipr_alloc_ucode_buffer()
3872 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL, in ipr_alloc_ucode_buffer()
3873 &sglist->num_sg); in ipr_alloc_ucode_buffer()
3874 if (!sglist->scatterlist) { in ipr_alloc_ucode_buffer()
3883 * ipr_free_ucode_buffer - Frees a microcode download buffer
3894 sgl_free_order(sglist->scatterlist, sglist->order); in ipr_free_ucode_buffer()
3899 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3918 bsize_elem = PAGE_SIZE * (1 << sglist->order); in ipr_copy_ucode_buffer()
3920 sg = sglist->scatterlist; in ipr_copy_ucode_buffer()
3930 sg->length = bsize_elem; in ipr_copy_ucode_buffer()
3945 sg->length = len % bsize_elem; in ipr_copy_ucode_buffer()
3948 sglist->buffer_len = len; in ipr_copy_ucode_buffer()
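/*
 * Editor's sketch of the elided copy loop above (assuming lowmem
 * pages so sg_virt() is usable): the image is copied element by
 * element, each element spanning (PAGE_SIZE << order) bytes, with a
 * short tail element at the end.
 */
	for (i = 0; i < len / bsize_elem; i++, sg = sg_next(sg)) {
		memcpy(sg_virt(sg), buffer + i * bsize_elem, bsize_elem);
		sg->length = bsize_elem;
	}
	if (len % bsize_elem) {
		memcpy(sg_virt(sg), buffer + i * bsize_elem, len % bsize_elem);
		sg->length = len % bsize_elem;
	}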
3953 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3963 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ucode_ioadl64()
3964 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_build_ucode_ioadl64()
3965 struct scatterlist *scatterlist = sglist->scatterlist; in ipr_build_ucode_ioadl64()
3969 ipr_cmd->dma_use_sg = sglist->num_dma_sg; in ipr_build_ucode_ioadl64()
3970 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ucode_ioadl64()
3971 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); in ipr_build_ucode_ioadl64()
3973 ioarcb->ioadl_len = in ipr_build_ucode_ioadl64()
3974 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ucode_ioadl64()
3975 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ucode_ioadl64()
3981 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ucode_ioadl64()
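/*
 * Editor's reconstruction of the per-element loop body elided above
 * (field names assumed, following the 32-bit builder that comes
 * next): each 64-bit IOADL descriptor records the flags, DMA length,
 * and DMA address of one mapped scatterlist element, and the final
 * descriptor is then flagged IPR_IOADL_FLAGS_LAST.
 */
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));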
3985 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3995 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ucode_ioadl()
3996 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ucode_ioadl()
3997 struct scatterlist *scatterlist = sglist->scatterlist; in ipr_build_ucode_ioadl()
4001 ipr_cmd->dma_use_sg = sglist->num_dma_sg; in ipr_build_ucode_ioadl()
4002 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ucode_ioadl()
4003 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); in ipr_build_ucode_ioadl()
4005 ioarcb->ioadl_len = in ipr_build_ucode_ioadl()
4006 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ucode_ioadl()
4008 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ucode_ioadl()
4015 ioadl[i-1].flags_and_data_len |= in ipr_build_ucode_ioadl()
4020 * ipr_update_ioa_ucode - Update IOA's microcode
4027 * 0 on success / -EIO on failure
4034 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4035 while (ioa_cfg->in_reset_reload) { in ipr_update_ioa_ucode()
4036 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4037 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
4038 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4041 if (ioa_cfg->ucode_sglist) { in ipr_update_ioa_ucode()
4042 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4043 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4045 return -EIO; in ipr_update_ioa_ucode()
4048 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4049 sglist->scatterlist, sglist->num_sg, in ipr_update_ioa_ucode()
4052 if (!sglist->num_dma_sg) { in ipr_update_ioa_ucode()
4053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4054 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4056 return -EIO; in ipr_update_ioa_ucode()
4059 ioa_cfg->ucode_sglist = sglist; in ipr_update_ioa_ucode()
4061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4062 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
4064 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4065 ioa_cfg->ucode_sglist = NULL; in ipr_update_ioa_ucode()
4066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4071 * ipr_store_update_fw - Update the firmware on the adapter
4087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_update_fw()
4097 return -EACCES; in ipr_store_update_fw()
4105 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { in ipr_store_update_fw()
4106 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); in ipr_store_update_fw()
4107 return -EIO; in ipr_store_update_fw()
4110 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; in ipr_store_update_fw()
4112 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); in ipr_store_update_fw()
4113 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); in ipr_store_update_fw()
4117 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); in ipr_store_update_fw()
4119 return -ENOMEM; in ipr_store_update_fw()
4125 dev_err(&ioa_cfg->pdev->dev, in ipr_store_update_fw()
4151 * ipr_show_fw_type - Show the adapter's firmware type.
4163 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_type()
4167 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4168 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); in ipr_show_fw_type()
4169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4187 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_async_err_log()
4192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4193 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_read_async_err_log()
4196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4199 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam, in ipr_read_async_err_log()
4200 sizeof(hostrcb->hcam)); in ipr_read_async_err_log()
4201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4211 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_next_async_err_log()
4215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4216 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_next_async_err_log()
4219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4224 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_next_async_err_log()
4225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4253 * ipr_read_dump - Dump the adapter
4270 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_dump()
4278 return -EACCES; in ipr_read_dump()
4280 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4281 dump = ioa_cfg->dump; in ipr_read_dump()
4283 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { in ipr_read_dump()
4284 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4287 kref_get(&dump->kref); in ipr_read_dump()
4288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4290 if (off > dump->driver_dump.hdr.len) { in ipr_read_dump()
4291 kref_put(&dump->kref, ipr_release_dump); in ipr_read_dump()
4295 if (off + count > dump->driver_dump.hdr.len) { in ipr_read_dump()
4296 count = dump->driver_dump.hdr.len - off; in ipr_read_dump()
4300 if (count && off < sizeof(dump->driver_dump)) { in ipr_read_dump()
4301 if (off + count > sizeof(dump->driver_dump)) in ipr_read_dump()
4302 len = sizeof(dump->driver_dump) - off; in ipr_read_dump()
4305 src = (u8 *)&dump->driver_dump + off; in ipr_read_dump()
4309 count -= len; in ipr_read_dump()
4312 off -= sizeof(dump->driver_dump); in ipr_read_dump()
4314 if (ioa_cfg->sis64) in ipr_read_dump()
4316 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * in ipr_read_dump()
4324 len = sdt_end - off; in ipr_read_dump()
4327 src = (u8 *)&dump->ioa_dump + off; in ipr_read_dump()
4331 count -= len; in ipr_read_dump()
4334 off -= sdt_end; in ipr_read_dump()
4338 len = PAGE_ALIGN(off) - off; in ipr_read_dump()
4341 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; in ipr_read_dump()
4346 count -= len; in ipr_read_dump()
4349 kref_put(&dump->kref, ipr_release_dump); in ipr_read_dump()
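/*
 * Editor's worked example of the offset rebasing above: the dump
 * reads back as [driver_dump][SDT region][data pages], and "off" is
 * reduced at each region boundary.  Within the page region, the
 * intra-page source address (the line elided above) would be derived
 * from the page-pointer array as:
 */
	src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
	src += off & ~PAGE_MASK;	/* byte offset inside that page */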
4354 * ipr_alloc_dump - Prepare for adapter dump
4370 return -ENOMEM; in ipr_alloc_dump()
4373 if (ioa_cfg->sis64) in ipr_alloc_dump()
4383 return -ENOMEM; in ipr_alloc_dump()
4386 dump->ioa_dump.ioa_data = ioa_data; in ipr_alloc_dump()
4388 kref_init(&dump->kref); in ipr_alloc_dump()
4389 dump->ioa_cfg = ioa_cfg; in ipr_alloc_dump()
4391 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4393 if (INACTIVE != ioa_cfg->sdt_state) { in ipr_alloc_dump()
4394 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4395 vfree(dump->ioa_dump.ioa_data); in ipr_alloc_dump()
4400 ioa_cfg->dump = dump; in ipr_alloc_dump()
4401 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_alloc_dump()
4402 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4403 ioa_cfg->dump_taken = 1; in ipr_alloc_dump()
4404 schedule_work(&ioa_cfg->work_q); in ipr_alloc_dump()
4406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4412 * ipr_free_dump - Free adapter dump memory
4425 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4426 dump = ioa_cfg->dump; in ipr_free_dump()
4428 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4432 ioa_cfg->dump = NULL; in ipr_free_dump()
4433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4435 kref_put(&dump->kref, ipr_release_dump); in ipr_free_dump()
4442 * ipr_write_dump - Setup dump state of adapter
4459 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_write_dump()
4463 return -EACCES; in ipr_write_dump()
4470 return -EINVAL; in ipr_write_dump()
4492 * ipr_change_queue_depth - Change the device's queue depth
4501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_change_queue_depth()
4505 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4506 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_change_queue_depth()
4510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4513 return sdev->queue_depth; in ipr_change_queue_depth()
4517 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_adapter_handle()
4531 ssize_t len = -ENXIO; in ipr_show_adapter_handle()
4533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4534 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_adapter_handle()
4536 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); in ipr_show_adapter_handle()
4537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4550 * ipr_show_resource_path - Show the resource path or the resource address for
4562 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_path()
4565 ssize_t len = -ENXIO; in ipr_show_resource_path()
4568 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4569 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_resource_path()
4570 if (res && ioa_cfg->sis64) in ipr_show_resource_path()
4572 __ipr_format_res_path(res->res_path, buffer, in ipr_show_resource_path()
4575 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, in ipr_show_resource_path()
4576 res->bus, res->target, res->lun); in ipr_show_resource_path()
4578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4591 * ipr_show_device_id - Show the device_id for this device.
4602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_device_id()
4605 ssize_t len = -ENXIO; in ipr_show_device_id()
4607 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4608 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_device_id()
4609 if (res && ioa_cfg->sis64) in ipr_show_device_id()
4610 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); in ipr_show_device_id()
4612 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); in ipr_show_device_id()
4614 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4627 * ipr_show_resource_type - Show the resource type for this device.
4638 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_type()
4641 ssize_t len = -ENXIO; in ipr_show_resource_type()
4643 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4644 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_resource_type()
4647 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); in ipr_show_resource_type()
4649 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4662 * ipr_show_raw_mode - Show the adapter's raw mode
4674 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_raw_mode()
4679 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4680 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_raw_mode()
4682 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); in ipr_show_raw_mode()
4684 len = -ENXIO; in ipr_show_raw_mode()
4685 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4690 * ipr_store_raw_mode - Change the adapter's raw mode
4704 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_store_raw_mode()
4709 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4710 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_store_raw_mode()
4713 res->raw_mode = simple_strtoul(buf, NULL, 10); in ipr_store_raw_mode()
4715 if (res->sdev) in ipr_store_raw_mode()
4716 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", in ipr_store_raw_mode()
4717 res->raw_mode ? "enabled" : "disabled"); in ipr_store_raw_mode()
4719 len = -EINVAL; in ipr_store_raw_mode()
4721 len = -ENXIO; in ipr_store_raw_mode()
4722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4745 * ipr_biosparam - Return the HSC mapping
4780 * ipr_find_starget - Find target based on bus/target.
4788 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_find_starget()
4789 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_find_starget()
4792 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_starget()
4793 if ((res->bus == starget->channel) && in ipr_find_starget()
4794 (res->target == starget->id)) { in ipr_find_starget()
4805 * ipr_target_alloc - Prepare for commands to a SCSI target
4812 * 0 on success / non-0 on failure
4816 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_target_alloc()
4817 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_alloc()
4823 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4825 starget->hostdata = NULL; in ipr_target_alloc()
4828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4831 return -ENOMEM; in ipr_target_alloc()
4833 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); in ipr_target_alloc()
4835 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4836 sata_port->ioa_cfg = ioa_cfg; in ipr_target_alloc()
4837 sata_port->ap = ap; in ipr_target_alloc()
4838 sata_port->res = res; in ipr_target_alloc()
4840 res->sata_port = sata_port; in ipr_target_alloc()
4841 ap->private_data = sata_port; in ipr_target_alloc()
4842 starget->hostdata = sata_port; in ipr_target_alloc()
4845 return -ENOMEM; in ipr_target_alloc()
4848 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4854 * ipr_target_destroy - Destroy a SCSI target
4863 struct ipr_sata_port *sata_port = starget->hostdata; in ipr_target_destroy()
4864 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_target_destroy()
4865 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_destroy()
4867 if (ioa_cfg->sis64) { in ipr_target_destroy()
4869 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) in ipr_target_destroy()
4870 clear_bit(starget->id, ioa_cfg->array_ids); in ipr_target_destroy()
4871 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) in ipr_target_destroy()
4872 clear_bit(starget->id, ioa_cfg->vset_ids); in ipr_target_destroy()
4873 else if (starget->channel == 0) in ipr_target_destroy()
4874 clear_bit(starget->id, ioa_cfg->target_ids); in ipr_target_destroy()
4879 starget->hostdata = NULL; in ipr_target_destroy()
4880 ata_sas_port_destroy(sata_port->ap); in ipr_target_destroy()
4886 * ipr_find_sdev - Find device based on bus/target/lun.
4894 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_find_sdev()
4897 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_sdev()
4898 if ((res->bus == sdev->channel) && in ipr_find_sdev()
4899 (res->target == sdev->id) && in ipr_find_sdev()
4900 (res->lun == sdev->lun)) in ipr_find_sdev()
4908 * ipr_slave_destroy - Unconfigure a SCSI device
4920 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_destroy()
4922 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4923 res = (struct ipr_resource_entry *) sdev->hostdata; in ipr_slave_destroy()
4925 if (res->sata_port) in ipr_slave_destroy()
4926 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE; in ipr_slave_destroy()
4927 sdev->hostdata = NULL; in ipr_slave_destroy()
4928 res->sdev = NULL; in ipr_slave_destroy()
4929 res->sata_port = NULL; in ipr_slave_destroy()
4931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4935 * ipr_slave_configure - Configure a SCSI device
4945 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_configure()
4951 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4952 res = sdev->hostdata; in ipr_slave_configure()
4955 sdev->type = TYPE_RAID; in ipr_slave_configure()
4957 sdev->scsi_level = 4; in ipr_slave_configure()
4958 sdev->no_uld_attach = 1; in ipr_slave_configure()
4961 sdev->scsi_level = SCSI_SPC_3; in ipr_slave_configure()
4962 sdev->no_report_opcodes = 1; in ipr_slave_configure()
4963 blk_queue_rq_timeout(sdev->request_queue, in ipr_slave_configure()
4965 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); in ipr_slave_configure()
4967 if (ipr_is_gata(res) && res->sata_port) in ipr_slave_configure()
4968 ap = res->sata_port->ap; in ipr_slave_configure()
4969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4976 if (ioa_cfg->sis64) in ipr_slave_configure()
4979 res->res_path, buffer, sizeof(buffer))); in ipr_slave_configure()
4982 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4987 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4999 int rc = -ENXIO; in ipr_ata_slave_alloc()
5002 if (sdev->sdev_target) in ipr_ata_slave_alloc()
5003 sata_port = sdev->sdev_target->hostdata; in ipr_ata_slave_alloc()
5005 rc = ata_sas_port_init(sata_port->ap); in ipr_ata_slave_alloc()
5007 rc = ata_sas_sync_probe(sata_port->ap); in ipr_ata_slave_alloc()
5018 * ipr_slave_alloc - Prepare for commands to a device.
5027 * 0 on success / -ENXIO if device does not exist
5031 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_alloc()
5034 int rc = -ENXIO; in ipr_slave_alloc()
5036 sdev->hostdata = NULL; in ipr_slave_alloc()
5038 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5042 res->sdev = sdev; in ipr_slave_alloc()
5043 res->add_to_ml = 0; in ipr_slave_alloc()
5044 res->in_erp = 0; in ipr_slave_alloc()
5045 sdev->hostdata = res; in ipr_slave_alloc()
5047 res->needs_sync_complete = 1; in ipr_slave_alloc()
5050 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5055 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5061 * ipr_match_lun - Match function for specified LUN
5070 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) in ipr_match_lun()
5076 * ipr_cmnd_is_free - Check if a command is free or not
5086 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { in ipr_cmnd_is_free()
5095 * ipr_match_res - Match function for specified resource entry
5106 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle) in ipr_match_res()
5112 * ipr_wait_for_ops - Wait for matching commands to complete
5135 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
5136 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_wait_for_ops()
5137 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
5140 ipr_cmd->eh_comp = &comp; in ipr_wait_for_ops()
5145 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
5155 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
5156 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_wait_for_ops()
5157 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
5160 ipr_cmd->eh_comp = NULL; in ipr_wait_for_ops()
5165 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
5169 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); in ipr_wait_for_ops()
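/*
 * Editor's sketch of the handshake behind ipr_wait_for_ops(): the EH
 * path plants an on-stack completion on each matching outstanding
 * command and sleeps with a timeout; the normal done path fires it
 * (see __ipr_erp_done further down).  Shape assumed, timeout value
 * illustrative:
 */
	DECLARE_COMPLETION_ONSTACK(comp);

	ipr_cmd->eh_comp = &comp;	/* set under hrrq->lock */
	if (!wait_for_completion_timeout(&comp, msecs_to_jiffies(5000)))
		ipr_cmd->eh_comp = NULL;	/* timed out: detach and retry */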
5187 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_host_reset()
5188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5190 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5192 dev_err(&ioa_cfg->pdev->dev, in ipr_eh_host_reset()
5195 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_eh_host_reset()
5196 ioa_cfg->sdt_state = GET_DUMP; in ipr_eh_host_reset()
5199 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5200 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_eh_host_reset()
5201 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5205 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5210 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5216 * ipr_device_reset - Reset the device
5227 * 0 on success / non-zero on failure
5240 ioarcb = &ipr_cmd->ioarcb; in ipr_device_reset()
5241 cmd_pkt = &ioarcb->cmd_pkt; in ipr_device_reset()
5243 if (ipr_cmd->ioa_cfg->sis64) { in ipr_device_reset()
5244 regs = &ipr_cmd->i.ata_ioadl.regs; in ipr_device_reset()
5245 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); in ipr_device_reset()
5247 regs = &ioarcb->u.add_data.u.regs; in ipr_device_reset()
5249 ioarcb->res_handle = res->res_handle; in ipr_device_reset()
5250 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_device_reset()
5251 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; in ipr_device_reset()
5253 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; in ipr_device_reset()
5254 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); in ipr_device_reset()
5255 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; in ipr_device_reset()
5259 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_device_reset()
5260 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_device_reset()
5261 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { in ipr_device_reset()
5262 if (ipr_cmd->ioa_cfg->sis64) in ipr_device_reset()
5263 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, in ipr_device_reset()
5266 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, in ipr_device_reset()
5271 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; in ipr_device_reset()
5275 * ipr_sata_reset - Reset the SATA port
5283 * 0 on success / non-zero on failure
5288 struct ipr_sata_port *sata_port = link->ap->private_data; in ipr_sata_reset()
5289 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_sata_reset()
5292 int rc = -ENXIO, ret; in ipr_sata_reset()
5295 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5296 while (ioa_cfg->in_reset_reload) { in ipr_sata_reset()
5297 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5298 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5302 res = sata_port->res; in ipr_sata_reset()
5305 *classes = res->ata_class; in ipr_sata_reset()
5306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5310 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5314 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5317 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5324 * ipr_eh_dev_reset - Reset the device
5344 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in __ipr_eh_dev_reset()
5345 res = scsi_cmd->device->hostdata; in __ipr_eh_dev_reset()
5349 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the in __ipr_eh_dev_reset()
5352 if (ioa_cfg->in_reset_reload) in __ipr_eh_dev_reset()
5354 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5358 spin_lock(&hrrq->_lock); in __ipr_eh_dev_reset()
5359 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in __ipr_eh_dev_reset()
5360 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in __ipr_eh_dev_reset()
5362 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { in __ipr_eh_dev_reset()
5363 if (!ipr_cmd->qc) in __ipr_eh_dev_reset()
5368 ipr_cmd->done = ipr_sata_eh_done; in __ipr_eh_dev_reset()
5369 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { in __ipr_eh_dev_reset()
5370 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; in __ipr_eh_dev_reset()
5371 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; in __ipr_eh_dev_reset()
5375 spin_unlock(&hrrq->_lock); in __ipr_eh_dev_reset()
5377 res->resetting_device = 1; in __ipr_eh_dev_reset()
5380 if (ipr_is_gata(res) && res->sata_port) { in __ipr_eh_dev_reset()
5381 ap = res->sata_port->ap; in __ipr_eh_dev_reset()
5382 spin_unlock_irq(scsi_cmd->device->host->host_lock); in __ipr_eh_dev_reset()
5384 spin_lock_irq(scsi_cmd->device->host->host_lock); in __ipr_eh_dev_reset()
5387 res->resetting_device = 0; in __ipr_eh_dev_reset()
5388 res->reset_occurred = 1; in __ipr_eh_dev_reset()
5400 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_dev_reset()
5401 res = cmd->device->hostdata; in ipr_eh_dev_reset()
5406 spin_lock_irq(cmd->device->host->host_lock); in ipr_eh_dev_reset()
5408 spin_unlock_irq(cmd->device->host->host_lock); in ipr_eh_dev_reset()
5411 if (ipr_is_gata(res) && res->sata_port) in ipr_eh_dev_reset()
5414 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); in ipr_eh_dev_reset()
5421 * ipr_bus_reset_done - Op done function for bus reset.
5431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_bus_reset_done()
5435 if (!ioa_cfg->sis64) in ipr_bus_reset_done()
5436 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_bus_reset_done()
5437 if (res->res_handle == ipr_cmd->ioarcb.res_handle) { in ipr_bus_reset_done()
5438 scsi_report_bus_reset(ioa_cfg->host, res->bus); in ipr_bus_reset_done()
5447 if (ipr_cmd->sibling->sibling) in ipr_bus_reset_done()
5448 ipr_cmd->sibling->sibling = NULL; in ipr_bus_reset_done()
5449 else in ipr_bus_reset_done()
5450 ipr_cmd->sibling->done(ipr_cmd->sibling); in ipr_bus_reset_done()
5452 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_bus_reset_done()
5457 * ipr_abort_timeout - An abort task has timed out
5471 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_abort_timeout()
5476 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5477 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { in ipr_abort_timeout()
5478 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5482 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); in ipr_abort_timeout()
5484 ipr_cmd->sibling = reset_cmd; in ipr_abort_timeout()
5485 reset_cmd->sibling = ipr_cmd; in ipr_abort_timeout()
5486 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; in ipr_abort_timeout()
5487 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; in ipr_abort_timeout()
5488 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_abort_timeout()
5489 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; in ipr_abort_timeout()
5490 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; in ipr_abort_timeout()
5493 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5498 * ipr_cancel_op - Cancel specified op
5517 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; in ipr_cancel_op()
5518 res = scsi_cmd->device->hostdata; in ipr_cancel_op()
5521 * This will force the mid-layer to call ipr_eh_host_reset, in ipr_cancel_op()
5524 if (ioa_cfg->in_reset_reload || in ipr_cancel_op()
5525 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5535 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_cancel_op()
5541 spin_lock(&hrrq->_lock); in ipr_cancel_op()
5542 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_cancel_op()
5543 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { in ipr_cancel_op()
5544 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { in ipr_cancel_op()
5550 spin_unlock(&hrrq->_lock); in ipr_cancel_op()
5557 ipr_cmd->ioarcb.res_handle = res->res_handle; in ipr_cancel_op()
5558 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_cancel_op()
5559 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_cancel_op()
5560 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; in ipr_cancel_op()
5561 ipr_cmd->u.sdev = scsi_cmd->device; in ipr_cancel_op()
5564 scsi_cmd->cmnd[0]); in ipr_cancel_op()
5566 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_cancel_op()
5577 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_cancel_op()
5579 res->needs_sync_complete = 1; in ipr_cancel_op()
5586 * ipr_scan_finished - Determine if the device scan is complete
5596 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_scan_finished()
5599 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_scan_finished()
5600 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5602 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) in ipr_scan_finished()
5604 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_scan_finished()
5609 * ipr_eh_abort - Abort a single op
5623 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in ipr_eh_abort()
5625 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); in ipr_eh_abort()
5627 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); in ipr_eh_abort()
5630 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); in ipr_eh_abort()
5636 * ipr_handle_other_interrupt - Handle "other" interrupts
5649 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_handle_other_interrupt()
5656 if (ioa_cfg->sis64) { in ipr_handle_other_interrupt()
5657 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_handle_other_interrupt()
5658 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5662 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); in ipr_handle_other_interrupt()
5663 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5664 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5665 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5666 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5676 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_handle_other_interrupt()
5677 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_handle_other_interrupt()
5679 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5680 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5681 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5683 if (ioa_cfg->clear_isr) { in ipr_handle_other_interrupt()
5685 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5687 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); in ipr_handle_other_interrupt()
5688 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_handle_other_interrupt()
5693 ioa_cfg->ioa_unit_checked = 1; in ipr_handle_other_interrupt()
5695 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5698 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5701 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_handle_other_interrupt()
5702 ioa_cfg->sdt_state = GET_DUMP; in ipr_handle_other_interrupt()
5712 * ipr_isr_eh - Interrupt service routine error handler
5722 ioa_cfg->errors_logged++; in ipr_isr_eh()
5723 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); in ipr_isr_eh()
5725 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_isr_eh()
5726 ioa_cfg->sdt_state = GET_DUMP; in ipr_isr_eh()
5737 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; in ipr_process_hrrq()
5741 if (!hrr_queue->allow_interrupts) in ipr_process_hrrq()
5744 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_process_hrrq()
5745 hrr_queue->toggle_bit) { in ipr_process_hrrq()
5747 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & in ipr_process_hrrq()
5751 if (unlikely(cmd_index > hrr_queue->max_cmd_id || in ipr_process_hrrq()
5752 cmd_index < hrr_queue->min_cmd_id)) { in ipr_process_hrrq()
5759 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; in ipr_process_hrrq()
5760 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_hrrq()
5764 list_move_tail(&ipr_cmd->queue, doneq); in ipr_process_hrrq()
5766 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { in ipr_process_hrrq()
5767 hrr_queue->hrrq_curr++; in ipr_process_hrrq()
5769 hrr_queue->hrrq_curr = hrr_queue->hrrq_start; in ipr_process_hrrq()
5770 hrr_queue->toggle_bit ^= 1u; in ipr_process_hrrq()
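/*
 * Editor's sketch of the toggle-bit protocol consumed above: the
 * adapter posts __be32 response words into a circular HRRQ and flips
 * the ownership bit each time it wraps, so an entry belongs to the
 * host exactly while its toggle bit matches the host's copy, and the
 * host flips its copy when its consumer pointer wraps ("q" is an
 * assumed struct ipr_hrr_queue pointer):
 */
	while ((be32_to_cpu(*q->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == q->toggle_bit) {
		/* ... complete the command indexed by this entry ... */
		if (q->hrrq_curr < q->hrrq_end)
			q->hrrq_curr++;
		else {
			q->hrrq_curr = q->hrrq_start;
			q->toggle_bit ^= 1u;
		}
	}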
5790 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_iopoll()
5795 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_iopoll()
5798 list_del(&ipr_cmd->queue); in ipr_iopoll()
5799 del_timer(&ipr_cmd->timer); in ipr_iopoll()
5800 ipr_cmd->fast_done(ipr_cmd); in ipr_iopoll()
5807 * ipr_isr - Interrupt service routine
5817 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr()
5826 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr()
5828 if (!hrrq->allow_interrupts) { in ipr_isr()
5829 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5834 if (ipr_process_hrrq(hrrq, -1, &doneq)) { in ipr_isr()
5837 if (!ioa_cfg->clear_isr) in ipr_isr()
5844 ioa_cfg->regs.clr_interrupt_reg32); in ipr_isr()
5845 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5850 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5865 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5867 list_del(&ipr_cmd->queue); in ipr_isr()
5868 del_timer(&ipr_cmd->timer); in ipr_isr()
5869 ipr_cmd->fast_done(ipr_cmd); in ipr_isr()
5875 * ipr_isr_mhrrq - Interrupt service routine for secondary HRR queues
5885 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq()
5891 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5894 if (!hrrq->allow_interrupts) { in ipr_isr_mhrrq()
5895 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5899 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_isr_mhrrq()
5900 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5901 hrrq->toggle_bit) { in ipr_isr_mhrrq()
5902 irq_poll_sched(&hrrq->iopoll); in ipr_isr_mhrrq()
5903 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5907 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5908 hrrq->toggle_bit) in ipr_isr_mhrrq()
5910 if (ipr_process_hrrq(hrrq, -1, &doneq)) in ipr_isr_mhrrq()
5914 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5917 list_del(&ipr_cmd->queue); in ipr_isr_mhrrq()
5918 del_timer(&ipr_cmd->timer); in ipr_isr_mhrrq()
5919 ipr_cmd->fast_done(ipr_cmd); in ipr_isr_mhrrq()
5925 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5930 * 0 on success / -1 on failure
5939 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_build_ioadl64()
5940 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioadl64()
5941 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_build_ioadl64()
5950 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl64()
5951 return -1; in ipr_build_ioadl64()
5954 ipr_cmd->dma_use_sg = nseg; in ipr_build_ioadl64()
5956 ioarcb->data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl64()
5957 ioarcb->ioadl_len = in ipr_build_ioadl64()
5958 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl64()
5960 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { in ipr_build_ioadl64()
5962 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ioadl64()
5963 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) in ipr_build_ioadl64()
5966 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ioadl64()
5972 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ioadl64()
5977 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5982 * 0 on success / -1 on failure
5991 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_build_ioadl()
5992 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioadl()
5993 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ioadl()
6001 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl()
6002 return -1; in ipr_build_ioadl()
6005 ipr_cmd->dma_use_sg = nseg; in ipr_build_ioadl()
6007 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { in ipr_build_ioadl()
6009 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ioadl()
6010 ioarcb->data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl()
6011 ioarcb->ioadl_len = in ipr_build_ioadl()
6012 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl()
6013 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { in ipr_build_ioadl()
6015 ioarcb->read_data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl()
6016 ioarcb->read_ioadl_len = in ipr_build_ioadl()
6017 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl()
6020 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { in ipr_build_ioadl()
6021 ioadl = ioarcb->u.add_data.u.ioadl; in ipr_build_ioadl()
6022 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + in ipr_build_ioadl()
6024 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_build_ioadl()
6027 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ioadl()
6033 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ioadl()
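/*
 * Editor's note on the mapping step shared by both builders above:
 * scsi_dma_map() returns the number of DMA elements (0 for a command
 * carrying no data, negative on failure), hence the call shape
 * (error message elided):
 */
	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0)
		return -1;		/* could not map the data buffer */
	ipr_cmd->dma_use_sg = nseg;	/* number of descriptors to build */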
6038 * __ipr_erp_done - Process completion of ERP for a device
6049 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in __ipr_erp_done()
6050 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in __ipr_erp_done()
6051 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in __ipr_erp_done()
6054 scsi_cmd->result |= (DID_ERROR << 16); in __ipr_erp_done()
6058 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, in __ipr_erp_done()
6064 res->needs_sync_complete = 1; in __ipr_erp_done()
6065 res->in_erp = 0; in __ipr_erp_done()
6067 scsi_dma_unmap(ipr_cmd->scsi_cmd); in __ipr_erp_done()
6068 scsi_cmd->scsi_done(scsi_cmd); in __ipr_erp_done()
6069 if (ipr_cmd->eh_comp) in __ipr_erp_done()
6070 complete(ipr_cmd->eh_comp); in __ipr_erp_done()
6071 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_erp_done()
6075 * ipr_erp_done - Process completion of ERP for a device
6086 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_erp_done()
6089 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_erp_done()
6091 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_erp_done()
6095 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6103 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_reinit_ipr_cmnd_for_erp()
6104 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_reinit_ipr_cmnd_for_erp()
6105 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_reinit_ipr_cmnd_for_erp()
6107 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); in ipr_reinit_ipr_cmnd_for_erp()
6108 ioarcb->data_transfer_length = 0; in ipr_reinit_ipr_cmnd_for_erp()
6109 ioarcb->read_data_transfer_length = 0; in ipr_reinit_ipr_cmnd_for_erp()
6110 ioarcb->ioadl_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6111 ioarcb->read_ioadl_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6112 ioasa->hdr.ioasc = 0; in ipr_reinit_ipr_cmnd_for_erp()
6113 ioasa->hdr.residual_data_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6115 if (ipr_cmd->ioa_cfg->sis64) in ipr_reinit_ipr_cmnd_for_erp()
6116 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_reinit_ipr_cmnd_for_erp()
6119 ioarcb->write_ioadl_addr = in ipr_reinit_ipr_cmnd_for_erp()
6121 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_reinit_ipr_cmnd_for_erp()
6126 * __ipr_erp_request_sense - Send request sense to a device
6137 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in __ipr_erp_request_sense()
6138 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in __ipr_erp_request_sense()
6147 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; in __ipr_erp_request_sense()
6148 cmd_pkt->cdb[0] = REQUEST_SENSE; in __ipr_erp_request_sense()
6149 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; in __ipr_erp_request_sense()
6150 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; in __ipr_erp_request_sense()
6151 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in __ipr_erp_request_sense()
6152 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); in __ipr_erp_request_sense()
6154 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, in __ipr_erp_request_sense()
6162 * ipr_erp_request_sense - Send request sense to a device
6173 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_erp_request_sense()
6176 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_erp_request_sense()
6178 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_erp_request_sense()
6182 * ipr_erp_cancel_all - Send cancel all to a device
6188 * Cancel all will return them to us.
6195 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_erp_cancel_all()
6196 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in ipr_erp_cancel_all()
6199 res->in_erp = 1; in ipr_erp_cancel_all()
6203 if (!scsi_cmd->device->simple_tags) { in ipr_erp_cancel_all()
6208 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_erp_cancel_all()
6209 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_erp_cancel_all()
6210 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; in ipr_erp_cancel_all()
6217 * ipr_dump_ioasa - Dump contents of IOASA
6235 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_dump_ioasa()
6239 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; in ipr_dump_ioasa()
6240 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; in ipr_dump_ioasa()
6245 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) in ipr_dump_ioasa()
6253 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { in ipr_dump_ioasa()
6255 if (ioasa->hdr.ilid != 0) in ipr_dump_ioasa()
6267 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); in ipr_dump_ioasa()
6268 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) in ipr_dump_ioasa()
6270 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) in ipr_dump_ioasa()
6285 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6294 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; in ipr_gen_sense()
6295 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; in ipr_gen_sense()
6296 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_gen_sense()
6297 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); in ipr_gen_sense()
6304 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; in ipr_gen_sense()
6308 ioasa->u.vset.failing_lba_hi != 0) { in ipr_gen_sense()
6319 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); in ipr_gen_sense()
6326 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); in ipr_gen_sense()
6340 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { in ipr_gen_sense()
6351 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; in ipr_gen_sense()
6354 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; in ipr_gen_sense()
6358 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); in ipr_gen_sense()
6360 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); in ipr_gen_sense()
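/*
 * A sketch of how a 32-bit failing LBA can be folded into fixed-format
 * (0x70) sense data, in the spirit of ipr_gen_sense() above for the case
 * where the LBA fits in 32 bits. Byte offsets follow SPC fixed-format
 * sense; the key and length values here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static void set_failing_lba(uint8_t *sense, uint32_t lba)
{
	sense[0] = 0x70 | 0x80;		/* fixed format + VALID: info field set */
	sense[2] = 0x03;		/* sense key: MEDIUM ERROR */
	sense[3] = (lba >> 24) & 0xff;	/* INFORMATION field, big-endian */
	sense[4] = (lba >> 16) & 0xff;
	sense[5] = (lba >> 8) & 0xff;
	sense[6] = lba & 0xff;
	sense[7] = 10;			/* additional sense length */
}

int main(void)
{
	uint8_t sense[18] = { 0 };

	set_failing_lba(sense, 0x12345678);
	printf("info bytes: %02x %02x %02x %02x\n",
	       sense[3], sense[4], sense[5], sense[6]);
	return 0;
}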
6375 * ipr_get_autosense - Copy autosense data to sense buffer
6386 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_get_autosense()
6387 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; in ipr_get_autosense()
6389 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) in ipr_get_autosense()
6392 if (ipr_cmd->ioa_cfg->sis64) in ipr_get_autosense()
6393 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, in ipr_get_autosense()
6394 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), in ipr_get_autosense()
6397 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, in ipr_get_autosense()
6398 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), in ipr_get_autosense()
6404 * ipr_erp_start - Process an error response for a SCSI op
6417 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_erp_start()
6418 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in ipr_erp_start()
6419 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_erp_start()
6435 scsi_cmd->result |= (DID_ABORT << 16); in ipr_erp_start()
6437 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6441 scsi_cmd->result |= (DID_NO_CONNECT << 16); in ipr_erp_start()
6444 scsi_cmd->result |= (DID_NO_CONNECT << 16); in ipr_erp_start()
6446 res->needs_sync_complete = 1; in ipr_erp_start()
6449 if (!res->in_erp) in ipr_erp_start()
6450 res->needs_sync_complete = 1; in ipr_erp_start()
6451 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6457 * so SCSI mid-layer and upper layers handle it accordingly. in ipr_erp_start()
6459 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) in ipr_erp_start()
6460 scsi_cmd->result |= (DID_PASSTHROUGH << 16); in ipr_erp_start()
6468 if (!res->resetting_device) in ipr_erp_start()
6469 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); in ipr_erp_start()
6470 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6472 res->needs_sync_complete = 1; in ipr_erp_start()
6475 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); in ipr_erp_start()
6485 res->needs_sync_complete = 1; in ipr_erp_start()
6490 if (res->raw_mode) { in ipr_erp_start()
6491 res->raw_mode = 0; in ipr_erp_start()
6492 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6494 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6498 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6500 res->needs_sync_complete = 1; in ipr_erp_start()
6504 scsi_dma_unmap(ipr_cmd->scsi_cmd); in ipr_erp_start()
6505 scsi_cmd->scsi_done(scsi_cmd); in ipr_erp_start()
6506 if (ipr_cmd->eh_comp) in ipr_erp_start()
6507 complete(ipr_cmd->eh_comp); in ipr_erp_start()
6508 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_erp_start()
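/*
 * A standalone sketch of the result-word packing used throughout
 * ipr_erp_start() above: the SCSI mid-layer result is a packed 32-bit
 * value with the host byte (DID_*) at bits 16-23 and the SCSI status in
 * the low byte, which is why the driver ORs in (DID_ERROR << 16) and the
 * like. The DID_* values mirror the kernel's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DID_OK			0x00
#define DID_ERROR		0x07
#define SAM_STAT_CHECK_CONDITION 0x02

static uint32_t make_result(uint8_t host_byte, uint8_t scsi_status)
{
	return ((uint32_t)host_byte << 16) | scsi_status;
}

int main(void)
{
	uint32_t r = make_result(DID_ERROR, SAM_STAT_CHECK_CONDITION);

	printf("result=0x%08x host=0x%02x status=0x%02x\n",
	       (unsigned)r, (r >> 16) & 0xff, r & 0xff);
	return 0;
}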
6512 * ipr_scsi_done - mid-layer done function
6516 * ops generated by the SCSI mid-layer
6523 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_scsi_done()
6524 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_scsi_done()
6525 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_scsi_done()
6528 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); in ipr_scsi_done()
6533 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6534 scsi_cmd->scsi_done(scsi_cmd); in ipr_scsi_done()
6535 if (ipr_cmd->eh_comp) in ipr_scsi_done()
6536 complete(ipr_cmd->eh_comp); in ipr_scsi_done()
6537 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_scsi_done()
6538 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6540 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6541 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6543 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6544 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6549 * ipr_queuecommand - Queue a mid-layer request
6553 * This function queues a request generated by the mid-layer.
6572 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_queuecommand()
6574 scsi_cmd->result = (DID_OK << 16); in ipr_queuecommand()
6575 res = scsi_cmd->device->hostdata; in ipr_queuecommand()
6577 if (ipr_is_gata(res) && res->sata_port) { in ipr_queuecommand()
6578 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6579 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); in ipr_queuecommand()
6580 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6585 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6587 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6590 * We have told the host to stop giving us new requests, but in ipr_queuecommand()
6593 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { in ipr_queuecommand()
6594 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6599 * FIXME - Create scsi_set_host_offline interface in ipr_queuecommand()
6602 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { in ipr_queuecommand()
6603 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6609 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6612 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6615 ioarcb = &ipr_cmd->ioarcb; in ipr_queuecommand()
6617 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); in ipr_queuecommand()
6618 ipr_cmd->scsi_cmd = scsi_cmd; in ipr_queuecommand()
6619 ipr_cmd->done = ipr_scsi_eh_done; in ipr_queuecommand()
6622 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6623 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6625 if (res->reset_occurred) { in ipr_queuecommand()
6626 res->reset_occurred = 0; in ipr_queuecommand()
6627 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; in ipr_queuecommand()
6632 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; in ipr_queuecommand()
6634 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; in ipr_queuecommand()
6635 if (scsi_cmd->flags & SCMD_TAGGED) in ipr_queuecommand()
6636 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; in ipr_queuecommand()
6638 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; in ipr_queuecommand()
6641 if (scsi_cmd->cmnd[0] >= 0xC0 && in ipr_queuecommand()
6642 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { in ipr_queuecommand()
6643 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_queuecommand()
6645 if (res->raw_mode && ipr_is_af_dasd_device(res)) { in ipr_queuecommand()
6646 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; in ipr_queuecommand()
6648 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6649 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6652 if (ioa_cfg->sis64) in ipr_queuecommand()
6657 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6658 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { in ipr_queuecommand()
6659 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6660 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6666 if (unlikely(hrrq->ioa_is_dead)) { in ipr_queuecommand()
6667 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6668 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6673 ioarcb->res_handle = res->res_handle; in ipr_queuecommand()
6674 if (res->needs_sync_complete) { in ipr_queuecommand()
6675 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; in ipr_queuecommand()
6676 res->needs_sync_complete = 0; in ipr_queuecommand()
6678 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); in ipr_queuecommand()
6681 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6685 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6686 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in ipr_queuecommand()
6687 scsi_cmd->result = (DID_NO_CONNECT << 16); in ipr_queuecommand()
6688 scsi_cmd->scsi_done(scsi_cmd); in ipr_queuecommand()
6689 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6694 * ipr_ioctl - IOCTL handler
6707 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_ioctl()
6710 return -ENOTTY; in ipr_ioctl()
6711 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg); in ipr_ioctl()
6714 return -EINVAL; in ipr_ioctl()
6718 * ipr_ioa_info - Get information about the card/driver
6730 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; in ipr_ioa_info()
6732 spin_lock_irqsave(host->host_lock, lock_flags); in ipr_ioa_info()
6733 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); in ipr_ioa_info()
6734 spin_unlock_irqrestore(host->host_lock, lock_flags); in ipr_ioa_info()
6761 .this_id = -1,
6771 * ipr_ata_phy_reset - libata phy_reset handler
6778 struct ipr_sata_port *sata_port = ap->private_data; in ipr_ata_phy_reset()
6779 struct ipr_resource_entry *res = sata_port->res; in ipr_ata_phy_reset()
6780 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_phy_reset()
6784 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6785 while (ioa_cfg->in_reset_reload) { in ipr_ata_phy_reset()
6786 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6787 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_phy_reset()
6788 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6791 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ata_phy_reset()
6797 ap->link.device[0].class = ATA_DEV_NONE; in ipr_ata_phy_reset()
6801 ap->link.device[0].class = res->ata_class; in ipr_ata_phy_reset()
6802 if (ap->link.device[0].class == ATA_DEV_UNKNOWN) in ipr_ata_phy_reset()
6803 ap->link.device[0].class = ATA_DEV_NONE; in ipr_ata_phy_reset()
6806 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6811 * ipr_ata_post_internal - Cleanup after an internal command
6819 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_ata_post_internal()
6820 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_post_internal()
6825 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6826 while (ioa_cfg->in_reset_reload) { in ipr_ata_post_internal()
6827 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6828 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_post_internal()
6829 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6833 spin_lock(&hrrq->_lock); in ipr_ata_post_internal()
6834 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_ata_post_internal()
6835 if (ipr_cmd->qc == qc) { in ipr_ata_post_internal()
6836 ipr_device_reset(ioa_cfg, sata_port->res); in ipr_ata_post_internal()
6840 spin_unlock(&hrrq->_lock); in ipr_ata_post_internal()
6842 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6846 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6856 regs->feature = tf->feature; in ipr_copy_sata_tf()
6857 regs->nsect = tf->nsect; in ipr_copy_sata_tf()
6858 regs->lbal = tf->lbal; in ipr_copy_sata_tf()
6859 regs->lbam = tf->lbam; in ipr_copy_sata_tf()
6860 regs->lbah = tf->lbah; in ipr_copy_sata_tf()
6861 regs->device = tf->device; in ipr_copy_sata_tf()
6862 regs->command = tf->command; in ipr_copy_sata_tf()
6863 regs->hob_feature = tf->hob_feature; in ipr_copy_sata_tf()
6864 regs->hob_nsect = tf->hob_nsect; in ipr_copy_sata_tf()
6865 regs->hob_lbal = tf->hob_lbal; in ipr_copy_sata_tf()
6866 regs->hob_lbam = tf->hob_lbam; in ipr_copy_sata_tf()
6867 regs->hob_lbah = tf->hob_lbah; in ipr_copy_sata_tf()
6868 regs->ctl = tf->ctl; in ipr_copy_sata_tf()
6872 * ipr_sata_done - done function for SATA commands
6876 * ops generated by the SCSI mid-layer to SATA devices
6883 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_sata_done()
6884 struct ata_queued_cmd *qc = ipr_cmd->qc; in ipr_sata_done()
6885 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_sata_done()
6886 struct ipr_resource_entry *res = sata_port->res; in ipr_sata_done()
6887 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_sata_done()
6889 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6890 if (ipr_cmd->ioa_cfg->sis64) in ipr_sata_done()
6891 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, in ipr_sata_done()
6894 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, in ipr_sata_done()
6898 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) in ipr_sata_done()
6899 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); in ipr_sata_done()
6902 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); in ipr_sata_done()
6904 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); in ipr_sata_done()
6905 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_sata_done()
6906 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6911 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6920 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ata_ioadl64()
6921 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; in ipr_build_ata_ioadl64()
6923 int len = qc->nbytes; in ipr_build_ata_ioadl64()
6926 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_build_ata_ioadl64()
6931 if (qc->dma_dir == DMA_TO_DEVICE) { in ipr_build_ata_ioadl64()
6933 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ata_ioadl64()
6934 } else if (qc->dma_dir == DMA_FROM_DEVICE) in ipr_build_ata_ioadl64()
6937 ioarcb->data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl64()
6938 ioarcb->ioadl_len = in ipr_build_ata_ioadl64()
6939 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl64()
6940 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_build_ata_ioadl64()
6943 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ipr_build_ata_ioadl64()
6944 ioadl64->flags = cpu_to_be32(ioadl_flags); in ipr_build_ata_ioadl64()
6945 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); in ipr_build_ata_ioadl64()
6946 ioadl64->address = cpu_to_be64(sg_dma_address(sg)); in ipr_build_ata_ioadl64()
6953 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ata_ioadl64()
6957 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6966 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ata_ioadl()
6967 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ata_ioadl()
6969 int len = qc->nbytes; in ipr_build_ata_ioadl()
6976 if (qc->dma_dir == DMA_TO_DEVICE) { in ipr_build_ata_ioadl()
6978 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ata_ioadl()
6979 ioarcb->data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl()
6980 ioarcb->ioadl_len = in ipr_build_ata_ioadl()
6981 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl()
6982 } else if (qc->dma_dir == DMA_FROM_DEVICE) { in ipr_build_ata_ioadl()
6984 ioarcb->read_data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl()
6985 ioarcb->read_ioadl_len = in ipr_build_ata_ioadl()
6986 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl()
6989 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ipr_build_ata_ioadl()
6990 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); in ipr_build_ata_ioadl()
6991 ioadl->address = cpu_to_be32(sg_dma_address(sg)); in ipr_build_ata_ioadl()
6998 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ata_ioadl()
7002 * ipr_qc_defer - Get a free ipr_cmd
7010 struct ata_port *ap = qc->ap; in ipr_qc_defer()
7011 struct ipr_sata_port *sata_port = ap->private_data; in ipr_qc_defer()
7012 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_defer()
7018 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_qc_defer()
7020 qc->lldd_task = NULL; in ipr_qc_defer()
7021 spin_lock(&hrrq->_lock); in ipr_qc_defer()
7022 if (unlikely(hrrq->ioa_is_dead)) { in ipr_qc_defer()
7023 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7027 if (unlikely(!hrrq->allow_cmds)) { in ipr_qc_defer()
7028 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7034 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7038 qc->lldd_task = ipr_cmd; in ipr_qc_defer()
7039 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7044 * ipr_qc_issue - Issue a SATA qc to a device
7052 struct ata_port *ap = qc->ap; in ipr_qc_issue()
7053 struct ipr_sata_port *sata_port = ap->private_data; in ipr_qc_issue()
7054 struct ipr_resource_entry *res = sata_port->res; in ipr_qc_issue()
7055 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_issue()
7060 if (qc->lldd_task == NULL) in ipr_qc_issue()
7063 ipr_cmd = qc->lldd_task; in ipr_qc_issue()
7067 qc->lldd_task = NULL; in ipr_qc_issue()
7068 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7069 if (unlikely(!ipr_cmd->hrrq->allow_cmds || in ipr_qc_issue()
7070 ipr_cmd->hrrq->ioa_is_dead)) { in ipr_qc_issue()
7071 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_qc_issue()
7072 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7077 ioarcb = &ipr_cmd->ioarcb; in ipr_qc_issue()
7079 if (ioa_cfg->sis64) { in ipr_qc_issue()
7080 regs = &ipr_cmd->i.ata_ioadl.regs; in ipr_qc_issue()
7081 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); in ipr_qc_issue()
7083 regs = &ioarcb->u.add_data.u.regs; in ipr_qc_issue()
7086 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); in ipr_qc_issue()
7088 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_qc_issue()
7089 ipr_cmd->qc = qc; in ipr_qc_issue()
7090 ipr_cmd->done = ipr_sata_done; in ipr_qc_issue()
7091 ipr_cmd->ioarcb.res_handle = res->res_handle; in ipr_qc_issue()
7092 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; in ipr_qc_issue()
7093 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; in ipr_qc_issue()
7094 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_qc_issue()
7095 ipr_cmd->dma_use_sg = qc->n_elem; in ipr_qc_issue()
7097 if (ioa_cfg->sis64) in ipr_qc_issue()
7102 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; in ipr_qc_issue()
7103 ipr_copy_sata_tf(regs, &qc->tf); in ipr_qc_issue()
7104 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); in ipr_qc_issue()
7107 switch (qc->tf.protocol) { in ipr_qc_issue()
7113 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; in ipr_qc_issue()
7118 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; in ipr_qc_issue()
7122 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; in ipr_qc_issue()
7123 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; in ipr_qc_issue()
7128 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7133 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7139 * ipr_qc_fill_rtf - Read result TF
7147 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_qc_fill_rtf()
7148 struct ipr_ioasa_gata *g = &sata_port->ioasa; in ipr_qc_fill_rtf()
7149 struct ata_taskfile *tf = &qc->result_tf; in ipr_qc_fill_rtf()
7151 tf->feature = g->error; in ipr_qc_fill_rtf()
7152 tf->nsect = g->nsect; in ipr_qc_fill_rtf()
7153 tf->lbal = g->lbal; in ipr_qc_fill_rtf()
7154 tf->lbam = g->lbam; in ipr_qc_fill_rtf()
7155 tf->lbah = g->lbah; in ipr_qc_fill_rtf()
7156 tf->device = g->device; in ipr_qc_fill_rtf()
7157 tf->command = g->status; in ipr_qc_fill_rtf()
7158 tf->hob_nsect = g->hob_nsect; in ipr_qc_fill_rtf()
7159 tf->hob_lbal = g->hob_lbal; in ipr_qc_fill_rtf()
7160 tf->hob_lbam = g->hob_lbam; in ipr_qc_fill_rtf()
7161 tf->hob_lbah = g->hob_lbah; in ipr_qc_fill_rtf()
7200 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7214 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { in ipr_invalid_adapter()
7227 * ipr_ioa_bringdown_done - IOA bring down completion.
7238 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_bringdown_done()
7242 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
7244 ioa_cfg->scsi_unblock = 1; in ipr_ioa_bringdown_done()
7245 schedule_work(&ioa_cfg->work_q); in ipr_ioa_bringdown_done()
7248 ioa_cfg->in_reset_reload = 0; in ipr_ioa_bringdown_done()
7249 ioa_cfg->reset_retries = 0; in ipr_ioa_bringdown_done()
7250 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_ioa_bringdown_done()
7251 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7252 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
7253 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7257 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_bringdown_done()
7258 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_bringdown_done()
7265 * ipr_ioa_reset_done - IOA reset completion.
7269 * It schedules any necessary mid-layer add/removes and
7277 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_reset_done()
7282 ioa_cfg->in_reset_reload = 0; in ipr_ioa_reset_done()
7283 for (j = 0; j < ioa_cfg->hrrq_num; j++) { in ipr_ioa_reset_done()
7284 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7285 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
7286 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7289 ioa_cfg->reset_cmd = NULL; in ipr_ioa_reset_done()
7290 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; in ipr_ioa_reset_done()
7292 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_ioa_reset_done()
7293 if (res->add_to_ml || res->del_from_ml) { in ipr_ioa_reset_done()
7298 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7301 list_del_init(&ioa_cfg->hostrcb[j]->queue); in ipr_ioa_reset_done()
7305 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
7309 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
7312 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); in ipr_ioa_reset_done()
7313 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); in ipr_ioa_reset_done()
7315 ioa_cfg->reset_retries = 0; in ipr_ioa_reset_done()
7316 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_reset_done()
7317 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_reset_done()
7319 ioa_cfg->scsi_unblock = 1; in ipr_ioa_reset_done()
7320 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7326 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7337 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); in ipr_set_sup_dev_dflt()
7338 supported_dev->num_records = 1; in ipr_set_sup_dev_dflt()
7339 supported_dev->data_length = in ipr_set_sup_dev_dflt()
7341 supported_dev->reserved = 0; in ipr_set_sup_dev_dflt()
7345 * ipr_set_supported_devs - Send Set Supported Devices for a device
7355 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_set_supported_devs()
7356 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; in ipr_set_supported_devs()
7357 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_set_supported_devs()
7358 struct ipr_resource_entry *res = ipr_cmd->u.res; in ipr_set_supported_devs()
7360 ipr_cmd->job_step = ipr_ioa_reset_done; in ipr_set_supported_devs()
7362 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { in ipr_set_supported_devs()
7366 ipr_cmd->u.res = res; in ipr_set_supported_devs()
7367 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); in ipr_set_supported_devs()
7369 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_set_supported_devs()
7370 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_set_supported_devs()
7371 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_set_supported_devs()
7373 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; in ipr_set_supported_devs()
7374 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; in ipr_set_supported_devs()
7375 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; in ipr_set_supported_devs()
7376 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; in ipr_set_supported_devs()
7379 ioa_cfg->vpd_cbs_dma + in ipr_set_supported_devs()
7387 if (!ioa_cfg->sis64) in ipr_set_supported_devs()
7388 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_set_supported_devs()
7398 * ipr_get_mode_page - Locate specified mode page
7413 if (!mode_pages || (mode_pages->hdr.length == 0)) in ipr_get_mode_page()
7416 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; in ipr_get_mode_page()
7418 (mode_pages->data + mode_pages->hdr.block_desc_len); in ipr_get_mode_page()
7422 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) in ipr_get_mode_page()
7427 mode_hdr->page_length); in ipr_get_mode_page()
7428 length -= page_length; in ipr_get_mode_page()
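/*
 * A standalone sketch of the mode-page walk in ipr_get_mode_page() above:
 * skip the mode parameter header and any block descriptors, then step
 * page by page using each page's own length field until the wanted page
 * code is found. Layout follows SPC MODE SENSE(6) data; the buffer
 * contents are illustrative.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct mode_page_hdr {
	uint8_t page_code;	/* page code in the low 6 bits */
	uint8_t page_length;	/* bytes following this header */
};

static struct mode_page_hdr *find_mode_page(uint8_t *data, int data_len,
					     uint8_t block_desc_len,
					     uint8_t wanted)
{
	/* data points just past the 4-byte mode parameter header */
	uint8_t *p = data + block_desc_len;
	int remaining = data_len - block_desc_len;

	while (remaining > (int)sizeof(struct mode_page_hdr)) {
		struct mode_page_hdr *hdr = (struct mode_page_hdr *)p;
		int page_len = sizeof(*hdr) + hdr->page_length;

		if ((hdr->page_code & 0x3f) == wanted)
			return hdr;
		p += page_len;
		remaining -= page_len;
	}
	return NULL;
}

int main(void)
{
	/* no block descriptors; page 0x28 with 2 bytes of payload */
	uint8_t buf[] = { 0x28, 0x02, 0xaa, 0xbb };
	struct mode_page_hdr *h = find_mode_page(buf, sizeof(buf), 0, 0x28);

	printf("page 0x28 found: %s\n", h ? "yes" : "no");
	return 0;
}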
7437 * ipr_check_term_power - Check for term power errors
7457 entry_length = mode_page->entry_length; in ipr_check_term_power()
7459 bus = mode_page->bus; in ipr_check_term_power()
7461 for (i = 0; i < mode_page->num_entries; i++) { in ipr_check_term_power()
7462 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { in ipr_check_term_power()
7463 dev_err(&ioa_cfg->pdev->dev, in ipr_check_term_power()
7465 bus->res_addr.bus); in ipr_check_term_power()
7473 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7490 ioa_cfg->bus_attr[i].bus_width); in ipr_scsi_bus_speed_limit()
7492 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) in ipr_scsi_bus_speed_limit()
7493 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; in ipr_scsi_bus_speed_limit()
7498 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7518 entry_length = mode_page->entry_length; in ipr_modify_ioafp_mode_page_28()
7521 for (i = 0, bus = mode_page->bus; in ipr_modify_ioafp_mode_page_28()
7522 i < mode_page->num_entries; in ipr_modify_ioafp_mode_page_28()
7524 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { in ipr_modify_ioafp_mode_page_28()
7525 dev_err(&ioa_cfg->pdev->dev, in ipr_modify_ioafp_mode_page_28()
7527 IPR_GET_PHYS_LOC(bus->res_addr)); in ipr_modify_ioafp_mode_page_28()
7531 bus_attr = &ioa_cfg->bus_attr[i]; in ipr_modify_ioafp_mode_page_28()
7532 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; in ipr_modify_ioafp_mode_page_28()
7533 bus->bus_width = bus_attr->bus_width; in ipr_modify_ioafp_mode_page_28()
7534 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); in ipr_modify_ioafp_mode_page_28()
7535 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; in ipr_modify_ioafp_mode_page_28()
7536 if (bus_attr->qas_enabled) in ipr_modify_ioafp_mode_page_28()
7537 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; in ipr_modify_ioafp_mode_page_28()
7539 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; in ipr_modify_ioafp_mode_page_28()
7544 * ipr_build_mode_select - Build a mode select command
7558 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_select()
7560 ioarcb->res_handle = res_handle; in ipr_build_mode_select()
7561 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_select()
7562 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_mode_select()
7563 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; in ipr_build_mode_select()
7564 ioarcb->cmd_pkt.cdb[1] = parm; in ipr_build_mode_select()
7565 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_select()
7571 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7582 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page28()
7583 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page28()
7590 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page28()
7591 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page28()
7594 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page28()
7597 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_ioafp_mode_select_page28()
7598 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_ioafp_mode_select_page28()
7607 * ipr_build_mode_sense - Builds a mode sense command
7621 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_sense()
7623 ioarcb->res_handle = res_handle; in ipr_build_mode_sense()
7624 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; in ipr_build_mode_sense()
7625 ioarcb->cmd_pkt.cdb[2] = parm; in ipr_build_mode_sense()
7626 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_sense()
7627 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_sense()
7633 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7643 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cmd_failed()
7644 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_cmd_failed()
7646 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_cmd_failed()
7648 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); in ipr_reset_cmd_failed()
7651 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cmd_failed()
7656 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7667 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_mode_sense_failed()
7668 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_failed()
7671 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_reset_mode_sense_failed()
7672 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_reset_mode_sense_failed()
7681 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7692 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page28()
7696 0x28, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page28()
7700 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; in ipr_ioafp_mode_sense_page28()
7701 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; in ipr_ioafp_mode_sense_page28()
7710 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7720 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page24()
7721 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page24()
7730 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; in ipr_ioafp_mode_select_page24()
7732 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page24()
7733 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page24()
7736 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page24()
7739 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_ioafp_mode_select_page24()
7747 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7758 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_page24_failed()
7761 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_reset_mode_sense_page24_failed()
7769 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7780 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page24()
7784 0x24, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page24()
7788 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; in ipr_ioafp_mode_sense_page24()
7789 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; in ipr_ioafp_mode_sense_page24()
7798 * ipr_init_res_table - Initialize the resource table
7803 * devices and schedule adding/removing them from the mid-layer
7811 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_init_res_table()
7818 if (ioa_cfg->sis64) in ipr_init_res_table()
7819 flag = ioa_cfg->u.cfg_table64->hdr64.flags; in ipr_init_res_table()
7821 flag = ioa_cfg->u.cfg_table->hdr.flags; in ipr_init_res_table()
7824 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); in ipr_init_res_table()
7826 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) in ipr_init_res_table()
7827 list_move_tail(&res->queue, &old_res); in ipr_init_res_table()
7829 if (ioa_cfg->sis64) in ipr_init_res_table()
7830 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); in ipr_init_res_table()
7832 entries = ioa_cfg->u.cfg_table->hdr.num_entries; in ipr_init_res_table()
7835 if (ioa_cfg->sis64) in ipr_init_res_table()
7836 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; in ipr_init_res_table()
7838 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; in ipr_init_res_table()
7843 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7850 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_init_res_table()
7851 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); in ipr_init_res_table()
7856 res = list_entry(ioa_cfg->free_res_q.next, in ipr_init_res_table()
7858 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7860 res->add_to_ml = 1; in ipr_init_res_table()
7861 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) in ipr_init_res_table()
7862 res->sdev->allow_restart = 1; in ipr_init_res_table()
7869 if (res->sdev) { in ipr_init_res_table()
7870 res->del_from_ml = 1; in ipr_init_res_table()
7871 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_init_res_table()
7872 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7878 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_init_res_table()
7881 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_init_res_table()
7882 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; in ipr_init_res_table()
7884 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_init_res_table()
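/*
 * A toy sketch of the reconcile pattern in ipr_init_res_table() above:
 * move every known resource to an "old" list, walk the adapter's new
 * config table re-claiming matches (creating entries flagged add_to_ml
 * for newly reported devices), and flag whatever remains on "old" as
 * del_from_ml. The real driver does this with list_move_tail() on
 * struct list_head; the singly linked list here is illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct res {
	int handle;
	int add_to_ml, del_from_ml;
	struct res *next;
};

/* pop a matching handle off *list, or return NULL */
static struct res *take(struct res **list, int handle)
{
	for (struct res **pp = list; *pp; pp = &(*pp)->next) {
		if ((*pp)->handle == handle) {
			struct res *r = *pp;
			*pp = r->next;
			r->next = NULL;
			return r;
		}
	}
	return NULL;
}

static struct res *reconcile(struct res *old, const int *cfg, int n)
{
	struct res *used = NULL;

	for (int i = 0; i < n; i++) {
		struct res *r = take(&old, cfg[i]);
		if (!r) {			/* newly reported device */
			r = calloc(1, sizeof(*r));
			r->handle = cfg[i];
			r->add_to_ml = 1;	/* schedule mid-layer add */
		}
		r->next = used;
		used = r;
	}
	for (struct res *r = old; r; r = r->next)
		r->del_from_ml = 1;		/* vanished: schedule removal */
	return used;
}

int main(void)
{
	struct res a = { .handle = 1 }, b = { .handle = 2 };
	int cfg[] = { 2, 3 };			/* 1 vanished, 3 is new */

	a.next = &b;
	struct res *used = reconcile(&a, cfg, 2);
	for (struct res *r = used; r; r = r->next)
		printf("handle %d add=%d\n", r->handle, r->add_to_ml);
	printf("handle %d del=%d\n", a.handle, a.del_from_ml);
	return 0;
}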
7891 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7902 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_query_ioa_cfg()
7903 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_query_ioa_cfg()
7904 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_ioafp_query_ioa_cfg()
7905 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_query_ioa_cfg()
7908 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) in ipr_ioafp_query_ioa_cfg()
7909 ioa_cfg->dual_raid = 1; in ipr_ioafp_query_ioa_cfg()
7910 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", in ipr_ioafp_query_ioa_cfg()
7911 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_ioafp_query_ioa_cfg()
7912 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); in ipr_ioafp_query_ioa_cfg()
7913 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_query_ioa_cfg()
7914 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_query_ioa_cfg()
7916 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; in ipr_ioafp_query_ioa_cfg()
7917 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; in ipr_ioafp_query_ioa_cfg()
7918 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; in ipr_ioafp_query_ioa_cfg()
7919 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; in ipr_ioafp_query_ioa_cfg()
7921 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, in ipr_ioafp_query_ioa_cfg()
7924 ipr_cmd->job_step = ipr_init_res_table; in ipr_ioafp_query_ioa_cfg()
7934 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_ioa_service_action_failed()
7945 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioa_service_action()
7947 ioarcb->res_handle = res_handle; in ipr_build_ioa_service_action()
7948 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; in ipr_build_ioa_service_action()
7949 ioarcb->cmd_pkt.cdb[1] = sa_code; in ipr_build_ioa_service_action()
7950 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_build_ioa_service_action()
7954 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7963 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_set_caching_parameters()
7964 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_set_caching_parameters()
7965 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_set_caching_parameters()
7969 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; in ipr_ioafp_set_caching_parameters()
7971 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { in ipr_ioafp_set_caching_parameters()
7976 ioarcb->cmd_pkt.cdb[2] = 0x40; in ipr_ioafp_set_caching_parameters()
7978 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; in ipr_ioafp_set_caching_parameters()
7991 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
8006 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_inquiry()
8009 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_ioafp_inquiry()
8010 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_inquiry()
8012 ioarcb->cmd_pkt.cdb[0] = INQUIRY; in ipr_ioafp_inquiry()
8013 ioarcb->cmd_pkt.cdb[1] = flags; in ipr_ioafp_inquiry()
8014 ioarcb->cmd_pkt.cdb[2] = page; in ipr_ioafp_inquiry()
8015 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_ioafp_inquiry()
8024 * ipr_inquiry_page_supported - Is the given inquiry page supported
8037 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) in ipr_inquiry_page_supported()
8038 if (page0->page[i] == page) in ipr_inquiry_page_supported()
8045 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8056 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_pageC4_inquiry()
8057 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_pageC4_inquiry()
8058 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_pageC4_inquiry()
8061 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; in ipr_ioafp_pageC4_inquiry()
8066 (ioa_cfg->vpd_cbs_dma in ipr_ioafp_pageC4_inquiry()
8078 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8089 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_cap_inquiry()
8090 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_cap_inquiry()
8091 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_cap_inquiry()
8094 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; in ipr_ioafp_cap_inquiry()
8099 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), in ipr_ioafp_cap_inquiry()
8109 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8120 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page3_inquiry()
8124 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; in ipr_ioafp_page3_inquiry()
8127 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), in ipr_ioafp_page3_inquiry()
8135 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8146 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page0_inquiry()
8152 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); in ipr_ioafp_page0_inquiry()
8154 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); in ipr_ioafp_page0_inquiry()
8157 dev_err(&ioa_cfg->pdev->dev, in ipr_ioafp_page0_inquiry()
8161 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_ioafp_page0_inquiry()
8163 list_add_tail(&ipr_cmd->queue, in ipr_ioafp_page0_inquiry()
8164 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
8169 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; in ipr_ioafp_page0_inquiry()
8172 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), in ipr_ioafp_page0_inquiry()
8180 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8190 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_std_inquiry()
8193 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; in ipr_ioafp_std_inquiry()
8196 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), in ipr_ioafp_std_inquiry()
8204 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8215 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_identify_hrrq()
8216 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_identify_hrrq()
8220 ipr_cmd->job_step = ipr_ioafp_std_inquiry; in ipr_ioafp_identify_hrrq()
8221 if (ioa_cfg->identify_hrrq_index == 0) in ipr_ioafp_identify_hrrq()
8222 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); in ipr_ioafp_identify_hrrq()
8224 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { in ipr_ioafp_identify_hrrq()
8225 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
8227 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; in ipr_ioafp_identify_hrrq()
8228 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_identify_hrrq()
8230 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_identify_hrrq()
8231 if (ioa_cfg->sis64) in ipr_ioafp_identify_hrrq()
8232 ioarcb->cmd_pkt.cdb[1] = 0x1; in ipr_ioafp_identify_hrrq()
8234 if (ioa_cfg->nvectors == 1) in ipr_ioafp_identify_hrrq()
8235 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
8237 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
8239 ioarcb->cmd_pkt.cdb[2] = in ipr_ioafp_identify_hrrq()
8240 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; in ipr_ioafp_identify_hrrq()
8241 ioarcb->cmd_pkt.cdb[3] = in ipr_ioafp_identify_hrrq()
8242 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; in ipr_ioafp_identify_hrrq()
8243 ioarcb->cmd_pkt.cdb[4] = in ipr_ioafp_identify_hrrq()
8244 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
8245 ioarcb->cmd_pkt.cdb[5] = in ipr_ioafp_identify_hrrq()
8246 ((u64) hrrq->host_rrq_dma) & 0xff; in ipr_ioafp_identify_hrrq()
8247 ioarcb->cmd_pkt.cdb[7] = in ipr_ioafp_identify_hrrq()
8248 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
8249 ioarcb->cmd_pkt.cdb[8] = in ipr_ioafp_identify_hrrq()
8250 (sizeof(u32) * hrrq->size) & 0xff; in ipr_ioafp_identify_hrrq()
8252 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
8253 ioarcb->cmd_pkt.cdb[9] = in ipr_ioafp_identify_hrrq()
8254 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8256 if (ioa_cfg->sis64) { in ipr_ioafp_identify_hrrq()
8257 ioarcb->cmd_pkt.cdb[10] = in ipr_ioafp_identify_hrrq()
8258 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; in ipr_ioafp_identify_hrrq()
8259 ioarcb->cmd_pkt.cdb[11] = in ipr_ioafp_identify_hrrq()
8260 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; in ipr_ioafp_identify_hrrq()
8261 ioarcb->cmd_pkt.cdb[12] = in ipr_ioafp_identify_hrrq()
8262 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; in ipr_ioafp_identify_hrrq()
8263 ioarcb->cmd_pkt.cdb[13] = in ipr_ioafp_identify_hrrq()
8264 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; in ipr_ioafp_identify_hrrq()
8267 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
8268 ioarcb->cmd_pkt.cdb[14] = in ipr_ioafp_identify_hrrq()
8269 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8274 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) in ipr_ioafp_identify_hrrq()
8275 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_ioafp_identify_hrrq()
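/*
 * A standalone sketch of the address packing in ipr_ioafp_identify_hrrq()
 * above: the 64-bit host RRQ DMA address is split big-endian across
 * non-contiguous CDB bytes -- bits 31:0 into cdb[2..5] and, on SIS-64
 * adapters, bits 63:32 into cdb[10..13]. Offsets mirror the listing.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack_hrrq_addr(uint8_t cdb[16], uint64_t dma, int sis64)
{
	cdb[2] = (dma >> 24) & 0xff;		/* low word, big-endian */
	cdb[3] = (dma >> 16) & 0xff;
	cdb[4] = (dma >> 8) & 0xff;
	cdb[5] = dma & 0xff;
	if (sis64) {
		cdb[10] = (dma >> 56) & 0xff;	/* high word */
		cdb[11] = (dma >> 48) & 0xff;
		cdb[12] = (dma >> 40) & 0xff;
		cdb[13] = (dma >> 32) & 0xff;
	}
}

int main(void)
{
	uint8_t cdb[16];

	memset(cdb, 0, sizeof(cdb));
	pack_hrrq_addr(cdb, 0x1122334455667788ull, 1);
	printf("cdb[2..5]=%02x%02x%02x%02x cdb[10..13]=%02x%02x%02x%02x\n",
	       cdb[2], cdb[3], cdb[4], cdb[5],
	       cdb[10], cdb[11], cdb[12], cdb[13]);
	return 0;
}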
8286 * ipr_reset_timer_done - Adapter reset timer function
8301 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_timer_done()
8304 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8306 if (ioa_cfg->reset_cmd == ipr_cmd) { in ipr_reset_timer_done()
8307 list_del(&ipr_cmd->queue); in ipr_reset_timer_done()
8308 ipr_cmd->done(ipr_cmd); in ipr_reset_timer_done()
8311 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8315 * ipr_reset_start_timer - Start a timer for adapter reset job
8333 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_start_timer()
8334 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_start_timer()
8336 ipr_cmd->timer.expires = jiffies + timeout; in ipr_reset_start_timer()
8337 ipr_cmd->timer.function = ipr_reset_timer_done; in ipr_reset_start_timer()
8338 add_timer(&ipr_cmd->timer); in ipr_reset_start_timer()
8342 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8353 spin_lock(&hrrq->_lock); in ipr_init_ioa_mem()
8354 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); in ipr_init_ioa_mem()
8357 hrrq->hrrq_start = hrrq->host_rrq; in ipr_init_ioa_mem()
8358 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; in ipr_init_ioa_mem()
8359 hrrq->hrrq_curr = hrrq->hrrq_start; in ipr_init_ioa_mem()
8360 hrrq->toggle_bit = 1; in ipr_init_ioa_mem()
8361 spin_unlock(&hrrq->_lock); in ipr_init_ioa_mem()
8365 ioa_cfg->identify_hrrq_index = 0; in ipr_init_ioa_mem()
8366 if (ioa_cfg->hrrq_num == 1) in ipr_init_ioa_mem()
8367 atomic_set(&ioa_cfg->hrrq_index, 0); in ipr_init_ioa_mem()
8369 atomic_set(&ioa_cfg->hrrq_index, 1); in ipr_init_ioa_mem()
8372 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); in ipr_init_ioa_mem()
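/*
 * A standalone sketch of the circular host response queue initialized by
 * ipr_init_ioa_mem() above: the consumer advances hrrq_curr, wraps from
 * hrrq_end back to hrrq_start, and flips toggle_bit on each wrap so that
 * entries written in the previous lap (whose toggle bit no longer
 * matches) read as stale. The bit position and entry values here are
 * illustrative; the field names follow the listing.
 */
#include <stdint.h>
#include <stdio.h>

#define HRRQ_SIZE  4
#define TOGGLE_BIT 0x1u		/* illustrative toggle-bit position */

struct hrrq {
	uint32_t host_rrq[HRRQ_SIZE];
	uint32_t *hrrq_start, *hrrq_end, *hrrq_curr;
	uint32_t toggle_bit;
};

static void hrrq_init(struct hrrq *q)
{
	q->hrrq_start = q->host_rrq;
	q->hrrq_end = &q->host_rrq[HRRQ_SIZE - 1];
	q->hrrq_curr = q->hrrq_start;
	q->toggle_bit = 1;
}

/* returns 1 and yields *entry if a fresh response is present */
static int hrrq_pop(struct hrrq *q, uint32_t *entry)
{
	if ((*q->hrrq_curr & TOGGLE_BIT) != q->toggle_bit)
		return 0;			/* stale: previous lap */
	*entry = *q->hrrq_curr;
	if (q->hrrq_curr == q->hrrq_end) {	/* wrap and flip */
		q->hrrq_curr = q->hrrq_start;
		q->toggle_bit ^= 1u;
	} else {
		q->hrrq_curr++;
	}
	return 1;
}

int main(void)
{
	struct hrrq q = { { 0 } };
	uint32_t e;

	hrrq_init(&q);
	q.host_rrq[0] = 0x100 | 1;	/* adapter wrote one entry, lap 1 */
	while (hrrq_pop(&q, &e))
		printf("response 0x%x\n", (unsigned)e);
	return 0;
}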
8376 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8387 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_next_stage()
8390 feedback = readl(ioa_cfg->regs.init_feedback_reg); in ipr_reset_next_stage()
8394 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); in ipr_reset_next_stage()
8405 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8406 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8407 stage_time = ioa_cfg->transop_timeout; in ipr_reset_next_stage()
8408 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
8410 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_next_stage()
8412 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
8415 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8416 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8421 ipr_cmd->timer.expires = jiffies + stage_time * HZ; in ipr_reset_next_stage()
8422 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_next_stage()
8423 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_next_stage()
8424 add_timer(&ipr_cmd->timer); in ipr_reset_next_stage()
8426 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_next_stage()
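/*
 * A small sketch of the feedback-register decode at the top of
 * ipr_reset_next_stage() above: one 32-bit register carries both the IPL
 * stage and its remaining time, extracted with masks. The mask values
 * below are illustrative, not the driver's real constants.
 */
#include <stdint.h>
#include <stdio.h>

#define STAGE_MASK	0xff000000u	/* illustrative */
#define STAGE_TIME_MASK	0x0000ffffu	/* illustrative */

int main(void)
{
	uint32_t feedback = 0x07000120;	/* pretend readl() result */
	uint32_t stage = feedback & STAGE_MASK;
	uint32_t stage_time = feedback & STAGE_TIME_MASK;

	printf("IPL stage = 0x%x, IPL stage time = %u\n",
	       stage >> 24, (unsigned)stage_time);
	return 0;
}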
8432 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8443 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_enable_ioa()
8449 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_enable_ioa()
8452 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_enable_ioa()
8453 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8454 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
8455 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8457 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8459 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8460 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8463 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_enable_ioa()
8467 ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8468 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8473 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_enable_ioa()
8475 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8478 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); in ipr_reset_enable_ioa()
8480 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8482 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8484 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); in ipr_reset_enable_ioa()
8486 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8487 ipr_cmd->job_step = ipr_reset_next_stage; in ipr_reset_enable_ioa()
8491 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); in ipr_reset_enable_ioa()
8492 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_enable_ioa()
8493 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_enable_ioa()
8494 add_timer(&ipr_cmd->timer); in ipr_reset_enable_ioa()
8495 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_enable_ioa()
8502 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8513 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_for_dump()
8515 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_reset_wait_for_dump()
8516 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_reset_wait_for_dump()
8517 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_reset_wait_for_dump()
8518 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_reset_wait_for_dump()
8520 ioa_cfg->dump_timeout = 1; in ipr_reset_wait_for_dump()
8521 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_wait_for_dump()
8527 * ipr_unit_check_no_data - Log a unit check/no data error log
8538 ioa_cfg->errors_logged++; in ipr_unit_check_no_data()
8539 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); in ipr_unit_check_no_data()
8543 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8560 mailbox = readl(ioa_cfg->ioa_mailbox); in ipr_get_unit_check_buffer()
8562 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { in ipr_get_unit_check_buffer()
8582 length = (be32_to_cpu(sdt.entry[0].end_token) - in ipr_get_unit_check_buffer()
8586 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, in ipr_get_unit_check_buffer()
8588 list_del_init(&hostrcb->queue); in ipr_get_unit_check_buffer()
8589 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); in ipr_get_unit_check_buffer()
8593 (__be32 *)&hostrcb->hcam, in ipr_get_unit_check_buffer()
8594 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); in ipr_get_unit_check_buffer()
8598 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_get_unit_check_buffer()
8600 ioa_cfg->sdt_state == GET_DUMP) in ipr_get_unit_check_buffer()
8601 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_get_unit_check_buffer()
8605 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_get_unit_check_buffer()
8609 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8619 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_get_unit_check_job()
8622 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_get_unit_check_job()
8624 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_get_unit_check_job()
8633 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_dump_mailbox_wait()
8637 if (ioa_cfg->sdt_state != GET_DUMP) in ipr_dump_mailbox_wait()
8640 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || in ipr_dump_mailbox_wait()
8641 (readl(ioa_cfg->regs.sense_interrupt_reg) & in ipr_dump_mailbox_wait()
8644 if (!ipr_cmd->u.time_left) in ipr_dump_mailbox_wait()
8645 dev_err(&ioa_cfg->pdev->dev, in ipr_dump_mailbox_wait()
8648 ioa_cfg->sdt_state = READ_DUMP; in ipr_dump_mailbox_wait()
8649 ioa_cfg->dump_timeout = 0; in ipr_dump_mailbox_wait()
8650 if (ioa_cfg->sis64) in ipr_dump_mailbox_wait()
8654 ipr_cmd->job_step = ipr_reset_wait_for_dump; in ipr_dump_mailbox_wait()
8655 schedule_work(&ioa_cfg->work_q); in ipr_dump_mailbox_wait()
8658 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_dump_mailbox_wait()
8668 * ipr_reset_restore_cfg_space - Restore PCI config space.
8680 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_restore_cfg_space()
8683 ioa_cfg->pdev->state_saved = true; in ipr_reset_restore_cfg_space()
8684 pci_restore_state(ioa_cfg->pdev); in ipr_reset_restore_cfg_space()
8687 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_restore_cfg_space()
8693 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8695 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8696 readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8699 if (ioa_cfg->ioa_unit_checked) { in ipr_reset_restore_cfg_space()
8700 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8701 ipr_cmd->job_step = ipr_reset_get_unit_check_job; in ipr_reset_restore_cfg_space()
8705 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_restore_cfg_space()
8707 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_restore_cfg_space()
8713 if (ioa_cfg->in_ioa_bringdown) { in ipr_reset_restore_cfg_space()
8714 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_restore_cfg_space()
8715 } else if (ioa_cfg->sdt_state == GET_DUMP) { in ipr_reset_restore_cfg_space()
8716 ipr_cmd->job_step = ipr_dump_mailbox_wait; in ipr_reset_restore_cfg_space()
8717 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; in ipr_reset_restore_cfg_space()
8719 ipr_cmd->job_step = ipr_reset_enable_ioa; in ipr_reset_restore_cfg_space()
8727 * ipr_reset_bist_done - BIST has completed on the adapter.
8737 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_bist_done()
8740 if (ioa_cfg->cfg_locked) in ipr_reset_bist_done()
8741 pci_cfg_access_unlock(ioa_cfg->pdev); in ipr_reset_bist_done()
8742 ioa_cfg->cfg_locked = 0; in ipr_reset_bist_done()
8743 ipr_cmd->job_step = ipr_reset_restore_cfg_space; in ipr_reset_bist_done()
8749 * ipr_reset_start_bist - Run BIST on the adapter.
8759 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_start_bist()
8763 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) in ipr_reset_start_bist()
8765 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_start_bist()
8767 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); in ipr_reset_start_bist()
8770 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_start_bist()
8774 if (ioa_cfg->cfg_locked) in ipr_reset_start_bist()
8775 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); in ipr_reset_start_bist()
8776 ioa_cfg->cfg_locked = 0; in ipr_reset_start_bist()
8777 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_start_bist()
8786 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8797 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_slot_reset_done()
8804 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8813 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_reset_work()
8814 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_reset_reset_work()
8822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8823 if (ioa_cfg->reset_cmd == ipr_cmd) in ipr_reset_reset_work()
8825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8830 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8840 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_slot_reset()
8843 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); in ipr_reset_slot_reset()
8844 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); in ipr_reset_slot_reset()
8845 ipr_cmd->job_step = ipr_reset_slot_reset_done; in ipr_reset_slot_reset()
8851 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8861 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_block_config_access_wait()
8864 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { in ipr_reset_block_config_access_wait()
8865 ioa_cfg->cfg_locked = 1; in ipr_reset_block_config_access_wait()
8866 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8868 if (ipr_cmd->u.time_left) { in ipr_reset_block_config_access_wait()
8870 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access_wait()
8874 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8875 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_block_config_access_wait()
8884 * ipr_reset_block_config_access - Block config access to the IOA
8894 ipr_cmd->ioa_cfg->cfg_locked = 0; in ipr_reset_block_config_access()
8895 ipr_cmd->job_step = ipr_reset_block_config_access_wait; in ipr_reset_block_config_access()
8896 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access()
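/*
 * Together these two steps implement a bounded trylock: the wait step keeps
 * retrying pci_cfg_access_trylock() while counting the elapsed time against
 * u.time_left, then proceeds with the reset anyway once the timeout expires.
 * A synchronous equivalent (hypothetical helper, for illustration only):
 */
static int example_lock_cfg_access(struct pci_dev *pdev, unsigned int to_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(to_ms);

	while (!pci_cfg_access_trylock(pdev)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* proceed without the lock */
		msleep(10);
	}
	return 0;	/* caller must pci_cfg_access_unlock() when done */
}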
8901 * ipr_reset_allowed - Query whether or not IOA can be reset
8905 * 0 if reset not allowed / non-zero if reset is allowed
8911 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_reset_allowed()
8916 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8932 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_to_start_bist()
8935 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { in ipr_reset_wait_to_start_bist()
8936 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_wait_to_start_bist()
8939 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_wait_to_start_bist()
8947 * ipr_reset_alert - Alert the adapter of a pending reset
8960 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_alert()
8965 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); in ipr_reset_alert()
8969 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_alert()
8970 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; in ipr_reset_alert()
8972 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_alert()
8975 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_alert()
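/*
 * The PCI_COMMAND read above gates the MMIO alert on PCI_COMMAND_MEMORY: if
 * memory space is disabled, a writel() to the alert register would be
 * silently dropped, so the reset skips straight to blocking config access.
 * Schematic form of the check (illustrative helper, not from ipr.c):
 */
static bool example_mmio_reachable(struct pci_dev *pdev)
{
	u16 cmd_reg;

	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd_reg))
		return false;
	return cmd_reg & PCI_COMMAND_MEMORY;	/* BAR writes will land */
}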
8983 * ipr_reset_quiesce_done - Complete IOA disconnect
8993 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_quiesce_done()
8996 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_quiesce_done()
9003 * ipr_reset_cancel_hcam_done - Check for outstanding commands
9014 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam_done()
9021 ipr_cmd->job_step = ipr_reset_quiesce_done; in ipr_reset_cancel_hcam_done()
9024 spin_lock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
9025 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam_done()
9028 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cancel_hcam_done()
9032 spin_unlock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
9043 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9053 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam()
9057 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam()
9060 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; in ipr_reset_cancel_hcam()
9062 if (!hrrq->ioa_is_dead) { in ipr_reset_cancel_hcam()
9063 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { in ipr_reset_cancel_hcam()
9064 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam()
9065 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) in ipr_reset_cancel_hcam()
9068 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_cancel_hcam()
9069 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
9070 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_reset_cancel_hcam()
9071 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
9072 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; in ipr_reset_cancel_hcam()
9073 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; in ipr_reset_cancel_hcam()
9074 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; in ipr_reset_cancel_hcam()
9075 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; in ipr_reset_cancel_hcam()
9076 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; in ipr_reset_cancel_hcam()
9077 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; in ipr_reset_cancel_hcam()
9078 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; in ipr_reset_cancel_hcam()
9079 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; in ipr_reset_cancel_hcam()
9080 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; in ipr_reset_cancel_hcam()
9081 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; in ipr_reset_cancel_hcam()
9087 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_cancel_hcam()
9092 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_cancel_hcam()
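/*
 * Worked example of the cancel CDB built above: the 64-bit IOARCB address is
 * split big-endian across two non-contiguous CDB fields, bytes 10..13
 * carrying bits 63..32 and bytes 2..5 carrying bits 31..0. For instance,
 * dma_addr = 0x0000000812345678 gives cdb[10..13] = 00 00 00 08 and
 * cdb[2..5] = 12 34 56 78. Equivalent sketch using kernel helpers:
 */
static void example_pack_cancel_cdb(u8 *cdb, u64 dma_addr)
{
	put_unaligned_be32(upper_32_bits(dma_addr), &cdb[10]);
	put_unaligned_be32(lower_32_bits(dma_addr), &cdb[2]);
}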
9099 * ipr_reset_ucode_download_done - Microcode download completion
9109 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download_done()
9110 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download_done()
9112 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, in ipr_reset_ucode_download_done()
9113 sglist->num_sg, DMA_TO_DEVICE); in ipr_reset_ucode_download_done()
9115 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download_done()
9120 * ipr_reset_ucode_download - Download microcode to the adapter
9131 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download()
9132 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download()
9135 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download()
9140 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_ucode_download()
9141 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_reset_ucode_download()
9142 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; in ipr_reset_ucode_download()
9143 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; in ipr_reset_ucode_download()
9144 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; in ipr_reset_ucode_download()
9145 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; in ipr_reset_ucode_download()
9146 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; in ipr_reset_ucode_download()
9148 if (ioa_cfg->sis64) in ipr_reset_ucode_download()
9152 ipr_cmd->job_step = ipr_reset_ucode_download_done; in ipr_reset_ucode_download()
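/*
 * The three assignments above encode the microcode image length as the
 * standard 24-bit big-endian parameter list length of a SCSI WRITE BUFFER
 * CDB (bytes 6..8). E.g. buffer_len = 0x012345 yields cdb[6] = 0x01,
 * cdb[7] = 0x23, cdb[8] = 0x45. Equivalent sketch:
 */
static void example_set_write_buffer_len(u8 *cdb, u32 len)
{
	cdb[6] = (len >> 16) & 0xff;
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;
}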
9162 * ipr_reset_shutdown_ioa - Shutdown the adapter
9174 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_shutdown_ioa()
9175 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; in ipr_reset_shutdown_ioa()
9181 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_shutdown_ioa()
9183 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
9184 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_shutdown_ioa()
9185 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_shutdown_ioa()
9186 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_reset_shutdown_ioa()
9187 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; in ipr_reset_shutdown_ioa()
9193 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_reset_shutdown_ioa()
9201 ipr_cmd->job_step = ipr_reset_ucode_download; in ipr_reset_shutdown_ioa()
9203 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_shutdown_ioa()
9210 * ipr_reset_ioa_job - Adapter reset job
9221 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ioa_job()
9224 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_ioa_job()
9226 if (ioa_cfg->reset_cmd != ipr_cmd) { in ipr_reset_ioa_job()
9231 list_add_tail(&ipr_cmd->queue, in ipr_reset_ioa_job()
9232 &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_ioa_job()
9237 rc = ipr_cmd->job_step_failed(ipr_cmd); in ipr_reset_ioa_job()
9243 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; in ipr_reset_ioa_job()
9244 rc = ipr_cmd->job_step(ipr_cmd); in ipr_reset_ioa_job()
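/*
 * Reading the tail of this function: the reset engine is a simple state
 * machine. Each job_step callback returns either IPR_RC_JOB_CONTINUE (run
 * the next step immediately) or IPR_RC_JOB_RETURN (an interrupt or timer
 * will re-enter ipr_reset_ioa_job() later). Schematically:
 *
 *	do {
 *		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
 *		rc = ipr_cmd->job_step(ipr_cmd);
 *	} while (rc == IPR_RC_JOB_CONTINUE);
 */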
9249 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9269 ioa_cfg->in_reset_reload = 1; in _ipr_initiate_ioa_reset()
9270 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in _ipr_initiate_ioa_reset()
9271 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9272 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
9273 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9276 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in _ipr_initiate_ioa_reset()
9277 ioa_cfg->scsi_unblock = 0; in _ipr_initiate_ioa_reset()
9278 ioa_cfg->scsi_blocked = 1; in _ipr_initiate_ioa_reset()
9279 scsi_block_requests(ioa_cfg->host); in _ipr_initiate_ioa_reset()
9283 ioa_cfg->reset_cmd = ipr_cmd; in _ipr_initiate_ioa_reset()
9284 ipr_cmd->job_step = job_step; in _ipr_initiate_ioa_reset()
9285 ipr_cmd->u.shutdown_type = shutdown_type; in _ipr_initiate_ioa_reset()
9291 * ipr_initiate_ioa_reset - Initiate an adapter reset
9307 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
9310 if (ioa_cfg->in_reset_reload) { in ipr_initiate_ioa_reset()
9311 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_initiate_ioa_reset()
9312 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_initiate_ioa_reset()
9313 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_initiate_ioa_reset()
9314 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_reset()
9317 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { in ipr_initiate_ioa_reset()
9318 dev_err(&ioa_cfg->pdev->dev, in ipr_initiate_ioa_reset()
9319 "IOA taken offline - error recovery failed\n"); in ipr_initiate_ioa_reset()
9321 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_reset()
9322 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_initiate_ioa_reset()
9323 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9324 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
9325 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9329 if (ioa_cfg->in_ioa_bringdown) { in ipr_initiate_ioa_reset()
9330 ioa_cfg->reset_cmd = NULL; in ipr_initiate_ioa_reset()
9331 ioa_cfg->in_reset_reload = 0; in ipr_initiate_ioa_reset()
9333 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_initiate_ioa_reset()
9335 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
9336 ioa_cfg->scsi_unblock = 1; in ipr_initiate_ioa_reset()
9337 schedule_work(&ioa_cfg->work_q); in ipr_initiate_ioa_reset()
9341 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_reset()
9351 * ipr_reset_freeze - Hold off all I/O activity
9360 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_freeze()
9364 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_freeze()
9365 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9366 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
9367 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9370 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_freeze()
9371 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_freeze()
9376 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9379 * Description: This routine is called to tell us that the MMIO access to the IOA has been restored.
9387 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9388 if (!ioa_cfg->probe_done) in ipr_pci_mmio_enabled()
9390 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9395 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9398 * Description: This routine is called to tell us that the PCI bus is down; the driver can only hold off I/O until the bus comes back.
9407 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9408 if (ioa_cfg->probe_done) in ipr_pci_frozen()
9410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9414 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9426 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9427 if (ioa_cfg->probe_done) { in ipr_pci_slot_reset()
9428 if (ioa_cfg->needs_warm_reset) in ipr_pci_slot_reset()
9434 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_slot_reset()
9435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9440 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9452 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9453 if (ioa_cfg->probe_done) { in ipr_pci_perm_failure()
9454 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_pci_perm_failure()
9455 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_pci_perm_failure()
9456 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; in ipr_pci_perm_failure()
9457 ioa_cfg->in_ioa_bringdown = 1; in ipr_pci_perm_failure()
9458 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_pci_perm_failure()
9459 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9460 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
9461 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9466 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_perm_failure()
9467 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9471 * ipr_pci_error_detected - Called when a PCI error is detected.
9498 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9505 * 0 on success / -EIO on failure
9513 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9514 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); in ipr_probe_ioa_part2()
9515 ioa_cfg->probe_done = 1; in ipr_probe_ioa_part2()
9516 if (ioa_cfg->needs_hard_reset) { in ipr_probe_ioa_part2()
9517 ioa_cfg->needs_hard_reset = 0; in ipr_probe_ioa_part2()
9522 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9529 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9539 if (ioa_cfg->ipr_cmnd_list) { in ipr_free_cmd_blks()
9541 if (ioa_cfg->ipr_cmnd_list[i]) in ipr_free_cmd_blks()
9542 dma_pool_free(ioa_cfg->ipr_cmd_pool, in ipr_free_cmd_blks()
9543 ioa_cfg->ipr_cmnd_list[i], in ipr_free_cmd_blks()
9544 ioa_cfg->ipr_cmnd_list_dma[i]); in ipr_free_cmd_blks()
9546 ioa_cfg->ipr_cmnd_list[i] = NULL; in ipr_free_cmd_blks()
9550 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); in ipr_free_cmd_blks()
9552 kfree(ioa_cfg->ipr_cmnd_list); in ipr_free_cmd_blks()
9553 kfree(ioa_cfg->ipr_cmnd_list_dma); in ipr_free_cmd_blks()
9554 ioa_cfg->ipr_cmnd_list = NULL; in ipr_free_cmd_blks()
9555 ioa_cfg->ipr_cmnd_list_dma = NULL; in ipr_free_cmd_blks()
9556 ioa_cfg->ipr_cmd_pool = NULL; in ipr_free_cmd_blks()
9560 * ipr_free_mem - Frees memory allocated for an adapter
9570 kfree(ioa_cfg->res_entries); in ipr_free_mem()
9571 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_free_mem()
9572 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_free_mem()
9575 for (i = 0; i < ioa_cfg->hrrq_num; i++) in ipr_free_mem()
9576 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9577 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
9578 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
9579 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
9581 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, in ipr_free_mem()
9582 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_free_mem()
9585 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9587 ioa_cfg->hostrcb[i], in ipr_free_mem()
9588 ioa_cfg->hostrcb_dma[i]); in ipr_free_mem()
9592 kfree(ioa_cfg->trace); in ipr_free_mem()
9596 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9607 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_irqs()
9610 for (i = 0; i < ioa_cfg->nvectors; i++) in ipr_free_irqs()
9611 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); in ipr_free_irqs()
9616 * ipr_free_all_resources - Free all allocated resources for an adapter.
9627 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_all_resources()
9631 if (ioa_cfg->reset_work_q) in ipr_free_all_resources()
9632 destroy_workqueue(ioa_cfg->reset_work_q); in ipr_free_all_resources()
9633 iounmap(ioa_cfg->hdw_dma_regs); in ipr_free_all_resources()
9636 scsi_host_put(ioa_cfg->host); in ipr_free_all_resources()
9642 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9646 * 0 on success / -ENOMEM on allocation failure
9655 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, in ipr_alloc_cmd_blks()
9658 if (!ioa_cfg->ipr_cmd_pool) in ipr_alloc_cmd_blks()
9659 return -ENOMEM; in ipr_alloc_cmd_blks()
9661 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); in ipr_alloc_cmd_blks()
9662 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); in ipr_alloc_cmd_blks()
9664 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { in ipr_alloc_cmd_blks()
9666 return -ENOMEM; in ipr_alloc_cmd_blks()
9669 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_cmd_blks()
9670 if (ioa_cfg->hrrq_num > 1) { in ipr_alloc_cmd_blks()
9673 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9674 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9675 (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9679 (ioa_cfg->hrrq_num - 1); in ipr_alloc_cmd_blks()
9680 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
9682 (i - 1) * entries_each_hrrq; in ipr_alloc_cmd_blks()
9683 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9685 i * entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9689 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9690 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9692 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
9695 BUG_ON(ioa_cfg->hrrq_num == 0); in ipr_alloc_cmd_blks()
9697 i = IPR_NUM_CMD_BLKS - in ipr_alloc_cmd_blks()
9698 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
9700 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
9701 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
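/*
 * The two lines above fold the integer-division remainder into the last HRRQ
 * so every command block has an owner. Worked example (numbers illustrative):
 * with IPR_NUM_CMD_BLKS = 1000 and the last queue's max_cmd_id at 995 after
 * partitioning, the leftover is i = 1000 - 995 - 1 = 4, so the last queue's
 * size grows by 4 and its max_cmd_id becomes 999.
 */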
9705 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, in ipr_alloc_cmd_blks()
9710 return -ENOMEM; in ipr_alloc_cmd_blks()
9713 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; in ipr_alloc_cmd_blks()
9714 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; in ipr_alloc_cmd_blks()
9716 ioarcb = &ipr_cmd->ioarcb; in ipr_alloc_cmd_blks()
9717 ipr_cmd->dma_addr = dma_addr; in ipr_alloc_cmd_blks()
9718 if (ioa_cfg->sis64) in ipr_alloc_cmd_blks()
9719 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); in ipr_alloc_cmd_blks()
9721 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); in ipr_alloc_cmd_blks()
9723 ioarcb->host_response_handle = cpu_to_be32(i << 2); in ipr_alloc_cmd_blks()
9724 if (ioa_cfg->sis64) { in ipr_alloc_cmd_blks()
9725 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_alloc_cmd_blks()
9727 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
9730 ioarcb->write_ioadl_addr = in ipr_alloc_cmd_blks()
9732 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_alloc_cmd_blks()
9733 ioarcb->ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
9736 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); in ipr_alloc_cmd_blks()
9737 ipr_cmd->cmd_index = i; in ipr_alloc_cmd_blks()
9738 ipr_cmd->ioa_cfg = ioa_cfg; in ipr_alloc_cmd_blks()
9739 ipr_cmd->sense_buffer_dma = dma_addr + in ipr_alloc_cmd_blks()
9742 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; in ipr_alloc_cmd_blks()
9743 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
9744 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_alloc_cmd_blks()
9745 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
9753 * ipr_alloc_mem - Allocate memory for an adapter
9757 * 0 on success / non-zero for error
9761 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_alloc_mem()
9762 int i, rc = -ENOMEM; in ipr_alloc_mem()
9765 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, in ipr_alloc_mem()
9769 if (!ioa_cfg->res_entries) in ipr_alloc_mem()
9772 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { in ipr_alloc_mem()
9773 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); in ipr_alloc_mem()
9774 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9777 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9779 &ioa_cfg->vpd_cbs_dma, in ipr_alloc_mem()
9782 if (!ioa_cfg->vpd_cbs) in ipr_alloc_mem()
9788 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9789 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9790 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9791 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
9794 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
9795 while (--i >= 0) /* >= 0 so hrrq[0]'s buffer is freed too */ in ipr_alloc_mem()
9796 dma_free_coherent(&pdev->dev, in ipr_alloc_mem()
9797 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9798 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9799 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9802 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9805 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9806 ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9807 &ioa_cfg->cfg_table_dma, in ipr_alloc_mem()
9810 if (!ioa_cfg->u.cfg_table) in ipr_alloc_mem()
9814 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9816 &ioa_cfg->hostrcb_dma[i], in ipr_alloc_mem()
9819 if (!ioa_cfg->hostrcb[i]) in ipr_alloc_mem()
9822 ioa_cfg->hostrcb[i]->hostrcb_dma = in ipr_alloc_mem()
9823 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); in ipr_alloc_mem()
9824 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9825 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); in ipr_alloc_mem()
9828 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, in ipr_alloc_mem()
9832 if (!ioa_cfg->trace) in ipr_alloc_mem()
9841 while (i-- > 0) { in ipr_alloc_mem()
9842 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), in ipr_alloc_mem()
9843 ioa_cfg->hostrcb[i], in ipr_alloc_mem()
9844 ioa_cfg->hostrcb_dma[i]); in ipr_alloc_mem()
9846 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9847 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_alloc_mem()
9849 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9850 dma_free_coherent(&pdev->dev, in ipr_alloc_mem()
9851 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9852 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9853 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9858 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_alloc_mem()
9859 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_alloc_mem()
9861 kfree(ioa_cfg->res_entries); in ipr_alloc_mem()
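/*
 * The failure path above is the conventional kernel goto ladder: each
 * allocation gets an out_* label, and a failure jumps to the label that
 * unwinds everything allocated so far, in reverse order. Minimal
 * illustration (hypothetical buffers, not ipr structures):
 */
static int example_alloc_two(void **a, void **b)
{
	*a = kmalloc(64, GFP_KERNEL);
	if (!*a)
		goto out;
	*b = kmalloc(64, GFP_KERNEL);
	if (!*b)
		goto out_free_a;
	return 0;
out_free_a:
	kfree(*a);
out:
	return -ENOMEM;
}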
9866 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9877 ioa_cfg->bus_attr[i].bus = i; in ipr_initialize_bus_attr()
9878 ioa_cfg->bus_attr[i].qas_enabled = 0; in ipr_initialize_bus_attr()
9879 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; in ipr_initialize_bus_attr()
9881 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; in ipr_initialize_bus_attr()
9883 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; in ipr_initialize_bus_attr()
9888 * ipr_init_regs - Initialize IOA registers
9900 p = &ioa_cfg->chip_cfg->regs; in ipr_init_regs()
9901 t = &ioa_cfg->regs; in ipr_init_regs()
9902 base = ioa_cfg->hdw_dma_regs; in ipr_init_regs()
9904 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; in ipr_init_regs()
9905 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; in ipr_init_regs()
9906 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; in ipr_init_regs()
9907 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; in ipr_init_regs()
9908 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; in ipr_init_regs()
9909 t->clr_interrupt_reg = base + p->clr_interrupt_reg; in ipr_init_regs()
9910 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; in ipr_init_regs()
9911 t->sense_interrupt_reg = base + p->sense_interrupt_reg; in ipr_init_regs()
9912 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; in ipr_init_regs()
9913 t->ioarrin_reg = base + p->ioarrin_reg; in ipr_init_regs()
9914 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; in ipr_init_regs()
9915 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; in ipr_init_regs()
9916 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; in ipr_init_regs()
9917 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; in ipr_init_regs()
9918 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; in ipr_init_regs()
9919 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; in ipr_init_regs()
9921 if (ioa_cfg->sis64) { in ipr_init_regs()
9922 t->init_feedback_reg = base + p->init_feedback_reg; in ipr_init_regs()
9923 t->dump_addr_reg = base + p->dump_addr_reg; in ipr_init_regs()
9924 t->dump_data_reg = base + p->dump_data_reg; in ipr_init_regs()
9925 t->endian_swap_reg = base + p->endian_swap_reg; in ipr_init_regs()
9930 * ipr_init_ioa_cfg - Initialize IOA config struct
9943 ioa_cfg->host = host; in ipr_init_ioa_cfg()
9944 ioa_cfg->pdev = pdev; in ipr_init_ioa_cfg()
9945 ioa_cfg->log_level = ipr_log_level; in ipr_init_ioa_cfg()
9946 ioa_cfg->doorbell = IPR_DOORBELL; in ipr_init_ioa_cfg()
9947 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); in ipr_init_ioa_cfg()
9948 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); in ipr_init_ioa_cfg()
9949 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); in ipr_init_ioa_cfg()
9950 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); in ipr_init_ioa_cfg()
9951 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); in ipr_init_ioa_cfg()
9952 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); in ipr_init_ioa_cfg()
9954 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); in ipr_init_ioa_cfg()
9955 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); in ipr_init_ioa_cfg()
9956 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); in ipr_init_ioa_cfg()
9957 INIT_LIST_HEAD(&ioa_cfg->free_res_q); in ipr_init_ioa_cfg()
9958 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in ipr_init_ioa_cfg()
9959 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); in ipr_init_ioa_cfg()
9960 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); in ipr_init_ioa_cfg()
9961 init_waitqueue_head(&ioa_cfg->reset_wait_q); in ipr_init_ioa_cfg()
9962 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_init_ioa_cfg()
9963 init_waitqueue_head(&ioa_cfg->eeh_wait_q); in ipr_init_ioa_cfg()
9964 ioa_cfg->sdt_state = INACTIVE; in ipr_init_ioa_cfg()
9967 ioa_cfg->max_devs_supported = ipr_max_devs; in ipr_init_ioa_cfg()
9969 if (ioa_cfg->sis64) { in ipr_init_ioa_cfg()
9970 host->max_channel = IPR_MAX_SIS64_BUSES; in ipr_init_ioa_cfg()
9971 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; in ipr_init_ioa_cfg()
9972 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; in ipr_init_ioa_cfg()
9974 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; in ipr_init_ioa_cfg()
9975 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) in ipr_init_ioa_cfg()
9977 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9979 host->max_channel = IPR_VSET_BUS; in ipr_init_ioa_cfg()
9980 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; in ipr_init_ioa_cfg()
9981 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; in ipr_init_ioa_cfg()
9983 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; in ipr_init_ioa_cfg()
9984 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) in ipr_init_ioa_cfg()
9986 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9989 host->unique_id = host->host_no; in ipr_init_ioa_cfg()
9990 host->max_cmd_len = IPR_MAX_CDB_LEN; in ipr_init_ioa_cfg()
9991 host->can_queue = ioa_cfg->max_cmds; in ipr_init_ioa_cfg()
9994 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9995 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9996 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
9997 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
9999 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
10001 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
10006 * ipr_get_chip_info - Find adapter chip information
10018 if (ipr_chip[i].vendor == dev_id->vendor && in ipr_get_chip_info()
10019 ipr_chip[i].device == dev_id->device) in ipr_get_chip_info()
10025 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10034 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_wait_for_pci_err_recovery()
10037 wait_event_timeout(ioa_cfg->eeh_wait_q, in ipr_wait_for_pci_err_recovery()
10046 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; in name_msi_vectors()
10048 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { in name_msi_vectors()
10049 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, in name_msi_vectors()
10050 "host%d-%d", ioa_cfg->host->host_no, vec_idx); in name_msi_vectors()
10051 ioa_cfg->vectors_info[vec_idx]. in name_msi_vectors()
10052 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; in name_msi_vectors()
10061 for (i = 1; i < ioa_cfg->nvectors; i++) { in ipr_request_other_msi_irqs()
10065 ioa_cfg->vectors_info[i].desc, in ipr_request_other_msi_irqs()
10066 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10068 while (--i >= 0) in ipr_request_other_msi_irqs()
10070 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10078 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10086 * 0 on success / non-zero on failure
10094 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); in ipr_test_intr()
10095 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
10097 ioa_cfg->msi_received = 1; in ipr_test_intr()
10098 wake_up(&ioa_cfg->msi_wait_q); in ipr_test_intr()
10100 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
10105 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10114 * 0 on success / non-zero on failure
10124 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10125 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_test_msi()
10126 ioa_cfg->msi_received = 0; in ipr_test_msi()
10128 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_test_msi()
10129 readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_test_msi()
10130 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10134 dev_err(&pdev->dev, "Cannot assign irq %d\n", irq); in ipr_test_msi()
10137 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); in ipr_test_msi()
10139 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); in ipr_test_msi()
10140 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_test_msi()
10141 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); in ipr_test_msi()
10142 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10145 if (!ioa_cfg->msi_received) { in ipr_test_msi()
10147 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); in ipr_test_msi()
10148 rc = -EOPNOTSUPP; in ipr_test_msi()
10150 dev_info(&pdev->dev, "MSI test succeeded.\n"); in ipr_test_msi()
10152 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
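/*
 * Shape of the MSI self-test above, reduced to its essentials: clear a flag,
 * fire one debug interrupt, and sleep on a wait queue that the test ISR
 * wakes. If nothing arrives within HZ, MSI delivery is assumed broken and
 * the driver falls back to legacy interrupts. Sketch; the trigger callback
 * stands in for the device-specific writel sequence above:
 */
static int example_msi_selftest(struct ipr_ioa_cfg *ioa_cfg,
				void (*fire_test_irq)(struct ipr_ioa_cfg *))
{
	ioa_cfg->msi_received = 0;
	fire_test_irq(ioa_cfg);		/* device-specific trigger */
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	return ioa_cfg->msi_received ? 0 : -EOPNOTSUPP;
}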
10161 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10166 * 0 on success / non-zero on failure
10182 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); in ipr_probe_ioa()
10186 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); in ipr_probe_ioa()
10187 rc = -ENOMEM; in ipr_probe_ioa()
10191 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; in ipr_probe_ioa()
10193 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); in ipr_probe_ioa()
10195 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); in ipr_probe_ioa()
10197 if (!ioa_cfg->ipr_chip) { in ipr_probe_ioa()
10198 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", in ipr_probe_ioa()
10199 dev_id->vendor, dev_id->device); in ipr_probe_ioa()
10204 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; in ipr_probe_ioa()
10205 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; in ipr_probe_ioa()
10206 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; in ipr_probe_ioa()
10207 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; in ipr_probe_ioa()
10210 ioa_cfg->transop_timeout = ipr_transop_timeout; in ipr_probe_ioa()
10211 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) in ipr_probe_ioa()
10212 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10214 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10216 ioa_cfg->revid = pdev->revision; in ipr_probe_ioa()
10224 dev_err(&pdev->dev, in ipr_probe_ioa()
10238 dev_err(&pdev->dev, "Cannot enable adapter\n"); in ipr_probe_ioa()
10247 dev_err(&pdev->dev, in ipr_probe_ioa()
10249 rc = -ENOMEM; in ipr_probe_ioa()
10253 ioa_cfg->hdw_dma_regs = ipr_regs; in ipr_probe_ioa()
10254 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; in ipr_probe_ioa()
10255 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; in ipr_probe_ioa()
10259 if (ioa_cfg->sis64) { in ipr_probe_ioa()
10260 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in ipr_probe_ioa()
10262 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); in ipr_probe_ioa()
10263 rc = dma_set_mask_and_coherent(&pdev->dev, in ipr_probe_ioa()
10267 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in ipr_probe_ioa()
10270 dev_err(&pdev->dev, "Failed to set DMA mask\n"); in ipr_probe_ioa()
10275 ioa_cfg->chip_cfg->cache_line_size); in ipr_probe_ioa()
10278 dev_err(&pdev->dev, "Write of cache line size failed\n"); in ipr_probe_ioa()
10280 rc = -EIO; in ipr_probe_ioa()
10285 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_probe_ioa()
10289 dev_err(&pdev->dev, "The max number of MSIX is %d\n", in ipr_probe_ioa()
10295 if (ioa_cfg->ipr_chip->has_msi) in ipr_probe_ioa()
10302 ioa_cfg->nvectors = rc; in ipr_probe_ioa()
10304 if (!pdev->msi_enabled && !pdev->msix_enabled) in ipr_probe_ioa()
10305 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
10313 rc = -EIO; in ipr_probe_ioa()
10318 if (pdev->msi_enabled || pdev->msix_enabled) { in ipr_probe_ioa()
10322 dev_info(&pdev->dev, in ipr_probe_ioa()
10323 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, in ipr_probe_ioa()
10324 pdev->msix_enabled ? "-X" : ""); in ipr_probe_ioa()
10326 case -EOPNOTSUPP: in ipr_probe_ioa()
10330 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
10331 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
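/*
 * Context for the fallback above: the (partly elided) probe logic follows
 * the standard interrupt setup ladder, requesting MSI-X, then MSI, then
 * legacy INTx, and shrinking nvectors to what the platform grants. With the
 * modern API this is a single call (sketch, not the exact ipr code):
 *
 *	rc = pci_alloc_irq_vectors(pdev, 1, IPR_MAX_MSIX_VECTORS,
 *				   PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (rc < 0)
 *		return rc;
 *	ioa_cfg->nvectors = rc;
 */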
10338 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, in ipr_probe_ioa()
10350 dev_err(&pdev->dev, in ipr_probe_ioa()
10359 dev_err(&pdev->dev, "Failed to save PCI config space\n"); in ipr_probe_ioa()
10360 rc = -EIO; in ipr_probe_ioa()
10368 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_probe_ioa()
10369 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_probe_ioa()
10370 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_probe_ioa()
10372 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10374 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10376 ioa_cfg->ioa_unit_checked = 1; in ipr_probe_ioa()
10378 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10382 if (pdev->msi_enabled || pdev->msix_enabled) { in ipr_probe_ioa()
10385 ioa_cfg->vectors_info[0].desc, in ipr_probe_ioa()
10386 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10390 rc = request_irq(pdev->irq, ipr_isr, in ipr_probe_ioa()
10392 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10395 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", in ipr_probe_ioa()
10396 pdev->irq, rc); in ipr_probe_ioa()
10400 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || in ipr_probe_ioa()
10401 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { in ipr_probe_ioa()
10402 ioa_cfg->needs_warm_reset = 1; in ipr_probe_ioa()
10403 ioa_cfg->reset = ipr_reset_slot_reset; in ipr_probe_ioa()
10405 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", in ipr_probe_ioa()
10406 WQ_MEM_RECLAIM, host->host_no); in ipr_probe_ioa()
10408 if (!ioa_cfg->reset_work_q) { in ipr_probe_ioa()
10409 dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); in ipr_probe_ioa()
10410 rc = -ENOMEM; in ipr_probe_ioa()
10414 ioa_cfg->reset = ipr_reset_start_bist; in ipr_probe_ioa()
10417 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); in ipr_probe_ioa()
10443 * ipr_initiate_ioa_bringdown - Bring down an adapter
10460 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_initiate_ioa_bringdown()
10461 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_bringdown()
10462 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_bringdown()
10463 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_bringdown()
10469 * __ipr_remove - Remove a single adapter
10485 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10486 while (ioa_cfg->in_reset_reload) { in __ipr_remove()
10487 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10488 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10489 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10492 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in __ipr_remove()
10493 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10494 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
10495 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10500 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10501 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10502 flush_work(&ioa_cfg->work_q); in __ipr_remove()
10503 if (ioa_cfg->reset_work_q) in __ipr_remove()
10504 flush_workqueue(ioa_cfg->reset_work_q); in __ipr_remove()
10505 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in __ipr_remove()
10506 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10509 list_del(&ioa_cfg->queue); in __ipr_remove()
10512 if (ioa_cfg->sdt_state == ABORT_DUMP) in __ipr_remove()
10513 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in __ipr_remove()
10514 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10522 * ipr_remove - IOA hot plug remove entry point
10536 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10538 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10540 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10542 scsi_remove_host(ioa_cfg->host); in ipr_remove()
10550 * ipr_probe - Adapter hot plug add entry point
10555 * 0 on success / non-zero on failure
10576 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); in ipr_probe()
10583 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10587 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10592 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10596 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10598 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10600 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10605 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10609 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10611 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10613 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10617 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_probe()
10618 ioa_cfg->scan_enabled = 1; in ipr_probe()
10619 schedule_work(&ioa_cfg->work_q); in ipr_probe()
10620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_probe()
10622 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; in ipr_probe()
10624 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_probe()
10625 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_probe()
10626 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
10627 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_probe()
10631 scsi_scan_host(ioa_cfg->host); in ipr_probe()
10637 * ipr_shutdown - Shutdown handler.
10653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10654 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_shutdown()
10655 ioa_cfg->iopoll_weight = 0; in ipr_shutdown()
10656 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_shutdown()
10657 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
10660 while (ioa_cfg->in_reset_reload) { in ipr_shutdown()
10661 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10662 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10663 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10666 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) in ipr_shutdown()
10670 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10671 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10672 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { in ipr_shutdown()
10674 pci_disable_device(ioa_cfg->pdev); in ipr_shutdown()
10808 * ipr_halt_done - Shutdown prepare completion
10816 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_halt_done()
10820 * ipr_halt - Issue shutdown prepare to all adapters
10840 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_halt()
10841 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()
10842 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { in ipr_halt()
10843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
10848 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_halt()
10849 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_halt()
10850 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_halt()
10851 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; in ipr_halt()
10854 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
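/*
 * ipr_halt() runs as a reboot notifier, so every adapter receives a
 * "shutdown prepare" before restart. The usual registration pattern looks
 * like the sketch below; the real driver registers an equivalent
 * notifier_block from its module init path.
 */
static struct notifier_block example_reboot_nb = {
	.notifier_call = ipr_halt,
};

static int __init example_register(void)
{
	return register_reboot_notifier(&example_reboot_nb);
}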
10866 * ipr_init - Module entry point
10881 * ipr_exit - Module unload