Lines matching references to the identifier ha (qla2xxx driver sources)
81 struct qla_hw_data *ha = vha->hw; in qla2x00_get_async_timeout() local
84 tmo = ha->r_a_tov / 10 * 2; in qla2x00_get_async_timeout()
85 if (IS_QLAFX00(ha)) { in qla2x00_get_async_timeout()
87 } else if (!IS_FWI2_CAPABLE(ha)) { in qla2x00_get_async_timeout()
92 tmo = ha->login_timeout; in qla2x00_get_async_timeout()
614 struct qla_hw_data *ha = vha->hw; in qla2x00_is_reserved_id() local
616 if (IS_FWI2_CAPABLE(ha)) in qla2x00_is_reserved_id()
619 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) || in qla2x00_is_reserved_id()
637 struct qla_hw_data *ha = vha->hw; in qla2x00_find_new_loop_id() local
642 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
644 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE); in qla2x00_find_new_loop_id()
650 set_bit(dev->loop_id, ha->loop_id_map); in qla2x00_find_new_loop_id()
652 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
668 struct qla_hw_data *ha = fcport->vha->hw; in qla2x00_clear_loop_id() local
674 clear_bit(fcport->loop_id, ha->loop_id_map); in qla2x00_clear_loop_id()
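Note: the qla2x00_find_new_loop_id() and qla2x00_clear_loop_id() fragments above hand loop IDs out of a bitmap (ha->loop_id_map) under ha->vport_slock and return them with clear_bit(). Below is a minimal, self-contained sketch of that allocate/release pattern; demo_hw, demo_alloc_id, demo_free_id and DEMO_MAP_SIZE are hypothetical stand-ins (only the bitmap/spinlock APIs are the real kernel ones), and whether the driver clears the bit under the same lock is not visible in these fragments.

    #include <linux/bitmap.h>
    #include <linux/spinlock.h>

    #define DEMO_MAP_SIZE 128                      /* stand-in for LOOPID_MAP_SIZE */

    struct demo_hw {
        spinlock_t slock;                          /* stand-in for ha->vport_slock, spin_lock_init()'d elsewhere */
        DECLARE_BITMAP(id_map, DEMO_MAP_SIZE);     /* stand-in for ha->loop_id_map */
    };

    /* Allocate the lowest free ID, or -1 if the map is exhausted. */
    static int demo_alloc_id(struct demo_hw *hw)
    {
        unsigned long flags;
        int id;

        spin_lock_irqsave(&hw->slock, flags);
        id = find_first_zero_bit(hw->id_map, DEMO_MAP_SIZE);
        if (id < DEMO_MAP_SIZE)
            set_bit(id, hw->id_map);
        else
            id = -1;
        spin_unlock_irqrestore(&hw->slock, flags);

        return id;
    }

    /* Return an ID to the map, mirroring the clear_bit() above. */
    static void demo_free_id(struct demo_hw *hw, int id)
    {
        unsigned long flags;

        spin_lock_irqsave(&hw->slock, flags);
        clear_bit(id, hw->id_map);
        spin_unlock_irqrestore(&hw->slock, flags);
    }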
1162 struct qla_hw_data *ha = vha->hw; in qla24xx_async_gpdb_sp_done() local
1183 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, in qla24xx_async_gpdb_sp_done()
1312 struct qla_hw_data *ha = vha->hw; in qla24xx_async_gpdb() local
1338 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); in qla24xx_async_gpdb()
1371 dma_pool_free(ha->s_dma_pool, pd, pd_dma); in qla24xx_async_gpdb()
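Note: qla24xx_async_gpdb() borrows a zeroed port-database buffer from ha->s_dma_pool with dma_pool_zalloc() and hands it back with dma_pool_free(), both on its error path and in the completion handler. A hedged sketch of that borrow/use/return cycle follows; the pool is assumed to have been created elsewhere with dma_pool_create(), and demo_use_pool() plus the command placeholder are invented for illustration.

    #include <linux/dmapool.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    /* Borrow a zeroed buffer from a pre-created DMA pool, use it, return it. */
    static int demo_use_pool(struct dma_pool *pool)
    {
        dma_addr_t pd_dma;
        void *pd;

        pd = dma_pool_zalloc(pool, GFP_KERNEL, &pd_dma);
        if (!pd)
            return -ENOMEM;

        /* ... hand pd_dma to the hardware/firmware command here ... */

        dma_pool_free(pool, pd, pd_dma);
        return 0;
    }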
2109 struct qla_hw_data *ha = vha->hw; in qla83xx_nic_core_fw_load() local
2118 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT; in qla83xx_nic_core_fw_load()
2119 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT; in qla83xx_nic_core_fw_load()
2139 if (ha->flags.nic_core_reset_owner) { in qla83xx_nic_core_fw_load()
2160 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); in qla83xx_nic_core_fw_load()
2163 if (ha->flags.nic_core_reset_owner) { in qla83xx_nic_core_fw_load()
2192 struct qla_hw_data *ha = vha->hw; in qla2x00_initialize_adapter() local
2193 struct req_que *req = ha->req_q_map[0]; in qla2x00_initialize_adapter()
2194 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla2x00_initialize_adapter()
2201 ha->flags.chip_reset_done = 0; in qla2x00_initialize_adapter()
2203 ha->flags.pci_channel_io_perm_failure = 0; in qla2x00_initialize_adapter()
2204 ha->flags.eeh_busy = 0; in qla2x00_initialize_adapter()
2212 ha->isp_abort_cnt = 0; in qla2x00_initialize_adapter()
2213 ha->beacon_blink_led = 0; in qla2x00_initialize_adapter()
2215 set_bit(0, ha->req_qid_map); in qla2x00_initialize_adapter()
2216 set_bit(0, ha->rsp_qid_map); in qla2x00_initialize_adapter()
2220 rval = ha->isp_ops->pci_config(vha); in qla2x00_initialize_adapter()
2227 ha->isp_ops->reset_chip(vha); in qla2x00_initialize_adapter()
2230 if (IS_QLA28XX(ha)) { in qla2x00_initialize_adapter()
2232 ha->flags.secure_adapter = 1; in qla2x00_initialize_adapter()
2234 (ha->flags.secure_adapter) ? "Yes" : "No"); in qla2x00_initialize_adapter()
2245 if (IS_QLA8044(ha)) { in qla2x00_initialize_adapter()
2256 ha->isp_ops->get_flash_version(vha, req->ring); in qla2x00_initialize_adapter()
2261 ha->fc4_type_priority = FC4_PRIORITY_FCP; in qla2x00_initialize_adapter()
2263 ha->isp_ops->nvram_config(vha); in qla2x00_initialize_adapter()
2265 if (ha->fc4_type_priority != FC4_PRIORITY_FCP && in qla2x00_initialize_adapter()
2266 ha->fc4_type_priority != FC4_PRIORITY_NVME) in qla2x00_initialize_adapter()
2267 ha->fc4_type_priority = FC4_PRIORITY_FCP; in qla2x00_initialize_adapter()
2270 ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe"); in qla2x00_initialize_adapter()
2272 if (ha->flags.disable_serdes) { in qla2x00_initialize_adapter()
2289 rval = ha->isp_ops->chip_diag(vha); in qla2x00_initialize_adapter()
2297 if (IS_QLA84XX(ha)) { in qla2x00_initialize_adapter()
2298 ha->cs84xx = qla84xx_get_chip(vha); in qla2x00_initialize_adapter()
2299 if (!ha->cs84xx) { in qla2x00_initialize_adapter()
2313 ha->flags.chip_reset_done = 1; in qla2x00_initialize_adapter()
2315 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { in qla2x00_initialize_adapter()
2326 if (IS_QLA8031(ha)) { in qla2x00_initialize_adapter()
2333 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) in qla2x00_initialize_adapter()
2336 if (IS_P3P_TYPE(ha)) in qla2x00_initialize_adapter()
2355 struct qla_hw_data *ha = vha->hw; in qla2100_pci_config() local
2356 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2100_pci_config()
2358 pci_set_master(ha->pdev); in qla2100_pci_config()
2359 pci_try_set_mwi(ha->pdev); in qla2100_pci_config()
2361 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla2100_pci_config()
2363 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla2100_pci_config()
2365 pci_disable_rom(ha->pdev); in qla2100_pci_config()
2368 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2100_pci_config()
2369 ha->pci_attr = rd_reg_word(&reg->ctrl_status); in qla2100_pci_config()
2370 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2100_pci_config()
2387 struct qla_hw_data *ha = vha->hw; in qla2300_pci_config() local
2388 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2300_pci_config()
2390 pci_set_master(ha->pdev); in qla2300_pci_config()
2391 pci_try_set_mwi(ha->pdev); in qla2300_pci_config()
2393 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla2300_pci_config()
2396 if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2300_pci_config()
2398 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla2300_pci_config()
2407 if (IS_QLA2300(ha)) { in qla2300_pci_config()
2408 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2300_pci_config()
2424 ha->fb_rev = RD_FB_CMD_REG(ha, reg); in qla2300_pci_config()
2426 if (ha->fb_rev == FPM_2300) in qla2300_pci_config()
2427 pci_clear_mwi(ha->pdev); in qla2300_pci_config()
2442 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2300_pci_config()
2445 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); in qla2300_pci_config()
2447 pci_disable_rom(ha->pdev); in qla2300_pci_config()
2450 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2300_pci_config()
2451 ha->pci_attr = rd_reg_word(&reg->ctrl_status); in qla2300_pci_config()
2452 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2300_pci_config()
2468 struct qla_hw_data *ha = vha->hw; in qla24xx_pci_config() local
2469 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_pci_config()
2471 pci_set_master(ha->pdev); in qla24xx_pci_config()
2472 pci_try_set_mwi(ha->pdev); in qla24xx_pci_config()
2474 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla24xx_pci_config()
2477 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla24xx_pci_config()
2479 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); in qla24xx_pci_config()
2482 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) in qla24xx_pci_config()
2483 pcix_set_mmrbc(ha->pdev, 2048); in qla24xx_pci_config()
2486 if (pci_is_pcie(ha->pdev)) in qla24xx_pci_config()
2487 pcie_set_readrq(ha->pdev, 4096); in qla24xx_pci_config()
2489 pci_disable_rom(ha->pdev); in qla24xx_pci_config()
2491 ha->chip_revision = ha->pdev->revision; in qla24xx_pci_config()
2494 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_pci_config()
2495 ha->pci_attr = rd_reg_dword(&reg->ctrl_status); in qla24xx_pci_config()
2496 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_pci_config()
2511 struct qla_hw_data *ha = vha->hw; in qla25xx_pci_config() local
2513 pci_set_master(ha->pdev); in qla25xx_pci_config()
2514 pci_try_set_mwi(ha->pdev); in qla25xx_pci_config()
2516 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla25xx_pci_config()
2519 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla25xx_pci_config()
2522 if (pci_is_pcie(ha->pdev)) in qla25xx_pci_config()
2523 pcie_set_readrq(ha->pdev, 4096); in qla25xx_pci_config()
2525 pci_disable_rom(ha->pdev); in qla25xx_pci_config()
2527 ha->chip_revision = ha->pdev->revision; in qla25xx_pci_config()
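Note: the qla2100/2300/24xx/25xx_pci_config() fragments above all follow the same recipe: enable bus mastering, attempt MWI, rewrite PCI_COMMAND, raise the PCIe read-request size where applicable, and disable the expansion ROM. A condensed sketch with only generic names; the exact PCI_COMMAND bits set between the read and the write are not shown in this listing, so the parity/SERR mask below is an assumption.

    #include <linux/pci.h>

    /* Generic shape of the qla2x00 pci_config() helpers. */
    static void demo_pci_config(struct pci_dev *pdev)
    {
        u16 w;

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);                     /* best effort; failure is tolerated */

        pci_read_config_word(pdev, PCI_COMMAND, &w);
        w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; /* assumed bits, not shown above */
        pci_write_config_word(pdev, PCI_COMMAND, w);

        /* Larger read requests on PCIe parts (see the 24xx/25xx variants). */
        if (pci_is_pcie(pdev))
            pcie_set_readrq(pdev, 4096);

        pci_disable_rom(pdev);
    }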
2544 struct qla_hw_data *ha = vha->hw; in qla2x00_isp_firmware() local
2549 if (ha->flags.disable_risc_code_load) { in qla2x00_isp_firmware()
2553 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); in qla2x00_isp_firmware()
2578 struct qla_hw_data *ha = vha->hw; in qla2x00_reset_chip() local
2579 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_reset_chip()
2584 if (unlikely(pci_channel_offline(ha->pdev))) in qla2x00_reset_chip()
2587 ha->isp_ops->disable_intrs(ha); in qla2x00_reset_chip()
2589 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_reset_chip()
2593 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); in qla2x00_reset_chip()
2595 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); in qla2x00_reset_chip()
2597 if (!IS_QLA2100(ha)) { in qla2x00_reset_chip()
2600 if (IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
2621 if (!IS_QLA2200(ha)) { in qla2x00_reset_chip()
2631 if (IS_QLA2200(ha)) { in qla2x00_reset_chip()
2632 WRT_FB_CMD_REG(ha, reg, 0xa000); in qla2x00_reset_chip()
2633 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */ in qla2x00_reset_chip()
2635 WRT_FB_CMD_REG(ha, reg, 0x00fc); in qla2x00_reset_chip()
2639 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) in qla2x00_reset_chip()
2665 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
2690 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
2692 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) in qla2x00_reset_chip()
2702 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); in qla2x00_reset_chip()
2705 if (!IS_QLA2100(ha)) { in qla2x00_reset_chip()
2710 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_reset_chip()
2742 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_risc() local
2743 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_reset_risc()
2749 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_reset_risc()
2761 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
2771 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); in qla24xx_reset_risc()
2787 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
2805 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
2848 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
2855 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_reset_risc()
2859 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling"); in qla24xx_reset_risc()
2861 if (IS_NOPOLLING_TYPE(ha)) in qla24xx_reset_risc()
2862 ha->isp_ops->enable_intrs(ha); in qla24xx_reset_risc()
2955 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_chip() local
2958 if (pci_channel_offline(ha->pdev) && in qla24xx_reset_chip()
2959 ha->flags.pci_channel_io_perm_failure) { in qla24xx_reset_chip()
2963 ha->isp_ops->disable_intrs(ha); in qla24xx_reset_chip()
2983 struct qla_hw_data *ha = vha->hw; in qla2x00_chip_diag() local
2984 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_chip_diag()
2989 struct req_que *req = ha->req_q_map[0]; in qla2x00_chip_diag()
2997 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_chip_diag()
3025 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_chip_diag()
3026 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); in qla2x00_chip_diag()
3029 data = RD_MAILBOX_REG(ha, reg, 0); in qla2x00_chip_diag()
3041 mb[1] = RD_MAILBOX_REG(ha, reg, 1); in qla2x00_chip_diag()
3042 mb[2] = RD_MAILBOX_REG(ha, reg, 2); in qla2x00_chip_diag()
3043 mb[3] = RD_MAILBOX_REG(ha, reg, 3); in qla2x00_chip_diag()
3044 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); in qla2x00_chip_diag()
3053 ha->product_id[0] = mb[1]; in qla2x00_chip_diag()
3054 ha->product_id[1] = mb[2]; in qla2x00_chip_diag()
3055 ha->product_id[2] = mb[3]; in qla2x00_chip_diag()
3056 ha->product_id[3] = mb[4]; in qla2x00_chip_diag()
3060 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; in qla2x00_chip_diag()
3062 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * in qla2x00_chip_diag()
3065 if (IS_QLA2200(ha) && in qla2x00_chip_diag()
3066 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { in qla2x00_chip_diag()
3070 ha->device_type |= DT_ISP2200A; in qla2x00_chip_diag()
3071 ha->fw_transfer_size = 128; in qla2x00_chip_diag()
3075 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_chip_diag()
3085 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_chip_diag()
3092 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_chip_diag()
3107 struct qla_hw_data *ha = vha->hw; in qla24xx_chip_diag() local
3108 struct req_que *req = ha->req_q_map[0]; in qla24xx_chip_diag()
3110 if (IS_P3P_TYPE(ha)) in qla24xx_chip_diag()
3113 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; in qla24xx_chip_diag()
3133 struct qla_hw_data *ha = vha->hw; in qla2x00_init_fce_trace() local
3135 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_init_fce_trace()
3138 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && in qla2x00_init_fce_trace()
3139 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla2x00_init_fce_trace()
3142 if (ha->fce) { in qla2x00_init_fce_trace()
3150 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, in qla2x00_init_fce_trace()
3160 ha->fce_mb, &ha->fce_bufs); in qla2x00_init_fce_trace()
3164 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); in qla2x00_init_fce_trace()
3171 ha->flags.fce_enabled = 1; in qla2x00_init_fce_trace()
3172 ha->fce_dma = tc_dma; in qla2x00_init_fce_trace()
3173 ha->fce = tc; in qla2x00_init_fce_trace()
3182 struct qla_hw_data *ha = vha->hw; in qla2x00_init_eft_trace() local
3184 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_init_eft_trace()
3187 if (ha->eft) { in qla2x00_init_eft_trace()
3195 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, in qla2x00_init_eft_trace()
3208 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma); in qla2x00_init_eft_trace()
3215 ha->eft_dma = tc_dma; in qla2x00_init_eft_trace()
3216 ha->eft = tc; in qla2x00_init_eft_trace()
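Note: both trace helpers (qla2x00_init_fce_trace() and qla2x00_init_eft_trace()) allocate a coherent DMA buffer, ask the firmware to enable tracing into it, and free the buffer again if that enable step fails. A small sketch of the pattern; DEMO_TRACE_SIZE and the fw_enable callback are hypothetical placeholders for FCE_SIZE/EFT_SIZE and the corresponding mailbox commands.

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    #define DEMO_TRACE_SIZE (64 * 1024)            /* stand-in for FCE_SIZE/EFT_SIZE */

    /* Allocate a coherent trace buffer; release it if the firmware rejects it. */
    static void *demo_init_trace(struct device *dev, dma_addr_t *dma,
                                 int (*fw_enable)(dma_addr_t))
    {
        void *tc;

        tc = dma_alloc_coherent(dev, DEMO_TRACE_SIZE, dma, GFP_KERNEL);
        if (!tc)
            return NULL;

        if (fw_enable(*dma)) {
            dma_free_coherent(dev, DEMO_TRACE_SIZE, tc, *dma);
            return NULL;
        }

        return tc;
    }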
3231 struct qla_hw_data *ha = vha->hw; in qla2x00_alloc_fw_dump() local
3232 struct req_que *req = ha->req_q_map[0]; in qla2x00_alloc_fw_dump()
3233 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_alloc_fw_dump()
3239 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { in qla2x00_alloc_fw_dump()
3241 } else if (IS_QLA23XX(ha)) { in qla2x00_alloc_fw_dump()
3243 mem_size = (ha->fw_memory_size - 0x11000 + 1) * in qla2x00_alloc_fw_dump()
3245 } else if (IS_FWI2_CAPABLE(ha)) { in qla2x00_alloc_fw_dump()
3246 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) in qla2x00_alloc_fw_dump()
3248 else if (IS_QLA81XX(ha)) in qla2x00_alloc_fw_dump()
3250 else if (IS_QLA25XX(ha)) in qla2x00_alloc_fw_dump()
3255 mem_size = (ha->fw_memory_size - 0x100000 + 1) * in qla2x00_alloc_fw_dump()
3257 if (ha->mqenable) { in qla2x00_alloc_fw_dump()
3258 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && in qla2x00_alloc_fw_dump()
3259 !IS_QLA28XX(ha)) in qla2x00_alloc_fw_dump()
3265 mq_size += (ha->max_req_queues - 1) * in qla2x00_alloc_fw_dump()
3267 mq_size += (ha->max_rsp_queues - 1) * in qla2x00_alloc_fw_dump()
3270 if (ha->tgt.atio_ring) in qla2x00_alloc_fw_dump()
3271 mq_size += ha->tgt.atio_q_length * sizeof(request_t); in qla2x00_alloc_fw_dump()
3274 if (ha->fce) in qla2x00_alloc_fw_dump()
3277 if (ha->eft) in qla2x00_alloc_fw_dump()
3281 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { in qla2x00_alloc_fw_dump()
3282 struct fwdt *fwdt = ha->fwdt; in qla2x00_alloc_fw_dump()
3301 dump_size += ha->fwdt[1].dump_size; in qla2x00_alloc_fw_dump()
3308 ha->chain_offset = dump_size; in qla2x00_alloc_fw_dump()
3310 if (ha->exchoffld_buf) in qla2x00_alloc_fw_dump()
3312 ha->exchoffld_size; in qla2x00_alloc_fw_dump()
3313 if (ha->exlogin_buf) in qla2x00_alloc_fw_dump()
3315 ha->exlogin_size; in qla2x00_alloc_fw_dump()
3318 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) { in qla2x00_alloc_fw_dump()
3322 __func__, dump_size, ha->fw_dump_len, in qla2x00_alloc_fw_dump()
3323 ha->fw_dump_alloc_len); in qla2x00_alloc_fw_dump()
3331 mutex_lock(&ha->optrom_mutex); in qla2x00_alloc_fw_dump()
3332 if (ha->fw_dumped) { in qla2x00_alloc_fw_dump()
3333 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len); in qla2x00_alloc_fw_dump()
3334 vfree(ha->fw_dump); in qla2x00_alloc_fw_dump()
3335 ha->fw_dump = fw_dump; in qla2x00_alloc_fw_dump()
3336 ha->fw_dump_alloc_len = dump_size; in qla2x00_alloc_fw_dump()
3341 if (ha->fw_dump) in qla2x00_alloc_fw_dump()
3342 vfree(ha->fw_dump); in qla2x00_alloc_fw_dump()
3343 ha->fw_dump = fw_dump; in qla2x00_alloc_fw_dump()
3345 ha->fw_dump_len = ha->fw_dump_alloc_len = in qla2x00_alloc_fw_dump()
3351 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { in qla2x00_alloc_fw_dump()
3352 ha->mpi_fw_dump = (char *)fw_dump + in qla2x00_alloc_fw_dump()
3353 ha->fwdt[1].dump_size; in qla2x00_alloc_fw_dump()
3354 mutex_unlock(&ha->optrom_mutex); in qla2x00_alloc_fw_dump()
3358 ha->fw_dump->signature[0] = 'Q'; in qla2x00_alloc_fw_dump()
3359 ha->fw_dump->signature[1] = 'L'; in qla2x00_alloc_fw_dump()
3360 ha->fw_dump->signature[2] = 'G'; in qla2x00_alloc_fw_dump()
3361 ha->fw_dump->signature[3] = 'C'; in qla2x00_alloc_fw_dump()
3362 ha->fw_dump->version = htonl(1); in qla2x00_alloc_fw_dump()
3364 ha->fw_dump->fixed_size = htonl(fixed_size); in qla2x00_alloc_fw_dump()
3365 ha->fw_dump->mem_size = htonl(mem_size); in qla2x00_alloc_fw_dump()
3366 ha->fw_dump->req_q_size = htonl(req_q_size); in qla2x00_alloc_fw_dump()
3367 ha->fw_dump->rsp_q_size = htonl(rsp_q_size); in qla2x00_alloc_fw_dump()
3369 ha->fw_dump->eft_size = htonl(eft_size); in qla2x00_alloc_fw_dump()
3370 ha->fw_dump->eft_addr_l = in qla2x00_alloc_fw_dump()
3371 htonl(LSD(ha->eft_dma)); in qla2x00_alloc_fw_dump()
3372 ha->fw_dump->eft_addr_h = in qla2x00_alloc_fw_dump()
3373 htonl(MSD(ha->eft_dma)); in qla2x00_alloc_fw_dump()
3375 ha->fw_dump->header_size = in qla2x00_alloc_fw_dump()
3379 mutex_unlock(&ha->optrom_mutex); in qla2x00_alloc_fw_dump()
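Note: qla2x00_alloc_fw_dump() computes a dump size from the enabled features, reallocates the vmalloc'd dump buffer only when the existing one is too small, and swaps buffers under ha->optrom_mutex while preserving an already-captured dump. A loose sketch of that grow-and-swap idea; struct demo_dump and demo_resize_dump() are invented, and the real function's sizing of the mem/req/rsp/FCE/EFT regions is omitted.

    #include <linux/types.h>
    #include <linux/vmalloc.h>
    #include <linux/mutex.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    struct demo_dump {
        struct mutex lock;       /* stand-in for ha->optrom_mutex, mutex_init()'d elsewhere */
        void *buf;
        u32 len;                 /* bytes of captured dump data */
        u32 alloc_len;           /* bytes currently allocated */
    };

    /* Grow the dump buffer only when needed, keeping any captured dump. */
    static int demo_resize_dump(struct demo_dump *d, u32 new_size, bool dumped)
    {
        void *nb;

        if (d->len && new_size <= d->alloc_len)
            return 0;                              /* current buffer already fits */

        nb = vmalloc(new_size);
        if (!nb)
            return -ENOMEM;

        mutex_lock(&d->lock);
        if (dumped && d->buf)
            memcpy(nb, d->buf, d->len);            /* preserve the captured dump */
        vfree(d->buf);
        d->buf = nb;
        d->alloc_len = new_size;
        mutex_unlock(&d->lock);
        return 0;
    }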
3432 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) in qla2x00_alloc_outstanding_cmds() argument
3438 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_alloc_outstanding_cmds()
3441 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) in qla2x00_alloc_outstanding_cmds()
3442 req->num_outstanding_cmds = ha->cur_fw_xcb_count; in qla2x00_alloc_outstanding_cmds()
3444 req->num_outstanding_cmds = ha->cur_fw_iocb_count; in qla2x00_alloc_outstanding_cmds()
3584 struct qla_hw_data *ha = vha->hw; in qla24xx_detect_sfp() local
3585 struct nvram_81xx *nv = ha->nvram; in qla24xx_detect_sfp()
3593 ha->flags.lr_detected = 0; in qla24xx_detect_sfp()
3594 if (IS_BPM_RANGE_CAPABLE(ha) && in qla24xx_detect_sfp()
3597 ha->flags.lr_detected = 1; in qla24xx_detect_sfp()
3598 ha->lr_distance = in qla24xx_detect_sfp()
3614 ha->flags.lr_detected = 0; in qla24xx_detect_sfp()
3618 ha->flags.lr_detected = 1; in qla24xx_detect_sfp()
3621 ha->lr_distance = LR_DISTANCE_10K; in qla24xx_detect_sfp()
3623 ha->lr_distance = LR_DISTANCE_5K; in qla24xx_detect_sfp()
3629 types[ha->flags.lr_detected], in qla24xx_detect_sfp()
3630 ha->flags.lr_detected ? lengths[ha->lr_distance] : in qla24xx_detect_sfp()
3632 used_nvram, ll, ha->flags.lr_detected, ha->lr_distance); in qla24xx_detect_sfp()
3633 return ha->flags.lr_detected; in qla24xx_detect_sfp()
3640 struct qla_hw_data *ha = vha->hw; in qla_init_iocb_limit() local
3642 num_qps = ha->num_qpairs + 1; in qla_init_iocb_limit()
3643 limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100; in qla_init_iocb_limit()
3645 ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count; in qla_init_iocb_limit()
3646 ha->base_qpair->fwres.iocbs_limit = limit; in qla_init_iocb_limit()
3647 ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps; in qla_init_iocb_limit()
3648 ha->base_qpair->fwres.iocbs_used = 0; in qla_init_iocb_limit()
3649 for (i = 0; i < ha->max_qpairs; i++) { in qla_init_iocb_limit()
3650 if (ha->queue_pair_map[i]) { in qla_init_iocb_limit()
3651 ha->queue_pair_map[i]->fwres.iocbs_total = in qla_init_iocb_limit()
3652 ha->orig_fw_iocb_count; in qla_init_iocb_limit()
3653 ha->queue_pair_map[i]->fwres.iocbs_limit = limit; in qla_init_iocb_limit()
3654 ha->queue_pair_map[i]->fwres.iocbs_qp_limit = in qla_init_iocb_limit()
3656 ha->queue_pair_map[i]->fwres.iocbs_used = 0; in qla_init_iocb_limit()
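Note: qla_init_iocb_limit() caps firmware IOCB consumption at a percentage of the firmware-reported total and then derives a per-queue-pair share (limit / num_qps, with num_qps = ha->num_qpairs + 1). A small arithmetic sketch; DEMO_IOCB_PCT_LIMIT is illustrative only, since the value of the driver's QLA_IOCB_PCT_LIMIT is not shown in this listing.

    #include <linux/types.h>

    #define DEMO_IOCB_PCT_LIMIT 95                 /* illustrative; not the driver's constant */

    struct demo_fwres {
        u32 iocbs_total;
        u32 iocbs_limit;
        u32 iocbs_qp_limit;
        u32 iocbs_used;
    };

    /* Split a firmware IOCB budget across the base qpair plus extra qpairs. */
    static void demo_init_iocb_limit(struct demo_fwres *res,
                                     u32 fw_iocb_count, u32 num_qpairs)
    {
        u32 num_qps = num_qpairs + 1;              /* base qpair + extras */
        u32 limit = (fw_iocb_count * DEMO_IOCB_PCT_LIMIT) / 100;

        res->iocbs_total = fw_iocb_count;
        res->iocbs_limit = limit;
        res->iocbs_qp_limit = limit / num_qps;
        res->iocbs_used = 0;
    }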
3672 struct qla_hw_data *ha = vha->hw; in qla2x00_setup_chip() local
3673 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_setup_chip()
3678 if (IS_P3P_TYPE(ha)) { in qla2x00_setup_chip()
3679 rval = ha->isp_ops->load_risc(vha, &srisc_address); in qla2x00_setup_chip()
3687 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { in qla2x00_setup_chip()
3689 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_setup_chip()
3692 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_setup_chip()
3699 rval = ha->isp_ops->load_risc(vha, &srisc_address); in qla2x00_setup_chip()
3711 ha->flags.exlogins_enabled = 1; in qla2x00_setup_chip()
3714 ha->flags.exchoffld_enabled = 1; in qla2x00_setup_chip()
3724 ha->isp_ops->reset_chip(vha); in qla2x00_setup_chip()
3725 ha->isp_ops->chip_diag(vha); in qla2x00_setup_chip()
3729 if (IS_ZIO_THRESHOLD_CAPABLE(ha)) in qla2x00_setup_chip()
3731 ha->last_zio_threshold); in qla2x00_setup_chip()
3742 fw_major_version = ha->fw_major_version; in qla2x00_setup_chip()
3743 if (IS_P3P_TYPE(ha)) in qla2x00_setup_chip()
3749 ha->flags.npiv_supported = 0; in qla2x00_setup_chip()
3750 if (IS_QLA2XXX_MIDTYPE(ha) && in qla2x00_setup_chip()
3751 (ha->fw_attributes & BIT_2)) { in qla2x00_setup_chip()
3752 ha->flags.npiv_supported = 1; in qla2x00_setup_chip()
3753 if ((!ha->max_npiv_vports) || in qla2x00_setup_chip()
3754 ((ha->max_npiv_vports + 1) % in qla2x00_setup_chip()
3756 ha->max_npiv_vports = in qla2x00_setup_chip()
3766 rval = qla2x00_alloc_outstanding_cmds(ha, in qla2x00_setup_chip()
3771 if (!fw_major_version && !(IS_P3P_TYPE(ha))) in qla2x00_setup_chip()
3774 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha))) in qla2x00_setup_chip()
3787 if (ql2xrdpenable || ha->flags.scm_supported_f) in qla2x00_setup_chip()
3792 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { in qla2x00_setup_chip()
3794 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_setup_chip()
3795 if (IS_QLA2300(ha)) in qla2x00_setup_chip()
3802 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_setup_chip()
3805 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) in qla2x00_setup_chip()
3806 ha->flags.fac_supported = 1; in qla2x00_setup_chip()
3807 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { in qla2x00_setup_chip()
3812 ha->flags.fac_supported = 1; in qla2x00_setup_chip()
3813 ha->fdt_block_size = size << 2; in qla2x00_setup_chip()
3817 ha->fw_major_version, ha->fw_minor_version, in qla2x00_setup_chip()
3818 ha->fw_subminor_version); in qla2x00_setup_chip()
3820 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || in qla2x00_setup_chip()
3821 IS_QLA28XX(ha)) { in qla2x00_setup_chip()
3822 ha->flags.fac_supported = 0; in qla2x00_setup_chip()
3871 struct qla_hw_data *ha = vha->hw; in qla2x00_update_fw_options() local
3873 memset(ha->fw_options, 0, sizeof(ha->fw_options)); in qla2x00_update_fw_options()
3874 qla2x00_get_fw_options(vha, ha->fw_options); in qla2x00_update_fw_options()
3876 if (IS_QLA2100(ha) || IS_QLA2200(ha)) in qla2x00_update_fw_options()
3883 ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options)); in qla2x00_update_fw_options()
3885 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; in qla2x00_update_fw_options()
3886 if (ha->fw_seriallink_options[3] & BIT_2) { in qla2x00_update_fw_options()
3887 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING; in qla2x00_update_fw_options()
3890 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0); in qla2x00_update_fw_options()
3891 emphasis = (ha->fw_seriallink_options[2] & in qla2x00_update_fw_options()
3893 tx_sens = ha->fw_seriallink_options[0] & in qla2x00_update_fw_options()
3895 rx_sens = (ha->fw_seriallink_options[0] & in qla2x00_update_fw_options()
3897 ha->fw_options[10] = (emphasis << 14) | (swing << 8); in qla2x00_update_fw_options()
3898 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { in qla2x00_update_fw_options()
3901 ha->fw_options[10] |= (tx_sens << 4) | rx_sens; in qla2x00_update_fw_options()
3902 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2x00_update_fw_options()
3903 ha->fw_options[10] |= BIT_5 | in qla2x00_update_fw_options()
3908 swing = (ha->fw_seriallink_options[2] & in qla2x00_update_fw_options()
3910 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0); in qla2x00_update_fw_options()
3911 tx_sens = ha->fw_seriallink_options[1] & in qla2x00_update_fw_options()
3913 rx_sens = (ha->fw_seriallink_options[1] & in qla2x00_update_fw_options()
3915 ha->fw_options[11] = (emphasis << 14) | (swing << 8); in qla2x00_update_fw_options()
3916 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { in qla2x00_update_fw_options()
3919 ha->fw_options[11] |= (tx_sens << 4) | rx_sens; in qla2x00_update_fw_options()
3920 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2x00_update_fw_options()
3921 ha->fw_options[11] |= BIT_5 | in qla2x00_update_fw_options()
3928 ha->fw_options[3] |= BIT_13; in qla2x00_update_fw_options()
3931 if (ha->flags.enable_led_scheme) in qla2x00_update_fw_options()
3932 ha->fw_options[2] |= BIT_12; in qla2x00_update_fw_options()
3935 if (IS_QLA6312(ha)) in qla2x00_update_fw_options()
3936 ha->fw_options[2] |= BIT_13; in qla2x00_update_fw_options()
3939 if (ha->operating_mode == P2P) { in qla2x00_update_fw_options()
3940 ha->fw_options[2] |= BIT_3; in qla2x00_update_fw_options()
3943 __func__, ha->fw_options[2]); in qla2x00_update_fw_options()
3947 qla2x00_set_fw_options(vha, ha->fw_options); in qla2x00_update_fw_options()
3954 struct qla_hw_data *ha = vha->hw; in qla24xx_update_fw_options() local
3956 if (IS_P3P_TYPE(ha)) in qla24xx_update_fw_options()
3961 ha->fw_options[3] |= BIT_12; in qla24xx_update_fw_options()
3964 if (ha->operating_mode == P2P) { in qla24xx_update_fw_options()
3965 ha->fw_options[2] |= BIT_3; in qla24xx_update_fw_options()
3968 __func__, ha->fw_options[2]); in qla24xx_update_fw_options()
3973 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { in qla24xx_update_fw_options()
3976 ha->fw_options[2] |= BIT_11; in qla24xx_update_fw_options()
3978 ha->fw_options[2] &= ~BIT_11; in qla24xx_update_fw_options()
3981 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || in qla24xx_update_fw_options()
3982 IS_QLA28XX(ha)) { in qla24xx_update_fw_options()
3989 ha->fw_options[2] |= BIT_4; in qla24xx_update_fw_options()
3991 ha->fw_options[2] &= ~BIT_4; in qla24xx_update_fw_options()
3995 ha->fw_options[2] |= BIT_8; in qla24xx_update_fw_options()
3997 ha->fw_options[2] &= ~BIT_8; in qla24xx_update_fw_options()
4000 if (ql2xrdpenable || ha->flags.scm_supported_f) in qla24xx_update_fw_options()
4001 ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB; in qla24xx_update_fw_options()
4004 if (IS_BPM_RANGE_CAPABLE(ha)) in qla24xx_update_fw_options()
4005 ha->fw_options[3] |= BIT_10; in qla24xx_update_fw_options()
4009 __func__, ha->fw_options[1], ha->fw_options[2], in qla24xx_update_fw_options()
4010 ha->fw_options[3], vha->host->active_mode); in qla24xx_update_fw_options()
4012 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3]) in qla24xx_update_fw_options()
4013 qla2x00_set_fw_options(vha, ha->fw_options); in qla24xx_update_fw_options()
4016 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) in qla24xx_update_fw_options()
4020 le16_to_cpu(ha->fw_seriallink_options24[1]), in qla24xx_update_fw_options()
4021 le16_to_cpu(ha->fw_seriallink_options24[2]), in qla24xx_update_fw_options()
4022 le16_to_cpu(ha->fw_seriallink_options24[3])); in qla24xx_update_fw_options()
4032 struct qla_hw_data *ha = vha->hw; in qla2x00_config_rings() local
4033 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_config_rings()
4034 struct req_que *req = ha->req_q_map[0]; in qla2x00_config_rings()
4035 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_config_rings()
4038 ha->init_cb->request_q_outpointer = cpu_to_le16(0); in qla2x00_config_rings()
4039 ha->init_cb->response_q_inpointer = cpu_to_le16(0); in qla2x00_config_rings()
4040 ha->init_cb->request_q_length = cpu_to_le16(req->length); in qla2x00_config_rings()
4041 ha->init_cb->response_q_length = cpu_to_le16(rsp->length); in qla2x00_config_rings()
4042 put_unaligned_le64(req->dma, &ha->init_cb->request_q_address); in qla2x00_config_rings()
4043 put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address); in qla2x00_config_rings()
4045 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0); in qla2x00_config_rings()
4046 wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0); in qla2x00_config_rings()
4047 wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0); in qla2x00_config_rings()
4048 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0); in qla2x00_config_rings()
4049 rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ in qla2x00_config_rings()
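Note: qla2x00_config_rings() publishes ring geometry to the firmware through the init control block: 16-bit queue lengths via cpu_to_le16() and 64-bit DMA ring addresses via put_unaligned_le64(), followed by zeroing the in/out pointer registers. A sketch of the endian-safe ICB fill; demo_init_cb is a simplified, hypothetical layout, not the driver's init_cb_t.

    #include <linux/types.h>
    #include <asm/byteorder.h>
    #include <asm/unaligned.h>                     /* <linux/unaligned.h> on newer kernels */

    struct demo_init_cb {                          /* simplified, hypothetical layout */
        __le16 request_q_length;
        __le16 response_q_length;
        __le64 request_q_address;
        __le64 response_q_address;
    } __packed;

    /* Fill the little-endian control block the firmware will read. */
    static void demo_config_rings(struct demo_init_cb *icb,
                                  u16 req_len, dma_addr_t req_dma,
                                  u16 rsp_len, dma_addr_t rsp_dma)
    {
        icb->request_q_length  = cpu_to_le16(req_len);
        icb->response_q_length = cpu_to_le16(rsp_len);
        put_unaligned_le64(req_dma, &icb->request_q_address);
        put_unaligned_le64(rsp_dma, &icb->response_q_address);
    }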
4055 struct qla_hw_data *ha = vha->hw; in qla24xx_config_rings() local
4056 device_reg_t *reg = ISP_QUE_REG(ha, 0); in qla24xx_config_rings()
4057 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; in qla24xx_config_rings()
4061 struct req_que *req = ha->req_q_map[0]; in qla24xx_config_rings()
4062 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla24xx_config_rings()
4065 icb = (struct init_cb_24xx *)ha->init_cb; in qla24xx_config_rings()
4075 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); in qla24xx_config_rings()
4076 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address); in qla24xx_config_rings()
4078 if (IS_SHADOW_REG_CAPABLE(ha)) in qla24xx_config_rings()
4081 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || in qla24xx_config_rings()
4082 IS_QLA28XX(ha)) { in qla24xx_config_rings()
4085 if (ha->flags.msix_enabled) { in qla24xx_config_rings()
4086 msix = &ha->msix_entries[1]; in qla24xx_config_rings()
4100 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && in qla24xx_config_rings()
4101 (ha->flags.msix_enabled)) { in qla24xx_config_rings()
4103 ha->flags.disable_msix_handshake = 1; in qla24xx_config_rings()
4125 if (ha->set_data_rate) { in qla24xx_config_rings()
4128 qla2x00_get_link_speed_str(ha, ha->set_data_rate)); in qla24xx_config_rings()
4129 icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13); in qla24xx_config_rings()
4151 struct qla_hw_data *ha = vha->hw; in qla2x00_init_rings() local
4155 (struct mid_init_cb_24xx *) ha->init_cb; in qla2x00_init_rings()
4157 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_init_rings()
4160 for (que = 0; que < ha->max_req_queues; que++) { in qla2x00_init_rings()
4161 req = ha->req_q_map[que]; in qla2x00_init_rings()
4162 if (!req || !test_bit(que, ha->req_qid_map)) in qla2x00_init_rings()
4177 for (que = 0; que < ha->max_rsp_queues; que++) { in qla2x00_init_rings()
4178 rsp = ha->rsp_q_map[que]; in qla2x00_init_rings()
4179 if (!rsp || !test_bit(que, ha->rsp_qid_map)) in qla2x00_init_rings()
4184 if (IS_QLAFX00(ha)) in qla2x00_init_rings()
4190 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; in qla2x00_init_rings()
4191 ha->tgt.atio_ring_index = 0; in qla2x00_init_rings()
4195 ha->isp_ops->config_rings(vha); in qla2x00_init_rings()
4197 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_init_rings()
4201 if (IS_QLAFX00(ha)) { in qla2x00_init_rings()
4202 rval = qlafx00_init_firmware(vha, ha->init_cb_size); in qla2x00_init_rings()
4207 ha->isp_ops->update_fw_options(vha); in qla2x00_init_rings()
4209 if (ha->flags.npiv_supported) { in qla2x00_init_rings()
4210 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) in qla2x00_init_rings()
4211 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; in qla2x00_init_rings()
4212 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); in qla2x00_init_rings()
4215 if (IS_FWI2_CAPABLE(ha)) { in qla2x00_init_rings()
4218 cpu_to_le16(ha->cur_fw_xcb_count); in qla2x00_init_rings()
4219 ha->flags.dport_enabled = in qla2x00_init_rings()
4223 (ha->flags.dport_enabled) ? "enabled" : "disabled"); in qla2x00_init_rings()
4225 ha->flags.fawwpn_enabled = in qla2x00_init_rings()
4229 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); in qla2x00_init_rings()
4232 rval = qla2x00_init_firmware(vha, ha->init_cb_size); in qla2x00_init_rings()
4240 QLA_FW_STARTED(ha); in qla2x00_init_rings()
4261 struct qla_hw_data *ha = vha->hw; in qla2x00_fw_ready() local
4269 if (IS_P3P_TYPE(ha)) in qla2x00_fw_ready()
4278 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) { in qla2x00_fw_ready()
4300 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { in qla2x00_fw_ready()
4331 qla2x00_get_retry_cnt(vha, &ha->retry_count, in qla2x00_fw_ready()
4332 &ha->login_timeout, &ha->r_a_tov); in qla2x00_fw_ready()
4356 ha->flags.isp82xx_fw_hung) in qla2x00_fw_ready()
4403 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_hba() local
4404 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_configure_hba()
4412 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || in qla2x00_configure_hba()
4413 IS_CNA_CAPABLE(ha) || in qla2x00_configure_hba()
4420 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) && in qla2x00_configure_hba()
4441 ha->min_external_loopid = SNS_FIRST_LOOP_ID; in qla2x00_configure_hba()
4442 ha->operating_mode = LOOP; in qla2x00_configure_hba()
4443 ha->switch_cap = 0; in qla2x00_configure_hba()
4448 ha->current_topology = ISP_CFG_NL; in qla2x00_configure_hba()
4454 ha->switch_cap = sw_cap; in qla2x00_configure_hba()
4455 ha->current_topology = ISP_CFG_FL; in qla2x00_configure_hba()
4461 ha->operating_mode = P2P; in qla2x00_configure_hba()
4462 ha->current_topology = ISP_CFG_N; in qla2x00_configure_hba()
4468 ha->switch_cap = sw_cap; in qla2x00_configure_hba()
4469 ha->operating_mode = P2P; in qla2x00_configure_hba()
4470 ha->current_topology = ISP_CFG_F; in qla2x00_configure_hba()
4477 ha->current_topology = ISP_CFG_NL; in qla2x00_configure_hba()
4488 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_configure_hba()
4489 if (!(topo == 2 && ha->flags.n2n_bigger)) in qla2x00_configure_hba()
4491 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_configure_hba()
4508 struct qla_hw_data *ha = vha->hw; in qla2x00_set_model_info() local
4509 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && in qla2x00_set_model_info()
4510 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); in qla2x00_set_model_info()
4515 memcpy(ha->model_number, model, len); in qla2x00_set_model_info()
4516 st = en = ha->model_number; in qla2x00_set_model_info()
4524 index = (ha->pdev->subsystem_device & 0xff); in qla2x00_set_model_info()
4526 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && in qla2x00_set_model_info()
4528 strlcpy(ha->model_desc, in qla2x00_set_model_info()
4530 sizeof(ha->model_desc)); in qla2x00_set_model_info()
4532 index = (ha->pdev->subsystem_device & 0xff); in qla2x00_set_model_info()
4534 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && in qla2x00_set_model_info()
4536 strlcpy(ha->model_number, in qla2x00_set_model_info()
4538 sizeof(ha->model_number)); in qla2x00_set_model_info()
4539 strlcpy(ha->model_desc, in qla2x00_set_model_info()
4541 sizeof(ha->model_desc)); in qla2x00_set_model_info()
4543 strlcpy(ha->model_number, def, in qla2x00_set_model_info()
4544 sizeof(ha->model_number)); in qla2x00_set_model_info()
4547 if (IS_FWI2_CAPABLE(ha)) in qla2x00_set_model_info()
4548 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc, in qla2x00_set_model_info()
4549 sizeof(ha->model_desc)); in qla2x00_set_model_info()
4558 struct qla_hw_data *ha = vha->hw; in qla2xxx_nvram_wwn_from_ofw() local
4559 struct pci_dev *pdev = ha->pdev; in qla2xxx_nvram_wwn_from_ofw()
4594 struct qla_hw_data *ha = vha->hw; in qla2x00_nvram_config() local
4595 init_cb_t *icb = ha->init_cb; in qla2x00_nvram_config()
4596 nvram_t *nv = ha->nvram; in qla2x00_nvram_config()
4597 uint8_t *ptr = ha->nvram; in qla2x00_nvram_config()
4598 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_nvram_config()
4603 ha->nvram_size = sizeof(*nv); in qla2x00_nvram_config()
4604 ha->nvram_base = 0; in qla2x00_nvram_config()
4605 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) in qla2x00_nvram_config()
4607 ha->nvram_base = 0x80; in qla2x00_nvram_config()
4610 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size); in qla2x00_nvram_config()
4611 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) in qla2x00_nvram_config()
4617 nv, ha->nvram_size); in qla2x00_nvram_config()
4633 memset(nv, 0, ha->nvram_size); in qla2x00_nvram_config()
4636 if (IS_QLA23XX(ha)) { in qla2x00_nvram_config()
4643 } else if (IS_QLA2200(ha)) { in qla2x00_nvram_config()
4649 } else if (IS_QLA2100(ha)) { in qla2x00_nvram_config()
4681 memset(icb, 0, ha->init_cb_size); in qla2x00_nvram_config()
4691 if (IS_QLA23XX(ha)) { in qla2x00_nvram_config()
4697 if (IS_QLA2300(ha)) { in qla2x00_nvram_config()
4698 if (ha->fb_rev == FPM_2310) { in qla2x00_nvram_config()
4699 strcpy(ha->model_number, "QLA2310"); in qla2x00_nvram_config()
4701 strcpy(ha->model_number, "QLA2300"); in qla2x00_nvram_config()
4707 } else if (IS_QLA2200(ha)) { in qla2x00_nvram_config()
4719 strcpy(ha->model_number, "QLA22xx"); in qla2x00_nvram_config()
4721 strcpy(ha->model_number, "QLA2100"); in qla2x00_nvram_config()
4738 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); in qla2x00_nvram_config()
4765 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); in qla2x00_nvram_config()
4767 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) in qla2x00_nvram_config()
4768 ha->flags.disable_risc_code_load = 0; in qla2x00_nvram_config()
4769 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); in qla2x00_nvram_config()
4770 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); in qla2x00_nvram_config()
4771 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); in qla2x00_nvram_config()
4772 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; in qla2x00_nvram_config()
4773 ha->flags.disable_serdes = 0; in qla2x00_nvram_config()
4775 ha->operating_mode = in qla2x00_nvram_config()
4778 memcpy(ha->fw_seriallink_options, nv->seriallink_options, in qla2x00_nvram_config()
4779 sizeof(ha->fw_seriallink_options)); in qla2x00_nvram_config()
4782 ha->serial0 = icb->port_name[5]; in qla2x00_nvram_config()
4783 ha->serial1 = icb->port_name[6]; in qla2x00_nvram_config()
4784 ha->serial2 = icb->port_name[7]; in qla2x00_nvram_config()
4790 ha->retry_count = nv->retry_count; in qla2x00_nvram_config()
4797 ha->login_timeout = nv->login_timeout; in qla2x00_nvram_config()
4800 ha->r_a_tov = 100; in qla2x00_nvram_config()
4802 ha->loop_reset_delay = nv->reset_delay; in qla2x00_nvram_config()
4815 ha->loop_down_abort_time = in qla2x00_nvram_config()
4818 ha->link_down_timeout = nv->link_down_timeout; in qla2x00_nvram_config()
4819 ha->loop_down_abort_time = in qla2x00_nvram_config()
4820 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla2x00_nvram_config()
4826 ha->port_down_retry_count = nv->port_down_retry_count; in qla2x00_nvram_config()
4828 ha->port_down_retry_count = qlport_down_retry; in qla2x00_nvram_config()
4830 ha->login_retry_count = nv->retry_count; in qla2x00_nvram_config()
4831 if (ha->port_down_retry_count == nv->port_down_retry_count && in qla2x00_nvram_config()
4832 ha->port_down_retry_count > 3) in qla2x00_nvram_config()
4833 ha->login_retry_count = ha->port_down_retry_count; in qla2x00_nvram_config()
4834 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla2x00_nvram_config()
4835 ha->login_retry_count = ha->port_down_retry_count; in qla2x00_nvram_config()
4837 ha->login_retry_count = ql2xloginretrycount; in qla2x00_nvram_config()
4844 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { in qla2x00_nvram_config()
4857 ha->zio_mode = icb->add_firmware_options[0] & in qla2x00_nvram_config()
4859 ha->zio_timer = icb->interrupt_delay_timer ? in qla2x00_nvram_config()
4865 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla2x00_nvram_config()
4866 ha->zio_mode = QLA_ZIO_MODE_6; in qla2x00_nvram_config()
4870 ha->zio_mode, ha->zio_timer * 100); in qla2x00_nvram_config()
4872 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; in qla2x00_nvram_config()
4873 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; in qla2x00_nvram_config()
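Note: the qla2x00_nvram_config() fragments show the NVRAM block being read and an additive 8-bit checksum accumulated over every byte; when the result is non-zero the contents are discarded and defaults are written instead. A sketch of that checksum test (the id-byte sanity checks the driver also performs are not visible in this listing and are omitted here).

    #include <linux/types.h>

    /* Additive 8-bit checksum over an NVRAM image; valid data sums to zero. */
    static bool demo_nvram_checksum_ok(const u8 *nv, size_t size)
    {
        u8 chksum = 0;
        size_t cnt;

        for (cnt = 0; cnt < size; cnt++)
            chksum += nv[cnt];

        return chksum == 0;
    }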
4997 struct qla_hw_data *ha = vha->hw; in qla_get_login_template() local
5002 memset(ha->init_cb, 0, ha->init_cb_size); in qla_get_login_template()
5003 sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size); in qla_get_login_template()
5004 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, in qla_get_login_template()
5005 ha->init_cb, sz); in qla_get_login_template()
5011 q = (__be32 *)&ha->plogi_els_payld.fl_csp; in qla_get_login_template()
5013 bp = (uint32_t *)ha->init_cb; in qla_get_login_template()
5015 ha->flags.plogi_template_valid = 1; in qla_get_login_template()
5035 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_loop() local
5064 if ((ha->current_topology == ISP_CFG_FL || in qla2x00_configure_loop()
5065 ha->current_topology == ISP_CFG_F) && in qla2x00_configure_loop()
5071 } else if (ha->current_topology == ISP_CFG_NL || in qla2x00_configure_loop()
5072 ha->current_topology == ISP_CFG_N) { in qla2x00_configure_loop()
5108 ha->flags.fw_init_done = 1; in qla2x00_configure_loop()
5116 spin_lock_irqsave(&ha->tgt.atio_lock, flags); in qla2x00_configure_loop()
5118 spin_unlock_irqrestore(&ha->tgt.atio_lock, in qla2x00_configure_loop()
5192 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_local_loop() local
5196 if (N2N_TOPO(ha)) in qla2x00_configure_local_loop()
5204 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); in qla2x00_configure_local_loop()
5205 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, in qla2x00_configure_local_loop()
5213 ha->gid_list, entries * sizeof(*ha->gid_list)); in qla2x00_configure_local_loop()
5243 gid = ha->gid_list; in qla2x00_configure_local_loop()
5248 if (IS_QLA2100(ha) || IS_QLA2200(ha)) in qla2x00_configure_local_loop()
5252 gid = (void *)gid + ha->gid_list_info_size; in qla2x00_configure_local_loop()
5261 (ha->current_topology == ISP_CFG_NL)) in qla2x00_configure_local_loop()
5285 if (ha->current_topology != ISP_CFG_N) { in qla2x00_configure_local_loop()
5337 fcport->fp_speed = ha->link_data_rate; in qla2x00_configure_local_loop()
5386 struct qla_hw_data *ha = vha->hw; in qla2x00_iidma_fcport() local
5388 if (!IS_IIDMA_CAPABLE(ha)) in qla2x00_iidma_fcport()
5395 fcport->fp_speed > ha->link_data_rate || in qla2x00_iidma_fcport()
5396 !ha->flags.gpsc_supported) in qla2x00_iidma_fcport()
5408 qla2x00_get_link_speed_str(ha, fcport->fp_speed), in qla2x00_iidma_fcport()
5625 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_fabric() local
5629 if (IS_FWI2_CAPABLE(ha)) in qla2x00_configure_fabric()
5660 loop_id = NPH_SNS_LID(ha); in qla2x00_configure_fabric()
5661 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, in qla2x00_configure_fabric()
5724 if (USE_ASYNC_SCAN(ha)) { in qla2x00_configure_fabric()
5774 struct qla_hw_data *ha = vha->hw; in qla2x00_find_all_fabric_devs() local
5775 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_find_all_fabric_devs()
5781 if (!ha->swl) in qla2x00_find_all_fabric_devs()
5782 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t), in qla2x00_find_all_fabric_devs()
5784 swl = ha->swl; in qla2x00_find_all_fabric_devs()
5790 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); in qla2x00_find_all_fabric_devs()
5831 loop_id = ha->min_external_loopid; in qla2x00_find_all_fabric_devs()
5832 for (; loop_id <= ha->max_loop_id; loop_id++) { in qla2x00_find_all_fabric_devs()
5836 if (ha->current_topology == ISP_CFG_FL && in qla2x00_find_all_fabric_devs()
5907 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == in qla2x00_find_all_fabric_devs()
6063 struct qla_hw_data *ha = vha->hw; in qla2x00_reserve_mgmt_server_loop_id() local
6066 set_bit(NPH_MGMT_SERVER, ha->loop_id_map); in qla2x00_reserve_mgmt_server_loop_id()
6071 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_reserve_mgmt_server_loop_id()
6079 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_reserve_mgmt_server_loop_id()
6106 struct qla_hw_data *ha = vha->hw; in qla2x00_fabric_login() local
6119 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id, in qla2x00_fabric_login()
6172 if (IS_FWI2_CAPABLE(ha)) { in qla2x00_fabric_login()
6197 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_login()
6215 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_login()
6328 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) in qla2x00_perform_loop_resync() argument
6332 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) { in qla2x00_perform_loop_resync()
6334 atomic_set(&ha->loop_down_timer, 0); in qla2x00_perform_loop_resync()
6335 if (!(ha->device_flags & DFLG_NO_CABLE)) { in qla2x00_perform_loop_resync()
6336 atomic_set(&ha->loop_state, LOOP_UP); in qla2x00_perform_loop_resync()
6337 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); in qla2x00_perform_loop_resync()
6338 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); in qla2x00_perform_loop_resync()
6339 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); in qla2x00_perform_loop_resync()
6341 rval = qla2x00_loop_resync(ha); in qla2x00_perform_loop_resync()
6343 atomic_set(&ha->loop_state, LOOP_DEAD); in qla2x00_perform_loop_resync()
6345 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); in qla2x00_perform_loop_resync()
6356 struct qla_hw_data *ha = base_vha->hw; in qla2x00_update_fcports() local
6359 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_update_fcports()
6366 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_update_fcports()
6369 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_update_fcports()
6375 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_update_fcports()
6382 struct qla_hw_data *ha = vha->hw; in qla83xx_reset_ownership() local
6388 if (IS_QLA8044(ha)) { in qla83xx_reset_ownership()
6403 (i != ha->portnum)) { in qla83xx_reset_ownership()
6413 ((i + 8) != ha->portnum)) { in qla83xx_reset_ownership()
6423 drv_presence_mask = ~((1 << (ha->portnum)) | in qla83xx_reset_ownership()
6431 (ha->portnum < fcoe_other_function)) { in qla83xx_reset_ownership()
6434 ha->flags.nic_core_reset_owner = 1; in qla83xx_reset_ownership()
6442 struct qla_hw_data *ha = vha->hw; in __qla83xx_set_drv_ack() local
6447 drv_ack |= (1 << ha->portnum); in __qla83xx_set_drv_ack()
6458 struct qla_hw_data *ha = vha->hw; in __qla83xx_clear_drv_ack() local
6463 drv_ack &= ~(1 << ha->portnum); in __qla83xx_clear_drv_ack()
6497 struct qla_hw_data *ha = vha->hw; in qla83xx_idc_audit() local
6502 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); in qla83xx_idc_audit()
6503 idc_audit_reg = (ha->portnum) | in qla83xx_idc_audit()
6504 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); in qla83xx_idc_audit()
6510 jiffies_to_msecs(ha->idc_audit_ts)) / 1000); in qla83xx_idc_audit()
6511 idc_audit_reg = (ha->portnum) | in qla83xx_idc_audit()
6527 struct qla_hw_data *ha = vha->hw; in qla83xx_initiating_reset() local
6540 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { in qla83xx_initiating_reset()
6582 struct qla_hw_data *ha = vha->hw; in qla83xx_check_driver_presence() local
6585 if (drv_presence & (1 << ha->portnum)) in qla83xx_check_driver_presence()
6595 struct qla_hw_data *ha = vha->hw; in qla83xx_nic_core_reset() local
6611 ha->portnum); in qla83xx_nic_core_reset()
6628 ha->flags.nic_core_hung = 0; in qla83xx_nic_core_reset()
6643 struct qla_hw_data *ha = vha->hw; in qla2xxx_mctp_dump() local
6646 if (!IS_MCTP_CAPABLE(ha)) { in qla2xxx_mctp_dump()
6653 if (!ha->mctp_dump) { in qla2xxx_mctp_dump()
6654 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, in qla2xxx_mctp_dump()
6655 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); in qla2xxx_mctp_dump()
6657 if (!ha->mctp_dump) { in qla2xxx_mctp_dump()
6665 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, in qla2xxx_mctp_dump()
6673 vha->host_no, ha->mctp_dump); in qla2xxx_mctp_dump()
6674 ha->mctp_dumped = 1; in qla2xxx_mctp_dump()
6677 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { in qla2xxx_mctp_dump()
6678 ha->flags.nic_core_reset_hdlr_active = 1; in qla2xxx_mctp_dump()
6687 ha->flags.nic_core_reset_hdlr_active = 0; in qla2xxx_mctp_dump()
6705 struct qla_hw_data *ha = vha->hw; in qla2x00_quiesce_io() local
6709 "Quiescing I/O - ha=%p.\n", ha); in qla2x00_quiesce_io()
6711 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); in qla2x00_quiesce_io()
6715 list_for_each_entry(vp, &ha->vp_list, list) in qla2x00_quiesce_io()
6730 struct qla_hw_data *ha = vha->hw; in qla2x00_abort_isp_cleanup() local
6739 if (!(IS_P3P_TYPE(ha))) in qla2x00_abort_isp_cleanup()
6741 ha->flags.chip_reset_done = 0; in qla2x00_abort_isp_cleanup()
6746 "Performing ISP error recovery - ha=%p.\n", ha); in qla2x00_abort_isp_cleanup()
6748 ha->flags.purge_mbox = 1; in qla2x00_abort_isp_cleanup()
6753 if (!(IS_P3P_TYPE(ha))) in qla2x00_abort_isp_cleanup()
6754 ha->isp_ops->reset_chip(vha); in qla2x00_abort_isp_cleanup()
6756 ha->link_data_rate = PORT_SPEED_UNKNOWN; in qla2x00_abort_isp_cleanup()
6757 SAVE_TOPO(ha); in qla2x00_abort_isp_cleanup()
6758 ha->flags.rida_fmt2 = 0; in qla2x00_abort_isp_cleanup()
6759 ha->flags.n2n_ae = 0; in qla2x00_abort_isp_cleanup()
6760 ha->flags.lip_ae = 0; in qla2x00_abort_isp_cleanup()
6761 ha->current_topology = 0; in qla2x00_abort_isp_cleanup()
6762 QLA_FW_STOPPED(ha); in qla2x00_abort_isp_cleanup()
6763 ha->flags.fw_init_done = 0; in qla2x00_abort_isp_cleanup()
6764 ha->chip_reset++; in qla2x00_abort_isp_cleanup()
6765 ha->base_qpair->chip_reset = ha->chip_reset; in qla2x00_abort_isp_cleanup()
6766 for (i = 0; i < ha->max_qpairs; i++) { in qla2x00_abort_isp_cleanup()
6767 if (ha->queue_pair_map[i]) in qla2x00_abort_isp_cleanup()
6768 ha->queue_pair_map[i]->chip_reset = in qla2x00_abort_isp_cleanup()
6769 ha->base_qpair->chip_reset; in qla2x00_abort_isp_cleanup()
6773 if (atomic_read(&ha->num_pend_mbx_stage3)) { in qla2x00_abort_isp_cleanup()
6774 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); in qla2x00_abort_isp_cleanup()
6775 complete(&ha->mbx_intr_comp); in qla2x00_abort_isp_cleanup()
6779 while (atomic_read(&ha->num_pend_mbx_stage3) || in qla2x00_abort_isp_cleanup()
6780 atomic_read(&ha->num_pend_mbx_stage2) || in qla2x00_abort_isp_cleanup()
6781 atomic_read(&ha->num_pend_mbx_stage1)) { in qla2x00_abort_isp_cleanup()
6787 ha->flags.purge_mbox = 0; in qla2x00_abort_isp_cleanup()
6794 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
6795 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp_cleanup()
6797 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
6801 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
6804 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
6816 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
6817 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp_cleanup()
6819 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
6824 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
6827 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
6829 if (!ha->flags.eeh_busy) { in qla2x00_abort_isp_cleanup()
6831 if (IS_P3P_TYPE(ha)) { in qla2x00_abort_isp_cleanup()
6864 struct qla_hw_data *ha = vha->hw; in qla2x00_abort_isp() local
6866 struct req_que *req = ha->req_q_map[0]; in qla2x00_abort_isp()
6873 ha->flags.chip_reset_done = 1; in qla2x00_abort_isp()
6880 if (IS_QLA8031(ha)) { in qla2x00_abort_isp()
6888 if (unlikely(pci_channel_offline(ha->pdev) && in qla2x00_abort_isp()
6889 ha->flags.pci_channel_io_perm_failure)) { in qla2x00_abort_isp()
6909 ha->isp_ops->get_flash_version(vha, req->ring); in qla2x00_abort_isp()
6911 ha->isp_ops->nvram_config(vha); in qla2x00_abort_isp()
6926 ha->isp_ops->enable_intrs(ha); in qla2x00_abort_isp()
6928 ha->isp_abort_cnt = 0; in qla2x00_abort_isp()
6931 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) in qla2x00_abort_isp()
6933 if (ha->fce) { in qla2x00_abort_isp()
6934 ha->flags.fce_enabled = 1; in qla2x00_abort_isp()
6935 memset(ha->fce, 0, in qla2x00_abort_isp()
6936 fce_calc_size(ha->fce_bufs)); in qla2x00_abort_isp()
6938 ha->fce_dma, ha->fce_bufs, ha->fce_mb, in qla2x00_abort_isp()
6939 &ha->fce_bufs); in qla2x00_abort_isp()
6944 ha->flags.fce_enabled = 0; in qla2x00_abort_isp()
6948 if (ha->eft) { in qla2x00_abort_isp()
6949 memset(ha->eft, 0, EFT_SIZE); in qla2x00_abort_isp()
6951 ha->eft_dma, EFT_NUM_BUFFERS); in qla2x00_abort_isp()
6961 if (ha->isp_abort_cnt == 0) { in qla2x00_abort_isp()
6975 ha->isp_abort_cnt--; in qla2x00_abort_isp()
6978 ha->isp_abort_cnt); in qla2x00_abort_isp()
6982 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; in qla2x00_abort_isp()
6985 "more times.\n", ha->isp_abort_cnt); in qla2x00_abort_isp()
6996 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp()
6997 list_for_each_entry(vp, &ha->vp_list, list) { in qla2x00_abort_isp()
7000 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp()
7004 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp()
7008 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp()
7010 if (IS_QLA8031(ha)) { in qla2x00_abort_isp()
7039 struct qla_hw_data *ha = vha->hw; in qla2x00_restart_isp() local
7044 status = ha->isp_ops->chip_diag(vha); in qla2x00_restart_isp()
7057 ha->flags.chip_reset_done = 1; in qla2x00_restart_isp()
7060 qla25xx_init_queues(ha); in qla2x00_restart_isp()
7069 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); in qla2x00_restart_isp()
7076 qla25xx_init_queues(struct qla_hw_data *ha) in qla25xx_init_queues() argument
7080 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla25xx_init_queues()
7084 for (i = 1; i < ha->max_rsp_queues; i++) { in qla25xx_init_queues()
7085 rsp = ha->rsp_q_map[i]; in qla25xx_init_queues()
7086 if (rsp && test_bit(i, ha->rsp_qid_map)) { in qla25xx_init_queues()
7099 for (i = 1; i < ha->max_req_queues; i++) { in qla25xx_init_queues()
7100 req = ha->req_q_map[i]; in qla25xx_init_queues()
7101 if (req && test_bit(i, ha->req_qid_map)) { in qla25xx_init_queues()
7129 struct qla_hw_data *ha = vha->hw; in qla2x00_reset_adapter() local
7130 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_reset_adapter()
7133 ha->isp_ops->disable_intrs(ha); in qla2x00_reset_adapter()
7135 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_reset_adapter()
7140 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_reset_adapter()
7149 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_adapter() local
7150 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_reset_adapter()
7152 if (IS_P3P_TYPE(ha)) in qla24xx_reset_adapter()
7156 ha->isp_ops->disable_intrs(ha); in qla24xx_reset_adapter()
7158 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_reset_adapter()
7163 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_reset_adapter()
7165 if (IS_NOPOLLING_TYPE(ha)) in qla24xx_reset_adapter()
7166 ha->isp_ops->enable_intrs(ha); in qla24xx_reset_adapter()
7178 struct qla_hw_data *ha = vha->hw; in qla24xx_nvram_wwn_from_ofw() local
7179 struct pci_dev *pdev = ha->pdev; in qla24xx_nvram_wwn_from_ofw()
7204 struct qla_hw_data *ha = vha->hw; in qla24xx_nvram_config() local
7207 icb = (struct init_cb_24xx *)ha->init_cb; in qla24xx_nvram_config()
7208 nv = ha->nvram; in qla24xx_nvram_config()
7211 if (ha->port_no == 0) { in qla24xx_nvram_config()
7212 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; in qla24xx_nvram_config()
7213 ha->vpd_base = FA_NVRAM_VPD0_ADDR; in qla24xx_nvram_config()
7215 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; in qla24xx_nvram_config()
7216 ha->vpd_base = FA_NVRAM_VPD1_ADDR; in qla24xx_nvram_config()
7219 ha->nvram_size = sizeof(*nv); in qla24xx_nvram_config()
7220 ha->vpd_size = FA_NVRAM_VPD_SIZE; in qla24xx_nvram_config()
7223 ha->vpd = ha->nvram + VPD_OFFSET; in qla24xx_nvram_config()
7224 ha->isp_ops->read_nvram(vha, ha->vpd, in qla24xx_nvram_config()
7225 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); in qla24xx_nvram_config()
7229 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size); in qla24xx_nvram_config()
7230 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) in qla24xx_nvram_config()
7236 nv, ha->nvram_size); in qla24xx_nvram_config()
7253 memset(nv, 0, ha->nvram_size); in qla24xx_nvram_config()
7261 nv->port_name[1] = 0x00 + ha->port_no + 1; in qla24xx_nvram_config()
7305 memset(icb, 0, ha->init_cb_size); in qla24xx_nvram_config()
7324 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); in qla24xx_nvram_config()
7350 ha->flags.disable_risc_code_load = 0; in qla24xx_nvram_config()
7351 ha->flags.enable_lip_reset = 0; in qla24xx_nvram_config()
7352 ha->flags.enable_lip_full_login = in qla24xx_nvram_config()
7354 ha->flags.enable_target_reset = in qla24xx_nvram_config()
7356 ha->flags.enable_led_scheme = 0; in qla24xx_nvram_config()
7357 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; in qla24xx_nvram_config()
7359 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & in qla24xx_nvram_config()
7362 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, in qla24xx_nvram_config()
7363 sizeof(ha->fw_seriallink_options24)); in qla24xx_nvram_config()
7366 ha->serial0 = icb->port_name[5]; in qla24xx_nvram_config()
7367 ha->serial1 = icb->port_name[6]; in qla24xx_nvram_config()
7368 ha->serial2 = icb->port_name[7]; in qla24xx_nvram_config()
7374 ha->retry_count = le16_to_cpu(nv->login_retry_count); in qla24xx_nvram_config()
7381 ha->login_timeout = le16_to_cpu(nv->login_timeout); in qla24xx_nvram_config()
7384 ha->r_a_tov = 100; in qla24xx_nvram_config()
7386 ha->loop_reset_delay = nv->reset_delay; in qla24xx_nvram_config()
7399 ha->loop_down_abort_time = in qla24xx_nvram_config()
7402 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); in qla24xx_nvram_config()
7403 ha->loop_down_abort_time = in qla24xx_nvram_config()
7404 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla24xx_nvram_config()
7408 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); in qla24xx_nvram_config()
7410 ha->port_down_retry_count = qlport_down_retry; in qla24xx_nvram_config()
7413 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); in qla24xx_nvram_config()
7414 if (ha->port_down_retry_count == in qla24xx_nvram_config()
7416 ha->port_down_retry_count > 3) in qla24xx_nvram_config()
7417 ha->login_retry_count = ha->port_down_retry_count; in qla24xx_nvram_config()
7418 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla24xx_nvram_config()
7419 ha->login_retry_count = ha->port_down_retry_count; in qla24xx_nvram_config()
7421 ha->login_retry_count = ql2xloginretrycount; in qla24xx_nvram_config()
7428 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & in qla24xx_nvram_config()
7430 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? in qla24xx_nvram_config()
7435 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla24xx_nvram_config()
7436 ha->zio_mode = QLA_ZIO_MODE_6; in qla24xx_nvram_config()
7440 ha->zio_mode, ha->zio_timer * 100); in qla24xx_nvram_config()
7443 (uint32_t)ha->zio_mode); in qla24xx_nvram_config()
7444 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); in qla24xx_nvram_config()
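
The NVRAM read path above ends in a word-by-word loop over the image (7229-7230), and the memset at 7253 is the fall-back-to-defaults branch taken when validation fails. The sketch below shows the likely check, assuming a valid image is one whose 32-bit little-endian words sum to zero; the actual comparison is not among the matched lines, so treat that as an assumption.

/* Illustrative only: word-wise checksum over an NVRAM image, in the
 * spirit of the loop matched at 7229-7230.  Names are hypothetical. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t le32_to_host(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Returns 0 when the image checksums to zero, nonzero otherwise. */
static uint32_t nvram_checksum(const uint8_t *img, size_t size)
{
        uint32_t chksum = 0;
        size_t cnt;

        for (cnt = 0; cnt < size >> 2; cnt++)
                chksum += le32_to_host(img + cnt * 4);
        return chksum;
}

int main(void)
{
        uint8_t nv[512];

        memset(nv, 0, sizeof(nv));          /* an all-zero image sums to 0 */
        printf("checksum ok: %s\n",
               nvram_checksum(nv, sizeof(nv)) == 0 ? "yes" : "no");
        return 0;
}
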
7545 struct qla_hw_data *ha = vha->hw; in qla28xx_get_aux_images() local
7550 if (!ha->flt_region_aux_img_status_pri) { in qla28xx_get_aux_images()
7556 ha->flt_region_aux_img_status_pri, in qla28xx_get_aux_images()
7582 if (!ha->flt_region_aux_img_status_sec) { in qla28xx_get_aux_images()
7589 ha->flt_region_aux_img_status_sec, in qla28xx_get_aux_images()
7643 struct qla_hw_data *ha = vha->hw; in qla27xx_get_active_image() local
7648 if (!ha->flt_region_img_status_pri) { in qla27xx_get_active_image()
7654 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) != in qla27xx_get_active_image()
7683 if (!ha->flt_region_img_status_sec) { in qla27xx_get_active_image()
7689 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); in qla27xx_get_active_image()
7753 struct qla_hw_data *ha = vha->hw; in qla24xx_load_risc_flash() local
7754 struct req_que *req = ha->req_q_map[0]; in qla24xx_load_risc_flash()
7755 struct fwdt *fwdt = ha->fwdt; in qla24xx_load_risc_flash()
7787 dlen = ha->fw_transfer_size >> 2; in qla24xx_load_risc_flash()
7813 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla24xx_load_risc_flash()
7899 struct qla_hw_data *ha = vha->hw; in qla2x00_load_risc() local
7900 struct req_que *req = ha->req_q_map[0]; in qla2x00_load_risc()
7956 wlen = (uint16_t)(ha->fw_transfer_size >> 1); in qla2x00_load_risc()
8002 struct qla_hw_data *ha = vha->hw; in qla24xx_load_risc_blob() local
8003 struct req_que *req = ha->req_q_map[0]; in qla24xx_load_risc_blob()
8004 struct fwdt *fwdt = ha->fwdt; in qla24xx_load_risc_blob()
8043 dlen = ha->fw_transfer_size >> 2; in qla24xx_load_risc_blob()
8071 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla24xx_load_risc_blob()
8171 struct qla_hw_data *ha = vha->hw; in qla81xx_load_risc() local
8183 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla81xx_load_risc()
8193 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec); in qla81xx_load_risc()
8200 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); in qla81xx_load_risc()
8206 if (!rval || !ha->flt_region_gold_fw) in qla81xx_load_risc()
8211 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); in qla81xx_load_risc()
8216 ha->flags.running_gold_fw = 1; in qla81xx_load_risc()
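
The tail of qla81xx_load_risc above tries the secondary and primary firmware regions and, only if that still fails and a gold region is configured (8206), loads the gold image and flags running_gold_fw (8211, 8216). Below is a sketch of that fallback order with stub regions and a stub loader; the real choice between primary and secondary depends on active-image state that is not visible in the matched lines.

/* Illustrative only: firmware-load fallback chain ending in a "gold"
 * recovery image.  Region values and the load function are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

enum { LOAD_OK = 0, LOAD_FAILED = 1 };

/* Stub: pretend only the gold region is readable. */
static int load_from_flash(unsigned int region)
{
        return region == 0x3000 ? LOAD_OK : LOAD_FAILED;
}

int main(void)
{
        const unsigned int region_fw_sec = 0x1000;  /* hypothetical */
        const unsigned int region_fw     = 0x2000;  /* hypothetical */
        const unsigned int region_gold   = 0x3000;  /* hypothetical */
        bool running_gold_fw = false;
        int rval;

        rval = load_from_flash(region_fw_sec);
        if (rval != LOAD_OK)
                rval = load_from_flash(region_fw);

        /* Last resort: the recovery ("gold") image, if one exists. */
        if (rval != LOAD_OK && region_gold) {
                rval = load_from_flash(region_gold);
                if (rval == LOAD_OK)
                        running_gold_fw = true;
        }

        printf("load %s%s\n", rval == LOAD_OK ? "succeeded" : "failed",
               running_gold_fw ? " (gold firmware)" : "");
        return rval;
}
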
8224 struct qla_hw_data *ha = vha->hw; in qla2x00_try_to_stop_firmware() local
8226 if (ha->flags.pci_channel_io_perm_failure) in qla2x00_try_to_stop_firmware()
8228 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_try_to_stop_firmware()
8230 if (!ha->fw_major_version) in qla2x00_try_to_stop_firmware()
8232 if (!ha->flags.fw_started) in qla2x00_try_to_stop_firmware()
8238 ha->isp_ops->reset_chip(vha); in qla2x00_try_to_stop_firmware()
8239 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) in qla2x00_try_to_stop_firmware()
8248 QLA_FW_STOPPED(ha); in qla2x00_try_to_stop_firmware()
8249 ha->flags.fw_init_done = 0; in qla2x00_try_to_stop_firmware()
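
qla2x00_try_to_stop_firmware is mostly guards: bail out when the PCI channel is dead, the part is not FWI2-capable, or no firmware was ever loaded or started, and clear the started/init flags only once the stop succeeds. A compact sketch of that guard-clause shape, with all state here being hypothetical stand-ins:

/* Illustrative only: skip the firmware-stop attempt whenever it cannot
 * help, mirroring the early returns matched above. */
#include <stdbool.h>
#include <stdio.h>

struct hw_state {
        bool pci_channel_dead;
        bool fwi2_capable;
        unsigned int fw_major_version;
        bool fw_started;
        bool fw_init_done;
};

static bool stop_firmware(struct hw_state *hw)
{
        if (hw->pci_channel_dead)
                return false;
        if (!hw->fwi2_capable)
                return false;
        if (!hw->fw_major_version)
                return false;
        if (!hw->fw_started)
                return false;

        /* reset + verification would go here */
        hw->fw_started = false;
        hw->fw_init_done = false;
        return true;
}

int main(void)
{
        struct hw_state hw = { .fwi2_capable = true,
                               .fw_major_version = 8, .fw_started = true };

        printf("firmware %s\n", stop_firmware(&hw) ? "stopped" : "left alone");
        return 0;
}
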
8258 struct qla_hw_data *ha = vha->hw; in qla24xx_configure_vhba() local
8259 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla24xx_configure_vhba()
8268 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); in qla24xx_configure_vhba()
8274 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, in qla24xx_configure_vhba()
8307 struct qla_hw_data *ha = vha->hw; in qla84xx_get_chip() local
8313 if (cs84xx->bus == ha->pdev->bus) { in qla84xx_get_chip()
8326 cs84xx->bus = ha->pdev->bus; in qla84xx_get_chip()
8349 struct qla_hw_data *ha = vha->hw; in qla84xx_put_chip() local
8351 if (ha->cs84xx) in qla84xx_put_chip()
8352 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); in qla84xx_put_chip()
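
The cs84xx lines show a shared per-PCI-bus object being looked up, or created on first use, in qla84xx_get_chip (8313, 8326) and released through kref_put in qla84xx_put_chip (8352). Below is a user-space sketch of that get/put lifetime, with a plain reference counter standing in for kref and a single cached slot standing in for the driver's list:

/* Illustrative only: refcounted lookup-or-create of a shared object. */
#include <stdio.h>
#include <stdlib.h>

struct cs_chip {
        int bus;
        int refcnt;
};

static struct cs_chip *cached;              /* one shared instance */

static struct cs_chip *chip_get(int bus)
{
        if (cached && cached->bus == bus) {
                cached->refcnt++;
                return cached;
        }
        cached = calloc(1, sizeof(*cached));
        if (!cached)
                return NULL;
        cached->bus = bus;
        cached->refcnt = 1;
        return cached;
}

static void chip_put(struct cs_chip *chip)
{
        if (chip && --chip->refcnt == 0) {  /* last user frees it */
                if (cached == chip)
                        cached = NULL;
                free(chip);
        }
}

int main(void)
{
        struct cs_chip *a = chip_get(3), *b = chip_get(3);

        printf("shared: %s, refcnt %d\n", a == b ? "yes" : "no", a->refcnt);
        chip_put(b);
        chip_put(a);
        return 0;
}
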
8360 struct qla_hw_data *ha = vha->hw; in qla84xx_init_chip() local
8362 mutex_lock(&ha->cs84xx->fw_update_mutex); in qla84xx_init_chip()
8366 mutex_unlock(&ha->cs84xx->fw_update_mutex); in qla84xx_init_chip()
8384 struct qla_hw_data *ha = vha->hw; in qla81xx_nvram_config() local
8389 icb = (struct init_cb_81xx *)ha->init_cb; in qla81xx_nvram_config()
8390 nv = ha->nvram; in qla81xx_nvram_config()
8393 ha->nvram_size = sizeof(*nv); in qla81xx_nvram_config()
8394 ha->vpd_size = FA_NVRAM_VPD_SIZE; in qla81xx_nvram_config()
8395 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) in qla81xx_nvram_config()
8396 ha->vpd_size = FA_VPD_SIZE_82XX; in qla81xx_nvram_config()
8398 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) in qla81xx_nvram_config()
8402 ha->vpd = ha->nvram + VPD_OFFSET; in qla81xx_nvram_config()
8404 faddr = ha->flt_region_vpd; in qla81xx_nvram_config()
8405 if (IS_QLA28XX(ha)) { in qla81xx_nvram_config()
8407 faddr = ha->flt_region_vpd_sec; in qla81xx_nvram_config()
8413 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size); in qla81xx_nvram_config()
8416 faddr = ha->flt_region_nvram; in qla81xx_nvram_config()
8417 if (IS_QLA28XX(ha)) { in qla81xx_nvram_config()
8419 faddr = ha->flt_region_nvram_sec; in qla81xx_nvram_config()
8425 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); in qla81xx_nvram_config()
8428 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) in qla81xx_nvram_config()
8434 nv, ha->nvram_size); in qla81xx_nvram_config()
8451 memset(nv, 0, ha->nvram_size); in qla81xx_nvram_config()
8458 nv->port_name[1] = 0x00 + ha->port_no + 1; in qla81xx_nvram_config()
8492 nv->enode_mac[5] = 0x06 + ha->port_no + 1; in qla81xx_nvram_config()
8497 if (IS_T10_PI_CAPABLE(ha)) in qla81xx_nvram_config()
8503 memset(icb, 0, ha->init_cb_size); in qla81xx_nvram_config()
8530 icb->enode_mac[5] = 0x06 + ha->port_no + 1; in qla81xx_nvram_config()
8534 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); in qla81xx_nvram_config()
8535 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); in qla81xx_nvram_config()
8560 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { in qla81xx_nvram_config()
8562 ha->flags.scm_supported_a = 1; in qla81xx_nvram_config()
8566 ha->flags.disable_risc_code_load = 0; in qla81xx_nvram_config()
8567 ha->flags.enable_lip_reset = 0; in qla81xx_nvram_config()
8568 ha->flags.enable_lip_full_login = in qla81xx_nvram_config()
8570 ha->flags.enable_target_reset = in qla81xx_nvram_config()
8572 ha->flags.enable_led_scheme = 0; in qla81xx_nvram_config()
8573 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; in qla81xx_nvram_config()
8575 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & in qla81xx_nvram_config()
8579 ha->serial0 = icb->port_name[5]; in qla81xx_nvram_config()
8580 ha->serial1 = icb->port_name[6]; in qla81xx_nvram_config()
8581 ha->serial2 = icb->port_name[7]; in qla81xx_nvram_config()
8587 ha->retry_count = le16_to_cpu(nv->login_retry_count); in qla81xx_nvram_config()
8594 ha->login_timeout = le16_to_cpu(nv->login_timeout); in qla81xx_nvram_config()
8597 ha->r_a_tov = 100; in qla81xx_nvram_config()
8599 ha->loop_reset_delay = nv->reset_delay; in qla81xx_nvram_config()
8612 ha->loop_down_abort_time = in qla81xx_nvram_config()
8615 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); in qla81xx_nvram_config()
8616 ha->loop_down_abort_time = in qla81xx_nvram_config()
8617 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla81xx_nvram_config()
8621 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); in qla81xx_nvram_config()
8623 ha->port_down_retry_count = qlport_down_retry; in qla81xx_nvram_config()
8626 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); in qla81xx_nvram_config()
8627 if (ha->port_down_retry_count == in qla81xx_nvram_config()
8629 ha->port_down_retry_count > 3) in qla81xx_nvram_config()
8630 ha->login_retry_count = ha->port_down_retry_count; in qla81xx_nvram_config()
8631 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla81xx_nvram_config()
8632 ha->login_retry_count = ha->port_down_retry_count; in qla81xx_nvram_config()
8634 ha->login_retry_count = ql2xloginretrycount; in qla81xx_nvram_config()
8638 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) in qla81xx_nvram_config()
8643 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & in qla81xx_nvram_config()
8645 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? in qla81xx_nvram_config()
8651 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla81xx_nvram_config()
8652 ha->zio_mode = QLA_ZIO_MODE_6; in qla81xx_nvram_config()
8656 ha->zio_mode, in qla81xx_nvram_config()
8657 ha->zio_timer * 100); in qla81xx_nvram_config()
8660 (uint32_t)ha->zio_mode); in qla81xx_nvram_config()
8661 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); in qla81xx_nvram_config()
8672 ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); in qla81xx_nvram_config()
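
As in the 24xx variant, the ZIO block above pulls the mode out of firmware_options_2, derives zio_timer from the interrupt delay timer, forces mode 6 whenever ZIO is not disabled, and writes the timer back into the ICB (8661). The sketch below decodes those fields under three assumptions not visible in the matched lines: the low four bits carry the mode, a default of 2 applies when the NVRAM timer is zero, and the timer is in 100-microsecond units (hence the "* 100" in the log line).

/* Illustrative only: ZIO mode/timer decoding with assumed encodings. */
#include <stdint.h>
#include <stdio.h>

enum { ZIO_DISABLED = 0, ZIO_MODE_6 = 6 };

int main(void)
{
        uint32_t firmware_options_2 = 0x00000006;   /* sample value */
        uint16_t interrupt_delay_timer = 0;         /* 0 => use default */

        unsigned int zio_mode = firmware_options_2 & 0xf;
        unsigned int zio_timer = interrupt_delay_timer ?
                                 interrupt_delay_timer : 2;

        if (zio_mode != ZIO_DISABLED)
                zio_mode = ZIO_MODE_6;

        printf("ZIO mode %u, delay %u us\n", zio_mode, zio_timer * 100);
        return 0;
}
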
8685 struct qla_hw_data *ha = vha->hw; in qla82xx_restart_isp() local
8692 ha->flags.chip_reset_done = 1; in qla82xx_restart_isp()
8697 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); in qla82xx_restart_isp()
8718 ha->isp_ops->enable_intrs(ha); in qla82xx_restart_isp()
8720 ha->isp_abort_cnt = 0; in qla82xx_restart_isp()
8726 if (ha->fce) { in qla82xx_restart_isp()
8727 ha->flags.fce_enabled = 1; in qla82xx_restart_isp()
8728 memset(ha->fce, 0, in qla82xx_restart_isp()
8729 fce_calc_size(ha->fce_bufs)); in qla82xx_restart_isp()
8731 ha->fce_dma, ha->fce_bufs, ha->fce_mb, in qla82xx_restart_isp()
8732 &ha->fce_bufs); in qla82xx_restart_isp()
8737 ha->flags.fce_enabled = 0; in qla82xx_restart_isp()
8741 if (ha->eft) { in qla82xx_restart_isp()
8742 memset(ha->eft, 0, EFT_SIZE); in qla82xx_restart_isp()
8744 ha->eft_dma, EFT_NUM_BUFFERS); in qla82xx_restart_isp()
8757 spin_lock_irqsave(&ha->vport_slock, flags); in qla82xx_restart_isp()
8758 list_for_each_entry(vp, &ha->vp_list, list) { in qla82xx_restart_isp()
8761 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla82xx_restart_isp()
8765 spin_lock_irqsave(&ha->vport_slock, flags); in qla82xx_restart_isp()
8769 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla82xx_restart_isp()
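
Within qla82xx_restart_isp above, the FCE and EFT blocks (8726-8744) clear their trace buffers and try to re-arm them after the reset, dropping back to disabled when the enable call fails. A stub-based sketch of that clear-and-rearm step, with the enable call and buffer size as placeholders:

/* Illustrative only: clear a trace buffer and re-enable it after a
 * reset, leaving tracing off if the enable attempt fails. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TRACE_BUF_SIZE 4096

static int enable_trace(void *buf, size_t size)
{
        (void)buf;
        (void)size;
        return 0;                   /* pretend the firmware accepted it */
}

int main(void)
{
        static unsigned char trace_buf[TRACE_BUF_SIZE];
        bool trace_enabled = false;

        memset(trace_buf, 0, sizeof(trace_buf));
        if (enable_trace(trace_buf, sizeof(trace_buf)) == 0)
                trace_enabled = true;
        else
                trace_enabled = false;  /* leave tracing off on failure */

        printf("tracing %s after reset\n", trace_enabled ? "on" : "off");
        return 0;
}
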
8806 struct qla_hw_data *ha = vha->hw; in qla24xx_get_fcp_prio() local
8808 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) in qla24xx_get_fcp_prio()
8812 entries = ha->fcp_prio_cfg->num_entries; in qla24xx_get_fcp_prio()
8813 pri_entry = &ha->fcp_prio_cfg->entry[0]; in qla24xx_get_fcp_prio()
8961 struct qla_hw_data *ha = vha->hw; in qla2xxx_create_qpair() local
8966 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { in qla2xxx_create_qpair()
8984 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; in qla2xxx_create_qpair()
8987 mutex_lock(&ha->mq_lock); in qla2xxx_create_qpair()
8988 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); in qla2xxx_create_qpair()
8989 if (ha->num_qpairs >= ha->max_qpairs) { in qla2xxx_create_qpair()
8990 mutex_unlock(&ha->mq_lock); in qla2xxx_create_qpair()
8995 ha->num_qpairs++; in qla2xxx_create_qpair()
8996 set_bit(qpair_id, ha->qpair_qid_map); in qla2xxx_create_qpair()
8997 ha->queue_pair_map[qpair_id] = qpair; in qla2xxx_create_qpair()
9000 qpair->fw_started = ha->flags.fw_started; in qla2xxx_create_qpair()
9002 qpair->chip_reset = ha->base_qpair->chip_reset; in qla2xxx_create_qpair()
9003 qpair->enable_class_2 = ha->base_qpair->enable_class_2; in qla2xxx_create_qpair()
9005 ha->base_qpair->enable_explicit_conf; in qla2xxx_create_qpair()
9007 for (i = 0; i < ha->msix_count; i++) { in qla2xxx_create_qpair()
9008 msix = &ha->msix_entries[i]; in qla2xxx_create_qpair()
9024 qpair->pdev = ha->pdev; in qla2xxx_create_qpair()
9025 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) in qla2xxx_create_qpair()
9028 mutex_unlock(&ha->mq_lock); in qla2xxx_create_qpair()
9031 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); in qla2xxx_create_qpair()
9038 qpair->rsp = ha->rsp_q_map[rsp_id]; in qla2xxx_create_qpair()
9041 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, in qla2xxx_create_qpair()
9049 qpair->req = ha->req_q_map[req_id]; in qla2xxx_create_qpair()
9055 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { in qla2xxx_create_qpair()
9056 if (ha->fw_attributes & BIT_4) in qla2xxx_create_qpair()
9087 mutex_lock(&ha->mq_lock); in qla2xxx_create_qpair()
9093 ha->queue_pair_map[qpair_id] = NULL; in qla2xxx_create_qpair()
9094 clear_bit(qpair_id, ha->qpair_qid_map); in qla2xxx_create_qpair()
9095 ha->num_qpairs--; in qla2xxx_create_qpair()
9096 mutex_unlock(&ha->mq_lock); in qla2xxx_create_qpair()
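
qla2xxx_create_qpair allocates a queue-pair id from qpair_qid_map under mq_lock (8988-8997), and its error path, like qla2xxx_delete_qpair below, clears exactly that bit and decrements num_qpairs. The sketch below reproduces the same bitmap bookkeeping in user space, with a pthread mutex in place of mq_lock and a hypothetical pool size:

/* Illustrative only: bitmap-based id allocation with symmetric undo. */
#include <pthread.h>
#include <stdio.h>

#define MAX_QPAIRS 32

static pthread_mutex_t mq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long qpair_qid_map;         /* bit n set => id n in use */
static int num_qpairs;

/* Returns the allocated id, or -1 when all ids are in use. */
static int qpair_id_alloc(void)
{
        int id = -1, i;

        pthread_mutex_lock(&mq_lock);
        if (num_qpairs < MAX_QPAIRS) {
                for (i = 0; i < MAX_QPAIRS; i++) {
                        if (!(qpair_qid_map & (1UL << i))) {
                                qpair_qid_map |= 1UL << i;
                                num_qpairs++;
                                id = i;
                                break;
                        }
                }
        }
        pthread_mutex_unlock(&mq_lock);
        return id;
}

static void qpair_id_free(int id)
{
        pthread_mutex_lock(&mq_lock);
        qpair_qid_map &= ~(1UL << id);
        num_qpairs--;
        pthread_mutex_unlock(&mq_lock);
}

int main(void)
{
        int id = qpair_id_alloc();

        printf("allocated qpair id %d\n", id);
        qpair_id_free(id);          /* same cleanup as the delete path */
        return 0;
}
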
9105 struct qla_hw_data *ha = qpair->hw; in qla2xxx_delete_qpair() local
9117 mutex_lock(&ha->mq_lock); in qla2xxx_delete_qpair()
9118 ha->queue_pair_map[qpair->id] = NULL; in qla2xxx_delete_qpair()
9119 clear_bit(qpair->id, ha->qpair_qid_map); in qla2xxx_delete_qpair()
9120 ha->num_qpairs--; in qla2xxx_delete_qpair()
9129 mutex_unlock(&ha->mq_lock); in qla2xxx_delete_qpair()