/*
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term ...
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.
 * Portions Copyright (C) 2004-2005 Christoph Hellwig
 * ...
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE ...
 */
#include <linux/dma-mapping.h>
...
#include <linux/cpu.h>
...

/* Used when mapping IRQ vectors in a driver centric manner */
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * ...
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 */
static int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	...

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		...
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       ...);
		...
				mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		...
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}
	...

	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
	...
			mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	...

	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		...
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}
	...

	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
	...

	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));
	...
				mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		...
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		...
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	...

	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
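/*
 * Note on the do/while loop above: the dump mailbox command (its issue
 * path is elided in this excerpt) returns the VPD region in chunks,
 * reporting the number of words received in mb->un.varDmp.word_cnt on
 * each pass. Each chunk is clamped to the space remaining in the
 * DMP_VPD_SIZE buffer and 'offset' advances until the adapter returns a
 * zero word count or the buffer is full, so a short or oversized VPD
 * image cannot overrun the staging buffer.
 */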
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * ...
 */
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * ...
 */
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	...
	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}
	...
	prog_id_word = pmboxq->u.mb.un.varWords[7];
	...
	dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
}
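/*
 * Note: the two snprintf() formats above encode the Option ROM version
 * either as "%d.%d%d" (version, revision, level) when prg->dist == 3 and
 * prg->num == 0, or as the longer "%d.%d%d%c%d" form that appends a
 * distribution character looked up in dist_char[] plus the number. Both
 * are bounded to the 32-byte OptionROMVersion field.
 */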
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 * ...
 */
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	...

	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));
	...
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    vport->port_type == LPFC_PHYSICAL_PORT &&
	    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
		if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
			phba->sli4_hba.fawwpn_flag &=
				~LPFC_FAWWPN_FABRIC;
		...
			 "2701 FA-PWWN change WWPN from %llx to "
			 ...
			 wwn_to_u64(vport->fc_portname.u.wwn),
			 wwn_to_u64(vport->fc_sparam.portName.u.wwn),
			 vport->vport_flag,
			 phba->sli4_hba.fawwpn_flag);
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	}

	if (vport->fc_portname.u.wwn[0] == 0)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}
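/*
 * Note: the pattern above treats a zero leading WWN byte as "no user-set
 * soft WWNN/WWPN": the name is then taken from the service parameters,
 * otherwise the user value overwrites the service parameters. For an SLI4
 * physical port with a fabric-assigned WWPN (LPFC_FAWWPN_FABRIC set) the
 * fabric-supplied port name wins instead, and FAWWPN_PARAM_CHG is raised
 * beforehand when the configured and fabric names disagree.
 */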
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * ...
 * Return codes
 *   0 - success.
 *   Any other value - error.
 */
static int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	...
	struct lpfc_sli *psli = &phba->sli;
	...

	spin_lock_irq(&phba->hbalock);
	...
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	...
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	...
	pmb->vport = vport;
	...
			mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		...
		return -EIO;
	...

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
	...
	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	...
	pmb->ctx_buf = NULL;
	...

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	...

	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		...
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		...
			if (j <= 9)
				phba->SerialNumber[i] =
					(char)((uint8_t)0x30 + (uint8_t)j);
			else
				phba->SerialNumber[i] =
					(char)((uint8_t)0x61 + (uint8_t)(j - 10));
			...
			if (j <= 9)
				phba->SerialNumber[i] =
					(char)((uint8_t)0x30 + (uint8_t)j);
			else
				phba->SerialNumber[i] =
					(char)((uint8_t)0x61 + (uint8_t)(j - 10));
		...
	}

	...
	pmb->vport = vport;
	...
			mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	...

	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		...
			phba->cfg_hba_queue_depth,
			mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;
	...
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;
	...
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
	...
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 * ...
	 */
	if (phba->intr_type == MSIX) {
		...
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		...
				pmb->u.mb.mbxCommand,
				pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		...
	}

	spin_lock_irq(&phba->hbalock);
	...
	phba->hba_flag &= ~HBA_ERATT_HANDLED;
	...
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	...
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		...

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	...
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	...
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		...
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		...
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		...
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		...
	}
	...
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	...
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	...
		mempool_free(pmb, phba->mbox_mem_pool);
	...

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	...
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	...
		mempool_free(pmb, phba->mbox_mem_pool);
	...
	return 0;
}
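/*
 * Note on the sequence above: after READ_CONFIG caps cfg_hba_queue_depth
 * at the adapter's max_xri and the HC register interrupt mask is written,
 * the routine arms three timers: the ELS (ring-0) timer at twice the
 * RATOV, the heartbeat timer, and the error-attention poll timer at
 * eratt_poll_interval seconds. Link bring-up is skipped when LINK_DISABLED
 * is set; otherwise lpfc_hba_init_link() runs unless link-up suppression
 * is configured. The two trailing mailboxes (async event configuration
 * and the wakeup-parameter dump) complete asynchronously through the
 * cmpl handlers installed just before each is issued.
 */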
/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * ...
 */
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	...
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	...
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  ...);
	...
		mempool_free(mboxq, phba->mbox_mem_pool);
		...

	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	...
	if (phba->cfg_enable_mi)
		phba->sli4_hba.pc_sli4_params.mi_ver =
			...;
	else
		phba->sli4_hba.pc_sli4_params.mi_ver = 0;

	phba->sli4_hba.pc_sli4_params.cmf =
		...;
	phba->sli4_hba.pc_sli4_params.pls =
		...;
	...
	mempool_free(mboxq, phba->mbox_mem_pool);
	...
}
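/*
 * Note: this refresh re-issues GET_SLI4_PARAMETERS and updates the
 * driver's cached copy of a few negotiated fields in pc_sli4_params: the
 * MI (management interface) version, forced to 0 when cfg_enable_mi is
 * off, plus the cmf and pls capability bits (the bf_get extraction of
 * each value is elided in this excerpt).
 */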
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 * ...
 * Return codes
 *   0 - success
 *   Any other value - error
 */
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 * ...
 * Return codes
 *   0 - success
 *   Any other value - error
 */
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	...

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		...
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		...
	...
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		...
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	...
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	...
	mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
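/*
 * Note: the long condition above rejects any user-requested fixed speed
 * (1G through 64G) whose corresponding LMT_* bit is absent from
 * phba->lmt, the link-speed capability mask captured earlier from
 * READ_CONFIG. An unsupported request is reported (the log call is
 * elided here) and downgraded to LPFC_USER_LINK_SPEED_AUTO before
 * lpfc_init_link() builds the INIT_LINK mailbox with the requested
 * topology.
 */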
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 * ...
 * Return codes
 *   0 - success
 *   Any other value - error
 */
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	...
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	...
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	...
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	...
	mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * ...
 * Return codes
 *   0 - success.
 *   Any other value - error.
 */
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	...
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	...
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			...
	...
}
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * ...
 */
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	...
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		...
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 ...);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		...
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		...
		}
	}
}
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * ...
 */
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	...

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		...
	else {
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);
		...
			list_del(&mp->list);
			...
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
		...
		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
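/*
 * Note: the splice-then-free pattern above is the usual way to avoid
 * calling lpfc_mbuf_free() while holding hbalock: the ELS postbufq is
 * emptied onto a private list under the lock, the DMA buffers are freed
 * with the lock dropped, and postbufq_cnt is fixed up under the lock
 * afterwards.
 */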
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * ...
 */
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	...

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			...
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);
			...
		}
		...
	}

	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		...
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		...
	}
	...
}
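/*
 * Note: both legs above (SLI-3 rings, SLI-4 WQ rings) drain txcmplq the
 * same way: outstanding IOCBs are spliced to a local completions list
 * with txcmplq_cnt zeroed under the ring lock; on SLI-4 each IOCB also
 * has LPFC_IO_ON_TXCMPLQ cleared first. The spliced commands are then
 * completed back to their owners outside the lock (that completion call
 * is elided in this excerpt).
 */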
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * ...
 * Return codes
 *   0 - success.
 *   Any other value - error.
 */
...

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * ...
 * Return codes
 *   0 - success.
 *   Any other value - error.
 */
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	...
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;
	...
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);
	...
	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	...

	spin_lock_irq(&phba->hbalock);
	...
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		...
		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);
		...
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
		...
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 ...);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		...
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		...
	}
	...
}
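/*
 * Note: after an SLI-4 reset the recovery above walks three classes of
 * aborted resources: ELS sgl entries are marked SGL_FREED and spliced
 * back onto the free list; per-hardware-queue aborted IO buffers are
 * returned to the put list, with put_io_bufs absorbing both the SCSI and
 * NVMe abort counters before they are zeroed; and outstanding NVMET
 * contexts have their XBUSY and ABORT_OP flags cleared before being
 * reposted via lpfc_nvmet_ctxbuf_post().
 */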
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * ...
 * Return codes
 *   0 - success.
 *   Any other value - error.
 */
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * ...
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * ...
 * work-port-events bitmap and the worker thread is notified. This timeout
 * ...
 */
static void
lpfc_hb_timeout(struct timer_list *t)
{
	...
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	...
}
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * ...
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * ...
 * work-port-events bitmap and the worker thread is notified. This timeout
 * ...
 */
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	...
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * ...
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * ...
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * ...
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 */
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}
/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * ...
 * This routine tracks per-cq idle_stat and determines polling decisions.
 * ...
 */
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	...
	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	...
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];
		...
		/*
		 * ...
		 * percentage of 100 - the sum of the other consumption times.
		 */
		...
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;
		...
		busy_time = diff_wall - diff_idle;
		...
		idle_percent = 100 - idle_percent;
		...
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	...

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      ...);
}
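/*
 * Note on the arithmetic above: for each CQ's primary CPU the routine
 * diffs the CPU's idle and wall-clock counters since the previous sample,
 * derives busy_time = diff_wall - diff_idle, converts that to a busy
 * percentage (the division itself is elided in this excerpt), and flips
 * cq->poll_mode between LPFC_QUEUE_WORK and LPFC_IRQ_POLL based on how
 * idle the CPU was. The prev_* snapshots are saved so the next pass
 * measures only the most recent window.
 */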
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	...
	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		...

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	...

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		...
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		...
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		...
		usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
		...
		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}
	...
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   ...);
}
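/*
 * Note: usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP grows the EQ
 * coalescing delay by one step for roughly every 1024 interrupts counted
 * on the CPU during the sampling window, and eqi->icnt is reset for the
 * next window. EQs found running on a different CPU than expected are
 * migrated to that CPU's eq_info list; otherwise any changed delay is
 * pushed to the hardware one EQ at a time (the count argument of 1 to
 * lpfc_modify_hba_eq_delay()).
 */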
/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * ...
 */
static void
lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	...
	hwq_count = phba->cfg_hdw_queue;
	...
}
/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * ...
 */
static int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	...
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	...
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	...
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	...
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}
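/*
 * Note: HBA_HBEAT_INP acts as the single-outstanding-heartbeat latch: the
 * routine returns early while it is set, and only sets it once the
 * heartbeat mailbox has been handed to the SLI layer (the issue call is
 * elided above; a rejected mailbox is freed and reported as -ENXIO).
 * lpfc_hb_mbox_cmpl() clears the flag, together with HBA_HBEAT_TMO, when
 * the command completes.
 */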
/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * ...
 */
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * ...
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * ...
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * ...
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * ...
 */
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	...
	struct lpfc_sli *psli = &phba->sli;
	...

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}
	...
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		...
	}
	...

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);
		...
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		...
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
			       ...,
			       jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				...
			else
				...
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		if (phba->hba_flag & HBA_HBEAT_INP) {
			...
				 - phba->last_completion_time));
			...
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {
				...
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				...
					 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;
			...
		}
	}

	if (phba->hba_flag & HBA_HBEAT_TMO) {
		...
	}
	...
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * ...
 */
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	...

	spin_lock_irq(&phba->hbalock);
	...
	spin_unlock_irq(&phba->hbalock);
	...

	phba->link_state = LPFC_HBA_ERROR;
	...
}
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * ...
 */
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);
	...
}
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * ...
 */
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;
	...

	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	...
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * ...
	 * SCSI layer retry it after re-establishing link.
	 */
	...

	while (phba->work_hs & HS_FFER1) {
		...
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		...
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}
	...

	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
/* In lpfc_board_errevt_to_mgmt(): */
	...
	shost = lpfc_shost_from_vport(phba->pport);
	...
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * ...
 *   1 - HBA error attention interrupt
 *   2 - DMA ring index out of range
 *   3 - Mailbox command came back as unknown
 */
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	...

	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		...
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;
	...

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			...
				"1301 Re-establishing Link "
				...
				phba->work_hs, phba->work_status[0],
				phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			...
				phba->work_hs, phba->work_status[0],
				phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * ...
		 * retry it after re-establishing link.
		 */
		...
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		...
			temperature, phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		...

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		...
	} else {
		...
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);
		...
	}
	...
}
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * ...
 */
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	...
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    ...)
		...
	...

	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
	if (phba->sli.mbox_active) {
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		...
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irq(&phba->hbalock);
	...

	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	...
		return -EIO;
	...
	phba->intr_mode = intr_mode;
	...
}
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * ...
 */
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	...

	if (pci_channel_offline(phba->pcidev)) {
		...
	}
	...
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				...);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				...);
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			...
		}
		...
		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       ...))
				...
		}
		...
		if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				...))
			...
		...
		phba->link_state = LPFC_HBA_ERROR;
		...
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		...
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				...);
		if (pci_rd_rc1 == -EIO) {
			...
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			...
		}

		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		...
			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			...
			shost = lpfc_shost_from_vport(phba->pport);
			...
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
		...
		if (!phba->cfg_enable_hba_reset)
			return;
		...
		phba->link_state = LPFC_HBA_ERROR;
		...
		break;
	...
	}
	...
}
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * ...
 * Return codes
 *   0 - success.
 *   Any other value - error.
 */
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
/**
 * lpfc_handle_latt - The HBA link event handler
 * ...
 */
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	...

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		...
	}
	...
		mempool_free(pmb, phba->mbox_mem_pool);
		...

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	...
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	...

	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

	...
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	...
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	...
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	...
	phba->link_state = LPFC_HBA_ERROR;
	...
}
static void
lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
{
	...
		length -= (3 + i);
		while (i--) {
			phba->SerialNumber[j++] = vpd[(*pindex)++];
			...
		}
		phba->SerialNumber[j] = 0;
	...
		phba->vpd_flag |= VPD_MODEL_DESC;
		...
		length -= (3 + i);
		while (i--) {
			phba->ModelDesc[j++] = vpd[(*pindex)++];
			...
		}
		phba->ModelDesc[j] = 0;
	...
		phba->vpd_flag |= VPD_MODEL_NAME;
		...
		length -= (3 + i);
		while (i--) {
			phba->ModelName[j++] = vpd[(*pindex)++];
			...
		}
		phba->ModelName[j] = 0;
	...
		phba->vpd_flag |= VPD_PROGRAM_TYPE;
		...
		length -= (3 + i);
		while (i--) {
			phba->ProgramType[j++] = vpd[(*pindex)++];
			...
		}
		phba->ProgramType[j] = 0;
	...
		phba->vpd_flag |= VPD_PORT;
		...
		length -= (3 + i);
		while (i--) {
			if ((phba->sli_rev == LPFC_SLI_REV4) &&
			    (phba->sli4_hba.pport_name_sta ==
			     ...)) {
				...
			} else
				phba->Port[j++] = vpd[(*pindex)++];
			...
		}

		if ((phba->sli_rev != LPFC_SLI_REV4) ||
		    (phba->sli4_hba.pport_name_sta ==
		     ...))
			phba->Port[j] = 0;
	...
		length -= (3 + i);
	...
}
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * ...
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 */
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	...
	while (!finished && (index < (len - 4))) {
		...
			if (Length > len - index)
				Length = len - index;
		...
	}
	...
	return 1;
}
/**
 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
 * ...
 */
static void
lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	uint16_t sub_dev_id = phba->pcidev->subsystem_device;
	...
		(tbolt) ? "ThunderLink FC " : "Celerity FC-",
		...
		phba->Port);
	...
}
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * ...
 */
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	...
	uint16_t dev_id = phba->pcidev->device;
	...

	if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
		lpfc_get_atto_model_desc(phba, mdp, descp);
		return;
	}

	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	...
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			...
		...
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			...
		...
		m = (typeof(m)){"LP9802", "PCI-X",
				...};
		...
		m = (typeof(m)){"LP10000", "PCI-X",
				...};
		...
		m = (typeof(m)){"LPX1000", "PCI-X",
				...};
		...
		m = (typeof(m)){"LP982", "PCI-X",
				...};
		...
		m = (typeof(m)){"LP1050", "PCI-X",
				...};
		...
		m = (typeof(m)){"LP11000", "PCI-X2",
				...};
		...
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				...};
		...
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				...};
		...
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				...};
		...
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				...};
		...
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		...
		m = (typeof(m)){"LP111", "PCI-X2",
				...};
		...
		m = (typeof(m)){"LP101", "PCI-X",
				...};
		...
		m = (typeof(m)){"LP10000-S", "PCI",
				...};
		...
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				...};
		...
		m = (typeof(m)){"LPe11000-S", "PCIe",
				...};
		...
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		...
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		...
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		...
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				...};
		...
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				...};
		...
	}
	...
		 phba->Port);
	...
}
/**
 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * ...
 */
int
lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      int cnt)
{
	...
	cnt += pring->missbufcnt;

	while (cnt > 0) {
		...
			pring->missbufcnt = cnt;
			return cnt;
		...
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		...
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			...
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		...
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				...
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				...
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		...

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		...
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		...

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			...
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
			...
			pring->missbufcnt = cnt;
			return cnt;
		}
		...
	}
	pring->missbufcnt = 0;
	return 0;
}
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * ...
 * Return codes
 *   0 - success (currently always success)
 */
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING],
			      LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * ...
 */
...

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * ...
 */
...
			HashWorkingPointer[t] =
				S(1,
				  HashWorkingPointer[t - 3] ^
				  HashWorkingPointer[t - 8] ^
				  HashWorkingPointer[t - 14] ^
				  HashWorkingPointer[t - 16]);
	...
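/*
 * Note: S(N,V) is a 32-bit rotate-left of V by N bits, and the expansion
 * above is the standard SHA-1 message-schedule recurrence,
 * W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]), applied to the
 * working hash array feeding the challenge-key computation below.
 */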
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * ...
 */
...

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 * ...
 */
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	...
	uint32_t *pwwnn = (uint32_t *)phba->wwnn;
	...
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
	...
}
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * ...
 */
static void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	...

	if (phba->link_state > LPFC_LINK_DOWN)
		...
	...

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			...
		}

		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			...
		}
		...
		if (ndlp->nlp_type & NLP_FABRIC &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			...

		if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD)))
			...
	}
	...

	if (vport->load_flag & FC_UNLOADING &&
	    pci_channel_offline(phba->pcidev))
		lpfc_sli_flush_io_rings(vport->phba);
	...

	while (!list_empty(&vport->fc_nodes)) {
		...
		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vport->fc_nodes, nlp_listp) {
			lpfc_printf_vlog(ndlp->vport, KERN_ERR,
					 ...
					 ndlp->nlp_DID, (void *)ndlp,
					 kref_read(&ndlp->kref),
					 ndlp->fc4_xpt_flags,
					 ndlp->nlp_flag);
		}
		...
	}
	...
}
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * ...
 */
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	...
}
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * ...
 */
static void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	...
	del_timer(&phba->fcf.redisc_wait);
}
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * ...
 */
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		...
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	...
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_cmf_stop - Stop CMF processing
 * ...
 */
void
lpfc_cmf_stop(struct lpfc_hba *phba)
{
	int cpu;
	struct lpfc_cgn_stat *cgs;

	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf)
		return;
	...

	hrtimer_cancel(&phba->cmf_timer);
	...
	atomic_set(&phba->cmf_busy, 0);
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	atomic_set(&phba->cmf_bw_wait, 0);

	/* Resume any blocked IO - Queue unblock on workqueue */
	queue_work(phba->wq, &phba->unblock_request_work);
}
/* In lpfc_cmf_signal_init(): */
	...
	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
	phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
	phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
					    phba->cmf_interval_rate, 1000);
	phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
	...
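/*
 * Note: cmf_link_byte_count above is the line rate scaled to one CMF
 * timer interval: cmf_max_line_rate * cmf_interval_rate / 1000, where the
 * div_u64 by 1000 suggests a per-second rate paired with a millisecond
 * interval (LPFC_CMF_INTERVAL). cmf_max_bytes_per_interval then starts
 * out uncapped at that full per-interval link budget.
 */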
/**
 * lpfc_cmf_start - Start CMF processing
 **/
void
lpfc_cmf_start(struct lpfc_hba *phba)
{
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf ||
	    phba->cmf_active_mode == LPFC_CFG_OFF)
		return;

	/* ... */
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);

	atomic_set(&phba->cmf_busy, 0);
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	phba->cmf_latency.tv_sec = 0;
	phba->cmf_latency.tv_nsec = 0;

	/* ... */
	phba->cmf_timer_cnt = 0;
	hrtimer_start(&phba->cmf_timer,
		      /* ... expiry and mode elided ... */);
	/* setup latency evaluation timestamp */
	ktime_get_real_ts64(&phba->cmf_latency);

	atomic_set(&phba->cmf_bw_wait, 0);
	atomic_set(&phba->cmf_stop_io, 0);
}
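/*
 * Note: ktime_get_real_ts64() is taken only after the hrtimer is armed,
 * so cmf_latency marks the true start of the first interval; it is the
 * baseline that lpfc_calc_cmf_latency() (further below) subtracts from.
 */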
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	if (phba->pport)
		lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, /* ... */,
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	/* ... */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Wait long enough for the active mailbox to complete */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* ... sleep briefly; on timeout, log and break ... */
		lpfc_printf_log(phba, KERN_ERR, /* ... */,
				/* ... */ "- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
		/* ... */
	}
}
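/*
 * Note: the flag flip and the wait are deliberately split. Setting
 * LPFC_BLOCK_MGMT_IO under hbalock stops new management submissions
 * immediately; the poll loop then only has to out-wait the one mailbox
 * command that may already be active, bounded by that command's own
 * timeout value rather than a fixed worst case.
 */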
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 **/
static void lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;
		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes, nlp_listp) {
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR)
				continue;
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO, /* ... */,
					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_create_expedite_pool - create expedite pool
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
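/*
 * Note on lock order: io_buf_list_put_lock is taken before the pool
 * lock here and in every routine below that moves buffers between a
 * put list and a pool, so the two paths can never deadlock against
 * each other. The XRI_BATCH cap keeps the expedite reserve small; it
 * only needs to cover a burst of high-priority commands.
 */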
/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 **/
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &epd_pool->list, list) {
		list_move_tail(&lpfc_ncmd->list,
			       &qp->lpfc_io_buf_list_put);
		/* Clear only the expedite marking set at pool creation;
		 * assigning false to ->flags would clobber the flag word.
		 */
		lpfc_ncmd->expedite = false;
		qp->put_io_bufs++;
		epd_pool->count--;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 **/
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
	u32 i, j, hwq_count, count_per_hwq;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;

	lpfc_printf_log(phba, KERN_INFO, /* ... */,
			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
			phba->sli4_hba.io_xri_cnt);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_create_expedite_pool(phba);

	hwq_count = phba->cfg_hdw_queue;
	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

	for (i = 0; i < hwq_count; i++) {
		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
		if (!multixri_pool) {
			/* Unwind anything built so far */
			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
				lpfc_destroy_expedite_pool(phba);
			for (j = 0; j < i; j++) {
				qp = &phba->sli4_hba.hdwq[j];
				kfree(qp->p_multixri_pool);
			}
			phba->cfg_xri_rebalancing = 0;
			return;
		}

		qp = &phba->sli4_hba.hdwq[i];
		qp->p_multixri_pool = multixri_pool;

		multixri_pool->xri_limit = count_per_hwq;
		multixri_pool->rrb_next_hwqid = i;

		/* Public free xri pool: seeded from the put list */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock_init(&pbl_pool->lock);
		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
		spin_lock(&pbl_pool->lock);
		INIT_LIST_HEAD(&pbl_pool->list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put, list) {
			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
			qp->put_io_bufs--;
			pbl_pool->count++;
		}
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				pbl_pool->count, i);
		spin_unlock(&pbl_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		/* Private free xri pool: starts empty */
		pvt_pool = &multixri_pool->pvt_pool;
		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
		pvt_pool->low_watermark = XRI_BATCH;
		spin_lock_init(&pvt_pool->lock);
		spin_lock_irqsave(&pvt_pool->lock, iflag);
		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
	}
}
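/*
 * Note: each hardware queue ends up with a two-tier free list. The
 * private (pvt) pool is the cheap per-queue cache, bounded between
 * XRI_BATCH (low watermark) and xri_limit / 2 (high watermark); the
 * public (pbl) pool holds the rest and is what XRI rebalancing steals
 * from, with rrb_next_hwqid seeding the round-robin victim search.
 */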
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 **/
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
	u32 i, hwq_count;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_destroy_expedite_pool(phba);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_sli_flush_io_rings(phba);

	hwq_count = phba->cfg_hdw_queue;

	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;
		qp->p_multixri_pool = NULL;

		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Drain the public free xri pool back to the put list */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock(&pbl_pool->lock);
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				pbl_pool->count, i);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pbl_pool->count--;
		}
		INIT_LIST_HEAD(&pbl_pool->list);
		pbl_pool->count = 0;
		spin_unlock(&pbl_pool->lock);

		/* Drain the private free xri pool back to the put list */
		pvt_pool = &multixri_pool->pvt_pool;
		spin_lock(&pvt_pool->lock);
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				pvt_pool->count, i);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pvt_pool->count--;
		}
		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;
		spin_unlock(&pvt_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		kfree(multixri_pool);
	}
}
/**
 * lpfc_online - Initialize and bring a HBA online
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	bool vpis_cleared = false;
	int i, error = 0;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;
	/* ... */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* ... */
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Reestablish the NVMe local port after the reset */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
		    !phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			/* ... */
		}
	}
	/* ... */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if (vpis_cleared &&
				    (vports[i]->port_type !=
				     LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_create_multixri_pools(phba);
	/* ... */
	return 0;
}
/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i, offline;
	bool hba_pci_err;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;
	/* ... */
	offline = pci_channel_offline(phba->pcidev);
	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				spin_lock_irq(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(&ndlp->lock);
				/* ... */
				spin_lock_irq(&ndlp->lock);
				ndlp->nlp_flag &= ~(NLP_UNREG_INP |
						    /* ... */);
				spin_unlock_irq(&ndlp->lock);
				if (phba->sli_rev == LPFC_SLI_REV4)
					/* ... */;
				/* Release the RPI so it can be reused */
				if (phba->sli_rev == LPFC_SLI_REV4) {
					lpfc_printf_vlog(ndlp->vport,
							 KERN_INFO, /* ... */,
							 ndlp->nlp_rpi, ndlp,
							 ndlp->nlp_DID);
					lpfc_sli4_free_rpi(phba,
							   ndlp->nlp_rpi);
					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
				}
				if (ndlp->nlp_type & NLP_FABRIC) {
					/* Drop the node reference unless a
					 * LOGO is pending or the transport
					 * still holds a registration.
					 */
					if (!(ndlp->save_flags &
					      /* ... */) &&
					    !(ndlp->fc4_xpt_flags &
					      /* ... */))
						lpfc_nlp_put(ndlp);
				}
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	/* ... */
	if (phba->wq)
		flush_workqueue(phba->wq);
}
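/*
 * Note: offline_prep only quiesces logins and RPIs; the actual queue
 * teardown happens in lpfc_offline() below. Freeing each RPI here and
 * poisoning nlp_rpi with LPFC_RPI_ALLOC_ERROR ensures a later
 * lpfc_sli4_node_prep() must allocate fresh RPIs rather than trust
 * stale ones across the reset.
 */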
/**
 * lpfc_offline - Bring a HBA offline
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* ... stop the port ... */

	/* Tear down the local NVMe port registration */
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	/* ... */
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Tear down CPU hotplug state once the port is fully offline */
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		/* ... */;

	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);
}
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *sb, *sb_next;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);
	spin_unlock_irq(&phba->hbalock);
}
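/*
 * Note: SCSI buffers live on two lists, a "get" list the submission
 * path allocates from and a "put" list completions return to, each
 * with its own lock; the hot paths only swap the lists when "get"
 * runs dry, so allocate and free rarely contend. Teardown, as above,
 * must therefore drain both under both locks.
 */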
/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
 **/
void
lpfc_io_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	struct lpfc_sli4_hdw_queue *qp;
	int idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		/* Release the IO buffers maintained by this hardware queue */
		spin_lock(&qp->io_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->put_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			/* ... */
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_put_lock);

		spin_lock(&qp->io_buf_list_get_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_get,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->get_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			/* ... */
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_get_lock);
	}
}
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	LIST_HEAD(els_sgl_list);
	int rc = 0;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(*sglq_entry), GFP_KERNEL);
			if (!sglq_entry) {
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrank */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* ... free xri_cnt surplus entries ... */
		__lpfc_mbuf_free(phba, sglq_entry->virt,
				 sglq_entry->phys);
		/* ... */
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* assign xri tags to all els sgls on the list */
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	/* ... */
	return rc;
}
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc = 0;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, all remaining XRIs are dedicated to IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(*sglq_entry), GFP_KERNEL);
			if (!sglq_entry) {
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							&sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* ... free xri_cnt surplus entries ... */
		lpfc_nvmet_buf_free(phba, sglq_entry->virt,
				    sglq_entry->phys);
		/* ... */
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* assign xri tags to all nvmet sgls on the list */
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	/* ... */
	return rc;
}
static int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
	LIST_HEAD(blist);
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *iobufp, *prev_iobufp;
	int idx, cnt = 0, xri, inserted;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		spin_lock_irq(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);

		/* Take everything off the get and put lists */
		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
		list_splice(&qp->lpfc_io_buf_list_put, &blist);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
		cnt += qp->get_io_bufs + qp->put_io_bufs;
		qp->get_io_bufs = 0;
		qp->put_io_bufs = 0;
		qp->total_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock_irq(&qp->io_buf_list_get_lock);
	}

	/* Move the buffers from blist to cbuf in ascending XRI order */
	for (idx = 0; idx < cnt; idx++) {
		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
		if (idx == 0) {
			list_add_tail(&lpfc_cmd->list, cbuf);
			continue;
		}
		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
		inserted = 0;
		prev_iobufp = NULL;
		list_for_each_entry(iobufp, cbuf, list) {
			if (xri < iobufp->cur_iocbq.sli4_xritag) {
				if (prev_iobufp)
					list_add(&lpfc_cmd->list,
						 &prev_iobufp->list);
				else
					list_add(&lpfc_cmd->list, cbuf);
				inserted = 1;
				break;
			}
			prev_iobufp = iobufp;
		}
		if (!inserted)
			list_add_tail(&lpfc_cmd->list, cbuf);
	}
	return cnt;
}
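/*
 * Note: the second loop is an insertion sort by sli4_xritag. Sorting
 * looks expensive, but this runs only on reset/teardown paths, and a
 * list ordered by XRI lets the caller post SGLs to the port in the
 * sequential ranges the hardware interface expects.
 */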
static void
lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	int idx;

	qp = phba->sli4_hba.hdwq;
	while (!list_empty(cbuf)) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			list_remove_head(cbuf, lpfc_cmd,
					 struct lpfc_io_buf, list);
			if (!lpfc_cmd)
				return;
			qp = &phba->sli4_hba.hdwq[idx];
			lpfc_cmd->hdwq_no = idx;
			lpfc_cmd->hdwq = qp;
			lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
			spin_lock(&qp->io_buf_list_put_lock);
			list_add_tail(&lpfc_cmd->list,
				      &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			qp->total_io_bufs++;
			spin_unlock(&qp->io_buf_list_put_lock);
		}
	}
}
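/*
 * Note: buffers are dealt one per hardware queue per pass, like cards,
 * rather than in contiguous runs, so each queue gets an even share of
 * the (XRI-sorted) list and re-binds it via hdwq_no/hdwq for the fast
 * path's per-queue lookup.
 */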
/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt, io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc = 0, cnt;

	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */

	/* maximum number of xris available for io buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, /* ... */,
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max,
			els_xri_cnt);

	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* max io xri shrunk below the allocated io buffer count */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
			     phba->sli4_hba.io_xri_max;
		/* ... release io_xri_cnt surplus buffers ... */
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      lpfc_ncmd->data,
			      lpfc_ncmd->dma_handle);
		/* ... */
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* update xris associated with the remaining io buffers */
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag =
			phba->sli4_hba.xri_ids[lxri];
	}
	lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	/* ... */
	return rc;
}
/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
 *
 * This routine allocates nvme buffers for device with SLI-4 interface spec.
 *
 * Return codes
 *   int - number of IO buffers that were allocated and posted.
 **/
int
lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(post_nblist);

	phba->sli4_hba.io_xri_cnt = 0;
	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		/* ... allocate the lpfc_io_buf (lpfc_ncmd) itself ... */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}

		if (phba->cfg_xpsgl && !phba->nvmet_support) {
			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
		} else {
			/*
			 * 4K Page alignment is CRITICAL to BlockGuard,
			 * double check to be sure.
			 */
			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    (((unsigned long)(lpfc_ncmd->data) &
			      (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
				lpfc_printf_log(phba, KERN_ERR, /* ... */,
						(unsigned long)lpfc_ncmd->data);
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
				break;
			}
		}

		INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &lpfc_ncmd->cur_iocbq;

		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];

		/* Initialize local short-hand pointers. */
		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
		spin_lock_init(&lpfc_ncmd->buf_lock);

		/* add the io buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		phba->sli4_hba.io_xri_cnt++;
	}
	/* ... post the accumulated list to the port ... */
	return num_posted;
}
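/*
 * Worked example for the alignment test above: masking with
 * SLI4_PAGE_SIZE - 1 (0xFFF for 4K pages) keeps only the offset within
 * the page, so 0x12345800 & 0xFFF = 0x800 is rejected while
 * 0x12345000 & 0xFFF = 0 passes; BlockGuard requires the DMA buffer to
 * start exactly on a page boundary.
 */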
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
					      GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* ... read the HBA's NVPARAMs via a polled mailbox command ... */
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, /* ... */,
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t)-1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn is WWPN of HBA instance */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}
static int
lpfc_get_sg_tablesize(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			return LPFC_MAX_SG_TABLESIZE;
		else
			return phba->cfg_scsi_seg_cnt;
	}
	return phba->cfg_sg_seg_cnt;
}
/**
 * lpfc_vmid_res_alloc - Allocates resources for VMID
 *
 * Return codes
 *   0 on Success
 *   Non-0 on Failure
 */
static int
lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	/* VMID feature is supported only on SLI4 */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		phba->cfg_vmid_app_header = 0;
		phba->cfg_vmid_priority_tagging = 0;
		return 0;
	}

	/* ... if VMID is enabled ... */ {
		vport->vmid =
		    kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
			    GFP_KERNEL);
		if (!vport->vmid)
			return -ENOMEM;

		rwlock_init(&vport->vmid_lock);

		/* Set the VMID parameters for the vport */
		vport->vmid_priority_tagging =
		    phba->cfg_vmid_priority_tagging;
		vport->vmid_inactivity_timeout =
		    phba->cfg_vmid_inactivity_timeout;
		vport->max_vmid = phba->cfg_max_vmid;
		vport->cur_vmid_cnt = 0;

		vport->vmid_priority_range = bitmap_zalloc
			(/* ... */, GFP_KERNEL);

		if (!vport->vmid_priority_range) {
			kfree(vport->vmid);
			return -ENOMEM;
		}

		hash_init(vport->hash_table);
	}
	return 0;
}
/**
 * lpfc_create_port - Create an FC port
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost = NULL;
	struct scsi_host_template *template;
	int error = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 &&
	    dev == &phba->pcidev->dev) {
		/* ... */
	}

	/* Seed the host template and sg_tablesize for this port */
	if (dev == &phba->pcidev->dev) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
			/* ... */
		} else {
			/* ... */
			template->eh_host_reset_handler = NULL;
		}
		template->sg_tablesize = lpfc_get_sg_tablesize(phba);
	} else {
		/* NPIV vport */
		/* ... */
		template->sg_tablesize = lpfc_get_sg_tablesize(phba);
	}

	shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *)shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;
	/* ... */
	vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;

	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (!phba->cfg_fcp_mq_threshold ||
		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;

		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
					    phba->cfg_fcp_mq_threshold);

		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len - 1;
	} else {
		/* SLI-3 has a limited number of hardware queues (3),
		 * thus there is only one for FCP processing.
		 */
		shost->nr_hw_queues = 1;
	}

	/* Initial can_queue; adjusted later from the max xri at setup */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	lpfc_printf_log(phba, KERN_INFO, /* ... */,
			vport->port_type, shost->sg_tablesize,
			phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);

	error = lpfc_vmid_res_alloc(phba, vport);
	if (error)
		goto out_put_shost;

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
	timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);

	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->port_list_lock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->port_list_lock);
	return vport;

out_put_shost:
	kfree(vport->vmid);
	bitmap_free(vport->vmid_priority_range);
	scsi_host_put(shost);
out:
	return NULL;
}
/**
 * destroy_port - destroy an FC port
 **/
static void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	/* ... detach from the FC transport and SCSI midlayer ... */

	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_cleanup(vport);
}
/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * Return codes
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int lpfc_get_instance(void)
{
	int ret;

	ret = ida_alloc(&lpfc_hba_index, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
static int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	/* ... report done on an overall scan timeout ... */
	if (phba->link_state <= LPFC_LINK_DOWN) {
		/* ... link never came up; stop waiting ... */
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	fc_host_supported_speeds(shost) = 0;
	/*
	 * Avoid reporting supported link speed for FCoE as it can't be
	 * checked reliably.
	 */
	if (phba->hba_flag & HBA_FCOE_MODE)
		return;

	if (phba->lmt & LMT_256Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
	if (phba->lmt & LMT_128Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
	if (phba->lmt & LMT_64Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
}
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	/* Set fixed host attributes; must be done after lpfc_sli_hba_setup() */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	/* ... */
	lpfc_host_supported_speeds_set(shost);
	/* ... */
	fc_host_maxframe_size(shost) =
		(((uint32_t)vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t)vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
	/* ... */
	/* This value is also unchanging */
	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 *
 * Dispatches to the SLI3 or SLI4 stop routine through the per-HBA
 * API jump table.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 **/
static void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		jiffies + /* ... rediscovery wait interval ... */;

	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
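/*
 * Note: mod_timer() re-arms a pending timer in one step, so no
 * del_timer() is needed first. As an example of the jiffies math,
 * with HZ=250 a 2000 ms wait becomes msecs_to_jiffies(2000) = 500
 * ticks added to the current jiffies value (interval length assumed
 * for illustration only).
 */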
/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	/* ... wake up the worker thread ... */
}
/**
 * lpfc_vmid_poll - VMID timeout detection
 **/
static void lpfc_vmid_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
	u32 wake_up = 0;

	/* check if there is a need to issue QFPA */
	if (phba->pport->vmid_priority_tagging) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
	}

	/* Is there a need to invalidate inactive entries? */
	if (phba->pport->vmid_inactivity_timeout ||
	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
	}

	if (wake_up)
		lpfc_worker_wake_up(phba);

	/* restart the timer for the next iteration */
	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
							/* ... */));
}
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
/* ... */

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	/* ... */
	/* Ignore physical link up events - wait for logical link up */
	/* ... */
	return att_type;
}
/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		/* ... map LPFC_LINK_SPEED_* codes to Mbps ... */
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}
/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 **/
/* ... */
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;

	/* ... */
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		/* ... */
		return;
	}
	/* ... attach a DMA buffer for READ_TOPOLOGY ... */

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		/* ... parsed from the ACQE speed code ... */;
	phba->sli4_hba.link_state.duplex =
		bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
		bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
		bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
		bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
		bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
		/* ... */;

	lpfc_printf_log(phba, KERN_INFO, /* ... */,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			/* ... */,
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		/* ... issue the mailbox command and return ... */
		return;
	}
	/*
	 * For FCoE Mode: fill in the topology locally and invoke the
	 * READ_TOPOLOGY completion routine directly.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	/* ... */
}
/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology speed code.
 **/
/* ... */
void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
	if (!phba->rx_monitor) {
		/* ... nothing allocated to dump ... */
		return;
	}
	lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
			       /* ... */);
}
/**
 * lpfc_cgn_update_stat - Save data into congestion stats buffer
 *
 * Increment the FPIN received counter/time when it happens.
 */
void
lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
{
	struct lpfc_cgn_info *cp;
	struct tm broken;
	struct timespec64 cur_time;
	uint32_t cnt;
	uint32_t value;

	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
	ktime_get_real_ts64(&cur_time);
	time64_to_tm(cur_time.tv_sec, 0, &broken);

	/* Update congestion statistics */
	switch (dtag) {
	case ELS_DTAG_LNK_INTEGRITY:
		cnt = le32_to_cpu(cp->link_integ_notification);
		cnt++;
		cp->link_integ_notification = cpu_to_le32(cnt);

		cp->cgn_stat_lnk_month = broken.tm_mon + 1;
		cp->cgn_stat_lnk_day = broken.tm_mday;
		cp->cgn_stat_lnk_year = broken.tm_year - 100;
		cp->cgn_stat_lnk_hour = broken.tm_hour;
		cp->cgn_stat_lnk_min = broken.tm_min;
		cp->cgn_stat_lnk_sec = broken.tm_sec;
		break;
	case ELS_DTAG_DELIVERY:
		cnt = le32_to_cpu(cp->delivery_notification);
		cnt++;
		cp->delivery_notification = cpu_to_le32(cnt);

		cp->cgn_stat_del_month = broken.tm_mon + 1;
		cp->cgn_stat_del_day = broken.tm_mday;
		cp->cgn_stat_del_year = broken.tm_year - 100;
		cp->cgn_stat_del_hour = broken.tm_hour;
		cp->cgn_stat_del_min = broken.tm_min;
		cp->cgn_stat_del_sec = broken.tm_sec;
		break;
	case ELS_DTAG_PEER_CONGEST:
		cnt = le32_to_cpu(cp->cgn_peer_notification);
		cnt++;
		cp->cgn_peer_notification = cpu_to_le32(cnt);

		cp->cgn_stat_peer_month = broken.tm_mon + 1;
		cp->cgn_stat_peer_day = broken.tm_mday;
		cp->cgn_stat_peer_year = broken.tm_year - 100;
		cp->cgn_stat_peer_hour = broken.tm_hour;
		cp->cgn_stat_peer_min = broken.tm_min;
		cp->cgn_stat_peer_sec = broken.tm_sec;
		break;
	case ELS_DTAG_CONGESTION:
		cnt = le32_to_cpu(cp->cgn_notification);
		cnt++;
		cp->cgn_notification = cpu_to_le32(cnt);

		cp->cgn_stat_cgn_month = broken.tm_mon + 1;
		cp->cgn_stat_cgn_day = broken.tm_mday;
		cp->cgn_stat_cgn_year = broken.tm_year - 100;
		cp->cgn_stat_cgn_hour = broken.tm_hour;
		cp->cgn_stat_cgn_min = broken.tm_min;
		cp->cgn_stat_cgn_sec = broken.tm_sec;
		break;
	}
	if (phba->cgn_fpin_frequency &&
	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}
	/* ... recompute the buffer checksum ... */
	cp->cgn_info_crc = cpu_to_le32(value);
}
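/*
 * Note on the timestamp encoding: the fields are single bytes, so
 * tm_mon (0-11) is stored +1 as 1-12 and tm_year (years since 1900)
 * is stored -100 as years since 2000, e.g. 2024 -> 124 - 100 = 24.
 * The trailing CRC over the whole info block lets the consumer detect
 * a torn or stale update.
 */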
/**
 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
 *
 * Save the congestion event counts every minute. On the hour, collapse
 * the minute data into hour data; every day, collapse the hour data
 * into daily data.
 */
static void
lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
{
	struct lpfc_cgn_info *cp;
	struct tm broken;
	uint32_t i, index;
	uint16_t value, mvalue;
	uint64_t bps;
	uint32_t mbps;
	uint32_t dvalue, wvalue, lvalue, avalue;
	uint64_t latsum;
	__le16 *ptr, *mptr;
	__le32 *lptr;

	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	if (time_before(jiffies, phba->cgn_evt_timestamp))
		return;
	phba->cgn_evt_timestamp = jiffies +
			msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
	phba->cgn_evt_minute++;

	/* We should get to this point in the routine on 1 minute intervals */

	if (phba->cgn_fpin_frequency &&
	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}

	/* Read and clear the latency counters for this minute */
	lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
	latsum = atomic64_read(&phba->cgn_latency_evt);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);

	/* Store MB/sec bandwidth: rx_block_cnt counts 512 byte blocks
	 * for the entire minute, bps = bytes per second.
	 */
	bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
	phba->rx_block_cnt = 0;
	mvalue = bps / (1024 * 1024); /* convert to MB/sec */

	/* Every minute: cgn parameters */
	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;

	/* Fill in default LUN qdepth */
	value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
	cp->cgn_lunq = cpu_to_le16(value);

	/* Record congestion buffer info - every minute
	 * cgn_driver_evt_cnt (Driver events)
	 * cgn_fabric_warn_cnt (Congestion Warnings)
	 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
	 * cgn_fabric_alarm_cnt (Congestion Alarms)
	 */
	index = ++cp->cgn_index_minute;
	if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
		cp->cgn_index_minute = 0;
		index = 0;
	}

	/* Get the number of driver events in this sample period */
	dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
	atomic_set(&phba->cgn_driver_evt_cnt, 0);

	/* Get the number of warning events - FPIN and Signal for this minute */
	wvalue = 0;
	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);

	/* Get the number of alarm events - FPIN and Signal for this minute */
	avalue = 0;
	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);

	/* Save this minute's counts into the minute slots */
	ptr = &cp->cgn_drvr_min[index];
	*ptr = cpu_to_le16((uint16_t)dvalue);

	ptr = &cp->cgn_warn_min[index];
	*ptr = cpu_to_le16((uint16_t)wvalue);

	ptr = &cp->cgn_alarm_min[index];
	*ptr = cpu_to_le16((uint16_t)avalue);

	lptr = &cp->cgn_latency_min[index];
	if (lvalue) {
		lvalue = (uint32_t)div_u64(latsum, lvalue);
		*lptr = cpu_to_le32(lvalue);
	} else {
		*lptr = 0;
	}

	mptr = &cp->cgn_bw_min[index];
	*mptr = cpu_to_le16(mvalue);

	lpfc_printf_log(phba, KERN_INFO, /* ... */,
			"2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
			index, dvalue, wvalue, lvalue, mvalue, avalue);

	/* Every hour */
	if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
		/* Record congestion buffer info - every hour
		 * Collapse all minutes into an hour
		 */
		index = ++cp->cgn_index_hour;
		if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
			cp->cgn_index_hour = 0;
			index = 0;
		}

		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		avalue = 0;
		mbps = 0;

		for (i = 0; i < LPFC_MIN_HOUR; i++) {
			dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
			wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
			lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
			mbps += le16_to_cpu(cp->cgn_bw_min[i]);
			avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
		}
		if (lvalue)		/* Avg of latency averages */
			lvalue /= LPFC_MIN_HOUR;
		if (mbps)		/* Avg of Bandwidth averages */
			mvalue = mbps / LPFC_MIN_HOUR;

		lptr = &cp->cgn_drvr_hr[index];
		*lptr = cpu_to_le32(dvalue);
		lptr = &cp->cgn_warn_hr[index];
		*lptr = cpu_to_le32(wvalue);
		lptr = &cp->cgn_latency_hr[index];
		*lptr = cpu_to_le32(lvalue);
		mptr = &cp->cgn_bw_hr[index];
		*mptr = cpu_to_le16(mvalue);
		lptr = &cp->cgn_alarm_hr[index];
		*lptr = cpu_to_le32(avalue);

		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				"2419 Congestion Info - hour "
				"(%d): %d %d %d %d %d\n",
				index, dvalue, wvalue, lvalue, mvalue,
				avalue);
	}

	/* Every day */
	if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
		/* Record congestion buffer info - every day
		 * Collapse all hours into a day. Rotate days
		 * after LPFC_MAX_CGN_DAYS.
		 */
		index = ++cp->cgn_index_day;
		if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
			cp->cgn_index_day = 0;
			index = 0;
		}

		/* Anytime we overwrite daily index 0 after wrapping, we are
		 * overwriting the oldest day, so update the congestion data
		 * start time for that day (saved when the last full set of
		 * days was written).
		 */
		if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
			time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);

			cp->cgn_info_month = broken.tm_mon + 1;
			cp->cgn_info_day = broken.tm_mday;
			cp->cgn_info_year = broken.tm_year - 100;
			cp->cgn_info_hour = broken.tm_hour;
			cp->cgn_info_minute = broken.tm_min;
			cp->cgn_info_second = broken.tm_sec;

			lpfc_printf_log(phba, KERN_INFO, /* ... */,
					/* ... */
					cp->cgn_info_day, cp->cgn_info_month,
					cp->cgn_info_year, cp->cgn_info_hour,
					cp->cgn_info_minute,
					cp->cgn_info_second);
		}

		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		mvalue = 0;
		avalue = 0;
		mbps = 0;

		for (i = 0; i < LPFC_HOUR_DAY; i++) {
			dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
			wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
			lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
			mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
			avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
		}
		if (lvalue)		/* Avg of latency averages */
			lvalue /= LPFC_HOUR_DAY;
		if (mbps)		/* Avg of Bandwidth averages */
			mvalue = mbps / LPFC_HOUR_DAY;

		lptr = &cp->cgn_drvr_day[index];
		*lptr = cpu_to_le32(dvalue);
		lptr = &cp->cgn_warn_day[index];
		*lptr = cpu_to_le32(wvalue);
		lptr = &cp->cgn_latency_day[index];
		*lptr = cpu_to_le32(lvalue);
		mptr = &cp->cgn_bw_day[index];
		*mptr = cpu_to_le16(mvalue);
		lptr = &cp->cgn_alarm_day[index];
		*lptr = cpu_to_le32(avalue);

		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				"2420 Congestion Info - daily (%d): "
				"%d %d %d %d %d\n",
				index, dvalue, wvalue, lvalue, mvalue,
				avalue);

		/* We just wrote LPFC_MAX_CGN_DAYS of data,
		 * so we are wrapped and need to save our start time.
		 */
		if (index == (LPFC_MAX_CGN_DAYS - 1)) {
			phba->hba_flag |= HBA_CGN_DAY_WRAP;
			ktime_get_real_ts64(&phba->cgn_daily_ts);
		}
	}

	/* Use the frequency found in the last rcv'ed FPIN */
	value = phba->cgn_fpin_frequency;
	cp->cgn_warn_freq = cpu_to_le16(value);
	cp->cgn_alarm_freq = cpu_to_le16(value);

	/* ... recompute the buffer checksum ... */
	cp->cgn_info_crc = cpu_to_le32(lvalue);
}
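/*
 * Note: the congestion buffer is three nested ring buffers: minute
 * slots roll up into hour slots every LPFC_MIN_HOUR minutes, and hour
 * slots into day slots every LPFC_MIN_DAY minutes. Counts are summed
 * on rollup while latency and bandwidth are averaged; e.g. with 60
 * minute slots (presumably LPFC_MIN_HOUR), minute 120 writes hour
 * slot 2 from the previous 60 minute entries.
 */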
/**
 * lpfc_calc_cmf_latency - latency from start of rxate timer interval
 *
 * Calculates the latency in milliseconds from the beginning of the CMF
 * timer interval to the current point in time.
 */
uint32_t
lpfc_calc_cmf_latency(struct lpfc_hba *phba)
{
	struct timespec64 cmpl_time;
	uint32_t msec = 0;

	ktime_get_real_ts64(&cmpl_time);

	/* This routine works on a ms granularity so sec and nsec are
	 * converted accordingly.
	 */
	if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
		msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
			NSEC_PER_MSEC;
	} else {
		if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
			msec = (cmpl_time.tv_sec -
				phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
			msec += ((cmpl_time.tv_nsec -
				  phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
		} else {
			msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
				1) * MSEC_PER_SEC;
			msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
				 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
		}
	}
	return msec;
}
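/*
 * Worked example of the borrow case: start 10.900s, completion
 * 12.100s. tv_nsec 1e8 < 9e8, so msec = (12 - 10 - 1) * 1000 = 1000,
 * plus ((1e9 - 9e8) + 1e8) / 1e6 = 200, giving 1200 ms -- exactly
 * 12.100 - 10.900 without ever forming a negative nanosecond value.
 */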
/**
 * lpfc_cmf_timer - This is the timer function for one congestion
 * rate interval.
 **/
static enum hrtimer_restart
lpfc_cmf_timer(struct hrtimer *timer)
{
	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
					     cmf_timer);
	struct lpfc_cgn_stat *cgs;
	uint32_t io_cnt, busy, max_read;
	uint64_t total, rcv, lat, mbpi, extra, cnt;
	uint32_t timer_interval = LPFC_CMF_INTERVAL;
	int cpu;

	/* Only restart the timer if congestion mgmt is on */
	if (phba->cmf_active_mode == LPFC_CFG_OFF ||
	    !phba->cmf_latency.tv_sec) {
		lpfc_printf_log(phba, KERN_INFO, /* ... */,
				phba->cmf_active_mode,
				(uint64_t)phba->cmf_latency.tv_sec);
		return HRTIMER_NORESTART;
	}

	/* If pport is not ready yet, just exit and wait for
	 * the next timer cycle to hit.
	 */
	if (!phba->pport)
		goto skip;

	/* Do not block SCSI IO while in the timer routine since
	 * total_bytes will be cleared
	 */
	atomic_set(&phba->cmf_stop_io, 1);
	/* ... */
	/* Immediately after computing the elapsed interval, set the
	 * start time for the next one.
	 */
	ktime_get_real_ts64(&phba->cmf_latency);

	phba->cmf_link_byte_count =
		div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);

	/* Collect all the stats from the prior timer interval */
	total = 0;
	io_cnt = 0;
	lat = 0;
	rcv = 0;
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		total += atomic64_xchg(&cgs->total_bytes, 0);
		io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
		lat += atomic64_xchg(&cgs->rx_latency, 0);
		rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
	}

	/* In Managed mode the target bandwidth for the next interval
	 * comes from the last CMF_SYNC_WQE completion.
	 */
	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
	    phba->link_state != LPFC_LINK_DOWN &&
	    phba->hba_flag & HBA_SETUP) {
		mbpi = phba->cmf_last_sync_bw;
		phba->cmf_last_sync_bw = 0;
		extra = 0;
		/* ... */
		if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
			extra = cnt - total;
		/* ... issue the next CMF_SYNC_WQE ... */
	} else {
		/* Monitor mode or link down: use the full link rate */
		mbpi = phba->cmf_link_byte_count;
		extra = 0;
	}
	phba->cmf_timer_cnt++;

	if (io_cnt) {
		/* Update congestion info buffer latency */
		atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
		atomic64_add(lat, &phba->cgn_latency_evt);
	}
	busy = atomic_xchg(&phba->cmf_busy, 0);
	max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);

	/* Calculate MBPI for the next timer interval */
	if (mbpi) {
		if (mbpi > phba->cmf_link_byte_count ||
		    phba->cmf_active_mode == LPFC_CFG_MONITOR)
			mbpi = phba->cmf_link_byte_count;
		/* Apply what the prior CMF_SYNC_WQE completion indicated */
		if (mbpi != phba->cmf_max_bytes_per_interval)
			phba->cmf_max_bytes_per_interval = mbpi;
	}

	/* Save rxmonitor information for debug */
	if (phba->rx_monitor) {
		/* ... fill in byte counts for this interval ... */
		entry.cmf_info = phba->cmf_active_info;
		/* ... */
		if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
			entry.timer_utilization = phba->cmf_last_ts;
		else
			entry.timer_utilization = /* ... */ 0;
		phba->cmf_last_ts = 0;

		lpfc_rx_monitor_record(phba->rx_monitor, &entry);
	}

	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
		/* If Monitor mode, check if we are oversubscribed
		 * against the full line rate.
		 */
		if (mbpi && total > mbpi)
			atomic_inc(&phba->cgn_driver_evt_cnt);
	}
	phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */

	/* If the next congestion-buffer heartbeat lands inside this
	 * interval, shorten the interval and scale the byte budget.
	 */
	phba->hba_flag &= ~HBA_SHORT_CMF;
	if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
		       phba->cgn_evt_timestamp)) {
		timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
						  jiffies);
		if (timer_interval <= 0)
			timer_interval = LPFC_CMF_INTERVAL;
		else
			phba->hba_flag |= HBA_SHORT_CMF;

		phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
						    timer_interval, 1000);
		if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
			phba->cmf_max_bytes_per_interval =
				phba->cmf_link_byte_count;
	}

	/* total_bytes is already zeroed, so it is safe to unblock after
	 * max_bytes_per_interval is set up.
	 */
	if (atomic_xchg(&phba->cmf_bw_wait, 0))
		queue_work(phba->wq, &phba->unblock_request_work);

	/* SCSI IO is now unblocked */
	atomic_set(&phba->cmf_stop_io, 0);

skip:
	/* ... re-arm for the next interval ... */
	return HRTIMER_RESTART;
}
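/*
 * Note: each tick closes the books on the previous interval --
 * atomic64_xchg(..., 0) reads and resets each per-CPU counter in one
 * step, so IOs completing concurrently simply land in the next
 * interval -- and then derives the next interval's byte budget either
 * from CMF_SYNC fabric feedback (Managed mode) or the raw line rate
 * (Monitor mode).
 */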
6179 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6194 phba->sli4_hba.link_state.speed = in lpfc_update_trunk_link_status()
6198 phba->sli4_hba.link_state.logical_speed = in lpfc_update_trunk_link_status()
6201 phba->fc_linkspeed = in lpfc_update_trunk_link_status()
6207 phba->trunk_link.link0.state = in lpfc_update_trunk_link_status()
6210 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; in lpfc_update_trunk_link_status()
6214 phba->trunk_link.link1.state = in lpfc_update_trunk_link_status()
6217 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; in lpfc_update_trunk_link_status()
6221 phba->trunk_link.link2.state = in lpfc_update_trunk_link_status()
6224 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; in lpfc_update_trunk_link_status()
6228 phba->trunk_link.link3.state = in lpfc_update_trunk_link_status()
6231 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; in lpfc_update_trunk_link_status()
6236 phba->trunk_link.phy_lnk_speed = in lpfc_update_trunk_link_status()
6237 phba->sli4_hba.link_state.logical_speed / (cnt * 1000); in lpfc_update_trunk_link_status()
6239 phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN; in lpfc_update_trunk_link_status()
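/*
 * The logical speed appears to be the trunk aggregate in Mbps;
 * dividing by the active link count (cnt) and by 1000 yields the
 * per-physical-link speed in Gbps. With no links up, the speed is
 * left as unknown.
 */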
6242 "2910 Async FC Trunking Event - Speed:%d\n" in lpfc_update_trunk_link_status()
6245 phba->sli4_hba.link_state.speed, in lpfc_update_trunk_link_status()
6246 phba->sli4_hba.link_state.logical_speed, in lpfc_update_trunk_link_status()
6250 if (phba->cmf_active_mode != LPFC_CFG_OFF) in lpfc_update_trunk_link_status()
6257 * SLI-4: We have only 0xA error codes in lpfc_update_trunk_link_status()
6269 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6300 phba->sli4_hba.link_state.speed = in lpfc_sli4_async_fc_evt()
6303 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; in lpfc_sli4_async_fc_evt()
6304 phba->sli4_hba.link_state.topology = in lpfc_sli4_async_fc_evt()
6306 phba->sli4_hba.link_state.status = in lpfc_sli4_async_fc_evt()
6308 phba->sli4_hba.link_state.type = in lpfc_sli4_async_fc_evt()
6310 phba->sli4_hba.link_state.number = in lpfc_sli4_async_fc_evt()
6312 phba->sli4_hba.link_state.fault = in lpfc_sli4_async_fc_evt()
6317 phba->sli4_hba.link_state.logical_speed = 0; in lpfc_sli4_async_fc_evt()
6318 else if (!phba->sli4_hba.conf_trunk) in lpfc_sli4_async_fc_evt()
6319 phba->sli4_hba.link_state.logical_speed = in lpfc_sli4_async_fc_evt()
6323 "2896 Async FC event - Speed:%dGBaud Topology:x%x " in lpfc_sli4_async_fc_evt()
6326 phba->sli4_hba.link_state.speed, in lpfc_sli4_async_fc_evt()
6327 phba->sli4_hba.link_state.topology, in lpfc_sli4_async_fc_evt()
6328 phba->sli4_hba.link_state.status, in lpfc_sli4_async_fc_evt()
6329 phba->sli4_hba.link_state.type, in lpfc_sli4_async_fc_evt()
6330 phba->sli4_hba.link_state.number, in lpfc_sli4_async_fc_evt()
6331 phba->sli4_hba.link_state.logical_speed, in lpfc_sli4_async_fc_evt()
6332 phba->sli4_hba.link_state.fault); in lpfc_sli4_async_fc_evt()
6333 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli4_async_fc_evt()
6350 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; in lpfc_sli4_async_fc_evt()
6353 phba->sli.slistat.link_event++; in lpfc_sli4_async_fc_evt()
6356 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); in lpfc_sli4_async_fc_evt()
6357 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; in lpfc_sli4_async_fc_evt()
6358 pmb->vport = phba->pport; in lpfc_sli4_async_fc_evt()
6360 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { in lpfc_sli4_async_fc_evt()
6361 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); in lpfc_sli4_async_fc_evt()
6363 switch (phba->sli4_hba.link_state.status) { in lpfc_sli4_async_fc_evt()
6365 phba->link_flag |= LS_MDS_LINK_DOWN; in lpfc_sli4_async_fc_evt()
6368 phba->link_flag |= LS_MDS_LOOPBACK; in lpfc_sli4_async_fc_evt()
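/*
 * For the MDS diagnostic states latched above, the port is seemingly
 * not re-queried: the mailbox below is marked MBX_SUCCESS by hand so
 * that the READ_TOPOLOGY completion handler can run against the
 * simulated event instead of a real mailbox round trip.
 */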
6375 mb = &pmb->u.mb; in lpfc_sli4_async_fc_evt()
6376 mb->mbxStatus = MBX_SUCCESS; in lpfc_sli4_async_fc_evt()
6382 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; in lpfc_sli4_async_fc_evt()
6383 la->eventTag = acqe_fc->event_tag; in lpfc_sli4_async_fc_evt()
6385 if (phba->sli4_hba.link_state.status == in lpfc_sli4_async_fc_evt()
6409 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6433 "2901 Async SLI event - Type:%d, Event Data: x%08x " in lpfc_sli4_async_sli_evt()
6435 acqe_sli->event_data1, acqe_sli->event_data2, in lpfc_sli4_async_sli_evt()
6436 acqe_sli->event_data3, acqe_sli->trailer); in lpfc_sli4_async_sli_evt()
6438 port_name = phba->Port[0]; in lpfc_sli4_async_sli_evt()
6446 temp_event_data.data = (uint32_t)acqe_sli->event_data1; in lpfc_sli4_async_sli_evt()
6449 "3190 Over Temperature:%d Celsius- Port Name %c\n", in lpfc_sli4_async_sli_evt()
6450 acqe_sli->event_data1, port_name); in lpfc_sli4_async_sli_evt()
6452 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; in lpfc_sli4_async_sli_evt()
6453 shost = lpfc_shost_from_vport(phba->pport); in lpfc_sli4_async_sli_evt()
6463 temp_event_data.data = (uint32_t)acqe_sli->event_data1; in lpfc_sli4_async_sli_evt()
6466 "3191 Normal Temperature:%d Celsius - Port Name %c\n", in lpfc_sli4_async_sli_evt()
6467 acqe_sli->event_data1, port_name); in lpfc_sli4_async_sli_evt()
6469 shost = lpfc_shost_from_vport(phba->pport); in lpfc_sli4_async_sli_evt()
6478 &acqe_sli->event_data1; in lpfc_sli4_async_sli_evt()
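/*
 * The misconfigured-port ACQE packs one status byte per physical
 * port into event_data1; the switch below picks out the byte that
 * corresponds to this HBA's link number.
 */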
6481 switch (phba->sli4_hba.lnk_info.lnk_no) { in lpfc_sli4_async_sli_evt()
6484 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6486 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6490 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6492 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6496 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6498 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6502 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6504 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6511 phba->sli4_hba.lnk_info.lnk_no); in lpfc_sli4_async_sli_evt()
6516 if (phba->sli4_hba.lnk_info.optic_state == status) in lpfc_sli4_async_sli_evt()
6525 "installed/not installed - Reseat optics, " in lpfc_sli4_async_sli_evt()
6530 "Optics of two types installed - Remove one " in lpfc_sli4_async_sli_evt()
6534 sprintf(message, "Incompatible optics - Replace with " in lpfc_sli4_async_sli_evt()
6538 sprintf(message, "Unqualified optics - Replace with " in lpfc_sli4_async_sli_evt()
6540 "Support - Link is%s operational", in lpfc_sli4_async_sli_evt()
6544 sprintf(message, "Uncertified optics - Replace with " in lpfc_sli4_async_sli_evt()
6545 "Avago-certified optics to enable link " in lpfc_sli4_async_sli_evt()
6546 "operation - Link is%s operational", in lpfc_sli4_async_sli_evt()
6558 phba->lmt = 0; in lpfc_sli4_async_sli_evt()
6572 for (i = 0; i <= phba->max_vports && vports[i] != NULL; in lpfc_sli4_async_sli_evt()
6580 phba->sli4_hba.lnk_info.optic_state = status; in lpfc_sli4_async_sli_evt()
6586 "3192 Remote DPort Test Initiated - " in lpfc_sli4_async_sli_evt()
6588 acqe_sli->event_data1, acqe_sli->event_data2); in lpfc_sli4_async_sli_evt()
6596 * to use FA-WWN, but the attached device doesn't support it. in lpfc_sli4_async_sli_evt()
6597 * Event Data1 - N.A, Event Data2 - N.A in lpfc_sli4_async_sli_evt()
6601 "2699 Misconfigured FA-PWWN - Attached device " in lpfc_sli4_async_sli_evt()
6602 "does not support FA-PWWN\n"); in lpfc_sli4_async_sli_evt()
6603 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC; in lpfc_sli4_async_sli_evt()
6604 memset(phba->pport->fc_portname.u.wwn, 0, in lpfc_sli4_async_sli_evt()
6610 "2518 EEPROM failure - " in lpfc_sli4_async_sli_evt()
6612 acqe_sli->event_data1, acqe_sli->event_data2); in lpfc_sli4_async_sli_evt()
6615 if (phba->cmf_active_mode == LPFC_CFG_OFF) in lpfc_sli4_async_sli_evt()
6618 &acqe_sli->event_data1; in lpfc_sli4_async_sli_evt()
6619 phba->cgn_acqe_cnt++; in lpfc_sli4_async_sli_evt()
6622 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); in lpfc_sli4_async_sli_evt()
6623 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); in lpfc_sli4_async_sli_evt()
6628 if (cgn_signal->alarm_cnt) { in lpfc_sli4_async_sli_evt()
6629 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { in lpfc_sli4_async_sli_evt()
6631 atomic_add(cgn_signal->alarm_cnt, in lpfc_sli4_async_sli_evt()
6632 &phba->cgn_sync_alarm_cnt); in lpfc_sli4_async_sli_evt()
6636 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || in lpfc_sli4_async_sli_evt()
6637 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { in lpfc_sli4_async_sli_evt()
6639 atomic_add(cnt, &phba->cgn_sync_warn_cnt); in lpfc_sli4_async_sli_evt()
6649 acqe_sli->event_data1, acqe_sli->event_data2, in lpfc_sli4_async_sli_evt()
6650 acqe_sli->event_data3); in lpfc_sli4_async_sli_evt()
6661 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6679 phba = vport->phba; in lpfc_sli4_perform_vport_cvl()
6689 ndlp->nlp_type |= NLP_FABRIC; in lpfc_sli4_perform_vport_cvl()
6693 if ((phba->pport->port_state < LPFC_FLOGI) && in lpfc_sli4_perform_vport_cvl()
6694 (phba->pport->port_state != LPFC_VPORT_FAILED)) in lpfc_sli4_perform_vport_cvl()
6697 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) in lpfc_sli4_perform_vport_cvl()
6698 && (vport->port_state != LPFC_VPORT_FAILED)) in lpfc_sli4_perform_vport_cvl()
6705 spin_lock_irq(shost->host_lock); in lpfc_sli4_perform_vport_cvl()
6706 vport->fc_flag |= FC_VPORT_CVL_RCVD; in lpfc_sli4_perform_vport_cvl()
6707 spin_unlock_irq(shost->host_lock); in lpfc_sli4_perform_vport_cvl()
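/*
 * FC_VPORT_CVL_RCVD only records that a clear-virtual-link was seen
 * on this vport; the actual recovery (FDISC or physical port
 * re-login) is decided later when the CVL event is processed against
 * all affected vports.
 */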
6713 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6727 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) in lpfc_sli4_perform_all_vport_cvl()
6733 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6751 phba->fc_eventTag = acqe_fip->event_tag; in lpfc_sli4_async_fip_evt()
6752 phba->fcoe_eventtag = acqe_fip->event_tag; in lpfc_sli4_async_fip_evt()
6760 acqe_fip->event_tag, in lpfc_sli4_async_fip_evt()
6761 acqe_fip->index); in lpfc_sli4_async_fip_evt()
6767 acqe_fip->event_tag, in lpfc_sli4_async_fip_evt()
6768 acqe_fip->index); in lpfc_sli4_async_fip_evt()
6769 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { in lpfc_sli4_async_fip_evt()
6779 acqe_fip->index); in lpfc_sli4_async_fip_evt()
6780 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); in lpfc_sli4_async_fip_evt()
6784 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6785 if (phba->hba_flag & FCF_TS_INPROG) { in lpfc_sli4_async_fip_evt()
6786 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6790 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { in lpfc_sli4_async_fip_evt()
6791 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6796 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { in lpfc_sli4_async_fip_evt()
6797 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6800 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6802 /* Otherwise, scan the entire FCF table and re-discover SAN */ in lpfc_sli4_async_fip_evt()
6806 acqe_fip->event_tag, acqe_fip->index); in lpfc_sli4_async_fip_evt()
6819 acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6823 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; in lpfc_sli4_async_fip_evt()
6826 "tag:x%x\n", acqe_fip->index, in lpfc_sli4_async_fip_evt()
6827 acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6832 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6833 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && in lpfc_sli4_async_fip_evt()
6834 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { in lpfc_sli4_async_fip_evt()
6835 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6837 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); in lpfc_sli4_async_fip_evt()
6840 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6843 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) in lpfc_sli4_async_fip_evt()
6852 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6854 phba->fcf.fcf_flag |= FCF_DEAD_DISC; in lpfc_sli4_async_fip_evt()
6855 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6860 "\n", acqe_fip->event_tag, acqe_fip->index); in lpfc_sli4_async_fip_evt()
6868 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6869 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; in lpfc_sli4_async_fip_evt()
6870 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6887 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; in lpfc_sli4_async_fip_evt()
6891 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6894 acqe_fip->index); in lpfc_sli4_async_fip_evt()
6902 for (i = 0; i <= phba->max_vports && vports[i] != NULL; in lpfc_sli4_async_fip_evt()
6904 if ((!(vports[i]->fc_flag & in lpfc_sli4_async_fip_evt()
6906 (vports[i]->port_state > LPFC_FDISC)) { in lpfc_sli4_async_fip_evt()
6915 * Don't re-instantiate if vport is marked for deletion. in lpfc_sli4_async_fip_evt()
6919 if (!(vport->load_flag & FC_UNLOADING) && in lpfc_sli4_async_fip_evt()
6923 * re-instantiate the Vlink using FDISC. in lpfc_sli4_async_fip_evt()
6925 mod_timer(&ndlp->nlp_delayfunc, in lpfc_sli4_async_fip_evt()
6927 spin_lock_irq(&ndlp->lock); in lpfc_sli4_async_fip_evt()
6928 ndlp->nlp_flag |= NLP_DELAY_TMO; in lpfc_sli4_async_fip_evt()
6929 spin_unlock_irq(&ndlp->lock); in lpfc_sli4_async_fip_evt()
6930 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; in lpfc_sli4_async_fip_evt()
6931 vport->port_state = LPFC_FDISC; in lpfc_sli4_async_fip_evt()
6940 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6941 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { in lpfc_sli4_async_fip_evt()
6942 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6946 phba->fcf.fcf_flag |= FCF_ACVL_DISC; in lpfc_sli4_async_fip_evt()
6947 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6951 "evt_tag:x%x\n", acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6959 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6960 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; in lpfc_sli4_async_fip_evt()
6961 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6963 * Last resort will be re-try on the in lpfc_sli4_async_fip_evt()
6978 "0x%x\n", event_type, acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6984 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6994 phba->fc_eventTag = acqe_dcbx->event_tag; in lpfc_sli4_async_dcbx_evt()
7001 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
7015 phba->fc_eventTag = acqe_grp5->event_tag; in lpfc_sli4_async_grp5_evt()
7016 phba->fcoe_eventtag = acqe_grp5->event_tag; in lpfc_sli4_async_grp5_evt()
7017 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; in lpfc_sli4_async_grp5_evt()
7018 phba->sli4_hba.link_state.logical_speed = in lpfc_sli4_async_grp5_evt()
7023 phba->sli4_hba.link_state.logical_speed); in lpfc_sli4_async_grp5_evt()
7027 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7036 if (!phba->cgn_i) in lpfc_sli4_async_cmstat_evt()
7042 * lpfc_cgn_params_val - Validate FW congestion parameters.
7052 spin_lock_irq(&phba->hbalock); in lpfc_cgn_params_val()
7054 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, in lpfc_cgn_params_val()
7058 p_cfg_param->cgn_param_mode); in lpfc_cgn_params_val()
7059 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; in lpfc_cgn_params_val()
7062 spin_unlock_irq(&phba->hbalock); in lpfc_cgn_params_val()
7072 * lpfc_cgn_params_parse - Process a FW cong parm change event
7079 * valid, in-range values. If the signature magic is correct and
7095 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { in lpfc_cgn_params_parse()
7103 p_cgn_param->cgn_param_magic, in lpfc_cgn_params_parse()
7104 p_cgn_param->cgn_param_version, in lpfc_cgn_params_parse()
7105 p_cgn_param->cgn_param_mode, in lpfc_cgn_params_parse()
7106 p_cgn_param->cgn_param_level0, in lpfc_cgn_params_parse()
7107 p_cgn_param->cgn_param_level1, in lpfc_cgn_params_parse()
7108 p_cgn_param->cgn_param_level2, in lpfc_cgn_params_parse()
7109 p_cgn_param->byte13, in lpfc_cgn_params_parse()
7110 p_cgn_param->byte14, in lpfc_cgn_params_parse()
7111 p_cgn_param->byte15, in lpfc_cgn_params_parse()
7112 p_cgn_param->byte11, in lpfc_cgn_params_parse()
7113 p_cgn_param->byte12, in lpfc_cgn_params_parse()
7114 phba->cmf_active_mode); in lpfc_cgn_params_parse()
7116 oldmode = phba->cmf_active_mode; in lpfc_cgn_params_parse()
7124 spin_lock_irq(&phba->hbalock); in lpfc_cgn_params_parse()
7125 memcpy(&phba->cgn_p, p_cgn_param, in lpfc_cgn_params_parse()
7129 if (phba->cgn_i) { in lpfc_cgn_params_parse()
7130 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; in lpfc_cgn_params_parse()
7131 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; in lpfc_cgn_params_parse()
7132 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; in lpfc_cgn_params_parse()
7133 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; in lpfc_cgn_params_parse()
7134 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; in lpfc_cgn_params_parse()
7137 cp->cgn_info_crc = cpu_to_le32(crc); in lpfc_cgn_params_parse()
7139 spin_unlock_irq(&phba->hbalock); in lpfc_cgn_params_parse()
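/*
 * The new FW parameters are captured under hbalock and mirrored into
 * the DMA-shared congestion info buffer (cgn_i), together with a
 * freshly computed cgn_info_crc, so the buffer always presents a
 * self-consistent snapshot to its consumers.
 */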
7141 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; in lpfc_cgn_params_parse()
7145 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { in lpfc_cgn_params_parse()
7149 if (phba->link_state >= LPFC_LINK_UP) { in lpfc_cgn_params_parse()
7150 phba->cgn_reg_fpin = in lpfc_cgn_params_parse()
7151 phba->cgn_init_reg_fpin; in lpfc_cgn_params_parse()
7152 phba->cgn_reg_signal = in lpfc_cgn_params_parse()
7153 phba->cgn_init_reg_signal; in lpfc_cgn_params_parse()
7154 lpfc_issue_els_edc(phba->pport, 0); in lpfc_cgn_params_parse()
7159 switch (phba->cgn_p.cgn_param_mode) { in lpfc_cgn_params_parse()
7163 if (phba->link_state >= LPFC_LINK_UP) in lpfc_cgn_params_parse()
7164 lpfc_issue_els_edc(phba->pport, 0); in lpfc_cgn_params_parse()
7167 phba->cmf_max_bytes_per_interval = in lpfc_cgn_params_parse()
7168 phba->cmf_link_byte_count; in lpfc_cgn_params_parse()
7170 /* Resume blocked IO - unblock on workqueue */ in lpfc_cgn_params_parse()
7171 queue_work(phba->wq, in lpfc_cgn_params_parse()
7172 &phba->unblock_request_work); in lpfc_cgn_params_parse()
7177 switch (phba->cgn_p.cgn_param_mode) { in lpfc_cgn_params_parse()
7181 if (phba->link_state >= LPFC_LINK_UP) in lpfc_cgn_params_parse()
7182 lpfc_issue_els_edc(phba->pport, 0); in lpfc_cgn_params_parse()
7191 oldmode != phba->cgn_p.cgn_param_mode) { in lpfc_cgn_params_parse()
7192 if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED) in lpfc_cgn_params_parse()
7194 phba->cgn_p.cgn_param_level0); in lpfc_cgn_params_parse()
7198 dev_info(&phba->pcidev->dev, "%d: " in lpfc_cgn_params_parse()
7200 phba->brd_no, in lpfc_cgn_params_parse()
7202 [phba->cgn_p.cgn_param_mode], in lpfc_cgn_params_parse()
7208 "version %d\n", p_cgn_param->cgn_param_magic, in lpfc_cgn_params_parse()
7209 p_cgn_param->cgn_param_version); in lpfc_cgn_params_parse()
7214 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7223 * a negative error value if an error was encountered
7268 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7290 if (!phba->sli4_hba.pc_sli4_params.cmf) { in lpfc_sli4_cgn_parm_chg_evt()
7293 return -EACCES; in lpfc_sli4_cgn_parm_chg_evt()
7313 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7325 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_sli4_async_event_proc()
7326 phba->hba_flag &= ~ASYNC_EVENT; in lpfc_sli4_async_event_proc()
7327 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_sli4_async_event_proc()
7330 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); in lpfc_sli4_async_event_proc()
7331 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { in lpfc_sli4_async_event_proc()
7332 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, in lpfc_sli4_async_event_proc()
7334 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, in lpfc_sli4_async_event_proc()
7338 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { in lpfc_sli4_async_event_proc()
7341 &cq_event->cqe.acqe_link); in lpfc_sli4_async_event_proc()
7344 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); in lpfc_sli4_async_event_proc()
7348 &cq_event->cqe.acqe_dcbx); in lpfc_sli4_async_event_proc()
7352 &cq_event->cqe.acqe_grp5); in lpfc_sli4_async_event_proc()
7355 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); in lpfc_sli4_async_event_proc()
7358 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); in lpfc_sli4_async_event_proc()
7368 &cq_event->cqe.mcqe_cmpl)); in lpfc_sli4_async_event_proc()
7374 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); in lpfc_sli4_async_event_proc()
7376 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); in lpfc_sli4_async_event_proc()
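/*
 * Note the locking pattern in the loop above: each event is unlinked
 * from the work queue with asynce_list_lock held, but the lock is
 * dropped while the per-type handler runs, so handlers are free to
 * sleep or allocate memory. The lock is retaken only to test for
 * more queued work.
 */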
7380 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7390 spin_lock_irq(&phba->hbalock); in lpfc_sli4_fcf_redisc_event_proc()
7392 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; in lpfc_sli4_fcf_redisc_event_proc()
7394 phba->fcf.failover_rec.flag = 0; in lpfc_sli4_fcf_redisc_event_proc()
7396 phba->fcf.fcf_flag |= FCF_REDISC_FOV; in lpfc_sli4_fcf_redisc_event_proc()
7397 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_fcf_redisc_event_proc()
7399 /* Scan FCF table from the first entry to re-discover SAN */ in lpfc_sli4_fcf_redisc_event_proc()
7401 "2777 Start post-quiescent FCF table scan\n"); in lpfc_sli4_fcf_redisc_event_proc()
7410 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7412 * @dev_grp: The HBA PCI-Device group number.
7414 * This routine is invoked to set up the per HBA PCI-Device group function
7417 * Return: 0 if success, otherwise -ENODEV
7424 /* Set up lpfc PCI-device group */ in lpfc_api_table_setup()
7425 phba->pci_dev_grp = dev_grp; in lpfc_api_table_setup()
7429 phba->sli_rev = LPFC_SLI_REV4; in lpfc_api_table_setup()
7434 return -ENODEV; in lpfc_api_table_setup()
7438 return -ENODEV; in lpfc_api_table_setup()
7442 return -ENODEV; in lpfc_api_table_setup()
7446 return -ENODEV; in lpfc_api_table_setup()
7452 * lpfc_log_intr_mode - Log the active interrupt mode
7472 "0480 Enabled MSI-X interrupt mode.\n"); in lpfc_log_intr_mode()
7483 * lpfc_enable_pci_dev - Enable a generic PCI device.
7490 * 0 - successful
7491 * other values - error
7499 if (!phba->pcidev) in lpfc_enable_pci_dev()
7502 pdev = phba->pcidev; in lpfc_enable_pci_dev()
7516 pdev->needs_freset = 1; in lpfc_enable_pci_dev()
7525 return -ENODEV; in lpfc_enable_pci_dev()
7529 * lpfc_disable_pci_dev - Disable a generic PCI device.
7541 if (!phba->pcidev) in lpfc_disable_pci_dev()
7544 pdev = phba->pcidev; in lpfc_disable_pci_dev()
7553 * lpfc_reset_hba - Reset a hba
7565 if (!phba->cfg_enable_hba_reset) { in lpfc_reset_hba()
7566 phba->link_state = LPFC_HBA_ERROR; in lpfc_reset_hba()
7571 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { in lpfc_reset_hba()
7584 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7587 * This function enables the PCI SR-IOV virtual functions to a physical
7588 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7590 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7596 struct pci_dev *pdev = phba->pcidev; in lpfc_sli_sriov_nr_virtfn_get()
7609 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7613 * This function enables the PCI SR-IOV virtual functions to a physical
7614 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7616 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7622 struct pci_dev *pdev = phba->pcidev; in lpfc_sli_probe_sriov_nr_virtfn()
7631 return -EINVAL; in lpfc_sli_probe_sriov_nr_virtfn()
7657 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
7664 * 0 - successful
7665 * other values - error
7670 struct lpfc_sli *psli = &phba->sli; in lpfc_setup_driver_resource_phase1()
7675 atomic_set(&phba->fast_event_count, 0); in lpfc_setup_driver_resource_phase1()
7676 atomic_set(&phba->dbg_log_idx, 0); in lpfc_setup_driver_resource_phase1()
7677 atomic_set(&phba->dbg_log_cnt, 0); in lpfc_setup_driver_resource_phase1()
7678 atomic_set(&phba->dbg_log_dmping, 0); in lpfc_setup_driver_resource_phase1()
7679 spin_lock_init(&phba->hbalock); in lpfc_setup_driver_resource_phase1()
7682 spin_lock_init(&phba->port_list_lock); in lpfc_setup_driver_resource_phase1()
7683 INIT_LIST_HEAD(&phba->port_list); in lpfc_setup_driver_resource_phase1()
7685 INIT_LIST_HEAD(&phba->work_list); in lpfc_setup_driver_resource_phase1()
7688 init_waitqueue_head(&phba->work_waitq); in lpfc_setup_driver_resource_phase1()
7692 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? in lpfc_setup_driver_resource_phase1()
7694 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? in lpfc_setup_driver_resource_phase1()
7696 (phba->nvmet_support ? "NVMET" : " ")); in lpfc_setup_driver_resource_phase1()
7699 spin_lock_init(&phba->scsi_buf_list_get_lock); in lpfc_setup_driver_resource_phase1()
7700 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); in lpfc_setup_driver_resource_phase1()
7701 spin_lock_init(&phba->scsi_buf_list_put_lock); in lpfc_setup_driver_resource_phase1()
7702 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); in lpfc_setup_driver_resource_phase1()
7705 INIT_LIST_HEAD(&phba->fabric_iocb_list); in lpfc_setup_driver_resource_phase1()
7708 INIT_LIST_HEAD(&phba->elsbuf); in lpfc_setup_driver_resource_phase1()
7711 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); in lpfc_setup_driver_resource_phase1()
7714 spin_lock_init(&phba->devicelock); in lpfc_setup_driver_resource_phase1()
7715 INIT_LIST_HEAD(&phba->luns); in lpfc_setup_driver_resource_phase1()
7718 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); in lpfc_setup_driver_resource_phase1()
7720 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); in lpfc_setup_driver_resource_phase1()
7722 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); in lpfc_setup_driver_resource_phase1()
7724 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); in lpfc_setup_driver_resource_phase1()
7726 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); in lpfc_setup_driver_resource_phase1()
7728 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, in lpfc_setup_driver_resource_phase1()
7730 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); in lpfc_setup_driver_resource_phase1()
7735 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7739 * support the SLI-3 HBA device it attached to.
7742 * 0 - successful
7743 * other values - error
7755 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); in lpfc_sli_driver_resource_setup()
7758 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); in lpfc_sli_driver_resource_setup()
7759 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); in lpfc_sli_driver_resource_setup()
7763 /* Set up phase-1 common device driver resources */ in lpfc_sli_driver_resource_setup()
7767 return -ENODEV; in lpfc_sli_driver_resource_setup()
7769 if (!phba->sli.sli3_ring) in lpfc_sli_driver_resource_setup()
7770 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, in lpfc_sli_driver_resource_setup()
7773 if (!phba->sli.sli3_ring) in lpfc_sli_driver_resource_setup()
7774 return -ENOMEM; in lpfc_sli_driver_resource_setup()
7781 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_sli_driver_resource_setup()
7787 if (phba->cfg_enable_bg) { in lpfc_sli_driver_resource_setup()
7789 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, in lpfc_sli_driver_resource_setup()
7797 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + in lpfc_sli_driver_resource_setup()
7801 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) in lpfc_sli_driver_resource_setup()
7802 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; in lpfc_sli_driver_resource_setup()
7805 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; in lpfc_sli_driver_resource_setup()
7812 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + in lpfc_sli_driver_resource_setup()
7814 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); in lpfc_sli_driver_resource_setup()
7817 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; in lpfc_sli_driver_resource_setup()
7822 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, in lpfc_sli_driver_resource_setup()
7823 phba->cfg_total_seg_cnt); in lpfc_sli_driver_resource_setup()
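/*
 * The non-BG sizing above reduces to (names as used in this function):
 *
 *   phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 *                               sizeof(struct fcp_rsp) +
 *                               ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
 *
 * One DMA buffer carries the FCP_CMND and FCP_RSP payloads followed
 * by the BDE/SGE list; the "+ 2" presumably covers the two entries
 * that describe the FCP_CMND and FCP_RSP themselves.
 */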
7825 phba->max_vpi = LPFC_MAX_VPI; in lpfc_sli_driver_resource_setup()
7827 phba->max_vports = 0; in lpfc_sli_driver_resource_setup()
7837 return -ENOMEM; in lpfc_sli_driver_resource_setup()
7839 phba->lpfc_sg_dma_buf_pool = in lpfc_sli_driver_resource_setup()
7841 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, in lpfc_sli_driver_resource_setup()
7844 if (!phba->lpfc_sg_dma_buf_pool) in lpfc_sli_driver_resource_setup()
7847 phba->lpfc_cmd_rsp_buf_pool = in lpfc_sli_driver_resource_setup()
7849 &phba->pcidev->dev, in lpfc_sli_driver_resource_setup()
7854 if (!phba->lpfc_cmd_rsp_buf_pool) in lpfc_sli_driver_resource_setup()
7858 * Enable sr-iov virtual functions if supported and configured in lpfc_sli_driver_resource_setup()
7861 if (phba->cfg_sriov_nr_virtfn > 0) { in lpfc_sli_driver_resource_setup()
7863 phba->cfg_sriov_nr_virtfn); in lpfc_sli_driver_resource_setup()
7866 "2808 Requested number of SR-IOV " in lpfc_sli_driver_resource_setup()
7869 phba->cfg_sriov_nr_virtfn); in lpfc_sli_driver_resource_setup()
7870 phba->cfg_sriov_nr_virtfn = 0; in lpfc_sli_driver_resource_setup()
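/*
 * SR-IOV enablement is best-effort: if the device or platform
 * rejects the requested VF count, the driver logs it and continues
 * with virtual functions disabled.
 */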
7877 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); in lpfc_sli_driver_resource_setup()
7878 phba->lpfc_sg_dma_buf_pool = NULL; in lpfc_sli_driver_resource_setup()
7881 return -ENOMEM; in lpfc_sli_driver_resource_setup()
7885 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7889 * specific for supporting the SLI-3 HBA device it attached to.
7901 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7905 * support the SLI-4 HBA device it attached to.
7908 * 0 - successful
7909 * other values - error
7923 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; in lpfc_sli4_driver_resource_setup()
7924 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; in lpfc_sli4_driver_resource_setup()
7925 phba->sli4_hba.curr_disp_cpu = 0; in lpfc_sli4_driver_resource_setup()
7930 /* Set up phase-1 common device driver resources */ in lpfc_sli4_driver_resource_setup()
7933 return -ENODEV; in lpfc_sli4_driver_resource_setup()
7938 return -ENODEV; in lpfc_sli4_driver_resource_setup()
7943 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); in lpfc_sli4_driver_resource_setup()
7944 if (!phba->wq) in lpfc_sli4_driver_resource_setup()
7945 return -ENOMEM; in lpfc_sli4_driver_resource_setup()
7951 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); in lpfc_sli4_driver_resource_setup()
7954 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); in lpfc_sli4_driver_resource_setup()
7957 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in lpfc_sli4_driver_resource_setup()
7958 phba->cmf_timer.function = lpfc_cmf_timer; in lpfc_sli4_driver_resource_setup()
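/*
 * lpfc_cmf_timer (earlier in this file) is the callback for this
 * hrtimer. A sketch of arming it, assuming LPFC_CMF_INTERVAL is in
 * milliseconds (illustrative only, not the driver's actual call
 * site):
 *
 *   hrtimer_start(&phba->cmf_timer,
 *                 ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC),
 *                 HRTIMER_MODE_REL);
 */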
7961 * Control structure for handling external multi-buffer mailbox in lpfc_sli4_driver_resource_setup()
7962 * command pass-through. in lpfc_sli4_driver_resource_setup()
7964 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, in lpfc_sli4_driver_resource_setup()
7966 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); in lpfc_sli4_driver_resource_setup()
7968 phba->max_vpi = LPFC_MAX_VPI; in lpfc_sli4_driver_resource_setup()
7971 phba->max_vports = 0; in lpfc_sli4_driver_resource_setup()
7974 phba->valid_vlan = 0; in lpfc_sli4_driver_resource_setup()
7975 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; in lpfc_sli4_driver_resource_setup()
7976 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; in lpfc_sli4_driver_resource_setup()
7977 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; in lpfc_sli4_driver_resource_setup()
7986 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); in lpfc_sli4_driver_resource_setup()
7987 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; in lpfc_sli4_driver_resource_setup()
7988 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; in lpfc_sli4_driver_resource_setup()
7992 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); in lpfc_sli4_driver_resource_setup()
7998 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); in lpfc_sli4_driver_resource_setup()
7999 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); in lpfc_sli4_driver_resource_setup()
8001 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_driver_resource_setup()
8003 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_sli4_driver_resource_setup()
8004 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); in lpfc_sli4_driver_resource_setup()
8005 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); in lpfc_sli4_driver_resource_setup()
8006 spin_lock_init(&phba->sli4_hba.t_active_list_lock); in lpfc_sli4_driver_resource_setup()
8007 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); in lpfc_sli4_driver_resource_setup()
8011 spin_lock_init(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_driver_resource_setup()
8012 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); in lpfc_sli4_driver_resource_setup()
8013 spin_lock_init(&phba->sli4_hba.asynce_list_lock); in lpfc_sli4_driver_resource_setup()
8014 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); in lpfc_sli4_driver_resource_setup()
8017 * Initialize driver internal slow-path work queues in lpfc_sli4_driver_resource_setup()
8020 /* Driver internal slow-path CQ Event pool */ in lpfc_sli4_driver_resource_setup()
8021 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); in lpfc_sli4_driver_resource_setup()
8023 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); in lpfc_sli4_driver_resource_setup()
8025 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); in lpfc_sli4_driver_resource_setup()
8026 /* Slow-path XRI aborted CQ Event work queue list */ in lpfc_sli4_driver_resource_setup()
8027 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); in lpfc_sli4_driver_resource_setup()
8029 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); in lpfc_sli4_driver_resource_setup()
8032 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); in lpfc_sli4_driver_resource_setup()
8033 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); in lpfc_sli4_driver_resource_setup()
8034 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); in lpfc_sli4_driver_resource_setup()
8035 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); in lpfc_sli4_driver_resource_setup()
8040 INIT_LIST_HEAD(&phba->sli.mboxq); in lpfc_sli4_driver_resource_setup()
8041 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); in lpfc_sli4_driver_resource_setup()
8044 phba->sli4_hba.lnk_info.optic_state = 0xff; in lpfc_sli4_driver_resource_setup()
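/*
 * 0xff is a sentinel meaning "optic state unknown"; the
 * misconfigured-port event handler compares the reported status
 * against this cached value so an unchanged state is not reprocessed.
 */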
8052 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= in lpfc_sli4_driver_resource_setup()
8056 rc = -ENODEV; in lpfc_sli4_driver_resource_setup()
8059 phba->temp_sensor_support = 1; in lpfc_sli4_driver_resource_setup()
8077 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { in lpfc_sli4_driver_resource_setup()
8078 /* Right now the link is down, if FA-PWWN is configured the in lpfc_sli4_driver_resource_setup()
8085 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; in lpfc_sli4_driver_resource_setup()
8093 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == in lpfc_sli4_driver_resource_setup()
8100 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, in lpfc_sli4_driver_resource_setup()
8103 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8108 phba->nvmet_support = 0; in lpfc_sli4_driver_resource_setup()
8119 bf_get(lpfc_mqe_command, &mboxq->u.mqe), in lpfc_sli4_driver_resource_setup()
8120 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); in lpfc_sli4_driver_resource_setup()
8121 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_driver_resource_setup()
8122 rc = -EIO; in lpfc_sli4_driver_resource_setup()
8125 mb = &mboxq->u.mb; in lpfc_sli4_driver_resource_setup()
8126 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, in lpfc_sli4_driver_resource_setup()
8129 phba->sli4_hba.wwnn.u.name = wwn; in lpfc_sli4_driver_resource_setup()
8130 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, in lpfc_sli4_driver_resource_setup()
8134 phba->sli4_hba.wwpn.u.name = wwn; in lpfc_sli4_driver_resource_setup()
8143 phba->nvmet_support = 1; /* a match */ in lpfc_sli4_driver_resource_setup()
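/*
 * In target mode (nvmet_support), the common XRI rebalancing appears
 * to be unused and is disabled below; NUMA-home-thread IRQ mode also
 * falls back to one IRQ channel and one hardware queue per present
 * CPU.
 */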
8157 phba->cfg_xri_rebalancing = 0; in lpfc_sli4_driver_resource_setup()
8158 if (phba->irq_chann_mode == NHT_MODE) { in lpfc_sli4_driver_resource_setup()
8159 phba->cfg_irq_chann = in lpfc_sli4_driver_resource_setup()
8160 phba->sli4_hba.num_present_cpu; in lpfc_sli4_driver_resource_setup()
8161 phba->cfg_hdw_queue = in lpfc_sli4_driver_resource_setup()
8162 phba->sli4_hba.num_present_cpu; in lpfc_sli4_driver_resource_setup()
8163 phba->irq_chann_mode = NORMAL_MODE; in lpfc_sli4_driver_resource_setup()
8180 &phba->sli4_hba.sli_intf); in lpfc_sli4_driver_resource_setup()
8182 &phba->sli4_hba.sli_intf); in lpfc_sli4_driver_resource_setup()
8183 if (phba->sli4_hba.extents_in_use && in lpfc_sli4_driver_resource_setup()
8184 phba->sli4_hba.rpi_hdrs_in_use) { in lpfc_sli4_driver_resource_setup()
8190 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_driver_resource_setup()
8191 rc = -EIO; in lpfc_sli4_driver_resource_setup()
8197 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_driver_resource_setup()
8198 rc = -EIO; in lpfc_sli4_driver_resource_setup()
8208 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) in lpfc_sli4_driver_resource_setup()
8222 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { in lpfc_sli4_driver_resource_setup()
8226 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, in lpfc_sli4_driver_resource_setup()
8234 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + in lpfc_sli4_driver_resource_setup()
8238 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; in lpfc_sli4_driver_resource_setup()
8244 if (phba->cfg_enable_bg && in lpfc_sli4_driver_resource_setup()
8245 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) in lpfc_sli4_driver_resource_setup()
8246 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; in lpfc_sli4_driver_resource_setup()
8248 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; in lpfc_sli4_driver_resource_setup()
8256 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + in lpfc_sli4_driver_resource_setup()
8258 ((phba->cfg_sg_seg_cnt + extra) * in lpfc_sli4_driver_resource_setup()
8262 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; in lpfc_sli4_driver_resource_setup()
8263 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; in lpfc_sli4_driver_resource_setup()
8266 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only in lpfc_sli4_driver_resource_setup()
8271 if (phba->cfg_xpsgl && !phba->nvmet_support) in lpfc_sli4_driver_resource_setup()
8272 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; in lpfc_sli4_driver_resource_setup()
8273 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) in lpfc_sli4_driver_resource_setup()
8274 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; in lpfc_sli4_driver_resource_setup()
8276 phba->cfg_sg_dma_buf_size = in lpfc_sli4_driver_resource_setup()
8277 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); in lpfc_sli4_driver_resource_setup()
8279 phba->border_sge_num = phba->cfg_sg_dma_buf_size / in lpfc_sli4_driver_resource_setup()
8283 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_driver_resource_setup()
8284 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { in lpfc_sli4_driver_resource_setup()
8289 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; in lpfc_sli4_driver_resource_setup()
8291 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; in lpfc_sli4_driver_resource_setup()
8297 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, in lpfc_sli4_driver_resource_setup()
8298 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, in lpfc_sli4_driver_resource_setup()
8299 phba->cfg_nvme_seg_cnt); in lpfc_sli4_driver_resource_setup()
8301 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) in lpfc_sli4_driver_resource_setup()
8302 i = phba->cfg_sg_dma_buf_size; in lpfc_sli4_driver_resource_setup()
8306 phba->lpfc_sg_dma_buf_pool = in lpfc_sli4_driver_resource_setup()
8308 &phba->pcidev->dev, in lpfc_sli4_driver_resource_setup()
8309 phba->cfg_sg_dma_buf_size, in lpfc_sli4_driver_resource_setup()
8311 if (!phba->lpfc_sg_dma_buf_pool) { in lpfc_sli4_driver_resource_setup()
8312 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8316 phba->lpfc_cmd_rsp_buf_pool = in lpfc_sli4_driver_resource_setup()
8318 &phba->pcidev->dev, in lpfc_sli4_driver_resource_setup()
8322 if (!phba->lpfc_cmd_rsp_buf_pool) { in lpfc_sli4_driver_resource_setup()
8323 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8327 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_driver_resource_setup()
8363 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; in lpfc_sli4_driver_resource_setup()
8364 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), in lpfc_sli4_driver_resource_setup()
8366 if (!phba->fcf.fcf_rr_bmask) { in lpfc_sli4_driver_resource_setup()
8370 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8374 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, in lpfc_sli4_driver_resource_setup()
8377 if (!phba->sli4_hba.hba_eq_hdl) { in lpfc_sli4_driver_resource_setup()
8380 "fast-path per-EQ handle array\n"); in lpfc_sli4_driver_resource_setup()
8381 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8385 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, in lpfc_sli4_driver_resource_setup()
8388 if (!phba->sli4_hba.cpu_map) { in lpfc_sli4_driver_resource_setup()
8390 "3327 Failed allocate memory for msi-x " in lpfc_sli4_driver_resource_setup()
8392 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8396 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); in lpfc_sli4_driver_resource_setup()
8397 if (!phba->sli4_hba.eq_info) { in lpfc_sli4_driver_resource_setup()
8400 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8404 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, in lpfc_sli4_driver_resource_setup()
8405 sizeof(*phba->sli4_hba.idle_stat), in lpfc_sli4_driver_resource_setup()
8407 if (!phba->sli4_hba.idle_stat) { in lpfc_sli4_driver_resource_setup()
8410 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8415 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); in lpfc_sli4_driver_resource_setup()
8416 if (!phba->sli4_hba.c_stat) { in lpfc_sli4_driver_resource_setup()
8418 "3332 Failed allocating per cpu hdwq stats\n"); in lpfc_sli4_driver_resource_setup()
8419 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8424 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); in lpfc_sli4_driver_resource_setup()
8425 if (!phba->cmf_stat) { in lpfc_sli4_driver_resource_setup()
8427 "3331 Failed allocating per cpu cgn stats\n"); in lpfc_sli4_driver_resource_setup()
8428 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8433 * Enable sr-iov virtual functions if supported and configured in lpfc_sli4_driver_resource_setup()
8436 if (phba->cfg_sriov_nr_virtfn > 0) { in lpfc_sli4_driver_resource_setup()
8438 phba->cfg_sriov_nr_virtfn); in lpfc_sli4_driver_resource_setup()
8441 "3020 Requested number of SR-IOV " in lpfc_sli4_driver_resource_setup()
8444 phba->cfg_sriov_nr_virtfn); in lpfc_sli4_driver_resource_setup()
8445 phba->cfg_sriov_nr_virtfn = 0; in lpfc_sli4_driver_resource_setup()
8453 free_percpu(phba->sli4_hba.c_stat); in lpfc_sli4_driver_resource_setup()
8456 kfree(phba->sli4_hba.idle_stat); in lpfc_sli4_driver_resource_setup()
8458 free_percpu(phba->sli4_hba.eq_info); in lpfc_sli4_driver_resource_setup()
8460 kfree(phba->sli4_hba.cpu_map); in lpfc_sli4_driver_resource_setup()
8462 kfree(phba->sli4_hba.hba_eq_hdl); in lpfc_sli4_driver_resource_setup()
8464 kfree(phba->fcf.fcf_rr_bmask); in lpfc_sli4_driver_resource_setup()
8472 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); in lpfc_sli4_driver_resource_setup()
8473 phba->lpfc_cmd_rsp_buf_pool = NULL; in lpfc_sli4_driver_resource_setup()
8475 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); in lpfc_sli4_driver_resource_setup()
8476 phba->lpfc_sg_dma_buf_pool = NULL; in lpfc_sli4_driver_resource_setup()
8482 destroy_workqueue(phba->wq); in lpfc_sli4_driver_resource_setup()
8483 phba->wq = NULL; in lpfc_sli4_driver_resource_setup()
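/*
 * The out_* labels above unwind in strict reverse order of
 * allocation, so a failure at any stage frees exactly the resources
 * set up before it.
 */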
8488 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8492 * specific for supporting the SLI-4 HBA device it attached to.
8499 free_percpu(phba->sli4_hba.eq_info); in lpfc_sli4_driver_resource_unset()
8501 free_percpu(phba->sli4_hba.c_stat); in lpfc_sli4_driver_resource_unset()
8503 free_percpu(phba->cmf_stat); in lpfc_sli4_driver_resource_unset()
8504 kfree(phba->sli4_hba.idle_stat); in lpfc_sli4_driver_resource_unset()
8506 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ in lpfc_sli4_driver_resource_unset()
8507 kfree(phba->sli4_hba.cpu_map); in lpfc_sli4_driver_resource_unset()
8508 phba->sli4_hba.num_possible_cpu = 0; in lpfc_sli4_driver_resource_unset()
8509 phba->sli4_hba.num_present_cpu = 0; in lpfc_sli4_driver_resource_unset()
8510 phba->sli4_hba.curr_disp_cpu = 0; in lpfc_sli4_driver_resource_unset()
8511 cpumask_clear(&phba->sli4_hba.irq_aff_mask); in lpfc_sli4_driver_resource_unset()
8513 /* Free memory allocated for fast-path work queue handles */ in lpfc_sli4_driver_resource_unset()
8514 kfree(phba->sli4_hba.hba_eq_hdl); in lpfc_sli4_driver_resource_unset()
8521 kfree(phba->fcf.fcf_rr_bmask); in lpfc_sli4_driver_resource_unset()
8543 &phba->fcf_conn_rec_list, list) { in lpfc_sli4_driver_resource_unset()
8544 list_del_init(&conn_entry->list); in lpfc_sli4_driver_resource_unset()
8552 * lpfc_init_api_table_setup - Set up init api function jump table
8554 * @dev_grp: The HBA PCI-Device group number.
8559 * Returns: 0 - success, -ENODEV - failure.
8564 phba->lpfc_hba_init_link = lpfc_hba_init_link; in lpfc_init_api_table_setup()
8565 phba->lpfc_hba_down_link = lpfc_hba_down_link; in lpfc_init_api_table_setup()
8566 phba->lpfc_selective_reset = lpfc_selective_reset; in lpfc_init_api_table_setup()
8569 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; in lpfc_init_api_table_setup()
8570 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; in lpfc_init_api_table_setup()
8571 phba->lpfc_stop_port = lpfc_stop_port_s3; in lpfc_init_api_table_setup()
8574 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; in lpfc_init_api_table_setup()
8575 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; in lpfc_init_api_table_setup()
8576 phba->lpfc_stop_port = lpfc_stop_port_s4; in lpfc_init_api_table_setup()
8580 "1431 Invalid HBA PCI-device group: 0x%x\n", in lpfc_init_api_table_setup()
8582 return -ENODEV; in lpfc_init_api_table_setup()
8588 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8595 * 0 - successful
8596 * other values - error
8604 phba->worker_thread = kthread_run(lpfc_do_work, phba, in lpfc_setup_driver_resource_phase2()
8605 "lpfc_worker_%d", phba->brd_no); in lpfc_setup_driver_resource_phase2()
8606 if (IS_ERR(phba->worker_thread)) { in lpfc_setup_driver_resource_phase2()
8607 error = PTR_ERR(phba->worker_thread); in lpfc_setup_driver_resource_phase2()
8615 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8625 if (phba->wq) { in lpfc_unset_driver_resource_phase2()
8626 destroy_workqueue(phba->wq); in lpfc_unset_driver_resource_phase2()
8627 phba->wq = NULL; in lpfc_unset_driver_resource_phase2()
8631 if (phba->worker_thread) in lpfc_unset_driver_resource_phase2()
8632 kthread_stop(phba->worker_thread); in lpfc_unset_driver_resource_phase2()
8636 * lpfc_free_iocb_list - Free iocb list.
8646 spin_lock_irq(&phba->hbalock); in lpfc_free_iocb_list()
8648 &phba->lpfc_iocb_list, list) { in lpfc_free_iocb_list()
8649 list_del(&iocbq_entry->list); in lpfc_free_iocb_list()
8651 phba->total_iocbq_bufs--; in lpfc_free_iocb_list()
8653 spin_unlock_irq(&phba->hbalock); in lpfc_free_iocb_list()
8659 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8667 * 0 - successful
8668 * other values - error
8678 INIT_LIST_HEAD(&phba->lpfc_iocb_list); in lpfc_init_iocb_list()
8695 iocbq_entry->sli4_lxritag = NO_XRI; in lpfc_init_iocb_list()
8696 iocbq_entry->sli4_xritag = NO_XRI; in lpfc_init_iocb_list()
8698 spin_lock_irq(&phba->hbalock); in lpfc_init_iocb_list()
8699 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); in lpfc_init_iocb_list()
8700 phba->total_iocbq_bufs++; in lpfc_init_iocb_list()
8701 spin_unlock_irq(&phba->hbalock); in lpfc_init_iocb_list()
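/*
 * Each pre-allocated iocbq starts life with NO_XRI: an exchange ID
 * is only bound when the iocb is actually used for I/O. Free iocbs
 * are threaded onto phba->lpfc_iocb_list under hbalock, and
 * total_iocbq_bufs tracks the pool size.
 */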
8709 return -ENOMEM; in lpfc_init_iocb_list()
8713 * lpfc_free_sgl_list - Free a given sgl list.
8725 list_del(&sglq_entry->list); in lpfc_free_sgl_list()
8726 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); in lpfc_free_sgl_list()
8732 * lpfc_free_els_sgl_list - Free els sgl list.
8743 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); in lpfc_free_els_sgl_list()
8744 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); in lpfc_free_els_sgl_list()
8745 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); in lpfc_free_els_sgl_list()
8752 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8764 spin_lock_irq(&phba->hbalock); in lpfc_free_nvmet_sgl_list()
8765 spin_lock(&phba->sli4_hba.sgl_list_lock); in lpfc_free_nvmet_sgl_list()
8766 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); in lpfc_free_nvmet_sgl_list()
8767 spin_unlock(&phba->sli4_hba.sgl_list_lock); in lpfc_free_nvmet_sgl_list()
8768 spin_unlock_irq(&phba->hbalock); in lpfc_free_nvmet_sgl_list()
8772 list_del(&sglq_entry->list); in lpfc_free_nvmet_sgl_list()
8773 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); in lpfc_free_nvmet_sgl_list()
8781 phba->sli4_hba.nvmet_xri_cnt = 0; in lpfc_free_nvmet_sgl_list()
8785 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8796 size *= phba->sli4_hba.max_cfg_param.max_xri; in lpfc_init_active_sgl_array()
8798 phba->sli4_hba.lpfc_sglq_active_list = in lpfc_init_active_sgl_array()
8800 if (!phba->sli4_hba.lpfc_sglq_active_list) in lpfc_init_active_sgl_array()
8801 return -ENOMEM; in lpfc_init_active_sgl_array()
8806 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8816 kfree(phba->sli4_hba.lpfc_sglq_active_list); in lpfc_free_active_sgl()
8820 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8831 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); in lpfc_init_sgl_list()
8832 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); in lpfc_init_sgl_list()
8833 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); in lpfc_init_sgl_list()
8834 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); in lpfc_init_sgl_list()
8836 /* els xri-sgl bookkeeping */ in lpfc_init_sgl_list()
8837 phba->sli4_hba.els_xri_cnt = 0; in lpfc_init_sgl_list()
8839 /* nvme xri-buffer bookkeeping */ in lpfc_init_sgl_list()
8840 phba->sli4_hba.io_xri_cnt = 0; in lpfc_init_sgl_list()
8844 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8854 * 0 - successful
8855 * -ERROR - otherwise.
8863 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); in lpfc_sli4_init_rpi_hdrs()
8864 if (!phba->sli4_hba.rpi_hdrs_in_use) in lpfc_sli4_init_rpi_hdrs()
8866 if (phba->sli4_hba.extents_in_use) in lpfc_sli4_init_rpi_hdrs()
8867 return -EIO; in lpfc_sli4_init_rpi_hdrs()
8874 rc = -ENODEV; in lpfc_sli4_init_rpi_hdrs()
8881 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8905 if (!phba->sli4_hba.rpi_hdrs_in_use) in lpfc_sli4_create_rpi_hdr()
8907 if (phba->sli4_hba.extents_in_use) in lpfc_sli4_create_rpi_hdr()
8911 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; in lpfc_sli4_create_rpi_hdr()
8913 spin_lock_irq(&phba->hbalock); in lpfc_sli4_create_rpi_hdr()
8919 curr_rpi_range = phba->sli4_hba.next_rpi; in lpfc_sli4_create_rpi_hdr()
8920 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_create_rpi_hdr()
8928 * port expects a 4KB DMA-mapped memory region that is 4K aligned. in lpfc_sli4_create_rpi_hdr()
8934 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, in lpfc_sli4_create_rpi_hdr()
8936 &dmabuf->phys, GFP_KERNEL); in lpfc_sli4_create_rpi_hdr()
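/*
 * dma_alloc_coherent() returns at least page-aligned memory, so with
 * a 4KB template the alignment check below should never fire; it
 * guards the SLI-4 requirement that the RPI header template be 4K
 * aligned before it is posted to the port.
 */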
8937 if (!dmabuf->virt) { in lpfc_sli4_create_rpi_hdr()
8942 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { in lpfc_sli4_create_rpi_hdr()
8952 rpi_hdr->dmabuf = dmabuf; in lpfc_sli4_create_rpi_hdr()
8953 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; in lpfc_sli4_create_rpi_hdr()
8954 rpi_hdr->page_count = 1; in lpfc_sli4_create_rpi_hdr()
8955 spin_lock_irq(&phba->hbalock); in lpfc_sli4_create_rpi_hdr()
8958 rpi_hdr->start_rpi = curr_rpi_range; in lpfc_sli4_create_rpi_hdr()
8959 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; in lpfc_sli4_create_rpi_hdr()
8960 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); in lpfc_sli4_create_rpi_hdr()
8962 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_create_rpi_hdr()
8966 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, in lpfc_sli4_create_rpi_hdr()
8967 dmabuf->virt, dmabuf->phys); in lpfc_sli4_create_rpi_hdr()
8974 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8987 if (!phba->sli4_hba.rpi_hdrs_in_use) in lpfc_sli4_remove_rpi_hdrs()
8991 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { in lpfc_sli4_remove_rpi_hdrs()
8992 list_del(&rpi_hdr->list); in lpfc_sli4_remove_rpi_hdrs()
8993 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, in lpfc_sli4_remove_rpi_hdrs()
8994 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); in lpfc_sli4_remove_rpi_hdrs()
8995 kfree(rpi_hdr->dmabuf); in lpfc_sli4_remove_rpi_hdrs()
9000 phba->sli4_hba.next_rpi = 0; in lpfc_sli4_remove_rpi_hdrs()
9004 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
9012 * pointer to @phba - successful
9013 * NULL - error
9023 dev_err(&pdev->dev, "failed to allocate hba struct\n"); in lpfc_hba_alloc()
9028 phba->pcidev = pdev; in lpfc_hba_alloc()
9031 phba->brd_no = lpfc_get_instance(); in lpfc_hba_alloc()
9032 if (phba->brd_no < 0) { in lpfc_hba_alloc()
9036 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; in lpfc_hba_alloc()
9038 spin_lock_init(&phba->ct_ev_lock); in lpfc_hba_alloc()
9039 INIT_LIST_HEAD(&phba->ct_ev_waiters); in lpfc_hba_alloc()
9045 * lpfc_hba_free - Free driver hba data structure with a device.
9054 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_hba_free()
9055 kfree(phba->sli4_hba.hdwq); in lpfc_hba_free()
9058 idr_remove(&lpfc_hba_index, phba->brd_no); in lpfc_hba_free()
9061 kfree(phba->sli.sli3_ring); in lpfc_hba_free()
9062 phba->sli.sli3_ring = NULL; in lpfc_hba_free()
9069 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9075 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
9080 struct lpfc_hba *phba = vport->phba; in lpfc_setup_fdmi_mask()
9082 vport->load_flag |= FC_ALLOW_FDMI; in lpfc_setup_fdmi_mask()
9083 if (phba->cfg_enable_SmartSAN || in lpfc_setup_fdmi_mask()
9084 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) { in lpfc_setup_fdmi_mask()
9086 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; in lpfc_setup_fdmi_mask()
9087 if (phba->cfg_enable_SmartSAN) in lpfc_setup_fdmi_mask()
9088 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; in lpfc_setup_fdmi_mask()
9090 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; in lpfc_setup_fdmi_mask()
9095 vport->fdmi_hba_mask, vport->fdmi_port_mask); in lpfc_setup_fdmi_mask()
9099 * lpfc_create_shost - Create hba physical port with associated scsi host.
9106 * 0 - successful
9107 * other values - error
9116 phba->fc_edtov = FF_DEF_EDTOV; in lpfc_create_shost()
9117 phba->fc_ratov = FF_DEF_RATOV; in lpfc_create_shost()
9118 phba->fc_altov = FF_DEF_ALTOV; in lpfc_create_shost()
9119 phba->fc_arbtov = FF_DEF_ARBTOV; in lpfc_create_shost()
9121 atomic_set(&phba->sdev_cnt, 0); in lpfc_create_shost()
9122 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); in lpfc_create_shost()
9124 return -ENODEV; in lpfc_create_shost()
9127 phba->pport = vport; in lpfc_create_shost()
9129 if (phba->nvmet_support) { in lpfc_create_shost()
9131 phba->targetport = NULL; in lpfc_create_shost()
9132 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; in lpfc_create_shost()
9139 pci_set_drvdata(phba->pcidev, shost); in lpfc_create_shost()
9151 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9160 struct lpfc_vport *vport = phba->pport; in lpfc_destroy_shost()
9169 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9182 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { in lpfc_setup_bg()
9187 old_mask = phba->cfg_prot_mask; in lpfc_setup_bg()
9188 old_guard = phba->cfg_prot_guard; in lpfc_setup_bg()
9191 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | in lpfc_setup_bg()
9194 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | in lpfc_setup_bg()
9198 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) in lpfc_setup_bg()
9199 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; in lpfc_setup_bg()
9201 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { in lpfc_setup_bg()
9202 if ((old_mask != phba->cfg_prot_mask) || in lpfc_setup_bg()
9203 (old_guard != phba->cfg_prot_guard)) in lpfc_setup_bg()
9207 phba->cfg_prot_mask, in lpfc_setup_bg()
9208 phba->cfg_prot_guard); in lpfc_setup_bg()
9210 scsi_host_set_prot(shost, phba->cfg_prot_mask); in lpfc_setup_bg()
9211 scsi_host_set_guard(shost, phba->cfg_prot_guard); in lpfc_setup_bg()
9221 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9234 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); in lpfc_post_init_setup()
9240 shost = pci_get_drvdata(phba->pcidev); in lpfc_post_init_setup()
9241 shost->can_queue = phba->cfg_hba_queue_depth - 10; in lpfc_post_init_setup()
9245 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { in lpfc_post_init_setup()
9246 spin_lock_irq(shost->host_lock); in lpfc_post_init_setup()
9248 spin_unlock_irq(shost->host_lock); in lpfc_post_init_setup()
9264 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9268 * with SLI-3 interface spec.
9271 * 0 - successful
9272 * other values - error
9277 struct pci_dev *pdev = phba->pcidev; in lpfc_sli_pci_mem_setup()
9284 return -ENODEV; in lpfc_sli_pci_mem_setup()
9287 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in lpfc_sli_pci_mem_setup()
9289 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in lpfc_sli_pci_mem_setup()
9292 error = -ENODEV; in lpfc_sli_pci_mem_setup()
9297 phba->pci_bar0_map = pci_resource_start(pdev, 0); in lpfc_sli_pci_mem_setup()
9300 phba->pci_bar2_map = pci_resource_start(pdev, 2); in lpfc_sli_pci_mem_setup()
9304 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); in lpfc_sli_pci_mem_setup()
9305 if (!phba->slim_memmap_p) { in lpfc_sli_pci_mem_setup()
9306 dev_printk(KERN_ERR, &pdev->dev, in lpfc_sli_pci_mem_setup()
9312 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); in lpfc_sli_pci_mem_setup()
9313 if (!phba->ctrl_regs_memmap_p) { in lpfc_sli_pci_mem_setup()
9314 dev_printk(KERN_ERR, &pdev->dev, in lpfc_sli_pci_mem_setup()
9319 /* Allocate memory for SLI-2 structures */ in lpfc_sli_pci_mem_setup()
9320 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, in lpfc_sli_pci_mem_setup()
9321 &phba->slim2p.phys, GFP_KERNEL); in lpfc_sli_pci_mem_setup()
9322 if (!phba->slim2p.virt) in lpfc_sli_pci_mem_setup()
9325 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); in lpfc_sli_pci_mem_setup()
9326 phba->mbox_ext = (phba->slim2p.virt + in lpfc_sli_pci_mem_setup()
9328 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); in lpfc_sli_pci_mem_setup()
9329 phba->IOCBs = (phba->slim2p.virt + in lpfc_sli_pci_mem_setup()
9332 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, in lpfc_sli_pci_mem_setup()
9334 &phba->hbqslimp.phys, in lpfc_sli_pci_mem_setup()
9336 if (!phba->hbqslimp.virt) in lpfc_sli_pci_mem_setup()
9340 ptr = phba->hbqslimp.virt; in lpfc_sli_pci_mem_setup()
9342 phba->hbqs[i].hbq_virt = ptr; in lpfc_sli_pci_mem_setup()
9343 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); in lpfc_sli_pci_mem_setup()
9344 ptr += (lpfc_hbq_defs[i]->entry_count * in lpfc_sli_pci_mem_setup()
9347 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; in lpfc_sli_pci_mem_setup()
9348 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; in lpfc_sli_pci_mem_setup()
9350 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); in lpfc_sli_pci_mem_setup()
9352 phba->MBslimaddr = phba->slim_memmap_p; in lpfc_sli_pci_mem_setup()
9353 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; in lpfc_sli_pci_mem_setup()
9354 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; in lpfc_sli_pci_mem_setup()
9355 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; in lpfc_sli_pci_mem_setup()
9356 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; in lpfc_sli_pci_mem_setup()
9361 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, in lpfc_sli_pci_mem_setup()
9362 phba->slim2p.virt, phba->slim2p.phys); in lpfc_sli_pci_mem_setup()
9364 iounmap(phba->ctrl_regs_memmap_p); in lpfc_sli_pci_mem_setup()
9366 iounmap(phba->slim_memmap_p); in lpfc_sli_pci_mem_setup()
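/*
 * Illustration only -- not driver code. The probe path above uses the
 * standard DMA-mask fallback idiom: ask for 64-bit addressing first
 * and retry with 32 bits before giving up. Condensed (kernel context;
 * <linux/dma-mapping.h> is already included at the top of this file):
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* Prefer 64-bit DMA; not every platform or slot supports it. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		/* Fall back to 32-bit addressing. */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;	/* non-zero: no usable mask, abort the probe */
}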
9372 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9376 * with SLI-3 interface spec.
9384 if (!phba->pcidev) in lpfc_sli_pci_mem_unset()
9387 pdev = phba->pcidev; in lpfc_sli_pci_mem_unset()
9390 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), in lpfc_sli_pci_mem_unset()
9391 phba->hbqslimp.virt, phba->hbqslimp.phys); in lpfc_sli_pci_mem_unset()
9392 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, in lpfc_sli_pci_mem_unset()
9393 phba->slim2p.virt, phba->slim2p.phys); in lpfc_sli_pci_mem_unset()
9396 iounmap(phba->ctrl_regs_memmap_p); in lpfc_sli_pci_mem_unset()
9397 iounmap(phba->slim_memmap_p); in lpfc_sli_pci_mem_unset()
9403 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9409 * Return 0 if successful, otherwise -ENODEV.
9421 if (!phba->sli4_hba.PSMPHRregaddr) in lpfc_sli4_post_status_check()
9422 return -ENODEV; in lpfc_sli4_post_status_check()
9426 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, in lpfc_sli4_post_status_check()
9430 port_error = -ENODEV; in lpfc_sli4_post_status_check()
9445 "1408 Port Failed POST - portsmphr=0x%x, " in lpfc_sli4_post_status_check()
9463 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9465 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9467 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9469 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9471 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9473 &phba->sli4_hba.sli_intf)); in lpfc_sli4_post_status_check()
9480 &phba->sli4_hba.sli_intf); in lpfc_sli4_post_status_check()
9483 phba->sli4_hba.ue_mask_lo = in lpfc_sli4_post_status_check()
9484 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); in lpfc_sli4_post_status_check()
9485 phba->sli4_hba.ue_mask_hi = in lpfc_sli4_post_status_check()
9486 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); in lpfc_sli4_post_status_check()
9488 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); in lpfc_sli4_post_status_check()
9490 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); in lpfc_sli4_post_status_check()
9491 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || in lpfc_sli4_post_status_check()
9492 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { in lpfc_sli4_post_status_check()
9503 phba->sli4_hba.ue_mask_lo, in lpfc_sli4_post_status_check()
9504 phba->sli4_hba.ue_mask_hi); in lpfc_sli4_post_status_check()
9505 port_error = -ENODEV; in lpfc_sli4_post_status_check()
9511 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, in lpfc_sli4_post_status_check()
9515 phba->work_status[0] = in lpfc_sli4_post_status_check()
9516 readl(phba->sli4_hba.u.if_type2. in lpfc_sli4_post_status_check()
9518 phba->work_status[1] = in lpfc_sli4_post_status_check()
9519 readl(phba->sli4_hba.u.if_type2. in lpfc_sli4_post_status_check()
9528 phba->work_status[0], in lpfc_sli4_post_status_check()
9529 phba->work_status[1]); in lpfc_sli4_post_status_check()
9530 port_error = -ENODEV; in lpfc_sli4_post_status_check()
9536 &phba->sli4_hba.sli_intf) == in lpfc_sli4_post_status_check()
9538 pci_write_config_byte(phba->pcidev, in lpfc_sli4_post_status_check()
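/*
 * Illustration only -- not driver code. The unrecoverable-error test
 * above, isolated: a UE status bit is significant only when the
 * matching mask bit is clear, hence the ~mask & status expression.
 */
#include <stdint.h>
#include <stdio.h>

static int ue_fatal(uint32_t status, uint32_t mask)
{
	return (~mask & status) != 0;	/* any unmasked error bit set? */
}

int main(void)
{
	printf("%d\n", ue_fatal(0x8, 0x8));	/* masked: prints 0 */
	printf("%d\n", ue_fatal(0x2, 0x8));	/* unmasked: prints 1 */
	return 0;
}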
9550 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9562 phba->sli4_hba.u.if_type0.UERRLOregaddr = in lpfc_sli4_bar0_register_memmap()
9563 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; in lpfc_sli4_bar0_register_memmap()
9564 phba->sli4_hba.u.if_type0.UERRHIregaddr = in lpfc_sli4_bar0_register_memmap()
9565 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; in lpfc_sli4_bar0_register_memmap()
9566 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = in lpfc_sli4_bar0_register_memmap()
9567 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; in lpfc_sli4_bar0_register_memmap()
9568 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = in lpfc_sli4_bar0_register_memmap()
9569 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; in lpfc_sli4_bar0_register_memmap()
9570 phba->sli4_hba.SLIINTFregaddr = in lpfc_sli4_bar0_register_memmap()
9571 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; in lpfc_sli4_bar0_register_memmap()
9574 phba->sli4_hba.u.if_type2.EQDregaddr = in lpfc_sli4_bar0_register_memmap()
9575 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9577 phba->sli4_hba.u.if_type2.ERR1regaddr = in lpfc_sli4_bar0_register_memmap()
9578 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9580 phba->sli4_hba.u.if_type2.ERR2regaddr = in lpfc_sli4_bar0_register_memmap()
9581 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9583 phba->sli4_hba.u.if_type2.CTRLregaddr = in lpfc_sli4_bar0_register_memmap()
9584 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9586 phba->sli4_hba.u.if_type2.STATUSregaddr = in lpfc_sli4_bar0_register_memmap()
9587 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9589 phba->sli4_hba.SLIINTFregaddr = in lpfc_sli4_bar0_register_memmap()
9590 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; in lpfc_sli4_bar0_register_memmap()
9591 phba->sli4_hba.PSMPHRregaddr = in lpfc_sli4_bar0_register_memmap()
9592 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9594 phba->sli4_hba.RQDBregaddr = in lpfc_sli4_bar0_register_memmap()
9595 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9597 phba->sli4_hba.WQDBregaddr = in lpfc_sli4_bar0_register_memmap()
9598 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9600 phba->sli4_hba.CQDBregaddr = in lpfc_sli4_bar0_register_memmap()
9601 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; in lpfc_sli4_bar0_register_memmap()
9602 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; in lpfc_sli4_bar0_register_memmap()
9603 phba->sli4_hba.MQDBregaddr = in lpfc_sli4_bar0_register_memmap()
9604 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; in lpfc_sli4_bar0_register_memmap()
9605 phba->sli4_hba.BMBXregaddr = in lpfc_sli4_bar0_register_memmap()
9606 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; in lpfc_sli4_bar0_register_memmap()
9609 phba->sli4_hba.u.if_type2.EQDregaddr = in lpfc_sli4_bar0_register_memmap()
9610 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9612 phba->sli4_hba.u.if_type2.ERR1regaddr = in lpfc_sli4_bar0_register_memmap()
9613 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9615 phba->sli4_hba.u.if_type2.ERR2regaddr = in lpfc_sli4_bar0_register_memmap()
9616 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9618 phba->sli4_hba.u.if_type2.CTRLregaddr = in lpfc_sli4_bar0_register_memmap()
9619 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9621 phba->sli4_hba.u.if_type2.STATUSregaddr = in lpfc_sli4_bar0_register_memmap()
9622 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9624 phba->sli4_hba.PSMPHRregaddr = in lpfc_sli4_bar0_register_memmap()
9625 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9627 phba->sli4_hba.BMBXregaddr = in lpfc_sli4_bar0_register_memmap()
9628 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; in lpfc_sli4_bar0_register_memmap()
9632 dev_printk(KERN_ERR, &phba->pcidev->dev, in lpfc_sli4_bar0_register_memmap()
9633 "FATAL - unsupported SLI4 interface type - %d\n", in lpfc_sli4_bar0_register_memmap()
9640 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9651 phba->sli4_hba.PSMPHRregaddr = in lpfc_sli4_bar1_register_memmap()
9652 phba->sli4_hba.ctrl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9654 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9656 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9658 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9662 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9664 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9666 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9668 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9670 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9676 dev_err(&phba->pcidev->dev, in lpfc_sli4_bar1_register_memmap()
9677 "FATAL - unsupported SLI4 interface type - %d\n", in lpfc_sli4_bar1_register_memmap()
9684 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9691 * Return 0 if successful, otherwise -ENODEV.
9697 return -ENODEV; in lpfc_sli4_bar2_register_memmap()
9699 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9702 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9705 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9708 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; in lpfc_sli4_bar2_register_memmap()
9709 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9711 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9717 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9721 * region consistent with the SLI-4 interface spec. This
9728 * 0 - successful
9729  * 	-ENOMEM - could not allocate memory.
9742 return -ENOMEM; in lpfc_create_bootstrap_mbox()
9748 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); in lpfc_create_bootstrap_mbox()
9749 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, in lpfc_create_bootstrap_mbox()
9750 &dmabuf->phys, GFP_KERNEL); in lpfc_create_bootstrap_mbox()
9751 if (!dmabuf->virt) { in lpfc_create_bootstrap_mbox()
9753 return -ENOMEM; in lpfc_create_bootstrap_mbox()
9759 * to be 16-byte aligned. Also align the virtual memory as each in lpfc_create_bootstrap_mbox()
9763 phba->sli4_hba.bmbx.dmabuf = dmabuf; in lpfc_create_bootstrap_mbox()
9764 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; in lpfc_create_bootstrap_mbox()
9766 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, in lpfc_create_bootstrap_mbox()
9768 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, in lpfc_create_bootstrap_mbox()
9774 * as two 30-bit addresses. The other data is a bit marking whether in lpfc_create_bootstrap_mbox()
9775 * the 30-bit address is the high or low address. in lpfc_create_bootstrap_mbox()
9779 dma_address = &phba->sli4_hba.bmbx.dma_address; in lpfc_create_bootstrap_mbox()
9780 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; in lpfc_create_bootstrap_mbox()
9782 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | in lpfc_create_bootstrap_mbox()
9785 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); in lpfc_create_bootstrap_mbox()
9786 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | in lpfc_create_bootstrap_mbox()
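/*
 * Illustration only -- not driver code. The 30-bit split above as a
 * worked calculation: a 16-byte-aligned physical address is cut into
 * phys[33:4] (low word) and phys[63:34] (high word), and each piece is
 * shifted left by 2 so the bottom bits can tag it as the high or low
 * half. The tag values used here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define TAG_HI 0x1u
#define TAG_LO 0x0u

int main(void)
{
	uint64_t phys = (5ULL << 34) | 0x1230;	/* 16-byte aligned */
	uint32_t hi = (uint32_t)((phys >> 34) & 0x3fffffff);	/* 5 */
	uint32_t lo = (uint32_t)((phys >> 4) & 0x3fffffff);	/* 0x123 */

	printf("addr_hi=%#x addr_lo=%#x\n",
	       (hi << 2) | TAG_HI,	/* 0x15 */
	       (lo << 2) | TAG_LO);	/* 0x48c */
	return 0;
}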
9792 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9805 dma_free_coherent(&phba->pcidev->dev, in lpfc_destroy_bootstrap_mbox()
9806 phba->sli4_hba.bmbx.bmbx_size, in lpfc_destroy_bootstrap_mbox()
9807 phba->sli4_hba.bmbx.dmabuf->virt, in lpfc_destroy_bootstrap_mbox()
9808 phba->sli4_hba.bmbx.dmabuf->phys); in lpfc_destroy_bootstrap_mbox()
9810 kfree(phba->sli4_hba.bmbx.dmabuf); in lpfc_destroy_bootstrap_mbox()
9811 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); in lpfc_destroy_bootstrap_mbox()
9828 * lpfc_map_topology - Map the topology read from READ_CONFIG
9853 lpfc_topo_to_str[phba->cfg_topology]); in lpfc_map_topology()
9856 /* FW supports persistent topology - override module parameter value */ in lpfc_map_topology()
9857 phba->hba_flag |= HBA_PERSISTENT_TOPO; in lpfc_map_topology()
9860 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == in lpfc_map_topology()
9862 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == in lpfc_map_topology()
9865 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) in lpfc_map_topology()
9869 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; in lpfc_map_topology()
9873 /* If topology failover set - pt is '0' or '1' */ in lpfc_map_topology()
9874 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : in lpfc_map_topology()
9877 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) in lpfc_map_topology()
9882 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { in lpfc_map_topology()
9885 lpfc_topo_to_str[phba->cfg_topology]); in lpfc_map_topology()
9890 lpfc_topo_to_str[phba->cfg_topology]); in lpfc_map_topology()
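/*
 * Illustration only -- not driver code, and an approximation: the
 * persistent-topology decision above as a table. 'tf' is the
 * topology-failover flag and 'pt_is_p2p' the persistent-topology bits
 * read from READ_CONFIG; the enum names are hypothetical.
 */
enum ex_topo { EX_P2P, EX_LOOP, EX_PT_LOOP, EX_LOOP_PT };

static enum ex_topo ex_map_topology(int tf, int pt_is_p2p)
{
	if (tf)		/* failover: try one topology, fall back to the other */
		return pt_is_p2p ? EX_PT_LOOP : EX_LOOP_PT;
	return pt_is_p2p ? EX_P2P : EX_LOOP;	/* fixed topology */
}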
9895 * lpfc_sli4_read_config - Get the config parameters.
9904 * 0 - successful
9905 * -ENOMEM - No available memory
9906 * -EIO - The mailbox failed to complete successfully.
9922 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli4_read_config()
9927 return -ENOMEM; in lpfc_sli4_read_config()
9937 bf_get(lpfc_mqe_command, &pmb->u.mqe), in lpfc_sli4_read_config()
9938 bf_get(lpfc_mqe_status, &pmb->u.mqe)); in lpfc_sli4_read_config()
9939 rc = -EIO; in lpfc_sli4_read_config()
9941 rd_config = &pmb->u.mqe.un.rd_config; in lpfc_sli4_read_config()
9943 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; in lpfc_sli4_read_config()
9944 phba->sli4_hba.lnk_info.lnk_tp = in lpfc_sli4_read_config()
9946 phba->sli4_hba.lnk_info.lnk_no = in lpfc_sli4_read_config()
9950 phba->sli4_hba.lnk_info.lnk_tp, in lpfc_sli4_read_config()
9951 phba->sli4_hba.lnk_info.lnk_no); in lpfc_sli4_read_config()
9955 bf_get(lpfc_mqe_command, &pmb->u.mqe)); in lpfc_sli4_read_config()
9957 phba->bbcredit_support = 1; in lpfc_sli4_read_config()
9958 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; in lpfc_sli4_read_config()
9966 "2702 READ_CONFIG: FA-PWWN is " in lpfc_sli4_read_config()
9968 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG; in lpfc_sli4_read_config()
9971 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG; in lpfc_sli4_read_config()
9974 phba->sli4_hba.conf_trunk = in lpfc_sli4_read_config()
9976 phba->sli4_hba.extents_in_use = in lpfc_sli4_read_config()
9979 phba->sli4_hba.max_cfg_param.max_xri = in lpfc_sli4_read_config()
9983 phba->sli4_hba.max_cfg_param.max_xri > 512) in lpfc_sli4_read_config()
9984 phba->sli4_hba.max_cfg_param.max_xri = 512; in lpfc_sli4_read_config()
9985 phba->sli4_hba.max_cfg_param.xri_base = in lpfc_sli4_read_config()
9987 phba->sli4_hba.max_cfg_param.max_vpi = in lpfc_sli4_read_config()
9990 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) in lpfc_sli4_read_config()
9991 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; in lpfc_sli4_read_config()
9992 phba->sli4_hba.max_cfg_param.vpi_base = in lpfc_sli4_read_config()
9994 phba->sli4_hba.max_cfg_param.max_rpi = in lpfc_sli4_read_config()
9996 phba->sli4_hba.max_cfg_param.rpi_base = in lpfc_sli4_read_config()
9998 phba->sli4_hba.max_cfg_param.max_vfi = in lpfc_sli4_read_config()
10000 phba->sli4_hba.max_cfg_param.vfi_base = in lpfc_sli4_read_config()
10002 phba->sli4_hba.max_cfg_param.max_fcfi = in lpfc_sli4_read_config()
10004 phba->sli4_hba.max_cfg_param.max_eq = in lpfc_sli4_read_config()
10006 phba->sli4_hba.max_cfg_param.max_rq = in lpfc_sli4_read_config()
10008 phba->sli4_hba.max_cfg_param.max_wq = in lpfc_sli4_read_config()
10010 phba->sli4_hba.max_cfg_param.max_cq = in lpfc_sli4_read_config()
10012 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); in lpfc_sli4_read_config()
10013 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; in lpfc_sli4_read_config()
10014 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; in lpfc_sli4_read_config()
10015 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; in lpfc_sli4_read_config()
10016 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? in lpfc_sli4_read_config()
10017 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; in lpfc_sli4_read_config()
10018 phba->max_vports = phba->max_vpi; in lpfc_sli4_read_config()
10029 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; in lpfc_sli4_read_config()
10030 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; in lpfc_sli4_read_config()
10031 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; in lpfc_sli4_read_config()
10035 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; in lpfc_sli4_read_config()
10036 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; in lpfc_sli4_read_config()
10042 if (phba->cgn_reg_signal != in lpfc_sli4_read_config()
10045 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; in lpfc_sli4_read_config()
10046 phba->cgn_reg_signal = in lpfc_sli4_read_config()
10049 phba->cgn_reg_signal = in lpfc_sli4_read_config()
10051 phba->cgn_reg_fpin = in lpfc_sli4_read_config()
10058 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; in lpfc_sli4_read_config()
10059 phba->cgn_init_reg_signal = phba->cgn_reg_signal; in lpfc_sli4_read_config()
10063 phba->cgn_reg_signal, phba->cgn_reg_fpin); in lpfc_sli4_read_config()
10073 phba->sli4_hba.extents_in_use, in lpfc_sli4_read_config()
10074 phba->sli4_hba.max_cfg_param.xri_base, in lpfc_sli4_read_config()
10075 phba->sli4_hba.max_cfg_param.max_xri, in lpfc_sli4_read_config()
10076 phba->sli4_hba.max_cfg_param.vpi_base, in lpfc_sli4_read_config()
10077 phba->sli4_hba.max_cfg_param.max_vpi, in lpfc_sli4_read_config()
10078 phba->sli4_hba.max_cfg_param.vfi_base, in lpfc_sli4_read_config()
10079 phba->sli4_hba.max_cfg_param.max_vfi, in lpfc_sli4_read_config()
10080 phba->sli4_hba.max_cfg_param.rpi_base, in lpfc_sli4_read_config()
10081 phba->sli4_hba.max_cfg_param.max_rpi, in lpfc_sli4_read_config()
10082 phba->sli4_hba.max_cfg_param.max_fcfi, in lpfc_sli4_read_config()
10083 phba->sli4_hba.max_cfg_param.max_eq, in lpfc_sli4_read_config()
10084 phba->sli4_hba.max_cfg_param.max_cq, in lpfc_sli4_read_config()
10085 phba->sli4_hba.max_cfg_param.max_wq, in lpfc_sli4_read_config()
10086 phba->sli4_hba.max_cfg_param.max_rq, in lpfc_sli4_read_config()
10087 phba->lmt); in lpfc_sli4_read_config()
10093 qmin = phba->sli4_hba.max_cfg_param.max_wq; in lpfc_sli4_read_config()
10094 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) in lpfc_sli4_read_config()
10095 qmin = phba->sli4_hba.max_cfg_param.max_cq; in lpfc_sli4_read_config()
10096 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) in lpfc_sli4_read_config()
10097 qmin = phba->sli4_hba.max_cfg_param.max_eq; in lpfc_sli4_read_config()
10104 qmin -= 4; in lpfc_sli4_read_config()
10107 if ((phba->cfg_irq_chann > qmin) || in lpfc_sli4_read_config()
10108 (phba->cfg_hdw_queue > qmin)) { in lpfc_sli4_read_config()
10110 "2005 Reducing Queues - " in lpfc_sli4_read_config()
10114 phba->sli4_hba.max_cfg_param.max_wq, in lpfc_sli4_read_config()
10115 phba->sli4_hba.max_cfg_param.max_cq, in lpfc_sli4_read_config()
10116 phba->sli4_hba.max_cfg_param.max_eq, in lpfc_sli4_read_config()
10117 qmin, phba->cfg_irq_chann, in lpfc_sli4_read_config()
10118 phba->cfg_hdw_queue); in lpfc_sli4_read_config()
10120 if (phba->cfg_irq_chann > qmin) in lpfc_sli4_read_config()
10121 phba->cfg_irq_chann = qmin; in lpfc_sli4_read_config()
10122 if (phba->cfg_hdw_queue > qmin) in lpfc_sli4_read_config()
10123 phba->cfg_hdw_queue = qmin; in lpfc_sli4_read_config()
10131 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); in lpfc_sli4_read_config()
10136 phba->hba_flag |= HBA_FORCED_LINK_SPEED; in lpfc_sli4_read_config()
10140 phba->cfg_link_speed = in lpfc_sli4_read_config()
10144 phba->cfg_link_speed = in lpfc_sli4_read_config()
10148 phba->cfg_link_speed = in lpfc_sli4_read_config()
10152 phba->cfg_link_speed = in lpfc_sli4_read_config()
10156 phba->cfg_link_speed = in lpfc_sli4_read_config()
10160 phba->cfg_link_speed = in lpfc_sli4_read_config()
10164 phba->cfg_link_speed = in lpfc_sli4_read_config()
10168 phba->cfg_link_speed = in lpfc_sli4_read_config()
10172 phba->cfg_link_speed = in lpfc_sli4_read_config()
10181 phba->cfg_link_speed = in lpfc_sli4_read_config()
10188 length = phba->sli4_hba.max_cfg_param.max_xri - in lpfc_sli4_read_config()
10190 if (phba->cfg_hba_queue_depth > length) { in lpfc_sli4_read_config()
10193 phba->cfg_hba_queue_depth, length); in lpfc_sli4_read_config()
10194 phba->cfg_hba_queue_depth = length; in lpfc_sli4_read_config()
10197 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < in lpfc_sli4_read_config()
10202 length = (sizeof(struct lpfc_mbx_get_func_cfg) - in lpfc_sli4_read_config()
10210 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; in lpfc_sli4_read_config()
10211 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); in lpfc_sli4_read_config()
10212 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); in lpfc_sli4_read_config()
10217 bf_get(lpfc_mqe_command, &pmb->u.mqe), in lpfc_sli4_read_config()
10218 bf_get(lpfc_mqe_status, &pmb->u.mqe)); in lpfc_sli4_read_config()
10223 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; in lpfc_sli4_read_config()
10225 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; in lpfc_sli4_read_config()
10237 phba->sli4_hba.iov.pf_number = in lpfc_sli4_read_config()
10239 phba->sli4_hba.iov.vf_number = in lpfc_sli4_read_config()
10248 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, in lpfc_sli4_read_config()
10249 phba->sli4_hba.iov.vf_number); in lpfc_sli4_read_config()
10257 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_sli4_read_config()
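/*
 * Illustration only -- not driver code. The queue-budget clamp inside
 * lpfc_sli4_read_config() as a standalone calculation: take the
 * smallest of the WQ/CQ/EQ limits, subtract the handful of queues
 * reserved for the slow path, and clamp the requested IRQ and
 * hardware-queue counts to what is left. Input numbers are made up.
 */
#include <stdio.h>

static int min3i(int a, int b, int c)
{
	int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	int max_wq = 128, max_cq = 120, max_eq = 64;	/* from READ_CONFIG */
	int qmin = min3i(max_wq, max_cq, max_eq) - 4;	/* slow-path reserve */
	int irq_chann = 80, hdw_queue = 80;		/* requested */

	if (irq_chann > qmin)
		irq_chann = qmin;
	if (hdw_queue > qmin)
		hdw_queue = qmin;
	printf("qmin=%d irq=%d hdwq=%d\n", qmin, irq_chann, hdw_queue);
	return 0;
}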
10262 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10265 * This routine is invoked to setup the port-side endian order when
10270 * 0 - successful
10271 * -ENOMEM - No available memory
10272 * -EIO - The mailbox failed to complete successfully.
10282 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); in lpfc_setup_endian_order()
10285 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, in lpfc_setup_endian_order()
10292 return -ENOMEM; in lpfc_setup_endian_order()
10300 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); in lpfc_setup_endian_order()
10307 rc = -EIO; in lpfc_setup_endian_order()
10309 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_setup_endian_order()
10321 * lpfc_sli4_queue_verify - Verify and update EQ counts
10330 * 0 - successful
10331 * -ENOMEM - No available memory
10337 * Sanity check for configured queue parameters against the run-time in lpfc_sli4_queue_verify()
10341 if (phba->nvmet_support) { in lpfc_sli4_queue_verify()
10342 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) in lpfc_sli4_queue_verify()
10343 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; in lpfc_sli4_queue_verify()
10344 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) in lpfc_sli4_queue_verify()
10345 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; in lpfc_sli4_queue_verify()
10350 phba->cfg_hdw_queue, phba->cfg_irq_chann, in lpfc_sli4_queue_verify()
10351 phba->cfg_nvmet_mrq); in lpfc_sli4_queue_verify()
10354 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; in lpfc_sli4_queue_verify()
10355 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; in lpfc_sli4_queue_verify()
10358 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; in lpfc_sli4_queue_verify()
10359 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; in lpfc_sli4_queue_verify()
10368 int cpu; in lpfc_alloc_io_wq_cq() local
10370 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); in lpfc_alloc_io_wq_cq()
10372 if (phba->enab_exp_wqcq_pages) in lpfc_alloc_io_wq_cq()
10375 phba->sli4_hba.cq_esize, in lpfc_alloc_io_wq_cq()
10376 LPFC_CQE_EXP_COUNT, cpu); in lpfc_alloc_io_wq_cq()
10380 phba->sli4_hba.cq_esize, in lpfc_alloc_io_wq_cq()
10381 phba->sli4_hba.cq_ecount, cpu); in lpfc_alloc_io_wq_cq()
10384 "0499 Failed allocate fast-path IO CQ (%d)\n", in lpfc_alloc_io_wq_cq()
10388 qdesc->qe_valid = 1; in lpfc_alloc_io_wq_cq()
10389 qdesc->hdwq = idx; in lpfc_alloc_io_wq_cq()
10390 qdesc->chann = cpu; in lpfc_alloc_io_wq_cq()
10391 phba->sli4_hba.hdwq[idx].io_cq = qdesc; in lpfc_alloc_io_wq_cq()
10394 if (phba->enab_exp_wqcq_pages) { in lpfc_alloc_io_wq_cq()
10396 wqesize = (phba->fcp_embed_io) ? in lpfc_alloc_io_wq_cq()
10397 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; in lpfc_alloc_io_wq_cq()
10400 LPFC_WQE_EXP_COUNT, cpu); in lpfc_alloc_io_wq_cq()
10403 phba->sli4_hba.wq_esize, in lpfc_alloc_io_wq_cq()
10404 phba->sli4_hba.wq_ecount, cpu); in lpfc_alloc_io_wq_cq()
10408 "0503 Failed allocate fast-path IO WQ (%d)\n", in lpfc_alloc_io_wq_cq()
10412 qdesc->hdwq = idx; in lpfc_alloc_io_wq_cq()
10413 qdesc->chann = cpu; in lpfc_alloc_io_wq_cq()
10414 phba->sli4_hba.hdwq[idx].io_wq = qdesc; in lpfc_alloc_io_wq_cq()
10415 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); in lpfc_alloc_io_wq_cq()
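/*
 * Illustration only -- not driver code. The sizing choice made in
 * lpfc_alloc_io_wq_cq(): expanded WQ/CQ pages select a larger entry
 * count, and embedded FCP I/O forces 128-byte WQEs. The numbers below
 * are placeholders, not the driver's real constants.
 */
struct ex_wq_geometry { unsigned int esize, ecount; };

static struct ex_wq_geometry ex_pick_wq_geometry(int exp_pages,
						 int fcp_embed_io)
{
	struct ex_wq_geometry g;

	if (exp_pages) {
		g.esize = fcp_embed_io ? 128 : 64;	/* WQE128 vs default */
		g.ecount = 4096;			/* expanded count */
	} else {
		g.esize = 64;				/* default entry size */
		g.ecount = 256;				/* default count */
	}
	return g;
}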
10420 * lpfc_sli4_queue_create - Create all the SLI4 queues
10429 * 0 - successful
10430  * 	-ENOMEM - No available memory
10431 * -EIO - The mailbox failed to complete successfully.
10437 int idx, cpu, eqcpu; in lpfc_sli4_queue_create() local
10447 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; in lpfc_sli4_queue_create()
10448 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; in lpfc_sli4_queue_create()
10449 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; in lpfc_sli4_queue_create()
10450 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; in lpfc_sli4_queue_create()
10451 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; in lpfc_sli4_queue_create()
10452 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; in lpfc_sli4_queue_create()
10453 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; in lpfc_sli4_queue_create()
10454 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; in lpfc_sli4_queue_create()
10455 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; in lpfc_sli4_queue_create()
10456 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; in lpfc_sli4_queue_create()
10458 if (!phba->sli4_hba.hdwq) { in lpfc_sli4_queue_create()
10459 phba->sli4_hba.hdwq = kcalloc( in lpfc_sli4_queue_create()
10460 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), in lpfc_sli4_queue_create()
10462 if (!phba->sli4_hba.hdwq) { in lpfc_sli4_queue_create()
10465 "fast-path Hardware Queue array\n"); in lpfc_sli4_queue_create()
10469 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_queue_create()
10470 qp = &phba->sli4_hba.hdwq[idx]; in lpfc_sli4_queue_create()
10471 spin_lock_init(&qp->io_buf_list_get_lock); in lpfc_sli4_queue_create()
10472 spin_lock_init(&qp->io_buf_list_put_lock); in lpfc_sli4_queue_create()
10473 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); in lpfc_sli4_queue_create()
10474 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); in lpfc_sli4_queue_create()
10475 qp->get_io_bufs = 0; in lpfc_sli4_queue_create()
10476 qp->put_io_bufs = 0; in lpfc_sli4_queue_create()
10477 qp->total_io_bufs = 0; in lpfc_sli4_queue_create()
10478 spin_lock_init(&qp->abts_io_buf_list_lock); in lpfc_sli4_queue_create()
10479 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); in lpfc_sli4_queue_create()
10480 qp->abts_scsi_io_bufs = 0; in lpfc_sli4_queue_create()
10481 qp->abts_nvme_io_bufs = 0; in lpfc_sli4_queue_create()
10482 INIT_LIST_HEAD(&qp->sgl_list); in lpfc_sli4_queue_create()
10483 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); in lpfc_sli4_queue_create()
10484 spin_lock_init(&qp->hdwq_lock); in lpfc_sli4_queue_create()
10488 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_queue_create()
10489 if (phba->nvmet_support) { in lpfc_sli4_queue_create()
10490 phba->sli4_hba.nvmet_cqset = kcalloc( in lpfc_sli4_queue_create()
10491 phba->cfg_nvmet_mrq, in lpfc_sli4_queue_create()
10494 if (!phba->sli4_hba.nvmet_cqset) { in lpfc_sli4_queue_create()
10497 "fast-path CQ set array\n"); in lpfc_sli4_queue_create()
10500 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( in lpfc_sli4_queue_create()
10501 phba->cfg_nvmet_mrq, in lpfc_sli4_queue_create()
10504 if (!phba->sli4_hba.nvmet_mrq_hdr) { in lpfc_sli4_queue_create()
10507 "fast-path RQ set hdr array\n"); in lpfc_sli4_queue_create()
10510 phba->sli4_hba.nvmet_mrq_data = kcalloc( in lpfc_sli4_queue_create()
10511 phba->cfg_nvmet_mrq, in lpfc_sli4_queue_create()
10514 if (!phba->sli4_hba.nvmet_mrq_data) { in lpfc_sli4_queue_create()
10517 "fast-path RQ set data array\n"); in lpfc_sli4_queue_create()
10523 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); in lpfc_sli4_queue_create()
10526 for_each_present_cpu(cpu) { in lpfc_sli4_queue_create()
10531 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_queue_create()
10532 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) in lpfc_sli4_queue_create()
10535 /* Get a ptr to the Hardware Queue associated with this CPU */ in lpfc_sli4_queue_create()
10536 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; in lpfc_sli4_queue_create()
10540 phba->sli4_hba.eq_esize, in lpfc_sli4_queue_create()
10541 phba->sli4_hba.eq_ecount, cpu); in lpfc_sli4_queue_create()
10545 cpup->hdwq); in lpfc_sli4_queue_create()
10548 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10549 qdesc->hdwq = cpup->hdwq; in lpfc_sli4_queue_create()
10550 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ in lpfc_sli4_queue_create()
10551 qdesc->last_cpu = qdesc->chann; in lpfc_sli4_queue_create()
10554 qp->hba_eq = qdesc; in lpfc_sli4_queue_create()
10556 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); in lpfc_sli4_queue_create()
10557 list_add(&qdesc->cpu_list, &eqi->list); in lpfc_sli4_queue_create()
10563 for_each_present_cpu(cpu) { in lpfc_sli4_queue_create()
10564 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_queue_create()
10567 if (cpup->flag & LPFC_CPU_FIRST_IRQ) in lpfc_sli4_queue_create()
10571 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; in lpfc_sli4_queue_create()
10572 if (qp->hba_eq) in lpfc_sli4_queue_create()
10576 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); in lpfc_sli4_queue_create()
10577 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; in lpfc_sli4_queue_create()
10578 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; in lpfc_sli4_queue_create()
10582 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_queue_create()
10587 if (phba->nvmet_support) { in lpfc_sli4_queue_create()
10588 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { in lpfc_sli4_queue_create()
10589 cpu = lpfc_find_cpu_handle(phba, idx, in lpfc_sli4_queue_create()
10593 phba->sli4_hba.cq_esize, in lpfc_sli4_queue_create()
10594 phba->sli4_hba.cq_ecount, in lpfc_sli4_queue_create()
10595 cpu); in lpfc_sli4_queue_create()
10602 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10603 qdesc->hdwq = idx; in lpfc_sli4_queue_create()
10604 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10605 phba->sli4_hba.nvmet_cqset[idx] = qdesc; in lpfc_sli4_queue_create()
10613 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); in lpfc_sli4_queue_create()
10614 /* Create slow-path Mailbox Command Complete Queue */ in lpfc_sli4_queue_create()
10616 phba->sli4_hba.cq_esize, in lpfc_sli4_queue_create()
10617 phba->sli4_hba.cq_ecount, cpu); in lpfc_sli4_queue_create()
10620 "0500 Failed allocate slow-path mailbox CQ\n"); in lpfc_sli4_queue_create()
10623 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10624 phba->sli4_hba.mbx_cq = qdesc; in lpfc_sli4_queue_create()
10626 /* Create slow-path ELS Complete Queue */ in lpfc_sli4_queue_create()
10628 phba->sli4_hba.cq_esize, in lpfc_sli4_queue_create()
10629 phba->sli4_hba.cq_ecount, cpu); in lpfc_sli4_queue_create()
10632 "0501 Failed allocate slow-path ELS CQ\n"); in lpfc_sli4_queue_create()
10635 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10636 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10637 phba->sli4_hba.els_cq = qdesc; in lpfc_sli4_queue_create()
10647 phba->sli4_hba.mq_esize, in lpfc_sli4_queue_create()
10648 phba->sli4_hba.mq_ecount, cpu); in lpfc_sli4_queue_create()
10651 "0505 Failed allocate slow-path MQ\n"); in lpfc_sli4_queue_create()
10654 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10655 phba->sli4_hba.mbx_wq = qdesc; in lpfc_sli4_queue_create()
10661 /* Create slow-path ELS Work Queue */ in lpfc_sli4_queue_create()
10663 phba->sli4_hba.wq_esize, in lpfc_sli4_queue_create()
10664 phba->sli4_hba.wq_ecount, cpu); in lpfc_sli4_queue_create()
10667 "0504 Failed allocate slow-path ELS WQ\n"); in lpfc_sli4_queue_create()
10670 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10671 phba->sli4_hba.els_wq = qdesc; in lpfc_sli4_queue_create()
10672 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); in lpfc_sli4_queue_create()
10674 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_queue_create()
10677 phba->sli4_hba.cq_esize, in lpfc_sli4_queue_create()
10678 phba->sli4_hba.cq_ecount, cpu); in lpfc_sli4_queue_create()
10684 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10685 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10686 phba->sli4_hba.nvmels_cq = qdesc; in lpfc_sli4_queue_create()
10690 phba->sli4_hba.wq_esize, in lpfc_sli4_queue_create()
10691 phba->sli4_hba.wq_ecount, cpu); in lpfc_sli4_queue_create()
10697 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10698 phba->sli4_hba.nvmels_wq = qdesc; in lpfc_sli4_queue_create()
10699 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); in lpfc_sli4_queue_create()
10708 phba->sli4_hba.rq_esize, in lpfc_sli4_queue_create()
10709 phba->sli4_hba.rq_ecount, cpu); in lpfc_sli4_queue_create()
10715 phba->sli4_hba.hdr_rq = qdesc; in lpfc_sli4_queue_create()
10719 phba->sli4_hba.rq_esize, in lpfc_sli4_queue_create()
10720 phba->sli4_hba.rq_ecount, cpu); in lpfc_sli4_queue_create()
10726 phba->sli4_hba.dat_rq = qdesc; in lpfc_sli4_queue_create()
10728 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && in lpfc_sli4_queue_create()
10729 phba->nvmet_support) { in lpfc_sli4_queue_create()
10730 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { in lpfc_sli4_queue_create()
10731 cpu = lpfc_find_cpu_handle(phba, idx, in lpfc_sli4_queue_create()
10736 phba->sli4_hba.rq_esize, in lpfc_sli4_queue_create()
10738 cpu); in lpfc_sli4_queue_create()
10745 qdesc->hdwq = idx; in lpfc_sli4_queue_create()
10746 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; in lpfc_sli4_queue_create()
10749 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), in lpfc_sli4_queue_create()
10751 cpu_to_node(cpu)); in lpfc_sli4_queue_create()
10752 if (qdesc->rqbp == NULL) { in lpfc_sli4_queue_create()
10760 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); in lpfc_sli4_queue_create()
10765 phba->sli4_hba.rq_esize, in lpfc_sli4_queue_create()
10767 cpu); in lpfc_sli4_queue_create()
10774 qdesc->hdwq = idx; in lpfc_sli4_queue_create()
10775 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; in lpfc_sli4_queue_create()
10780 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_queue_create()
10781 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_queue_create()
10782 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, in lpfc_sli4_queue_create()
10783 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); in lpfc_sli4_queue_create()
10788 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { in lpfc_sli4_queue_create()
10789 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_queue_create()
10790 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, in lpfc_sli4_queue_create()
10791 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); in lpfc_sli4_queue_create()
10799 return -ENOMEM; in lpfc_sli4_queue_create()
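/*
 * Illustration only -- not driver code. Throughout
 * lpfc_sli4_queue_create() every queue is tied to a CPU so its memory
 * can be placed on that CPU's NUMA node. The idiom, condensed (kernel
 * context; assumes <linux/slab.h> and <linux/topology.h>):
 */
static void *example_alloc_for_cpu(size_t size, int cpu)
{
	/* cpu_to_node() resolves the NUMA node backing this CPU */
	return kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}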
10833 hdwq = phba->sli4_hba.hdwq; in lpfc_sli4_release_hdwq()
10836 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_release_hdwq()
10843 if (phba->cfg_xpsgl && !phba->nvmet_support) in lpfc_sli4_release_hdwq()
10848 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { in lpfc_sli4_release_hdwq()
10850 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; in lpfc_sli4_release_hdwq()
10852 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; in lpfc_sli4_release_hdwq()
10857 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10864 * 0 - successful
10865 * -ENOMEM - No available memory
10866 * -EIO - The mailbox failed to complete successfully.
10876 spin_lock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10877 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; in lpfc_sli4_queue_destroy()
10878 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { in lpfc_sli4_queue_destroy()
10879 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10881 spin_lock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10883 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10888 if (phba->sli4_hba.hdwq) in lpfc_sli4_queue_destroy()
10891 if (phba->nvmet_support) { in lpfc_sli4_queue_destroy()
10892 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, in lpfc_sli4_queue_destroy()
10893 phba->cfg_nvmet_mrq); in lpfc_sli4_queue_destroy()
10895 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, in lpfc_sli4_queue_destroy()
10896 phba->cfg_nvmet_mrq); in lpfc_sli4_queue_destroy()
10897 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, in lpfc_sli4_queue_destroy()
10898 phba->cfg_nvmet_mrq); in lpfc_sli4_queue_destroy()
10902 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); in lpfc_sli4_queue_destroy()
10905 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); in lpfc_sli4_queue_destroy()
10908 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); in lpfc_sli4_queue_destroy()
10911 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); in lpfc_sli4_queue_destroy()
10912 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); in lpfc_sli4_queue_destroy()
10915 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); in lpfc_sli4_queue_destroy()
10918 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); in lpfc_sli4_queue_destroy()
10921 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); in lpfc_sli4_queue_destroy()
10924 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); in lpfc_sli4_queue_destroy()
10927 spin_lock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10928 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; in lpfc_sli4_queue_destroy()
10929 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
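/*
 * Illustration only -- not driver code. The guard at the top of
 * lpfc_sli4_queue_destroy() is a two-flag handshake: advertise
 * FREE_INIT so no new users start, then drop the lock and sleep until
 * any FREE_WAIT holder is gone. Condensed shape (kernel context; the
 * names and sleep interval are illustrative, msleep() needs
 * <linux/delay.h>):
 */
struct ex_hba { spinlock_t lock; unsigned int flags; };
#define EX_FREE_INIT 0x1
#define EX_FREE_WAIT 0x2

static void ex_queue_free_guard(struct ex_hba *p)
{
	spin_lock_irq(&p->lock);
	p->flags |= EX_FREE_INIT;		/* block new queue users */
	while (p->flags & EX_FREE_WAIT) {	/* wait out current users */
		spin_unlock_irq(&p->lock);
		msleep(20);
		spin_lock_irq(&p->lock);
	}
	spin_unlock_irq(&p->lock);

	/* ... release the queues here ... */

	spin_lock_irq(&p->lock);
	p->flags &= ~EX_FREE_INIT;
	spin_unlock_irq(&p->lock);
}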
10939 rqbp = rq->rqbp; in lpfc_free_rq_buffer()
10940 while (!list_empty(&rqbp->rqb_buffer_list)) { in lpfc_free_rq_buffer()
10941 list_remove_head(&rqbp->rqb_buffer_list, h_buf, in lpfc_free_rq_buffer()
10945 (rqbp->rqb_free_buffer)(phba, rqb_buffer); in lpfc_free_rq_buffer()
10946 rqbp->buffer_count--; in lpfc_free_rq_buffer()
10961 "6085 Fast-path %s (%d) not allocated\n", in lpfc_create_wq_cq()
10963 return -ENOMEM; in lpfc_create_wq_cq()
10979 *cq_map = cq->queue_id; in lpfc_create_wq_cq()
10982 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", in lpfc_create_wq_cq()
10983 qidx, cq->queue_id, qidx, eq->queue_id); in lpfc_create_wq_cq()
10991 /* no need to tear down cq - caller will do so */ in lpfc_create_wq_cq()
10996 pring = wq->pring; in lpfc_create_wq_cq()
10997 pring->sli.sli4.wqp = (void *)wq; in lpfc_create_wq_cq()
10998 cq->pring = pring; in lpfc_create_wq_cq()
11001 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", in lpfc_create_wq_cq()
11002 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); in lpfc_create_wq_cq()
11007 "0539 Failed setup of slow-path MQ: " in lpfc_create_wq_cq()
11009 /* no need to tear down cq - caller will do so */ in lpfc_create_wq_cq()
11014 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", in lpfc_create_wq_cq()
11015 phba->sli4_hba.mbx_wq->queue_id, in lpfc_create_wq_cq()
11016 phba->sli4_hba.mbx_cq->queue_id); in lpfc_create_wq_cq()
11023 * lpfc_setup_cq_lookup - Setup the CQ lookup table
11035 memset(phba->sli4_hba.cq_lookup, 0, in lpfc_setup_cq_lookup()
11036 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); in lpfc_setup_cq_lookup()
11038 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { in lpfc_setup_cq_lookup()
11040 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; in lpfc_setup_cq_lookup()
11044 list_for_each_entry(childq, &eq->child_list, list) { in lpfc_setup_cq_lookup()
11045 if (childq->queue_id > phba->sli4_hba.cq_max) in lpfc_setup_cq_lookup()
11047 if (childq->subtype == LPFC_IO) in lpfc_setup_cq_lookup()
11048 phba->sli4_hba.cq_lookup[childq->queue_id] = in lpfc_setup_cq_lookup()
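/*
 * Illustration only -- not driver code. The table built above is a
 * plain array indexed by queue ID: a little memory buys O(1) CQ
 * dispatch in the interrupt path instead of walking child lists.
 */
#include <stdlib.h>

struct ex_cq;	/* opaque for the example */

static struct ex_cq **ex_build_cq_lookup(unsigned int cq_max)
{
	/* one slot per possible queue ID; NULL = no CQ with that ID */
	return calloc(cq_max + 1, sizeof(struct ex_cq *));
}

/* usage: table[cq_id] = cq; later, in the ISR: cq = table[cq_id]; */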
11055 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11062 * 0 - successful
11063 * -ENOMEM - No available memory
11064 * -EIO - The mailbox failed to complete successfully.
11074 int qidx, cpu; in lpfc_sli4_queue_setup() local
11076 int rc = -ENOMEM; in lpfc_sli4_queue_setup()
11078 /* Check for dual-ULP support */ in lpfc_sli4_queue_setup()
11079 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli4_queue_setup()
11084 return -ENOMEM; in lpfc_sli4_queue_setup()
11086 length = (sizeof(struct lpfc_mbx_query_fw_config) - in lpfc_sli4_queue_setup()
11095 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; in lpfc_sli4_queue_setup()
11096 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); in lpfc_sli4_queue_setup()
11097 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); in lpfc_sli4_queue_setup()
11103 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_queue_setup()
11104 rc = -ENXIO; in lpfc_sli4_queue_setup()
11108 phba->sli4_hba.fw_func_mode = in lpfc_sli4_queue_setup()
11109 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; in lpfc_sli4_queue_setup()
11110 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; in lpfc_sli4_queue_setup()
11111 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; in lpfc_sli4_queue_setup()
11112 phba->sli4_hba.physical_port = in lpfc_sli4_queue_setup()
11113 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; in lpfc_sli4_queue_setup()
11116 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, in lpfc_sli4_queue_setup()
11117 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); in lpfc_sli4_queue_setup()
11119 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_queue_setup()
11124 qp = phba->sli4_hba.hdwq; in lpfc_sli4_queue_setup()
11129 "3147 Fast-path EQs not allocated\n"); in lpfc_sli4_queue_setup()
11130 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11135 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { in lpfc_sli4_queue_setup()
11137 for_each_present_cpu(cpu) { in lpfc_sli4_queue_setup()
11138 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_queue_setup()
11140 			/* Look for the CPU that's using that vector with in lpfc_sli4_queue_setup()
11143 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) in lpfc_sli4_queue_setup()
11145 if (qidx != cpup->eq) in lpfc_sli4_queue_setup()
11149 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, in lpfc_sli4_queue_setup()
11150 phba->cfg_fcp_imax); in lpfc_sli4_queue_setup()
11153 "0523 Failed setup of fast-path" in lpfc_sli4_queue_setup()
11155 cpup->eq, (uint32_t)rc); in lpfc_sli4_queue_setup()
11160 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = in lpfc_sli4_queue_setup()
11161 qp[cpup->hdwq].hba_eq; in lpfc_sli4_queue_setup()
11164 "2584 HBA EQ setup: queue[%d]-id=%d\n", in lpfc_sli4_queue_setup()
11165 cpup->eq, in lpfc_sli4_queue_setup()
11166 qp[cpup->hdwq].hba_eq->queue_id); in lpfc_sli4_queue_setup()
11171 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { in lpfc_sli4_queue_setup()
11172 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); in lpfc_sli4_queue_setup()
11173 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_queue_setup()
11177 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, in lpfc_sli4_queue_setup()
11180 &phba->sli4_hba.hdwq[qidx].io_cq_map, in lpfc_sli4_queue_setup()
11196 /* Set up slow-path MBOX CQ/MQ */ in lpfc_sli4_queue_setup()
11198 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { in lpfc_sli4_queue_setup()
11201 phba->sli4_hba.mbx_cq ? in lpfc_sli4_queue_setup()
11203 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11208 phba->sli4_hba.mbx_cq, in lpfc_sli4_queue_setup()
11209 phba->sli4_hba.mbx_wq, in lpfc_sli4_queue_setup()
11217 if (phba->nvmet_support) { in lpfc_sli4_queue_setup()
11218 if (!phba->sli4_hba.nvmet_cqset) { in lpfc_sli4_queue_setup()
11220 "3165 Fast-path NVME CQ Set " in lpfc_sli4_queue_setup()
11222 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11225 if (phba->cfg_nvmet_mrq > 1) { in lpfc_sli4_queue_setup()
11227 phba->sli4_hba.nvmet_cqset, in lpfc_sli4_queue_setup()
11239 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], in lpfc_sli4_queue_setup()
11248 phba->sli4_hba.nvmet_cqset[0]->chann = 0; in lpfc_sli4_queue_setup()
11251 "6090 NVMET CQ setup: cq-id=%d, " in lpfc_sli4_queue_setup()
11252 "parent eq-id=%d\n", in lpfc_sli4_queue_setup()
11253 phba->sli4_hba.nvmet_cqset[0]->queue_id, in lpfc_sli4_queue_setup()
11254 qp[0].hba_eq->queue_id); in lpfc_sli4_queue_setup()
11258 /* Set up slow-path ELS WQ/CQ */ in lpfc_sli4_queue_setup()
11259 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { in lpfc_sli4_queue_setup()
11262 phba->sli4_hba.els_cq ? "WQ" : "CQ"); in lpfc_sli4_queue_setup()
11263 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11267 phba->sli4_hba.els_cq, in lpfc_sli4_queue_setup()
11268 phba->sli4_hba.els_wq, in lpfc_sli4_queue_setup()
11277 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", in lpfc_sli4_queue_setup()
11278 phba->sli4_hba.els_wq->queue_id, in lpfc_sli4_queue_setup()
11279 phba->sli4_hba.els_cq->queue_id); in lpfc_sli4_queue_setup()
11281 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_queue_setup()
11283 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { in lpfc_sli4_queue_setup()
11286 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); in lpfc_sli4_queue_setup()
11287 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11291 phba->sli4_hba.nvmels_cq, in lpfc_sli4_queue_setup()
11292 phba->sli4_hba.nvmels_wq, in lpfc_sli4_queue_setup()
11302 "6096 ELS WQ setup: wq-id=%d, " in lpfc_sli4_queue_setup()
11303 "parent cq-id=%d\n", in lpfc_sli4_queue_setup()
11304 phba->sli4_hba.nvmels_wq->queue_id, in lpfc_sli4_queue_setup()
11305 phba->sli4_hba.nvmels_cq->queue_id); in lpfc_sli4_queue_setup()
11311 if (phba->nvmet_support) { in lpfc_sli4_queue_setup()
11312 if ((!phba->sli4_hba.nvmet_cqset) || in lpfc_sli4_queue_setup()
11313 (!phba->sli4_hba.nvmet_mrq_hdr) || in lpfc_sli4_queue_setup()
11314 (!phba->sli4_hba.nvmet_mrq_data)) { in lpfc_sli4_queue_setup()
11318 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11321 if (phba->cfg_nvmet_mrq > 1) { in lpfc_sli4_queue_setup()
11323 phba->sli4_hba.nvmet_mrq_hdr, in lpfc_sli4_queue_setup()
11324 phba->sli4_hba.nvmet_mrq_data, in lpfc_sli4_queue_setup()
11325 phba->sli4_hba.nvmet_cqset, in lpfc_sli4_queue_setup()
11337 phba->sli4_hba.nvmet_mrq_hdr[0], in lpfc_sli4_queue_setup()
11338 phba->sli4_hba.nvmet_mrq_data[0], in lpfc_sli4_queue_setup()
11339 phba->sli4_hba.nvmet_cqset[0], in lpfc_sli4_queue_setup()
11351 "6099 NVMET RQ setup: hdr-rq-id=%d, " in lpfc_sli4_queue_setup()
11352 "dat-rq-id=%d parent cq-id=%d\n", in lpfc_sli4_queue_setup()
11353 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, in lpfc_sli4_queue_setup()
11354 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, in lpfc_sli4_queue_setup()
11355 phba->sli4_hba.nvmet_cqset[0]->queue_id); in lpfc_sli4_queue_setup()
11360 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { in lpfc_sli4_queue_setup()
11363 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11367 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, in lpfc_sli4_queue_setup()
11368 phba->sli4_hba.els_cq, LPFC_USOL); in lpfc_sli4_queue_setup()
11377 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " in lpfc_sli4_queue_setup()
11378 "parent cq-id=%d\n", in lpfc_sli4_queue_setup()
11379 phba->sli4_hba.hdr_rq->queue_id, in lpfc_sli4_queue_setup()
11380 phba->sli4_hba.dat_rq->queue_id, in lpfc_sli4_queue_setup()
11381 phba->sli4_hba.els_cq->queue_id); in lpfc_sli4_queue_setup()
11383 if (phba->cfg_fcp_imax) in lpfc_sli4_queue_setup()
11384 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; in lpfc_sli4_queue_setup()
11388 for (qidx = 0; qidx < phba->cfg_irq_chann; in lpfc_sli4_queue_setup()
11393 if (phba->sli4_hba.cq_max) { in lpfc_sli4_queue_setup()
11394 kfree(phba->sli4_hba.cq_lookup); in lpfc_sli4_queue_setup()
11395 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), in lpfc_sli4_queue_setup()
11397 if (!phba->sli4_hba.cq_lookup) { in lpfc_sli4_queue_setup()
11400 "size 0x%x\n", phba->sli4_hba.cq_max); in lpfc_sli4_queue_setup()
11401 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11415 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11422 * 0 - successful
11423 * -ENOMEM - No available memory
11424 * -EIO - The mailbox failed to complete successfully.
11434 if (phba->sli4_hba.mbx_wq) in lpfc_sli4_queue_unset()
11435 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); in lpfc_sli4_queue_unset()
11438 if (phba->sli4_hba.nvmels_wq) in lpfc_sli4_queue_unset()
11439 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); in lpfc_sli4_queue_unset()
11442 if (phba->sli4_hba.els_wq) in lpfc_sli4_queue_unset()
11443 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); in lpfc_sli4_queue_unset()
11446 if (phba->sli4_hba.hdr_rq) in lpfc_sli4_queue_unset()
11447 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, in lpfc_sli4_queue_unset()
11448 phba->sli4_hba.dat_rq); in lpfc_sli4_queue_unset()
11451 if (phba->sli4_hba.mbx_cq) in lpfc_sli4_queue_unset()
11452 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); in lpfc_sli4_queue_unset()
11455 if (phba->sli4_hba.els_cq) in lpfc_sli4_queue_unset()
11456 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); in lpfc_sli4_queue_unset()
11459 if (phba->sli4_hba.nvmels_cq) in lpfc_sli4_queue_unset()
11460 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); in lpfc_sli4_queue_unset()
11462 if (phba->nvmet_support) { in lpfc_sli4_queue_unset()
11464 if (phba->sli4_hba.nvmet_mrq_hdr) { in lpfc_sli4_queue_unset()
11465 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) in lpfc_sli4_queue_unset()
11468 phba->sli4_hba.nvmet_mrq_hdr[qidx], in lpfc_sli4_queue_unset()
11469 phba->sli4_hba.nvmet_mrq_data[qidx]); in lpfc_sli4_queue_unset()
11473 if (phba->sli4_hba.nvmet_cqset) { in lpfc_sli4_queue_unset()
11474 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) in lpfc_sli4_queue_unset()
11476 phba, phba->sli4_hba.nvmet_cqset[qidx]); in lpfc_sli4_queue_unset()
11480 /* Unset fast-path SLI4 queues */ in lpfc_sli4_queue_unset()
11481 if (phba->sli4_hba.hdwq) { in lpfc_sli4_queue_unset()
11483 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { in lpfc_sli4_queue_unset()
11485 qp = &phba->sli4_hba.hdwq[qidx]; in lpfc_sli4_queue_unset()
11486 lpfc_wq_destroy(phba, qp->io_wq); in lpfc_sli4_queue_unset()
11487 lpfc_cq_destroy(phba, qp->io_cq); in lpfc_sli4_queue_unset()
11490 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { in lpfc_sli4_queue_unset()
11492 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; in lpfc_sli4_queue_unset()
11497 kfree(phba->sli4_hba.cq_lookup); in lpfc_sli4_queue_unset()
11498 phba->sli4_hba.cq_lookup = NULL; in lpfc_sli4_queue_unset()
11499 phba->sli4_hba.cq_max = 0; in lpfc_sli4_queue_unset()
11503 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11510 * - Mailbox asynchronous events
11511 * - Receive queue completion unsolicited events
11512 * Later, this can be used for all the slow-path events.
11515 * 0 - successful
11516 * -ENOMEM - No available memory
11524 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { in lpfc_sli4_cq_event_pool_create()
11528 list_add_tail(&cq_event->list, in lpfc_sli4_cq_event_pool_create()
11529 &phba->sli4_hba.sp_cqe_event_pool); in lpfc_sli4_cq_event_pool_create()
11535 return -ENOMEM; in lpfc_sli4_cq_event_pool_create()
11539 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11544 * cleanup routine to free all the outstanding completion-queue events
11554 &phba->sli4_hba.sp_cqe_event_pool, list) { in lpfc_sli4_cq_event_pool_destroy()
11555 list_del(&cq_event->list); in lpfc_sli4_cq_event_pool_destroy()
11561 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11565 * completion-queue event from the free pool.
11567 * Return: Pointer to the newly allocated completion-queue event if successful
11575 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, in __lpfc_sli4_cq_event_alloc()
11581 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11585 * completion-queue event from the free pool.
11587 * Return: Pointer to the newly allocated completion-queue event if successful
11596 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_sli4_cq_event_alloc()
11598 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_sli4_cq_event_alloc()
11603 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11608 * completion-queue event back into the free pool.
11614 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); in __lpfc_sli4_cq_event_release()
11618 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11623 * completion-queue event back into the free pool.
11630 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_sli4_cq_event_release()
11632 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_sli4_cq_event_release()
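/*
 * Illustration only -- not driver code. The alloc/release pair above
 * is a classic preallocated free pool: events are carved out at init
 * time so the fast path never calls the allocator. Condensed shape
 * (kernel context; caller holds the pool lock, as the __lpfc_ variants
 * above expect):
 */
struct ex_event { struct list_head list; };

static struct ex_event *ex_pool_get(struct list_head *pool)
{
	struct ex_event *ev =
		list_first_entry_or_null(pool, struct ex_event, list);

	if (ev)
		list_del(&ev->list);	/* hand the entry out */
	return ev;
}

static void ex_pool_put(struct list_head *pool, struct ex_event *ev)
{
	list_add_tail(&ev->list, pool);	/* return it to the pool */
}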
/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 *
 * This routine is to free all the pending completion-queue events to the
 * free pool.
 **/
	/* Pull the pending ELS XRI abort work queue entries */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);

	/* Pull the pending asynchronous event work queue entries */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
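/*
 * Illustrative sketch (not driver code): the pre-allocated event-pool
 * pattern the routines above implement, reduced to its core.  Names such
 * as ev_node/ev_pool are hypothetical.  Events are allocated up front so
 * that interrupt context can take one with a cheap list operation instead
 * of calling an allocator.
 */
struct ev_node {
	struct list_head list;
};

static LIST_HEAD(ev_pool);
static DEFINE_SPINLOCK(ev_pool_lock);

static int ev_pool_create(int count)
{
	struct ev_node *ev;
	int i;

	for (i = 0; i < count; i++) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			return -ENOMEM;	/* caller tears down what was added */
		list_add_tail(&ev->list, &ev_pool);
	}
	return 0;
}

static struct ev_node *ev_alloc(void)
{
	struct ev_node *ev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ev_pool_lock, flags);
	if (!list_empty(&ev_pool)) {
		ev = list_first_entry(&ev_pool, struct ev_node, list);
		list_del(&ev->list);
	}
	spin_unlock_irqrestore(&ev_pool_lock, flags);
	return ev;	/* NULL when the pool is exhausted */
}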
/**
 * lpfc_pci_function_reset - Reset PCI function.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Setup PCI function reset mailbox-ioctl command */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &shdr->response);
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc)
		rc = -ENXIO;

	/* Wait for the port to report ready after the reset */
	if (lpfc_readl(phba->sli4_hba.u.if_type2.
		       STATUSregaddr, &reg_data.word0))
		rc = -ENODEV;

	/* On a port error, capture the two error registers */
	phba->work_status[0] = readl(
		phba->sli4_hba.u.if_type2.ERR1regaddr);
	phba->work_status[1] = readl(
		phba->sli4_hba.u.if_type2.ERR2regaddr);
		phba->work_status[0],
		phba->work_status[1]);
	rc = -ENODEV;

	/* Issue the port reset and confirm the device is still on the bus */
	writel(reg_data.word0, phba->sli4_hba.u.if_type2.
	       CTRLregaddr);
	pci_read_config_word(phba->pcidev,
			     PCI_DEVICE_ID, &devid);

	rc = -ENODEV;

	/* Catch the not-ready port failure after a port reset. */
	rc = -ENODEV;
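/*
 * Illustrative sketch (hypothetical helper, not driver code): polling a
 * status register for a "ready" indication after a function reset.  The
 * register address and BIT(0) as the ready bit are assumptions; the real
 * layout lives in the driver's SLI-4 register definitions.
 */
static int wait_port_ready(void __iomem *status_reg, unsigned int timeout_ms)
{
	u32 val;

	while (timeout_ms--) {
		val = readl(status_reg);
		if (val & BIT(0))	/* assumed "port ready" bit */
			return 0;
		msleep(1);		/* back off between polls */
	}
	return -ENODEV;			/* port never came ready */
}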
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 *
 * with SLI-4 interface spec.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
	struct pci_dev *pdev = phba->pcidev;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (lpfc_readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF,
		       &phba->sli4_hba.sli_intf.word0)) {
		return -ENODEV;
	}

	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
			phba->sli4_hba.sli_intf.word0);
		return -ENODEV;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	/* if_type 0: BAR0 holds the SLI4 PCI config registers */
	phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
	phba->sli4_hba.conf_regs_memmap_p =
		ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->sli4_hba.conf_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 PCI config registers.\n");
		return -ENODEV;
	}
	phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;

	/* if_type 2/6: config registers live behind BAR0 as well */
	phba->pci_bar0_map = pci_resource_start(pdev, 1);
	if (!phba->pci_bar0_map) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
		return -ENODEV;
	}
	phba->sli4_hba.conf_regs_memmap_p =
		ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->sli4_hba.conf_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 PCI config registers.\n");
		return -ENODEV;
	}

	/* if_type 0: BAR1 holds the control registers */
	phba->pci_bar1_map = pci_resource_start(pdev,
						PCI_64BIT_BAR2);
	phba->sli4_hba.ctrl_regs_memmap_p =
		ioremap(phba->pci_bar1_map,
			bar1map_len);
	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
		dev_err(&pdev->dev,
			"ioremap failed for SLI4 HBA control registers.\n");
		error = -ENOMEM;
		goto out_iounmap_conf;
	}
	phba->pci_bar2_memmap_p =
		phba->sli4_hba.ctrl_regs_memmap_p;

	error = -ENOMEM;

	/* if_type 6: BAR1 holds the doorbell registers */
	phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
	phba->sli4_hba.drbl_regs_memmap_p =
		ioremap(phba->pci_bar1_map, bar1map_len);
	if (!phba->sli4_hba.drbl_regs_memmap_p) {
		dev_err(&pdev->dev,
			"ioremap failed for SLI4 HBA doorbell registers.\n");
		error = -ENOMEM;
		goto out_iounmap_conf;
	}
	phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;

	/* if_type 0: BAR2 holds the doorbell registers */
	phba->pci_bar2_map = pci_resource_start(pdev,
						PCI_64BIT_BAR4);
	phba->sli4_hba.drbl_regs_memmap_p =
		ioremap(phba->pci_bar2_map,
			bar2map_len);
	if (!phba->sli4_hba.drbl_regs_memmap_p) {
		dev_err(&pdev->dev,
			"ioremap failed for SLI4 HBA doorbell registers.\n");
		error = -ENOMEM;
		goto out_iounmap_ctrl;
	}
	phba->pci_bar4_memmap_p =
		phba->sli4_hba.drbl_regs_memmap_p;

	error = -ENOMEM;

	/* if_type 6: BAR2 holds the DPP registers */
	phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
	phba->sli4_hba.dpp_regs_memmap_p =
		ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->sli4_hba.dpp_regs_memmap_p) {
		dev_err(&pdev->dev,
			"ioremap failed for SLI4 HBA dpp registers.\n");
		error = -ENOMEM;
		goto out_iounmap_all;
	}
	phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;

	/* Set up the EQ/CQ register handling functions now */
	phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
	phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
	phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;

	phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
	phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
	phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
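/*
 * Illustrative sketch: the usual pattern for mapping a single PCI BAR,
 * with the failure path.  Purely an example; the BAR number and the
 * driver's actual register layout are not implied.
 */
static void __iomem *map_bar(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);
	void __iomem *p;

	if (!start || !len)
		return NULL;	/* BAR not implemented on this function */

	p = ioremap(start, len);
	if (!p)
		dev_err(&pdev->dev, "cannot map BAR%d\n", bar);
	return p;		/* caller must iounmap() on teardown */
}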
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 *
 * with SLI-4 interface spec.
 **/
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		if (phba->sli4_hba.dpp_regs_memmap_p)
			iounmap(phba->sli4_hba.dpp_regs_memmap_p);
		break;
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
	/* Set up MSI-X multi-message vectors */
	rc = pci_alloc_irq_vectors(phba->pcidev,
				   LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS,
				   PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		goto mem_fail_out;
	}
	lpfc_config_msi(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
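/*
 * Illustrative sketch of the two-vector MSI-X pattern above: allocate
 * exactly two vectors, attach one handler per vector, and unwind in
 * reverse order on failure.  Handler names and the "drv-*" strings are
 * placeholders, not driver identifiers.
 */
static int enable_two_msix(struct pci_dev *pdev, irq_handler_t slow,
			   irq_handler_t fast, void *ctx)
{
	int rc = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);

	if (rc < 0)
		return rc;

	rc = request_irq(pci_irq_vector(pdev, 0), slow, 0, "drv-sp", ctx);
	if (rc)
		goto free_vectors;
	rc = request_irq(pci_irq_vector(pdev, 1), fast, 0, "drv-fp", ctx);
	if (rc)
		goto free_slow;
	return 0;

free_slow:
	free_irq(pci_irq_vector(pdev, 0), ctx);
free_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}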
/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 *
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
	rc = pci_enable_msi(phba->pcidev);
	if (rc)
		return rc;

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc)
		pci_disable_msi(phba->pcidev);
/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec, falling back in the order MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
	phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;

	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
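/*
 * Illustrative sketch of the MSI-X -> MSI -> INTx fallback ladder that
 * both lpfc_sli_enable_intr() and lpfc_sli4_enable_intr() implement.
 * enable_msix()/enable_msi()/intx_handler() and DRV_INTR_ERROR are
 * stand-ins, not driver symbols.
 */
static int enable_msix(struct pci_dev *pdev);	/* e.g. the sketch above */
static int enable_msi(struct pci_dev *pdev);
static irqreturn_t intx_handler(int irq, void *dev_id);

#define DRV_INTR_ERROR	0xFFFFFFFF	/* stand-in for LPFC_INTR_ERROR */

static unsigned int enable_intr(struct pci_dev *pdev, unsigned int cfg_mode)
{
	if (cfg_mode == 2 && !enable_msix(pdev))
		return 2;				/* MSI-X */
	if (cfg_mode >= 1 && !enable_msi(pdev))
		return 1;				/* MSI */
	if (!request_irq(pdev->irq, intx_handler, IRQF_SHARED, "drv", pdev))
		return 0;				/* legacy INTx */
	return DRV_INTR_ERROR;				/* nothing worked */
}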
/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 *
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) accordingly.
 **/
	int nr_irqs, i;

	if (phba->intr_type == MSIX)
		nr_irqs = LPFC_MSIX_VECTORS;
	else
		nr_irqs = 1;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(phba->pcidev, i), phba);
	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
/**
 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
 *
 * Return the CPU that matches the selection criteria
 **/
	int cpu;

	/* Find the desired phys_id for the specified EQ */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
		if ((match == LPFC_FIND_BY_EQ) &&
		    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
		    (cpup->eq == id))
			return cpu;

		/* If matching by HDWQ, select the first CPU that matches */
		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
			return cpu;
	}
	return 0;
/**
 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
 * @cpu: CPU map index
 * @phys_id: CPU package physical id
 * @core_id: CPU core id
 **/
static int
lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
		uint16_t phys_id, uint16_t core_id)
{
	for_each_present_cpu(idx) {
		cpup = &phba->sli4_hba.cpu_map[idx];
		/* Does the cpup match the one we are looking for */
		if ((cpup->phys_id == phys_id) &&
		    (cpup->core_id == core_id) &&
		    (cpu != idx))
			return 1;
	}
	return 0;
}
/**
 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
 * @cpu: cpu used to index vector_map structure
 **/
static inline void
lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
			unsigned int cpu)
{
	struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);

	cpup->eq = eqidx;
	cpup->flag |= flag;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
			cpu, eqhdl->irq, cpup->eq, cpup->flag);
}
/**
 * lpfc_cpu_map_array_init - Initialize cpu_map structure
 **/
	int cpu;

	for_each_possible_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
		cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
		cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
		cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
		cpup->eq = LPFC_VECTOR_MAP_EMPTY;
		cpup->flag = 0;
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
		INIT_LIST_HEAD(&eqi->list);
		eqi->icnt = 0;
	}
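/*
 * Illustrative sketch: initializing one per-CPU structure for every
 * possible CPU, as lpfc_cpu_map_array_init() does for its EQ info.
 * "struct eq_info" is a stand-in type, not the driver's.
 */
struct eq_info {
	struct list_head list;
	u32 icnt;
};

static void init_all_eq_info(struct eq_info __percpu *eq_info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct eq_info *eqi = per_cpu_ptr(eq_info, cpu);

		INIT_LIST_HEAD(&eqi->list);
		eqi->icnt = 0;
	}
}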
/**
 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
 **/
	for (i = 0; i < phba->cfg_irq_chann; i++) {
		eqhdl = lpfc_get_eq_hdl(i);
		eqhdl->irq = LPFC_IRQ_EMPTY;
		eqhdl->phba = phba;
	}
/**
 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
 *
 * The routine will figure out the CPU affinity assignment for every
 * MSI-X vector allocated for the HBA.
 * In addition, the CPU to IO channel mapping will be calculated
 * and the phba->sli4_hba.cpu_map array will reflect this.
 **/
	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;

	/* Update CPU map with physical id and core id of each CPU */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
			cpup->flag |= LPFC_CPU_MAP_HYPER;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = cpu;
#endif
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3328 CPU %d physid %d coreid %d flag x%x\n",
				cpu, cpup->phys_id, cpup->core_id, cpup->flag);

		if (cpup->phys_id > max_phys_id)
			max_phys_id = cpup->phys_id;
		if (cpup->phys_id < min_phys_id)
			min_phys_id = cpup->phys_id;

		if (cpup->core_id > max_core_id)
			max_core_id = cpup->core_id;
		if (cpup->core_id < min_core_id)
			min_core_id = cpup->core_id;
	}

	/*
	 * Next we will set any unassigned (unaffinitized) cpu map
	 * entries to an IRQ on the same phys_id.
	 */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this CPU entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark CPU as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* Look for an assigned CPU on the same phys_id */
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
				    (new_cpup->phys_id == cpup->phys_id))
					goto found_same;
			}
			/* At this point, we leave the CPU as unassigned */
			continue;
found_same:
			cpup->eq = new_cpup->eq;

			/* Advance the start point to reduce the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3337 Set Affinity: CPU %d "
					"eq %d from peer cpu %d same "
					"phys_id (%d)\n",
					cpu, cpup->eq, new_cpu,
					cpup->phys_id);
		}
	}

	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark it as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* Look for an assigned CPU on any phys_id */
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
					goto found_any;
			}
			/* We should never leave an entry unassigned */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpup->hdwq, cpup->eq);
			continue;
found_any:
			cpup->eq = new_cpup->eq;

			/* Advance the start point to reduce the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3338 Set Affinity: CPU %d "
					"eq %d from peer cpu %d (%d/%d)\n",
					cpu, cpup->eq, new_cpu,
					new_cpup->phys_id, new_cpup->core_id);
		}
	}

	/* Assign hdwq indices that are unique across all cpus in the map
	 * that are also FIRST_CPUs.
	 */
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Only FIRST IRQs get a hdwq index assignment. */
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		cpup->hdwq = idx;
		idx++;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3333 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flag x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}

	/* This will be 1 to 1 - hdwq to cpu, unless there are fewer
	 * hardware queues than CPUs. For that case we will just round-robin
	 * the available hardware queues as they get assigned to CPUs.
	 * The next_idx is the idx from the FIRST_CPU loop above to account
	 * for irq_chann < hdwq.  The idx is used for round-robin assignments
	 * and needs to start at 0.
	 */
	next_idx = idx;
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* FIRST cpus are already mapped. */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* If irq_chann < hdwq, give out the remaining hdwq
		 * indices first so that all hardware queues get used.
		 */
		if (next_idx < phba->cfg_hdw_queue) {
			cpup->hdwq = next_idx;
			next_idx++;
			continue;
		}

		/* Not a First CPU and all hdw_queues are used.  Reuse a
		 * Hardware Queue for another CPU, so be smart about it
		 * and pick one that has its IRQ/EQ mapped to the same phys_id
		 * (CPU package) and core_id.
		 */
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
			    new_cpup->phys_id == cpup->phys_id &&
			    new_cpup->core_id == cpup->core_id)
				goto found_hdwq;
		}

		/* If we can't match both phys_id and core_id,
		 * settle for just a phys_id match.
		 */
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
			    new_cpup->phys_id == cpup->phys_id)
				goto found_hdwq;
		}

		/* Otherwise just round robin on cfg_hdw_queue */
		cpup->hdwq = idx % phba->cfg_hdw_queue;
		idx++;
		goto logit;
found_hdwq:
		/* We found an available entry, copy the IRQ info */
		cpup->hdwq = new_cpup->hdwq;
logit:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3335 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flag x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}

	/*
	 * Initialize the cpu_map slots for not-present cpus in case
	 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
	 */
	idx = 0;
	for_each_possible_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
		c_stat->hdwq_no = cpup->hdwq;
#endif
		if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
			continue;

		cpup->hdwq = idx++ % phba->cfg_hdw_queue;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		c_stat->hdwq_no = cpup->hdwq;
#endif
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3340 Set Affinity: not present "
				"CPU %d hdwq %d\n",
				cpu, cpup->hdwq);
	}
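/*
 * Illustrative sketch of the round-robin fallback used above when there
 * are fewer hardware queues than CPUs: every CPU still gets a queue,
 * queues are simply reused in order.  The array name is a placeholder.
 */
static void assign_hdwq_round_robin(u16 *cpu_to_hdwq, unsigned int nr_hdwq)
{
	unsigned int idx = 0;
	int cpu;

	for_each_present_cpu(cpu)
		cpu_to_hdwq[cpu] = idx++ % nr_hdwq;
}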
 * @cpu: cpu going offline
 **/
static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/* if irq is not affinitized to the cpu going
		 * offline then we don't care about it
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;
		/* get the cpus that are online and are affinitized
		 * to this irq vector.  If the count is more than 1
		 * then cpuhp is not going to shut down this vector.
		 * Since this cpu has not gone offline yet, we need >1.
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
	}
	free_cpumask_var(tmp);
	return 0;
}
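/*
 * Illustrative sketch: deciding whether an IRQ vector is affected when a
 * given CPU goes offline, using the same cpumask tests as above.  Returns
 * true when @cpu is the only online CPU the vector is affinitized to.
 * A stack cpumask is fine for a sketch; real code may prefer
 * zalloc_cpumask_var() on large-NR_CPUS configs.
 */
static bool vector_loses_last_cpu(struct pci_dev *pdev, int vec,
				  unsigned int cpu)
{
	const struct cpumask *maskp = pci_irq_get_affinity(pdev, vec);
	struct cpumask tmp;

	if (!maskp || !cpumask_test_cpu(cpu, maskp))
		return false;	/* vector not tied to this CPU */

	cpumask_and(&tmp, maskp, cpu_online_mask);
	return cpumask_weight(&tmp) == 1;
}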
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
					    &phba->cpuhp);
	del_timer_sync(&phba->cpuhp_poll_timer);

	if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
					 &phba->cpuhp);

	if (phba->pport->load_flag & FC_UNLOADING) {
		*retval = -EAGAIN;
		return true;
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
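/*
 * Illustrative sketch of the hotplug plumbing these helpers wrap: a driver
 * registers one multi-instance cpuhp state, then adds or removes one
 * hlist_node per adapter.  "drv_cpuhp_state" is a stand-in obtained from
 * cpuhp_setup_state_multi() at module init; the callbacks would be the
 * lpfc_cpu_online()/lpfc_cpu_offline() routines shown further below.
 */
static enum cpuhp_state drv_cpuhp_state;	/* from cpuhp_setup_state_multi() */

static int drv_cpuhp_register(struct hlist_node *node)
{
	/* _nocalls: no callbacks invoked for already-online CPUs */
	return cpuhp_state_add_instance_nocalls(drv_cpuhp_state, node);
}

static void drv_cpuhp_unregister(struct hlist_node *node)
{
	cpuhp_state_remove_instance_nocalls(drv_cpuhp_state, node);
}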
/**
 * lpfc_irq_set_aff - set IRQ affinity
 * @cpu: cpu to set affinity
 **/
static inline void
lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
{
	cpumask_clear(&eqhdl->aff_mask);
	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
	irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
}

/**
 * lpfc_irq_clear_aff - clear IRQ affinity
 **/
	cpumask_clear(&eqhdl->aff_mask);
	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
/**
 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
 * @cpu: cpu going offline/online
 * @offline: true, cpu is going offline. false, cpu is coming online.
 *
 * If cpu is going offline, we'll try our best effort to find the next
 * online cpu on the phba's original_mask and migrate all offlining IRQ
 * affinities.
 *
 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
 *
 * Note: Call only when NUMA/NHT mode is enabled; otherwise rely on
 *	 PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
 **/
static void
lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
{
	if (phba->irq_chann_mode == NORMAL_MODE)
		return;

	orig_mask = &phba->sli4_hba.irq_aff_mask;

	if (!cpumask_test_cpu(cpu, orig_mask))
		return;

	cpup = &phba->sli4_hba.cpu_map[cpu];

	if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
		return;

	if (offline) {
		/* Find next online CPU on original mask */
		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

		/* Found a valid CPU */
		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
			/* Go through each eqhdl and ensure the offlining
			 * cpu aff_mask is migrated
			 */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				aff_mask = lpfc_get_aff_mask(idx);

				/* Migrate affinity */
				if (cpumask_test_cpu(cpu, aff_mask))
					lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
							 cpu_select);
			}
		} else {
			/* Rely on irqbalance if no online CPUs left on NUMA */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++)
				lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
		}
	} else {
		/* Migrate affinity back to this CPU */
		lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
	}
}
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);

	WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, true);

	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
	if (retval)
		return retval;

	/* start polling on these eq's */
	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
		list_del_init(&eq->_poll_list);
		lpfc_sli4_start_polling(eq);
	}

	return 0;
}

static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);

	WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, false);

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
		if (n == cpu)
			lpfc_sli4_stop_polling(eq);
	}

	return 0;
}
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
 * to eq handles.  One vector is
 * allocated and assigned to each online and offline cpu. If the cpu is
 * online, then affinity will be set to that cpu. If the cpu is offline, then
 * affinity will be set to the nearest peer cpu within the numa node that is
 * online. If there are no online cpus within the numa node, affinity is not
 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
 * enabled.  Otherwise the kernel manages
 * cpu affinity. The driver will then use that affinity mapping to setup its
 * cpu mapping table.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;

	/* Set up MSI-X multi-message vectors */
	vectors = phba->cfg_irq_chann;

	if (phba->irq_chann_mode != NORMAL_MODE)
		aff_mask = &phba->sli4_hba.irq_aff_mask;

	if (aff_mask) {
		cpu_cnt = cpumask_weight(aff_mask);
		vectors = min(phba->cfg_irq_chann, cpu_cnt);

		/* cpu: iterates over aff_mask including offline or online
		 * cpu_select: iterates over online aff_mask to set affinity
		 */
		cpu = cpumask_first(aff_mask);
		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
	} else {
		flags |= PCI_IRQ_AFFINITY;
	}

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	vectors = rc;

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		name = eqhdl->handler_name;

		eqhdl->idx = index;
		rc = pci_irq_vector(phba->pcidev, index);
		if (rc < 0) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0489 MSI-X fast-path (%d) "
					"pci_irq_vec failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
		eqhdl->irq = rc;

		rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
				 name, eqhdl);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}

		if (aff_mask) {
			/* If found a neighboring online cpu, set affinity */
			if (cpu_select < nr_cpu_ids)
				lpfc_irq_set_aff(eqhdl, cpu_select);

			/* Assign EQ to cpu_map */
			lpfc_assign_eq_map_info(phba, index,
						LPFC_CPU_FIRST_IRQ,
						cpu);

			/* Iterate to next offline or online cpu in aff_mask */
			cpu = cpumask_next(cpu, aff_mask);

			/* Find next online cpu in aff_mask to set affinity */
			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
		} else if (vectors == 1) {
			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
						cpu);
		} else {
			maskp = pci_irq_get_affinity(phba->pcidev, index);

			/* Loop through all CPUs associated with vector index */
			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
				cpup = &phba->sli4_hba.cpu_map[cpu];

				/* If this is the first CPU that's assigned to
				 * this vector, set LPFC_CPU_FIRST_IRQ.
				 *
				 * With certain platforms it's possible that irq
				 * vectors are affinitized to all the cpus.
				 */
				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
					continue;
				lpfc_assign_eq_map_info(phba, index,
							LPFC_CPU_FIRST_IRQ,
							cpu);
			}
		}
	}

	if (vectors != phba->cfg_irq_chann) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->cfg_irq_chann, vectors);
		if (phba->cfg_irq_chann > vectors)
			phba->cfg_irq_chann = vectors;
	}

	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--) {
		eqhdl = lpfc_get_eq_hdl(index);
		lpfc_irq_clear_aff(eqhdl);
		free_irq(eqhdl->irq, eqhdl);
	}

	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
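/*
 * Illustrative sketch: letting the PCI core spread MSI-X vectors across
 * CPUs (PCI_IRQ_AFFINITY) and then reading each vector's mask back, which
 * is how the routine above builds its cpu_map in the managed-affinity case.
 */
static int alloc_managed_vectors(struct pci_dev *pdev, unsigned int want)
{
	int nvec, i;

	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		const struct cpumask *mask = pci_irq_get_affinity(pdev, i);
		int cpu;

		if (!mask)
			continue;
		/* Record which present CPUs this vector serves */
		for_each_cpu_and(cpu, mask, cpu_present_mask)
			pr_info("vector %d serves cpu %d\n", i, cpu);
	}
	return nvec;	/* may be fewer than requested */
}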
/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 *
 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
 * called to enable the MSI vector.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
	unsigned int cpu;

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
	if (rc <= 0)
		return rc ? rc : -1;

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_free_irq_vectors(phba->pcidev);
		return rc;
	}

	eqhdl = lpfc_get_eq_hdl(0);
	rc = pci_irq_vector(phba->pcidev, 0);
	if (rc < 0) {
		pci_free_irq_vectors(phba->pcidev);
		return rc;
	}
	eqhdl->irq = rc;

	cpu = cpumask_first(cpu_present_mask);
	lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);

	for (index = 0; index < phba->cfg_irq_chann; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		eqhdl->idx = index;
	}
/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec, falling back in the order MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *	Interrupt mode (2, 1, 0) - successful
 *	LPFC_INTR_ERROR - error
 **/
	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			struct lpfc_hba_eq_hdl *eqhdl;
			unsigned int cpu;

			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;

			retval = pci_irq_vector(phba->pcidev, 0);
			if (retval < 0)
				return LPFC_INTR_ERROR;

			eqhdl = lpfc_get_eq_hdl(0);
			eqhdl->irq = retval;

			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
						cpu);
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				eqhdl = lpfc_get_eq_hdl(idx);
				eqhdl->idx = idx;
			}
		}
	}
/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 *
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) accordingly.
 **/
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->cfg_irq_chann; index++) {
			eqhdl = lpfc_get_eq_hdl(index);
			lpfc_irq_clear_aff(eqhdl);
			free_irq(eqhdl->irq, eqhdl);
		}
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 *
 * a device with SLI-3 interface spec.
 **/
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	phba->pport->work_port_events = 0;
/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 **/
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		nvmet_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
	}

	/* Re-check each busy list after every wait interval */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		io_xri_cmpl = list_empty(
			&qp->lpfc_abts_io_buf_list);
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		nvmet_xri_cmpl = list_empty(
			&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	}
	els_xri_cmpl =
		list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
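/*
 * Illustrative sketch of the drain-wait pattern above: poll a set of
 * "busy" lists until they empty, backing off between polls.  The list
 * heads and timeout policy are placeholders, not the driver's.
 */
static int wait_lists_empty(struct list_head *a, struct list_head *b,
			    unsigned int max_ms, unsigned int step_ms)
{
	unsigned int waited = 0;

	while (!list_empty(a) || !list_empty(b)) {
		if (waited >= max_ms)
			return -ETIMEDOUT;	/* exchanges still busy */
		msleep(step_ms);
		waited += step_ms;
	}
	return 0;
}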
/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 **/
	struct pci_dev *pdev = phba->pcidev;

	hrtimer_cancel(&phba->cmf_timer);

	if (phba->pport)
		phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	if (!pci_channel_offline(phba->pcidev))
		lpfc_sli_hba_iocb_abort(phba);

	/* per-phba callback de-registration for hotplug event */
	if (phba->pport)
		lpfc_cpuhp_remove(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Disable FW logging to host memory */
	if (phba->ras_fwlog.ras_enabled)
		lpfc_ras_stop_fwlog(phba);

	if (phba->pport)
		phba->pport->work_port_events = 0;
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6235 INIT Congestion Buffer %p\n", phba->cgn_i);

	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);

	atomic_set(&phba->cgn_driver_evt_cnt, 0);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);
	phba->cgn_evt_minute = 0;
	phba->hba_flag &= ~HBA_CGN_DAY_WRAP;

	cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
	cp->cgn_info_version = LPFC_CGN_INFO_V3;

	/* cgn parameters */
	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;

	/* Record the start-of-day wall-clock time */
	ktime_get_real_ts64(&cmpl_time);
	time64_to_tm(cmpl_time.tv_sec, 0, &broken);

	cp->cgn_info_month = broken.tm_mon + 1;
	cp->cgn_info_day = broken.tm_mday;
	cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
	cp->cgn_info_hour = broken.tm_hour;
	cp->cgn_info_minute = broken.tm_min;
	cp->cgn_info_second = broken.tm_sec;

			cp->cgn_info_day, cp->cgn_info_month,
			cp->cgn_info_year, cp->cgn_info_hour,
			cp->cgn_info_minute, cp->cgn_info_second);

	/* The driver's lun queue depth is recorded for the fabric */
	if (phba->pport) {
		size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
		cp->cgn_lunq = cpu_to_le16(size);
	}

	cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
	cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(crc);

	phba->cgn_evt_timestamp = jiffies +
		msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
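/*
 * Illustrative sketch: producing the broken-down wall-clock fields the
 * congestion buffer records (month/day/year relative to 2000), with the
 * same kernel time helpers used above.  The output pointers are
 * hypothetical, not the driver's structure fields.
 */
static void fill_timestamp(u8 *month, u8 *day, u8 *year,
			   u8 *hour, u8 *minute, u8 *second)
{
	struct timespec64 ts;
	struct tm broken;

	ktime_get_real_ts64(&ts);
	time64_to_tm(ts.tv_sec, 0, &broken);

	*month = broken.tm_mon + 1;	/* tm_mon is 0-based */
	*day = broken.tm_mday;
	*year = broken.tm_year - 100;	/* tm_year counts from 1900 */
	*hour = broken.tm_hour;
	*minute = broken.tm_min;
	*second = broken.tm_sec;
}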
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6236 INIT Congestion Stat %p\n", phba->cgn_i);

	if (!phba->cgn_i)
		return;

	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
	memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));

	ktime_get_real_ts64(&cmpl_time);
	time64_to_tm(cmpl_time.tv_sec, 0, &broken);

	cp->cgn_stat_month = broken.tm_mon + 1;
	cp->cgn_stat_day = broken.tm_mday;
	cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
	cp->cgn_stat_hour = broken.tm_hour;
	cp->cgn_stat_minute = broken.tm_min;

			cp->cgn_stat_day, cp->cgn_stat_month,
			cp->cgn_stat_year, cp->cgn_stat_hour,
			cp->cgn_stat_minute);

	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(crc);
/**
 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
 **/
	if (!phba->cgn_i)
		return -ENXIO;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
			phba->pport->port_state, reg);
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
			 LPFC_SLI4_MBX_EMBED);

	reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
	reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
	reg_congestion_buf->addr_lo =
		putPaddrLow(phba->cgn_i->phys);
	reg_congestion_buf->addr_hi =
		putPaddrHigh(phba->cgn_i->phys);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &shdr->response);
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc)
		return -ENXIO;

	return 0;
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 *
 * This function may be called from any context that can block-wait
 * for the completion.
 **/
	struct lpfc_mqe *mqe = &mboxq->u.mqe;

	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
					   mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Check for Extended Pre-Registered SGL support */
	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

	/* Our NVME flag for SLI4 is set if the firmware supports NVME */
	if (bf_get(cfg_nvme, mbx_sli4_parameters)) {
		sli4_params->nvme = 1;

		/* If firmware supports NVME but config is FCP-only, keep FCP */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
				phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No NVME support.  Turn off NVME related flags. */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
				phba->cfg_enable_fc4_type);
fcponly:
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;
			phba->cfg_nvme_seg_cnt = 0;

			/* If no FCP either, there is nothing left to run */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;

	/* Only embed PBDE for if_type 6; PBDE support requires xib be set */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_6) && bf_get(cfg_xib, mbx_sli4_parameters))
		phba->cfg_enable_pbde = 1;
	else
		phba->cfg_enable_pbde = 0;

	/*
	 * To support Suppress Response feature we must satisfy
	 * following conditions. In SLI4-Parameters Descriptor,
	 * Extended Inline Buffers (XIB) must be supported.
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, sli4_params->nvme,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
	     LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;

	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	/*
	 * Check if the SLI port supports NSLER
	 */
	if (bf_get(cfg_nsler, mbx_sli4_parameters))
		phba->nsler = 1;
	else
		phba->nsler = 0;
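/*
 * Illustrative sketch: the bf_get()-style accessors used throughout the
 * routine above are shift-and-mask helpers over mailbox words.  A minimal
 * stand-in, with a hypothetical two-bit field at bits 5:4:
 */
#define EX_FIELD_SHIFT	4
#define EX_FIELD_MASK	0x3	/* two-bit field, assumed layout */

static inline u32 ex_bf_get(u32 word)
{
	return (word >> EX_FIELD_SHIFT) & EX_FIELD_MASK;
}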
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and decides whether this driver can claim it.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
	/* Allocate phba structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);

	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	vport = phba->pport;

	/* Configure and enable interrupt */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			error = -ENODEV;
			break;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			error = -ENODEV;
			break;
		}

		/* Check active interrupts received only for MSI/MSI-X */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		}
		/* Disable the current mode and retry one level down */
		lpfc_sli_disable_intr(phba);
		cfg_mode = --intr_mode;
	}
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, the driver cleans up all resources for the HBA.
 **/
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	kthread_stop(phba->worker_thread);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 *
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, the driver quiesces the device.  As the driver
 * implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * are treated the same way.
 *
 * Return code
 *	0 - driver suspended the device
 **/
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 *
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, the driver restarts the worker thread and re-enables
 * interrupts.  As the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) are treated the same way.
 *
 * Return code
 *	0 - driver resumed the device
 **/
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR)
		return -EIO;
	phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 **/
	/* ... and let the SCSI mid-layer retry them to recover. */

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 **/
	pci_disable_device(phba->pcidev);

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 **/
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 *
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem when a PCI bus error affecting this device is detected.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 *
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * pci_restore_state() clears the device's saved_state flag, so
	 * save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 *
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the driver it is safe to resume normal PCI operation.
 **/
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online; this is a no-op for a non-fatal error resume */
	lpfc_online(phba);
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 **/
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;

/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 **/
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);

	/* Firmware image is not applicable to this device/family */
			phba->pcidev->device, magic_number, ftype, fid,
			fsize, fw->size);
		rc = -EINVAL;

	/* Firmware update attempted on a read-only parameter region */
			phba->pcidev->device, magic_number, ftype, fid,
			fsize, fw->size);
		rc = -EACCES;

	/* Any other failure while writing the image */
			offset, phba->pcidev->device, magic_number,
			ftype, fid, fsize, fw->size);
		rc = -EIO;
14648 * lpfc_write_firmware - attempt to write a firmware image to the port
14665 /* It can be NULL in no-wait mode; sanity check */ in lpfc_write_firmware()
14667 rc = -ENXIO; in lpfc_write_firmware()
14670 image = (struct lpfc_grp_hdr *)fw->data; in lpfc_write_firmware()
14672 magic_number = be32_to_cpu(image->magic_number); in lpfc_write_firmware()
14675 fsize = be32_to_cpu(image->size); in lpfc_write_firmware()
14679 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { in lpfc_write_firmware()
14683 fwrev, image->revision); in lpfc_write_firmware()
14688 rc = -ENOMEM; in lpfc_write_firmware()
14691 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, in lpfc_write_firmware()
14693 &dmabuf->phys, in lpfc_write_firmware()
14695 if (!dmabuf->virt) { in lpfc_write_firmware()
14697 rc = -ENOMEM; in lpfc_write_firmware()
14700 list_add_tail(&dmabuf->list, &dma_buffer_list); in lpfc_write_firmware()
14702 while (offset < fw->size) { in lpfc_write_firmware()
14705 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { in lpfc_write_firmware()
14706 memcpy(dmabuf->virt, in lpfc_write_firmware()
14707 fw->data + temp_offset, in lpfc_write_firmware()
14708 fw->size - temp_offset); in lpfc_write_firmware()
14709 temp_offset = fw->size; in lpfc_write_firmware()
14712 memcpy(dmabuf->virt, fw->data + temp_offset, in lpfc_write_firmware()
14717 (fw->size - offset), &offset); in lpfc_write_firmware()
14733 fwrev, image->revision); in lpfc_write_firmware()
14737 list_del(&dmabuf->list); in lpfc_write_firmware()
14738 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, in lpfc_write_firmware()
14739 dmabuf->virt, dmabuf->phys); in lpfc_write_firmware()
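The loop above never maps the whole image at once: it streams fw->data through a short list of SLI4_PAGE_SIZE coherent buffers, with the final chunk allowed to be short. A compressed sketch of that staging pattern under illustrative names (fw_chunk and stage_firmware are not lpfc symbols):

#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct fw_chunk {
	struct list_head list;
	void *virt;
	dma_addr_t phys;
};

/* Copy fw->data into page_sz-sized DMA buffers chained on buf_list */
static int stage_firmware(struct device *dev, const struct firmware *fw,
			  size_t page_sz, struct list_head *buf_list)
{
	size_t off = 0;

	while (off < fw->size) {
		size_t len = min_t(size_t, page_sz, fw->size - off);
		struct fw_chunk *chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);

		if (!chunk)
			return -ENOMEM;
		chunk->virt = dma_alloc_coherent(dev, page_sz, &chunk->phys,
						 GFP_KERNEL);
		if (!chunk->virt) {
			kfree(chunk);
			return -ENOMEM;
		}
		memcpy(chunk->virt, fw->data + off, len);
		list_add_tail(&chunk->list, buf_list);
		off += len;
	}
	return 0;
}

On teardown, each buffer is released with dma_free_coherent() after list_del(), exactly as the excerpt's cleanup path does.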
14753 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14768 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < in lpfc_sli4_request_firmware_update()
14770 return -EPERM; in lpfc_sli4_request_firmware_update()
14772 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); in lpfc_sli4_request_firmware_update()
14776 file_name, &phba->pcidev->dev, in lpfc_sli4_request_firmware_update()
14780 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); in lpfc_sli4_request_firmware_update()
14784 ret = -EINVAL; in lpfc_sli4_request_firmware_update()
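Both entry points above end at the kernel firmware loader; the only real difference is whether the caller blocks. A hedged sketch of the two call shapes (the file name, context, and fw_done callback are placeholders, and the FW_ACTION_UEVENT spelling follows recent kernels):

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

static void fw_done(const struct firmware *fw, void *context)
{
	/* fw is NULL here when the image could not be found */
	if (fw)
		release_firmware(fw);
}

static int demo_fw_update(struct pci_dev *pdev, bool wait, void *context)
{
	const struct firmware *fw;
	int ret;

	if (wait) {
		/* Blocking: sleep until the loader resolves the file */
		ret = request_firmware(&fw, "model.grp", &pdev->dev);
		if (!ret)
			fw_done(fw, context);
		return ret;
	}
	/* Async: fw_done() runs later from the loader's context */
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
				       "model.grp", &pdev->dev,
				       GFP_KERNEL, context, fw_done);
}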
14791 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14796 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14797 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
14805 * 0 - driver can claim the device
14806 * negative value - driver cannot claim the device
14820 return -ENOMEM; in lpfc_pci_probe_one_s4()
14822 INIT_LIST_HEAD(&phba->poll_list); in lpfc_pci_probe_one_s4()
14829 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ in lpfc_pci_probe_one_s4()
14834 /* Set up SLI-4 specific device PCI memory space */ in lpfc_pci_probe_one_s4()
14842 /* Set up SLI-4 Specific device driver resources */ in lpfc_pci_probe_one_s4()
14850 INIT_LIST_HEAD(&phba->active_rrq_list); in lpfc_pci_probe_one_s4()
14851 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); in lpfc_pci_probe_one_s4()
14862 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); in lpfc_pci_probe_one_s4()
14865 cfg_mode = phba->cfg_use_msi; in lpfc_pci_probe_one_s4()
14868 phba->pport = NULL; in lpfc_pci_probe_one_s4()
14882 error = -ENODEV; in lpfc_pci_probe_one_s4()
14885 /* Default to single EQ for non-MSI-X */ in lpfc_pci_probe_one_s4()
14886 if (phba->intr_type != MSIX) { in lpfc_pci_probe_one_s4()
14887 phba->cfg_irq_chann = 1; in lpfc_pci_probe_one_s4()
14888 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_pci_probe_one_s4()
14889 if (phba->nvmet_support) in lpfc_pci_probe_one_s4()
14890 phba->cfg_nvmet_mrq = 1; in lpfc_pci_probe_one_s4()
14893 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); in lpfc_pci_probe_one_s4()
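The single-EQ fallback above exists because without MSI-X there is no per-CPU vector spread to map event queues onto. A hedged sketch of the usual allocate-with-fallback idiom that produces this situation (function and variable names are illustrative):

#include <linux/cpumask.h>
#include <linux/pci.h>

/* Ask for up to one vector per online CPU; fall back MSI-X -> MSI -> INTx */
static int demo_setup_vectors(struct pci_dev *pdev, unsigned int *nr_eq)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, num_online_cpus(),
					 PCI_IRQ_ALL_TYPES);

	if (nvec < 0)
		return nvec;

	/* No MSI-X means no vector-per-queue mapping: collapse to one EQ */
	*nr_eq = pdev->msix_enabled ? nvec : 1;
	return 0;
}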
14902 vport = phba->pport; in lpfc_pci_probe_one_s4()
14913 /* Set up SLI-4 HBA */ in lpfc_pci_probe_one_s4()
14917 error = -ENODEV; in lpfc_pci_probe_one_s4()
14922 phba->intr_mode = intr_mode; in lpfc_pci_probe_one_s4()
14931 if (phba->nvmet_support == 0) { in lpfc_pci_probe_one_s4()
14932 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_pci_probe_one_s4()
14949 if (phba->cfg_request_firmware_upgrade) in lpfc_pci_probe_one_s4()
14955 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); in lpfc_pci_probe_one_s4()
14956 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); in lpfc_pci_probe_one_s4()
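The probe tail above arms a poll timer and registers the HBA as a CPU-hotplug instance so polling keeps working as CPUs come and go. A hedged sketch of that pairing; demo_hba, demo_poll, and demo_cpuhp_state are illustrative stand-ins, assuming the state was allocated once at module init with cpuhp_setup_state_multi():

#include <linux/cpuhotplug.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

static enum cpuhp_state demo_cpuhp_state;	/* from cpuhp_setup_state_multi() */

struct demo_hba {
	struct timer_list poll_timer;
	struct hlist_node cpuhp;
};

static void demo_poll(struct timer_list *t)
{
	struct demo_hba *hba = from_timer(hba, t, poll_timer);

	/* ... service queues, then re-arm ... */
	mod_timer(&hba->poll_timer, jiffies + 1);
}

static void demo_probe_tail(struct demo_hba *hba)
{
	timer_setup(&hba->poll_timer, demo_poll, 0);
	cpuhp_state_add_instance_nocalls(demo_cpuhp_state, &hba->cpuhp);
}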
14982 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14986 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14994 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; in lpfc_pci_remove_one_s4()
14996 struct lpfc_hba *phba = vport->phba; in lpfc_pci_remove_one_s4()
15000 spin_lock_irq(&phba->hbalock); in lpfc_pci_remove_one_s4()
15001 vport->load_flag |= FC_UNLOADING; in lpfc_pci_remove_one_s4()
15002 spin_unlock_irq(&phba->hbalock); in lpfc_pci_remove_one_s4()
15003 if (phba->cgn_i) in lpfc_pci_remove_one_s4()
15011 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_pci_remove_one_s4()
15012 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) in lpfc_pci_remove_one_s4()
15014 fc_vport_terminate(vports[i]->fc_vport); in lpfc_pci_remove_one_s4()
15029 /* De-allocate multi-XRI pools */ in lpfc_pci_remove_one_s4()
15030 if (phba->cfg_xri_rebalancing) in lpfc_pci_remove_one_s4()
15041 spin_lock_irq(&phba->port_list_lock); in lpfc_pci_remove_one_s4()
15042 list_del_init(&vport->listentry); in lpfc_pci_remove_one_s4()
15043 spin_unlock_irq(&phba->port_list_lock); in lpfc_pci_remove_one_s4()
15069 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
15073 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
15077 * requirements to a power-aware driver's PM support for suspend/resume -- all
15085 * 0 - driver suspended the device
15092 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_suspend_one_s4()
15100 kthread_stop(phba->worker_thread); in lpfc_pci_suspend_one_s4()
15110 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
15114 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
15117 * implements the minimum PM requirements to a power-aware driver's PM support for
15118 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
15125 * 0 - driver resumed the device
15132 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_resume_one_s4()
15140 phba->worker_thread = kthread_run(lpfc_do_work, phba, in lpfc_pci_resume_one_s4()
15141 "lpfc_worker_%d", phba->brd_no); in lpfc_pci_resume_one_s4()
15142 if (IS_ERR(phba->worker_thread)) { in lpfc_pci_resume_one_s4()
15143 error = PTR_ERR(phba->worker_thread); in lpfc_pci_resume_one_s4()
15151 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); in lpfc_pci_resume_one_s4()
15155 return -EIO; in lpfc_pci_resume_one_s4()
15157 phba->intr_mode = intr_mode; in lpfc_pci_resume_one_s4()
15164 lpfc_log_intr_mode(phba, phba->intr_mode); in lpfc_pci_resume_one_s4()
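Resume simply restarts the worker thread that the suspend path stopped with kthread_stop(); the IS_ERR()/PTR_ERR() dance above is the standard check for kthread_run() failure. A hedged sketch of that lifecycle with illustrative names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int demo_worker(void *data)
{
	/* kthread_stop() makes kthread_should_stop() return true */
	while (!kthread_should_stop()) {
		/* ... process deferred HBA work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

/* resume path: start the worker, unwinding with PTR_ERR() on failure */
static int demo_resume(void *hba, int brd_no, struct task_struct **out)
{
	struct task_struct *tsk;

	tsk = kthread_run(demo_worker, hba, "demo_worker_%d", brd_no);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	*out = tsk;
	return 0;
}

/* suspend path: blocks until demo_worker() observes the stop and returns */
static void demo_suspend(struct task_struct *tsk)
{
	kthread_stop(tsk);
}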
15170 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
15183 * and let the SCSI mid-layer retry them to recover. in lpfc_sli4_prep_dev_for_recover()
15189 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
15199 int offline = pci_channel_offline(phba->pcidev); in lpfc_sli4_prep_dev_for_reset()
15221 pci_disable_device(phba->pcidev); in lpfc_sli4_prep_dev_for_reset()
15225 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15249 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15254 * with SLI-4 interface spec. This function is called by the PCI subsystem
15261 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15262 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15268 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_error_detected_s4()
15273 /* Non-fatal error, prepare for recovery */ in lpfc_io_error_detected_s4()
15277 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); in lpfc_io_error_detected_s4()
15287 set_bit(HBA_PCI_ERR, &phba->bit_flags); in lpfc_io_error_detected_s4()
15292 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); in lpfc_io_error_detected_s4()
15304 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
15308 * with SLI-4 interface spec. It is called after PCI bus has been reset to
15309 * restart the PCI card from scratch, as if from a cold-boot. During the
15318 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15319 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15325 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_slot_reset_s4()
15326 struct lpfc_sli *psli = &phba->sli; in lpfc_io_slot_reset_s4()
15330 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); in lpfc_io_slot_reset_s4()
15332 printk(KERN_ERR "lpfc: Cannot re-enable " in lpfc_io_slot_reset_s4()
15339 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags); in lpfc_io_slot_reset_s4()
15341 dev_info(&pdev->dev, in lpfc_io_slot_reset_s4()
15349 if (pdev->is_busmaster) in lpfc_io_slot_reset_s4()
15352 spin_lock_irq(&phba->hbalock); in lpfc_io_slot_reset_s4()
15353 psli->sli_flag &= ~LPFC_SLI_ACTIVE; in lpfc_io_slot_reset_s4()
15354 spin_unlock_irq(&phba->hbalock); in lpfc_io_slot_reset_s4()
15359 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); in lpfc_io_slot_reset_s4()
15362 "2824 Cannot re-enable interrupt after " in lpfc_io_slot_reset_s4()
15366 phba->intr_mode = intr_mode; in lpfc_io_slot_reset_s4()
15367 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); in lpfc_io_slot_reset_s4()
15370 lpfc_log_intr_mode(phba, phba->intr_mode); in lpfc_io_slot_reset_s4()
15376 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15380 * with SLI-4 interface spec. It is called when kernel error recovery tells
15389 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_resume_s4()
15397 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { in lpfc_io_resume_s4()
15406 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15412 * at PCI device-specific information of the device and driver to see if the
15415 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15420 * 0 - driver can claim the device
15421 * negative value - driver cannot claim the device
15430 return -ENODEV; in lpfc_pci_probe_one()
15442 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15447 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15455 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_remove_one()
15457 switch (phba->pci_dev_grp) { in lpfc_pci_remove_one()
15467 phba->pci_dev_grp); in lpfc_pci_remove_one()
15474 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15479 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15483 * 0 - driver suspended the device
15490 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_suspend_one()
15491 int rc = -ENODEV; in lpfc_pci_suspend_one()
15493 switch (phba->pci_dev_grp) { in lpfc_pci_suspend_one()
15503 phba->pci_dev_grp); in lpfc_pci_suspend_one()
15510 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15515 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15519 * 0 - driver resumed the device
15526 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_resume_one()
15527 int rc = -ENODEV; in lpfc_pci_resume_one()
15529 switch (phba->pci_dev_grp) { in lpfc_pci_resume_one()
15539 phba->pci_dev_grp); in lpfc_pci_resume_one()
15546 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15553 * the action to the proper SLI-3 or SLI-4 device error detected handling
15557 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15558 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15564 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_error_detected()
15567 if (phba->link_state == LPFC_HBA_ERROR && in lpfc_io_error_detected()
15568 phba->hba_flag & HBA_IOQ_FLUSH) in lpfc_io_error_detected()
15571 switch (phba->pci_dev_grp) { in lpfc_io_error_detected()
15581 phba->pci_dev_grp); in lpfc_io_error_detected()
15588 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
15593 * from scratch, as if from a cold-boot. When this routine is invoked, it
15594 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15598 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15599 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15605 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_slot_reset()
15608 switch (phba->pci_dev_grp) { in lpfc_io_slot_reset()
15618 phba->pci_dev_grp); in lpfc_io_slot_reset()
15625 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15631 * this routine is invoked, it dispatches the action to the proper SLI-3
15632 * or SLI-4 device io_resume routine, which will resume the device operation.
15638 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_resume()
15640 switch (phba->pci_dev_grp) { in lpfc_io_resume()
15650 phba->pci_dev_grp); in lpfc_io_resume()
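The dispatchers above are what the PCI core actually invokes; they reach it through the err_handler member of the driver's pci_driver. A hedged sketch of that wiring, reusing the entry points named in this section (field layout follows the generic PCI API; the variable names are illustrative):

#include <linux/pci.h>

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= lpfc_io_error_detected,
	.slot_reset	= lpfc_io_slot_reset,
	.resume		= lpfc_io_resume,
};

static struct pci_driver demo_driver = {
	.name		= "lpfc",
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.err_handler	= &demo_err_handler,
};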
15657 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15670 if (!phba->cfg_EnableXLane) in lpfc_sli4_oas_verify()
15673 if (phba->sli4_hba.pc_sli4_params.oas_supported) { in lpfc_sli4_oas_verify()
15674 phba->cfg_fof = 1; in lpfc_sli4_oas_verify()
15676 phba->cfg_fof = 0; in lpfc_sli4_oas_verify()
15677 mempool_destroy(phba->device_data_mem_pool); in lpfc_sli4_oas_verify()
15678 phba->device_data_mem_pool = NULL; in lpfc_sli4_oas_verify()
15685 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15695 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == in lpfc_sli4_ras_init()
15697 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == in lpfc_sli4_ras_init()
15699 phba->ras_fwlog.ras_hwsupport = true; in lpfc_sli4_ras_init()
15700 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && in lpfc_sli4_ras_init()
15701 phba->cfg_ras_fwlog_buffsize) in lpfc_sli4_ras_init()
15702 phba->ras_fwlog.ras_enabled = true; in lpfc_sli4_ras_init()
15704 phba->ras_fwlog.ras_enabled = false; in lpfc_sli4_ras_init()
15706 phba->ras_fwlog.ras_hwsupport = false; in lpfc_sli4_ras_init()
15744 * lpfc_init - lpfc module initialization routine
15751 * 0 - successful
15752 * -ENOMEM - FC attach transport failed
15753 * all others - failed
15768 error = -ENOMEM; in lpfc_init()
15822 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) in lpfc_dmp_dbg()
15825 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; in lpfc_dmp_dbg()
15826 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); in lpfc_dmp_dbg()
15832 temp_idx -= 1; in lpfc_dmp_dbg()
15834 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { in lpfc_dmp_dbg()
15838 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); in lpfc_dmp_dbg()
15840 start_idx -= dbg_cnt; in lpfc_dmp_dbg()
15843 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", in lpfc_dmp_dbg()
15851 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); in lpfc_dmp_dbg()
15852 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", in lpfc_dmp_dbg()
15854 (unsigned long)phba->dbg_log[temp_idx].t_ns, in lpfc_dmp_dbg()
15856 phba->dbg_log[temp_idx].log); in lpfc_dmp_dbg()
15859 atomic_set(&phba->dbg_log_cnt, 0); in lpfc_dmp_dbg()
15860 atomic_set(&phba->dbg_log_dmping, 0); in lpfc_dmp_dbg()
15868 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); in lpfc_dbg_print()
15876 dev_info(&phba->pcidev->dev, "%pV", &vaf); in lpfc_dbg_print()
15880 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % in lpfc_dbg_print()
15883 atomic_inc(&phba->dbg_log_cnt); in lpfc_dbg_print()
15885 vscnprintf(phba->dbg_log[idx].log, in lpfc_dbg_print()
15886 sizeof(phba->dbg_log[idx].log), fmt, args); in lpfc_dbg_print()
15889 phba->dbg_log[idx].t_ns = local_clock(); in lpfc_dbg_print()
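Together, lpfc_dbg_print() and lpfc_dmp_dbg() form a lock-free ring of recent messages: each writer claims a slot with an atomic fetch-add taken modulo the ring size, and the dumper later replays the entries in order with their timestamps. A self-contained sketch of the writer side (the ring struct and sizes are illustrative):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/stdarg.h>
#include <linux/types.h>

#define DEMO_RING_SZ 256

struct demo_ent {
	u64 t_ns;
	char msg[80];
};

static struct demo_ent demo_ring[DEMO_RING_SZ];
static atomic_t demo_idx;

static void demo_log(const char *fmt, ...)
{
	/* fetch_add hands each concurrent writer a distinct slot */
	unsigned int idx = (unsigned int)atomic_fetch_add(1, &demo_idx) %
			   DEMO_RING_SZ;
	va_list args;

	va_start(args, fmt);
	vscnprintf(demo_ring[idx].msg, sizeof(demo_ring[idx].msg), fmt, args);
	va_end(args);

	/* Timestamp last, matching the excerpt's ordering */
	demo_ring[idx].t_ns = local_clock();
}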
15893 * lpfc_exit - lpfc module removal routine