/* drivers/scsi/lpfc/lpfc_init.c -- excerpt (lines matching "+full:cpu +full:-centric") */
/*******************************************************************
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 *******************************************************************/
#include <linux/dma-mapping.h>
#include <linux/cpu.h>

/* Used when mapping IRQ vectors in a driver centric manner */
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/

/* in lpfc_config_port_prep(): */
    lpfc_vpd_t *vp = &phba->vpd;

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }

    mb = &pmb->u.mb;
    phba->link_state = LPFC_INIT_MBX_CMDS;

    if (lpfc_is_LC_HBA(phba->pcidev->device)) {
        memset((char *)mb->un.varRDnvp.rsvd3, 0,
               sizeof(mb->un.varRDnvp.rsvd3));
        memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,

                mb->mbxCommand, mb->mbxStatus);
            mempool_free(pmb, phba->mbox_mem_pool);
            return -ERESTART;

        memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
               sizeof(phba->wwnn));
        memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
               sizeof(phba->wwpn));

    phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

            mb->mbxCommand, mb->mbxStatus);
        mempool_free(pmb, phba->mbox_mem_pool);
        return -ERESTART;

    if (mb->un.varRdRev.rr == 0) {
        vp->rev.rBit = 0;

        mempool_free(pmb, phba->mbox_mem_pool);
        return -ERESTART;
    }

    if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
        mempool_free(pmb, phba->mbox_mem_pool);
        return -EINVAL;
    }

    vp->rev.rBit = 1;
    memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
    vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
    memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
    vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
    memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
    vp->rev.biuRev = mb->un.varRdRev.biuRev;
    vp->rev.smRev = mb->un.varRdRev.smRev;
    vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
    vp->rev.endecRev = mb->un.varRdRev.endecRev;
    vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
    vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
    vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
    vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
    vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
    vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

    if (vp->rev.feaLevelHigh < 9)
        phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

    if (lpfc_is_LC_HBA(phba->pcidev->device))
        memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
               sizeof(phba->RandomData));

                mb->mbxCommand, mb->mbxStatus);
            mb->un.varDmp.word_cnt = 0;

        if (mb->un.varDmp.word_cnt == 0)

        if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
            mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;

                      mb->un.varDmp.word_cnt);
        offset += mb->un.varDmp.word_cnt;
    } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

    mempool_free(pmb, phba->mbox_mem_pool);
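/*
 * Editor's illustrative sketch (not part of the driver): the fragment above
 * repeats one allocation pattern -- draw a mailbox command from the HBA's
 * mempool, and on any failure hand it back before returning a negative
 * errno.  The helper name is hypothetical; mempool_alloc()/mempool_free()
 * are the real kernel APIs.
 */
static int example_issue_mbox(struct lpfc_hba *phba)
{
    LPFC_MBOXQ_t *pmb;

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb)
        return -ENOMEM; /* pool and emergency reserve exhausted */

    /* ... fill in pmb->u.mb and issue it here ... */

    mempool_free(pmb, phba->mbox_mem_pool); /* always back to the pool */
    return 0;
}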
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 **/

/* in lpfc_config_async_cmpl(): */
    if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
        phba->temp_sensor_support = 1;
    else
        phba->temp_sensor_support = 0;
    mempool_free(pmboxq, phba->mbox_mem_pool);
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 **/

/* in lpfc_dump_wakeup_param_cmpl(): */
    if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
    }

    prog_id_word = pmboxq->u.mb.un.varWords[7];

    if (prg->dist < 4)
        dist = dist_char[prg->dist];

    if ((prg->dist == 3) && (prg->num == 0))
        snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
                 prg->ver, prg->rev, prg->lev);
    else
        snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
                 prg->ver, prg->rev, prg->lev,
                 dist, prg->num);
    mempool_free(pmboxq, phba->mbox_mem_pool);
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 **/

/* in lpfc_update_vport_wwn(): */
    uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
    u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

    if (vport->phba->cfg_soft_wwnn)
        u64_to_wwn(vport->phba->cfg_soft_wwnn,
                   vport->fc_sparam.nodeName.u.wwn);
    if (vport->phba->cfg_soft_wwpn)
        u64_to_wwn(vport->phba->cfg_soft_wwpn,
                   vport->fc_sparam.portName.u.wwn);

    if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof(struct lpfc_name));
    else
        memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
               sizeof(struct lpfc_name));

    if (vport->fc_portname.u.wwn[0] != 0 &&
        memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name)))
        vport->vport_flag |= FAWWPN_PARAM_CHG;

    if (vport->fc_portname.u.wwn[0] == 0 ||
        vport->phba->cfg_soft_wwpn ||

        vport->vport_flag & FAWWPN_SET) {
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name));
        vport->vport_flag &= ~FAWWPN_SET;

            vport->vport_flag |= FAWWPN_SET;
    } else
        memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
               sizeof(struct lpfc_name));
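/*
 * Editor's illustrative sketch: wwn_to_u64()/u64_to_wwn() used with the
 * service parameters above treat a WWN as an 8-byte big-endian quantity.
 * Standalone equivalents (assumption: same byte order as the SCSI FC
 * transport helpers; the names are hypothetical):
 */
static inline unsigned long long example_wwn_to_u64(const unsigned char wwn[8])
{
    unsigned long long v = 0;
    int i;

    for (i = 0; i < 8; i++)
        v = (v << 8) | wwn[i]; /* wwn[0] is the most significant byte */
    return v;
}

static inline void example_u64_to_wwn(unsigned long long v, unsigned char wwn[8])
{
    int i;

    for (i = 7; i >= 0; i--) { /* fill least significant byte first */
        wwn[i] = (unsigned char)(v & 0xff);
        v >>= 8;
    }
}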
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/

/* in lpfc_config_port_post(): */
    struct lpfc_vport *vport = phba->pport;

    struct lpfc_sli *psli = &phba->sli;

    spin_lock_irq(&phba->hbalock);

    if (phba->over_temp_state == HBA_OVER_TEMP)
        phba->over_temp_state = HBA_NORMAL_TEMP;
    spin_unlock_irq(&phba->hbalock);

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }
    mb = &pmb->u.mb;

        mempool_free(pmb, phba->mbox_mem_pool);
        return -ENOMEM;

    pmb->vport = vport;

            mb->mbxCommand, mb->mbxStatus);
        phba->link_state = LPFC_HBA_ERROR;
        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        mempool_free(pmb, phba->mbox_mem_pool);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);

        return -EIO;

    mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

    memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
    lpfc_mbuf_free(phba, mp->virt, mp->phys);

    pmb->ctx_buf = NULL;

    fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
    fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
    fc_host_max_npiv_vports(shost) = phba->max_vpi;

    /* This should be consolidated into parse_vpd ? - mr */
    if (phba->SerialNumber[0] == 0) {

        outptr = &vport->fc_nodename.u.s.IEEE[0];

                phba->SerialNumber[i] =

                phba->SerialNumber[i] =
                    (char)((uint8_t)0x61 + (uint8_t)(j - 10));

                phba->SerialNumber[i] =

                phba->SerialNumber[i] =
                    (char)((uint8_t)0x61 + (uint8_t)(j - 10));

    pmb->vport = vport;

            mb->mbxCommand, mb->mbxStatus);
        phba->link_state = LPFC_HBA_ERROR;
        mempool_free(pmb, phba->mbox_mem_pool);
        return -EIO;

    if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {

                phba->cfg_hba_queue_depth,
                mb->un.varRdConfig.max_xri);
        phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
    }

    phba->lmt = mb->un.varRdConfig.lmt;

    lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

    phba->link_state = LPFC_LINK_DOWN;

    if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
        psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
    if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
        psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

    if (phba->sli_rev != 3)

    /*
     * Configure HBA MSI-X attention conditions to messages if MSI-X mode
     */
    if (phba->intr_type == MSIX) {

            mempool_free(pmb, phba->mbox_mem_pool);
            return -EIO;

                pmb->u.mb.mbxCommand,
                pmb->u.mb.mbxStatus);
            mempool_free(pmb, phba->mbox_mem_pool);
            return -EIO;
    }

    spin_lock_irq(&phba->hbalock);

    phba->hba_flag &= ~HBA_ERATT_HANDLED;

    if (lpfc_readl(phba->HCregaddr, &status)) {
        spin_unlock_irq(&phba->hbalock);
        return -EIO;
    }

    if (psli->num_rings > 0)
        status |= HC_R0INT_ENA;
    if (psli->num_rings > 1)
        status |= HC_R1INT_ENA;
    if (psli->num_rings > 2)
        status |= HC_R2INT_ENA;
    if (psli->num_rings > 3)
        status |= HC_R3INT_ENA;

    if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
        (phba->cfg_poll & DISABLE_FCP_RING_INT))
        status &= ~(HC_R0INT_ENA);

    writel(status, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);

    /* Set up ring-0 (ELS) timer */
    timeout = phba->fc_ratov * 2;
    mod_timer(&vport->els_tmofunc,

    mod_timer(&phba->hb_tmofunc,

    phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
    phba->last_completion_time = jiffies;

    mod_timer(&phba->eratt_poll,
              jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

    if (phba->hba_flag & LINK_DISABLED) {

        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

            mempool_free(pmb, phba->mbox_mem_pool);
            return -EIO;
    } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
        mempool_free(pmb, phba->mbox_mem_pool);
        rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
    }

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }

    pmb->mbox_cmpl = lpfc_config_async_cmpl;
    pmb->vport = phba->pport;

        mempool_free(pmb, phba->mbox_mem_pool);

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }

    pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
    pmb->vport = phba->pport;

        mempool_free(pmb, phba->mbox_mem_pool);
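/*
 * Editor's illustrative sketch: the interrupt-enable sequence above is a
 * read-modify-write of the Host Control register under hbalock, followed by
 * a readl() to flush the posted PCI write.  Hypothetical helper built only
 * from calls visible in the fragment:
 */
static int example_hc_set_bits(struct lpfc_hba *phba, uint32_t set_bits)
{
    uint32_t status;

    spin_lock_irq(&phba->hbalock);
    if (lpfc_readl(phba->HCregaddr, &status)) { /* register unreadable */
        spin_unlock_irq(&phba->hbalock);
        return -EIO;
    }
    status |= set_bits;
    writel(status, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);
    return 0;
}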
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/

/* in lpfc_hba_init_link(): */
    return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/

/* in lpfc_hba_init_link_fc_topology(): */
    struct lpfc_vport *vport = phba->pport;

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }
    mb = &pmb->u.mb;
    pmb->vport = vport;

    if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
         !(phba->lmt & LMT_1Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
         !(phba->lmt & LMT_2Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
         !(phba->lmt & LMT_4Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
         !(phba->lmt & LMT_8Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
         !(phba->lmt & LMT_10Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
         !(phba->lmt & LMT_16Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
         !(phba->lmt & LMT_32Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
         !(phba->lmt & LMT_64Gb))) {

                phba->cfg_link_speed);
        phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
    }
    lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
    pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
    if (phba->sli_rev < LPFC_SLI_REV4)

            mb->mbxCommand, mb->mbxStatus);
        if (phba->sli_rev <= LPFC_SLI_REV3) {
            writel(0, phba->HCregaddr);
            readl(phba->HCregaddr); /* flush */

            writel(0xffffffff, phba->HAregaddr);
            readl(phba->HAregaddr); /* flush */
        }
        phba->link_state = LPFC_HBA_ERROR;

            mempool_free(pmb, phba->mbox_mem_pool);
        return -EIO;
    }
    phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;

        mempool_free(pmb, phba->mbox_mem_pool);
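/*
 * Editor's illustrative alternative (not the driver's code): the long
 * if-chain above that validates cfg_link_speed against the link-media-type
 * bits could be table driven.  Sketch reusing the real LPFC_USER_LINK_SPEED_*
 * and LMT_* constants; the table and helper names are hypothetical:
 */
static const struct {
    uint32_t speed;   /* LPFC_USER_LINK_SPEED_* value */
    uint32_t lmt_bit; /* matching LMT_* capability bit */
} example_speed_tbl[] = {
    { LPFC_USER_LINK_SPEED_1G,  LMT_1Gb  },
    { LPFC_USER_LINK_SPEED_2G,  LMT_2Gb  },
    { LPFC_USER_LINK_SPEED_4G,  LMT_4Gb  },
    { LPFC_USER_LINK_SPEED_8G,  LMT_8Gb  },
    { LPFC_USER_LINK_SPEED_10G, LMT_10Gb },
    { LPFC_USER_LINK_SPEED_16G, LMT_16Gb },
    { LPFC_USER_LINK_SPEED_32G, LMT_32Gb },
    { LPFC_USER_LINK_SPEED_64G, LMT_64Gb },
};

static bool example_speed_supported(struct lpfc_hba *phba)
{
    int i;

    if (phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX)
        return false;
    for (i = 0; i < ARRAY_SIZE(example_speed_tbl); i++)
        if (phba->cfg_link_speed == example_speed_tbl[i].speed)
            return (phba->lmt & example_speed_tbl[i].lmt_bit) != 0;
    return true; /* e.g. AUTO: no specific capability bit required */
}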
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/

/* in lpfc_hba_down_link(): */
    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }

    pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

        mempool_free(pmb, phba->mbox_mem_pool);
        return -EIO;

    mempool_free(pmb, phba->mbox_mem_pool);
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/

/* in lpfc_hba_down_prep(): */
    if (phba->sli_rev <= LPFC_SLI_REV3) {
        writel(0, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
    }

    if (phba->pport->load_flag & FC_UNLOADING)
        lpfc_cleanup_discovery_resources(phba->pport);

        for (i = 0; i <= phba->max_vports &&
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 **/

/* in lpfc_sli4_free_sp_events(): */
    spin_lock_irq(&phba->hbalock);
    phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
    spin_unlock_irq(&phba->hbalock);

    while (!list_empty(&phba->sli4_hba.sp_queue_event)) {

        spin_lock_irq(&phba->hbalock);
        list_remove_head(&phba->sli4_hba.sp_queue_event,

        spin_unlock_irq(&phba->hbalock);

        switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {

            lpfc_in_buf_free(phba, &dmabuf->dbuf);
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 **/

/* in lpfc_hba_free_post_buf(): */
    struct lpfc_sli *psli = &phba->sli;

    if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
        lpfc_sli_hbqbuf_free_all(phba);

    pring = &psli->sli3_ring[LPFC_ELS_RING];
    spin_lock_irq(&phba->hbalock);
    list_splice_init(&pring->postbufq, &buflist);
    spin_unlock_irq(&phba->hbalock);

        list_del(&mp->list);

        lpfc_mbuf_free(phba, mp->virt, mp->phys);

    spin_lock_irq(&phba->hbalock);
    pring->postbufq_cnt -= count;
    spin_unlock_irq(&phba->hbalock);
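/*
 * Editor's illustrative sketch of the idiom above: detach the whole postbufq
 * under the lock with list_splice_init(), free the entries with no lock
 * held, then re-take the lock only to fix up the counter.  Element type and
 * helper name are hypothetical:
 */
struct example_buf {
    struct list_head list;
};

static int example_drain(spinlock_t *lock, struct list_head *queue)
{
    LIST_HEAD(tmp);
    struct example_buf *b, *next;
    int count = 0;

    spin_lock_irq(lock);
    list_splice_init(queue, &tmp); /* queue is now empty */
    spin_unlock_irq(lock);

    list_for_each_entry_safe(b, next, &tmp, list) {
        list_del(&b->list);
        kfree(b);   /* safe: no spinlock held */
        count++;
    }
    return count;   /* caller adjusts the queue count */
}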
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 **/

/* in lpfc_hba_clean_txcmplq(): */
    struct lpfc_sli *psli = &phba->sli;

    if (phba->sli_rev != LPFC_SLI_REV4) {
        for (i = 0; i < psli->num_rings; i++) {
            pring = &psli->sli3_ring[i];
            spin_lock_irq(&phba->hbalock);

            list_splice_init(&pring->txcmplq, &completions);
            pring->txcmplq_cnt = 0;
            spin_unlock_irq(&phba->hbalock);

        }
    } else {
        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
            pring = qp->pring;

            spin_lock_irq(&pring->ring_lock);
            list_for_each_entry_safe(piocb, next_iocb,
                                     &pring->txcmplq, list)
                piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
            list_splice_init(&pring->txcmplq, &completions);
            pring->txcmplq_cnt = 0;
            spin_unlock_irq(&pring->ring_lock);

        }
    }
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/

/* in lpfc_hba_down_post_s4(): */
    spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
    list_for_each_entry(sglq_entry,
                        &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
        sglq_entry->state = SGL_FREED;

    list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                     &phba->sli4_hba.lpfc_els_sgl_list);

    spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

    spin_lock_irq(&phba->hbalock);

    for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
        qp = &phba->sli4_hba.hdwq[idx];

        spin_lock(&qp->abts_io_buf_list_lock);
        list_splice_init(&qp->lpfc_abts_io_buf_list,
                         &aborts);

            psb->pCmd = NULL;
            psb->status = IOSTAT_SUCCESS;

        spin_lock(&qp->io_buf_list_put_lock);
        list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
        qp->put_io_bufs += qp->abts_scsi_io_bufs;
        qp->put_io_bufs += qp->abts_nvme_io_bufs;
        qp->abts_scsi_io_bufs = 0;
        qp->abts_nvme_io_bufs = 0;
        spin_unlock(&qp->io_buf_list_put_lock);
        spin_unlock(&qp->abts_io_buf_list_lock);
    }
    spin_unlock_irq(&phba->hbalock);

    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
        spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                         &nvmet_aborts);
        spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);

            ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
            lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
    }
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/

/* in lpfc_hba_down_post(): */
    return (*phba->lpfc_hba_down_post)(phba);
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * work-port-events bitmap and the worker thread is notified. This timeout
 **/

/* in lpfc_hb_timeout(): */
    spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
    tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
    if (!tmo_posted)
        phba->pport->work_port_events |= WORKER_HB_TMO;
    spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * work-port-events bitmap and the worker thread is notified. This timeout
 **/

/* in lpfc_rrq_timeout(): */
    spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
    if (!(phba->pport->load_flag & FC_UNLOADING))
        phba->hba_flag |= HBA_RRQ_ACTIVE;
    else
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
    spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

    if (!(phba->pport->load_flag & FC_UNLOADING))
        lpfc_worker_wake_up(phba);
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/

/* in lpfc_hb_mbox_cmpl(): */
    spin_lock_irqsave(&phba->hbalock, drvr_flag);
    phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
    spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

    /* Check and reset heart-beat timer if necessary */
    mempool_free(pmboxq, phba->mbox_mem_pool);
    if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
        !(phba->link_state == LPFC_HBA_ERROR) &&
        !(phba->pport->load_flag & FC_UNLOADING))
        mod_timer(&phba->hb_tmofunc,
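/*
 * Editor's illustrative sketch: the completion handler above re-arms the
 * heartbeat timer only when the port is still usable.  The helper name is
 * hypothetical; LPFC_HB_MBOX_INTERVAL is the driver's real interval macro:
 */
static void example_rearm_heartbeat(struct lpfc_hba *phba)
{
    if (phba->pport->fc_flag & FC_OFFLINE_MODE ||
        phba->link_state == LPFC_HBA_ERROR ||
        phba->pport->load_flag & FC_UNLOADING)
        return; /* same three guards as the fragment above */
    mod_timer(&phba->hb_tmofunc,
              jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}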
/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 **/

/* in lpfc_idle_stat_delay_work(): */
    if (phba->pport->load_flag & FC_UNLOADING)
        return;

    if (phba->link_state == LPFC_HBA_ERROR ||
        phba->pport->fc_flag & FC_OFFLINE_MODE ||
        phba->cmf_active_mode != LPFC_CFG_OFF)
        goto requeue;

    for_each_present_cpu(i) {
        hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
        cq = hdwq->io_cq;

        /* Skip if we've already handled this cq's primary CPU */
        if (cq->chann != i)
            continue;

        idle_stat = &phba->sli4_hba.idle_stat[i];

        /*
         * percentage of 100 - the sum of the other consumption times.
         */
        diff_idle = wall_idle - idle_stat->prev_idle;
        diff_wall = wall - idle_stat->prev_wall;

            busy_time = diff_wall - diff_idle;

        idle_percent = 100 - idle_percent;

            cq->poll_mode = LPFC_QUEUE_WORK;
        else
            cq->poll_mode = LPFC_IRQ_POLL;

        idle_stat->prev_idle = wall_idle;
        idle_stat->prev_wall = wall;
    }

requeue:
    schedule_delayed_work(&phba->idle_stat_delay_work,
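/*
 * Editor's illustrative arithmetic for the polling decision above: derive
 * the busy share of the sampling window from the idle/wall deltas, then
 * pick a completion-queue mode.  The 15% threshold is an assumption about
 * the elided comparison:
 */
static int example_cq_wants_queue_work(u64 diff_idle, u64 diff_wall)
{
    u64 busy_time = (diff_wall > diff_idle) ? diff_wall - diff_idle : 0;
    unsigned int idle_percent;

    if (!diff_wall)
        return 0; /* no elapsed time: keep IRQ polling */
    idle_percent = 100 - (unsigned int)div64_u64(busy_time * 100, diff_wall);
    return idle_percent < 15; /* busy CPU -> defer completions to a workqueue */
}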
/* in lpfc_hb_eq_delay_work(): */
    if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
        return;

    if (phba->link_state == LPFC_HBA_ERROR ||
        phba->pport->fc_flag & FC_OFFLINE_MODE)
        return;

    ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
                        GFP_KERNEL);

    for (i = 0; i < phba->cfg_irq_chann; i++) {
        eq = phba->sli4_hba.hba_eq_hdl[i].eq;

        if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
            eq->q_flag &= ~HBA_EQ_DELAY_CHK;
            ena_delay[eq->last_cpu] = 1;
        }
    }

        eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);

        usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;

        eqi->icnt = 0;

        list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
            if (unlikely(eq->last_cpu != i)) {
                eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
                                      eq->last_cpu);
                list_move_tail(&eq->cpu_list, &eqi_new->list);
                continue;
            }
            if (usdelay != eq->q_mode)
                lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
                                         usdelay);
        }

    queue_delayed_work(phba->wq, &phba->eq_delay_work,
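/*
 * Editor's illustrative sketch: the coalescing delay above grows by one
 * LPFC_EQ_DELAY_STEP per 1024 interrupts seen on the CPU in the sampling
 * period.  The clamp constant is an assumption about the elided lines:
 */
static uint32_t example_eq_usdelay(uint32_t icnt)
{
    uint32_t usdelay = (icnt >> 10) * LPFC_EQ_DELAY_STEP;

    if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) /* assumed upper bound */
        usdelay = LPFC_MAX_AUTO_EQ_DELAY;
    return usdelay;
}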
/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 **/

/* in lpfc_hb_mxp_handler(): */
    hwq_count = phba->cfg_hdw_queue;
/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 **/

/* in lpfc_issue_hb_mbox(): */
    if (phba->hba_flag & HBA_HBEAT_INP)
        return 0;

    pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmboxq)
        return -ENOMEM;

    pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
    pmboxq->vport = phba->pport;

        mempool_free(pmboxq, phba->mbox_mem_pool);
        return -ENXIO;

    phba->hba_flag |= HBA_HBEAT_INP;
/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 **/

/* in lpfc_issue_hb_tmo(): */
    if (phba->cfg_enable_hba_heartbeat)
        return;
    phba->hba_flag |= HBA_HBEAT_TMO;
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 **/

/* in lpfc_hb_timeout_handler(): */
    struct lpfc_sli *psli = &phba->sli;

    if (phba->cfg_xri_rebalancing) {
        /* Multi-XRI pools handler */
        lpfc_hb_mxp_handler(phba);
    }

        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {

    if ((phba->link_state == LPFC_HBA_ERROR) ||
        (phba->pport->load_flag & FC_UNLOADING) ||
        (phba->pport->fc_flag & FC_OFFLINE_MODE))
        return;

    if (phba->elsbuf_cnt &&
        (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
        spin_lock_irq(&phba->hbalock);
        list_splice_init(&phba->elsbuf, &completions);
        phba->elsbuf_cnt = 0;
        phba->elsbuf_prev_cnt = 0;
        spin_unlock_irq(&phba->hbalock);

            lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
    }
    phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

    if (phba->cfg_enable_hba_heartbeat) {
        spin_lock_irq(&phba->pport->work_port_lock);
        if (time_after(phba->last_completion_time +

            spin_unlock_irq(&phba->pport->work_port_lock);
            if (phba->hba_flag & HBA_HBEAT_INP)

        spin_unlock_irq(&phba->pport->work_port_lock);

        if (phba->hba_flag & HBA_HBEAT_INP) {

                     - phba->last_completion_time));

        if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
            (list_empty(&psli->mboxq))) {

            phba->skipped_hb = 0;
        } else if (time_before_eq(phba->last_completion_time,
                                  phba->skipped_hb)) {

                     - phba->last_completion_time));

            phba->skipped_hb = jiffies;
        }

    if (phba->hba_flag & HBA_HBEAT_TMO) {

    mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 **/

/* in lpfc_offline_eratt(): */
    struct lpfc_sli *psli = &phba->sli;

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag &= ~LPFC_SLI_ACTIVE;
    spin_unlock_irq(&phba->hbalock);

    spin_lock_irq(&phba->hbalock);

    spin_unlock_irq(&phba->hbalock);

    phba->link_state = LPFC_HBA_ERROR;
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 **/

/* in lpfc_sli4_offline_eratt(): */
    spin_lock_irq(&phba->hbalock);
    phba->link_state = LPFC_HBA_ERROR;
    spin_unlock_irq(&phba->hbalock);
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 **/

/* in lpfc_handle_deferred_eratt(): */
    uint32_t old_host_status = phba->work_hs;
    struct lpfc_sli *psli = &phba->sli;

    if (pci_channel_offline(phba->pcidev)) {
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~DEFER_ERATT;
        spin_unlock_irq(&phba->hbalock);
        return;
    }

            phba->work_hs, phba->work_status[0],
            phba->work_status[1]);

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag &= ~LPFC_SLI_ACTIVE;
    spin_unlock_irq(&phba->hbalock);

    /*
     * SCSI layer retry it after re-establishing link.
     */

    while (phba->work_hs & HS_FFER1) {
        msleep(100);
        if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
            phba->work_hs = UNPLUG_ERR;
            break;
        }
        if (phba->pport->load_flag & FC_UNLOADING) {
            phba->work_hs = 0;
            break;
        }
    }

    if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
        phba->work_hs = old_host_status & ~HS_FFER1;

    spin_lock_irq(&phba->hbalock);
    phba->hba_flag &= ~DEFER_ERATT;
    spin_unlock_irq(&phba->hbalock);
    phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
    phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
/* in lpfc_board_errevt_to_mgmt(): */
    shost = lpfc_shost_from_vport(phba->pport);
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 *
 * Return codes
 *   1 - HBA error attention interrupt
 *   2 - DMA ring index out of range
 *   3 - Mailbox command came back as unknown
 **/

/* in lpfc_handle_eratt_s3(): */
    struct lpfc_vport *vport = phba->pport;
    struct lpfc_sli *psli = &phba->sli;

    if (pci_channel_offline(phba->pcidev)) {
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~DEFER_ERATT;
        spin_unlock_irq(&phba->hbalock);
        return;
    }

    /* If resets are disabled then leave the HBA alone and return */
    if (!phba->cfg_enable_hba_reset)
        return;

    if (phba->hba_flag & DEFER_ERATT)
        lpfc_handle_deferred_eratt(phba);

    if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
        if (phba->work_hs & HS_FFER6)
            /* Re-establishing Link */
                "1301 Re-establishing Link "
                    phba->work_hs, phba->work_status[0],
                    phba->work_status[1]);
        if (phba->work_hs & HS_FFER8)

                phba->work_hs, phba->work_status[0],
                phba->work_status[1]);

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);

        /*
         * retry it after re-establishing link.
         */

    } else if (phba->work_hs & HS_CRIT_TEMP) {
        temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);

                temperature, phba->work_hs,
                phba->work_status[0], phba->work_status[1]);

        shost = lpfc_shost_from_vport(phba->pport);

        spin_lock_irq(&phba->hbalock);
        phba->over_temp_state = HBA_OVER_TEMP;
        spin_unlock_irq(&phba->hbalock);

            phba->work_hs,
            phba->work_status[0], phba->work_status[1]);
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 **/

/* in lpfc_sli4_port_sta_fn_reset(): */
    if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=

    spin_lock_irq(&phba->hbalock);
    phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
    if (phba->sli.mbox_active) {
        mboxq = phba->sli.mbox_active;
        mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;

        phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
        phba->sli.mbox_active = NULL;
    }
    spin_unlock_irq(&phba->hbalock);

    intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);

        return -EIO;

    phba->intr_mode = intr_mode;
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 **/

/* in lpfc_handle_eratt_s4(): */
    struct lpfc_vport *vport = phba->pport;

    if (pci_channel_offline(phba->pcidev)) {

    if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

            phba->sli4_hba.u.if_type0.UERRLOregaddr,

            phba->sli4_hba.u.if_type0.UEMASKLOregaddr,

        if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
            return;
        if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {

        for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
            if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,

        if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,

        phba->link_state = LPFC_HBA_ERROR;

            phba->sli4_hba.u.if_type2.STATUSregaddr,

        if (pci_rd_rc1 == -EIO) {

                readl(phba->sli4_hba.u.if_type2.STATUSregaddr));

        reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
        reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);

            phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;

        shost = lpfc_shost_from_vport(phba->pport);

        spin_lock_irq(&phba->hbalock);
        phba->over_temp_state = HBA_OVER_TEMP;
        spin_unlock_irq(&phba->hbalock);

        if (!phba->cfg_enable_hba_reset)
            return;

        phba->link_state = LPFC_HBA_ERROR;
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/

/* in lpfc_handle_eratt(): */
    (*phba->lpfc_handle_eratt)(phba);
/**
 * lpfc_handle_latt - The HBA link event handler
 **/

/* in lpfc_handle_latt(): */
    struct lpfc_vport *vport = phba->pport;
    struct lpfc_sli *psli = &phba->sli;

    pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

    mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
    if (!mp->virt) {

    psli->slistat.link_event++;

    pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
    pmb->vport = vport;

    phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

    /* Clear Link Attention in HA REG */
    spin_lock_irq(&phba->hbalock);
    writel(HA_LATT, phba->HAregaddr);
    readl(phba->HAregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);

    phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
    lpfc_mbuf_free(phba, mp->virt, mp->phys);

    mempool_free(pmb, phba->mbox_mem_pool);

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag |= LPFC_PROCESS_LA;
    control = readl(phba->HCregaddr);
    control |= HC_LAINT_ENA;
    writel(control, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */

    /* Clear Link Attention in HA REG */
    writel(HA_LATT, phba->HAregaddr);
    readl(phba->HAregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);

    phba->link_state = LPFC_HBA_ERROR;
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/

/* in lpfc_parse_vpd(): */
    while (!finished && (index < (len - 4))) {

        if (Length > len - index)
            Length = len - index;

        Length -= (3 + i);
        while (i--) {
            phba->SerialNumber[j++] = vpd[index++];
        }
        phba->SerialNumber[j] = 0;

        phba->vpd_flag |= VPD_MODEL_DESC;

        Length -= (3 + i);
        while (i--) {
            phba->ModelDesc[j++] = vpd[index++];
        }
        phba->ModelDesc[j] = 0;

        phba->vpd_flag |= VPD_MODEL_NAME;

        Length -= (3 + i);
        while (i--) {
            phba->ModelName[j++] = vpd[index++];
        }
        phba->ModelName[j] = 0;

        phba->vpd_flag |= VPD_PROGRAM_TYPE;

        Length -= (3 + i);
        while (i--) {
            phba->ProgramType[j++] = vpd[index++];
        }
        phba->ProgramType[j] = 0;

        phba->vpd_flag |= VPD_PORT;

        Length -= (3 + i);
        while (i--) {
            if ((phba->sli_rev == LPFC_SLI_REV4) &&
                (phba->sli4_hba.pport_name_sta ==

                phba->Port[j++] = vpd[index++];
        }
        if ((phba->sli_rev != LPFC_SLI_REV4) ||
            (phba->sli4_hba.pport_name_sta ==

            phba->Port[j] = 0;

        Length -= (3 + i);
    }
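/*
 * Editor's illustrative standalone sketch of the VPD walk above: each
 * keyword carries a one-byte payload length at offset 2; the payload is
 * copied out with truncation and the cursor advances past the field.
 * Hypothetical reader, independent of the driver structures:
 */
static int example_vpd_copy_field(const unsigned char *vpd, int len,
                                  int index, char *out, int outsz)
{
    int i = vpd[index + 2]; /* payload length */
    int j = 0;

    index += 3;             /* skip keyword and length bytes */
    while (i-- && index < len) {
        if (j < outsz - 1)
            out[j++] = vpd[index];
        index++;
    }
    out[j] = '\0';
    return index;           /* new cursor for the caller */
}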
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 **/

/* in lpfc_get_hba_model_desc(): */
    uint16_t dev_id = phba->pcidev->device;

    if (phba->lmt & LMT_64Gb)
        max_speed = 64;
    else if (phba->lmt & LMT_32Gb)
        max_speed = 32;
    else if (phba->lmt & LMT_16Gb)
        max_speed = 16;
    else if (phba->lmt & LMT_10Gb)
        max_speed = 10;
    else if (phba->lmt & LMT_8Gb)
        max_speed = 8;
    else if (phba->lmt & LMT_4Gb)
        max_speed = 4;
    else if (phba->lmt & LMT_2Gb)
        max_speed = 2;
    else if (phba->lmt & LMT_1Gb)
        max_speed = 1;

    vp = &phba->vpd;

    if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)

    if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)

    m = (typeof(m)){"LP9802", "PCI-X",

    m = (typeof(m)){"LP10000", "PCI-X",

    m = (typeof(m)){"LPX1000", "PCI-X",

    m = (typeof(m)){"LP982", "PCI-X",

    m = (typeof(m)){"LP1050", "PCI-X",

    m = (typeof(m)){"LP11000", "PCI-X2",

    m = (typeof(m)){"LP11000-SP", "PCI-X2",

    m = (typeof(m)){"LP11002-SP", "PCI-X2",

    m = (typeof(m)){"LPe1000-SP", "PCIe",

    m = (typeof(m)){"LPe1002-SP", "PCIe",

    m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};

    m = (typeof(m)){"LP111", "PCI-X2",

    m = (typeof(m)){"LP101", "PCI-X",

    m = (typeof(m)){"LP10000-S", "PCI",

    m = (typeof(m)){"LP11000-S", "PCI-X2",

    m = (typeof(m)){"LPe11000-S", "PCIe",

    m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};

    m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};

    m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};

    m = (typeof(m)){"LPemv12002-S", "PCIe IOV",

    m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",

        phba->Port);
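/*
 * Editor's illustrative note: the repeated `m = (typeof(m)){...}` above
 * assigns an entire anonymous struct in one statement, combining GNU C
 * typeof with a C99 compound literal.  Minimal standalone equivalent:
 */
static void example_model_assign(void)
{
    struct {
        const char *name;
        const char *bus;
        const char *function;
    } m;

    /* one-shot assignment of all three members */
    m = (typeof(m)){ "LPe12000", "PCIe", "Fibre Channel Adapter" };
    (void)m; /* silence set-but-unused warnings in this sketch */
}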
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 **/

/* in lpfc_post_buffer(): */
    cnt += pring->missbufcnt;

        pring->missbufcnt = cnt;

        icmd = &iocb->iocb;

        mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
        if (!mp1 || !mp1->virt) {

            pring->missbufcnt = cnt;

        INIT_LIST_HEAD(&mp1->list);

            mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
                                        &mp2->phys);
            if (!mp2 || !mp2->virt) {

                lpfc_mbuf_free(phba, mp1->virt, mp1->phys);

                pring->missbufcnt = cnt;

            INIT_LIST_HEAD(&mp2->list);

        icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
        icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
        icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
        icmd->ulpBdeCount = 1;
        cnt--;

            icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
            icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
            icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
            cnt--;
            icmd->ulpBdeCount = 2;

        icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
        icmd->ulpLe = 1;

        if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
            IOCB_ERROR) {
            lpfc_mbuf_free(phba, mp1->virt, mp1->phys);

            lpfc_mbuf_free(phba, mp2->virt, mp2->phys);

            pring->missbufcnt = cnt;

    pring->missbufcnt = 0;
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 *
 * Return codes
 *   0 - success (currently always success)
 **/

/* in lpfc_post_rcv_buf(): */
    struct lpfc_sli *psli = &phba->sli;

    lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
    /* Ring 2 - FCP no buffers needed */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
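/*
 * Editor's illustrative note: S(N,V) rotates the 32-bit value V left by N
 * bits (valid for N in 1..31; N == 0 or N == 32 would produce a shift by
 * 32, which is undefined in C).  It is the rotation primitive of the
 * SHA-1 style hash below.
 */
static uint32_t example_rotl_check(void)
{
    /* the top bit wraps around to bit 0: 0x80000001 -> 0x00000003 */
    return S(1, 0x80000001u);
}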
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 **/
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 **/

/* in lpfc_sha_iterate(): */
    HashWorkingPointer[t] = S(1, HashWorkingPointer[t - 3] ^
                                 HashWorkingPointer[t - 8] ^
                                 HashWorkingPointer[t - 14] ^
                                 HashWorkingPointer[t - 16]);
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 **/
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 **/

/* in lpfc_hba_init(): */
    uint32_t *pwwnn = (uint32_t *)phba->wwnn;

    lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 **/

/* in lpfc_cleanup(): */
    struct lpfc_hba *phba = vport->phba;

    if (phba->link_state > LPFC_LINK_DOWN)

    list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
        if (vport->port_type != LPFC_PHYSICAL_PORT &&
            ndlp->nlp_DID == Fabric_DID) {

        if (ndlp->nlp_DID == Fabric_Cntl_DID &&
            ndlp->nlp_state == NLP_STE_UNUSED_NODE) {

        if (ndlp->nlp_type & NLP_FABRIC &&
            ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)

        if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD)))
    }

    while (!list_empty(&vport->fc_nodes)) {

        list_for_each_entry_safe(ndlp, next_ndlp,
                                 &vport->fc_nodes, nlp_listp) {
            lpfc_printf_vlog(ndlp->vport, KERN_ERR,

                             ndlp->nlp_DID, (void *)ndlp,
                             kref_read(&ndlp->kref),
                             ndlp->fc4_xpt_flags,
                             ndlp->nlp_flag);
        }
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 **/

/* in lpfc_stop_vport_timers(): */
    del_timer_sync(&vport->els_tmofunc);
    del_timer_sync(&vport->delayed_disc_tmo);
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 **/

/* in __lpfc_sli4_stop_fcf_redisc_wait_timer(): */
    phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

    del_timer(&phba->fcf.redisc_wait);
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 **/

/* in lpfc_sli4_stop_fcf_redisc_wait_timer(): */
    spin_lock_irq(&phba->hbalock);
    if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
        spin_unlock_irq(&phba->hbalock);
        return;
    }

    phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
    spin_unlock_irq(&phba->hbalock);
/**
 * lpfc_cmf_stop - Stop CMF processing
 **/

/* in lpfc_cmf_stop(): */
    int cpu;

    if (!phba->sli4_hba.pc_sli4_params.cmf)
        return;

    hrtimer_cancel(&phba->cmf_timer);

    atomic_set(&phba->cmf_busy, 0);
    for_each_present_cpu(cpu) {
        cgs = per_cpu_ptr(phba->cmf_stat, cpu);
        atomic64_set(&cgs->total_bytes, 0);
        atomic64_set(&cgs->rcv_bytes, 0);
        atomic_set(&cgs->rx_io_cnt, 0);
        atomic64_set(&cgs->rx_latency, 0);
    }
    atomic_set(&phba->cmf_bw_wait, 0);

    /* Resume any blocked IO - Queue unblock on workqueue */
    queue_work(phba->wq, &phba->unblock_request_work);
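/*
 * Editor's illustrative sketch of the per-CPU reset above: walk every
 * present CPU, resolve that CPU's slice of the percpu allocation with
 * per_cpu_ptr(), and zero the atomics.  Hypothetical stats type:
 */
struct example_cgn_stat {
    atomic64_t total_bytes;
    atomic_t rx_io_cnt;
};

static void example_reset_stats(struct example_cgn_stat __percpu *stats)
{
    int cpu;

    for_each_present_cpu(cpu) {
        struct example_cgn_stat *s = per_cpu_ptr(stats, cpu);

        atomic64_set(&s->total_bytes, 0);
        atomic_set(&s->rx_io_cnt, 0);
    }
}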
/* in lpfc_cmf_signal_init(): */
    phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
    phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
    phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
                                        phba->cmf_interval_rate, 1000);
    phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
/**
 * lpfc_cmf_start - Start CMF processing
 **/

/* in lpfc_cmf_start(): */
    int cpu;

    if (!phba->sli4_hba.pc_sli4_params.cmf ||
        phba->cmf_active_mode == LPFC_CFG_OFF)
        return;

    atomic_set(&phba->cgn_fabric_warn_cnt, 0);
    atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
    atomic_set(&phba->cgn_sync_alarm_cnt, 0);
    atomic_set(&phba->cgn_sync_warn_cnt, 0);

    atomic_set(&phba->cmf_busy, 0);
    for_each_present_cpu(cpu) {
        cgs = per_cpu_ptr(phba->cmf_stat, cpu);
        atomic64_set(&cgs->total_bytes, 0);
        atomic64_set(&cgs->rcv_bytes, 0);
        atomic_set(&cgs->rx_io_cnt, 0);
        atomic64_set(&cgs->rx_latency, 0);
    }
    phba->cmf_latency.tv_sec = 0;
    phba->cmf_latency.tv_nsec = 0;

    phba->cmf_timer_cnt = 0;
    hrtimer_start(&phba->cmf_timer,

    ktime_get_real_ts64(&phba->cmf_latency);

    atomic_set(&phba->cmf_bw_wait, 0);
    atomic_set(&phba->cmf_stop_io, 0);
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 **/

/* in lpfc_stop_hba_timers(): */
    if (phba->pport)
        lpfc_stop_vport_timers(phba->pport);
    cancel_delayed_work_sync(&phba->eq_delay_work);
    cancel_delayed_work_sync(&phba->idle_stat_delay_work);
    del_timer_sync(&phba->sli.mbox_tmo);
    del_timer_sync(&phba->fabric_block_timer);
    del_timer_sync(&phba->eratt_poll);
    del_timer_sync(&phba->hb_tmofunc);
    if (phba->sli_rev == LPFC_SLI_REV4) {
        del_timer_sync(&phba->rrq_tmr);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
    }
    phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);

    switch (phba->pci_dev_grp) {

        del_timer_sync(&phba->fcp_poll_timer);

            phba->pci_dev_grp);
3184 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3201 spin_lock_irqsave(&phba->hbalock, iflag); in lpfc_block_mgmt_io()
3202 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; in lpfc_block_mgmt_io()
3203 spin_unlock_irqrestore(&phba->hbalock, iflag); in lpfc_block_mgmt_io()
3207 spin_lock_irqsave(&phba->hbalock, iflag); in lpfc_block_mgmt_io()
3208 if (phba->sli.mbox_active) { in lpfc_block_mgmt_io()
3209 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; in lpfc_block_mgmt_io()
3214 phba->sli.mbox_active) * 1000) + jiffies; in lpfc_block_mgmt_io()
3216 spin_unlock_irqrestore(&phba->hbalock, iflag); in lpfc_block_mgmt_io()
3219 while (phba->sli.mbox_active) { in lpfc_block_mgmt_io()
3225 "- mbox cmd %x still active\n", in lpfc_block_mgmt_io()
3226 phba->sli.sli_flag, actcmd); in lpfc_block_mgmt_io()
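/*
 * A standalone sketch (userspace C, illustrative names) of the bounded
 * wait above: poll a "mailbox command active" flag with a short sleep
 * until it clears or a deadline computed up front expires, letting the
 * caller log a warning in the timeout case.
 */
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static bool wait_mbox_idle(volatile bool *mbox_active, long timeout_ms)
{
	long deadline = now_ms() + timeout_ms;

	while (*mbox_active) {
		if (now_ms() > deadline)
			return false;	/* timed out; caller warns */
		usleep(2000);		/* brief sleep between polls */
	}
	return true;
}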
3233 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3247 if (phba->sli_rev != LPFC_SLI_REV4) in lpfc_sli4_node_prep()
3254 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_sli4_node_prep()
3255 if (vports[i]->load_flag & FC_UNLOADING) in lpfc_sli4_node_prep()
3259 &vports[i]->fc_nodes, in lpfc_sli4_node_prep()
3266 ndlp->nlp_rpi = rpi; in lpfc_sli4_node_prep()
3267 lpfc_printf_vlog(ndlp->vport, KERN_INFO, in lpfc_sli4_node_prep()
3271 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, in lpfc_sli4_node_prep()
3272 ndlp->nlp_flag); in lpfc_sli4_node_prep()
3279 * lpfc_create_expedite_pool - create expedite pool
3293 epd_pool = &phba->epd_pool; in lpfc_create_expedite_pool()
3294 qp = &phba->sli4_hba.hdwq[0]; in lpfc_create_expedite_pool()
3296 spin_lock_init(&epd_pool->lock); in lpfc_create_expedite_pool()
3297 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); in lpfc_create_expedite_pool()
3298 spin_lock(&epd_pool->lock); in lpfc_create_expedite_pool()
3299 INIT_LIST_HEAD(&epd_pool->list); in lpfc_create_expedite_pool()
3301 &qp->lpfc_io_buf_list_put, list) { in lpfc_create_expedite_pool()
3302 list_move_tail(&lpfc_ncmd->list, &epd_pool->list); in lpfc_create_expedite_pool()
3303 lpfc_ncmd->expedite = true; in lpfc_create_expedite_pool()
3304 qp->put_io_bufs--; in lpfc_create_expedite_pool()
3305 epd_pool->count++; in lpfc_create_expedite_pool()
3306 if (epd_pool->count >= XRI_BATCH) in lpfc_create_expedite_pool()
3309 spin_unlock(&epd_pool->lock); in lpfc_create_expedite_pool()
3310 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); in lpfc_create_expedite_pool()
3314 * lpfc_destroy_expedite_pool - destroy expedite pool
3328 epd_pool = &phba->epd_pool; in lpfc_destroy_expedite_pool()
3329 qp = &phba->sli4_hba.hdwq[0]; in lpfc_destroy_expedite_pool()
3331 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); in lpfc_destroy_expedite_pool()
3332 spin_lock(&epd_pool->lock); in lpfc_destroy_expedite_pool()
3334 &epd_pool->list, list) { in lpfc_destroy_expedite_pool()
3335 list_move_tail(&lpfc_ncmd->list, in lpfc_destroy_expedite_pool()
3336 &qp->lpfc_io_buf_list_put); in lpfc_destroy_expedite_pool()
3337 lpfc_ncmd->expedite = false; in lpfc_destroy_expedite_pool()
3338 qp->put_io_bufs++; in lpfc_destroy_expedite_pool()
3339 epd_pool->count--; in lpfc_destroy_expedite_pool()
3341 spin_unlock(&epd_pool->lock); in lpfc_destroy_expedite_pool()
3342 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); in lpfc_destroy_expedite_pool()
3346 * lpfc_create_multixri_pools - create multi-XRI pools
3368 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, in lpfc_create_multixri_pools()
3369 phba->sli4_hba.io_xri_cnt); in lpfc_create_multixri_pools()
3371 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) in lpfc_create_multixri_pools()
3374 hwq_count = phba->cfg_hdw_queue; in lpfc_create_multixri_pools()
3375 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; in lpfc_create_multixri_pools()
3385 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) in lpfc_create_multixri_pools()
3390 qp = &phba->sli4_hba.hdwq[j]; in lpfc_create_multixri_pools()
3391 kfree(qp->p_multixri_pool); in lpfc_create_multixri_pools()
3394 phba->cfg_xri_rebalancing = 0; in lpfc_create_multixri_pools()
3398 qp = &phba->sli4_hba.hdwq[i]; in lpfc_create_multixri_pools()
3399 qp->p_multixri_pool = multixri_pool; in lpfc_create_multixri_pools()
3401 multixri_pool->xri_limit = count_per_hwq; in lpfc_create_multixri_pools()
3402 multixri_pool->rrb_next_hwqid = i; in lpfc_create_multixri_pools()
3405 pbl_pool = &multixri_pool->pbl_pool; in lpfc_create_multixri_pools()
3406 spin_lock_init(&pbl_pool->lock); in lpfc_create_multixri_pools()
3407 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); in lpfc_create_multixri_pools()
3408 spin_lock(&pbl_pool->lock); in lpfc_create_multixri_pools()
3409 INIT_LIST_HEAD(&pbl_pool->list); in lpfc_create_multixri_pools()
3411 &qp->lpfc_io_buf_list_put, list) { in lpfc_create_multixri_pools()
3412 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); in lpfc_create_multixri_pools()
3413 qp->put_io_bufs--; in lpfc_create_multixri_pools()
3414 pbl_pool->count++; in lpfc_create_multixri_pools()
3418 pbl_pool->count, i); in lpfc_create_multixri_pools()
3419 spin_unlock(&pbl_pool->lock); in lpfc_create_multixri_pools()
3420 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); in lpfc_create_multixri_pools()
3423 pvt_pool = &multixri_pool->pvt_pool; in lpfc_create_multixri_pools()
3424 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; in lpfc_create_multixri_pools()
3425 pvt_pool->low_watermark = XRI_BATCH; in lpfc_create_multixri_pools()
3426 spin_lock_init(&pvt_pool->lock); in lpfc_create_multixri_pools()
3427 spin_lock_irqsave(&pvt_pool->lock, iflag); in lpfc_create_multixri_pools()
3428 INIT_LIST_HEAD(&pvt_pool->list); in lpfc_create_multixri_pools()
3429 pvt_pool->count = 0; in lpfc_create_multixri_pools()
3430 spin_unlock_irqrestore(&pvt_pool->lock, iflag); in lpfc_create_multixri_pools()
3435 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3452 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) in lpfc_destroy_multixri_pools()
3455 if (!(phba->pport->load_flag & FC_UNLOADING)) in lpfc_destroy_multixri_pools()
3458 hwq_count = phba->cfg_hdw_queue; in lpfc_destroy_multixri_pools()
3461 qp = &phba->sli4_hba.hdwq[i]; in lpfc_destroy_multixri_pools()
3462 multixri_pool = qp->p_multixri_pool; in lpfc_destroy_multixri_pools()
3466 qp->p_multixri_pool = NULL; in lpfc_destroy_multixri_pools()
3468 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); in lpfc_destroy_multixri_pools()
3471 pbl_pool = &multixri_pool->pbl_pool; in lpfc_destroy_multixri_pools()
3472 spin_lock(&pbl_pool->lock); in lpfc_destroy_multixri_pools()
3476 pbl_pool->count, i); in lpfc_destroy_multixri_pools()
3479 &pbl_pool->list, list) { in lpfc_destroy_multixri_pools()
3480 list_move_tail(&lpfc_ncmd->list, in lpfc_destroy_multixri_pools()
3481 &qp->lpfc_io_buf_list_put); in lpfc_destroy_multixri_pools()
3482 qp->put_io_bufs++; in lpfc_destroy_multixri_pools()
3483 pbl_pool->count--; in lpfc_destroy_multixri_pools()
3486 INIT_LIST_HEAD(&pbl_pool->list); in lpfc_destroy_multixri_pools()
3487 pbl_pool->count = 0; in lpfc_destroy_multixri_pools()
3489 spin_unlock(&pbl_pool->lock); in lpfc_destroy_multixri_pools()
3492 pvt_pool = &multixri_pool->pvt_pool; in lpfc_destroy_multixri_pools()
3493 spin_lock(&pvt_pool->lock); in lpfc_destroy_multixri_pools()
3497 pvt_pool->count, i); in lpfc_destroy_multixri_pools()
3500 &pvt_pool->list, list) { in lpfc_destroy_multixri_pools()
3501 list_move_tail(&lpfc_ncmd->list, in lpfc_destroy_multixri_pools()
3502 &qp->lpfc_io_buf_list_put); in lpfc_destroy_multixri_pools()
3503 qp->put_io_bufs++; in lpfc_destroy_multixri_pools()
3504 pvt_pool->count--; in lpfc_destroy_multixri_pools()
3507 INIT_LIST_HEAD(&pvt_pool->list); in lpfc_destroy_multixri_pools()
3508 pvt_pool->count = 0; in lpfc_destroy_multixri_pools()
3510 spin_unlock(&pvt_pool->lock); in lpfc_destroy_multixri_pools()
3511 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); in lpfc_destroy_multixri_pools()
3518 * lpfc_online - Initialize and bring an HBA online in lpfc_destroy_multixri_pools()
3526 * 0 - successful
3527 * 1 - failed
3539 vport = phba->pport; in lpfc_online()
3541 if (!(vport->fc_flag & FC_OFFLINE_MODE)) in lpfc_online()
3549 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_online()
3554 spin_lock_irq(&phba->hbalock); in lpfc_online()
3555 if (!phba->sli4_hba.max_cfg_param.vpi_used) in lpfc_online()
3557 spin_unlock_irq(&phba->hbalock); in lpfc_online()
3562 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && in lpfc_online()
3563 !phba->nvmet_support) { in lpfc_online()
3564 error = lpfc_nvme_create_localport(phba->pport); in lpfc_online()
3580 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_online()
3583 spin_lock_irq(shost->host_lock); in lpfc_online()
3584 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; in lpfc_online()
3585 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) in lpfc_online()
3586 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; in lpfc_online()
3587 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_online()
3588 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; in lpfc_online()
3590 (vports[i]->port_type != in lpfc_online()
3592 vports[i]->vpi = 0; in lpfc_online()
3594 spin_unlock_irq(shost->host_lock); in lpfc_online()
3599 if (phba->cfg_xri_rebalancing) in lpfc_online()
3609 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as unblocked in lpfc_online()
3624 spin_lock_irqsave(&phba->hbalock, iflag); in lpfc_unblock_mgmt_io()
3625 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; in lpfc_unblock_mgmt_io()
3626 spin_unlock_irqrestore(&phba->hbalock, iflag); in lpfc_unblock_mgmt_io()
3630 * lpfc_offline_prep - Prepare an HBA to be brought offline in lpfc_unblock_mgmt_io()
3641 struct lpfc_vport *vport = phba->pport; in lpfc_offline_prep()
3647 if (vport->fc_flag & FC_OFFLINE_MODE) in lpfc_offline_prep()
3657 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_offline_prep()
3658 if (vports[i]->load_flag & FC_UNLOADING) in lpfc_offline_prep()
3661 spin_lock_irq(shost->host_lock); in lpfc_offline_prep()
3662 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; in lpfc_offline_prep()
3663 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; in lpfc_offline_prep()
3664 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; in lpfc_offline_prep()
3665 spin_unlock_irq(shost->host_lock); in lpfc_offline_prep()
3669 &vports[i]->fc_nodes, in lpfc_offline_prep()
3672 spin_lock_irq(&ndlp->lock); in lpfc_offline_prep()
3673 ndlp->nlp_flag &= ~NLP_NPR_ADISC; in lpfc_offline_prep()
3674 spin_unlock_irq(&ndlp->lock); in lpfc_offline_prep()
3682 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_offline_prep()
3687 ndlp->nlp_rpi, ndlp, in lpfc_offline_prep()
3688 ndlp->nlp_DID); in lpfc_offline_prep()
3689 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); in lpfc_offline_prep()
3690 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; in lpfc_offline_prep()
3693 if (ndlp->nlp_type & NLP_FABRIC) { in lpfc_offline_prep()
3702 if (!(ndlp->fc4_xpt_flags & in lpfc_offline_prep()
3716 if (phba->wq) in lpfc_offline_prep()
3717 flush_workqueue(phba->wq); in lpfc_offline_prep()
3721 * lpfc_offline - Bring an HBA offline in lpfc_offline_prep()
3735 if (phba->pport->fc_flag & FC_OFFLINE_MODE) in lpfc_offline()
3745 lpfc_nvme_destroy_localport(phba->pport); in lpfc_offline()
3749 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) in lpfc_offline()
3757 spin_lock_irq(&phba->hbalock); in lpfc_offline()
3758 phba->work_ha = 0; in lpfc_offline()
3759 spin_unlock_irq(&phba->hbalock); in lpfc_offline()
3762 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_offline()
3764 spin_lock_irq(shost->host_lock); in lpfc_offline()
3765 vports[i]->work_port_events = 0; in lpfc_offline()
3766 vports[i]->fc_flag |= FC_OFFLINE_MODE; in lpfc_offline()
3767 spin_unlock_irq(shost->host_lock); in lpfc_offline()
3773 if (phba->pport->fc_flag & FC_OFFLINE_MODE) in lpfc_offline()
3776 if (phba->cfg_xri_rebalancing) in lpfc_offline()
3781 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3793 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) in lpfc_scsi_free()
3796 spin_lock_irq(&phba->hbalock); in lpfc_scsi_free()
3800 spin_lock(&phba->scsi_buf_list_put_lock); in lpfc_scsi_free()
3801 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, in lpfc_scsi_free()
3803 list_del(&sb->list); in lpfc_scsi_free()
3804 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, in lpfc_scsi_free()
3805 sb->dma_handle); in lpfc_scsi_free()
3807 phba->total_scsi_bufs--; in lpfc_scsi_free()
3809 spin_unlock(&phba->scsi_buf_list_put_lock); in lpfc_scsi_free()
3811 spin_lock(&phba->scsi_buf_list_get_lock); in lpfc_scsi_free()
3812 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, in lpfc_scsi_free()
3814 list_del(&sb->list); in lpfc_scsi_free()
3815 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, in lpfc_scsi_free()
3816 sb->dma_handle); in lpfc_scsi_free()
3818 phba->total_scsi_bufs--; in lpfc_scsi_free()
3820 spin_unlock(&phba->scsi_buf_list_get_lock); in lpfc_scsi_free()
3821 spin_unlock_irq(&phba->hbalock); in lpfc_scsi_free()
3825 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3839 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_io_free()
3840 qp = &phba->sli4_hba.hdwq[idx]; in lpfc_io_free()
3842 spin_lock(&qp->io_buf_list_put_lock); in lpfc_io_free()
3844 &qp->lpfc_io_buf_list_put, in lpfc_io_free()
3846 list_del(&lpfc_ncmd->list); in lpfc_io_free()
3847 qp->put_io_bufs--; in lpfc_io_free()
3848 dma_pool_free(phba->lpfc_sg_dma_buf_pool, in lpfc_io_free()
3849 lpfc_ncmd->data, lpfc_ncmd->dma_handle); in lpfc_io_free()
3850 if (phba->cfg_xpsgl && !phba->nvmet_support) in lpfc_io_free()
3854 qp->total_io_bufs--; in lpfc_io_free()
3856 spin_unlock(&qp->io_buf_list_put_lock); in lpfc_io_free()
3858 spin_lock(&qp->io_buf_list_get_lock); in lpfc_io_free()
3860 &qp->lpfc_io_buf_list_get, in lpfc_io_free()
3862 list_del(&lpfc_ncmd->list); in lpfc_io_free()
3863 qp->get_io_bufs--; in lpfc_io_free()
3864 dma_pool_free(phba->lpfc_sg_dma_buf_pool, in lpfc_io_free()
3865 lpfc_ncmd->data, lpfc_ncmd->dma_handle); in lpfc_io_free()
3866 if (phba->cfg_xpsgl && !phba->nvmet_support) in lpfc_io_free()
3870 qp->total_io_bufs--; in lpfc_io_free()
3872 spin_unlock(&qp->io_buf_list_get_lock); in lpfc_io_free()
3877 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3886 * 0 - successful (for now, it always returns 0)
3897 * update on pci function's els xri-sgl list in lpfc_sli4_els_sgl_update()
3901 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { in lpfc_sli4_els_sgl_update()
3902 /* els xri-sgl expanded */ in lpfc_sli4_els_sgl_update()
3903 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; in lpfc_sli4_els_sgl_update()
3905 "3157 ELS xri-sgl count increased from " in lpfc_sli4_els_sgl_update()
3906 "%d to %d\n", phba->sli4_hba.els_xri_cnt, in lpfc_sli4_els_sgl_update()
3917 rc = -ENOMEM; in lpfc_sli4_els_sgl_update()
3920 sglq_entry->buff_type = GEN_BUFF_TYPE; in lpfc_sli4_els_sgl_update()
3921 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, in lpfc_sli4_els_sgl_update()
3922 &sglq_entry->phys); in lpfc_sli4_els_sgl_update()
3923 if (sglq_entry->virt == NULL) { in lpfc_sli4_els_sgl_update()
3929 rc = -ENOMEM; in lpfc_sli4_els_sgl_update()
3932 sglq_entry->sgl = sglq_entry->virt; in lpfc_sli4_els_sgl_update()
3933 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); in lpfc_sli4_els_sgl_update()
3934 sglq_entry->state = SGL_FREED; in lpfc_sli4_els_sgl_update()
3935 list_add_tail(&sglq_entry->list, &els_sgl_list); in lpfc_sli4_els_sgl_update()
3937 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_els_sgl_update()
3939 &phba->sli4_hba.lpfc_els_sgl_list); in lpfc_sli4_els_sgl_update()
3940 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_els_sgl_update()
3941 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { in lpfc_sli4_els_sgl_update()
3942 /* els xri-sgl shrunk */ in lpfc_sli4_els_sgl_update()
3943 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; in lpfc_sli4_els_sgl_update()
3945 "3158 ELS xri-sgl count decreased from " in lpfc_sli4_els_sgl_update()
3946 "%d to %d\n", phba->sli4_hba.els_xri_cnt, in lpfc_sli4_els_sgl_update()
3948 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_els_sgl_update()
3949 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, in lpfc_sli4_els_sgl_update()
3956 __lpfc_mbuf_free(phba, sglq_entry->virt, in lpfc_sli4_els_sgl_update()
3957 sglq_entry->phys); in lpfc_sli4_els_sgl_update()
3962 &phba->sli4_hba.lpfc_els_sgl_list); in lpfc_sli4_els_sgl_update()
3963 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_els_sgl_update()
3966 "3163 ELS xri-sgl count unchanged: %d\n", in lpfc_sli4_els_sgl_update()
3968 phba->sli4_hba.els_xri_cnt = els_xri_cnt; in lpfc_sli4_els_sgl_update()
3974 &phba->sli4_hba.lpfc_els_sgl_list, list) { in lpfc_sli4_els_sgl_update()
3981 rc = -ENOMEM; in lpfc_sli4_els_sgl_update()
3984 sglq_entry->sli4_lxritag = lxri; in lpfc_sli4_els_sgl_update()
3985 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; in lpfc_sli4_els_sgl_update()
3995 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4004 * 0 - successful (for now, it always returns 0)
4016 * update on pci function's nvmet xri-sgl list in lpfc_sli4_nvmet_sgl_update()
4021 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; in lpfc_sli4_nvmet_sgl_update()
4022 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { in lpfc_sli4_nvmet_sgl_update()
4023 /* nvmet xri-sgl expanded */ in lpfc_sli4_nvmet_sgl_update()
4024 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; in lpfc_sli4_nvmet_sgl_update()
4026 "6302 NVMET xri-sgl cnt grew from %d to %d\n", in lpfc_sli4_nvmet_sgl_update()
4027 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); in lpfc_sli4_nvmet_sgl_update()
4037 rc = -ENOMEM; in lpfc_sli4_nvmet_sgl_update()
4040 sglq_entry->buff_type = NVMET_BUFF_TYPE; in lpfc_sli4_nvmet_sgl_update()
4041 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, in lpfc_sli4_nvmet_sgl_update()
4042 &sglq_entry->phys); in lpfc_sli4_nvmet_sgl_update()
4043 if (sglq_entry->virt == NULL) { in lpfc_sli4_nvmet_sgl_update()
4049 rc = -ENOMEM; in lpfc_sli4_nvmet_sgl_update()
4052 sglq_entry->sgl = sglq_entry->virt; in lpfc_sli4_nvmet_sgl_update()
4053 memset(sglq_entry->sgl, 0, in lpfc_sli4_nvmet_sgl_update()
4054 phba->cfg_sg_dma_buf_size); in lpfc_sli4_nvmet_sgl_update()
4055 sglq_entry->state = SGL_FREED; in lpfc_sli4_nvmet_sgl_update()
4056 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); in lpfc_sli4_nvmet_sgl_update()
4058 spin_lock_irq(&phba->hbalock); in lpfc_sli4_nvmet_sgl_update()
4059 spin_lock(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_nvmet_sgl_update()
4061 &phba->sli4_hba.lpfc_nvmet_sgl_list); in lpfc_sli4_nvmet_sgl_update()
4062 spin_unlock(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_nvmet_sgl_update()
4063 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_nvmet_sgl_update()
4064 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { in lpfc_sli4_nvmet_sgl_update()
4065 /* nvmet xri-sgl shrunk */ in lpfc_sli4_nvmet_sgl_update()
4066 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; in lpfc_sli4_nvmet_sgl_update()
4068 "6305 NVMET xri-sgl count decreased from " in lpfc_sli4_nvmet_sgl_update()
4069 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, in lpfc_sli4_nvmet_sgl_update()
4071 spin_lock_irq(&phba->hbalock); in lpfc_sli4_nvmet_sgl_update()
4072 spin_lock(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_nvmet_sgl_update()
4073 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, in lpfc_sli4_nvmet_sgl_update()
4080 lpfc_nvmet_buf_free(phba, sglq_entry->virt, in lpfc_sli4_nvmet_sgl_update()
4081 sglq_entry->phys); in lpfc_sli4_nvmet_sgl_update()
4086 &phba->sli4_hba.lpfc_nvmet_sgl_list); in lpfc_sli4_nvmet_sgl_update()
4087 spin_unlock(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_nvmet_sgl_update()
4088 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_nvmet_sgl_update()
4091 "6306 NVMET xri-sgl count unchanged: %d\n", in lpfc_sli4_nvmet_sgl_update()
4093 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; in lpfc_sli4_nvmet_sgl_update()
4099 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { in lpfc_sli4_nvmet_sgl_update()
4106 rc = -ENOMEM; in lpfc_sli4_nvmet_sgl_update()
4109 sglq_entry->sli4_lxritag = lxri; in lpfc_sli4_nvmet_sgl_update()
4110 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; in lpfc_sli4_nvmet_sgl_update()
4129 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_io_buf_flush()
4130 qp = &phba->sli4_hba.hdwq[idx]; in lpfc_io_buf_flush()
4131 spin_lock_irq(&qp->io_buf_list_get_lock); in lpfc_io_buf_flush()
4132 spin_lock(&qp->io_buf_list_put_lock); in lpfc_io_buf_flush()
4135 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); in lpfc_io_buf_flush()
4136 list_splice(&qp->lpfc_io_buf_list_put, &blist); in lpfc_io_buf_flush()
4137 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); in lpfc_io_buf_flush()
4138 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); in lpfc_io_buf_flush()
4139 cnt += qp->get_io_bufs + qp->put_io_bufs; in lpfc_io_buf_flush()
4140 qp->get_io_bufs = 0; in lpfc_io_buf_flush()
4141 qp->put_io_bufs = 0; in lpfc_io_buf_flush()
4142 qp->total_io_bufs = 0; in lpfc_io_buf_flush()
4143 spin_unlock(&qp->io_buf_list_put_lock); in lpfc_io_buf_flush()
4144 spin_unlock_irq(&qp->io_buf_list_get_lock); in lpfc_io_buf_flush()
4157 list_add_tail(&lpfc_cmd->list, cbuf); in lpfc_io_buf_flush()
4160 xri = lpfc_cmd->cur_iocbq.sli4_xritag; in lpfc_io_buf_flush()
4164 if (xri < iobufp->cur_iocbq.sli4_xritag) { in lpfc_io_buf_flush()
4166 list_add(&lpfc_cmd->list, in lpfc_io_buf_flush()
4167 &prev_iobufp->list); in lpfc_io_buf_flush()
4169 list_add(&lpfc_cmd->list, cbuf); in lpfc_io_buf_flush()
4176 list_add_tail(&lpfc_cmd->list, cbuf); in lpfc_io_buf_flush()
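/*
 * A standalone sketch (userspace C, illustrative types) of the sorted
 * insert above: walk the list until an element with a larger tag is
 * found and insert in front of it, so the list stays ordered by tag.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	unsigned tag;
	struct buf *next;
};

static void insert_sorted(struct buf **head, struct buf *n)
{
	struct buf **pp = head;

	while (*pp && (*pp)->tag < n->tag)
		pp = &(*pp)->next;	/* stop at first larger tag */
	n->next = *pp;
	*pp = n;
}

int main(void)
{
	unsigned tags[] = { 7, 3, 9, 1 };
	struct buf *head = NULL, *b;
	size_t i;

	for (i = 0; i < sizeof(tags) / sizeof(tags[0]); i++) {
		b = calloc(1, sizeof(*b));
		b->tag = tags[i];
		insert_sorted(&head, b);
	}
	for (b = head; b; b = b->next)
		printf("%u ", b->tag);	/* prints: 1 3 7 9 */
	printf("\n");
	return 0;
}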
4188 qp = phba->sli4_hba.hdwq; in lpfc_io_buf_replenish()
4191 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_io_buf_replenish()
4197 qp = &phba->sli4_hba.hdwq[idx]; in lpfc_io_buf_replenish()
4198 lpfc_cmd->hdwq_no = idx; in lpfc_io_buf_replenish()
4199 lpfc_cmd->hdwq = qp; in lpfc_io_buf_replenish()
4200 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; in lpfc_io_buf_replenish()
4201 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; in lpfc_io_buf_replenish()
4202 spin_lock(&qp->io_buf_list_put_lock); in lpfc_io_buf_replenish()
4203 list_add_tail(&lpfc_cmd->list, in lpfc_io_buf_replenish()
4204 &qp->lpfc_io_buf_list_put); in lpfc_io_buf_replenish()
4205 qp->put_io_bufs++; in lpfc_io_buf_replenish()
4206 qp->total_io_bufs++; in lpfc_io_buf_replenish()
4207 spin_unlock(&qp->io_buf_list_put_lock); in lpfc_io_buf_replenish()
4214 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4223 * 0 - successful (for now, it always returns 0)
4235 * update on pci function's allocated nvme xri-sgl list in lpfc_sli4_io_sgl_update()
4240 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; in lpfc_sli4_io_sgl_update()
4241 phba->sli4_hba.io_xri_max = io_xri_max; in lpfc_sli4_io_sgl_update()
4246 phba->sli4_hba.io_xri_cnt, in lpfc_sli4_io_sgl_update()
4247 phba->sli4_hba.io_xri_max); in lpfc_sli4_io_sgl_update()
4251 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { in lpfc_sli4_io_sgl_update()
4253 io_xri_cnt = phba->sli4_hba.io_xri_cnt - in lpfc_sli4_io_sgl_update()
4254 phba->sli4_hba.io_xri_max; in lpfc_sli4_io_sgl_update()
4260 dma_pool_free(phba->lpfc_sg_dma_buf_pool, in lpfc_sli4_io_sgl_update()
4261 lpfc_ncmd->data, in lpfc_sli4_io_sgl_update()
4262 lpfc_ncmd->dma_handle); in lpfc_sli4_io_sgl_update()
4266 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; in lpfc_sli4_io_sgl_update()
4272 phba->sli4_hba.io_xri_cnt = cnt; in lpfc_sli4_io_sgl_update()
4281 rc = -ENOMEM; in lpfc_sli4_io_sgl_update()
4284 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; in lpfc_sli4_io_sgl_update()
4285 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; in lpfc_sli4_io_sgl_update()
4296 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4300 * This routine allocates nvme buffers for a device with SLI-4 interface spec,
4306 * int - number of IO buffers that were allocated and posted.
4320 phba->sli4_hba.io_xri_cnt = 0; in lpfc_new_io_buf()
4330 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, in lpfc_new_io_buf()
4332 &lpfc_ncmd->dma_handle); in lpfc_new_io_buf()
4333 if (!lpfc_ncmd->data) { in lpfc_new_io_buf()
4338 if (phba->cfg_xpsgl && !phba->nvmet_support) { in lpfc_new_io_buf()
4339 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); in lpfc_new_io_buf()
4345 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && in lpfc_new_io_buf()
4346 (((unsigned long)(lpfc_ncmd->data) & in lpfc_new_io_buf()
4347 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { in lpfc_new_io_buf()
4352 (unsigned long)lpfc_ncmd->data); in lpfc_new_io_buf()
4353 dma_pool_free(phba->lpfc_sg_dma_buf_pool, in lpfc_new_io_buf()
4354 lpfc_ncmd->data, in lpfc_new_io_buf()
4355 lpfc_ncmd->dma_handle); in lpfc_new_io_buf()
4361 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); in lpfc_new_io_buf()
4365 dma_pool_free(phba->lpfc_sg_dma_buf_pool, in lpfc_new_io_buf()
4366 lpfc_ncmd->data, lpfc_ncmd->dma_handle); in lpfc_new_io_buf()
4370 pwqeq = &lpfc_ncmd->cur_iocbq; in lpfc_new_io_buf()
4372 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ in lpfc_new_io_buf()
4375 dma_pool_free(phba->lpfc_sg_dma_buf_pool, in lpfc_new_io_buf()
4376 lpfc_ncmd->data, lpfc_ncmd->dma_handle); in lpfc_new_io_buf()
4384 pwqeq->sli4_lxritag = lxri; in lpfc_new_io_buf()
4385 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; in lpfc_new_io_buf()
4386 pwqeq->context1 = lpfc_ncmd; in lpfc_new_io_buf()
4388 /* Initialize local short-hand pointers. */ in lpfc_new_io_buf()
4389 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; in lpfc_new_io_buf()
4390 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; in lpfc_new_io_buf()
4391 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; in lpfc_new_io_buf()
4392 spin_lock_init(&lpfc_ncmd->buf_lock); in lpfc_new_io_buf()
4395 list_add_tail(&lpfc_ncmd->list, &post_nblist); in lpfc_new_io_buf()
4396 phba->sli4_hba.io_xri_cnt++; in lpfc_new_io_buf()
4420 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, in lpfc_get_wwpn()
4423 return (uint64_t)-1; in lpfc_get_wwpn()
4432 bf_get(lpfc_mqe_command, &mboxq->u.mqe), in lpfc_get_wwpn()
4433 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); in lpfc_get_wwpn()
4434 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_get_wwpn()
4435 return (uint64_t)-1; in lpfc_get_wwpn()
4437 mb = &mboxq->u.mb; in lpfc_get_wwpn()
4438 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); in lpfc_get_wwpn()
4440 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_get_wwpn()
4441 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_get_wwpn()
4448 * lpfc_vmid_res_alloc - Allocates resources for VMID
4456 * Non-0 on Failure
4462 if (phba->sli_rev == LPFC_SLI_REV3) { in lpfc_vmid_res_alloc()
4463 phba->cfg_vmid_app_header = 0; in lpfc_vmid_res_alloc()
4464 phba->cfg_vmid_priority_tagging = 0; in lpfc_vmid_res_alloc()
4468 vport->vmid = in lpfc_vmid_res_alloc()
4469 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), in lpfc_vmid_res_alloc()
4471 if (!vport->vmid) in lpfc_vmid_res_alloc()
4472 return -ENOMEM; in lpfc_vmid_res_alloc()
4474 rwlock_init(&vport->vmid_lock); in lpfc_vmid_res_alloc()
4477 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; in lpfc_vmid_res_alloc()
4478 vport->vmid_inactivity_timeout = in lpfc_vmid_res_alloc()
4479 phba->cfg_vmid_inactivity_timeout; in lpfc_vmid_res_alloc()
4480 vport->max_vmid = phba->cfg_max_vmid; in lpfc_vmid_res_alloc()
4481 vport->cur_vmid_cnt = 0; in lpfc_vmid_res_alloc()
4483 vport->vmid_priority_range = bitmap_zalloc in lpfc_vmid_res_alloc()
4486 if (!vport->vmid_priority_range) { in lpfc_vmid_res_alloc()
4487 kfree(vport->vmid); in lpfc_vmid_res_alloc()
4488 return -ENOMEM; in lpfc_vmid_res_alloc()
4491 hash_init(vport->hash_table); in lpfc_vmid_res_alloc()
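/*
 * A standalone sketch (userspace C, illustrative names) of the unwind
 * pattern above: when the second allocation fails, the first is freed
 * before returning, so a failed resource-alloc call leaks nothing.
 */
#include <stdlib.h>

struct example_vmid_res {
	void *table;
	unsigned char *priority_bitmap;
};

static int example_vmid_res_alloc(struct example_vmid_res *r, size_t n)
{
	r->table = calloc(n, 64);
	if (!r->table)
		return -1;

	r->priority_bitmap = calloc((n + 7) / 8, 1);
	if (!r->priority_bitmap) {
		free(r->table);		/* unwind the earlier allocation */
		r->table = NULL;
		return -1;
	}
	return 0;
}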
4497 * lpfc_create_port - Create an FC port
4509 * @vport - pointer to the virtual N_Port data structure.
4510 * NULL - port create failed.
4525 if (phba->sli_rev < LPFC_SLI_REV4 && in lpfc_create_port()
4526 dev == &phba->pcidev->dev) { in lpfc_create_port()
4548 if (dev == &phba->pcidev->dev) { in lpfc_create_port()
4549 template = &phba->port_template; in lpfc_create_port()
4551 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { in lpfc_create_port()
4557 template->eh_host_reset_handler = NULL; in lpfc_create_port()
4560 memcpy(&phba->vport_template, &lpfc_template, in lpfc_create_port()
4562 phba->vport_template.shost_attrs = lpfc_vport_attrs; in lpfc_create_port()
4563 phba->vport_template.eh_bus_reset_handler = NULL; in lpfc_create_port()
4564 phba->vport_template.eh_host_reset_handler = NULL; in lpfc_create_port()
4565 phba->vport_template.vendor_id = 0; in lpfc_create_port()
4568 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_create_port()
4569 template->sg_tablesize = phba->cfg_scsi_seg_cnt; in lpfc_create_port()
4570 phba->vport_template.sg_tablesize = in lpfc_create_port()
4571 phba->cfg_scsi_seg_cnt; in lpfc_create_port()
4573 template->sg_tablesize = phba->cfg_sg_seg_cnt; in lpfc_create_port()
4574 phba->vport_template.sg_tablesize = in lpfc_create_port()
4575 phba->cfg_sg_seg_cnt; in lpfc_create_port()
4584 template = &phba->vport_template; in lpfc_create_port()
4591 vport = (struct lpfc_vport *) shost->hostdata; in lpfc_create_port()
4592 vport->phba = phba; in lpfc_create_port()
4593 vport->load_flag |= FC_LOADING; in lpfc_create_port()
4594 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; in lpfc_create_port()
4595 vport->fc_rscn_flush = 0; in lpfc_create_port()
4599 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; in lpfc_create_port()
4601 shost->unique_id = instance; in lpfc_create_port()
4602 shost->max_id = LPFC_MAX_TARGET; in lpfc_create_port()
4603 shost->max_lun = vport->cfg_max_luns; in lpfc_create_port()
4604 shost->this_id = -1; in lpfc_create_port()
4605 shost->max_cmd_len = 16; in lpfc_create_port()
4607 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_create_port()
4608 if (!phba->cfg_fcp_mq_threshold || in lpfc_create_port()
4609 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) in lpfc_create_port()
4610 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; in lpfc_create_port()
4612 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), in lpfc_create_port()
4613 phba->cfg_fcp_mq_threshold); in lpfc_create_port()
4615 shost->dma_boundary = in lpfc_create_port()
4616 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; in lpfc_create_port()
4618 if (phba->cfg_xpsgl && !phba->nvmet_support) in lpfc_create_port()
4619 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; in lpfc_create_port()
4621 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; in lpfc_create_port()
4623 /* SLI-3 has a limited number of hardware queues (3), in lpfc_create_port()
4626 shost->nr_hw_queues = 1; in lpfc_create_port()
4633 shost->can_queue = phba->cfg_hba_queue_depth - 10; in lpfc_create_port()
4634 if (dev != &phba->pcidev->dev) { in lpfc_create_port()
4635 shost->transportt = lpfc_vport_transport_template; in lpfc_create_port()
4636 vport->port_type = LPFC_NPIV_PORT; in lpfc_create_port()
4638 shost->transportt = lpfc_transport_template; in lpfc_create_port()
4639 vport->port_type = LPFC_PHYSICAL_PORT; in lpfc_create_port()
4645 vport->port_type, shost->sg_tablesize, in lpfc_create_port()
4646 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); in lpfc_create_port()
4655 INIT_LIST_HEAD(&vport->fc_nodes); in lpfc_create_port()
4656 INIT_LIST_HEAD(&vport->rcv_buffer_list); in lpfc_create_port()
4657 spin_lock_init(&vport->work_port_lock); in lpfc_create_port()
4659 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); in lpfc_create_port()
4661 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); in lpfc_create_port()
4663 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); in lpfc_create_port()
4665 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) in lpfc_create_port()
4668 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); in lpfc_create_port()
4672 spin_lock_irq(&phba->port_list_lock); in lpfc_create_port()
4673 list_add_tail(&vport->listentry, &phba->port_list); in lpfc_create_port()
4674 spin_unlock_irq(&phba->port_list_lock); in lpfc_create_port()
4678 kfree(vport->vmid); in lpfc_create_port()
4679 bitmap_free(vport->vmid_priority_range); in lpfc_create_port()
4686 * destroy_port - destroy an FC port
4696 struct lpfc_hba *phba = vport->phba; in destroy_port()
4702 spin_lock_irq(&phba->port_list_lock); in destroy_port()
4703 list_del_init(&vport->listentry); in destroy_port()
4704 spin_unlock_irq(&phba->port_list_lock); in destroy_port()
4711 * lpfc_get_instance - Get a unique integer ID
4717 * instance - a unique integer ID allocated as the new instance.
4718 * -1 - lpfc get instance failed.
4726 return ret < 0 ? -1 : ret; in lpfc_get_instance()
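/*
 * A minimal kernel-context sketch (illustrative names, not driver code)
 * of the ID allocation lpfc_get_instance() wraps: ida_alloc() returns the
 * lowest free non-negative ID, and ida_free() releases it for reuse.
 */
#include <linux/idr.h>

static DEFINE_IDA(example_instance_ida);

static int example_get_instance(void)
{
	int ret = ida_alloc(&example_instance_ida, GFP_KERNEL);

	return ret < 0 ? -1 : ret;	/* mirror the -1-on-failure contract */
}

static void example_put_instance(int id)
{
	ida_free(&example_instance_ida, id);
}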
4730 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4741 * 0 - SCSI host scan is not over yet.
4742 * 1 - SCSI host scan is over.
4746 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; in lpfc_scan_finished()
4747 struct lpfc_hba *phba = vport->phba; in lpfc_scan_finished()
4750 spin_lock_irq(shost->host_lock); in lpfc_scan_finished()
4752 if (vport->load_flag & FC_UNLOADING) { in lpfc_scan_finished()
4764 phba->link_state <= LPFC_LINK_DOWN) { in lpfc_scan_finished()
4772 if (vport->port_state != LPFC_VPORT_READY) in lpfc_scan_finished()
4774 if (vport->num_disc_nodes || vport->fc_prli_sent) in lpfc_scan_finished()
4776 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) in lpfc_scan_finished()
4778 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) in lpfc_scan_finished()
4784 spin_unlock_irq(shost->host_lock); in lpfc_scan_finished()
4790 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; in lpfc_host_supported_speeds_set()
4791 struct lpfc_hba *phba = vport->phba; in lpfc_host_supported_speeds_set()
4798 if (phba->hba_flag & HBA_FCOE_MODE) in lpfc_host_supported_speeds_set()
4801 if (phba->lmt & LMT_256Gb) in lpfc_host_supported_speeds_set()
4803 if (phba->lmt & LMT_128Gb) in lpfc_host_supported_speeds_set()
4805 if (phba->lmt & LMT_64Gb) in lpfc_host_supported_speeds_set()
4807 if (phba->lmt & LMT_32Gb) in lpfc_host_supported_speeds_set()
4809 if (phba->lmt & LMT_16Gb) in lpfc_host_supported_speeds_set()
4811 if (phba->lmt & LMT_10Gb) in lpfc_host_supported_speeds_set()
4813 if (phba->lmt & LMT_8Gb) in lpfc_host_supported_speeds_set()
4815 if (phba->lmt & LMT_4Gb) in lpfc_host_supported_speeds_set()
4817 if (phba->lmt & LMT_2Gb) in lpfc_host_supported_speeds_set()
4819 if (phba->lmt & LMT_1Gb) in lpfc_host_supported_speeds_set()
4824 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port in lpfc_host_supported_speeds_set()
4832 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; in lpfc_host_attrib_init()
4833 struct lpfc_hba *phba = vport->phba; in lpfc_host_attrib_init()
4838 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); in lpfc_host_attrib_init()
4839 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); in lpfc_host_attrib_init()
4853 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | in lpfc_host_attrib_init()
4854 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; in lpfc_host_attrib_init()
4856 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; in lpfc_host_attrib_init()
4864 fc_host_max_npiv_vports(shost) = phba->max_vpi; in lpfc_host_attrib_init()
4865 spin_lock_irq(shost->host_lock); in lpfc_host_attrib_init()
4866 vport->load_flag &= ~FC_LOADING; in lpfc_host_attrib_init()
4867 spin_unlock_irq(shost->host_lock); in lpfc_host_attrib_init()
4871 * lpfc_stop_port_s3 - Stop SLI3 device port
4882 writel(0, phba->HCregaddr); in lpfc_stop_port_s3()
4883 readl(phba->HCregaddr); /* flush */ in lpfc_stop_port_s3()
4885 writel(0xffffffff, phba->HAregaddr); in lpfc_stop_port_s3()
4886 readl(phba->HAregaddr); /* flush */ in lpfc_stop_port_s3()
4890 phba->pport->work_port_events = 0; in lpfc_stop_port_s3()
4894 * lpfc_stop_port_s4 - Stop SLI4 device port
4906 if (phba->pport) in lpfc_stop_port_s4()
4907 phba->pport->work_port_events = 0; in lpfc_stop_port_s4()
4908 phba->sli4_hba.intr_enable = 0; in lpfc_stop_port_s4()
4912 * lpfc_stop_port - Wrapper function for stopping the HBA port in lpfc_stop_port_s4()
4921 phba->lpfc_stop_port(phba); in lpfc_stop_port()
4923 if (phba->wq) in lpfc_stop_port()
4924 flush_workqueue(phba->wq); in lpfc_stop_port()
4928 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4939 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); in lpfc_fcf_redisc_wait_start_timer()
4940 spin_lock_irq(&phba->hbalock); in lpfc_fcf_redisc_wait_start_timer()
4942 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); in lpfc_fcf_redisc_wait_start_timer()
4944 phba->fcf.fcf_flag |= FCF_REDISC_PEND; in lpfc_fcf_redisc_wait_start_timer()
4945 spin_unlock_irq(&phba->hbalock); in lpfc_fcf_redisc_wait_start_timer()
4949 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4964 spin_lock_irq(&phba->hbalock); in lpfc_sli4_fcf_redisc_wait_tmo()
4965 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { in lpfc_sli4_fcf_redisc_wait_tmo()
4966 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_fcf_redisc_wait_tmo()
4970 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; in lpfc_sli4_fcf_redisc_wait_tmo()
4972 phba->fcf.fcf_flag |= FCF_REDISC_EVT; in lpfc_sli4_fcf_redisc_wait_tmo()
4973 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_fcf_redisc_wait_tmo()
4981 * lpfc_vmid_poll - VMID timeout detection
4996 if (phba->pport->vmid_priority_tagging) { in lpfc_vmid_poll()
4998 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; in lpfc_vmid_poll()
5002 if (phba->pport->vmid_inactivity_timeout || in lpfc_vmid_poll()
5003 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) { in lpfc_vmid_poll()
5005 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; in lpfc_vmid_poll()
5012 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * in lpfc_vmid_poll()
5017 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5021 * This routine is to parse the SLI4 link-attention link fault code.
5042 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5063 /* Ignore physical link up events - wait for logical link up */ in lpfc_sli4_parse_latt_type()
5080 * lpfc_sli_port_speed_get - Convert sli3 link speed code to link speed in lpfc_sli4_parse_latt_type()
5095 if (phba->sli_rev <= LPFC_SLI_REV3) { in lpfc_sli_port_speed_get()
5096 switch (phba->fc_linkspeed) { in lpfc_sli_port_speed_get()
5119 if (phba->sli4_hba.link_state.logical_speed) in lpfc_sli_port_speed_get()
5121 phba->sli4_hba.link_state.logical_speed; in lpfc_sli_port_speed_get()
5123 link_speed = phba->sli4_hba.link_state.speed; in lpfc_sli_port_speed_get()
5129 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5225 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5245 phba->fcoe_eventtag = acqe_link->event_tag; in lpfc_sli4_async_link_evt()
5246 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli4_async_link_evt()
5258 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); in lpfc_sli4_async_link_evt()
5259 if (!mp->virt) { in lpfc_sli4_async_link_evt()
5269 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; in lpfc_sli4_async_link_evt()
5272 phba->sli.slistat.link_event++; in lpfc_sli4_async_link_evt()
5276 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; in lpfc_sli4_async_link_evt()
5277 pmb->vport = phba->pport; in lpfc_sli4_async_link_evt()
5280 phba->sli4_hba.link_state.speed = in lpfc_sli4_async_link_evt()
5283 phba->sli4_hba.link_state.duplex = in lpfc_sli4_async_link_evt()
5285 phba->sli4_hba.link_state.status = in lpfc_sli4_async_link_evt()
5287 phba->sli4_hba.link_state.type = in lpfc_sli4_async_link_evt()
5289 phba->sli4_hba.link_state.number = in lpfc_sli4_async_link_evt()
5291 phba->sli4_hba.link_state.fault = in lpfc_sli4_async_link_evt()
5293 phba->sli4_hba.link_state.logical_speed = in lpfc_sli4_async_link_evt()
5297 "2900 Async FC/FCoE Link event - Speed:%dGBit " in lpfc_sli4_async_link_evt()
5300 phba->sli4_hba.link_state.speed, in lpfc_sli4_async_link_evt()
5301 phba->sli4_hba.link_state.topology, in lpfc_sli4_async_link_evt()
5302 phba->sli4_hba.link_state.status, in lpfc_sli4_async_link_evt()
5303 phba->sli4_hba.link_state.type, in lpfc_sli4_async_link_evt()
5304 phba->sli4_hba.link_state.number, in lpfc_sli4_async_link_evt()
5305 phba->sli4_hba.link_state.logical_speed, in lpfc_sli4_async_link_evt()
5306 phba->sli4_hba.link_state.fault); in lpfc_sli4_async_link_evt()
5309 * topology info. Note: Optional for non-FC-AL ports. in lpfc_sli4_async_link_evt()
5311 if (!(phba->hba_flag & HBA_FCOE_MODE)) { in lpfc_sli4_async_link_evt()
5323 mb = &pmb->u.mb; in lpfc_sli4_async_link_evt()
5324 mb->mbxStatus = MBX_SUCCESS; in lpfc_sli4_async_link_evt()
5330 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; in lpfc_sli4_async_link_evt()
5331 la->eventTag = acqe_link->event_tag; in lpfc_sli4_async_link_evt()
5352 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_sli4_async_link_evt()
5356 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5413 head = atomic_read(&phba->rxtable_idx_head); in lpfc_cgn_dump_rxmonitor()
5414 tail = atomic_read(&phba->rxtable_idx_tail); in lpfc_cgn_dump_rxmonitor()
5415 if (!phba->rxtable || head == tail) { in lpfc_cgn_dump_rxmonitor()
5426 start--; in lpfc_cgn_dump_rxmonitor()
5428 start = LPFC_MAX_RXMONITOR_ENTRY - 1; in lpfc_cgn_dump_rxmonitor()
5429 entry = &phba->rxtable[start]; in lpfc_cgn_dump_rxmonitor()
5434 cnt, entry->max_bytes_per_interval, in lpfc_cgn_dump_rxmonitor()
5435 entry->total_bytes, entry->rcv_bytes, in lpfc_cgn_dump_rxmonitor()
5436 entry->avg_io_latency, entry->avg_io_size, in lpfc_cgn_dump_rxmonitor()
5437 entry->cmf_info, entry->timer_utilization, in lpfc_cgn_dump_rxmonitor()
5438 entry->timer_interval, start); in lpfc_cgn_dump_rxmonitor()
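/*
 * A standalone sketch (userspace C, assumed ring size) of the reverse
 * ring-buffer walk above: step the index backwards from the head,
 * wrapping to the last slot when it goes below zero, until the tail is
 * reached.
 */
#include <stdio.h>

#define RING_ENTRIES 16	/* assumed size; the driver uses its own maximum */

static void dump_ring_backwards(const int *ring, int head, int tail)
{
	int idx = head;

	while (idx != tail) {
		idx--;
		if (idx < 0)
			idx = RING_ENTRIES - 1;	/* wrap around */
		printf("slot %d: %d\n", idx, ring[idx]);
	}
}

int main(void)
{
	int ring[RING_ENTRIES] = { 0 };
	int i;

	for (i = 0; i < 5; i++)
		ring[i] = 100 + i;
	dump_ring_backwards(ring, 5, 0);	/* prints slots 4 down to 0 */
	return 0;
}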
5446 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5462 if (!phba->cgn_i) in lpfc_cgn_update_stat()
5464 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; in lpfc_cgn_update_stat()
5471 cnt = le32_to_cpu(cp->link_integ_notification); in lpfc_cgn_update_stat()
5473 cp->link_integ_notification = cpu_to_le32(cnt); in lpfc_cgn_update_stat()
5475 cp->cgn_stat_lnk_month = broken.tm_mon + 1; in lpfc_cgn_update_stat()
5476 cp->cgn_stat_lnk_day = broken.tm_mday; in lpfc_cgn_update_stat()
5477 cp->cgn_stat_lnk_year = broken.tm_year - 100; in lpfc_cgn_update_stat()
5478 cp->cgn_stat_lnk_hour = broken.tm_hour; in lpfc_cgn_update_stat()
5479 cp->cgn_stat_lnk_min = broken.tm_min; in lpfc_cgn_update_stat()
5480 cp->cgn_stat_lnk_sec = broken.tm_sec; in lpfc_cgn_update_stat()
5483 cnt = le32_to_cpu(cp->delivery_notification); in lpfc_cgn_update_stat()
5485 cp->delivery_notification = cpu_to_le32(cnt); in lpfc_cgn_update_stat()
5487 cp->cgn_stat_del_month = broken.tm_mon + 1; in lpfc_cgn_update_stat()
5488 cp->cgn_stat_del_day = broken.tm_mday; in lpfc_cgn_update_stat()
5489 cp->cgn_stat_del_year = broken.tm_year - 100; in lpfc_cgn_update_stat()
5490 cp->cgn_stat_del_hour = broken.tm_hour; in lpfc_cgn_update_stat()
5491 cp->cgn_stat_del_min = broken.tm_min; in lpfc_cgn_update_stat()
5492 cp->cgn_stat_del_sec = broken.tm_sec; in lpfc_cgn_update_stat()
5495 cnt = le32_to_cpu(cp->cgn_peer_notification); in lpfc_cgn_update_stat()
5497 cp->cgn_peer_notification = cpu_to_le32(cnt); in lpfc_cgn_update_stat()
5499 cp->cgn_stat_peer_month = broken.tm_mon + 1; in lpfc_cgn_update_stat()
5500 cp->cgn_stat_peer_day = broken.tm_mday; in lpfc_cgn_update_stat()
5501 cp->cgn_stat_peer_year = broken.tm_year - 100; in lpfc_cgn_update_stat()
5502 cp->cgn_stat_peer_hour = broken.tm_hour; in lpfc_cgn_update_stat()
5503 cp->cgn_stat_peer_min = broken.tm_min; in lpfc_cgn_update_stat()
5504 cp->cgn_stat_peer_sec = broken.tm_sec; in lpfc_cgn_update_stat()
5507 cnt = le32_to_cpu(cp->cgn_notification); in lpfc_cgn_update_stat()
5509 cp->cgn_notification = cpu_to_le32(cnt); in lpfc_cgn_update_stat()
5511 cp->cgn_stat_cgn_month = broken.tm_mon + 1; in lpfc_cgn_update_stat()
5512 cp->cgn_stat_cgn_day = broken.tm_mday; in lpfc_cgn_update_stat()
5513 cp->cgn_stat_cgn_year = broken.tm_year - 100; in lpfc_cgn_update_stat()
5514 cp->cgn_stat_cgn_hour = broken.tm_hour; in lpfc_cgn_update_stat()
5515 cp->cgn_stat_cgn_min = broken.tm_min; in lpfc_cgn_update_stat()
5516 cp->cgn_stat_cgn_sec = broken.tm_sec; in lpfc_cgn_update_stat()
5518 if (phba->cgn_fpin_frequency && in lpfc_cgn_update_stat()
5519 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { in lpfc_cgn_update_stat()
5520 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; in lpfc_cgn_update_stat()
5521 cp->cgn_stat_npm = value; in lpfc_cgn_update_stat()
5525 cp->cgn_info_crc = cpu_to_le32(value); in lpfc_cgn_update_stat()
5529 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5555 if (!phba->cgn_i) in lpfc_cgn_save_evt_cnt()
5557 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; in lpfc_cgn_save_evt_cnt()
5559 if (time_before(jiffies, phba->cgn_evt_timestamp)) in lpfc_cgn_save_evt_cnt()
5561 phba->cgn_evt_timestamp = jiffies + in lpfc_cgn_save_evt_cnt()
5563 phba->cgn_evt_minute++; in lpfc_cgn_save_evt_cnt()
5570 if (phba->cgn_fpin_frequency && in lpfc_cgn_save_evt_cnt()
5571 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { in lpfc_cgn_save_evt_cnt()
5572 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; in lpfc_cgn_save_evt_cnt()
5573 cp->cgn_stat_npm = value; in lpfc_cgn_save_evt_cnt()
5577 lvalue = atomic_read(&phba->cgn_latency_evt_cnt); in lpfc_cgn_save_evt_cnt()
5578 latsum = atomic64_read(&phba->cgn_latency_evt); in lpfc_cgn_save_evt_cnt()
5579 atomic_set(&phba->cgn_latency_evt_cnt, 0); in lpfc_cgn_save_evt_cnt()
5580 atomic64_set(&phba->cgn_latency_evt, 0); in lpfc_cgn_save_evt_cnt()
5586 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; in lpfc_cgn_save_evt_cnt()
5587 phba->rx_block_cnt = 0; in lpfc_cgn_save_evt_cnt()
5592 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; in lpfc_cgn_save_evt_cnt()
5593 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; in lpfc_cgn_save_evt_cnt()
5594 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; in lpfc_cgn_save_evt_cnt()
5595 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; in lpfc_cgn_save_evt_cnt()
5598 value = (uint16_t)(phba->pport->cfg_lun_queue_depth); in lpfc_cgn_save_evt_cnt()
5599 cp->cgn_lunq = cpu_to_le16(value); in lpfc_cgn_save_evt_cnt()
5601 /* Record congestion buffer info - every minute in lpfc_cgn_save_evt_cnt()
5607 index = ++cp->cgn_index_minute; in lpfc_cgn_save_evt_cnt()
5608 if (cp->cgn_index_minute == LPFC_MIN_HOUR) { in lpfc_cgn_save_evt_cnt()
5609 cp->cgn_index_minute = 0; in lpfc_cgn_save_evt_cnt()
5614 dvalue = atomic_read(&phba->cgn_driver_evt_cnt); in lpfc_cgn_save_evt_cnt()
5615 atomic_set(&phba->cgn_driver_evt_cnt, 0); in lpfc_cgn_save_evt_cnt()
5617 /* Get the number of warning events - FPIN and Signal for this minute */ in lpfc_cgn_save_evt_cnt()
5619 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || in lpfc_cgn_save_evt_cnt()
5620 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || in lpfc_cgn_save_evt_cnt()
5621 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) in lpfc_cgn_save_evt_cnt()
5622 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); in lpfc_cgn_save_evt_cnt()
5623 atomic_set(&phba->cgn_fabric_warn_cnt, 0); in lpfc_cgn_save_evt_cnt()
5625 /* Get the number of alarm events - FPIN and Signal for this minute */ in lpfc_cgn_save_evt_cnt()
5627 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || in lpfc_cgn_save_evt_cnt()
5628 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) in lpfc_cgn_save_evt_cnt()
5629 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); in lpfc_cgn_save_evt_cnt()
5630 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); in lpfc_cgn_save_evt_cnt()
5635 ptr = &cp->cgn_drvr_min[index]; in lpfc_cgn_save_evt_cnt()
5639 ptr = &cp->cgn_warn_min[index]; in lpfc_cgn_save_evt_cnt()
5643 ptr = &cp->cgn_alarm_min[index]; in lpfc_cgn_save_evt_cnt()
5647 lptr = &cp->cgn_latency_min[index]; in lpfc_cgn_save_evt_cnt()
5656 mptr = &cp->cgn_bw_min[index]; in lpfc_cgn_save_evt_cnt()
5660 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", in lpfc_cgn_save_evt_cnt()
5664 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { in lpfc_cgn_save_evt_cnt()
5665 /* Record congestion buffer info - every hour in lpfc_cgn_save_evt_cnt()
5668 index = ++cp->cgn_index_hour; in lpfc_cgn_save_evt_cnt()
5669 if (cp->cgn_index_hour == LPFC_HOUR_DAY) { in lpfc_cgn_save_evt_cnt()
5670 cp->cgn_index_hour = 0; in lpfc_cgn_save_evt_cnt()
5681 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); in lpfc_cgn_save_evt_cnt()
5682 wvalue += le16_to_cpu(cp->cgn_warn_min[i]); in lpfc_cgn_save_evt_cnt()
5683 lvalue += le32_to_cpu(cp->cgn_latency_min[i]); in lpfc_cgn_save_evt_cnt()
5684 mbps += le16_to_cpu(cp->cgn_bw_min[i]); in lpfc_cgn_save_evt_cnt()
5685 avalue += le16_to_cpu(cp->cgn_alarm_min[i]); in lpfc_cgn_save_evt_cnt()
5692 lptr = &cp->cgn_drvr_hr[index]; in lpfc_cgn_save_evt_cnt()
5694 lptr = &cp->cgn_warn_hr[index]; in lpfc_cgn_save_evt_cnt()
5696 lptr = &cp->cgn_latency_hr[index]; in lpfc_cgn_save_evt_cnt()
5698 mptr = &cp->cgn_bw_hr[index]; in lpfc_cgn_save_evt_cnt()
5700 lptr = &cp->cgn_alarm_hr[index]; in lpfc_cgn_save_evt_cnt()
5704 "2419 Congestion Info - hour " in lpfc_cgn_save_evt_cnt()
5710 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { in lpfc_cgn_save_evt_cnt()
5711 /* Record congestion buffer info - every day in lpfc_cgn_save_evt_cnt()
5715 index = ++cp->cgn_index_day; in lpfc_cgn_save_evt_cnt()
5716 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { in lpfc_cgn_save_evt_cnt()
5717 cp->cgn_index_day = 0; in lpfc_cgn_save_evt_cnt()
5727 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { in lpfc_cgn_save_evt_cnt()
5728 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); in lpfc_cgn_save_evt_cnt()
5730 cp->cgn_info_month = broken.tm_mon + 1; in lpfc_cgn_save_evt_cnt()
5731 cp->cgn_info_day = broken.tm_mday; in lpfc_cgn_save_evt_cnt()
5732 cp->cgn_info_year = broken.tm_year - 100; in lpfc_cgn_save_evt_cnt()
5733 cp->cgn_info_hour = broken.tm_hour; in lpfc_cgn_save_evt_cnt()
5734 cp->cgn_info_minute = broken.tm_min; in lpfc_cgn_save_evt_cnt()
5735 cp->cgn_info_second = broken.tm_sec; in lpfc_cgn_save_evt_cnt()
5741 cp->cgn_info_day, cp->cgn_info_month, in lpfc_cgn_save_evt_cnt()
5742 cp->cgn_info_year, cp->cgn_info_hour, in lpfc_cgn_save_evt_cnt()
5743 cp->cgn_info_minute, cp->cgn_info_second); in lpfc_cgn_save_evt_cnt()
5753 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); in lpfc_cgn_save_evt_cnt()
5754 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); in lpfc_cgn_save_evt_cnt()
5755 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); in lpfc_cgn_save_evt_cnt()
5756 mbps += le16_to_cpu(cp->cgn_bw_hr[i]); in lpfc_cgn_save_evt_cnt()
5757 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); in lpfc_cgn_save_evt_cnt()
5764 lptr = &cp->cgn_drvr_day[index]; in lpfc_cgn_save_evt_cnt()
5766 lptr = &cp->cgn_warn_day[index]; in lpfc_cgn_save_evt_cnt()
5768 lptr = &cp->cgn_latency_day[index]; in lpfc_cgn_save_evt_cnt()
5770 mptr = &cp->cgn_bw_day[index]; in lpfc_cgn_save_evt_cnt()
5772 lptr = &cp->cgn_alarm_day[index]; in lpfc_cgn_save_evt_cnt()
5776 "2420 Congestion Info - daily (%d): " in lpfc_cgn_save_evt_cnt()
5784 if (index == (LPFC_MAX_CGN_DAYS - 1)) { in lpfc_cgn_save_evt_cnt()
5785 phba->hba_flag |= HBA_CGN_DAY_WRAP; in lpfc_cgn_save_evt_cnt()
5786 ktime_get_real_ts64(&phba->cgn_daily_ts); in lpfc_cgn_save_evt_cnt()
5791 value = phba->cgn_fpin_frequency; in lpfc_cgn_save_evt_cnt()
5792 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) in lpfc_cgn_save_evt_cnt()
5793 cp->cgn_warn_freq = cpu_to_le16(value); in lpfc_cgn_save_evt_cnt()
5794 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) in lpfc_cgn_save_evt_cnt()
5795 cp->cgn_alarm_freq = cpu_to_le16(value); in lpfc_cgn_save_evt_cnt()
5800 value = phba->cgn_sig_freq; in lpfc_cgn_save_evt_cnt()
5802 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || in lpfc_cgn_save_evt_cnt()
5803 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) in lpfc_cgn_save_evt_cnt()
5804 cp->cgn_warn_freq = cpu_to_le16(value); in lpfc_cgn_save_evt_cnt()
5805 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) in lpfc_cgn_save_evt_cnt()
5806 cp->cgn_alarm_freq = cpu_to_le16(value); in lpfc_cgn_save_evt_cnt()
5810 cp->cgn_info_crc = cpu_to_le32(lvalue); in lpfc_cgn_save_evt_cnt()
5814 * lpfc_calc_cmf_latency - latency from start of rx rate timer interval in lpfc_cgn_save_evt_cnt()
5832 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { in lpfc_calc_cmf_latency()
5833 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / in lpfc_calc_cmf_latency()
5836 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { in lpfc_calc_cmf_latency()
5837 msec = (cmpl_time.tv_sec - in lpfc_calc_cmf_latency()
5838 phba->cmf_latency.tv_sec) * MSEC_PER_SEC; in lpfc_calc_cmf_latency()
5839 msec += ((cmpl_time.tv_nsec - in lpfc_calc_cmf_latency()
5840 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); in lpfc_calc_cmf_latency()
5842 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - in lpfc_calc_cmf_latency()
5844 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + in lpfc_calc_cmf_latency()
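/*
 * A standalone sketch (userspace C) of the three-case millisecond delta
 * above: same second, nanoseconds not wrapped, and nanoseconds wrapped
 * across a second boundary (borrow one second, add both partial pieces).
 */
#include <stdio.h>
#include <time.h>

#define MSEC_PER_SEC	1000L
#define NSEC_PER_MSEC	1000000L
#define NSEC_PER_SEC	1000000000L

static long delta_msec(struct timespec start, struct timespec end)
{
	if (end.tv_sec == start.tv_sec)
		return (end.tv_nsec - start.tv_nsec) / NSEC_PER_MSEC;

	if (end.tv_nsec >= start.tv_nsec)
		return (end.tv_sec - start.tv_sec) * MSEC_PER_SEC +
		       (end.tv_nsec - start.tv_nsec) / NSEC_PER_MSEC;

	/* nsec wrapped: borrow a second, then add both partial pieces */
	return (end.tv_sec - start.tv_sec - 1) * MSEC_PER_SEC +
	       ((NSEC_PER_SEC - start.tv_nsec) + end.tv_nsec) / NSEC_PER_MSEC;
}

int main(void)
{
	struct timespec a = { 10, 900000000L }, b = { 11, 100000000L };

	/* 0.9 s -> 1.1 s is 200 ms via the wrapped-nanoseconds case */
	printf("%ld ms\n", delta_msec(a, b));
	return 0;
}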
5852 * lpfc_cmf_timer - This is the timer function for one congestion
5869 int cpu; in lpfc_cmf_timer() local
5872 if (phba->cmf_active_mode == LPFC_CFG_OFF || in lpfc_cmf_timer()
5873 !phba->cmf_latency.tv_sec) { in lpfc_cmf_timer()
5876 phba->cmf_active_mode, in lpfc_cmf_timer()
5877 (uint64_t)phba->cmf_latency.tv_sec); in lpfc_cmf_timer()
5884 if (!phba->pport) in lpfc_cmf_timer()
5890 atomic_set(&phba->cmf_stop_io, 1); in lpfc_cmf_timer()
5904 ktime_get_real_ts64(&phba->cmf_latency); in lpfc_cmf_timer()
5906 phba->cmf_link_byte_count = in lpfc_cmf_timer()
5907 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); in lpfc_cmf_timer()
5914 for_each_present_cpu(cpu) { in lpfc_cmf_timer()
5915 cgs = per_cpu_ptr(phba->cmf_stat, cpu); in lpfc_cmf_timer()
5916 total += atomic64_xchg(&cgs->total_bytes, 0); in lpfc_cmf_timer()
5917 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); in lpfc_cmf_timer()
5918 lat += atomic64_xchg(&cgs->rx_latency, 0); in lpfc_cmf_timer()
5919 rcv += atomic64_xchg(&cgs->rcv_bytes, 0); in lpfc_cmf_timer()
5927 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && in lpfc_cmf_timer()
5928 phba->link_state != LPFC_LINK_DOWN && in lpfc_cmf_timer()
5929 phba->hba_flag & HBA_SETUP) { in lpfc_cmf_timer()
5930 mbpi = phba->cmf_last_sync_bw; in lpfc_cmf_timer()
5931 phba->cmf_last_sync_bw = 0; in lpfc_cmf_timer()
5937 mbpi = phba->cmf_link_byte_count; in lpfc_cmf_timer()
5939 phba->cmf_timer_cnt++; in lpfc_cmf_timer()
5943 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); in lpfc_cmf_timer()
5944 atomic64_add(lat, &phba->cgn_latency_evt); in lpfc_cmf_timer()
5946 busy = atomic_xchg(&phba->cmf_busy, 0); in lpfc_cmf_timer()
5947 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); in lpfc_cmf_timer()
5951 if (mbpi > phba->cmf_link_byte_count || in lpfc_cmf_timer()
5952 phba->cmf_active_mode == LPFC_CFG_MONITOR) in lpfc_cmf_timer()
5953 mbpi = phba->cmf_link_byte_count; in lpfc_cmf_timer()
5958 if (mbpi != phba->cmf_max_bytes_per_interval) in lpfc_cmf_timer()
5959 phba->cmf_max_bytes_per_interval = mbpi; in lpfc_cmf_timer()
5963 if (phba->rxtable) { in lpfc_cmf_timer()
5964 head = atomic_xchg(&phba->rxtable_idx_head, in lpfc_cmf_timer()
5966 entry = &phba->rxtable[head]; in lpfc_cmf_timer()
5967 entry->total_bytes = total; in lpfc_cmf_timer()
5968 entry->rcv_bytes = rcv; in lpfc_cmf_timer()
5969 entry->cmf_busy = busy; in lpfc_cmf_timer()
5970 entry->cmf_info = phba->cmf_active_info; in lpfc_cmf_timer()
5972 entry->avg_io_latency = div_u64(lat, io_cnt); in lpfc_cmf_timer()
5973 entry->avg_io_size = div_u64(rcv, io_cnt); in lpfc_cmf_timer()
5975 entry->avg_io_latency = 0; in lpfc_cmf_timer()
5976 entry->avg_io_size = 0; in lpfc_cmf_timer()
5978 entry->max_read_cnt = max_read; in lpfc_cmf_timer()
5979 entry->io_cnt = io_cnt; in lpfc_cmf_timer()
5980 entry->max_bytes_per_interval = mbpi; in lpfc_cmf_timer()
5981 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) in lpfc_cmf_timer()
5982 entry->timer_utilization = phba->cmf_last_ts; in lpfc_cmf_timer()
5984 entry->timer_utilization = ms; in lpfc_cmf_timer()
5985 entry->timer_interval = ms; in lpfc_cmf_timer()
5986 phba->cmf_last_ts = 0; in lpfc_cmf_timer()
5990 tail = atomic_read(&phba->rxtable_idx_tail); in lpfc_cmf_timer()
5993 atomic_set(&phba->rxtable_idx_tail, tail); in lpfc_cmf_timer()
5995 atomic_set(&phba->rxtable_idx_head, head); in lpfc_cmf_timer()
5998 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { in lpfc_cmf_timer()
6003 atomic_inc(&phba->cgn_driver_evt_cnt); in lpfc_cmf_timer()
6005 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ in lpfc_cmf_timer()
6015 phba->cgn_evt_timestamp)) { in lpfc_cmf_timer()
6016 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - in lpfc_cmf_timer()
6024 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * in lpfc_cmf_timer()
6026 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) in lpfc_cmf_timer()
6027 phba->cmf_max_bytes_per_interval = in lpfc_cmf_timer()
6028 phba->cmf_link_byte_count; in lpfc_cmf_timer()
6034 if (atomic_xchg(&phba->cmf_bw_wait, 0)) in lpfc_cmf_timer()
6035 queue_work(phba->wq, &phba->unblock_request_work); in lpfc_cmf_timer()
6038 atomic_set(&phba->cmf_stop_io, 0); in lpfc_cmf_timer()
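The interval bookkeeping above drains per-CPU congestion counters with an atomic exchange so the timer never races the I/O path: each counter is swapped to zero and its old value folded into the interval total, so no update is lost or counted twice. A minimal userspace sketch of that read-and-clear pattern, using C11 atomics in place of the kernel's atomic64_xchg()/per_cpu_ptr() (NR_CPUS and the struct layout are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct cgn_stat {
	_Atomic uint64_t total_bytes;    /* bytes seen this interval */
};

static struct cgn_stat stats[NR_CPUS];   /* stand-in for per-CPU data */

static uint64_t sample_interval(void)
{
	uint64_t total = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		/* swap-to-zero: read and reset the counter in one step */
		total += atomic_exchange(&stats[cpu].total_bytes, 0);
	return total;
}

int main(void)
{
	atomic_fetch_add(&stats[1].total_bytes, 4096);
	atomic_fetch_add(&stats[3].total_bytes, 512);
	printf("interval total: %llu bytes\n",
	       (unsigned long long)sample_interval());
	return 0;
}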
6048 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6062 phba->sli4_hba.link_state.speed = in lpfc_update_trunk_link_status()
6066 phba->sli4_hba.link_state.logical_speed = in lpfc_update_trunk_link_status()
6069 phba->fc_linkspeed = in lpfc_update_trunk_link_status()
6075 phba->trunk_link.link0.state = in lpfc_update_trunk_link_status()
6078 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; in lpfc_update_trunk_link_status()
6081 phba->trunk_link.link1.state = in lpfc_update_trunk_link_status()
6084 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; in lpfc_update_trunk_link_status()
6087 phba->trunk_link.link2.state = in lpfc_update_trunk_link_status()
6090 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; in lpfc_update_trunk_link_status()
6093 phba->trunk_link.link3.state = in lpfc_update_trunk_link_status()
6096 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; in lpfc_update_trunk_link_status()
6100 "2910 Async FC Trunking Event - Speed:%d\n" in lpfc_update_trunk_link_status()
6103 phba->sli4_hba.link_state.speed, in lpfc_update_trunk_link_status()
6104 phba->sli4_hba.link_state.logical_speed, in lpfc_update_trunk_link_status()
6108 if (phba->cmf_active_mode != LPFC_CFG_OFF) in lpfc_update_trunk_link_status()
6115 * SLI-4: We have only 0xA error codes in lpfc_update_trunk_link_status()
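The per-link fault assignment above repeats the same mask test for bits 0x1 through 0x8, one bit per trunked link: the error code is recorded only for links whose fault bit is set. A stand-alone sketch of that decode, with a loop in place of the unrolled assignments (the struct fields are simplified stand-ins):

#include <stdint.h>
#include <stdio.h>

struct trunk_link {
	int state;   /* e.g. link up/down */
	int fault;   /* error code, 0 if none */
};

static void decode_trunk_fault(struct trunk_link link[4],
			       uint8_t port_fault, int err)
{
	for (int i = 0; i < 4; i++)
		link[i].fault = (port_fault & (1u << i)) ? err : 0;
}

int main(void)
{
	struct trunk_link links[4] = { 0 };

	decode_trunk_fault(links, 0x5, 42);  /* faults on links 0 and 2 */
	for (int i = 0; i < 4; i++)
		printf("link%d fault=%d\n", i, links[i].fault);
	return 0;
}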
6127 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6159 phba->sli4_hba.link_state.speed = in lpfc_sli4_async_fc_evt()
6162 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; in lpfc_sli4_async_fc_evt()
6163 phba->sli4_hba.link_state.topology = in lpfc_sli4_async_fc_evt()
6165 phba->sli4_hba.link_state.status = in lpfc_sli4_async_fc_evt()
6167 phba->sli4_hba.link_state.type = in lpfc_sli4_async_fc_evt()
6169 phba->sli4_hba.link_state.number = in lpfc_sli4_async_fc_evt()
6171 phba->sli4_hba.link_state.fault = in lpfc_sli4_async_fc_evt()
6176 phba->sli4_hba.link_state.logical_speed = 0; in lpfc_sli4_async_fc_evt()
6177 else if (!phba->sli4_hba.conf_trunk) in lpfc_sli4_async_fc_evt()
6178 phba->sli4_hba.link_state.logical_speed = in lpfc_sli4_async_fc_evt()
6182 "2896 Async FC event - Speed:%dGBaud Topology:x%x " in lpfc_sli4_async_fc_evt()
6185 phba->sli4_hba.link_state.speed, in lpfc_sli4_async_fc_evt()
6186 phba->sli4_hba.link_state.topology, in lpfc_sli4_async_fc_evt()
6187 phba->sli4_hba.link_state.status, in lpfc_sli4_async_fc_evt()
6188 phba->sli4_hba.link_state.type, in lpfc_sli4_async_fc_evt()
6189 phba->sli4_hba.link_state.number, in lpfc_sli4_async_fc_evt()
6190 phba->sli4_hba.link_state.logical_speed, in lpfc_sli4_async_fc_evt()
6191 phba->sli4_hba.link_state.fault); in lpfc_sli4_async_fc_evt()
6192 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli4_async_fc_evt()
6204 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); in lpfc_sli4_async_fc_evt()
6205 if (!mp->virt) { in lpfc_sli4_async_fc_evt()
6215 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; in lpfc_sli4_async_fc_evt()
6218 phba->sli.slistat.link_event++; in lpfc_sli4_async_fc_evt()
6222 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; in lpfc_sli4_async_fc_evt()
6223 pmb->vport = phba->pport; in lpfc_sli4_async_fc_evt()
6225 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { in lpfc_sli4_async_fc_evt()
6226 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); in lpfc_sli4_async_fc_evt()
6228 switch (phba->sli4_hba.link_state.status) { in lpfc_sli4_async_fc_evt()
6230 phba->link_flag |= LS_MDS_LINK_DOWN; in lpfc_sli4_async_fc_evt()
6233 phba->link_flag |= LS_MDS_LOOPBACK; in lpfc_sli4_async_fc_evt()
6240 mb = &pmb->u.mb; in lpfc_sli4_async_fc_evt()
6241 mb->mbxStatus = MBX_SUCCESS; in lpfc_sli4_async_fc_evt()
6247 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; in lpfc_sli4_async_fc_evt()
6248 la->eventTag = acqe_fc->event_tag; in lpfc_sli4_async_fc_evt()
6250 if (phba->sli4_hba.link_state.status == in lpfc_sli4_async_fc_evt()
6272 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_sli4_async_fc_evt()
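Handlers like this one pull each link attribute (speed, topology, status, fault) out of packed ACQE words at fixed bit positions via the bf_get() accessors elided in this excerpt. A userspace sketch of that shift-and-mask extraction; the field offsets below are invented for illustration and are not the real ACQE layout:

#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(word, shift, mask)  (((word) >> (shift)) & (mask))

/* hypothetical layout: speed in bits 16..23, topology in bits 8..15 */
#define SPEED_SHIFT 16
#define SPEED_MASK  0xff
#define TOPO_SHIFT  8
#define TOPO_MASK   0xff

int main(void)
{
	uint32_t acqe_word = (32u << SPEED_SHIFT) | (2u << TOPO_SHIFT);

	printf("speed=%u topology=%u\n",
	       (unsigned)FIELD_GET(acqe_word, SPEED_SHIFT, SPEED_MASK),
	       (unsigned)FIELD_GET(acqe_word, TOPO_SHIFT, TOPO_MASK));
	return 0;
}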
6276 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6300 "2901 Async SLI event - Type:%d, Event Data: x%08x " in lpfc_sli4_async_sli_evt()
6302 acqe_sli->event_data1, acqe_sli->event_data2, in lpfc_sli4_async_sli_evt()
6303 acqe_sli->reserved, acqe_sli->trailer); in lpfc_sli4_async_sli_evt()
6305 port_name = phba->Port[0]; in lpfc_sli4_async_sli_evt()
6313 temp_event_data.data = (uint32_t)acqe_sli->event_data1; in lpfc_sli4_async_sli_evt()
6316 "3190 Over Temperature:%d Celsius- Port Name %c\n", in lpfc_sli4_async_sli_evt()
6317 acqe_sli->event_data1, port_name); in lpfc_sli4_async_sli_evt()
6319 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; in lpfc_sli4_async_sli_evt()
6320 shost = lpfc_shost_from_vport(phba->pport); in lpfc_sli4_async_sli_evt()
6330 temp_event_data.data = (uint32_t)acqe_sli->event_data1; in lpfc_sli4_async_sli_evt()
6333 "3191 Normal Temperature:%d Celsius - Port Name %c\n", in lpfc_sli4_async_sli_evt()
6334 acqe_sli->event_data1, port_name); in lpfc_sli4_async_sli_evt()
6336 shost = lpfc_shost_from_vport(phba->pport); in lpfc_sli4_async_sli_evt()
6345 &acqe_sli->event_data1; in lpfc_sli4_async_sli_evt()
6348 switch (phba->sli4_hba.lnk_info.lnk_no) { in lpfc_sli4_async_sli_evt()
6351 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6353 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6357 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6359 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6363 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6365 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6369 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6371 &misconfigured->theEvent); in lpfc_sli4_async_sli_evt()
6378 phba->sli4_hba.lnk_info.lnk_no); in lpfc_sli4_async_sli_evt()
6383 if (phba->sli4_hba.lnk_info.optic_state == status) in lpfc_sli4_async_sli_evt()
6392 "installed/not installed - Reseat optics, " in lpfc_sli4_async_sli_evt()
6397 "Optics of two types installed - Remove one " in lpfc_sli4_async_sli_evt()
6401 sprintf(message, "Incompatible optics - Replace with " in lpfc_sli4_async_sli_evt()
6405 sprintf(message, "Unqualified optics - Replace with " in lpfc_sli4_async_sli_evt()
6407 "Support - Link is%s operational", in lpfc_sli4_async_sli_evt()
6411 sprintf(message, "Uncertified optics - Replace with " in lpfc_sli4_async_sli_evt()
6412 "Avago-certified optics to enable link " in lpfc_sli4_async_sli_evt()
6413 "operation - Link is%s operational", in lpfc_sli4_async_sli_evt()
6425 phba->lmt = 0; in lpfc_sli4_async_sli_evt()
6433 for (i = 0; i <= phba->max_vports && vports[i] != NULL; in lpfc_sli4_async_sli_evt()
6441 phba->sli4_hba.lnk_info.optic_state = status; in lpfc_sli4_async_sli_evt()
6447 "3192 Remote DPort Test Initiated - " in lpfc_sli4_async_sli_evt()
6449 acqe_sli->event_data1, acqe_sli->event_data2); in lpfc_sli4_async_sli_evt()
6457 * to use FA-WWN, but the attached device doesn't support it. in lpfc_sli4_async_sli_evt()
6459 * Event Data1 - N.A, Event Data2 - N.A in lpfc_sli4_async_sli_evt()
6462 "2699 Misconfigured FA-WWN - Attached device does " in lpfc_sli4_async_sli_evt()
6463 "not support FA-WWN\n"); in lpfc_sli4_async_sli_evt()
6468 "2518 EEPROM failure - " in lpfc_sli4_async_sli_evt()
6470 acqe_sli->event_data1, acqe_sli->event_data2); in lpfc_sli4_async_sli_evt()
6473 if (phba->cmf_active_mode == LPFC_CFG_OFF) in lpfc_sli4_async_sli_evt()
6476 &acqe_sli->event_data1; in lpfc_sli4_async_sli_evt()
6477 phba->cgn_acqe_cnt++; in lpfc_sli4_async_sli_evt()
6480 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); in lpfc_sli4_async_sli_evt()
6481 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); in lpfc_sli4_async_sli_evt()
6486 if (cgn_signal->alarm_cnt) { in lpfc_sli4_async_sli_evt()
6487 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { in lpfc_sli4_async_sli_evt()
6489 atomic_add(cgn_signal->alarm_cnt, in lpfc_sli4_async_sli_evt()
6490 &phba->cgn_fabric_alarm_cnt); in lpfc_sli4_async_sli_evt()
6492 atomic_add(cgn_signal->alarm_cnt, in lpfc_sli4_async_sli_evt()
6493 &phba->cgn_sync_alarm_cnt); in lpfc_sli4_async_sli_evt()
6497 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || in lpfc_sli4_async_sli_evt()
6498 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { in lpfc_sli4_async_sli_evt()
6500 atomic_add(cnt, &phba->cgn_fabric_warn_cnt); in lpfc_sli4_async_sli_evt()
6502 atomic_add(cnt, &phba->cgn_sync_warn_cnt); in lpfc_sli4_async_sli_evt()
6515 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6533 phba = vport->phba; in lpfc_sli4_perform_vport_cvl()
6543 ndlp->nlp_type |= NLP_FABRIC; in lpfc_sli4_perform_vport_cvl()
6547 if ((phba->pport->port_state < LPFC_FLOGI) && in lpfc_sli4_perform_vport_cvl()
6548 (phba->pport->port_state != LPFC_VPORT_FAILED)) in lpfc_sli4_perform_vport_cvl()
6551 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) in lpfc_sli4_perform_vport_cvl()
6552 && (vport->port_state != LPFC_VPORT_FAILED)) in lpfc_sli4_perform_vport_cvl()
6559 spin_lock_irq(shost->host_lock); in lpfc_sli4_perform_vport_cvl()
6560 vport->fc_flag |= FC_VPORT_CVL_RCVD; in lpfc_sli4_perform_vport_cvl()
6561 spin_unlock_irq(shost->host_lock); in lpfc_sli4_perform_vport_cvl()
6567 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6581 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) in lpfc_sli4_perform_all_vport_cvl()
6587 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6605 phba->fc_eventTag = acqe_fip->event_tag; in lpfc_sli4_async_fip_evt()
6606 phba->fcoe_eventtag = acqe_fip->event_tag; in lpfc_sli4_async_fip_evt()
6614 acqe_fip->event_tag, in lpfc_sli4_async_fip_evt()
6615 acqe_fip->index); in lpfc_sli4_async_fip_evt()
6621 acqe_fip->event_tag, in lpfc_sli4_async_fip_evt()
6622 acqe_fip->index); in lpfc_sli4_async_fip_evt()
6623 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { in lpfc_sli4_async_fip_evt()
6633 acqe_fip->index); in lpfc_sli4_async_fip_evt()
6634 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); in lpfc_sli4_async_fip_evt()
6638 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6639 if (phba->hba_flag & FCF_TS_INPROG) { in lpfc_sli4_async_fip_evt()
6640 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6644 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { in lpfc_sli4_async_fip_evt()
6645 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6650 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { in lpfc_sli4_async_fip_evt()
6651 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6654 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6656 /* Otherwise, scan the entire FCF table and re-discover SAN */ in lpfc_sli4_async_fip_evt()
6660 acqe_fip->event_tag, acqe_fip->index); in lpfc_sli4_async_fip_evt()
6673 acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6677 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; in lpfc_sli4_async_fip_evt()
6680 "tag:x%x\n", acqe_fip->index, in lpfc_sli4_async_fip_evt()
6681 acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6686 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6687 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && in lpfc_sli4_async_fip_evt()
6688 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { in lpfc_sli4_async_fip_evt()
6689 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6691 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); in lpfc_sli4_async_fip_evt()
6694 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6697 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) in lpfc_sli4_async_fip_evt()
6706 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6708 phba->fcf.fcf_flag |= FCF_DEAD_DISC; in lpfc_sli4_async_fip_evt()
6709 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6714 "\n", acqe_fip->event_tag, acqe_fip->index); in lpfc_sli4_async_fip_evt()
6722 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6723 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; in lpfc_sli4_async_fip_evt()
6724 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6741 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; in lpfc_sli4_async_fip_evt()
6745 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6748 acqe_fip->index); in lpfc_sli4_async_fip_evt()
6756 for (i = 0; i <= phba->max_vports && vports[i] != NULL; in lpfc_sli4_async_fip_evt()
6758 if ((!(vports[i]->fc_flag & in lpfc_sli4_async_fip_evt()
6760 (vports[i]->port_state > LPFC_FDISC)) { in lpfc_sli4_async_fip_evt()
6769 * Don't re-instantiate if vport is marked for deletion. in lpfc_sli4_async_fip_evt()
6773 if (!(vport->load_flag & FC_UNLOADING) && in lpfc_sli4_async_fip_evt()
6777 * re-instantiate the Vlink using FDISC. in lpfc_sli4_async_fip_evt()
6779 mod_timer(&ndlp->nlp_delayfunc, in lpfc_sli4_async_fip_evt()
6781 spin_lock_irq(&ndlp->lock); in lpfc_sli4_async_fip_evt()
6782 ndlp->nlp_flag |= NLP_DELAY_TMO; in lpfc_sli4_async_fip_evt()
6783 spin_unlock_irq(&ndlp->lock); in lpfc_sli4_async_fip_evt()
6784 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; in lpfc_sli4_async_fip_evt()
6785 vport->port_state = LPFC_FDISC; in lpfc_sli4_async_fip_evt()
6794 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6795 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { in lpfc_sli4_async_fip_evt()
6796 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6800 phba->fcf.fcf_flag |= FCF_ACVL_DISC; in lpfc_sli4_async_fip_evt()
6801 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6805 "evt_tag:x%x\n", acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6813 spin_lock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6814 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; in lpfc_sli4_async_fip_evt()
6815 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_async_fip_evt()
6817 * Last resort will be re-try on the in lpfc_sli4_async_fip_evt()
6832 "0x%x\n", event_type, acqe_fip->event_tag); in lpfc_sli4_async_fip_evt()
6838 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6848 phba->fc_eventTag = acqe_dcbx->event_tag; in lpfc_sli4_async_dcbx_evt()
6855 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6869 phba->fc_eventTag = acqe_grp5->event_tag; in lpfc_sli4_async_grp5_evt()
6870 phba->fcoe_eventtag = acqe_grp5->event_tag; in lpfc_sli4_async_grp5_evt()
6871 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; in lpfc_sli4_async_grp5_evt()
6872 phba->sli4_hba.link_state.logical_speed = in lpfc_sli4_async_grp5_evt()
6877 phba->sli4_hba.link_state.logical_speed); in lpfc_sli4_async_grp5_evt()
6881 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
6890 if (!phba->cgn_i) in lpfc_sli4_async_cmstat_evt()
6896 * lpfc_cgn_params_val - Validate FW congestion parameters.
6906 spin_lock_irq(&phba->hbalock); in lpfc_cgn_params_val()
6908 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, in lpfc_cgn_params_val()
6912 p_cfg_param->cgn_param_mode); in lpfc_cgn_params_val()
6913 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; in lpfc_cgn_params_val()
6916 spin_unlock_irq(&phba->hbalock); in lpfc_cgn_params_val()
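The validation above clamps an out-of-range mode reported by firmware back to LPFC_CFG_OFF under the hba lock, failing safe rather than rejecting the whole parameter block. A minimal stand-alone analogue of the inclusive-bounds lpfc_rangecheck() idiom (enum values are placeholders):

#include <stdbool.h>
#include <stdio.h>

enum { CFG_OFF = 0, CFG_MANAGED = 1, CFG_MONITOR = 2 };

static bool rangecheck(unsigned int val, unsigned int min, unsigned int max)
{
	return val >= min && val <= max;
}

int main(void)
{
	unsigned int mode = 7;            /* bogus value from "firmware" */

	if (!rangecheck(mode, CFG_OFF, CFG_MONITOR)) {
		printf("mode %u out of range, forcing OFF\n", mode);
		mode = CFG_OFF;           /* fail safe, as the driver does */
	}
	return 0;
}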
6920 * lpfc_cgn_params_parse - Process a FW cong parm change event
6927 * valid, in-range values. If the signature magic is correct and
6942 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { in lpfc_cgn_params_parse()
6950 p_cgn_param->cgn_param_magic, in lpfc_cgn_params_parse()
6951 p_cgn_param->cgn_param_version, in lpfc_cgn_params_parse()
6952 p_cgn_param->cgn_param_mode, in lpfc_cgn_params_parse()
6953 p_cgn_param->cgn_param_level0, in lpfc_cgn_params_parse()
6954 p_cgn_param->cgn_param_level1, in lpfc_cgn_params_parse()
6955 p_cgn_param->cgn_param_level2, in lpfc_cgn_params_parse()
6956 p_cgn_param->byte13, in lpfc_cgn_params_parse()
6957 p_cgn_param->byte14, in lpfc_cgn_params_parse()
6958 p_cgn_param->byte15, in lpfc_cgn_params_parse()
6959 p_cgn_param->byte11, in lpfc_cgn_params_parse()
6960 p_cgn_param->byte12, in lpfc_cgn_params_parse()
6961 phba->cmf_active_mode); in lpfc_cgn_params_parse()
6963 oldmode = phba->cmf_active_mode; in lpfc_cgn_params_parse()
6971 spin_lock_irq(&phba->hbalock); in lpfc_cgn_params_parse()
6972 memcpy(&phba->cgn_p, p_cgn_param, in lpfc_cgn_params_parse()
6976 if (phba->cgn_i) { in lpfc_cgn_params_parse()
6977 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; in lpfc_cgn_params_parse()
6978 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; in lpfc_cgn_params_parse()
6979 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; in lpfc_cgn_params_parse()
6980 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; in lpfc_cgn_params_parse()
6981 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; in lpfc_cgn_params_parse()
6984 cp->cgn_info_crc = cpu_to_le32(crc); in lpfc_cgn_params_parse()
6986 spin_unlock_irq(&phba->hbalock); in lpfc_cgn_params_parse()
6988 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; in lpfc_cgn_params_parse()
6992 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { in lpfc_cgn_params_parse()
6996 if (phba->link_state >= LPFC_LINK_UP) { in lpfc_cgn_params_parse()
6997 phba->cgn_reg_fpin = in lpfc_cgn_params_parse()
6998 phba->cgn_init_reg_fpin; in lpfc_cgn_params_parse()
6999 phba->cgn_reg_signal = in lpfc_cgn_params_parse()
7000 phba->cgn_init_reg_signal; in lpfc_cgn_params_parse()
7001 lpfc_issue_els_edc(phba->pport, 0); in lpfc_cgn_params_parse()
7006 switch (phba->cgn_p.cgn_param_mode) { in lpfc_cgn_params_parse()
7010 if (phba->link_state >= LPFC_LINK_UP) in lpfc_cgn_params_parse()
7011 lpfc_issue_els_edc(phba->pport, 0); in lpfc_cgn_params_parse()
7017 phba->cmf_max_bytes_per_interval = in lpfc_cgn_params_parse()
7018 phba->cmf_link_byte_count; in lpfc_cgn_params_parse()
7020 /* Resume blocked IO - unblock on workqueue */ in lpfc_cgn_params_parse()
7021 queue_work(phba->wq, in lpfc_cgn_params_parse()
7022 &phba->unblock_request_work); in lpfc_cgn_params_parse()
7027 switch (phba->cgn_p.cgn_param_mode) { in lpfc_cgn_params_parse()
7031 if (phba->link_state >= LPFC_LINK_UP) in lpfc_cgn_params_parse()
7032 lpfc_issue_els_edc(phba->pport, 0); in lpfc_cgn_params_parse()
7046 "version %d\n", p_cgn_param->cgn_param_magic, in lpfc_cgn_params_parse()
7047 p_cgn_param->cgn_param_version); in lpfc_cgn_params_parse()
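The parse path above accepts a new parameter block only when its magic matches, and copies it into the live copy under the hba lock so readers never observe a torn update (the CRC refresh over the shared cgn_info region is driver-specific and elided here). A hedged userspace analogue, with a mutex standing in for spin_lock_irq() and a placeholder magic value:

#include <pthread.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PARAM_MAGIC 0xFEAA0005u    /* placeholder, not the FW value */

struct cgn_param {
	uint32_t magic;
	uint32_t mode;
};

static struct cgn_param live;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int apply_params(const struct cgn_param *nw)
{
	if (nw->magic != PARAM_MAGIC)
		return -1;                 /* reject unknown block */

	pthread_mutex_lock(&lock);
	memcpy(&live, nw, sizeof(live));   /* no torn reads for lockers */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	struct cgn_param nw = { .magic = PARAM_MAGIC, .mode = 1 };
	int rc = apply_params(&nw);

	printf("apply rc=%d, live mode now %u\n", rc, live.mode);
	return 0;
}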
7052 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7061 * a negative error code if an error was encountered in lpfc_sli4_cgn_params_read()
7106 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7128 if (!phba->sli4_hba.pc_sli4_params.cmf) { in lpfc_sli4_cgn_parm_chg_evt()
7131 return -EACCES; in lpfc_sli4_cgn_parm_chg_evt()
7151 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7163 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_sli4_async_event_proc()
7164 phba->hba_flag &= ~ASYNC_EVENT; in lpfc_sli4_async_event_proc()
7165 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_sli4_async_event_proc()
7168 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); in lpfc_sli4_async_event_proc()
7169 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { in lpfc_sli4_async_event_proc()
7170 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, in lpfc_sli4_async_event_proc()
7172 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, in lpfc_sli4_async_event_proc()
7176 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { in lpfc_sli4_async_event_proc()
7179 &cq_event->cqe.acqe_link); in lpfc_sli4_async_event_proc()
7182 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); in lpfc_sli4_async_event_proc()
7186 &cq_event->cqe.acqe_dcbx); in lpfc_sli4_async_event_proc()
7190 &cq_event->cqe.acqe_grp5); in lpfc_sli4_async_event_proc()
7193 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); in lpfc_sli4_async_event_proc()
7196 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); in lpfc_sli4_async_event_proc()
7206 &cq_event->cqe.mcqe_cmpl)); in lpfc_sli4_async_event_proc()
7212 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); in lpfc_sli4_async_event_proc()
7214 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); in lpfc_sli4_async_event_proc()
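The drain loop above pops one event at a time and drops the list lock while each event is dispatched, so interrupt context posting new async events is never held behind a slow handler. A compact userspace sketch of that lock/pop/unlock/process shape, with a singly linked list standing in for the driver's list_head queue:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	int code;
	struct event *next;
};

static struct event *queue;
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static void drain_events(void)
{
	pthread_mutex_lock(&qlock);
	while (queue) {
		struct event *ev = queue;

		queue = ev->next;
		pthread_mutex_unlock(&qlock);   /* handle unlocked */

		printf("handling event %d\n", ev->code);
		free(ev);

		pthread_mutex_lock(&qlock);     /* re-take for next pop */
	}
	pthread_mutex_unlock(&qlock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct event *ev = malloc(sizeof(*ev));

		ev->code = i;
		ev->next = queue;
		queue = ev;
	}
	drain_events();
	return 0;
}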
7218 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7228 spin_lock_irq(&phba->hbalock); in lpfc_sli4_fcf_redisc_event_proc()
7230 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; in lpfc_sli4_fcf_redisc_event_proc()
7232 phba->fcf.failover_rec.flag = 0; in lpfc_sli4_fcf_redisc_event_proc()
7234 phba->fcf.fcf_flag |= FCF_REDISC_FOV; in lpfc_sli4_fcf_redisc_event_proc()
7235 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_fcf_redisc_event_proc()
7237 /* Scan FCF table from the first entry to re-discover SAN */ in lpfc_sli4_fcf_redisc_event_proc()
7239 "2777 Start post-quiescent FCF table scan\n"); in lpfc_sli4_fcf_redisc_event_proc()
7248 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7250 * @dev_grp: The HBA PCI-Device group number.
7252 * This routine is invoked to set up the per HBA PCI-Device group function
7255 * Return: 0 if success, otherwise -ENODEV
7262 /* Set up lpfc PCI-device group */ in lpfc_api_table_setup()
7263 phba->pci_dev_grp = dev_grp; in lpfc_api_table_setup()
7267 phba->sli_rev = LPFC_SLI_REV4; in lpfc_api_table_setup()
7272 return -ENODEV; in lpfc_api_table_setup()
7276 return -ENODEV; in lpfc_api_table_setup()
7280 return -ENODEV; in lpfc_api_table_setup()
7284 return -ENODEV; in lpfc_api_table_setup()
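This setup routine is a per-device-group jump table fill: the same driver entry points resolve to SLI-3 or SLI-4 implementations chosen once at probe time, and an unknown group fails with -ENODEV. A stand-alone illustration of the idiom (names are invented):

#include <stdio.h>

enum dev_grp { DEV_GRP_SLI3, DEV_GRP_SLI4 };

struct hba {
	void (*stop_port)(struct hba *);
};

static void stop_port_s3(struct hba *h) { (void)h; puts("SLI-3 stop"); }
static void stop_port_s4(struct hba *h) { (void)h; puts("SLI-4 stop"); }

static int api_table_setup(struct hba *h, enum dev_grp grp)
{
	switch (grp) {
	case DEV_GRP_SLI3: h->stop_port = stop_port_s3; break;
	case DEV_GRP_SLI4: h->stop_port = stop_port_s4; break;
	default:           return -1;   /* -ENODEV in the driver */
	}
	return 0;
}

int main(void)
{
	struct hba h;

	if (!api_table_setup(&h, DEV_GRP_SLI4))
		h.stop_port(&h);
	return 0;
}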
7290 * lpfc_log_intr_mode - Log the active interrupt mode
7310 "0480 Enabled MSI-X interrupt mode.\n"); in lpfc_log_intr_mode()
7321 * lpfc_enable_pci_dev - Enable a generic PCI device.
7328 * 0 - successful
7329 * other values - error
7337 if (!phba->pcidev) in lpfc_enable_pci_dev()
7340 pdev = phba->pcidev; in lpfc_enable_pci_dev()
7354 pdev->needs_freset = 1; in lpfc_enable_pci_dev()
7363 return -ENODEV; in lpfc_enable_pci_dev()
7367 * lpfc_disable_pci_dev - Disable a generic PCI device.
7379 if (!phba->pcidev) in lpfc_disable_pci_dev()
7382 pdev = phba->pcidev; in lpfc_disable_pci_dev()
7391 * lpfc_reset_hba - Reset a hba
7403 if (!phba->cfg_enable_hba_reset) { in lpfc_reset_hba()
7404 phba->link_state = LPFC_HBA_ERROR; in lpfc_reset_hba()
7409 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { in lpfc_reset_hba()
7422 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7425 * This function enables the PCI SR-IOV virtual functions to a physical
7426 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7428 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7434 struct pci_dev *pdev = phba->pcidev; in lpfc_sli_sriov_nr_virtfn_get()
7447 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7451 * This function enables the PCI SR-IOV virtual functions to a physical
7452 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7454 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7460 struct pci_dev *pdev = phba->pcidev; in lpfc_sli_probe_sriov_nr_virtfn()
7469 return -EINVAL; in lpfc_sli_probe_sriov_nr_virtfn()
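Both SR-IOV helpers above ultimately defer to the PCI core, which rejects requests the device cannot back. A kernel-style sketch of clamping a VF request against what the device advertises before calling pci_enable_sriov(); the helper name and flow are assumptions for illustration, not the driver's exact logic:

#include <linux/pci.h>

static int enable_vfs(struct pci_dev *pdev, int requested)
{
	int max = pci_sriov_get_totalvfs(pdev);
	int rc;

	if (requested <= 0 || requested > max)
		return -EINVAL;             /* device can't back the request */

	rc = pci_enable_sriov(pdev, requested);
	if (rc)
		dev_warn(&pdev->dev, "SR-IOV enable failed: %d\n", rc);
	return rc;
}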
7495 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
7502 * 0 - successful
7503 * other values - error
7508 struct lpfc_sli *psli = &phba->sli; in lpfc_setup_driver_resource_phase1()
7513 atomic_set(&phba->fast_event_count, 0); in lpfc_setup_driver_resource_phase1()
7514 atomic_set(&phba->dbg_log_idx, 0); in lpfc_setup_driver_resource_phase1()
7515 atomic_set(&phba->dbg_log_cnt, 0); in lpfc_setup_driver_resource_phase1()
7516 atomic_set(&phba->dbg_log_dmping, 0); in lpfc_setup_driver_resource_phase1()
7517 spin_lock_init(&phba->hbalock); in lpfc_setup_driver_resource_phase1()
7520 spin_lock_init(&phba->port_list_lock); in lpfc_setup_driver_resource_phase1()
7521 INIT_LIST_HEAD(&phba->port_list); in lpfc_setup_driver_resource_phase1()
7523 INIT_LIST_HEAD(&phba->work_list); in lpfc_setup_driver_resource_phase1()
7524 init_waitqueue_head(&phba->wait_4_mlo_m_q); in lpfc_setup_driver_resource_phase1()
7527 init_waitqueue_head(&phba->work_waitq); in lpfc_setup_driver_resource_phase1()
7531 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? in lpfc_setup_driver_resource_phase1()
7533 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? in lpfc_setup_driver_resource_phase1()
7535 (phba->nvmet_support ? "NVMET" : " ")); in lpfc_setup_driver_resource_phase1()
7538 spin_lock_init(&phba->scsi_buf_list_get_lock); in lpfc_setup_driver_resource_phase1()
7539 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); in lpfc_setup_driver_resource_phase1()
7540 spin_lock_init(&phba->scsi_buf_list_put_lock); in lpfc_setup_driver_resource_phase1()
7541 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); in lpfc_setup_driver_resource_phase1()
7544 INIT_LIST_HEAD(&phba->fabric_iocb_list); in lpfc_setup_driver_resource_phase1()
7547 INIT_LIST_HEAD(&phba->elsbuf); in lpfc_setup_driver_resource_phase1()
7550 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); in lpfc_setup_driver_resource_phase1()
7553 spin_lock_init(&phba->devicelock); in lpfc_setup_driver_resource_phase1()
7554 INIT_LIST_HEAD(&phba->luns); in lpfc_setup_driver_resource_phase1()
7557 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); in lpfc_setup_driver_resource_phase1()
7559 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); in lpfc_setup_driver_resource_phase1()
7561 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); in lpfc_setup_driver_resource_phase1()
7563 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); in lpfc_setup_driver_resource_phase1()
7565 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); in lpfc_setup_driver_resource_phase1()
7567 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, in lpfc_setup_driver_resource_phase1()
7569 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); in lpfc_setup_driver_resource_phase1()
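Each timer_setup() call above binds a callback that receives the timer_list pointer back; the owning structure is then recovered with from_timer(). A kernel-style sketch of the pattern (the struct and names are illustrative, not the driver's):

#include <linux/timer.h>

struct my_hba {
	struct timer_list hb_tmo;
};

static void hb_timeout(struct timer_list *t)
{
	/* recover the container from the embedded timer */
	struct my_hba *hba = from_timer(hba, t, hb_tmo);

	(void)hba;   /* kick deferred heartbeat handling here */
}

static void init_timers(struct my_hba *hba)
{
	timer_setup(&hba->hb_tmo, hb_timeout, 0);
}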
7574 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7578 * support the SLI-3 HBA device it attached to.
7581 * 0 - successful
7582 * other values - error
7594 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); in lpfc_sli_driver_resource_setup()
7597 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); in lpfc_sli_driver_resource_setup()
7598 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); in lpfc_sli_driver_resource_setup()
7602 /* Set up phase-1 common device driver resources */ in lpfc_sli_driver_resource_setup()
7606 return -ENODEV; in lpfc_sli_driver_resource_setup()
7608 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { in lpfc_sli_driver_resource_setup()
7609 phba->menlo_flag |= HBA_MENLO_SUPPORT; in lpfc_sli_driver_resource_setup()
7611 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) in lpfc_sli_driver_resource_setup()
7612 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; in lpfc_sli_driver_resource_setup()
7615 if (!phba->sli.sli3_ring) in lpfc_sli_driver_resource_setup()
7616 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, in lpfc_sli_driver_resource_setup()
7619 if (!phba->sli.sli3_ring) in lpfc_sli_driver_resource_setup()
7620 return -ENOMEM; in lpfc_sli_driver_resource_setup()
7627 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_sli_driver_resource_setup()
7633 if (phba->cfg_enable_bg) { in lpfc_sli_driver_resource_setup()
7635 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, in lpfc_sli_driver_resource_setup()
7643 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + in lpfc_sli_driver_resource_setup()
7647 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) in lpfc_sli_driver_resource_setup()
7648 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; in lpfc_sli_driver_resource_setup()
7651 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; in lpfc_sli_driver_resource_setup()
7658 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + in lpfc_sli_driver_resource_setup()
7660 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); in lpfc_sli_driver_resource_setup()
7663 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; in lpfc_sli_driver_resource_setup()
7668 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, in lpfc_sli_driver_resource_setup()
7669 phba->cfg_total_seg_cnt); in lpfc_sli_driver_resource_setup()
7671 phba->max_vpi = LPFC_MAX_VPI; in lpfc_sli_driver_resource_setup()
7673 phba->max_vports = 0; in lpfc_sli_driver_resource_setup()
7683 return -ENOMEM; in lpfc_sli_driver_resource_setup()
7685 phba->lpfc_sg_dma_buf_pool = in lpfc_sli_driver_resource_setup()
7687 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, in lpfc_sli_driver_resource_setup()
7690 if (!phba->lpfc_sg_dma_buf_pool) in lpfc_sli_driver_resource_setup()
7693 phba->lpfc_cmd_rsp_buf_pool = in lpfc_sli_driver_resource_setup()
7695 &phba->pcidev->dev, in lpfc_sli_driver_resource_setup()
7700 if (!phba->lpfc_cmd_rsp_buf_pool) in lpfc_sli_driver_resource_setup()
7704 * Enable sr-iov virtual functions if supported and configured in lpfc_sli_driver_resource_setup()
7707 if (phba->cfg_sriov_nr_virtfn > 0) { in lpfc_sli_driver_resource_setup()
7709 phba->cfg_sriov_nr_virtfn); in lpfc_sli_driver_resource_setup()
7712 "2808 Requested number of SR-IOV " in lpfc_sli_driver_resource_setup()
7715 phba->cfg_sriov_nr_virtfn); in lpfc_sli_driver_resource_setup()
7716 phba->cfg_sriov_nr_virtfn = 0; in lpfc_sli_driver_resource_setup()
7723 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); in lpfc_sli_driver_resource_setup()
7724 phba->lpfc_sg_dma_buf_pool = NULL; in lpfc_sli_driver_resource_setup()
7727 return -ENOMEM; in lpfc_sli_driver_resource_setup()
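The sizing above packs the FCP command, the FCP response and (sg_seg_cnt + 2) BDEs into one contiguous DMA buffer that the dma_pool then hands out per I/O. A kernel-style sketch of the same arithmetic feeding dma_pool_create(); the names and the 512-byte alignment are assumptions for illustration (the alignment argument must be a power of two):

#include <linux/dmapool.h>

static struct dma_pool *make_io_buf_pool(struct device *dev, u32 seg_cnt,
					 size_t entry_sz, size_t cmnd_sz,
					 size_t rsp_sz)
{
	/* cmnd + rsp + one BDE per data segment, plus the cmd/rsp BDEs */
	size_t buf_sz = cmnd_sz + rsp_sz + (seg_cnt + 2) * entry_sz;

	return dma_pool_create("io_buf_pool", dev, buf_sz, 512, 0);
}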
7731 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7735 * specific for supporting the SLI-3 HBA device it attached to.
7747 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7751 * support the SLI-4 HBA device it attached to.
7754 * 0 - successful
7755 * other values - error
7769 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; in lpfc_sli4_driver_resource_setup()
7770 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; in lpfc_sli4_driver_resource_setup()
7771 phba->sli4_hba.curr_disp_cpu = 0; in lpfc_sli4_driver_resource_setup()
7776 /* Set up phase-1 common device driver resources */ in lpfc_sli4_driver_resource_setup()
7779 return -ENODEV; in lpfc_sli4_driver_resource_setup()
7784 return -ENODEV; in lpfc_sli4_driver_resource_setup()
7789 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); in lpfc_sli4_driver_resource_setup()
7795 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); in lpfc_sli4_driver_resource_setup()
7798 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); in lpfc_sli4_driver_resource_setup()
7801 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in lpfc_sli4_driver_resource_setup()
7802 phba->cmf_timer.function = lpfc_cmf_timer; in lpfc_sli4_driver_resource_setup()
7805 * Control structure for handling external multi-buffer mailbox in lpfc_sli4_driver_resource_setup()
7806 * command pass-through. in lpfc_sli4_driver_resource_setup()
7808 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, in lpfc_sli4_driver_resource_setup()
7810 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); in lpfc_sli4_driver_resource_setup()
7812 phba->max_vpi = LPFC_MAX_VPI; in lpfc_sli4_driver_resource_setup()
7815 phba->max_vports = 0; in lpfc_sli4_driver_resource_setup()
7818 phba->valid_vlan = 0; in lpfc_sli4_driver_resource_setup()
7819 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; in lpfc_sli4_driver_resource_setup()
7820 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; in lpfc_sli4_driver_resource_setup()
7821 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; in lpfc_sli4_driver_resource_setup()
7830 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); in lpfc_sli4_driver_resource_setup()
7831 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; in lpfc_sli4_driver_resource_setup()
7832 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; in lpfc_sli4_driver_resource_setup()
7836 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); in lpfc_sli4_driver_resource_setup()
7842 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); in lpfc_sli4_driver_resource_setup()
7843 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); in lpfc_sli4_driver_resource_setup()
7845 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_driver_resource_setup()
7847 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_sli4_driver_resource_setup()
7848 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); in lpfc_sli4_driver_resource_setup()
7849 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); in lpfc_sli4_driver_resource_setup()
7850 spin_lock_init(&phba->sli4_hba.t_active_list_lock); in lpfc_sli4_driver_resource_setup()
7851 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); in lpfc_sli4_driver_resource_setup()
7855 spin_lock_init(&phba->sli4_hba.sgl_list_lock); in lpfc_sli4_driver_resource_setup()
7856 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); in lpfc_sli4_driver_resource_setup()
7857 spin_lock_init(&phba->sli4_hba.asynce_list_lock); in lpfc_sli4_driver_resource_setup()
7858 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); in lpfc_sli4_driver_resource_setup()
7861 * Initialize driver internal slow-path work queues in lpfc_sli4_driver_resource_setup()
7864 /* Driver internal slow-path CQ Event pool */ in lpfc_sli4_driver_resource_setup()
7865 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); in lpfc_sli4_driver_resource_setup()
7867 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); in lpfc_sli4_driver_resource_setup()
7869 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); in lpfc_sli4_driver_resource_setup()
7870 /* Slow-path XRI aborted CQ Event work queue list */ in lpfc_sli4_driver_resource_setup()
7871 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); in lpfc_sli4_driver_resource_setup()
7873 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); in lpfc_sli4_driver_resource_setup()
7876 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); in lpfc_sli4_driver_resource_setup()
7877 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); in lpfc_sli4_driver_resource_setup()
7878 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); in lpfc_sli4_driver_resource_setup()
7879 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); in lpfc_sli4_driver_resource_setup()
7884 INIT_LIST_HEAD(&phba->sli.mboxq); in lpfc_sli4_driver_resource_setup()
7885 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); in lpfc_sli4_driver_resource_setup()
7888 phba->sli4_hba.lnk_info.optic_state = 0xff; in lpfc_sli4_driver_resource_setup()
7893 return -ENOMEM; in lpfc_sli4_driver_resource_setup()
7896 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= in lpfc_sli4_driver_resource_setup()
7900 rc = -ENODEV; in lpfc_sli4_driver_resource_setup()
7903 phba->temp_sensor_support = 1; in lpfc_sli4_driver_resource_setup()
7925 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == in lpfc_sli4_driver_resource_setup()
7932 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, in lpfc_sli4_driver_resource_setup()
7935 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
7940 phba->nvmet_support = 0; in lpfc_sli4_driver_resource_setup()
7951 bf_get(lpfc_mqe_command, &mboxq->u.mqe), in lpfc_sli4_driver_resource_setup()
7952 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); in lpfc_sli4_driver_resource_setup()
7953 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_driver_resource_setup()
7954 rc = -EIO; in lpfc_sli4_driver_resource_setup()
7957 mb = &mboxq->u.mb; in lpfc_sli4_driver_resource_setup()
7958 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, in lpfc_sli4_driver_resource_setup()
7961 phba->sli4_hba.wwnn.u.name = wwn; in lpfc_sli4_driver_resource_setup()
7962 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, in lpfc_sli4_driver_resource_setup()
7966 phba->sli4_hba.wwpn.u.name = wwn; in lpfc_sli4_driver_resource_setup()
7975 phba->nvmet_support = 1; /* a match */ in lpfc_sli4_driver_resource_setup()
7989 phba->cfg_xri_rebalancing = 0; in lpfc_sli4_driver_resource_setup()
7990 if (phba->irq_chann_mode == NHT_MODE) { in lpfc_sli4_driver_resource_setup()
7991 phba->cfg_irq_chann = in lpfc_sli4_driver_resource_setup()
7992 phba->sli4_hba.num_present_cpu; in lpfc_sli4_driver_resource_setup()
7993 phba->cfg_hdw_queue = in lpfc_sli4_driver_resource_setup()
7994 phba->sli4_hba.num_present_cpu; in lpfc_sli4_driver_resource_setup()
7995 phba->irq_chann_mode = NORMAL_MODE; in lpfc_sli4_driver_resource_setup()
8012 &phba->sli4_hba.sli_intf); in lpfc_sli4_driver_resource_setup()
8014 &phba->sli4_hba.sli_intf); in lpfc_sli4_driver_resource_setup()
8015 if (phba->sli4_hba.extents_in_use && in lpfc_sli4_driver_resource_setup()
8016 phba->sli4_hba.rpi_hdrs_in_use) { in lpfc_sli4_driver_resource_setup()
8022 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_driver_resource_setup()
8023 rc = -EIO; in lpfc_sli4_driver_resource_setup()
8029 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_driver_resource_setup()
8030 rc = -EIO; in lpfc_sli4_driver_resource_setup()
8040 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) in lpfc_sli4_driver_resource_setup()
8054 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { in lpfc_sli4_driver_resource_setup()
8058 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, in lpfc_sli4_driver_resource_setup()
8066 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + in lpfc_sli4_driver_resource_setup()
8070 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; in lpfc_sli4_driver_resource_setup()
8076 if (phba->cfg_enable_bg && in lpfc_sli4_driver_resource_setup()
8077 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) in lpfc_sli4_driver_resource_setup()
8078 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; in lpfc_sli4_driver_resource_setup()
8080 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; in lpfc_sli4_driver_resource_setup()
8088 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + in lpfc_sli4_driver_resource_setup()
8090 ((phba->cfg_sg_seg_cnt + extra) * in lpfc_sli4_driver_resource_setup()
8094 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; in lpfc_sli4_driver_resource_setup()
8095 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; in lpfc_sli4_driver_resource_setup()
8098 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only in lpfc_sli4_driver_resource_setup()
8103 if (phba->cfg_xpsgl && !phba->nvmet_support) in lpfc_sli4_driver_resource_setup()
8104 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; in lpfc_sli4_driver_resource_setup()
8105 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) in lpfc_sli4_driver_resource_setup()
8106 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; in lpfc_sli4_driver_resource_setup()
8108 phba->cfg_sg_dma_buf_size = in lpfc_sli4_driver_resource_setup()
8109 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); in lpfc_sli4_driver_resource_setup()
8111 phba->border_sge_num = phba->cfg_sg_dma_buf_size / in lpfc_sli4_driver_resource_setup()
8115 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_driver_resource_setup()
8116 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { in lpfc_sli4_driver_resource_setup()
8121 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; in lpfc_sli4_driver_resource_setup()
8123 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; in lpfc_sli4_driver_resource_setup()
8129 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, in lpfc_sli4_driver_resource_setup()
8130 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, in lpfc_sli4_driver_resource_setup()
8131 phba->cfg_nvme_seg_cnt); in lpfc_sli4_driver_resource_setup()
8133 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) in lpfc_sli4_driver_resource_setup()
8134 i = phba->cfg_sg_dma_buf_size; in lpfc_sli4_driver_resource_setup()
8138 phba->lpfc_sg_dma_buf_pool = in lpfc_sli4_driver_resource_setup()
8140 &phba->pcidev->dev, in lpfc_sli4_driver_resource_setup()
8141 phba->cfg_sg_dma_buf_size, in lpfc_sli4_driver_resource_setup()
8143 if (!phba->lpfc_sg_dma_buf_pool) in lpfc_sli4_driver_resource_setup()
8146 phba->lpfc_cmd_rsp_buf_pool = in lpfc_sli4_driver_resource_setup()
8148 &phba->pcidev->dev, in lpfc_sli4_driver_resource_setup()
8152 if (!phba->lpfc_cmd_rsp_buf_pool) in lpfc_sli4_driver_resource_setup()
8155 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_driver_resource_setup()
8191 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; in lpfc_sli4_driver_resource_setup()
8192 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), in lpfc_sli4_driver_resource_setup()
8194 if (!phba->fcf.fcf_rr_bmask) { in lpfc_sli4_driver_resource_setup()
8198 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8202 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, in lpfc_sli4_driver_resource_setup()
8205 if (!phba->sli4_hba.hba_eq_hdl) { in lpfc_sli4_driver_resource_setup()
8208 "fast-path per-EQ handle array\n"); in lpfc_sli4_driver_resource_setup()
8209 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8213 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, in lpfc_sli4_driver_resource_setup()
8216 if (!phba->sli4_hba.cpu_map) { in lpfc_sli4_driver_resource_setup()
8218 "3327 Failed allocate memory for msi-x " in lpfc_sli4_driver_resource_setup()
8220 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8224 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); in lpfc_sli4_driver_resource_setup()
8225 if (!phba->sli4_hba.eq_info) { in lpfc_sli4_driver_resource_setup()
8228 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8232 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, in lpfc_sli4_driver_resource_setup()
8233 sizeof(*phba->sli4_hba.idle_stat), in lpfc_sli4_driver_resource_setup()
8235 if (!phba->sli4_hba.idle_stat) { in lpfc_sli4_driver_resource_setup()
8238 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8243 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); in lpfc_sli4_driver_resource_setup()
8244 if (!phba->sli4_hba.c_stat) { in lpfc_sli4_driver_resource_setup()
8246 "3332 Failed allocating per cpu hdwq stats\n"); in lpfc_sli4_driver_resource_setup()
8247 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8252 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); in lpfc_sli4_driver_resource_setup()
8253 if (!phba->cmf_stat) { in lpfc_sli4_driver_resource_setup()
8255 "3331 Failed allocating per cpu cgn stats\n"); in lpfc_sli4_driver_resource_setup()
8256 rc = -ENOMEM; in lpfc_sli4_driver_resource_setup()
8261 * Enable sr-iov virtual functions if supported and configured in lpfc_sli4_driver_resource_setup()
8264 if (phba->cfg_sriov_nr_virtfn > 0) { in lpfc_sli4_driver_resource_setup()
8266 phba->cfg_sriov_nr_virtfn); in lpfc_sli4_driver_resource_setup()
8269 "3020 Requested number of SR-IOV " in lpfc_sli4_driver_resource_setup()
8272 phba->cfg_sriov_nr_virtfn); in lpfc_sli4_driver_resource_setup()
8273 phba->cfg_sriov_nr_virtfn = 0; in lpfc_sli4_driver_resource_setup()
8281 free_percpu(phba->sli4_hba.c_stat); in lpfc_sli4_driver_resource_setup()
8284 kfree(phba->sli4_hba.idle_stat); in lpfc_sli4_driver_resource_setup()
8286 free_percpu(phba->sli4_hba.eq_info); in lpfc_sli4_driver_resource_setup()
8288 kfree(phba->sli4_hba.cpu_map); in lpfc_sli4_driver_resource_setup()
8290 kfree(phba->sli4_hba.hba_eq_hdl); in lpfc_sli4_driver_resource_setup()
8292 kfree(phba->fcf.fcf_rr_bmask); in lpfc_sli4_driver_resource_setup()
8300 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); in lpfc_sli4_driver_resource_setup()
8301 phba->lpfc_cmd_rsp_buf_pool = NULL; in lpfc_sli4_driver_resource_setup()
8303 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); in lpfc_sli4_driver_resource_setup()
8304 phba->lpfc_sg_dma_buf_pool = NULL; in lpfc_sli4_driver_resource_setup()
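The error unwinding above (the out_free_* labels) releases resources in reverse order of allocation. A kernel-style sketch of the alloc_percpu()/free_percpu() pairing used for the eq_info, c_stat and cmf_stat counters; free_percpu() tolerates a NULL argument, which keeps teardown paths simple:

#include <linux/percpu.h>

struct eq_intr_info { u32 icnt; };

static struct eq_intr_info __percpu *eq_info;

static int setup_percpu_stats(void)
{
	eq_info = alloc_percpu(struct eq_intr_info);
	if (!eq_info)
		return -ENOMEM;
	return 0;
}

static void teardown_percpu_stats(void)
{
	free_percpu(eq_info);   /* safe on NULL */
	eq_info = NULL;
}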
8313 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8317 * specific for supporting the SLI-4 HBA device it attached to.
8324 free_percpu(phba->sli4_hba.eq_info); in lpfc_sli4_driver_resource_unset()
8326 free_percpu(phba->sli4_hba.c_stat); in lpfc_sli4_driver_resource_unset()
8328 free_percpu(phba->cmf_stat); in lpfc_sli4_driver_resource_unset()
8329 kfree(phba->sli4_hba.idle_stat); in lpfc_sli4_driver_resource_unset()
8331 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ in lpfc_sli4_driver_resource_unset()
8332 kfree(phba->sli4_hba.cpu_map); in lpfc_sli4_driver_resource_unset()
8333 phba->sli4_hba.num_possible_cpu = 0; in lpfc_sli4_driver_resource_unset()
8334 phba->sli4_hba.num_present_cpu = 0; in lpfc_sli4_driver_resource_unset()
8335 phba->sli4_hba.curr_disp_cpu = 0; in lpfc_sli4_driver_resource_unset()
8336 cpumask_clear(&phba->sli4_hba.irq_aff_mask); in lpfc_sli4_driver_resource_unset()
8338 /* Free memory allocated for fast-path work queue handles */ in lpfc_sli4_driver_resource_unset()
8339 kfree(phba->sli4_hba.hba_eq_hdl); in lpfc_sli4_driver_resource_unset()
8346 kfree(phba->fcf.fcf_rr_bmask); in lpfc_sli4_driver_resource_unset()
8368 &phba->fcf_conn_rec_list, list) { in lpfc_sli4_driver_resource_unset()
8369 list_del_init(&conn_entry->list); in lpfc_sli4_driver_resource_unset()
8377 * lpfc_init_api_table_setup - Set up init api function jump table
8379 * @dev_grp: The HBA PCI-Device group number.
8384 * Returns: 0 - success, -ENODEV - failure.
8389 phba->lpfc_hba_init_link = lpfc_hba_init_link; in lpfc_init_api_table_setup()
8390 phba->lpfc_hba_down_link = lpfc_hba_down_link; in lpfc_init_api_table_setup()
8391 phba->lpfc_selective_reset = lpfc_selective_reset; in lpfc_init_api_table_setup()
8394 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; in lpfc_init_api_table_setup()
8395 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; in lpfc_init_api_table_setup()
8396 phba->lpfc_stop_port = lpfc_stop_port_s3; in lpfc_init_api_table_setup()
8399 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; in lpfc_init_api_table_setup()
8400 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; in lpfc_init_api_table_setup()
8401 phba->lpfc_stop_port = lpfc_stop_port_s4; in lpfc_init_api_table_setup()
8405 "1431 Invalid HBA PCI-device group: 0x%x\n", in lpfc_init_api_table_setup()
8407 return -ENODEV; in lpfc_init_api_table_setup()
8413 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8420 * 0 - successful
8421 * other values - error
8429 phba->worker_thread = kthread_run(lpfc_do_work, phba, in lpfc_setup_driver_resource_phase2()
8430 "lpfc_worker_%d", phba->brd_no); in lpfc_setup_driver_resource_phase2()
8431 if (IS_ERR(phba->worker_thread)) { in lpfc_setup_driver_resource_phase2()
8432 error = PTR_ERR(phba->worker_thread); in lpfc_setup_driver_resource_phase2()
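kthread_run() encodes failure as an errno inside the returned pointer, which is why the worker creation above tests IS_ERR() and extracts the code with PTR_ERR() rather than comparing against NULL. A kernel-style sketch of the idiom (the thread body is a placeholder):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static int my_work(void *data)
{
	while (!kthread_should_stop())
		msleep(100);            /* placeholder work loop */
	return 0;
}

static struct task_struct *worker;

static int start_worker(int instance)
{
	worker = kthread_run(my_work, NULL, "my_worker_%d", instance);
	if (IS_ERR(worker))
		return PTR_ERR(worker); /* errno encoded in the pointer */
	return 0;
}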
8440 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8450 if (phba->wq) { in lpfc_unset_driver_resource_phase2()
8451 flush_workqueue(phba->wq); in lpfc_unset_driver_resource_phase2()
8452 destroy_workqueue(phba->wq); in lpfc_unset_driver_resource_phase2()
8453 phba->wq = NULL; in lpfc_unset_driver_resource_phase2()
8457 if (phba->worker_thread) in lpfc_unset_driver_resource_phase2()
8458 kthread_stop(phba->worker_thread); in lpfc_unset_driver_resource_phase2()
8462 * lpfc_free_iocb_list - Free iocb list.
8472 spin_lock_irq(&phba->hbalock); in lpfc_free_iocb_list()
8474 &phba->lpfc_iocb_list, list) { in lpfc_free_iocb_list()
8475 list_del(&iocbq_entry->list); in lpfc_free_iocb_list()
8477 phba->total_iocbq_bufs--; in lpfc_free_iocb_list()
8479 spin_unlock_irq(&phba->hbalock); in lpfc_free_iocb_list()
8485 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8493 * 0 - successful
8494 * other values - error
8504 INIT_LIST_HEAD(&phba->lpfc_iocb_list); in lpfc_init_iocb_list()
8521 iocbq_entry->sli4_lxritag = NO_XRI; in lpfc_init_iocb_list()
8522 iocbq_entry->sli4_xritag = NO_XRI; in lpfc_init_iocb_list()
8524 spin_lock_irq(&phba->hbalock); in lpfc_init_iocb_list()
8525 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); in lpfc_init_iocb_list()
8526 phba->total_iocbq_bufs++; in lpfc_init_iocb_list()
8527 spin_unlock_irq(&phba->hbalock); in lpfc_init_iocb_list()
8535 return -ENOMEM; in lpfc_init_iocb_list()
8539 * lpfc_free_sgl_list - Free a given sgl list.
8551 list_del(&sglq_entry->list); in lpfc_free_sgl_list()
8552 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); in lpfc_free_sgl_list()
8558 * lpfc_free_els_sgl_list - Free els sgl list.
8569 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); in lpfc_free_els_sgl_list()
8570 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); in lpfc_free_els_sgl_list()
8571 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); in lpfc_free_els_sgl_list()
8578 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8590 spin_lock_irq(&phba->hbalock); in lpfc_free_nvmet_sgl_list()
8591 spin_lock(&phba->sli4_hba.sgl_list_lock); in lpfc_free_nvmet_sgl_list()
8592 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); in lpfc_free_nvmet_sgl_list()
8593 spin_unlock(&phba->sli4_hba.sgl_list_lock); in lpfc_free_nvmet_sgl_list()
8594 spin_unlock_irq(&phba->hbalock); in lpfc_free_nvmet_sgl_list()
8598 list_del(&sglq_entry->list); in lpfc_free_nvmet_sgl_list()
8599 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); in lpfc_free_nvmet_sgl_list()
8607 phba->sli4_hba.nvmet_xri_cnt = 0; in lpfc_free_nvmet_sgl_list()
8611 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8622 size *= phba->sli4_hba.max_cfg_param.max_xri; in lpfc_init_active_sgl_array()
8624 phba->sli4_hba.lpfc_sglq_active_list = in lpfc_init_active_sgl_array()
8626 if (!phba->sli4_hba.lpfc_sglq_active_list) in lpfc_init_active_sgl_array()
8627 return -ENOMEM; in lpfc_init_active_sgl_array()
8632 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8642 kfree(phba->sli4_hba.lpfc_sglq_active_list); in lpfc_free_active_sgl()
8646 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8657 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); in lpfc_init_sgl_list()
8658 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); in lpfc_init_sgl_list()
8659 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); in lpfc_init_sgl_list()
8660 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); in lpfc_init_sgl_list()
8662 /* els xri-sgl book keeping */ in lpfc_init_sgl_list()
8663 phba->sli4_hba.els_xri_cnt = 0; in lpfc_init_sgl_list()
8665 /* nvme xri-buffer book keeping */ in lpfc_init_sgl_list()
8666 phba->sli4_hba.io_xri_cnt = 0; in lpfc_init_sgl_list()
8670 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8680 * 0 - successful
8681 * -ERROR - otherwise.
8689 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); in lpfc_sli4_init_rpi_hdrs()
8690 if (!phba->sli4_hba.rpi_hdrs_in_use) in lpfc_sli4_init_rpi_hdrs()
8692 if (phba->sli4_hba.extents_in_use) in lpfc_sli4_init_rpi_hdrs()
8693 return -EIO; in lpfc_sli4_init_rpi_hdrs()
8700 rc = -ENODEV; in lpfc_sli4_init_rpi_hdrs()
8707 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8731 if (!phba->sli4_hba.rpi_hdrs_in_use) in lpfc_sli4_create_rpi_hdr()
8733 if (phba->sli4_hba.extents_in_use) in lpfc_sli4_create_rpi_hdr()
8737 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; in lpfc_sli4_create_rpi_hdr()
8739 spin_lock_irq(&phba->hbalock); in lpfc_sli4_create_rpi_hdr()
8745 curr_rpi_range = phba->sli4_hba.next_rpi; in lpfc_sli4_create_rpi_hdr()
8746 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_create_rpi_hdr()
8754 * port expects a 4KB DMA-mapped memory region that is 4K aligned. in lpfc_sli4_create_rpi_hdr()
8760 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, in lpfc_sli4_create_rpi_hdr()
8762 &dmabuf->phys, GFP_KERNEL); in lpfc_sli4_create_rpi_hdr()
8763 if (!dmabuf->virt) { in lpfc_sli4_create_rpi_hdr()
8768 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { in lpfc_sli4_create_rpi_hdr()
8778 rpi_hdr->dmabuf = dmabuf; in lpfc_sli4_create_rpi_hdr()
8779 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; in lpfc_sli4_create_rpi_hdr()
8780 rpi_hdr->page_count = 1; in lpfc_sli4_create_rpi_hdr()
8781 spin_lock_irq(&phba->hbalock); in lpfc_sli4_create_rpi_hdr()
8784 rpi_hdr->start_rpi = curr_rpi_range; in lpfc_sli4_create_rpi_hdr()
8785 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; in lpfc_sli4_create_rpi_hdr()
8786 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); in lpfc_sli4_create_rpi_hdr()
8788 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_create_rpi_hdr()
8792 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, in lpfc_sli4_create_rpi_hdr()
8793 dmabuf->virt, dmabuf->phys); in lpfc_sli4_create_rpi_hdr()
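The port requires each RPI header template to sit in a 4K-aligned DMA region; the allocation above verifies this with IS_ALIGNED() and backs out if the constraint is not met. An abbreviated kernel-style sketch of that allocate-check-or-release flow (the helper name is illustrative):

#include <linux/dma-mapping.h>

#define HDR_TEMPLATE_SIZE 4096

static void *alloc_rpi_template(struct device *dev, dma_addr_t *phys)
{
	void *virt = dma_alloc_coherent(dev, HDR_TEMPLATE_SIZE, phys,
					GFP_KERNEL);

	if (virt && !IS_ALIGNED(*phys, HDR_TEMPLATE_SIZE)) {
		/* port demands 4K alignment; give the buffer back */
		dma_free_coherent(dev, HDR_TEMPLATE_SIZE, virt, *phys);
		return NULL;
	}
	return virt;
}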
8800 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8813 if (!phba->sli4_hba.rpi_hdrs_in_use) in lpfc_sli4_remove_rpi_hdrs()
8817 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { in lpfc_sli4_remove_rpi_hdrs()
8818 list_del(&rpi_hdr->list); in lpfc_sli4_remove_rpi_hdrs()
8819 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, in lpfc_sli4_remove_rpi_hdrs()
8820 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); in lpfc_sli4_remove_rpi_hdrs()
8821 kfree(rpi_hdr->dmabuf); in lpfc_sli4_remove_rpi_hdrs()
8826 phba->sli4_hba.next_rpi = 0; in lpfc_sli4_remove_rpi_hdrs()
8830 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
8838 * pointer to @phba - successful
8839 * NULL - error
8849 dev_err(&pdev->dev, "failed to allocate hba struct\n"); in lpfc_hba_alloc()
8854 phba->pcidev = pdev; in lpfc_hba_alloc()
8857 phba->brd_no = lpfc_get_instance(); in lpfc_hba_alloc()
8858 if (phba->brd_no < 0) { in lpfc_hba_alloc()
8862 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; in lpfc_hba_alloc()
8864 spin_lock_init(&phba->ct_ev_lock); in lpfc_hba_alloc()
8865 INIT_LIST_HEAD(&phba->ct_ev_waiters); in lpfc_hba_alloc()
8871 * lpfc_hba_free - Free driver hba data structure with a device.
8880 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_hba_free()
8881 kfree(phba->sli4_hba.hdwq); in lpfc_hba_free()
8884 idr_remove(&lpfc_hba_index, phba->brd_no); in lpfc_hba_free()
8887 kfree(phba->sli.sli3_ring); in lpfc_hba_free()
8888 phba->sli.sli3_ring = NULL; in lpfc_hba_free()
8895 * lpfc_create_shost - Create hba physical port with associated scsi host.
8902 * 0 - successful
8903 * other values - error
8912 phba->fc_edtov = FF_DEF_EDTOV; in lpfc_create_shost()
8913 phba->fc_ratov = FF_DEF_RATOV; in lpfc_create_shost()
8914 phba->fc_altov = FF_DEF_ALTOV; in lpfc_create_shost()
8915 phba->fc_arbtov = FF_DEF_ARBTOV; in lpfc_create_shost()
8917 atomic_set(&phba->sdev_cnt, 0); in lpfc_create_shost()
8918 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); in lpfc_create_shost()
8920 return -ENODEV; in lpfc_create_shost()
8923 phba->pport = vport; in lpfc_create_shost()
8925 if (phba->nvmet_support) { in lpfc_create_shost()
8927 phba->targetport = NULL; in lpfc_create_shost()
8928 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; in lpfc_create_shost()
8935 pci_set_drvdata(phba->pcidev, shost); in lpfc_create_shost()
8941 vport->load_flag |= FC_ALLOW_FDMI; in lpfc_create_shost()
8942 if (phba->cfg_enable_SmartSAN || in lpfc_create_shost()
8943 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { in lpfc_create_shost()
8946 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; in lpfc_create_shost()
8947 if (phba->cfg_enable_SmartSAN) in lpfc_create_shost()
8948 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; in lpfc_create_shost()
8950 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; in lpfc_create_shost()
8956 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
8965 struct lpfc_vport *vport = phba->pport; in lpfc_destroy_shost()
8974 * lpfc_setup_bg - Setup Block guard structures and debug areas.
8987 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { in lpfc_setup_bg()
8992 old_mask = phba->cfg_prot_mask; in lpfc_setup_bg()
8993 old_guard = phba->cfg_prot_guard; in lpfc_setup_bg()
8996 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | in lpfc_setup_bg()
8999 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | in lpfc_setup_bg()
9003 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) in lpfc_setup_bg()
9004 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; in lpfc_setup_bg()
9006 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { in lpfc_setup_bg()
9007 if ((old_mask != phba->cfg_prot_mask) || in lpfc_setup_bg()
9008 (old_guard != phba->cfg_prot_guard)) in lpfc_setup_bg()
9012 phba->cfg_prot_mask, in lpfc_setup_bg()
9013 phba->cfg_prot_guard); in lpfc_setup_bg()
9015 scsi_host_set_prot(shost, phba->cfg_prot_mask); in lpfc_setup_bg()
9016 scsi_host_set_guard(shost, phba->cfg_prot_guard); in lpfc_setup_bg()
9026 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9039 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); in lpfc_post_init_setup()
9045 shost = pci_get_drvdata(phba->pcidev); in lpfc_post_init_setup()
9046 shost->can_queue = phba->cfg_hba_queue_depth - 10; in lpfc_post_init_setup()
9050 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { in lpfc_post_init_setup()
9051 spin_lock_irq(shost->host_lock); in lpfc_post_init_setup()
9053 spin_unlock_irq(shost->host_lock); in lpfc_post_init_setup()
9069 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9073 * with SLI-3 interface spec.
9076 * 0 - successful
9077 * other values - error
9082 struct pci_dev *pdev = phba->pcidev; in lpfc_sli_pci_mem_setup()
9089 return -ENODEV; in lpfc_sli_pci_mem_setup()
9092 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in lpfc_sli_pci_mem_setup()
9094 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in lpfc_sli_pci_mem_setup()
9097 error = -ENODEV; in lpfc_sli_pci_mem_setup()
9102 phba->pci_bar0_map = pci_resource_start(pdev, 0); in lpfc_sli_pci_mem_setup()
9105 phba->pci_bar2_map = pci_resource_start(pdev, 2); in lpfc_sli_pci_mem_setup()
9109 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); in lpfc_sli_pci_mem_setup()
9110 if (!phba->slim_memmap_p) { in lpfc_sli_pci_mem_setup()
9111 dev_printk(KERN_ERR, &pdev->dev, in lpfc_sli_pci_mem_setup()
9117 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); in lpfc_sli_pci_mem_setup()
9118 if (!phba->ctrl_regs_memmap_p) { in lpfc_sli_pci_mem_setup()
9119 dev_printk(KERN_ERR, &pdev->dev, in lpfc_sli_pci_mem_setup()
9124 /* Allocate memory for SLI-2 structures */ in lpfc_sli_pci_mem_setup()
9125 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, in lpfc_sli_pci_mem_setup()
9126 &phba->slim2p.phys, GFP_KERNEL); in lpfc_sli_pci_mem_setup()
9127 if (!phba->slim2p.virt) in lpfc_sli_pci_mem_setup()
9130 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); in lpfc_sli_pci_mem_setup()
9131 phba->mbox_ext = (phba->slim2p.virt + in lpfc_sli_pci_mem_setup()
9133 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); in lpfc_sli_pci_mem_setup()
9134 phba->IOCBs = (phba->slim2p.virt + in lpfc_sli_pci_mem_setup()
9137 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, in lpfc_sli_pci_mem_setup()
9139 &phba->hbqslimp.phys, in lpfc_sli_pci_mem_setup()
9141 if (!phba->hbqslimp.virt) in lpfc_sli_pci_mem_setup()
9145 ptr = phba->hbqslimp.virt; in lpfc_sli_pci_mem_setup()
9147 phba->hbqs[i].hbq_virt = ptr; in lpfc_sli_pci_mem_setup()
9148 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); in lpfc_sli_pci_mem_setup()
9149 ptr += (lpfc_hbq_defs[i]->entry_count * in lpfc_sli_pci_mem_setup()
9152 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; in lpfc_sli_pci_mem_setup()
9153 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; in lpfc_sli_pci_mem_setup()
9155 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); in lpfc_sli_pci_mem_setup()
9157 phba->MBslimaddr = phba->slim_memmap_p; in lpfc_sli_pci_mem_setup()
9158 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; in lpfc_sli_pci_mem_setup()
9159 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; in lpfc_sli_pci_mem_setup()
9160 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; in lpfc_sli_pci_mem_setup()
9161 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; in lpfc_sli_pci_mem_setup()
9166 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, in lpfc_sli_pci_mem_setup()
9167 phba->slim2p.virt, phba->slim2p.phys); in lpfc_sli_pci_mem_setup()
9169 iounmap(phba->ctrl_regs_memmap_p); in lpfc_sli_pci_mem_setup()
9171 iounmap(phba->slim_memmap_p); in lpfc_sli_pci_mem_setup()
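/*
 * A minimal sketch of the 64-then-32-bit DMA mask fallback used above.
 * This is the generic pattern for a PCI probe path rather than
 * lpfc-specific code; the function name is illustrative only.
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
        int err;

        /* Prefer full 64-bit streaming and coherent DMA addressing. */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
                /* Platform or device cannot do 64-bit; fall back to 32. */
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        return err;     /* non-zero means no usable DMA addressing */
}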
9177 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9181 * with SLI-3 interface spec.
9189 if (!phba->pcidev) in lpfc_sli_pci_mem_unset()
9192 pdev = phba->pcidev; in lpfc_sli_pci_mem_unset()
9195 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), in lpfc_sli_pci_mem_unset()
9196 phba->hbqslimp.virt, phba->hbqslimp.phys); in lpfc_sli_pci_mem_unset()
9197 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, in lpfc_sli_pci_mem_unset()
9198 phba->slim2p.virt, phba->slim2p.phys); in lpfc_sli_pci_mem_unset()
9201 iounmap(phba->ctrl_regs_memmap_p); in lpfc_sli_pci_mem_unset()
9202 iounmap(phba->slim_memmap_p); in lpfc_sli_pci_mem_unset()
9208 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9214 * Return 0 if successful, otherwise -ENODEV.
9226 if (!phba->sli4_hba.PSMPHRregaddr) in lpfc_sli4_post_status_check()
9227 return -ENODEV; in lpfc_sli4_post_status_check()
9231 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, in lpfc_sli4_post_status_check()
9235 port_error = -ENODEV; in lpfc_sli4_post_status_check()
9250 "1408 Port Failed POST - portsmphr=0x%x, " in lpfc_sli4_post_status_check()
9268 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9270 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9272 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9274 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9276 &phba->sli4_hba.sli_intf), in lpfc_sli4_post_status_check()
9278 &phba->sli4_hba.sli_intf)); in lpfc_sli4_post_status_check()
9285 &phba->sli4_hba.sli_intf); in lpfc_sli4_post_status_check()
9288 phba->sli4_hba.ue_mask_lo = in lpfc_sli4_post_status_check()
9289 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); in lpfc_sli4_post_status_check()
9290 phba->sli4_hba.ue_mask_hi = in lpfc_sli4_post_status_check()
9291 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); in lpfc_sli4_post_status_check()
9293 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); in lpfc_sli4_post_status_check()
9295 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); in lpfc_sli4_post_status_check()
9296 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || in lpfc_sli4_post_status_check()
9297 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { in lpfc_sli4_post_status_check()
9308 phba->sli4_hba.ue_mask_lo, in lpfc_sli4_post_status_check()
9309 phba->sli4_hba.ue_mask_hi); in lpfc_sli4_post_status_check()
9310 port_error = -ENODEV; in lpfc_sli4_post_status_check()
9316 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, in lpfc_sli4_post_status_check()
9320 phba->work_status[0] = in lpfc_sli4_post_status_check()
9321 readl(phba->sli4_hba.u.if_type2. in lpfc_sli4_post_status_check()
9323 phba->work_status[1] = in lpfc_sli4_post_status_check()
9324 readl(phba->sli4_hba.u.if_type2. in lpfc_sli4_post_status_check()
9333 phba->work_status[0], in lpfc_sli4_post_status_check()
9334 phba->work_status[1]); in lpfc_sli4_post_status_check()
9335 port_error = -ENODEV; in lpfc_sli4_post_status_check()
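/*
 * A minimal sketch of the if_type 0 unrecoverable-error test above: an
 * error bit counts as fatal only where the corresponding mask bit is 0,
 * i.e. the mask marks bits the driver has been told to ignore.
 */
static bool example_ue_is_fatal(u32 uerr_lo, u32 uerr_hi,
                                u32 ue_mask_lo, u32 ue_mask_hi)
{
        return (~ue_mask_lo & uerr_lo) || (~ue_mask_hi & uerr_hi);
}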
9347 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9359 phba->sli4_hba.u.if_type0.UERRLOregaddr = in lpfc_sli4_bar0_register_memmap()
9360 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; in lpfc_sli4_bar0_register_memmap()
9361 phba->sli4_hba.u.if_type0.UERRHIregaddr = in lpfc_sli4_bar0_register_memmap()
9362 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; in lpfc_sli4_bar0_register_memmap()
9363 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = in lpfc_sli4_bar0_register_memmap()
9364 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; in lpfc_sli4_bar0_register_memmap()
9365 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = in lpfc_sli4_bar0_register_memmap()
9366 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; in lpfc_sli4_bar0_register_memmap()
9367 phba->sli4_hba.SLIINTFregaddr = in lpfc_sli4_bar0_register_memmap()
9368 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; in lpfc_sli4_bar0_register_memmap()
9371 phba->sli4_hba.u.if_type2.EQDregaddr = in lpfc_sli4_bar0_register_memmap()
9372 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9374 phba->sli4_hba.u.if_type2.ERR1regaddr = in lpfc_sli4_bar0_register_memmap()
9375 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9377 phba->sli4_hba.u.if_type2.ERR2regaddr = in lpfc_sli4_bar0_register_memmap()
9378 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9380 phba->sli4_hba.u.if_type2.CTRLregaddr = in lpfc_sli4_bar0_register_memmap()
9381 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9383 phba->sli4_hba.u.if_type2.STATUSregaddr = in lpfc_sli4_bar0_register_memmap()
9384 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9386 phba->sli4_hba.SLIINTFregaddr = in lpfc_sli4_bar0_register_memmap()
9387 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; in lpfc_sli4_bar0_register_memmap()
9388 phba->sli4_hba.PSMPHRregaddr = in lpfc_sli4_bar0_register_memmap()
9389 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9391 phba->sli4_hba.RQDBregaddr = in lpfc_sli4_bar0_register_memmap()
9392 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9394 phba->sli4_hba.WQDBregaddr = in lpfc_sli4_bar0_register_memmap()
9395 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9397 phba->sli4_hba.CQDBregaddr = in lpfc_sli4_bar0_register_memmap()
9398 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; in lpfc_sli4_bar0_register_memmap()
9399 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; in lpfc_sli4_bar0_register_memmap()
9400 phba->sli4_hba.MQDBregaddr = in lpfc_sli4_bar0_register_memmap()
9401 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; in lpfc_sli4_bar0_register_memmap()
9402 phba->sli4_hba.BMBXregaddr = in lpfc_sli4_bar0_register_memmap()
9403 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; in lpfc_sli4_bar0_register_memmap()
9406 phba->sli4_hba.u.if_type2.EQDregaddr = in lpfc_sli4_bar0_register_memmap()
9407 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9409 phba->sli4_hba.u.if_type2.ERR1regaddr = in lpfc_sli4_bar0_register_memmap()
9410 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9412 phba->sli4_hba.u.if_type2.ERR2regaddr = in lpfc_sli4_bar0_register_memmap()
9413 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9415 phba->sli4_hba.u.if_type2.CTRLregaddr = in lpfc_sli4_bar0_register_memmap()
9416 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9418 phba->sli4_hba.u.if_type2.STATUSregaddr = in lpfc_sli4_bar0_register_memmap()
9419 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9421 phba->sli4_hba.PSMPHRregaddr = in lpfc_sli4_bar0_register_memmap()
9422 phba->sli4_hba.conf_regs_memmap_p + in lpfc_sli4_bar0_register_memmap()
9424 phba->sli4_hba.BMBXregaddr = in lpfc_sli4_bar0_register_memmap()
9425 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; in lpfc_sli4_bar0_register_memmap()
9429 dev_printk(KERN_ERR, &phba->pcidev->dev, in lpfc_sli4_bar0_register_memmap()
9430 "FATAL - unsupported SLI4 interface type - %d\n", in lpfc_sli4_bar0_register_memmap()
9437 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9448 phba->sli4_hba.PSMPHRregaddr = in lpfc_sli4_bar1_register_memmap()
9449 phba->sli4_hba.ctrl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9451 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9453 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9455 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9459 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9461 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9463 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9465 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9467 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar1_register_memmap()
9473 dev_err(&phba->pcidev->dev, in lpfc_sli4_bar1_register_memmap()
9474 "FATAL - unsupported SLI4 interface type - %d\n", in lpfc_sli4_bar1_register_memmap()
9481 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9488 * Return 0 if successful, otherwise -ENODEV.
9494 return -ENODEV; in lpfc_sli4_bar2_register_memmap()
9496 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9499 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9502 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9505 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; in lpfc_sli4_bar2_register_memmap()
9506 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9508 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + in lpfc_sli4_bar2_register_memmap()
9514 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9518 * region consistent with the SLI-4 interface spec. This
9525 * 0 - successful
9526 * -ENOMEM - could not allocate memory.
9539 return -ENOMEM; in lpfc_create_bootstrap_mbox()
9545 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); in lpfc_create_bootstrap_mbox()
9546 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, in lpfc_create_bootstrap_mbox()
9547 &dmabuf->phys, GFP_KERNEL); in lpfc_create_bootstrap_mbox()
9548 if (!dmabuf->virt) { in lpfc_create_bootstrap_mbox()
9550 return -ENOMEM; in lpfc_create_bootstrap_mbox()
9556 * to be 16-byte aligned. Also align the virtual memory as each in lpfc_create_bootstrap_mbox()
9560 phba->sli4_hba.bmbx.dmabuf = dmabuf; in lpfc_create_bootstrap_mbox()
9561 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; in lpfc_create_bootstrap_mbox()
9563 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, in lpfc_create_bootstrap_mbox()
9565 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, in lpfc_create_bootstrap_mbox()
9571 * as two 30-bit words; the remaining bits mark whether a given in lpfc_create_bootstrap_mbox()
9572 * word holds the high or the low half of the address. in lpfc_create_bootstrap_mbox()
9576 dma_address = &phba->sli4_hba.bmbx.dma_address; in lpfc_create_bootstrap_mbox()
9577 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; in lpfc_create_bootstrap_mbox()
9579 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | in lpfc_create_bootstrap_mbox()
9582 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); in lpfc_create_bootstrap_mbox()
9583 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | in lpfc_create_bootstrap_mbox()
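/*
 * A minimal sketch of the address split above, assuming a 16-byte-aligned
 * bus address. The high-word shift (bits 63:34) and the marker values in
 * bits 1:0 are inferred, not taken from this listing.
 */
static void example_bmbx_pack(u64 aphys, u32 *addr_hi, u32 *addr_lo)
{
        u32 pa;

        pa = (u32)((aphys >> 34) & 0x3fffffff); /* bits 63:34 */
        *addr_hi = (pa << 2) | 0x2;             /* assumed "high" marker */
        pa = (u32)((aphys >> 4) & 0x3fffffff);  /* bits 33:4; 3:0 are 0 */
        *addr_lo = (pa << 2) | 0x1;             /* assumed "low" marker */
}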
9589 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9602 dma_free_coherent(&phba->pcidev->dev, in lpfc_destroy_bootstrap_mbox()
9603 phba->sli4_hba.bmbx.bmbx_size, in lpfc_destroy_bootstrap_mbox()
9604 phba->sli4_hba.bmbx.dmabuf->virt, in lpfc_destroy_bootstrap_mbox()
9605 phba->sli4_hba.bmbx.dmabuf->phys); in lpfc_destroy_bootstrap_mbox()
9607 kfree(phba->sli4_hba.bmbx.dmabuf); in lpfc_destroy_bootstrap_mbox()
9608 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); in lpfc_destroy_bootstrap_mbox()
9625 * lpfc_map_topology - Map the topology read from READ_CONFIG
9650 lpfc_topo_to_str[phba->cfg_topology]); in lpfc_map_topology()
9653 /* FW supports persistent topology - override module parameter value */ in lpfc_map_topology()
9654 phba->hba_flag |= HBA_PERSISTENT_TOPO; in lpfc_map_topology()
9657 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == in lpfc_map_topology()
9659 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == in lpfc_map_topology()
9662 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) in lpfc_map_topology()
9666 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; in lpfc_map_topology()
9670 /* If topology failover set - pt is '0' or '1' */ in lpfc_map_topology()
9671 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : in lpfc_map_topology()
9674 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) in lpfc_map_topology()
9679 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { in lpfc_map_topology()
9682 lpfc_topo_to_str[phba->cfg_topology]); in lpfc_map_topology()
9687 lpfc_topo_to_str[phba->cfg_topology]); in lpfc_map_topology()
9692 * lpfc_sli4_read_config - Get the config parameters.
9701 * 0 - successful
9702 * -ENOMEM - No available memory
9703 * -EIO - The mailbox failed to complete successfully.
9719 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli4_read_config()
9724 return -ENOMEM; in lpfc_sli4_read_config()
9734 bf_get(lpfc_mqe_command, &pmb->u.mqe), in lpfc_sli4_read_config()
9735 bf_get(lpfc_mqe_status, &pmb->u.mqe)); in lpfc_sli4_read_config()
9736 rc = -EIO; in lpfc_sli4_read_config()
9738 rd_config = &pmb->u.mqe.un.rd_config; in lpfc_sli4_read_config()
9740 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; in lpfc_sli4_read_config()
9741 phba->sli4_hba.lnk_info.lnk_tp = in lpfc_sli4_read_config()
9743 phba->sli4_hba.lnk_info.lnk_no = in lpfc_sli4_read_config()
9747 phba->sli4_hba.lnk_info.lnk_tp, in lpfc_sli4_read_config()
9748 phba->sli4_hba.lnk_info.lnk_no); in lpfc_sli4_read_config()
9752 bf_get(lpfc_mqe_command, &pmb->u.mqe)); in lpfc_sli4_read_config()
9754 phba->bbcredit_support = 1; in lpfc_sli4_read_config()
9755 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; in lpfc_sli4_read_config()
9758 phba->sli4_hba.conf_trunk = in lpfc_sli4_read_config()
9760 phba->sli4_hba.extents_in_use = in lpfc_sli4_read_config()
9762 phba->sli4_hba.max_cfg_param.max_xri = in lpfc_sli4_read_config()
9766 phba->sli4_hba.max_cfg_param.max_xri > 512) in lpfc_sli4_read_config()
9767 phba->sli4_hba.max_cfg_param.max_xri = 512; in lpfc_sli4_read_config()
9768 phba->sli4_hba.max_cfg_param.xri_base = in lpfc_sli4_read_config()
9770 phba->sli4_hba.max_cfg_param.max_vpi = in lpfc_sli4_read_config()
9773 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) in lpfc_sli4_read_config()
9774 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; in lpfc_sli4_read_config()
9775 phba->sli4_hba.max_cfg_param.vpi_base = in lpfc_sli4_read_config()
9777 phba->sli4_hba.max_cfg_param.max_rpi = in lpfc_sli4_read_config()
9779 phba->sli4_hba.max_cfg_param.rpi_base = in lpfc_sli4_read_config()
9781 phba->sli4_hba.max_cfg_param.max_vfi = in lpfc_sli4_read_config()
9783 phba->sli4_hba.max_cfg_param.vfi_base = in lpfc_sli4_read_config()
9785 phba->sli4_hba.max_cfg_param.max_fcfi = in lpfc_sli4_read_config()
9787 phba->sli4_hba.max_cfg_param.max_eq = in lpfc_sli4_read_config()
9789 phba->sli4_hba.max_cfg_param.max_rq = in lpfc_sli4_read_config()
9791 phba->sli4_hba.max_cfg_param.max_wq = in lpfc_sli4_read_config()
9793 phba->sli4_hba.max_cfg_param.max_cq = in lpfc_sli4_read_config()
9795 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); in lpfc_sli4_read_config()
9796 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; in lpfc_sli4_read_config()
9797 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; in lpfc_sli4_read_config()
9798 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; in lpfc_sli4_read_config()
9799 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? in lpfc_sli4_read_config()
9800 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; in lpfc_sli4_read_config()
9801 phba->max_vports = phba->max_vpi; in lpfc_sli4_read_config()
9812 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; in lpfc_sli4_read_config()
9813 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; in lpfc_sli4_read_config()
9814 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; in lpfc_sli4_read_config()
9818 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; in lpfc_sli4_read_config()
9819 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; in lpfc_sli4_read_config()
9825 if (phba->cgn_reg_signal != in lpfc_sli4_read_config()
9828 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; in lpfc_sli4_read_config()
9829 phba->cgn_reg_signal = in lpfc_sli4_read_config()
9832 phba->cgn_reg_signal = in lpfc_sli4_read_config()
9834 phba->cgn_reg_fpin = in lpfc_sli4_read_config()
9841 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; in lpfc_sli4_read_config()
9842 phba->cgn_init_reg_signal = phba->cgn_reg_signal; in lpfc_sli4_read_config()
9846 phba->cgn_reg_signal, phba->cgn_reg_fpin); in lpfc_sli4_read_config()
9856 phba->sli4_hba.extents_in_use, in lpfc_sli4_read_config()
9857 phba->sli4_hba.max_cfg_param.xri_base, in lpfc_sli4_read_config()
9858 phba->sli4_hba.max_cfg_param.max_xri, in lpfc_sli4_read_config()
9859 phba->sli4_hba.max_cfg_param.vpi_base, in lpfc_sli4_read_config()
9860 phba->sli4_hba.max_cfg_param.max_vpi, in lpfc_sli4_read_config()
9861 phba->sli4_hba.max_cfg_param.vfi_base, in lpfc_sli4_read_config()
9862 phba->sli4_hba.max_cfg_param.max_vfi, in lpfc_sli4_read_config()
9863 phba->sli4_hba.max_cfg_param.rpi_base, in lpfc_sli4_read_config()
9864 phba->sli4_hba.max_cfg_param.max_rpi, in lpfc_sli4_read_config()
9865 phba->sli4_hba.max_cfg_param.max_fcfi, in lpfc_sli4_read_config()
9866 phba->sli4_hba.max_cfg_param.max_eq, in lpfc_sli4_read_config()
9867 phba->sli4_hba.max_cfg_param.max_cq, in lpfc_sli4_read_config()
9868 phba->sli4_hba.max_cfg_param.max_wq, in lpfc_sli4_read_config()
9869 phba->sli4_hba.max_cfg_param.max_rq, in lpfc_sli4_read_config()
9870 phba->lmt); in lpfc_sli4_read_config()
9876 qmin = phba->sli4_hba.max_cfg_param.max_wq; in lpfc_sli4_read_config()
9877 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) in lpfc_sli4_read_config()
9878 qmin = phba->sli4_hba.max_cfg_param.max_cq; in lpfc_sli4_read_config()
9879 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) in lpfc_sli4_read_config()
9880 qmin = phba->sli4_hba.max_cfg_param.max_eq; in lpfc_sli4_read_config()
9887 qmin -= 4; in lpfc_sli4_read_config()
9890 if ((phba->cfg_irq_chann > qmin) || in lpfc_sli4_read_config()
9891 (phba->cfg_hdw_queue > qmin)) { in lpfc_sli4_read_config()
9893 "2005 Reducing Queues - " in lpfc_sli4_read_config()
9897 phba->sli4_hba.max_cfg_param.max_wq, in lpfc_sli4_read_config()
9898 phba->sli4_hba.max_cfg_param.max_cq, in lpfc_sli4_read_config()
9899 phba->sli4_hba.max_cfg_param.max_eq, in lpfc_sli4_read_config()
9900 qmin, phba->cfg_irq_chann, in lpfc_sli4_read_config()
9901 phba->cfg_hdw_queue); in lpfc_sli4_read_config()
9903 if (phba->cfg_irq_chann > qmin) in lpfc_sli4_read_config()
9904 phba->cfg_irq_chann = qmin; in lpfc_sli4_read_config()
9905 if (phba->cfg_hdw_queue > qmin) in lpfc_sli4_read_config()
9906 phba->cfg_hdw_queue = qmin; in lpfc_sli4_read_config()
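/*
 * A minimal restatement of the clamp above using min3() from
 * <linux/minmax.h>. The "- 4" reserve for the slow-path queue pairs
 * (mailbox, ELS and friends) is inferred from the surrounding code.
 */
static u32 example_qmin(u32 max_wq, u32 max_cq, u32 max_eq)
{
        return min3(max_wq, max_cq, max_eq) - 4;
}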
9914 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); in lpfc_sli4_read_config()
9919 phba->hba_flag |= HBA_FORCED_LINK_SPEED; in lpfc_sli4_read_config()
9923 phba->cfg_link_speed = in lpfc_sli4_read_config()
9927 phba->cfg_link_speed = in lpfc_sli4_read_config()
9931 phba->cfg_link_speed = in lpfc_sli4_read_config()
9935 phba->cfg_link_speed = in lpfc_sli4_read_config()
9939 phba->cfg_link_speed = in lpfc_sli4_read_config()
9943 phba->cfg_link_speed = in lpfc_sli4_read_config()
9947 phba->cfg_link_speed = in lpfc_sli4_read_config()
9951 phba->cfg_link_speed = in lpfc_sli4_read_config()
9955 phba->cfg_link_speed = in lpfc_sli4_read_config()
9964 phba->cfg_link_speed = in lpfc_sli4_read_config()
9971 length = phba->sli4_hba.max_cfg_param.max_xri - in lpfc_sli4_read_config()
9973 if (phba->cfg_hba_queue_depth > length) { in lpfc_sli4_read_config()
9976 phba->cfg_hba_queue_depth, length); in lpfc_sli4_read_config()
9977 phba->cfg_hba_queue_depth = length; in lpfc_sli4_read_config()
9980 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < in lpfc_sli4_read_config()
9985 length = (sizeof(struct lpfc_mbx_get_func_cfg) - in lpfc_sli4_read_config()
9993 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; in lpfc_sli4_read_config()
9994 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); in lpfc_sli4_read_config()
9995 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); in lpfc_sli4_read_config()
10000 bf_get(lpfc_mqe_command, &pmb->u.mqe), in lpfc_sli4_read_config()
10001 bf_get(lpfc_mqe_status, &pmb->u.mqe)); in lpfc_sli4_read_config()
10006 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; in lpfc_sli4_read_config()
10008 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; in lpfc_sli4_read_config()
10020 phba->sli4_hba.iov.pf_number = in lpfc_sli4_read_config()
10022 phba->sli4_hba.iov.vf_number = in lpfc_sli4_read_config()
10031 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, in lpfc_sli4_read_config()
10032 phba->sli4_hba.iov.vf_number); in lpfc_sli4_read_config()
10040 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_sli4_read_config()
10045 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10048 * This routine is invoked to set up the port-side endian order when
10053 * 0 - successful
10054 * -ENOMEM - No available memory
10055 * -EIO - The mailbox failed to complete successfully.
10065 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); in lpfc_setup_endian_order()
10068 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, in lpfc_setup_endian_order()
10075 return -ENOMEM; in lpfc_setup_endian_order()
10083 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); in lpfc_setup_endian_order()
10090 rc = -EIO; in lpfc_setup_endian_order()
10092 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_setup_endian_order()
10104 * lpfc_sli4_queue_verify - Verify and update EQ counts
10113 * 0 - successful
10114 * -ENOMEM - No available memory
10120 * Sanity check for configured queue parameters against the run-time in lpfc_sli4_queue_verify()
10124 if (phba->nvmet_support) { in lpfc_sli4_queue_verify()
10125 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) in lpfc_sli4_queue_verify()
10126 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; in lpfc_sli4_queue_verify()
10127 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) in lpfc_sli4_queue_verify()
10128 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; in lpfc_sli4_queue_verify()
10133 phba->cfg_hdw_queue, phba->cfg_irq_chann, in lpfc_sli4_queue_verify()
10134 phba->cfg_nvmet_mrq); in lpfc_sli4_queue_verify()
10137 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; in lpfc_sli4_queue_verify()
10138 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; in lpfc_sli4_queue_verify()
10141 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; in lpfc_sli4_queue_verify()
10142 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; in lpfc_sli4_queue_verify()
10151 int cpu; in lpfc_alloc_io_wq_cq() local
10153 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); in lpfc_alloc_io_wq_cq()
10155 if (phba->enab_exp_wqcq_pages) in lpfc_alloc_io_wq_cq()
10158 phba->sli4_hba.cq_esize, in lpfc_alloc_io_wq_cq()
10159 LPFC_CQE_EXP_COUNT, cpu); in lpfc_alloc_io_wq_cq()
10163 phba->sli4_hba.cq_esize, in lpfc_alloc_io_wq_cq()
10164 phba->sli4_hba.cq_ecount, cpu); in lpfc_alloc_io_wq_cq()
10167 "0499 Failed allocate fast-path IO CQ (%d)\n", in lpfc_alloc_io_wq_cq()
10171 qdesc->qe_valid = 1; in lpfc_alloc_io_wq_cq()
10172 qdesc->hdwq = idx; in lpfc_alloc_io_wq_cq()
10173 qdesc->chann = cpu; in lpfc_alloc_io_wq_cq()
10174 phba->sli4_hba.hdwq[idx].io_cq = qdesc; in lpfc_alloc_io_wq_cq()
10177 if (phba->enab_exp_wqcq_pages) { in lpfc_alloc_io_wq_cq()
10179 wqesize = (phba->fcp_embed_io) ? in lpfc_alloc_io_wq_cq()
10180 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; in lpfc_alloc_io_wq_cq()
10183 LPFC_WQE_EXP_COUNT, cpu); in lpfc_alloc_io_wq_cq()
10186 phba->sli4_hba.wq_esize, in lpfc_alloc_io_wq_cq()
10187 phba->sli4_hba.wq_ecount, cpu); in lpfc_alloc_io_wq_cq()
10191 "0503 Failed allocate fast-path IO WQ (%d)\n", in lpfc_alloc_io_wq_cq()
10195 qdesc->hdwq = idx; in lpfc_alloc_io_wq_cq()
10196 qdesc->chann = cpu; in lpfc_alloc_io_wq_cq()
10197 phba->sli4_hba.hdwq[idx].io_wq = qdesc; in lpfc_alloc_io_wq_cq()
10198 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); in lpfc_alloc_io_wq_cq()
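/*
 * A minimal usage sketch for the allocator calls above: the trailing
 * 'cpu' argument lets lpfc_sli4_queue_alloc() place the queue memory on
 * that CPU's NUMA node. The page-size first argument is elided from this
 * listing, so it is passed through generically here.
 */
static struct lpfc_queue *
example_alloc_io_cq(struct lpfc_hba *phba, u32 page_size, int cpu)
{
        /* 'cpu' steers the allocation toward the CPU's NUMA node. */
        return lpfc_sli4_queue_alloc(phba, page_size,
                                     phba->sli4_hba.cq_esize,
                                     phba->sli4_hba.cq_ecount, cpu);
}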
10203 * lpfc_sli4_queue_create - Create all the SLI4 queues
10212 * 0 - successful
10213 * -ENOMEM - No available memory
10214 * -EIO - The mailbox failed to complete successfully.
10220 int idx, cpu, eqcpu; in lpfc_sli4_queue_create() local
10230 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; in lpfc_sli4_queue_create()
10231 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; in lpfc_sli4_queue_create()
10232 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; in lpfc_sli4_queue_create()
10233 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; in lpfc_sli4_queue_create()
10234 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; in lpfc_sli4_queue_create()
10235 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; in lpfc_sli4_queue_create()
10236 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; in lpfc_sli4_queue_create()
10237 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; in lpfc_sli4_queue_create()
10238 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; in lpfc_sli4_queue_create()
10239 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; in lpfc_sli4_queue_create()
10241 if (!phba->sli4_hba.hdwq) { in lpfc_sli4_queue_create()
10242 phba->sli4_hba.hdwq = kcalloc( in lpfc_sli4_queue_create()
10243 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), in lpfc_sli4_queue_create()
10245 if (!phba->sli4_hba.hdwq) { in lpfc_sli4_queue_create()
10248 "fast-path Hardware Queue array\n"); in lpfc_sli4_queue_create()
10252 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_queue_create()
10253 qp = &phba->sli4_hba.hdwq[idx]; in lpfc_sli4_queue_create()
10254 spin_lock_init(&qp->io_buf_list_get_lock); in lpfc_sli4_queue_create()
10255 spin_lock_init(&qp->io_buf_list_put_lock); in lpfc_sli4_queue_create()
10256 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); in lpfc_sli4_queue_create()
10257 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); in lpfc_sli4_queue_create()
10258 qp->get_io_bufs = 0; in lpfc_sli4_queue_create()
10259 qp->put_io_bufs = 0; in lpfc_sli4_queue_create()
10260 qp->total_io_bufs = 0; in lpfc_sli4_queue_create()
10261 spin_lock_init(&qp->abts_io_buf_list_lock); in lpfc_sli4_queue_create()
10262 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); in lpfc_sli4_queue_create()
10263 qp->abts_scsi_io_bufs = 0; in lpfc_sli4_queue_create()
10264 qp->abts_nvme_io_bufs = 0; in lpfc_sli4_queue_create()
10265 INIT_LIST_HEAD(&qp->sgl_list); in lpfc_sli4_queue_create()
10266 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); in lpfc_sli4_queue_create()
10267 spin_lock_init(&qp->hdwq_lock); in lpfc_sli4_queue_create()
10271 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_queue_create()
10272 if (phba->nvmet_support) { in lpfc_sli4_queue_create()
10273 phba->sli4_hba.nvmet_cqset = kcalloc( in lpfc_sli4_queue_create()
10274 phba->cfg_nvmet_mrq, in lpfc_sli4_queue_create()
10277 if (!phba->sli4_hba.nvmet_cqset) { in lpfc_sli4_queue_create()
10280 "fast-path CQ set array\n"); in lpfc_sli4_queue_create()
10283 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( in lpfc_sli4_queue_create()
10284 phba->cfg_nvmet_mrq, in lpfc_sli4_queue_create()
10287 if (!phba->sli4_hba.nvmet_mrq_hdr) { in lpfc_sli4_queue_create()
10290 "fast-path RQ set hdr array\n"); in lpfc_sli4_queue_create()
10293 phba->sli4_hba.nvmet_mrq_data = kcalloc( in lpfc_sli4_queue_create()
10294 phba->cfg_nvmet_mrq, in lpfc_sli4_queue_create()
10297 if (!phba->sli4_hba.nvmet_mrq_data) { in lpfc_sli4_queue_create()
10300 "fast-path RQ set data array\n"); in lpfc_sli4_queue_create()
10306 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); in lpfc_sli4_queue_create()
10309 for_each_present_cpu(cpu) { in lpfc_sli4_queue_create()
10314 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_queue_create()
10315 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) in lpfc_sli4_queue_create()
10318 /* Get a ptr to the Hardware Queue associated with this CPU */ in lpfc_sli4_queue_create()
10319 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; in lpfc_sli4_queue_create()
10323 phba->sli4_hba.eq_esize, in lpfc_sli4_queue_create()
10324 phba->sli4_hba.eq_ecount, cpu); in lpfc_sli4_queue_create()
10328 cpup->hdwq); in lpfc_sli4_queue_create()
10331 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10332 qdesc->hdwq = cpup->hdwq; in lpfc_sli4_queue_create()
10333 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ in lpfc_sli4_queue_create()
10334 qdesc->last_cpu = qdesc->chann; in lpfc_sli4_queue_create()
10337 qp->hba_eq = qdesc; in lpfc_sli4_queue_create()
10339 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); in lpfc_sli4_queue_create()
10340 list_add(&qdesc->cpu_list, &eqi->list); in lpfc_sli4_queue_create()
10346 for_each_present_cpu(cpu) { in lpfc_sli4_queue_create()
10347 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_queue_create()
10350 if (cpup->flag & LPFC_CPU_FIRST_IRQ) in lpfc_sli4_queue_create()
10354 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; in lpfc_sli4_queue_create()
10355 if (qp->hba_eq) in lpfc_sli4_queue_create()
10359 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); in lpfc_sli4_queue_create()
10360 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; in lpfc_sli4_queue_create()
10361 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; in lpfc_sli4_queue_create()
10365 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_queue_create()
10370 if (phba->nvmet_support) { in lpfc_sli4_queue_create()
10371 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { in lpfc_sli4_queue_create()
10372 cpu = lpfc_find_cpu_handle(phba, idx, in lpfc_sli4_queue_create()
10376 phba->sli4_hba.cq_esize, in lpfc_sli4_queue_create()
10377 phba->sli4_hba.cq_ecount, in lpfc_sli4_queue_create()
10378 cpu); in lpfc_sli4_queue_create()
10385 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10386 qdesc->hdwq = idx; in lpfc_sli4_queue_create()
10387 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10388 phba->sli4_hba.nvmet_cqset[idx] = qdesc; in lpfc_sli4_queue_create()
10396 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); in lpfc_sli4_queue_create()
10397 /* Create slow-path Mailbox Command Complete Queue */ in lpfc_sli4_queue_create()
10399 phba->sli4_hba.cq_esize, in lpfc_sli4_queue_create()
10400 phba->sli4_hba.cq_ecount, cpu); in lpfc_sli4_queue_create()
10403 "0500 Failed allocate slow-path mailbox CQ\n"); in lpfc_sli4_queue_create()
10406 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10407 phba->sli4_hba.mbx_cq = qdesc; in lpfc_sli4_queue_create()
10409 /* Create slow-path ELS Complete Queue */ in lpfc_sli4_queue_create()
10411 phba->sli4_hba.cq_esize, in lpfc_sli4_queue_create()
10412 phba->sli4_hba.cq_ecount, cpu); in lpfc_sli4_queue_create()
10415 "0501 Failed allocate slow-path ELS CQ\n"); in lpfc_sli4_queue_create()
10418 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10419 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10420 phba->sli4_hba.els_cq = qdesc; in lpfc_sli4_queue_create()
10430 phba->sli4_hba.mq_esize, in lpfc_sli4_queue_create()
10431 phba->sli4_hba.mq_ecount, cpu); in lpfc_sli4_queue_create()
10434 "0505 Failed allocate slow-path MQ\n"); in lpfc_sli4_queue_create()
10437 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10438 phba->sli4_hba.mbx_wq = qdesc; in lpfc_sli4_queue_create()
10444 /* Create slow-path ELS Work Queue */ in lpfc_sli4_queue_create()
10446 phba->sli4_hba.wq_esize, in lpfc_sli4_queue_create()
10447 phba->sli4_hba.wq_ecount, cpu); in lpfc_sli4_queue_create()
10450 "0504 Failed allocate slow-path ELS WQ\n"); in lpfc_sli4_queue_create()
10453 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10454 phba->sli4_hba.els_wq = qdesc; in lpfc_sli4_queue_create()
10455 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); in lpfc_sli4_queue_create()
10457 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_queue_create()
10460 phba->sli4_hba.cq_esize, in lpfc_sli4_queue_create()
10461 phba->sli4_hba.cq_ecount, cpu); in lpfc_sli4_queue_create()
10467 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10468 qdesc->qe_valid = 1; in lpfc_sli4_queue_create()
10469 phba->sli4_hba.nvmels_cq = qdesc; in lpfc_sli4_queue_create()
10473 phba->sli4_hba.wq_esize, in lpfc_sli4_queue_create()
10474 phba->sli4_hba.wq_ecount, cpu); in lpfc_sli4_queue_create()
10480 qdesc->chann = cpu; in lpfc_sli4_queue_create()
10481 phba->sli4_hba.nvmels_wq = qdesc; in lpfc_sli4_queue_create()
10482 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); in lpfc_sli4_queue_create()
10491 phba->sli4_hba.rq_esize, in lpfc_sli4_queue_create()
10492 phba->sli4_hba.rq_ecount, cpu); in lpfc_sli4_queue_create()
10498 phba->sli4_hba.hdr_rq = qdesc; in lpfc_sli4_queue_create()
10502 phba->sli4_hba.rq_esize, in lpfc_sli4_queue_create()
10503 phba->sli4_hba.rq_ecount, cpu); in lpfc_sli4_queue_create()
10509 phba->sli4_hba.dat_rq = qdesc; in lpfc_sli4_queue_create()
10511 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && in lpfc_sli4_queue_create()
10512 phba->nvmet_support) { in lpfc_sli4_queue_create()
10513 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { in lpfc_sli4_queue_create()
10514 cpu = lpfc_find_cpu_handle(phba, idx, in lpfc_sli4_queue_create()
10519 phba->sli4_hba.rq_esize, in lpfc_sli4_queue_create()
10521 cpu); in lpfc_sli4_queue_create()
10528 qdesc->hdwq = idx; in lpfc_sli4_queue_create()
10529 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; in lpfc_sli4_queue_create()
10532 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), in lpfc_sli4_queue_create()
10534 cpu_to_node(cpu)); in lpfc_sli4_queue_create()
10535 if (!qdesc->rqbp) { in lpfc_sli4_queue_create()
10543 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); in lpfc_sli4_queue_create()
10548 phba->sli4_hba.rq_esize, in lpfc_sli4_queue_create()
10550 cpu); in lpfc_sli4_queue_create()
10557 qdesc->hdwq = idx; in lpfc_sli4_queue_create()
10558 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; in lpfc_sli4_queue_create()
10563 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_queue_create()
10564 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_queue_create()
10565 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, in lpfc_sli4_queue_create()
10566 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); in lpfc_sli4_queue_create()
10571 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { in lpfc_sli4_queue_create()
10572 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_queue_create()
10573 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, in lpfc_sli4_queue_create()
10574 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); in lpfc_sli4_queue_create()
10582 return -ENOMEM; in lpfc_sli4_queue_create()
10616 hdwq = phba->sli4_hba.hdwq; in lpfc_sli4_release_hdwq()
10619 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_release_hdwq()
10626 if (phba->cfg_xpsgl && !phba->nvmet_support) in lpfc_sli4_release_hdwq()
10631 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { in lpfc_sli4_release_hdwq()
10633 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; in lpfc_sli4_release_hdwq()
10635 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; in lpfc_sli4_release_hdwq()
10640 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10647 * 0 - successful
10648 * -ENOMEM - No available memory
10649 * -EIO - The mailbox failed to complete successfully.
10659 spin_lock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10660 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; in lpfc_sli4_queue_destroy()
10661 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { in lpfc_sli4_queue_destroy()
10662 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10664 spin_lock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10666 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10671 if (phba->sli4_hba.hdwq) in lpfc_sli4_queue_destroy()
10674 if (phba->nvmet_support) { in lpfc_sli4_queue_destroy()
10675 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, in lpfc_sli4_queue_destroy()
10676 phba->cfg_nvmet_mrq); in lpfc_sli4_queue_destroy()
10678 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, in lpfc_sli4_queue_destroy()
10679 phba->cfg_nvmet_mrq); in lpfc_sli4_queue_destroy()
10680 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, in lpfc_sli4_queue_destroy()
10681 phba->cfg_nvmet_mrq); in lpfc_sli4_queue_destroy()
10685 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); in lpfc_sli4_queue_destroy()
10688 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); in lpfc_sli4_queue_destroy()
10691 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); in lpfc_sli4_queue_destroy()
10694 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); in lpfc_sli4_queue_destroy()
10695 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); in lpfc_sli4_queue_destroy()
10698 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); in lpfc_sli4_queue_destroy()
10701 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); in lpfc_sli4_queue_destroy()
10704 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); in lpfc_sli4_queue_destroy()
10707 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); in lpfc_sli4_queue_destroy()
10710 spin_lock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
10711 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; in lpfc_sli4_queue_destroy()
10712 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_queue_destroy()
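/*
 * The lock/unlock dance at the top of the destroy path forms a small
 * handshake, sketched here: mark the free as in progress, then wait
 * until no other context still holds LPFC_QUEUE_FREE_WAIT before
 * tearing queues down. The sleep interval is an assumption; the actual
 * wait sits on a line this listing elides.
 */
static void example_queue_free_handshake(struct lpfc_hba *phba)
{
        spin_lock_irq(&phba->hbalock);
        phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
        while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
                spin_unlock_irq(&phba->hbalock);
                msleep(20);     /* assumed interval, not from this listing */
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);
}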
10722 rqbp = rq->rqbp; in lpfc_free_rq_buffer()
10723 while (!list_empty(&rqbp->rqb_buffer_list)) { in lpfc_free_rq_buffer()
10724 list_remove_head(&rqbp->rqb_buffer_list, h_buf, in lpfc_free_rq_buffer()
10728 (rqbp->rqb_free_buffer)(phba, rqb_buffer); in lpfc_free_rq_buffer()
10729 rqbp->buffer_count--; in lpfc_free_rq_buffer()
10744 "6085 Fast-path %s (%d) not allocated\n", in lpfc_create_wq_cq()
10746 return -ENOMEM; in lpfc_create_wq_cq()
10762 *cq_map = cq->queue_id; in lpfc_create_wq_cq()
10765 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", in lpfc_create_wq_cq()
10766 qidx, cq->queue_id, qidx, eq->queue_id); in lpfc_create_wq_cq()
10774 /* no need to tear down cq - caller will do so */ in lpfc_create_wq_cq()
10779 pring = wq->pring; in lpfc_create_wq_cq()
10780 pring->sli.sli4.wqp = (void *)wq; in lpfc_create_wq_cq()
10781 cq->pring = pring; in lpfc_create_wq_cq()
10784 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", in lpfc_create_wq_cq()
10785 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); in lpfc_create_wq_cq()
10790 "0539 Failed setup of slow-path MQ: " in lpfc_create_wq_cq()
10792 /* no need to tear down cq - caller will do so */ in lpfc_create_wq_cq()
10797 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", in lpfc_create_wq_cq()
10798 phba->sli4_hba.mbx_wq->queue_id, in lpfc_create_wq_cq()
10799 phba->sli4_hba.mbx_cq->queue_id); in lpfc_create_wq_cq()
10806 * lpfc_setup_cq_lookup - Setup the CQ lookup table
10818 memset(phba->sli4_hba.cq_lookup, 0, in lpfc_setup_cq_lookup()
10819 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); in lpfc_setup_cq_lookup()
10821 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { in lpfc_setup_cq_lookup()
10823 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; in lpfc_setup_cq_lookup()
10827 list_for_each_entry(childq, &eq->child_list, list) { in lpfc_setup_cq_lookup()
10828 if (childq->queue_id > phba->sli4_hba.cq_max) in lpfc_setup_cq_lookup()
10830 if (childq->subtype == LPFC_IO) in lpfc_setup_cq_lookup()
10831 phba->sli4_hba.cq_lookup[childq->queue_id] = in lpfc_setup_cq_lookup()
10838 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
10845 * 0 - successful
10846 * -ENOMEM - No available memory
10847 * -EIO - The mailbox failed to complete successfully.
10857 int qidx, cpu; in lpfc_sli4_queue_setup() local
10859 int rc = -ENOMEM; in lpfc_sli4_queue_setup()
10861 /* Check for dual-ULP support */ in lpfc_sli4_queue_setup()
10862 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli4_queue_setup()
10867 return -ENOMEM; in lpfc_sli4_queue_setup()
10869 length = (sizeof(struct lpfc_mbx_query_fw_config) - in lpfc_sli4_queue_setup()
10878 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; in lpfc_sli4_queue_setup()
10879 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); in lpfc_sli4_queue_setup()
10880 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); in lpfc_sli4_queue_setup()
10886 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_queue_setup()
10887 rc = -ENXIO; in lpfc_sli4_queue_setup()
10891 phba->sli4_hba.fw_func_mode = in lpfc_sli4_queue_setup()
10892 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; in lpfc_sli4_queue_setup()
10893 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; in lpfc_sli4_queue_setup()
10894 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; in lpfc_sli4_queue_setup()
10895 phba->sli4_hba.physical_port = in lpfc_sli4_queue_setup()
10896 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; in lpfc_sli4_queue_setup()
10899 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, in lpfc_sli4_queue_setup()
10900 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); in lpfc_sli4_queue_setup()
10902 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_sli4_queue_setup()
10907 qp = phba->sli4_hba.hdwq; in lpfc_sli4_queue_setup()
10912 "3147 Fast-path EQs not allocated\n"); in lpfc_sli4_queue_setup()
10913 rc = -ENOMEM; in lpfc_sli4_queue_setup()
10918 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { in lpfc_sli4_queue_setup()
10920 for_each_present_cpu(cpu) { in lpfc_sli4_queue_setup()
10921 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_queue_setup()
10923 /* Look for the CPU that's using that vector with in lpfc_sli4_queue_setup()
10926 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) in lpfc_sli4_queue_setup()
10928 if (qidx != cpup->eq) in lpfc_sli4_queue_setup()
10932 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, in lpfc_sli4_queue_setup()
10933 phba->cfg_fcp_imax); in lpfc_sli4_queue_setup()
10936 "0523 Failed setup of fast-path" in lpfc_sli4_queue_setup()
10938 cpup->eq, (uint32_t)rc); in lpfc_sli4_queue_setup()
10943 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = in lpfc_sli4_queue_setup()
10944 qp[cpup->hdwq].hba_eq; in lpfc_sli4_queue_setup()
10947 "2584 HBA EQ setup: queue[%d]-id=%d\n", in lpfc_sli4_queue_setup()
10948 cpup->eq, in lpfc_sli4_queue_setup()
10949 qp[cpup->hdwq].hba_eq->queue_id); in lpfc_sli4_queue_setup()
10954 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { in lpfc_sli4_queue_setup()
10955 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); in lpfc_sli4_queue_setup()
10956 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_queue_setup()
10960 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, in lpfc_sli4_queue_setup()
10963 &phba->sli4_hba.hdwq[qidx].io_cq_map, in lpfc_sli4_queue_setup()
10979 /* Set up slow-path MBOX CQ/MQ */ in lpfc_sli4_queue_setup()
10981 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { in lpfc_sli4_queue_setup()
10984 phba->sli4_hba.mbx_cq ? in lpfc_sli4_queue_setup()
10986 rc = -ENOMEM; in lpfc_sli4_queue_setup()
10991 phba->sli4_hba.mbx_cq, in lpfc_sli4_queue_setup()
10992 phba->sli4_hba.mbx_wq, in lpfc_sli4_queue_setup()
11000 if (phba->nvmet_support) { in lpfc_sli4_queue_setup()
11001 if (!phba->sli4_hba.nvmet_cqset) { in lpfc_sli4_queue_setup()
11003 "3165 Fast-path NVME CQ Set " in lpfc_sli4_queue_setup()
11005 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11008 if (phba->cfg_nvmet_mrq > 1) { in lpfc_sli4_queue_setup()
11010 phba->sli4_hba.nvmet_cqset, in lpfc_sli4_queue_setup()
11022 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], in lpfc_sli4_queue_setup()
11031 phba->sli4_hba.nvmet_cqset[0]->chann = 0; in lpfc_sli4_queue_setup()
11034 "6090 NVMET CQ setup: cq-id=%d, " in lpfc_sli4_queue_setup()
11035 "parent eq-id=%d\n", in lpfc_sli4_queue_setup()
11036 phba->sli4_hba.nvmet_cqset[0]->queue_id, in lpfc_sli4_queue_setup()
11037 qp[0].hba_eq->queue_id); in lpfc_sli4_queue_setup()
11041 /* Set up slow-path ELS WQ/CQ */ in lpfc_sli4_queue_setup()
11042 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { in lpfc_sli4_queue_setup()
11045 phba->sli4_hba.els_cq ? "WQ" : "CQ"); in lpfc_sli4_queue_setup()
11046 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11050 phba->sli4_hba.els_cq, in lpfc_sli4_queue_setup()
11051 phba->sli4_hba.els_wq, in lpfc_sli4_queue_setup()
11060 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", in lpfc_sli4_queue_setup()
11061 phba->sli4_hba.els_wq->queue_id, in lpfc_sli4_queue_setup()
11062 phba->sli4_hba.els_cq->queue_id); in lpfc_sli4_queue_setup()
11064 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_queue_setup()
11066 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { in lpfc_sli4_queue_setup()
11069 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); in lpfc_sli4_queue_setup()
11070 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11074 phba->sli4_hba.nvmels_cq, in lpfc_sli4_queue_setup()
11075 phba->sli4_hba.nvmels_wq, in lpfc_sli4_queue_setup()
11085 "6096 ELS WQ setup: wq-id=%d, " in lpfc_sli4_queue_setup()
11086 "parent cq-id=%d\n", in lpfc_sli4_queue_setup()
11087 phba->sli4_hba.nvmels_wq->queue_id, in lpfc_sli4_queue_setup()
11088 phba->sli4_hba.nvmels_cq->queue_id); in lpfc_sli4_queue_setup()
11094 if (phba->nvmet_support) { in lpfc_sli4_queue_setup()
11095 if ((!phba->sli4_hba.nvmet_cqset) || in lpfc_sli4_queue_setup()
11096 (!phba->sli4_hba.nvmet_mrq_hdr) || in lpfc_sli4_queue_setup()
11097 (!phba->sli4_hba.nvmet_mrq_data)) { in lpfc_sli4_queue_setup()
11101 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11104 if (phba->cfg_nvmet_mrq > 1) { in lpfc_sli4_queue_setup()
11106 phba->sli4_hba.nvmet_mrq_hdr, in lpfc_sli4_queue_setup()
11107 phba->sli4_hba.nvmet_mrq_data, in lpfc_sli4_queue_setup()
11108 phba->sli4_hba.nvmet_cqset, in lpfc_sli4_queue_setup()
11120 phba->sli4_hba.nvmet_mrq_hdr[0], in lpfc_sli4_queue_setup()
11121 phba->sli4_hba.nvmet_mrq_data[0], in lpfc_sli4_queue_setup()
11122 phba->sli4_hba.nvmet_cqset[0], in lpfc_sli4_queue_setup()
11134 "6099 NVMET RQ setup: hdr-rq-id=%d, " in lpfc_sli4_queue_setup()
11135 "dat-rq-id=%d parent cq-id=%d\n", in lpfc_sli4_queue_setup()
11136 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, in lpfc_sli4_queue_setup()
11137 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, in lpfc_sli4_queue_setup()
11138 phba->sli4_hba.nvmet_cqset[0]->queue_id); in lpfc_sli4_queue_setup()
11143 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { in lpfc_sli4_queue_setup()
11146 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11150 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, in lpfc_sli4_queue_setup()
11151 phba->sli4_hba.els_cq, LPFC_USOL); in lpfc_sli4_queue_setup()
11160 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " in lpfc_sli4_queue_setup()
11161 "parent cq-id=%d\n", in lpfc_sli4_queue_setup()
11162 phba->sli4_hba.hdr_rq->queue_id, in lpfc_sli4_queue_setup()
11163 phba->sli4_hba.dat_rq->queue_id, in lpfc_sli4_queue_setup()
11164 phba->sli4_hba.els_cq->queue_id); in lpfc_sli4_queue_setup()
11166 if (phba->cfg_fcp_imax) in lpfc_sli4_queue_setup()
11167 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; in lpfc_sli4_queue_setup()
11171 for (qidx = 0; qidx < phba->cfg_irq_chann; in lpfc_sli4_queue_setup()
11176 if (phba->sli4_hba.cq_max) { in lpfc_sli4_queue_setup()
11177 kfree(phba->sli4_hba.cq_lookup); in lpfc_sli4_queue_setup()
11178 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), in lpfc_sli4_queue_setup()
11180 if (!phba->sli4_hba.cq_lookup) { in lpfc_sli4_queue_setup()
11183 "size 0x%x\n", phba->sli4_hba.cq_max); in lpfc_sli4_queue_setup()
11184 rc = -ENOMEM; in lpfc_sli4_queue_setup()
11198 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11205 * 0 - successful
11206 * -ENOMEM - No available memory
11207 * -EIO - The mailbox failed to complete successfully.
11217 if (phba->sli4_hba.mbx_wq) in lpfc_sli4_queue_unset()
11218 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); in lpfc_sli4_queue_unset()
11221 if (phba->sli4_hba.nvmels_wq) in lpfc_sli4_queue_unset()
11222 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); in lpfc_sli4_queue_unset()
11225 if (phba->sli4_hba.els_wq) in lpfc_sli4_queue_unset()
11226 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); in lpfc_sli4_queue_unset()
11229 if (phba->sli4_hba.hdr_rq) in lpfc_sli4_queue_unset()
11230 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, in lpfc_sli4_queue_unset()
11231 phba->sli4_hba.dat_rq); in lpfc_sli4_queue_unset()
11234 if (phba->sli4_hba.mbx_cq) in lpfc_sli4_queue_unset()
11235 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); in lpfc_sli4_queue_unset()
11238 if (phba->sli4_hba.els_cq) in lpfc_sli4_queue_unset()
11239 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); in lpfc_sli4_queue_unset()
11242 if (phba->sli4_hba.nvmels_cq) in lpfc_sli4_queue_unset()
11243 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); in lpfc_sli4_queue_unset()
11245 if (phba->nvmet_support) { in lpfc_sli4_queue_unset()
11247 if (phba->sli4_hba.nvmet_mrq_hdr) { in lpfc_sli4_queue_unset()
11248 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) in lpfc_sli4_queue_unset()
11251 phba->sli4_hba.nvmet_mrq_hdr[qidx], in lpfc_sli4_queue_unset()
11252 phba->sli4_hba.nvmet_mrq_data[qidx]); in lpfc_sli4_queue_unset()
11256 if (phba->sli4_hba.nvmet_cqset) { in lpfc_sli4_queue_unset()
11257 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) in lpfc_sli4_queue_unset()
11259 phba, phba->sli4_hba.nvmet_cqset[qidx]); in lpfc_sli4_queue_unset()
11263 /* Unset fast-path SLI4 queues */ in lpfc_sli4_queue_unset()
11264 if (phba->sli4_hba.hdwq) { in lpfc_sli4_queue_unset()
11266 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { in lpfc_sli4_queue_unset()
11268 qp = &phba->sli4_hba.hdwq[qidx]; in lpfc_sli4_queue_unset()
11269 lpfc_wq_destroy(phba, qp->io_wq); in lpfc_sli4_queue_unset()
11270 lpfc_cq_destroy(phba, qp->io_cq); in lpfc_sli4_queue_unset()
11273 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { in lpfc_sli4_queue_unset()
11275 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; in lpfc_sli4_queue_unset()
11280 kfree(phba->sli4_hba.cq_lookup); in lpfc_sli4_queue_unset()
11281 phba->sli4_hba.cq_lookup = NULL; in lpfc_sli4_queue_unset()
11282 phba->sli4_hba.cq_max = 0; in lpfc_sli4_queue_unset()
11286 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11293 * - Mailbox asynchronous events
11294 * - Receive queue completion unsolicited events
11295 * Later, this can be used for all the slow-path events.
11298 * 0 - successful
11299 * -ENOMEM - No available memory
11307 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { in lpfc_sli4_cq_event_pool_create()
11311 list_add_tail(&cq_event->list, in lpfc_sli4_cq_event_pool_create()
11312 &phba->sli4_hba.sp_cqe_event_pool); in lpfc_sli4_cq_event_pool_create()
11318 return -ENOMEM; in lpfc_sli4_cq_event_pool_create()
11322 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11327 * cleanup routine to free all the outstanding completion-queue events
11337 &phba->sli4_hba.sp_cqe_event_pool, list) { in lpfc_sli4_cq_event_pool_destroy()
11338 list_del(&cq_event->list); in lpfc_sli4_cq_event_pool_destroy()
11344 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11348 * completion-queue event from the free pool.
11350 * Return: Pointer to the newly allocated completion-queue event if successful
11358 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, in __lpfc_sli4_cq_event_alloc()
11364 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11368 * completion-queue event from the free pool.
11370 * Return: Pointer to the newly allocated completion-queue event if successful
11379 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_sli4_cq_event_alloc()
11381 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_sli4_cq_event_alloc()
11386 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11391 * completion-queue event back into the free pool.
11397 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); in __lpfc_sli4_cq_event_release()
11401 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11406 * completion-queue event back into the free pool.
11413 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_sli4_cq_event_release()
11415 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_sli4_cq_event_release()
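/*
 * A minimal round-trip sketch for the event pool above: a slow-path
 * handler borrows a pre-allocated event (safe in atomic context, since
 * no allocation happens) and the worker returns it when done. The
 * release signature is taken from the wrappers shown above.
 */
static void example_cq_event_roundtrip(struct lpfc_hba *phba)
{
        struct lpfc_cq_event *cq_event;

        cq_event = lpfc_sli4_cq_event_alloc(phba);
        if (!cq_event)
                return;         /* pool exhausted; event is dropped */
        /* ... copy the CQE payload and queue cq_event to a work list ... */
        lpfc_sli4_cq_event_release(phba, cq_event);
}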
11419 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11422 * This routine is to free all the pending completion-queue events to the
11435 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); in lpfc_sli4_cq_event_release_all()
11436 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, in lpfc_sli4_cq_event_release_all()
11438 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); in lpfc_sli4_cq_event_release_all()
11441 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); in lpfc_sli4_cq_event_release_all()
11442 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, in lpfc_sli4_cq_event_release_all()
11444 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); in lpfc_sli4_cq_event_release_all()
11454 * lpfc_pci_function_reset - Reset pci function.
11461 * 0 - successful
11462 * -ENOMEM - No available memory
11463 * -EIO - The mailbox failed to complete successfully.
11477 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); in lpfc_pci_function_reset()
11480 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, in lpfc_pci_function_reset()
11487 return -ENOMEM; in lpfc_pci_function_reset()
11490 /* Setup PCI function reset mailbox-ioctl command */ in lpfc_pci_function_reset()
11496 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; in lpfc_pci_function_reset()
11497 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); in lpfc_pci_function_reset()
11499 &shdr->response); in lpfc_pci_function_reset()
11500 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_pci_function_reset()
11507 rc = -ENXIO; in lpfc_pci_function_reset()
11519 if (lpfc_readl(phba->sli4_hba.u.if_type2. in lpfc_pci_function_reset()
11521 rc = -ENODEV; in lpfc_pci_function_reset()
11530 phba->work_status[0] = readl( in lpfc_pci_function_reset()
11531 phba->sli4_hba.u.if_type2.ERR1regaddr); in lpfc_pci_function_reset()
11532 phba->work_status[1] = readl( in lpfc_pci_function_reset()
11533 phba->sli4_hba.u.if_type2.ERR2regaddr); in lpfc_pci_function_reset()
11538 phba->work_status[0], in lpfc_pci_function_reset()
11539 phba->work_status[1]); in lpfc_pci_function_reset()
11540 rc = -ENODEV; in lpfc_pci_function_reset()
11553 writel(reg_data.word0, phba->sli4_hba.u.if_type2. in lpfc_pci_function_reset()
11556 pci_read_config_word(phba->pcidev, in lpfc_pci_function_reset()
11563 rc = -ENODEV; in lpfc_pci_function_reset()
11574 /* Catch the not-ready port failure after a port reset. */ in lpfc_pci_function_reset()
11579 rc = -ENODEV; in lpfc_pci_function_reset()
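On if_type 2 ports the reset path above polls the port status register until the port reports ready, dumping ERR1/ERR2 and returning -ENODEV on timeout. A compact sketch of such a bounded readiness poll, using a hypothetical ready bit rather than the real SLIPORT status layout:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll a memory-mapped status register until bit 0 ("ready",
 * hypothetical here) is set, or give up after 'tries' attempts. */
static int wait_port_ready(void __iomem *sts, unsigned int tries)
{
	while (tries--) {
		if (readl(sts) & 0x1)
			return 0;
		msleep(10);
	}
	return -ENODEV;
}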
11586 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11590 * with SLI-4 interface spec.
11593 * 0 - successful
11594 * other values - error
11599 struct pci_dev *pdev = phba->pcidev; in lpfc_sli4_pci_mem_setup()
11605 return -ENODEV; in lpfc_sli4_pci_mem_setup()
11608 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in lpfc_sli4_pci_mem_setup()
11610 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in lpfc_sli4_pci_mem_setup()
11619 &phba->sli4_hba.sli_intf.word0)) { in lpfc_sli4_pci_mem_setup()
11620 return -ENODEV; in lpfc_sli4_pci_mem_setup()
11624 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != in lpfc_sli4_pci_mem_setup()
11629 phba->sli4_hba.sli_intf.word0); in lpfc_sli4_pci_mem_setup()
11630 return -ENODEV; in lpfc_sli4_pci_mem_setup()
11633 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); in lpfc_sli4_pci_mem_setup()
11641 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); in lpfc_sli4_pci_mem_setup()
11648 phba->sli4_hba.conf_regs_memmap_p = in lpfc_sli4_pci_mem_setup()
11649 ioremap(phba->pci_bar0_map, bar0map_len); in lpfc_sli4_pci_mem_setup()
11650 if (!phba->sli4_hba.conf_regs_memmap_p) { in lpfc_sli4_pci_mem_setup()
11651 dev_printk(KERN_ERR, &pdev->dev, in lpfc_sli4_pci_mem_setup()
11654 return -ENODEV; in lpfc_sli4_pci_mem_setup()
11656 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; in lpfc_sli4_pci_mem_setup()
11660 phba->pci_bar0_map = pci_resource_start(pdev, 1); in lpfc_sli4_pci_mem_setup()
11663 dev_printk(KERN_ERR, &pdev->dev, in lpfc_sli4_pci_mem_setup()
11664 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); in lpfc_sli4_pci_mem_setup()
11665 return -ENODEV; in lpfc_sli4_pci_mem_setup()
11667 phba->sli4_hba.conf_regs_memmap_p = in lpfc_sli4_pci_mem_setup()
11668 ioremap(phba->pci_bar0_map, bar0map_len); in lpfc_sli4_pci_mem_setup()
11669 if (!phba->sli4_hba.conf_regs_memmap_p) { in lpfc_sli4_pci_mem_setup()
11670 dev_printk(KERN_ERR, &pdev->dev, in lpfc_sli4_pci_mem_setup()
11673 return -ENODEV; in lpfc_sli4_pci_mem_setup()
11684 phba->pci_bar1_map = pci_resource_start(pdev, in lpfc_sli4_pci_mem_setup()
11687 phba->sli4_hba.ctrl_regs_memmap_p = in lpfc_sli4_pci_mem_setup()
11688 ioremap(phba->pci_bar1_map, in lpfc_sli4_pci_mem_setup()
11690 if (!phba->sli4_hba.ctrl_regs_memmap_p) { in lpfc_sli4_pci_mem_setup()
11691 dev_err(&pdev->dev, in lpfc_sli4_pci_mem_setup()
11694 error = -ENOMEM; in lpfc_sli4_pci_mem_setup()
11697 phba->pci_bar2_memmap_p = in lpfc_sli4_pci_mem_setup()
11698 phba->sli4_hba.ctrl_regs_memmap_p; in lpfc_sli4_pci_mem_setup()
11701 error = -ENOMEM; in lpfc_sli4_pci_mem_setup()
11712 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); in lpfc_sli4_pci_mem_setup()
11714 phba->sli4_hba.drbl_regs_memmap_p = in lpfc_sli4_pci_mem_setup()
11715 ioremap(phba->pci_bar1_map, bar1map_len); in lpfc_sli4_pci_mem_setup()
11716 if (!phba->sli4_hba.drbl_regs_memmap_p) { in lpfc_sli4_pci_mem_setup()
11717 dev_err(&pdev->dev, in lpfc_sli4_pci_mem_setup()
11719 error = -ENOMEM; in lpfc_sli4_pci_mem_setup()
11722 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; in lpfc_sli4_pci_mem_setup()
11732 phba->pci_bar2_map = pci_resource_start(pdev, in lpfc_sli4_pci_mem_setup()
11735 phba->sli4_hba.drbl_regs_memmap_p = in lpfc_sli4_pci_mem_setup()
11736 ioremap(phba->pci_bar2_map, in lpfc_sli4_pci_mem_setup()
11738 if (!phba->sli4_hba.drbl_regs_memmap_p) { in lpfc_sli4_pci_mem_setup()
11739 dev_err(&pdev->dev, in lpfc_sli4_pci_mem_setup()
11742 error = -ENOMEM; in lpfc_sli4_pci_mem_setup()
11745 phba->pci_bar4_memmap_p = in lpfc_sli4_pci_mem_setup()
11746 phba->sli4_hba.drbl_regs_memmap_p; in lpfc_sli4_pci_mem_setup()
11751 error = -ENOMEM; in lpfc_sli4_pci_mem_setup()
11762 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); in lpfc_sli4_pci_mem_setup()
11764 phba->sli4_hba.dpp_regs_memmap_p = in lpfc_sli4_pci_mem_setup()
11765 ioremap(phba->pci_bar2_map, bar2map_len); in lpfc_sli4_pci_mem_setup()
11766 if (!phba->sli4_hba.dpp_regs_memmap_p) { in lpfc_sli4_pci_mem_setup()
11767 dev_err(&pdev->dev, in lpfc_sli4_pci_mem_setup()
11769 error = -ENOMEM; in lpfc_sli4_pci_mem_setup()
11772 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; in lpfc_sli4_pci_mem_setup()
11779 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; in lpfc_sli4_pci_mem_setup()
11780 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; in lpfc_sli4_pci_mem_setup()
11781 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; in lpfc_sli4_pci_mem_setup()
11784 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; in lpfc_sli4_pci_mem_setup()
11785 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; in lpfc_sli4_pci_mem_setup()
11786 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; in lpfc_sli4_pci_mem_setup()
11795 iounmap(phba->sli4_hba.drbl_regs_memmap_p); in lpfc_sli4_pci_mem_setup()
11797 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); in lpfc_sli4_pci_mem_setup()
11799 iounmap(phba->sli4_hba.conf_regs_memmap_p); in lpfc_sli4_pci_mem_setup()
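The setup routine above maps a different set of BARs per if_type and unwinds every successful ioremap() when a later one fails. A stripped-down sketch of that map/unwind shape (two BARs, goto-based cleanup); the real routine also validates resource flags and chooses BAR indexes by interface type:

#include <linux/pci.h>
#include <linux/io.h>

static int map_bars(struct pci_dev *pdev, void __iomem **bar0,
		    void __iomem **bar2)
{
	*bar0 = ioremap(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0));
	if (!*bar0)
		return -ENOMEM;

	*bar2 = ioremap(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2));
	if (!*bar2)
		goto out_bar0;

	return 0;

out_bar0:
	iounmap(*bar0);	/* undo the earlier mapping on error */
	return -ENOMEM;
}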
11805 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
11809 * with SLI-4 interface spec.
11815 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); in lpfc_sli4_pci_mem_unset()
11819 iounmap(phba->sli4_hba.drbl_regs_memmap_p); in lpfc_sli4_pci_mem_unset()
11820 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); in lpfc_sli4_pci_mem_unset()
11821 iounmap(phba->sli4_hba.conf_regs_memmap_p); in lpfc_sli4_pci_mem_unset()
11824 iounmap(phba->sli4_hba.conf_regs_memmap_p); in lpfc_sli4_pci_mem_unset()
11827 iounmap(phba->sli4_hba.drbl_regs_memmap_p); in lpfc_sli4_pci_mem_unset()
11828 iounmap(phba->sli4_hba.conf_regs_memmap_p); in lpfc_sli4_pci_mem_unset()
11829 if (phba->sli4_hba.dpp_regs_memmap_p) in lpfc_sli4_pci_mem_unset()
11830 iounmap(phba->sli4_hba.dpp_regs_memmap_p); in lpfc_sli4_pci_mem_unset()
11834 dev_printk(KERN_ERR, &phba->pcidev->dev, in lpfc_sli4_pci_mem_unset()
11835 "FATAL - unsupported SLI4 interface type - %d\n", in lpfc_sli4_pci_mem_unset()
11842 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
11845 * This routine is invoked to enable the MSI-X interrupt vectors to device
11846 * with SLI-3 interface specs.
11849 * 0 - successful
11850 * other values - error
11858 /* Set up MSI-X multi-message vectors */ in lpfc_sli_enable_msix()
11859 rc = pci_alloc_irq_vectors(phba->pcidev, in lpfc_sli_enable_msix()
11863 "0420 PCI enable MSI-X failed (%d)\n", rc); in lpfc_sli_enable_msix()
11868 * Assign MSI-X vectors to interrupt handlers in lpfc_sli_enable_msix()
11871 /* vector-0 is associated to slow-path handler */ in lpfc_sli_enable_msix()
11872 rc = request_irq(pci_irq_vector(phba->pcidev, 0), in lpfc_sli_enable_msix()
11877 "0421 MSI-X slow-path request_irq failed " in lpfc_sli_enable_msix()
11882 /* vector-1 is associated to fast-path handler */ in lpfc_sli_enable_msix()
11883 rc = request_irq(pci_irq_vector(phba->pcidev, 1), in lpfc_sli_enable_msix()
11889 "0429 MSI-X fast-path request_irq failed " in lpfc_sli_enable_msix()
11895 * Configure HBA MSI-X attention conditions to messages in lpfc_sli_enable_msix()
11897 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli_enable_msix()
11900 rc = -ENOMEM; in lpfc_sli_enable_msix()
11914 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); in lpfc_sli_enable_msix()
11919 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_sli_enable_msix()
11924 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_sli_enable_msix()
11928 free_irq(pci_irq_vector(phba->pcidev, 1), phba); in lpfc_sli_enable_msix()
11932 free_irq(pci_irq_vector(phba->pcidev, 0), phba); in lpfc_sli_enable_msix()
11935 /* Unconfigure MSI-X capability structure */ in lpfc_sli_enable_msix()
11936 pci_free_irq_vectors(phba->pcidev); in lpfc_sli_enable_msix()
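The SLI-3 MSI-X bring-up above is the standard allocate-then-request sequence: pci_alloc_irq_vectors() with PCI_IRQ_MSIX, one request_irq() per vector, and a full unwind on any failure. A generic sketch of that sequence (the handler and device cookie are placeholders):

#include <linux/pci.h>
#include <linux/interrupt.h>

static int enable_msix(struct pci_dev *pdev, int nvec,
		       irq_handler_t handler, void *dev)
{
	int rc, i;

	rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (rc < 0)
		return rc;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler,
				 0, "sketch", dev);
		if (rc)
			goto out_unwind;
	}
	return 0;

out_unwind:
	while (--i >= 0)	/* free only the vectors requested so far */
		free_irq(pci_irq_vector(pdev, i), dev);
	pci_free_irq_vectors(pdev);
	return rc;
}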
11943 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
11947 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
11953 * 0 - successful
11954 * other values - error
11961 rc = pci_enable_msi(phba->pcidev); in lpfc_sli_enable_msi()
11971 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, in lpfc_sli_enable_msi()
11974 pci_disable_msi(phba->pcidev); in lpfc_sli_enable_msi()
11982 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
11984 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
11987 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
11992 * MSI-X -> MSI -> IRQ.
11995 * 0 - successful
11996 * other values - error
12008 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; in lpfc_sli_enable_intr()
12011 /* Now, try to enable MSI-X interrupt mode */ in lpfc_sli_enable_intr()
12014 /* Indicate initialization to MSI-X mode */ in lpfc_sli_enable_intr()
12015 phba->intr_type = MSIX; in lpfc_sli_enable_intr()
12020 /* Fallback to MSI if MSI-X initialization failed */ in lpfc_sli_enable_intr()
12021 if (cfg_mode >= 1 && phba->intr_type == NONE) { in lpfc_sli_enable_intr()
12025 phba->intr_type = MSI; in lpfc_sli_enable_intr()
12030 /* Fallback to INTx if both MSI-X/MSI initialization failed */ in lpfc_sli_enable_intr()
12031 if (phba->intr_type == NONE) { in lpfc_sli_enable_intr()
12032 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, in lpfc_sli_enable_intr()
12036 phba->intr_type = INTx; in lpfc_sli_enable_intr()
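The fallback ladder above (MSI-X, then MSI, then legacy INTx) can be written as three attempts that each give up only on a negative return. A sketch; in practice a single pci_alloc_irq_vectors() call with PCI_IRQ_ALL_TYPES collapses the same ladder:

#include <linux/pci.h>

static int pick_irq_mode(struct pci_dev *pdev)
{
	int rc = pci_alloc_irq_vectors(pdev, 1, 2, PCI_IRQ_MSIX);

	if (rc < 0)	/* MSI-X failed, try single-message MSI */
		rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (rc < 0)	/* last resort: legacy INTx */
		rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
	return rc < 0 ? rc : 0;
}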
12044 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12049 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12057 if (phba->intr_type == MSIX) in lpfc_sli_disable_intr()
12063 free_irq(pci_irq_vector(phba->pcidev, i), phba); in lpfc_sli_disable_intr()
12064 pci_free_irq_vectors(phba->pcidev); in lpfc_sli_disable_intr()
12067 phba->intr_type = NONE; in lpfc_sli_disable_intr()
12068 phba->sli.slistat.sli_intr = 0; in lpfc_sli_disable_intr()
12072 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12077 * Return the CPU that matches the selection criteria
12083 int cpu; in lpfc_find_cpu_handle() local
12086 for_each_present_cpu(cpu) { in lpfc_find_cpu_handle()
12087 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_find_cpu_handle()
12094 (cpup->flag & LPFC_CPU_FIRST_IRQ) && in lpfc_find_cpu_handle()
12095 (cpup->eq == id)) in lpfc_find_cpu_handle()
12096 return cpu; in lpfc_find_cpu_handle()
12098 /* If matching by HDWQ, select the first CPU that matches */ in lpfc_find_cpu_handle()
12099 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) in lpfc_find_cpu_handle()
12100 return cpu; in lpfc_find_cpu_handle()
12107 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12109 * @cpu: CPU map index
12110 * @phys_id: CPU package physical id
12111 * @core_id: CPU core id
12114 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, in lpfc_find_hyper() argument
12121 cpup = &phba->sli4_hba.cpu_map[idx]; in lpfc_find_hyper()
12123 if ((cpup->phys_id == phys_id) && in lpfc_find_hyper()
12124 (cpup->core_id == core_id) && in lpfc_find_hyper()
12125 (cpu != idx)) in lpfc_find_hyper()
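lpfc_find_hyper() declares a CPU hyper-threaded when another cpu_map entry shares both its phys_id and core_id. For comparison, a sketch of the same test against the scheduler's topology masks instead of the driver's private map (not what the driver does, but an equivalent check on SMT systems):

#include <linux/topology.h>
#include <linux/cpumask.h>

/* True when 'cpu' shares a core with at least one sibling. */
static bool cpu_has_smt_sibling(int cpu)
{
	return cpumask_weight(topology_sibling_cpumask(cpu)) > 1;
}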
12133 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12137 * @cpu: cpu used to index vector_map structure
12143 unsigned int cpu) in lpfc_assign_eq_map_info() argument
12145 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_assign_eq_map_info()
12148 cpup->eq = eqidx; in lpfc_assign_eq_map_info()
12149 cpup->flag |= flag; in lpfc_assign_eq_map_info()
12152 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", in lpfc_assign_eq_map_info()
12153 cpu, eqhdl->irq, cpup->eq, cpup->flag); in lpfc_assign_eq_map_info()
12157 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12167 int cpu; in lpfc_cpu_map_array_init() local
12169 for_each_possible_cpu(cpu) { in lpfc_cpu_map_array_init()
12170 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_cpu_map_array_init()
12171 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; in lpfc_cpu_map_array_init()
12172 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; in lpfc_cpu_map_array_init()
12173 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; in lpfc_cpu_map_array_init()
12174 cpup->eq = LPFC_VECTOR_MAP_EMPTY; in lpfc_cpu_map_array_init()
12175 cpup->flag = 0; in lpfc_cpu_map_array_init()
12176 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); in lpfc_cpu_map_array_init()
12177 INIT_LIST_HEAD(&eqi->list); in lpfc_cpu_map_array_init()
12178 eqi->icnt = 0; in lpfc_cpu_map_array_init()
12183 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12194 for (i = 0; i < phba->cfg_irq_chann; i++) { in lpfc_hba_eq_hdl_array_init()
12196 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; in lpfc_hba_eq_hdl_array_init()
12197 eqhdl->phba = phba; in lpfc_hba_eq_hdl_array_init()
12202 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12206 * The routine will figure out the CPU affinity assignment for every
12207 * MSI-X vector allocated for the HBA.
12208 * In addition, the CPU to IO channel mapping will be calculated
12209 * and the phba->sli4_hba.cpu_map array will reflect this.
12214 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; in lpfc_cpu_affinity_check() local
12231 /* Update CPU map with physical id and core id of each CPU */ in lpfc_cpu_affinity_check()
12232 for_each_present_cpu(cpu) { in lpfc_cpu_affinity_check()
12233 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_cpu_affinity_check()
12235 cpuinfo = &cpu_data(cpu); in lpfc_cpu_affinity_check()
12236 cpup->phys_id = cpuinfo->phys_proc_id; in lpfc_cpu_affinity_check()
12237 cpup->core_id = cpuinfo->cpu_core_id; in lpfc_cpu_affinity_check()
12238 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) in lpfc_cpu_affinity_check()
12239 cpup->flag |= LPFC_CPU_MAP_HYPER; in lpfc_cpu_affinity_check()
12242 cpup->phys_id = 0; in lpfc_cpu_affinity_check()
12243 cpup->core_id = cpu; in lpfc_cpu_affinity_check()
12247 "3328 CPU %d physid %d coreid %d flag x%x\n", in lpfc_cpu_affinity_check()
12248 cpu, cpup->phys_id, cpup->core_id, cpup->flag); in lpfc_cpu_affinity_check()
12250 if (cpup->phys_id > max_phys_id) in lpfc_cpu_affinity_check()
12251 max_phys_id = cpup->phys_id; in lpfc_cpu_affinity_check()
12252 if (cpup->phys_id < min_phys_id) in lpfc_cpu_affinity_check()
12253 min_phys_id = cpup->phys_id; in lpfc_cpu_affinity_check()
12255 if (cpup->core_id > max_core_id) in lpfc_cpu_affinity_check()
12256 max_core_id = cpup->core_id; in lpfc_cpu_affinity_check()
12257 if (cpup->core_id < min_core_id) in lpfc_cpu_affinity_check()
12258 min_core_id = cpup->core_id; in lpfc_cpu_affinity_check()
12263 * Next we will set any unassigned (unaffinitized) cpu map in lpfc_cpu_affinity_check()
12269 for_each_present_cpu(cpu) { in lpfc_cpu_affinity_check()
12270 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_cpu_affinity_check()
12272 /* Is this CPU entry unassigned */ in lpfc_cpu_affinity_check()
12273 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { in lpfc_cpu_affinity_check()
12274 /* Mark CPU as IRQ not assigned by the kernel */ in lpfc_cpu_affinity_check()
12275 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; in lpfc_cpu_affinity_check()
12283 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { in lpfc_cpu_affinity_check()
12284 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; in lpfc_cpu_affinity_check()
12285 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && in lpfc_cpu_affinity_check()
12286 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && in lpfc_cpu_affinity_check()
12287 (new_cpup->phys_id == cpup->phys_id)) in lpfc_cpu_affinity_check()
12294 /* At this point, we leave the CPU as unassigned */ in lpfc_cpu_affinity_check()
12298 cpup->eq = new_cpup->eq; in lpfc_cpu_affinity_check()
12301 * chance of having multiple unassigned CPU entries in lpfc_cpu_affinity_check()
12309 "3337 Set Affinity: CPU %d " in lpfc_cpu_affinity_check()
12310 "eq %d from peer cpu %d same " in lpfc_cpu_affinity_check()
12312 cpu, cpup->eq, new_cpu, in lpfc_cpu_affinity_check()
12313 cpup->phys_id); in lpfc_cpu_affinity_check()
12317 /* Set any unassigned cpu map entries to an IRQ on any phys_id */ in lpfc_cpu_affinity_check()
12320 for_each_present_cpu(cpu) { in lpfc_cpu_affinity_check()
12321 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_cpu_affinity_check()
12324 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { in lpfc_cpu_affinity_check()
12326 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; in lpfc_cpu_affinity_check()
12334 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { in lpfc_cpu_affinity_check()
12335 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; in lpfc_cpu_affinity_check()
12336 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && in lpfc_cpu_affinity_check()
12337 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) in lpfc_cpu_affinity_check()
12346 "3339 Set Affinity: CPU %d " in lpfc_cpu_affinity_check()
12348 cpup->hdwq, cpup->eq); in lpfc_cpu_affinity_check()
12352 cpup->eq = new_cpup->eq; in lpfc_cpu_affinity_check()
12355 * chance of having multiple unassigned CPU entries in lpfc_cpu_affinity_check()
12363 "3338 Set Affinity: CPU %d " in lpfc_cpu_affinity_check()
12364 "eq %d from peer cpu %d (%d/%d)\n", in lpfc_cpu_affinity_check()
12365 cpu, cpup->eq, new_cpu, in lpfc_cpu_affinity_check()
12366 new_cpup->phys_id, new_cpup->core_id); in lpfc_cpu_affinity_check()
12374 for_each_present_cpu(cpu) { in lpfc_cpu_affinity_check()
12375 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_cpu_affinity_check()
12378 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) in lpfc_cpu_affinity_check()
12382 cpup->hdwq = idx; in lpfc_cpu_affinity_check()
12385 "3333 Set Affinity: CPU %d (phys %d core %d): " in lpfc_cpu_affinity_check()
12387 cpu, cpup->phys_id, cpup->core_id, in lpfc_cpu_affinity_check()
12388 cpup->hdwq, cpup->eq, cpup->flag); in lpfc_cpu_affinity_check()
12391 * This will be 1 to 1 - hdwq to cpu, unless there are fewer in lpfc_cpu_affinity_check()
12392 * hardware queues than CPUs. In that case we will just round-robin in lpfc_cpu_affinity_check()
12395 * for irq_chann < hdwq. The idx is used for round-robin assignments in lpfc_cpu_affinity_check()
12401 for_each_present_cpu(cpu) { in lpfc_cpu_affinity_check()
12402 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_cpu_affinity_check()
12405 if (cpup->flag & LPFC_CPU_FIRST_IRQ) in lpfc_cpu_affinity_check()
12412 if (next_idx < phba->cfg_hdw_queue) { in lpfc_cpu_affinity_check()
12413 cpup->hdwq = next_idx; in lpfc_cpu_affinity_check()
12418 /* Not a First CPU and all hdw_queues are used. Reuse a in lpfc_cpu_affinity_check()
12419 * Hardware Queue for another CPU, so be smart about it in lpfc_cpu_affinity_check()
12421 * (CPU package) and core_id. in lpfc_cpu_affinity_check()
12424 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { in lpfc_cpu_affinity_check()
12425 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; in lpfc_cpu_affinity_check()
12426 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && in lpfc_cpu_affinity_check()
12427 new_cpup->phys_id == cpup->phys_id && in lpfc_cpu_affinity_check()
12428 new_cpup->core_id == cpup->core_id) { in lpfc_cpu_affinity_check()
12440 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { in lpfc_cpu_affinity_check()
12441 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; in lpfc_cpu_affinity_check()
12442 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && in lpfc_cpu_affinity_check()
12443 new_cpup->phys_id == cpup->phys_id) in lpfc_cpu_affinity_check()
12452 cpup->hdwq = idx % phba->cfg_hdw_queue; in lpfc_cpu_affinity_check()
12460 cpup->hdwq = new_cpup->hdwq; in lpfc_cpu_affinity_check()
12463 "3335 Set Affinity: CPU %d (phys %d core %d): " in lpfc_cpu_affinity_check()
12465 cpu, cpup->phys_id, cpup->core_id, in lpfc_cpu_affinity_check()
12466 cpup->hdwq, cpup->eq, cpup->flag); in lpfc_cpu_affinity_check()
12470 * Initialize the cpu_map slots for not-present cpus in case in lpfc_cpu_affinity_check()
12471 * a cpu is hot-added. Perform a simple hdwq round robin assignment. in lpfc_cpu_affinity_check()
12474 for_each_possible_cpu(cpu) { in lpfc_cpu_affinity_check()
12475 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_cpu_affinity_check()
12477 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); in lpfc_cpu_affinity_check()
12478 c_stat->hdwq_no = cpup->hdwq; in lpfc_cpu_affinity_check()
12480 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) in lpfc_cpu_affinity_check()
12483 cpup->hdwq = idx++ % phba->cfg_hdw_queue; in lpfc_cpu_affinity_check()
12485 c_stat->hdwq_no = cpup->hdwq; in lpfc_cpu_affinity_check()
12489 "CPU %d hdwq %d\n", in lpfc_cpu_affinity_check()
12490 cpu, cpup->hdwq); in lpfc_cpu_affinity_check()
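The fallback at the end distills to a modulo round-robin: any CPU still without a hardware queue gets the next hdwq index in sequence. A toy restatement of just that step: with 8 CPUs and cfg_hdw_queue == 4 it assigns hdwq 0,1,2,3,0,1,2,3.

#include <linux/types.h>

static void round_robin_hdwq(u16 *hdwq_of_cpu, int ncpus, int nhdwq)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++)
		hdwq_of_cpu[cpu] = cpu % nhdwq;	/* wrap over the queues */
}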
12503 * @cpu: cpu going offline
12507 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, in lpfc_cpuhp_get_eq() argument
12517 return -ENOMEM; in lpfc_cpuhp_get_eq()
12519 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { in lpfc_cpuhp_get_eq()
12520 maskp = pci_irq_get_affinity(phba->pcidev, idx); in lpfc_cpuhp_get_eq()
12524 * if irq is not affinitized to the cpu going in lpfc_cpuhp_get_eq()
12528 if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) in lpfc_cpuhp_get_eq()
12530 /* get the cpus that are online and are affinitized in lpfc_cpuhp_get_eq()
12532 * to this irq vector; if more than 1, cpuhp will not shut in lpfc_cpuhp_get_eq()
12533 * down this vector. Since this cpu has not in lpfc_cpuhp_get_eq()
12545 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; in lpfc_cpuhp_get_eq()
12546 list_add(&eq->_poll_list, eqlist); in lpfc_cpuhp_get_eq()
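The test above decides whether an EQ must switch to software polling: only when the departing CPU is in the vector's affinity mask and no other CPU in that mask remains online. A sketch of that predicate (stack cpumask for brevity; the real code allocates one with zalloc_cpumask_var()):

#include <linux/cpumask.h>

static bool vector_loses_last_cpu(const struct cpumask *aff,
				  unsigned int dying_cpu)
{
	struct cpumask tmp;

	if (!cpumask_test_cpu(dying_cpu, aff))
		return false;	/* vector is not tied to this cpu */
	cpumask_and(&tmp, aff, cpu_online_mask);
	return cpumask_weight(&tmp) <= 1;	/* dying cpu is the last online */
}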
12554 if (phba->sli_rev != LPFC_SLI_REV4) in __lpfc_cpuhp_remove()
12558 &phba->cpuhp); in __lpfc_cpuhp_remove()
12564 del_timer_sync(&phba->cpuhp_poll_timer); in __lpfc_cpuhp_remove()
12569 if (phba->pport->fc_flag & FC_OFFLINE_MODE) in lpfc_cpuhp_remove()
12577 if (phba->sli_rev != LPFC_SLI_REV4) in lpfc_cpuhp_add()
12582 if (!list_empty(&phba->poll_list)) in lpfc_cpuhp_add()
12583 mod_timer(&phba->cpuhp_poll_timer, in lpfc_cpuhp_add()
12589 &phba->cpuhp); in lpfc_cpuhp_add()
12594 if (phba->pport->load_flag & FC_UNLOADING) { in __lpfc_cpuhp_checks()
12595 *retval = -EAGAIN; in __lpfc_cpuhp_checks()
12599 if (phba->sli_rev != LPFC_SLI_REV4) { in __lpfc_cpuhp_checks()
12609 * lpfc_irq_set_aff - set IRQ affinity
12611 * @cpu: cpu to set affinity
12615 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) in lpfc_irq_set_aff() argument
12617 cpumask_clear(&eqhdl->aff_mask); in lpfc_irq_set_aff()
12618 cpumask_set_cpu(cpu, &eqhdl->aff_mask); in lpfc_irq_set_aff()
12619 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); in lpfc_irq_set_aff()
12620 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); in lpfc_irq_set_aff()
12624 * lpfc_irq_clear_aff - clear IRQ affinity
12631 cpumask_clear(&eqhdl->aff_mask); in lpfc_irq_clear_aff()
12632 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); in lpfc_irq_clear_aff()
12636 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12638 * @cpu: cpu going offline/online
12639 * @offline: true, cpu is going offline. false, cpu is coming online.
12641 * If cpu is going offline, we'll try our best effort to find the next
12642 * online cpu on the phba's original_mask and migrate all offlining IRQ
12645 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12648 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12652 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) in lpfc_irq_rebalance() argument
12659 if (phba->irq_chann_mode == NORMAL_MODE) in lpfc_irq_rebalance()
12662 orig_mask = &phba->sli4_hba.irq_aff_mask; in lpfc_irq_rebalance()
12664 if (!cpumask_test_cpu(cpu, orig_mask)) in lpfc_irq_rebalance()
12667 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_irq_rebalance()
12669 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) in lpfc_irq_rebalance()
12673 /* Find next online CPU on original mask */ in lpfc_irq_rebalance()
12674 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); in lpfc_irq_rebalance()
12677 /* Found a valid CPU */ in lpfc_irq_rebalance()
12678 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { in lpfc_irq_rebalance()
12680 * cpu aff_mask is migrated in lpfc_irq_rebalance()
12682 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { in lpfc_irq_rebalance()
12686 if (cpumask_test_cpu(cpu, aff_mask)) in lpfc_irq_rebalance()
12692 for (idx = 0; idx < phba->cfg_irq_chann; idx++) in lpfc_irq_rebalance()
12696 /* Migrate affinity back to this CPU */ in lpfc_irq_rebalance()
12697 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); in lpfc_irq_rebalance()
12701 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) in lpfc_cpu_offline() argument
12709 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); in lpfc_cpu_offline()
12716 lpfc_irq_rebalance(phba, cpu, true); in lpfc_cpu_offline()
12718 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); in lpfc_cpu_offline()
12724 list_del_init(&eq->_poll_list); in lpfc_cpu_offline()
12731 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) in lpfc_cpu_online() argument
12739 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); in lpfc_cpu_online()
12746 lpfc_irq_rebalance(phba, cpu, false); in lpfc_cpu_online()
12748 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { in lpfc_cpu_online()
12749 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); in lpfc_cpu_online()
12750 if (n == cpu) in lpfc_cpu_online()
12758 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12761 * This routine is invoked to enable the MSI-X interrupt vectors to device
12762 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
12768 * allocated and assigned to each online and offline cpu. If the cpu is
12769 * online, then affinity will be set to that cpu. If the cpu is offline, then
12770 * affinity will be set to the nearest peer cpu within the numa node that is
12772 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12773 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
12778 * cpu affinity. The driver will then use that affinity mapping to setup its
12779 * cpu mapping table.
12782 * 0 - successful
12783 * other values - error
12791 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; in lpfc_sli4_enable_msix() local
12797 /* Set up MSI-X multi-message vectors */ in lpfc_sli4_enable_msix()
12798 vectors = phba->cfg_irq_chann; in lpfc_sli4_enable_msix()
12800 if (phba->irq_chann_mode != NORMAL_MODE) in lpfc_sli4_enable_msix()
12801 aff_mask = &phba->sli4_hba.irq_aff_mask; in lpfc_sli4_enable_msix()
12805 vectors = min(phba->cfg_irq_chann, cpu_cnt); in lpfc_sli4_enable_msix()
12807 /* cpu: iterates over aff_mask including offline or online in lpfc_sli4_enable_msix()
12810 cpu = cpumask_first(aff_mask); in lpfc_sli4_enable_msix()
12811 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); in lpfc_sli4_enable_msix()
12816 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); in lpfc_sli4_enable_msix()
12819 "0484 PCI enable MSI-X failed (%d)\n", rc); in lpfc_sli4_enable_msix()
12824 /* Assign MSI-X vectors to interrupt handlers */ in lpfc_sli4_enable_msix()
12827 name = eqhdl->handler_name; in lpfc_sli4_enable_msix()
12832 eqhdl->idx = index; in lpfc_sli4_enable_msix()
12833 rc = request_irq(pci_irq_vector(phba->pcidev, index), in lpfc_sli4_enable_msix()
12838 "0486 MSI-X fast-path (%d) " in lpfc_sli4_enable_msix()
12843 eqhdl->irq = pci_irq_vector(phba->pcidev, index); in lpfc_sli4_enable_msix()
12846 /* If found a neighboring online cpu, set affinity */ in lpfc_sli4_enable_msix()
12853 cpu); in lpfc_sli4_enable_msix()
12855 /* Iterate to next offline or online cpu in aff_mask */ in lpfc_sli4_enable_msix()
12856 cpu = cpumask_next(cpu, aff_mask); in lpfc_sli4_enable_msix()
12858 /* Find next online cpu in aff_mask to set affinity */ in lpfc_sli4_enable_msix()
12859 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); in lpfc_sli4_enable_msix()
12861 cpu = cpumask_first(cpu_present_mask); in lpfc_sli4_enable_msix()
12863 cpu); in lpfc_sli4_enable_msix()
12865 maskp = pci_irq_get_affinity(phba->pcidev, index); in lpfc_sli4_enable_msix()
12868 for_each_cpu_and(cpu, maskp, cpu_present_mask) { in lpfc_sli4_enable_msix()
12869 cpup = &phba->sli4_hba.cpu_map[cpu]; in lpfc_sli4_enable_msix()
12871 /* If this is the first CPU that's assigned to in lpfc_sli4_enable_msix()
12875 * vectors are affinitized to all the cpu's. in lpfc_sli4_enable_msix()
12883 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) in lpfc_sli4_enable_msix()
12887 cpu); in lpfc_sli4_enable_msix()
12893 if (vectors != phba->cfg_irq_chann) { in lpfc_sli4_enable_msix()
12896 "MSI-X vectors, requested %d got %d\n", in lpfc_sli4_enable_msix()
12897 phba->cfg_irq_chann, vectors); in lpfc_sli4_enable_msix()
12898 if (phba->cfg_irq_chann > vectors) in lpfc_sli4_enable_msix()
12899 phba->cfg_irq_chann = vectors; in lpfc_sli4_enable_msix()
12906 for (--index; index >= 0; index--) { in lpfc_sli4_enable_msix()
12909 irq_set_affinity_hint(eqhdl->irq, NULL); in lpfc_sli4_enable_msix()
12910 free_irq(eqhdl->irq, eqhdl); in lpfc_sli4_enable_msix()
12913 /* Unconfigure MSI-X capability structure */ in lpfc_sli4_enable_msix()
12914 pci_free_irq_vectors(phba->pcidev); in lpfc_sli4_enable_msix()
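When NORMAL_MODE lets the kernel spread vectors (PCI_IRQ_AFFINITY), the resulting per-vector masks are read back with pci_irq_get_affinity() and folded into cpu_map, as above. A sketch of that read-back walk with a placeholder action:

#include <linux/pci.h>

static void walk_vector_affinity(struct pci_dev *pdev, int nvec)
{
	const struct cpumask *maskp;
	int idx, cpu;

	for (idx = 0; idx < nvec; idx++) {
		maskp = pci_irq_get_affinity(pdev, idx);
		if (!maskp)
			continue;
		for_each_cpu_and(cpu, maskp, cpu_present_mask)
			pr_info("vector %d serves cpu %d\n", idx, cpu);
	}
}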
12921 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
12925 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
12931 * 0 - successful
12932 * other values - error
12938 unsigned int cpu; in lpfc_sli4_enable_msi() local
12941 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, in lpfc_sli4_enable_msi()
12949 return rc ? rc : -1; in lpfc_sli4_enable_msi()
12952 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, in lpfc_sli4_enable_msi()
12955 pci_free_irq_vectors(phba->pcidev); in lpfc_sli4_enable_msi()
12962 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); in lpfc_sli4_enable_msi()
12964 cpu = cpumask_first(cpu_present_mask); in lpfc_sli4_enable_msi()
12965 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); in lpfc_sli4_enable_msi()
12967 for (index = 0; index < phba->cfg_irq_chann; index++) { in lpfc_sli4_enable_msi()
12969 eqhdl->idx = index; in lpfc_sli4_enable_msi()
12976 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
12978 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12981 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
12986 * MSI-X -> MSI -> IRQ.
12989 * 0 - successful
12990 * other values - error
13002 /* Now, try to enable MSI-X interrupt mode */ in lpfc_sli4_enable_intr()
13005 /* Indicate initialization to MSI-X mode */ in lpfc_sli4_enable_intr()
13006 phba->intr_type = MSIX; in lpfc_sli4_enable_intr()
13012 /* Fallback to MSI if MSI-X initialization failed */ in lpfc_sli4_enable_intr()
13013 if (cfg_mode >= 1 && phba->intr_type == NONE) { in lpfc_sli4_enable_intr()
13017 phba->intr_type = MSI; in lpfc_sli4_enable_intr()
13022 /* Fallback to INTx if both MSI-X/MSI initialization failed */ in lpfc_sli4_enable_intr()
13023 if (phba->intr_type == NONE) { in lpfc_sli4_enable_intr()
13024 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, in lpfc_sli4_enable_intr()
13028 unsigned int cpu; in lpfc_sli4_enable_intr() local
13031 phba->intr_type = INTx; in lpfc_sli4_enable_intr()
13035 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); in lpfc_sli4_enable_intr()
13037 cpu = cpumask_first(cpu_present_mask); in lpfc_sli4_enable_intr()
13039 cpu); in lpfc_sli4_enable_intr()
13040 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { in lpfc_sli4_enable_intr()
13042 eqhdl->idx = idx; in lpfc_sli4_enable_intr()
13050 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13055 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13062 if (phba->intr_type == MSIX) { in lpfc_sli4_disable_intr()
13066 /* Free up MSI-X multi-message vectors */ in lpfc_sli4_disable_intr()
13067 for (index = 0; index < phba->cfg_irq_chann; index++) { in lpfc_sli4_disable_intr()
13070 irq_set_affinity_hint(eqhdl->irq, NULL); in lpfc_sli4_disable_intr()
13071 free_irq(eqhdl->irq, eqhdl); in lpfc_sli4_disable_intr()
13074 free_irq(phba->pcidev->irq, phba); in lpfc_sli4_disable_intr()
13077 pci_free_irq_vectors(phba->pcidev); in lpfc_sli4_disable_intr()
13080 phba->intr_type = NONE; in lpfc_sli4_disable_intr()
13081 phba->sli.slistat.sli_intr = 0; in lpfc_sli4_disable_intr()
13085 * lpfc_unset_hba - Unset SLI3 hba device initialization
13089 * a device with SLI-3 interface spec.
13094 struct lpfc_vport *vport = phba->pport; in lpfc_unset_hba()
13097 spin_lock_irq(shost->host_lock); in lpfc_unset_hba()
13098 vport->load_flag |= FC_UNLOADING; in lpfc_unset_hba()
13099 spin_unlock_irq(shost->host_lock); in lpfc_unset_hba()
13101 kfree(phba->vpi_bmask); in lpfc_unset_hba()
13102 kfree(phba->vpi_ids); in lpfc_unset_hba()
13106 phba->pport->work_port_events = 0; in lpfc_unset_hba()
13118 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13138 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); in lpfc_sli4_xri_exchange_busy_wait()
13147 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) in lpfc_sli4_xri_exchange_busy_wait()
13151 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_xri_exchange_busy_wait()
13152 qp = &phba->sli4_hba.hdwq[idx]; in lpfc_sli4_xri_exchange_busy_wait()
13153 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); in lpfc_sli4_xri_exchange_busy_wait()
13160 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_xri_exchange_busy_wait()
13162 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); in lpfc_sli4_xri_exchange_busy_wait()
13190 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { in lpfc_sli4_xri_exchange_busy_wait()
13191 qp = &phba->sli4_hba.hdwq[idx]; in lpfc_sli4_xri_exchange_busy_wait()
13193 &qp->lpfc_abts_io_buf_list); in lpfc_sli4_xri_exchange_busy_wait()
13200 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_sli4_xri_exchange_busy_wait()
13202 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); in lpfc_sli4_xri_exchange_busy_wait()
13205 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); in lpfc_sli4_xri_exchange_busy_wait()
13211 * lpfc_sli4_hba_unset - Unset the fcoe hba
13225 struct pci_dev *pdev = phba->pcidev; in lpfc_sli4_hba_unset()
13228 hrtimer_cancel(&phba->cmf_timer); in lpfc_sli4_hba_unset()
13230 if (phba->pport) in lpfc_sli4_hba_unset()
13231 phba->sli4_hba.intr_enable = 0; in lpfc_sli4_hba_unset()
13239 spin_lock_irq(&phba->hbalock); in lpfc_sli4_hba_unset()
13240 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; in lpfc_sli4_hba_unset()
13241 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_hba_unset()
13243 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { in lpfc_sli4_hba_unset()
13249 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { in lpfc_sli4_hba_unset()
13250 spin_lock_irq(&phba->hbalock); in lpfc_sli4_hba_unset()
13251 mboxq = phba->sli.mbox_active; in lpfc_sli4_hba_unset()
13252 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; in lpfc_sli4_hba_unset()
13254 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; in lpfc_sli4_hba_unset()
13255 phba->sli.mbox_active = NULL; in lpfc_sli4_hba_unset()
13256 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_hba_unset()
13265 /* per-phba callback de-registration for hotplug event */ in lpfc_sli4_hba_unset()
13266 if (phba->pport) in lpfc_sli4_hba_unset()
13272 /* Disable SR-IOV if enabled */ in lpfc_sli4_hba_unset()
13273 if (phba->cfg_sriov_nr_virtfn) in lpfc_sli4_hba_unset()
13277 kthread_stop(phba->worker_thread); in lpfc_sli4_hba_unset()
13292 if (phba->ras_fwlog.ras_enabled) in lpfc_sli4_hba_unset()
13296 if (phba->pport) in lpfc_sli4_hba_unset()
13297 phba->pport->work_port_events = 0; in lpfc_sli4_hba_unset()
13360 "6235 INIT Congestion Buffer %p\n", phba->cgn_i); in lpfc_init_congestion_buf()
13362 if (!phba->cgn_i) in lpfc_init_congestion_buf()
13364 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; in lpfc_init_congestion_buf()
13366 atomic_set(&phba->cgn_fabric_warn_cnt, 0); in lpfc_init_congestion_buf()
13367 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); in lpfc_init_congestion_buf()
13368 atomic_set(&phba->cgn_sync_alarm_cnt, 0); in lpfc_init_congestion_buf()
13369 atomic_set(&phba->cgn_sync_warn_cnt, 0); in lpfc_init_congestion_buf()
13371 atomic64_set(&phba->cgn_acqe_stat.alarm, 0); in lpfc_init_congestion_buf()
13372 atomic64_set(&phba->cgn_acqe_stat.warn, 0); in lpfc_init_congestion_buf()
13373 atomic_set(&phba->cgn_driver_evt_cnt, 0); in lpfc_init_congestion_buf()
13374 atomic_set(&phba->cgn_latency_evt_cnt, 0); in lpfc_init_congestion_buf()
13375 atomic64_set(&phba->cgn_latency_evt, 0); in lpfc_init_congestion_buf()
13376 phba->cgn_evt_minute = 0; in lpfc_init_congestion_buf()
13377 phba->hba_flag &= ~HBA_CGN_DAY_WRAP; in lpfc_init_congestion_buf()
13380 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); in lpfc_init_congestion_buf()
13381 cp->cgn_info_version = LPFC_CGN_INFO_V3; in lpfc_init_congestion_buf()
13384 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; in lpfc_init_congestion_buf()
13385 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; in lpfc_init_congestion_buf()
13386 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; in lpfc_init_congestion_buf()
13387 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; in lpfc_init_congestion_buf()
13392 cp->cgn_info_month = broken.tm_mon + 1; in lpfc_init_congestion_buf()
13393 cp->cgn_info_day = broken.tm_mday; in lpfc_init_congestion_buf()
13394 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ in lpfc_init_congestion_buf()
13395 cp->cgn_info_hour = broken.tm_hour; in lpfc_init_congestion_buf()
13396 cp->cgn_info_minute = broken.tm_min; in lpfc_init_congestion_buf()
13397 cp->cgn_info_second = broken.tm_sec; in lpfc_init_congestion_buf()
13402 cp->cgn_info_day, cp->cgn_info_month, in lpfc_init_congestion_buf()
13403 cp->cgn_info_year, cp->cgn_info_hour, in lpfc_init_congestion_buf()
13404 cp->cgn_info_minute, cp->cgn_info_second); in lpfc_init_congestion_buf()
13407 if (phba->pport) { in lpfc_init_congestion_buf()
13408 size = (uint16_t)(phba->pport->cfg_lun_queue_depth); in lpfc_init_congestion_buf()
13409 cp->cgn_lunq = cpu_to_le16(size); in lpfc_init_congestion_buf()
13414 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); in lpfc_init_congestion_buf()
13415 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); in lpfc_init_congestion_buf()
13417 cp->cgn_info_crc = cpu_to_le32(crc); in lpfc_init_congestion_buf()
13419 phba->cgn_evt_timestamp = jiffies + in lpfc_init_congestion_buf()
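The timestamp fields above come from breaking the wall-clock time into calendar components, with the month rebased to 1..12 and the year stored relative to 2000. A sketch of that conversion using the kernel's time64_to_tm():

#include <linux/time.h>
#include <linux/timekeeping.h>

static void encode_cgn_time(u8 *day, u8 *month, u8 *year)
{
	struct tm broken;

	time64_to_tm(ktime_get_real_seconds(), 0, &broken);
	*day = broken.tm_mday;
	*month = broken.tm_mon + 1;	/* tm_mon is 0-based */
	*year = broken.tm_year - 100;	/* tm_year counts from 1900 */
}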
13432 "6236 INIT Congestion Stat %p\n", phba->cgn_i); in lpfc_init_congestion_stat()
13434 if (!phba->cgn_i) in lpfc_init_congestion_stat()
13437 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; in lpfc_init_congestion_stat()
13438 memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE); in lpfc_init_congestion_stat()
13443 cp->cgn_stat_month = broken.tm_mon + 1; in lpfc_init_congestion_stat()
13444 cp->cgn_stat_day = broken.tm_mday; in lpfc_init_congestion_stat()
13445 cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */ in lpfc_init_congestion_stat()
13446 cp->cgn_stat_hour = broken.tm_hour; in lpfc_init_congestion_stat()
13447 cp->cgn_stat_minute = broken.tm_min; in lpfc_init_congestion_stat()
13452 cp->cgn_stat_day, cp->cgn_stat_month, in lpfc_init_congestion_stat()
13453 cp->cgn_stat_year, cp->cgn_stat_hour, in lpfc_init_congestion_stat()
13454 cp->cgn_stat_minute); in lpfc_init_congestion_stat()
13457 cp->cgn_info_crc = cpu_to_le32(crc); in lpfc_init_congestion_stat()
13461 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13474 if (!phba->cgn_i) in __lpfc_reg_congestion_buf()
13475 return -ENXIO; in __lpfc_reg_congestion_buf()
13477 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in __lpfc_reg_congestion_buf()
13482 phba->pport->port_state, reg); in __lpfc_reg_congestion_buf()
13483 return -ENOMEM; in __lpfc_reg_congestion_buf()
13486 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - in __lpfc_reg_congestion_buf()
13491 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; in __lpfc_reg_congestion_buf()
13497 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); in __lpfc_reg_congestion_buf()
13498 reg_congestion_buf->addr_lo = in __lpfc_reg_congestion_buf()
13499 putPaddrLow(phba->cgn_i->phys); in __lpfc_reg_congestion_buf()
13500 reg_congestion_buf->addr_hi = in __lpfc_reg_congestion_buf()
13501 putPaddrHigh(phba->cgn_i->phys); in __lpfc_reg_congestion_buf()
13505 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; in __lpfc_reg_congestion_buf()
13506 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); in __lpfc_reg_congestion_buf()
13508 &shdr->response); in __lpfc_reg_congestion_buf()
13509 mempool_free(mboxq, phba->mbox_mem_pool); in __lpfc_reg_congestion_buf()
13516 return -ENXIO; in __lpfc_reg_congestion_buf()
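The registration mailbox above hands the port the congestion buffer's 64-bit DMA address split into two 32-bit words via putPaddrLow()/putPaddrHigh(). A sketch of the equivalent split with the generic kernel helpers:

#include <linux/kernel.h>
#include <linux/types.h>

static inline u32 paddr_lo(dma_addr_t addr)
{
	return lower_32_bits(addr);	/* bits 31:0 */
}

static inline u32 paddr_hi(dma_addr_t addr)
{
	return upper_32_bits(addr);	/* bits 63:32 */
}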
13535 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13542 * This function may be called from any context that can block-wait
13550 struct lpfc_mqe *mqe = &mboxq->u.mqe; in lpfc_get_sli4_parameters()
13562 phba->sli4_hba.rpi_hdrs_in_use = 1; in lpfc_get_sli4_parameters()
13565 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - in lpfc_get_sli4_parameters()
13570 if (!phba->sli4_hba.intr_enable) in lpfc_get_sli4_parameters()
13578 sli4_params = &phba->sli4_hba.pc_sli4_params; in lpfc_get_sli4_parameters()
13579 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; in lpfc_get_sli4_parameters()
13580 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13581 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13582 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13583 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, in lpfc_get_sli4_parameters()
13585 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, in lpfc_get_sli4_parameters()
13588 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; in lpfc_get_sli4_parameters()
13590 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; in lpfc_get_sli4_parameters()
13591 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; in lpfc_get_sli4_parameters()
13592 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, in lpfc_get_sli4_parameters()
13594 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13595 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13596 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13597 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13598 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13599 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13600 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13601 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13602 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13603 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13604 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, in lpfc_get_sli4_parameters()
13606 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13607 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, in lpfc_get_sli4_parameters()
13609 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13610 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13612 /* Check for Extended Pre-Registered SGL support */ in lpfc_get_sli4_parameters()
13613 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); in lpfc_get_sli4_parameters()
13621 sli4_params->nvme = 1; in lpfc_get_sli4_parameters()
13624 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { in lpfc_get_sli4_parameters()
13628 phba->cfg_enable_fc4_type); in lpfc_get_sli4_parameters()
13633 sli4_params->nvme = 0; in lpfc_get_sli4_parameters()
13634 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_get_sli4_parameters()
13640 phba->cfg_enable_fc4_type); in lpfc_get_sli4_parameters()
13642 phba->nvmet_support = 0; in lpfc_get_sli4_parameters()
13643 phba->cfg_nvmet_mrq = 0; in lpfc_get_sli4_parameters()
13644 phba->cfg_nvme_seg_cnt = 0; in lpfc_get_sli4_parameters()
13647 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) in lpfc_get_sli4_parameters()
13648 return -ENODEV; in lpfc_get_sli4_parameters()
13649 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; in lpfc_get_sli4_parameters()
13656 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) in lpfc_get_sli4_parameters()
13657 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; in lpfc_get_sli4_parameters()
13661 phba->cfg_enable_pbde = 1; in lpfc_get_sli4_parameters()
13663 phba->cfg_enable_pbde = 0; in lpfc_get_sli4_parameters()
13668 * In SLI4-Parameters Descriptor: in lpfc_get_sli4_parameters()
13673 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && in lpfc_get_sli4_parameters()
13675 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; in lpfc_get_sli4_parameters()
13677 phba->cfg_suppress_rsp = 0; in lpfc_get_sli4_parameters()
13680 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; in lpfc_get_sli4_parameters()
13683 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) in lpfc_get_sli4_parameters()
13684 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; in lpfc_get_sli4_parameters()
13689 * to use this option, 128-byte WQEs must be used. in lpfc_get_sli4_parameters()
13692 phba->fcp_embed_io = 1; in lpfc_get_sli4_parameters()
13694 phba->fcp_embed_io = 0; in lpfc_get_sli4_parameters()
13699 phba->cfg_enable_pbde, in lpfc_get_sli4_parameters()
13700 phba->fcp_embed_io, sli4_params->nvme, in lpfc_get_sli4_parameters()
13701 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); in lpfc_get_sli4_parameters()
13703 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == in lpfc_get_sli4_parameters()
13705 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == in lpfc_get_sli4_parameters()
13712 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) in lpfc_get_sli4_parameters()
13713 phba->enab_exp_wqcq_pages = 1; in lpfc_get_sli4_parameters()
13715 phba->enab_exp_wqcq_pages = 0; in lpfc_get_sli4_parameters()
13720 phba->mds_diags_support = 1; in lpfc_get_sli4_parameters()
13722 phba->mds_diags_support = 0; in lpfc_get_sli4_parameters()
13728 phba->nsler = 1; in lpfc_get_sli4_parameters()
13730 phba->nsler = 0; in lpfc_get_sli4_parameters()
13736 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13740 * This routine is to be called to attach a device with SLI-3 interface spec
13741 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13742 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13749 * 0 - driver can claim the device
13750 * negative value - driver cannot claim the device
13764 return -ENOMEM; in lpfc_pci_probe_one_s3()
13771 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ in lpfc_pci_probe_one_s3()
13776 /* Set up SLI-3 specific device PCI memory space */ in lpfc_pci_probe_one_s3()
13784 /* Set up SLI-3 specific device driver resources */ in lpfc_pci_probe_one_s3()
13810 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); in lpfc_pci_probe_one_s3()
13821 vport = phba->pport; in lpfc_pci_probe_one_s3()
13831 cfg_mode = phba->cfg_use_msi; in lpfc_pci_probe_one_s3()
13840 error = -ENODEV; in lpfc_pci_probe_one_s3()
13843 /* SLI-3 HBA setup */ in lpfc_pci_probe_one_s3()
13847 error = -ENODEV; in lpfc_pci_probe_one_s3()
13855 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { in lpfc_pci_probe_one_s3()
13857 phba->intr_mode = intr_mode; in lpfc_pci_probe_one_s3()
13868 cfg_mode = --intr_mode; in lpfc_pci_probe_one_s3()
13904 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
13907 * This routine is to be called to detach a device with SLI-3 interface
13908 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13916 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; in lpfc_pci_remove_one_s3()
13918 struct lpfc_hba *phba = vport->phba; in lpfc_pci_remove_one_s3()
13921 spin_lock_irq(&phba->hbalock); in lpfc_pci_remove_one_s3()
13922 vport->load_flag |= FC_UNLOADING; in lpfc_pci_remove_one_s3()
13923 spin_unlock_irq(&phba->hbalock); in lpfc_pci_remove_one_s3()
13930 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_pci_remove_one_s3()
13931 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) in lpfc_pci_remove_one_s3()
13933 fc_vport_terminate(vports[i]->fc_vport); in lpfc_pci_remove_one_s3()
13953 kthread_stop(phba->worker_thread); in lpfc_pci_remove_one_s3()
13957 kfree(phba->vpi_bmask); in lpfc_pci_remove_one_s3()
13958 kfree(phba->vpi_ids); in lpfc_pci_remove_one_s3()
13961 spin_lock_irq(&phba->port_list_lock); in lpfc_pci_remove_one_s3()
13962 list_del_init(&vport->listentry); in lpfc_pci_remove_one_s3()
13963 spin_unlock_irq(&phba->port_list_lock); in lpfc_pci_remove_one_s3()
13967 /* Disable SR-IOV if enabled */ in lpfc_pci_remove_one_s3()
13968 if (phba->cfg_sriov_nr_virtfn) in lpfc_pci_remove_one_s3()
13985 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), in lpfc_pci_remove_one_s3()
13986 phba->hbqslimp.virt, phba->hbqslimp.phys); in lpfc_pci_remove_one_s3()
13989 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, in lpfc_pci_remove_one_s3()
13990 phba->slim2p.virt, phba->slim2p.phys); in lpfc_pci_remove_one_s3()
13993 iounmap(phba->ctrl_regs_memmap_p); in lpfc_pci_remove_one_s3()
13994 iounmap(phba->slim_memmap_p); in lpfc_pci_remove_one_s3()
14003 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14007 * system Power Management (PM) to device with SLI-3 interface spec. When
14011 * minimum PM requirements to a power-aware driver's PM support for the
14012 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14019 * 0 - driver suspended the device
14026 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_suspend_one_s3()
14034 kthread_stop(phba->worker_thread); in lpfc_pci_suspend_one_s3()
14043 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14047 * system Power Management (PM) to device with SLI-3 interface spec. When PM
14050 * driver implements the minimum PM requirements to a power-aware driver's
14051 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
14058 * 0 - driver resumed the device
14065 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_resume_one_s3()
14073 phba->worker_thread = kthread_run(lpfc_do_work, phba, in lpfc_pci_resume_one_s3()
14074 "lpfc_worker_%d", phba->brd_no); in lpfc_pci_resume_one_s3()
14075 if (IS_ERR(phba->worker_thread)) { in lpfc_pci_resume_one_s3()
14076 error = PTR_ERR(phba->worker_thread); in lpfc_pci_resume_one_s3()
14084 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); in lpfc_pci_resume_one_s3()
14088 return -EIO; in lpfc_pci_resume_one_s3()
14090 phba->intr_mode = intr_mode; in lpfc_pci_resume_one_s3()
14097 lpfc_log_intr_mode(phba, phba->intr_mode); in lpfc_pci_resume_one_s3()
14103 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14117 * and let the SCSI mid-layer retry them to recover. in lpfc_sli_prep_dev_for_recover()
14123 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14150 pci_disable_device(phba->pcidev); in lpfc_sli_prep_dev_for_reset()
14154 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14177 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14182 * device with SLI-3 interface spec. This function is called by the PCI
14190 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14191 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14192 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14198 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_error_detected_s3()
14202 /* Non-fatal error, prepare for recovery */ in lpfc_io_error_detected_s3()
14223 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14227 * device with SLI-3 interface spec. This is called after PCI bus has been
14228 * reset to restart the PCI card from scratch, as if from a cold-boot.
14237 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
14238 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14244 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_slot_reset_s3()
14245 struct lpfc_sli *psli = &phba->sli; in lpfc_io_slot_reset_s3()
14248 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); in lpfc_io_slot_reset_s3()
14250 printk(KERN_ERR "lpfc: Cannot re-enable " in lpfc_io_slot_reset_s3()
14263 if (pdev->is_busmaster) in lpfc_io_slot_reset_s3()
14266 spin_lock_irq(&phba->hbalock); in lpfc_io_slot_reset_s3()
14267 psli->sli_flag &= ~LPFC_SLI_ACTIVE; in lpfc_io_slot_reset_s3()
14268 spin_unlock_irq(&phba->hbalock); in lpfc_io_slot_reset_s3()
14271 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); in lpfc_io_slot_reset_s3()
14274 "0427 Cannot re-enable interrupt after " in lpfc_io_slot_reset_s3()
14278 phba->intr_mode = intr_mode; in lpfc_io_slot_reset_s3()
14286 lpfc_log_intr_mode(phba, phba->intr_mode); in lpfc_io_slot_reset_s3()
14292 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14296 * with SLI-3 interface spec. It is called when kernel error recovery tells
14305 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_resume_s3()
14307 /* Bring device online, it will be a no-op for non-fatal error resume */ in lpfc_io_resume_s3()
14312 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14320 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; in lpfc_sli4_get_els_iocb_cnt()
14322 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_sli4_get_els_iocb_cnt()
14342 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14352 if (phba->nvmet_support) in lpfc_sli4_get_iocb_cnt()
14366 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); in lpfc_log_write_firmware_error()
14384 phba->pcidev->device, magic_number, ftype, fid, in lpfc_log_write_firmware_error()
14385 fsize, fw->size); in lpfc_log_write_firmware_error()
14386 rc = -EINVAL; in lpfc_log_write_firmware_error()
14393 phba->pcidev->device, magic_number, ftype, fid, in lpfc_log_write_firmware_error()
14394 fsize, fw->size); in lpfc_log_write_firmware_error()
14395 rc = -EACCES; in lpfc_log_write_firmware_error()
14401 offset, phba->pcidev->device, magic_number, in lpfc_log_write_firmware_error()
14402 ftype, fid, fsize, fw->size); in lpfc_log_write_firmware_error()
14403 rc = -EIO; in lpfc_log_write_firmware_error()
14409 * lpfc_write_firmware - attempt to write a firmware image to the port
14426 /* It can be null in no-wait mode, sanity check */ in lpfc_write_firmware()
14428 rc = -ENXIO; in lpfc_write_firmware()
14431 image = (struct lpfc_grp_hdr *)fw->data; in lpfc_write_firmware()
14433 magic_number = be32_to_cpu(image->magic_number); in lpfc_write_firmware()
14436 fsize = be32_to_cpu(image->size); in lpfc_write_firmware()
14440 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { in lpfc_write_firmware()
14444 fwrev, image->revision); in lpfc_write_firmware()
14449 rc = -ENOMEM; in lpfc_write_firmware()
14452 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, in lpfc_write_firmware()
14454 &dmabuf->phys, in lpfc_write_firmware()
14456 if (!dmabuf->virt) { in lpfc_write_firmware()
14458 rc = -ENOMEM; in lpfc_write_firmware()
14461 list_add_tail(&dmabuf->list, &dma_buffer_list); in lpfc_write_firmware()
14463 while (offset < fw->size) { in lpfc_write_firmware()
14466 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { in lpfc_write_firmware()
14467 memcpy(dmabuf->virt, in lpfc_write_firmware()
14468 fw->data + temp_offset, in lpfc_write_firmware()
14469 fw->size - temp_offset); in lpfc_write_firmware()
14470 temp_offset = fw->size; in lpfc_write_firmware()
14473 memcpy(dmabuf->virt, fw->data + temp_offset, in lpfc_write_firmware()
14478 (fw->size - offset), &offset); in lpfc_write_firmware()
14494 fwrev, image->revision); in lpfc_write_firmware()
14498 list_del(&dmabuf->list); in lpfc_write_firmware()
14499 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, in lpfc_write_firmware()
14500 dmabuf->virt, dmabuf->phys); in lpfc_write_firmware()
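/*
 * Editor's sketch of the chunking loop above (DMA buffer list handling
 * elided): copy at most one SLI4 page per iteration, short-copying the
 * image tail; the commented-out write step stands in for the mailbox
 * write performed by the driver.
 */
static void example_copy_chunks(void *dma_virt, const u8 *data, size_t size,
				size_t page_sz)
{
	size_t off = 0;

	while (off < size) {
		size_t chunk = size - off;

		if (chunk > page_sz)
			chunk = page_sz;
		memcpy(dma_virt, data + off, chunk);
		/* write_chunk(dma_virt, chunk, off); */
		off += chunk;
	}
}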
14514 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14529 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < in lpfc_sli4_request_firmware_update()
14531 return -EPERM; in lpfc_sli4_request_firmware_update()
14533 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); in lpfc_sli4_request_firmware_update()
14537 file_name, &phba->pcidev->dev, in lpfc_sli4_request_firmware_update()
14541 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); in lpfc_sli4_request_firmware_update()
14545 ret = -EINVAL; in lpfc_sli4_request_firmware_update()
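/*
 * Editor's sketch of the two request paths visible above; the callback
 * and function names are illustrative, and FW_ACTION_UEVENT is spelled
 * FW_ACTION_HOTPLUG on older kernels:
 */
static void example_fw_cont(const struct firmware *fw, void *context)
{
	/* fw may be NULL in no-wait mode if no image was found */
	release_firmware(fw);
}

static int example_fw_request(struct device *dev, const char *file_name,
			      void *context, bool nowait)
{
	const struct firmware *fw;
	int ret;

	if (nowait)
		return request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
					       file_name, dev, GFP_KERNEL,
					       context, example_fw_cont);
	ret = request_firmware(&fw, file_name, dev);
	if (!ret)
		example_fw_cont(fw, context);
	return ret;
}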
14552 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14557 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14558 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
14566 * 0 - driver can claim the device
14567 * negative value - driver cannot claim the device
14581 return -ENOMEM; in lpfc_pci_probe_one_s4()
14583 INIT_LIST_HEAD(&phba->poll_list); in lpfc_pci_probe_one_s4()
14590 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ in lpfc_pci_probe_one_s4()
14595 /* Set up SLI-4 specific device PCI memory space */ in lpfc_pci_probe_one_s4()
14603 /* Set up SLI-4 Specific device driver resources */ in lpfc_pci_probe_one_s4()
14611 INIT_LIST_HEAD(&phba->active_rrq_list); in lpfc_pci_probe_one_s4()
14612 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); in lpfc_pci_probe_one_s4()
14623 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); in lpfc_pci_probe_one_s4()
14626 cfg_mode = phba->cfg_use_msi; in lpfc_pci_probe_one_s4()
14629 phba->pport = NULL; in lpfc_pci_probe_one_s4()
14643 error = -ENODEV; in lpfc_pci_probe_one_s4()
14646 /* Default to single EQ for non-MSI-X */ in lpfc_pci_probe_one_s4()
14647 if (phba->intr_type != MSIX) { in lpfc_pci_probe_one_s4()
14648 phba->cfg_irq_chann = 1; in lpfc_pci_probe_one_s4()
14649 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_pci_probe_one_s4()
14650 if (phba->nvmet_support) in lpfc_pci_probe_one_s4()
14651 phba->cfg_nvmet_mrq = 1; in lpfc_pci_probe_one_s4()
14654 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); in lpfc_pci_probe_one_s4()
14663 vport = phba->pport; in lpfc_pci_probe_one_s4()
14674 /* Set up SLI-4 HBA */ in lpfc_pci_probe_one_s4()
14678 error = -ENODEV; in lpfc_pci_probe_one_s4()
14683 phba->intr_mode = intr_mode; in lpfc_pci_probe_one_s4()
14692 if (phba->nvmet_support == 0) { in lpfc_pci_probe_one_s4()
14693 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { in lpfc_pci_probe_one_s4()
14710 if (phba->cfg_request_firmware_upgrade) in lpfc_pci_probe_one_s4()
14719 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); in lpfc_pci_probe_one_s4()
14720 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); in lpfc_pci_probe_one_s4()
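/*
 * Editor's sketch (assuming the standard cpuhp multi-instance API): the
 * per-HBA cpuhp_state_add_instance_nocalls() above pairs with a one-time
 * dynamic state registration at module load, roughly:
 */
static enum cpuhp_state example_cpuhp_state;

static int example_cpuhp_setup(int (*online)(unsigned int, struct hlist_node *),
			       int (*offline)(unsigned int, struct hlist_node *))
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					  "scsi/lpfc:online", online, offline);
	if (ret < 0)
		return ret;
	example_cpuhp_state = ret;	/* remember the dynamic state id */
	return 0;
}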
14746 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14750 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14758 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; in lpfc_pci_remove_one_s4()
14760 struct lpfc_hba *phba = vport->phba; in lpfc_pci_remove_one_s4()
14764 spin_lock_irq(&phba->hbalock); in lpfc_pci_remove_one_s4()
14765 vport->load_flag |= FC_UNLOADING; in lpfc_pci_remove_one_s4()
14766 spin_unlock_irq(&phba->hbalock); in lpfc_pci_remove_one_s4()
14767 if (phba->cgn_i) in lpfc_pci_remove_one_s4()
14775 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_pci_remove_one_s4()
14776 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) in lpfc_pci_remove_one_s4()
14778 fc_vport_terminate(vports[i]->fc_vport); in lpfc_pci_remove_one_s4()
14793 /* De-allocate multi-XRI pools */ in lpfc_pci_remove_one_s4()
14794 if (phba->cfg_xri_rebalancing) in lpfc_pci_remove_one_s4()
14805 spin_lock_irq(&phba->port_list_lock); in lpfc_pci_remove_one_s4()
14806 list_del_init(&vport->listentry); in lpfc_pci_remove_one_s4()
14807 spin_unlock_irq(&phba->port_list_lock); in lpfc_pci_remove_one_s4()
14833 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
14837 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
14841 * requirements of a power-aware driver's PM support for suspend/resume -- all
14849 * 0 - driver suspended the device
14856 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_suspend_one_s4()
14864 kthread_stop(phba->worker_thread); in lpfc_pci_suspend_one_s4()
14874 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
14878 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
14881 * implements the minimum PM requirements of a power-aware driver's PM for
14882 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14889 * 0 - driver resumed the device
14896 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_resume_one_s4()
14904 phba->worker_thread = kthread_run(lpfc_do_work, phba, in lpfc_pci_resume_one_s4()
14905 "lpfc_worker_%d", phba->brd_no); in lpfc_pci_resume_one_s4()
14906 if (IS_ERR(phba->worker_thread)) { in lpfc_pci_resume_one_s4()
14907 error = PTR_ERR(phba->worker_thread); in lpfc_pci_resume_one_s4()
14915 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); in lpfc_pci_resume_one_s4()
14919 return -EIO; in lpfc_pci_resume_one_s4()
14921 phba->intr_mode = intr_mode; in lpfc_pci_resume_one_s4()
14928 lpfc_log_intr_mode(phba, phba->intr_mode); in lpfc_pci_resume_one_s4()
14934 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
14947 * and let the SCSI mid-layer retry them to recover. in lpfc_sli4_prep_dev_for_recover()
14953 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
14981 pci_disable_device(phba->pcidev); in lpfc_sli4_prep_dev_for_reset()
14985 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15009 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15014 * with SLI-4 interface spec. This function is called by the PCI subsystem
15021 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15022 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15028 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_error_detected_s4()
15032 /* Non-fatal error, prepare for recovery */ in lpfc_io_error_detected_s4()
15053 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
15057 * with SLI-4 interface spec. It is called after PCI bus has been reset to
15058 * restart the PCI card from scratch, as if from a cold-boot. During the
15067 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15068 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15074 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_slot_reset_s4()
15075 struct lpfc_sli *psli = &phba->sli; in lpfc_io_slot_reset_s4()
15078 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); in lpfc_io_slot_reset_s4()
15080 printk(KERN_ERR "lpfc: Cannot re-enable " in lpfc_io_slot_reset_s4()
15093 if (pdev->is_busmaster) in lpfc_io_slot_reset_s4()
15096 spin_lock_irq(&phba->hbalock); in lpfc_io_slot_reset_s4()
15097 psli->sli_flag &= ~LPFC_SLI_ACTIVE; in lpfc_io_slot_reset_s4()
15098 spin_unlock_irq(&phba->hbalock); in lpfc_io_slot_reset_s4()
15101 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); in lpfc_io_slot_reset_s4()
15104 "2824 Cannot re-enable interrupt after " in lpfc_io_slot_reset_s4()
15108 phba->intr_mode = intr_mode; in lpfc_io_slot_reset_s4()
15111 lpfc_log_intr_mode(phba, phba->intr_mode); in lpfc_io_slot_reset_s4()
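/*
 * Editor's sketch of the canonical slot_reset sequence the fragments
 * above follow (assuming pci_enable_device()/pci_restore_state() in the
 * elided lines); interrupt re-enablement is driver-specific:
 */
static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_restore_state(pdev);
	pci_save_state(pdev);	/* so a later reset can restore again */
	if (pdev->is_busmaster)
		pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}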
15117 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15121 * with SLI-4 interface spec. It is called when kernel error recovery tells
15130 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_resume_s4()
15138 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { in lpfc_io_resume_s4()
15149 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15155 * at PCI device-specific information of the device and driver to see if the
15158 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15163 * 0 - driver can claim the device
15164 * negative value - driver cannot claim the device
15173 return -ENODEV; in lpfc_pci_probe_one()
15185 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15190 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15198 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_remove_one()
15200 switch (phba->pci_dev_grp) { in lpfc_pci_remove_one()
15210 phba->pci_dev_grp); in lpfc_pci_remove_one()
15217 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15222 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15226 * 0 - driver suspended the device
15233 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_suspend_one()
15234 int rc = -ENODEV; in lpfc_pci_suspend_one()
15236 switch (phba->pci_dev_grp) { in lpfc_pci_suspend_one()
15246 phba->pci_dev_grp); in lpfc_pci_suspend_one()
15253 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15258 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15262 * 0 - driver resumed the device
15269 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_pci_resume_one()
15270 int rc = -ENODEV; in lpfc_pci_resume_one()
15272 switch (phba->pci_dev_grp) { in lpfc_pci_resume_one()
15282 phba->pci_dev_grp); in lpfc_pci_resume_one()
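/*
 * Editor's sketch: every group-neutral wrapper in this block dispatches
 * on the device group recorded at probe time; using the resume pair as
 * the example (signatures abbreviated):
 */
static int example_dispatch_resume(struct pci_dev *pdev,
				   struct lpfc_hba *phba)
{
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:	/* SLI-3 HBA */
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:	/* SLI-4 HBA */
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:		/* unknown group: fail the call */
		break;
	}
	return rc;
}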
15289 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15296 * the action to the proper SLI-3 or SLI-4 device error detected handling
15300 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15301 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15307 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_error_detected()
15310 switch (phba->pci_dev_grp) { in lpfc_io_error_detected()
15320 phba->pci_dev_grp); in lpfc_io_error_detected()
15327 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
15332 * from scratch, as if from a cold-boot. When this routine is invoked, it
15333 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15337 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15338 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15344 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_slot_reset()
15347 switch (phba->pci_dev_grp) { in lpfc_io_slot_reset()
15357 phba->pci_dev_grp); in lpfc_io_slot_reset()
15364 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15370 * this routine is invoked, it dispatches the action to the proper SLI-3
15371 * or SLI-4 device io_resume routine, which will resume the device operation.
15377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; in lpfc_io_resume()
15379 switch (phba->pci_dev_grp) { in lpfc_io_resume()
15389 phba->pci_dev_grp); in lpfc_io_resume()
15396 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15409 if (!phba->cfg_EnableXLane) in lpfc_sli4_oas_verify()
15412 if (phba->sli4_hba.pc_sli4_params.oas_supported) { in lpfc_sli4_oas_verify()
15413 phba->cfg_fof = 1; in lpfc_sli4_oas_verify()
15415 phba->cfg_fof = 0; in lpfc_sli4_oas_verify()
15416 mempool_destroy(phba->device_data_mem_pool); in lpfc_sli4_oas_verify()
15417 phba->device_data_mem_pool = NULL; in lpfc_sli4_oas_verify()
15424 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15434 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == in lpfc_sli4_ras_init()
15436 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == in lpfc_sli4_ras_init()
15438 phba->ras_fwlog.ras_hwsupport = true; in lpfc_sli4_ras_init()
15439 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && in lpfc_sli4_ras_init()
15440 phba->cfg_ras_fwlog_buffsize) in lpfc_sli4_ras_init()
15441 phba->ras_fwlog.ras_enabled = true; in lpfc_sli4_ras_init()
15443 phba->ras_fwlog.ras_enabled = false; in lpfc_sli4_ras_init()
15445 phba->ras_fwlog.ras_hwsupport = false; in lpfc_sli4_ras_init()
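/*
 * Editor's sketch condensing the gating above: hardware support is a
 * property of the interface type/family, while actual enablement also
 * requires the configured log function to match this PCI function and
 * a non-zero buffer size.
 */
static bool example_ras_enabled(struct lpfc_hba *phba)
{
	return phba->ras_fwlog.ras_hwsupport &&
	       phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
	       phba->cfg_ras_fwlog_buffsize != 0;
}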
15483 * lpfc_init - lpfc module initialization routine
15490 * 0 - successful
15491 * -ENOMEM - FC attach transport failed
15492 * all others - failed
15507 error = -ENOMEM; in lpfc_init()
15564 if (phba->cfg_log_verbose) in lpfc_dmp_dbg()
15567 spin_lock_irqsave(&phba->port_list_lock, iflags); in lpfc_dmp_dbg()
15568 list_for_each_entry(port_iterator, &phba->port_list, listentry) { in lpfc_dmp_dbg()
15569 if (port_iterator->load_flag & FC_UNLOADING) in lpfc_dmp_dbg()
15572 if (port_iterator->cfg_log_verbose) in lpfc_dmp_dbg()
15578 spin_unlock_irqrestore(&phba->port_list_lock, in lpfc_dmp_dbg()
15584 spin_unlock_irqrestore(&phba->port_list_lock, iflags); in lpfc_dmp_dbg()
15586 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) in lpfc_dmp_dbg()
15589 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; in lpfc_dmp_dbg()
15590 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); in lpfc_dmp_dbg()
15596 temp_idx -= 1; in lpfc_dmp_dbg()
15598 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { in lpfc_dmp_dbg()
15602 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); in lpfc_dmp_dbg()
15604 start_idx -= dbg_cnt; in lpfc_dmp_dbg()
15607 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", in lpfc_dmp_dbg()
15615 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); in lpfc_dmp_dbg()
15616 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", in lpfc_dmp_dbg()
15618 (unsigned long)phba->dbg_log[temp_idx].t_ns, in lpfc_dmp_dbg()
15620 phba->dbg_log[temp_idx].log); in lpfc_dmp_dbg()
15623 atomic_set(&phba->dbg_log_cnt, 0); in lpfc_dmp_dbg()
15624 atomic_set(&phba->dbg_log_dmping, 0); in lpfc_dmp_dbg()
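/*
 * Editor's sketch of the wrap-around arithmetic above: given the next
 * write slot (start_idx) and the number of valid entries (dbg_cnt), the
 * oldest entry in the DBG_LOG_SZ ring is found by stepping back dbg_cnt
 * slots modulo the ring size; this helper is equivalent to the two-branch
 * computation in lpfc_dmp_dbg().
 */
static unsigned int example_oldest_slot(unsigned int start_idx,
					unsigned int dbg_cnt,
					unsigned int ring_sz)
{
	return (start_idx + ring_sz - (dbg_cnt % ring_sz)) % ring_sz;
}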
15632 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); in lpfc_dbg_print()
15640 dev_info(&phba->pcidev->dev, "%pV", &vaf); in lpfc_dbg_print()
15644 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % in lpfc_dbg_print()
15647 atomic_inc(&phba->dbg_log_cnt); in lpfc_dbg_print()
15649 vscnprintf(phba->dbg_log[idx].log, in lpfc_dbg_print()
15650 sizeof(phba->dbg_log[idx].log), fmt, args); in lpfc_dbg_print()
15653 phba->dbg_log[idx].t_ns = local_clock(); in lpfc_dbg_print()
15657 * lpfc_exit - lpfc module removal routine