Lines Matching full:pf
67 static void otx2_disable_flr_me_intr(struct otx2_nic *pf) in otx2_disable_flr_me_intr() argument
69 int irq, vfs = pf->total_vfs; in otx2_disable_flr_me_intr()
72 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); in otx2_disable_flr_me_intr()
73 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0); in otx2_disable_flr_me_intr()
74 free_irq(irq, pf); in otx2_disable_flr_me_intr()
77 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); in otx2_disable_flr_me_intr()
78 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0); in otx2_disable_flr_me_intr()
79 free_irq(irq, pf); in otx2_disable_flr_me_intr()
84 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); in otx2_disable_flr_me_intr()
85 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1); in otx2_disable_flr_me_intr()
86 free_irq(irq, pf); in otx2_disable_flr_me_intr()
88 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); in otx2_disable_flr_me_intr()
89 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1); in otx2_disable_flr_me_intr()
90 free_irq(irq, pf); in otx2_disable_flr_me_intr()
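
The disable path above touches two register banks because each *_INT_ENA_W1CX register only covers 64 VFs: lines 72-79 handle VFs 0-63 and lines 84-90 handle the remainder with INTR_MASK(vfs - 64). A minimal standalone sketch of that split is below; intr_mask() is an illustrative stand-in assuming INTR_MASK(n) behaves like "low n bits set", which the usage here implies but which is not shown in the listing.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the driver's INTR_MASK(): low n bits set, saturating
     * at all-ones once n reaches 64 so the shift stays defined.
     */
    static uint64_t intr_mask(int n)
    {
        return (n >= 64) ? ~0ULL : ((1ULL << n) - 1);
    }

    int main(void)
    {
        int total_vfs = 96;   /* hypothetical SR-IOV configuration */
        uint64_t bank0 = intr_mask(total_vfs);                            /* VFs 0-63 */
        uint64_t bank1 = total_vfs > 64 ? intr_mask(total_vfs - 64) : 0;  /* VFs 64+  */

        printf("bank0 = %#llx\n", (unsigned long long)bank0);
        printf("bank1 = %#llx\n", (unsigned long long)bank1);
        return 0;
    }
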
93 static void otx2_flr_wq_destroy(struct otx2_nic *pf) in otx2_flr_wq_destroy() argument
95 if (!pf->flr_wq) in otx2_flr_wq_destroy()
97 destroy_workqueue(pf->flr_wq); in otx2_flr_wq_destroy()
98 pf->flr_wq = NULL; in otx2_flr_wq_destroy()
99 devm_kfree(pf->dev, pf->flr_wrk); in otx2_flr_wq_destroy()
105 struct otx2_nic *pf = flrwork->pf; in otx2_flr_handler() local
106 struct mbox *mbox = &pf->mbox; in otx2_flr_handler()
110 vf = flrwork - pf->flr_wrk; in otx2_flr_handler()
121 if (!otx2_sync_mbox_msg(&pf->mbox)) { in otx2_flr_handler()
127 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); in otx2_flr_handler()
128 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); in otx2_flr_handler()
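
otx2_flr_handler() recovers the VF number purely from the address of its work item: pf->flr_wrk is an array with one entry per VF, so "flrwork - pf->flr_wrk" at line 110 is the array index. A small sketch of that idiom follows; struct flr_work and its fields are placeholders, not the driver's definitions.

    #include <stddef.h>
    #include <stdio.h>

    struct flr_work {
        void *pf;   /* back-pointer, as in the driver's per-VF context */
        int work;   /* stands in for struct work_struct */
    };

    /* The work handler only receives the embedded work item; the VF index
     * falls out of pointer subtraction against the start of the array.
     */
    static void flr_handler(struct flr_work *flrwork, struct flr_work *flr_wrk)
    {
        ptrdiff_t vf = flrwork - flr_wrk;

        printf("FLR work item belongs to VF %td\n", vf);
    }

    int main(void)
    {
        struct flr_work flr_wrk[8] = { { 0 } };

        flr_handler(&flr_wrk[5], flr_wrk);   /* prints VF 5 */
        return 0;
    }
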
136 struct otx2_nic *pf = (struct otx2_nic *)pf_irq; in otx2_pf_flr_intr_handler() local
140 if (pf->total_vfs > 64) in otx2_pf_flr_intr_handler()
144 intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg)); in otx2_pf_flr_intr_handler()
152 queue_work(pf->flr_wq, &pf->flr_wrk[dev].work); in otx2_pf_flr_intr_handler()
154 otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); in otx2_pf_flr_intr_handler()
156 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg), in otx2_pf_flr_intr_handler()
165 struct otx2_nic *pf = (struct otx2_nic *)pf_irq; in otx2_pf_me_intr_handler() local
169 if (pf->total_vfs > 64) in otx2_pf_me_intr_handler()
173 intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg)); in otx2_pf_me_intr_handler()
180 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); in otx2_pf_me_intr_handler()
182 otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf)); in otx2_pf_me_intr_handler()
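
Both the FLR and ME handlers follow the same shape: snapshot the pending-interrupt register, walk its set bits to find which VFs raised the event, queue or record the work, and acknowledge each bit individually by writing BIT_ULL(vf) back (lines 154 and 182), which implies the INTX registers are write-1-to-clear. A standalone sketch of that loop, with a plain variable standing in for the hardware register:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_intx = (1ULL << 3) | (1ULL << 41);  /* pretend pending bits */

    static uint64_t read_intx(void) { return fake_intx; }
    static void ack_intx(int vf)    { fake_intx &= ~(1ULL << vf); }  /* models W1C */

    int main(void)
    {
        uint64_t intr = read_intx();   /* snapshot once, like otx2_read64() */
        int vf;

        for (vf = 0; vf < 64; vf++) {
            if (!(intr & (1ULL << vf)))
                continue;
            printf("VF %d signalled, queueing work\n", vf);
            ack_intx(vf);              /* clear just this VF's bit */
        }
        return 0;
    }
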
188 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) in otx2_register_flr_me_intr() argument
190 struct otx2_hw *hw = &pf->hw; in otx2_register_flr_me_intr()
196 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); in otx2_register_flr_me_intr()
197 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), in otx2_register_flr_me_intr()
198 otx2_pf_me_intr_handler, 0, irq_name, pf); in otx2_register_flr_me_intr()
200 dev_err(pf->dev, in otx2_register_flr_me_intr()
206 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); in otx2_register_flr_me_intr()
207 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), in otx2_register_flr_me_intr()
208 otx2_pf_flr_intr_handler, 0, irq_name, pf); in otx2_register_flr_me_intr()
210 dev_err(pf->dev, in otx2_register_flr_me_intr()
218 rvu_get_pf(pf->pcifunc)); in otx2_register_flr_me_intr()
220 (pf->pdev, RVU_PF_INT_VEC_VFME1), in otx2_register_flr_me_intr()
221 otx2_pf_me_intr_handler, 0, irq_name, pf); in otx2_register_flr_me_intr()
223 dev_err(pf->dev, in otx2_register_flr_me_intr()
228 rvu_get_pf(pf->pcifunc)); in otx2_register_flr_me_intr()
230 (pf->pdev, RVU_PF_INT_VEC_VFFLR1), in otx2_register_flr_me_intr()
231 otx2_pf_flr_intr_handler, 0, irq_name, pf); in otx2_register_flr_me_intr()
233 dev_err(pf->dev, in otx2_register_flr_me_intr()
240 otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs)); in otx2_register_flr_me_intr()
241 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs)); in otx2_register_flr_me_intr()
244 otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs)); in otx2_register_flr_me_intr()
245 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs)); in otx2_register_flr_me_intr()
250 otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs)); in otx2_register_flr_me_intr()
251 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1), in otx2_register_flr_me_intr()
254 otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs)); in otx2_register_flr_me_intr()
255 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1), in otx2_register_flr_me_intr()
261 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) in otx2_pf_flr_init() argument
265 pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq", in otx2_pf_flr_init()
267 if (!pf->flr_wq) in otx2_pf_flr_init()
270 pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs, in otx2_pf_flr_init()
272 if (!pf->flr_wrk) { in otx2_pf_flr_init()
273 destroy_workqueue(pf->flr_wq); in otx2_pf_flr_init()
278 pf->flr_wrk[vf].pf = pf; in otx2_pf_flr_init()
279 INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler); in otx2_pf_flr_init()
306 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler in otx2_queue_work()
307 * pf->mbox.up_num_msgs holds the data for use in in otx2_queue_work()
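
The comment at lines 306-307 notes that otx2_queue_work() stashes the message counts in pf->mbox.num_msgs / up_num_msgs for the deferred handlers to consume; a plausible reading (hedged, since the rest of the comment is elided in this listing) is that the shared mailbox header can then be reset before the next interrupt while the queued work still sees the captured count. A minimal sketch of that snapshot-then-hand-off pattern, with simplified stand-in types:

    #include <stdio.h>

    struct mbox_hdr { int num_msgs; };   /* shared with the producer  */
    struct mbox_ctx { int num_msgs; };   /* private copy for the work */

    /* Interrupt-context side: capture the count, clear the shared field,
     * then let the deferred work run against the private copy.
     */
    static void queue_work_snapshot(struct mbox_hdr *hdr, struct mbox_ctx *ctx)
    {
        ctx->num_msgs = hdr->num_msgs;
        hdr->num_msgs = 0;   /* next interrupt starts from a fresh counter */
    }

    static void deferred_handler(struct mbox_ctx *ctx)
    {
        printf("processing %d queued messages\n", ctx->num_msgs);
    }

    int main(void)
    {
        struct mbox_hdr hdr = { .num_msgs = 3 };
        struct mbox_ctx ctx = { 0 };

        queue_work_snapshot(&hdr, &ctx);
        deferred_handler(&ctx);   /* prints 3, even though hdr is now 0 */
        return 0;
    }
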
356 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, in otx2_forward_vf_mbox_msgs() argument
367 /* Set VF's mailbox memory as PF's bounce buffer memory, so in otx2_forward_vf_mbox_msgs()
368 * that explicit copying of VF's msgs to PF=>AF mbox region in otx2_forward_vf_mbox_msgs()
369 * and AF=>PF responses to VF's mbox region can be avoided. in otx2_forward_vf_mbox_msgs()
375 dst_mbox = &pf->mbox; in otx2_forward_vf_mbox_msgs()
384 mutex_lock(&pf->mbox.lock); in otx2_forward_vf_mbox_msgs()
390 dev_warn(pf->dev, in otx2_forward_vf_mbox_msgs()
392 /* restore PF mbase and exit */ in otx2_forward_vf_mbox_msgs()
393 dst_mdev->mbase = pf->mbox.bbuf_base; in otx2_forward_vf_mbox_msgs()
394 mutex_unlock(&pf->mbox.lock); in otx2_forward_vf_mbox_msgs()
405 otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox, in otx2_forward_vf_mbox_msgs()
406 pf->mbox.bbuf_base, vf); in otx2_forward_vf_mbox_msgs()
407 mutex_unlock(&pf->mbox.lock); in otx2_forward_vf_mbox_msgs()
415 dst_mbox = &pf->mbox_pfvf[0]; in otx2_forward_vf_mbox_msgs()
428 dev_warn(pf->dev, in otx2_forward_vf_mbox_msgs()
436 otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf], in otx2_forward_vf_mbox_msgs()
437 &pf->mbox.mbox_up, in otx2_forward_vf_mbox_msgs()
438 pf->mbox_pfvf[vf].bbuf_base, in otx2_forward_vf_mbox_msgs()
453 struct otx2_nic *pf; in otx2_pfvf_mbox_handler() local
456 pf = vf_mbox->pfvf; in otx2_pfvf_mbox_handler()
457 vf_idx = vf_mbox - pf->mbox_pfvf; in otx2_pfvf_mbox_handler()
459 mbox = &pf->mbox_pfvf[0].mbox; in otx2_pfvf_mbox_handler()
477 err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx, in otx2_pfvf_mbox_handler()
491 struct otx2_nic *pf = vf_mbox->pfvf; in otx2_pfvf_mbox_up_handler() local
498 vf_idx = vf_mbox - pf->mbox_pfvf; in otx2_pfvf_mbox_up_handler()
499 mbox = &pf->mbox_pfvf[0].mbox_up; in otx2_pfvf_mbox_up_handler()
509 dev_err(pf->dev, in otx2_pfvf_mbox_up_handler()
515 dev_err(pf->dev, in otx2_pfvf_mbox_up_handler()
526 dev_err(pf->dev, in otx2_pfvf_mbox_up_handler()
542 struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); in otx2_pfvf_mbox_intr_handler() local
543 int vfs = pf->total_vfs; in otx2_pfvf_mbox_intr_handler()
547 mbox = pf->mbox_pfvf; in otx2_pfvf_mbox_intr_handler()
550 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1)); in otx2_pfvf_mbox_intr_handler()
551 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); in otx2_pfvf_mbox_intr_handler()
552 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, in otx2_pfvf_mbox_intr_handler()
557 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); in otx2_pfvf_mbox_intr_handler()
558 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr); in otx2_pfvf_mbox_intr_handler()
560 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); in otx2_pfvf_mbox_intr_handler()
562 trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); in otx2_pfvf_mbox_intr_handler()
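
The PF-VF mailbox interrupt handler repeats the two-bank pattern: with more than 64 VFs it first services INTX(1), passing a base offset of 64 to otx2_queue_work() (line 552) so bit N in that register maps to VF 64 + N, then falls through to INTX(0) with offset 0 for VFs 0-63 (line 560). A short sketch of that offset mapping; the function names are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    /* Map every set bit of one 64-bit bank to a VF number, applying the
     * bank's base offset (0 for bank 0, 64 for bank 1).
     */
    static void service_bank(uint64_t intr, int first_vf)
    {
        int bit;

        for (bit = 0; bit < 64; bit++)
            if (intr & (1ULL << bit))
                printf("mailbox work queued for VF %d\n", first_vf + bit);
    }

    int main(void)
    {
        uint64_t intx0 = 1ULL << 5;   /* pretend VF5 wrote its mailbox  */
        uint64_t intx1 = 1ULL << 2;   /* pretend VF66 wrote its mailbox */

        service_bank(intx1, 64);      /* bank 1 first, as in the handler */
        service_bank(intx0, 0);
        return 0;
    }
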
567 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) in otx2_pfvf_mbox_init() argument
577 pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs, in otx2_pfvf_mbox_init()
579 if (!pf->mbox_pfvf) in otx2_pfvf_mbox_init()
582 pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox", in otx2_pfvf_mbox_init()
585 if (!pf->mbox_pfvf_wq) in otx2_pfvf_mbox_init()
588 base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR)); in otx2_pfvf_mbox_init()
589 hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); in otx2_pfvf_mbox_init()
596 mbox = &pf->mbox_pfvf[0]; in otx2_pfvf_mbox_init()
597 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, in otx2_pfvf_mbox_init()
602 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, in otx2_pfvf_mbox_init()
608 mbox->pfvf = pf; in otx2_pfvf_mbox_init()
620 destroy_workqueue(pf->mbox_pfvf_wq); in otx2_pfvf_mbox_init()
624 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) in otx2_pfvf_mbox_destroy() argument
626 struct mbox *mbox = &pf->mbox_pfvf[0]; in otx2_pfvf_mbox_destroy()
631 if (pf->mbox_pfvf_wq) { in otx2_pfvf_mbox_destroy()
632 destroy_workqueue(pf->mbox_pfvf_wq); in otx2_pfvf_mbox_destroy()
633 pf->mbox_pfvf_wq = NULL; in otx2_pfvf_mbox_destroy()
642 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) in otx2_enable_pfvf_mbox_intr() argument
644 /* Clear PF <=> VF mailbox IRQ */ in otx2_enable_pfvf_mbox_intr()
645 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); in otx2_enable_pfvf_mbox_intr()
646 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); in otx2_enable_pfvf_mbox_intr()
648 /* Enable PF <=> VF mailbox IRQ */ in otx2_enable_pfvf_mbox_intr()
649 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs)); in otx2_enable_pfvf_mbox_intr()
652 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), in otx2_enable_pfvf_mbox_intr()
657 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) in otx2_disable_pfvf_mbox_intr() argument
661 /* Disable PF <=> VF mailbox IRQ */ in otx2_disable_pfvf_mbox_intr()
662 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); in otx2_disable_pfvf_mbox_intr()
663 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); in otx2_disable_pfvf_mbox_intr()
665 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); in otx2_disable_pfvf_mbox_intr()
666 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0); in otx2_disable_pfvf_mbox_intr()
667 free_irq(vector, pf); in otx2_disable_pfvf_mbox_intr()
670 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); in otx2_disable_pfvf_mbox_intr()
671 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1); in otx2_disable_pfvf_mbox_intr()
672 free_irq(vector, pf); in otx2_disable_pfvf_mbox_intr()
676 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) in otx2_register_pfvf_mbox_intr() argument
678 struct otx2_hw *hw = &pf->hw; in otx2_register_pfvf_mbox_intr()
684 if (pf->pcifunc) in otx2_register_pfvf_mbox_intr()
686 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); in otx2_register_pfvf_mbox_intr()
689 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), in otx2_register_pfvf_mbox_intr()
690 otx2_pfvf_mbox_intr_handler, 0, irq_name, pf); in otx2_register_pfvf_mbox_intr()
692 dev_err(pf->dev, in otx2_register_pfvf_mbox_intr()
700 if (pf->pcifunc) in otx2_register_pfvf_mbox_intr()
702 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); in otx2_register_pfvf_mbox_intr()
705 err = request_irq(pci_irq_vector(pf->pdev, in otx2_register_pfvf_mbox_intr()
708 0, irq_name, pf); in otx2_register_pfvf_mbox_intr()
710 dev_err(pf->dev, in otx2_register_pfvf_mbox_intr()
716 otx2_enable_pfvf_mbox_intr(pf, numvfs); in otx2_register_pfvf_mbox_intr()
721 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf, in otx2_process_pfaf_mbox_msg() argument
727 dev_err(pf->dev, in otx2_process_pfaf_mbox_msg()
733 dev_err(pf->dev, in otx2_process_pfaf_mbox_msg()
742 struct otx2_vf_config *config = &pf->vf_configs[devid - 1]; in otx2_process_pfaf_mbox_msg()
761 pf->pcifunc = msg->pcifunc; in otx2_process_pfaf_mbox_msg()
764 mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg); in otx2_process_pfaf_mbox_msg()
767 mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg); in otx2_process_pfaf_mbox_msg()
770 mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg); in otx2_process_pfaf_mbox_msg()
773 mbox_handler_nix_txsch_alloc(pf, in otx2_process_pfaf_mbox_msg()
777 mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg); in otx2_process_pfaf_mbox_msg()
780 mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg); in otx2_process_pfaf_mbox_msg()
784 dev_err(pf->dev, in otx2_process_pfaf_mbox_msg()
798 struct otx2_nic *pf; in otx2_pfaf_mbox_handler() local
807 pf = af_mbox->pfvf; in otx2_pfaf_mbox_handler()
811 otx2_process_pfaf_mbox_msg(pf, msg); in otx2_pfaf_mbox_handler()
820 static void otx2_handle_link_event(struct otx2_nic *pf) in otx2_handle_link_event() argument
822 struct cgx_link_user_info *linfo = &pf->linfo; in otx2_handle_link_event()
823 struct net_device *netdev = pf->netdev; in otx2_handle_link_event()
837 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, in otx2_mbox_up_handler_cgx_link_event() argument
844 pf->linfo = msg->link_info; in otx2_mbox_up_handler_cgx_link_event()
847 for (i = 0; i < pci_num_vf(pf->pdev); i++) { in otx2_mbox_up_handler_cgx_link_event()
848 struct otx2_vf_config *config = &pf->vf_configs[i]; in otx2_mbox_up_handler_cgx_link_event()
858 if (pf->flags & OTX2_FLAG_INTF_DOWN) in otx2_mbox_up_handler_cgx_link_event()
861 otx2_handle_link_event(pf); in otx2_mbox_up_handler_cgx_link_event()
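
When the AF reports a CGX link change (lines 844-861), the PF caches the new link_info and fans the event out: the loop over pci_num_vf() schedules each VF's link_event_work from its per-VF config, and the PF itself only calls otx2_handle_link_event() when its own OTX2_FLAG_INTF_DOWN flag is clear. A compact sketch of that fan-out logic; the per-VF intf_down skip is inferred from the per-VF config use here and not fully visible in the elided lines, and plain booleans stand in for the driver's flags and work items.

    #include <stdbool.h>
    #include <stdio.h>

    struct vf_config { bool intf_down; };

    int main(void)
    {
        struct vf_config vf_configs[3] = { { false }, { true }, { false } };
        bool pf_intf_down = false;
        bool link_up = true;
        int i;

        /* Fan the new link state out to every VF that is currently up. */
        for (i = 0; i < 3; i++) {
            if (vf_configs[i].intf_down)
                continue;
            printf("schedule link_event_work for VF %d (link %s)\n",
                   i, link_up ? "up" : "down");
        }

        /* The PF only reacts itself if its own interface is running. */
        if (!pf_intf_down)
            printf("PF: update carrier/netif state\n");
        return 0;
    }
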
865 static int otx2_process_mbox_msg_up(struct otx2_nic *pf, in otx2_process_mbox_msg_up() argument
870 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); in otx2_process_mbox_msg_up()
881 &pf->mbox.mbox_up, 0, \ in otx2_process_mbox_msg_up()
892 pf, (struct _req_type *)req, rsp); \ in otx2_process_mbox_msg_up()
899 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); in otx2_process_mbox_msg_up()
910 struct otx2_nic *pf = af_mbox->pfvf; in otx2_pfaf_mbox_up_handler() local
925 otx2_process_mbox_msg_up(pf, msg); in otx2_pfaf_mbox_up_handler()
929 otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up, in otx2_pfaf_mbox_up_handler()
940 struct otx2_nic *pf = (struct otx2_nic *)pf_irq; in otx2_pfaf_mbox_intr_handler() local
944 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); in otx2_pfaf_mbox_intr_handler()
946 mbox = &pf->mbox; in otx2_pfaf_mbox_intr_handler()
948 trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0)); in otx2_pfaf_mbox_intr_handler()
950 otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF); in otx2_pfaf_mbox_intr_handler()
955 static void otx2_disable_mbox_intr(struct otx2_nic *pf) in otx2_disable_mbox_intr() argument
957 int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); in otx2_disable_mbox_intr()
959 /* Disable AF => PF mailbox IRQ */ in otx2_disable_mbox_intr()
960 otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); in otx2_disable_mbox_intr()
961 free_irq(vector, pf); in otx2_disable_mbox_intr()
964 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) in otx2_register_mbox_intr() argument
966 struct otx2_hw *hw = &pf->hw; in otx2_register_mbox_intr()
974 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), in otx2_register_mbox_intr()
975 otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); in otx2_register_mbox_intr()
977 dev_err(pf->dev, in otx2_register_mbox_intr()
985 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); in otx2_register_mbox_intr()
986 otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); in otx2_register_mbox_intr()
992 req = otx2_mbox_alloc_msg_ready(&pf->mbox); in otx2_register_mbox_intr()
994 otx2_disable_mbox_intr(pf); in otx2_register_mbox_intr()
997 err = otx2_sync_mbox_msg(&pf->mbox); in otx2_register_mbox_intr()
999 dev_warn(pf->dev, in otx2_register_mbox_intr()
1001 otx2_disable_mbox_intr(pf); in otx2_register_mbox_intr()
1008 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) in otx2_pfaf_mbox_destroy() argument
1010 struct mbox *mbox = &pf->mbox; in otx2_pfaf_mbox_destroy()
1012 if (pf->mbox_wq) { in otx2_pfaf_mbox_destroy()
1013 destroy_workqueue(pf->mbox_wq); in otx2_pfaf_mbox_destroy()
1014 pf->mbox_wq = NULL; in otx2_pfaf_mbox_destroy()
1024 static int otx2_pfaf_mbox_init(struct otx2_nic *pf) in otx2_pfaf_mbox_init() argument
1026 struct mbox *mbox = &pf->mbox; in otx2_pfaf_mbox_init()
1030 mbox->pfvf = pf; in otx2_pfaf_mbox_init()
1031 pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox", in otx2_pfaf_mbox_init()
1034 if (!pf->mbox_wq) in otx2_pfaf_mbox_init()
1038 * admin function (i.e AF) and this PF, shouldn't be mapped as in otx2_pfaf_mbox_init()
1041 hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), in otx2_pfaf_mbox_init()
1042 pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM)); in otx2_pfaf_mbox_init()
1044 dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); in otx2_pfaf_mbox_init()
1049 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, in otx2_pfaf_mbox_init()
1054 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, in otx2_pfaf_mbox_init()
1059 err = otx2_mbox_bbuf_init(mbox, pf->pdev); in otx2_pfaf_mbox_init()
1069 otx2_pfaf_mbox_destroy(pf); in otx2_pfaf_mbox_init()
1073 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) in otx2_cgx_config_linkevents() argument
1078 mutex_lock(&pf->mbox.lock); in otx2_cgx_config_linkevents()
1080 msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox); in otx2_cgx_config_linkevents()
1082 msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox); in otx2_cgx_config_linkevents()
1085 mutex_unlock(&pf->mbox.lock); in otx2_cgx_config_linkevents()
1089 err = otx2_sync_mbox_msg(&pf->mbox); in otx2_cgx_config_linkevents()
1090 mutex_unlock(&pf->mbox.lock); in otx2_cgx_config_linkevents()
1094 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) in otx2_cgx_config_loopback() argument
1099 mutex_lock(&pf->mbox.lock); in otx2_cgx_config_loopback()
1101 msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); in otx2_cgx_config_loopback()
1103 msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox); in otx2_cgx_config_loopback()
1106 mutex_unlock(&pf->mbox.lock); in otx2_cgx_config_loopback()
1110 err = otx2_sync_mbox_msg(&pf->mbox); in otx2_cgx_config_loopback()
1111 mutex_unlock(&pf->mbox.lock); in otx2_cgx_config_loopback()
1137 struct otx2_nic *pf = data; in otx2_q_intr_handler() local
1142 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { in otx2_q_intr_handler()
1143 ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT); in otx2_q_intr_handler()
1146 otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) | in otx2_q_intr_handler()
1152 netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", in otx2_q_intr_handler()
1153 qidx, otx2_read64(pf, NIX_LF_ERR_INT)); in otx2_q_intr_handler()
1156 netdev_err(pf->netdev, "CQ%lld: Doorbell error", in otx2_q_intr_handler()
1159 netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM", in otx2_q_intr_handler()
1163 schedule_work(&pf->reset_task); in otx2_q_intr_handler()
1167 for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { in otx2_q_intr_handler()
1168 ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); in otx2_q_intr_handler()
1170 otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | in otx2_q_intr_handler()
1177 netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", in otx2_q_intr_handler()
1178 qidx, otx2_read64(pf, NIX_LF_ERR_INT)); in otx2_q_intr_handler()
1181 netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx", in otx2_q_intr_handler()
1183 otx2_read64(pf, in otx2_q_intr_handler()
1185 otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, in otx2_q_intr_handler()
1189 netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n", in otx2_q_intr_handler()
1191 otx2_read64(pf, NIX_LF_MNQ_ERR_DBG)); in otx2_q_intr_handler()
1192 otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, in otx2_q_intr_handler()
1196 netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx", in otx2_q_intr_handler()
1198 otx2_read64(pf, in otx2_q_intr_handler()
1200 otx2_write64(pf, NIX_LF_SEND_ERR_DBG, in otx2_q_intr_handler()
1204 netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", in otx2_q_intr_handler()
1208 schedule_work(&pf->reset_task); in otx2_q_intr_handler()
1217 struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev; in otx2_cq_intr_handler() local
1225 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); in otx2_cq_intr_handler()
1233 static void otx2_disable_napi(struct otx2_nic *pf) in otx2_disable_napi() argument
1235 struct otx2_qset *qset = &pf->qset; in otx2_disable_napi()
1239 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { in otx2_disable_napi()
1246 static void otx2_free_cq_res(struct otx2_nic *pf) in otx2_free_cq_res() argument
1248 struct otx2_qset *qset = &pf->qset; in otx2_free_cq_res()
1253 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false); in otx2_free_cq_res()
1256 qmem_free(pf->dev, cq->cqe); in otx2_free_cq_res()
1260 static void otx2_free_sq_res(struct otx2_nic *pf) in otx2_free_sq_res() argument
1262 struct otx2_qset *qset = &pf->qset; in otx2_free_sq_res()
1267 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false); in otx2_free_sq_res()
1269 otx2_sq_free_sqbs(pf); in otx2_free_sq_res()
1270 for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { in otx2_free_sq_res()
1272 qmem_free(pf->dev, sq->sqe); in otx2_free_sq_res()
1273 qmem_free(pf->dev, sq->tso_hdrs); in otx2_free_sq_res()
1279 static int otx2_init_hw_resources(struct otx2_nic *pf) in otx2_init_hw_resources() argument
1281 struct mbox *mbox = &pf->mbox; in otx2_init_hw_resources()
1282 struct otx2_hw *hw = &pf->hw; in otx2_init_hw_resources()
1295 pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu + in otx2_init_hw_resources()
1300 err = otx2_config_npa(pf); in otx2_init_hw_resources()
1305 err = otx2_config_nix(pf); in otx2_init_hw_resources()
1310 otx2_nix_config_bp(pf, true); in otx2_init_hw_resources()
1313 err = otx2_rq_aura_pool_init(pf); in otx2_init_hw_resources()
1319 err = otx2_sq_aura_pool_init(pf); in otx2_init_hw_resources()
1325 err = otx2_txsch_alloc(pf); in otx2_init_hw_resources()
1331 err = otx2_config_nix_queues(pf); in otx2_init_hw_resources()
1337 err = otx2_txschq_config(pf, lvl); in otx2_init_hw_resources()
1347 otx2_free_sq_res(pf); in otx2_init_hw_resources()
1348 otx2_free_cq_res(pf); in otx2_init_hw_resources()
1351 if (otx2_txschq_stop(pf)) in otx2_init_hw_resources()
1352 dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__); in otx2_init_hw_resources()
1354 otx2_sq_free_sqbs(pf); in otx2_init_hw_resources()
1356 otx2_free_aura_ptr(pf, AURA_NIX_RQ); in otx2_init_hw_resources()
1359 otx2_aura_pool_free(pf); in otx2_init_hw_resources()
1365 dev_err(pf->dev, "%s failed to free nixlf\n", __func__); in otx2_init_hw_resources()
1372 dev_err(pf->dev, "%s failed to free npalf\n", __func__); in otx2_init_hw_resources()
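
The tail of otx2_init_hw_resources() (lines 1347-1372) releases partially acquired resources in reverse order of acquisition, which is the usual kernel goto-unwind shape even though the labels themselves are not among the matched lines. A generic standalone sketch of that pattern; the resource names below are invented for illustration, not the driver's.

    #include <stdio.h>
    #include <stdlib.h>

    /* Generic goto-unwind ladder: acquire A, then B, then C; on any
     * failure, release only what was already set up, in reverse order.
     */
    static int init_resources(void)
    {
        void *npa = NULL, *nix = NULL, *pools = NULL;
        int err = -1;

        npa = malloc(16);
        if (!npa)
            goto out;

        nix = malloc(16);
        if (!nix)
            goto err_free_npa;

        pools = malloc(16);
        if (!pools)
            goto err_free_nix;

        printf("all resources initialised\n");
        return 0;   /* on success, ownership stays with the caller's teardown path */

    err_free_nix:
        free(nix);
    err_free_npa:
        free(npa);
    out:
        return err;
    }

    int main(void)
    {
        return init_resources() ? 1 : 0;
    }
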
1379 static void otx2_free_hw_resources(struct otx2_nic *pf) in otx2_free_hw_resources() argument
1381 struct otx2_qset *qset = &pf->qset; in otx2_free_hw_resources()
1382 struct mbox *mbox = &pf->mbox; in otx2_free_hw_resources()
1388 otx2_sqb_flush(pf); in otx2_free_hw_resources()
1391 err = otx2_txschq_stop(pf); in otx2_free_hw_resources()
1393 dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n"); in otx2_free_hw_resources()
1397 if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) in otx2_free_hw_resources()
1398 otx2_nix_config_bp(pf, false); in otx2_free_hw_resources()
1408 otx2_cleanup_rx_cqes(pf, cq); in otx2_free_hw_resources()
1410 otx2_cleanup_tx_cqes(pf, cq); in otx2_free_hw_resources()
1413 otx2_free_sq_res(pf); in otx2_free_hw_resources()
1416 otx2_free_aura_ptr(pf, AURA_NIX_RQ); in otx2_free_hw_resources()
1418 otx2_free_cq_res(pf); in otx2_free_hw_resources()
1425 dev_err(pf->dev, "%s failed to free nixlf\n", __func__); in otx2_free_hw_resources()
1432 otx2_aura_pool_free(pf); in otx2_free_hw_resources()
1439 dev_err(pf->dev, "%s failed to free npalf\n", __func__); in otx2_free_hw_resources()
1446 struct otx2_nic *pf = netdev_priv(netdev); in otx2_open() local
1448 struct otx2_qset *qset = &pf->qset; in otx2_open()
1454 pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues; in otx2_open()
1458 pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues); in otx2_open()
1459 qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL); in otx2_open()
1469 qset->cq = kcalloc(pf->qset.cq_cnt, in otx2_open()
1474 qset->sq = kcalloc(pf->hw.tx_queues, in otx2_open()
1479 qset->rq = kcalloc(pf->hw.rx_queues, in otx2_open()
1484 err = otx2_init_hw_resources(pf); in otx2_open()
1489 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { in otx2_open()
1497 (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; in otx2_open()
1498 cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ? in otx2_open()
1499 qidx + pf->hw.rx_queues : CINT_INVALID_CQ; in otx2_open()
1500 cq_poll->dev = (void *)pf; in otx2_open()
1507 err = otx2_hw_set_mtu(pf, netdev->mtu); in otx2_open()
1512 otx2_setup_segmentation(pf); in otx2_open()
1515 err = otx2_rss_init(pf); in otx2_open()
1520 vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START; in otx2_open()
1521 irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; in otx2_open()
1523 snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name); in otx2_open()
1525 err = request_irq(pci_irq_vector(pf->pdev, vec), in otx2_open()
1526 otx2_q_intr_handler, 0, irq_name, pf); in otx2_open()
1528 dev_err(pf->dev, in otx2_open()
1530 rvu_get_pf(pf->pcifunc)); in otx2_open()
1535 otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0)); in otx2_open()
1538 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; in otx2_open()
1539 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { in otx2_open()
1540 irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; in otx2_open()
1542 snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name, in otx2_open()
1545 err = request_irq(pci_irq_vector(pf->pdev, vec), in otx2_open()
1549 dev_err(pf->dev, in otx2_open()
1551 rvu_get_pf(pf->pcifunc), qidx); in otx2_open()
1556 otx2_config_irq_coalescing(pf, qidx); in otx2_open()
1559 otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); in otx2_open()
1560 otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); in otx2_open()
1563 otx2_set_cints_affinity(pf); in otx2_open()
1566 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) { in otx2_open()
1567 pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; in otx2_open()
1568 otx2_config_hw_tx_tstamp(pf, true); in otx2_open()
1570 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) { in otx2_open()
1571 pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; in otx2_open()
1572 otx2_config_hw_rx_tstamp(pf, true); in otx2_open()
1575 pf->flags &= ~OTX2_FLAG_INTF_DOWN; in otx2_open()
1580 if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK)) in otx2_open()
1581 otx2_handle_link_event(pf); in otx2_open()
1584 otx2_config_pause_frm(pf); in otx2_open()
1586 err = otx2_rxtx_enable(pf, true); in otx2_open()
1596 otx2_free_cints(pf, qidx); in otx2_open()
1597 vec = pci_irq_vector(pf->pdev, in otx2_open()
1598 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); in otx2_open()
1599 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); in otx2_open()
1601 free_irq(vec, pf); in otx2_open()
1603 otx2_disable_napi(pf); in otx2_open()
1604 otx2_free_hw_resources(pf); in otx2_open()
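
otx2_open() sizes the completion machinery from the queue counts visible above: cq_cnt = rx_queues + tx_queues (line 1454), one completion interrupt per max(rx, tx) (line 1458), and each poll context is wired so RX CQs occupy indices 0..rx_queues-1 while TX CQs follow at rx_queues..cq_cnt-1, with unused slots marked invalid (lines 1497-1499). A tiny sketch of that index mapping, with a placeholder constant standing in for CINT_INVALID_CQ:

    #include <stdio.h>

    #define INVALID_CQ  -1   /* stands in for CINT_INVALID_CQ */

    int main(void)
    {
        int rx_queues = 2, tx_queues = 4;
        int cint_cnt = rx_queues > tx_queues ? rx_queues : tx_queues;
        int qidx;

        for (qidx = 0; qidx < cint_cnt; qidx++) {
            int rx_cq = qidx < rx_queues ? qidx : INVALID_CQ;
            int tx_cq = qidx < tx_queues ? qidx + rx_queues : INVALID_CQ;

            printf("cint %d -> rx cq %d, tx cq %d\n", qidx, rx_cq, tx_cq);
        }
        return 0;
    }
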
1616 struct otx2_nic *pf = netdev_priv(netdev); in otx2_stop() local
1618 struct otx2_qset *qset = &pf->qset; in otx2_stop()
1624 pf->flags |= OTX2_FLAG_INTF_DOWN; in otx2_stop()
1629 otx2_rxtx_enable(pf, false); in otx2_stop()
1632 vec = pci_irq_vector(pf->pdev, in otx2_stop()
1633 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); in otx2_stop()
1634 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); in otx2_stop()
1636 free_irq(vec, pf); in otx2_stop()
1639 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; in otx2_stop()
1640 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { in otx2_stop()
1642 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); in otx2_stop()
1644 synchronize_irq(pci_irq_vector(pf->pdev, vec)); in otx2_stop()
1653 otx2_free_hw_resources(pf); in otx2_stop()
1654 otx2_free_cints(pf, pf->hw.cint_cnt); in otx2_stop()
1655 otx2_disable_napi(pf); in otx2_stop()
1660 for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) in otx2_stop()
1661 cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); in otx2_stop()
1662 devm_kfree(pf->dev, pf->refill_wrk); in otx2_stop()
1677 struct otx2_nic *pf = netdev_priv(netdev); in otx2_xmit() local
1684 (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) { in otx2_xmit()
1689 sq = &pf->qset.sq[qidx]; in otx2_xmit()
1709 struct otx2_nic *pf = netdev_priv(netdev); in otx2_set_rx_mode() local
1711 queue_work(pf->otx2_wq, &pf->rx_mode_work); in otx2_set_rx_mode()
1716 struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); in otx2_do_set_rx_mode() local
1717 struct net_device *netdev = pf->netdev; in otx2_do_set_rx_mode()
1723 mutex_lock(&pf->mbox.lock); in otx2_do_set_rx_mode()
1724 req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); in otx2_do_set_rx_mode()
1726 mutex_unlock(&pf->mbox.lock); in otx2_do_set_rx_mode()
1738 otx2_sync_mbox_msg(&pf->mbox); in otx2_do_set_rx_mode()
1739 mutex_unlock(&pf->mbox.lock); in otx2_do_set_rx_mode()
1746 struct otx2_nic *pf = netdev_priv(netdev); in otx2_set_features() local
1749 return otx2_cgx_config_loopback(pf, in otx2_set_features()
1756 struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task); in otx2_reset_task() local
1758 if (!netif_running(pf->netdev)) in otx2_reset_task()
1762 otx2_stop(pf->netdev); in otx2_reset_task()
1763 pf->reset_count++; in otx2_reset_task()
1764 otx2_open(pf->netdev); in otx2_reset_task()
1765 netif_trans_update(pf->netdev); in otx2_reset_task()
1919 static int otx2_wq_init(struct otx2_nic *pf) in otx2_wq_init() argument
1921 pf->otx2_wq = create_singlethread_workqueue("otx2_wq"); in otx2_wq_init()
1922 if (!pf->otx2_wq) in otx2_wq_init()
1925 INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode); in otx2_wq_init()
1926 INIT_WORK(&pf->reset_task, otx2_reset_task); in otx2_wq_init()
1948 static int otx2_realloc_msix_vectors(struct otx2_nic *pf) in otx2_realloc_msix_vectors() argument
1950 struct otx2_hw *hw = &pf->hw; in otx2_realloc_msix_vectors()
1959 otx2_disable_mbox_intr(pf); in otx2_realloc_msix_vectors()
1963 dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n", in otx2_realloc_msix_vectors()
1968 return otx2_register_mbox_intr(pf, false); in otx2_realloc_msix_vectors()
1975 struct otx2_nic *pf; in otx2_probe() local
2003 netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount); in otx2_probe()
2011 pf = netdev_priv(netdev); in otx2_probe()
2012 pf->netdev = netdev; in otx2_probe()
2013 pf->pdev = pdev; in otx2_probe()
2014 pf->dev = dev; in otx2_probe()
2015 pf->total_vfs = pci_sriov_get_totalvfs(pdev); in otx2_probe()
2016 pf->flags |= OTX2_FLAG_INTF_DOWN; in otx2_probe()
2018 hw = &pf->hw; in otx2_probe()
2040 pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); in otx2_probe()
2041 if (!pf->reg_base) { in otx2_probe()
2047 err = otx2_check_pf_usable(pf); in otx2_probe()
2059 /* Init PF <=> AF mailbox stuff */ in otx2_probe()
2060 err = otx2_pfaf_mbox_init(pf); in otx2_probe()
2065 err = otx2_register_mbox_intr(pf, true); in otx2_probe()
2069 /* Request AF to attach NPA and NIX LFs to this PF. in otx2_probe()
2070 * NIX and NPA LFs are needed for this PF to function as a NIC. in otx2_probe()
2072 err = otx2_attach_npa_nix(pf); in otx2_probe()
2076 err = otx2_realloc_msix_vectors(pf); in otx2_probe()
2084 otx2_setup_dev_hw_settings(pf); in otx2_probe()
2090 otx2_ptp_init(pf); in otx2_probe()
2103 pf->iommu_domain = iommu_get_domain_for_dev(dev); in otx2_probe()
2128 err = otx2_wq_init(pf); in otx2_probe()
2135 otx2_cgx_config_linkevents(pf, true); in otx2_probe()
2138 pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; in otx2_probe()
2139 pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; in otx2_probe()
2146 otx2_ptp_destroy(pf); in otx2_probe()
2148 otx2_detach_resources(&pf->mbox); in otx2_probe()
2150 otx2_disable_mbox_intr(pf); in otx2_probe()
2152 otx2_pfaf_mbox_destroy(pf); in otx2_probe()
2168 struct otx2_nic *pf; in otx2_vf_link_event_task() local
2173 vf_idx = config - config->pf->vf_configs; in otx2_vf_link_event_task()
2174 pf = config->pf; in otx2_vf_link_event_task()
2176 msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx, in otx2_vf_link_event_task()
2179 dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx); in otx2_vf_link_event_task()
2186 memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); in otx2_vf_link_event_task()
2188 otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx); in otx2_vf_link_event_task()
2194 struct otx2_nic *pf = netdev_priv(netdev); in otx2_sriov_enable() local
2197 /* Init PF <=> VF mailbox stuff */ in otx2_sriov_enable()
2198 ret = otx2_pfvf_mbox_init(pf, numvfs); in otx2_sriov_enable()
2202 ret = otx2_register_pfvf_mbox_intr(pf, numvfs); in otx2_sriov_enable()
2206 pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config), in otx2_sriov_enable()
2208 if (!pf->vf_configs) { in otx2_sriov_enable()
2214 pf->vf_configs[i].pf = pf; in otx2_sriov_enable()
2215 pf->vf_configs[i].intf_down = true; in otx2_sriov_enable()
2216 INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work, in otx2_sriov_enable()
2220 ret = otx2_pf_flr_init(pf, numvfs); in otx2_sriov_enable()
2224 ret = otx2_register_flr_me_intr(pf, numvfs); in otx2_sriov_enable()
2234 otx2_disable_flr_me_intr(pf); in otx2_sriov_enable()
2236 otx2_flr_wq_destroy(pf); in otx2_sriov_enable()
2238 kfree(pf->vf_configs); in otx2_sriov_enable()
2240 otx2_disable_pfvf_mbox_intr(pf, numvfs); in otx2_sriov_enable()
2242 otx2_pfvf_mbox_destroy(pf); in otx2_sriov_enable()
2249 struct otx2_nic *pf = netdev_priv(netdev); in otx2_sriov_disable() local
2259 cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work); in otx2_sriov_disable()
2260 kfree(pf->vf_configs); in otx2_sriov_disable()
2262 otx2_disable_flr_me_intr(pf); in otx2_sriov_disable()
2263 otx2_flr_wq_destroy(pf); in otx2_sriov_disable()
2264 otx2_disable_pfvf_mbox_intr(pf, numvfs); in otx2_sriov_disable()
2265 otx2_pfvf_mbox_destroy(pf); in otx2_sriov_disable()
2281 struct otx2_nic *pf; in otx2_remove() local
2286 pf = netdev_priv(netdev); in otx2_remove()
2288 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) in otx2_remove()
2289 otx2_config_hw_tx_tstamp(pf, false); in otx2_remove()
2290 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) in otx2_remove()
2291 otx2_config_hw_rx_tstamp(pf, false); in otx2_remove()
2293 cancel_work_sync(&pf->reset_task); in otx2_remove()
2295 otx2_cgx_config_linkevents(pf, false); in otx2_remove()
2298 otx2_sriov_disable(pf->pdev); in otx2_remove()
2299 if (pf->otx2_wq) in otx2_remove()
2300 destroy_workqueue(pf->otx2_wq); in otx2_remove()
2302 otx2_ptp_destroy(pf); in otx2_remove()
2303 otx2_detach_resources(&pf->mbox); in otx2_remove()
2304 otx2_disable_mbox_intr(pf); in otx2_remove()
2305 otx2_pfaf_mbox_destroy(pf); in otx2_remove()
2306 pci_free_irq_vectors(pf->pdev); in otx2_remove()