Lines matching refs:rvu (format: source line number, matched source line, enclosing function)
26 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
28 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
30 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
32 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
34 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
62 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) in rvu_poll_reg() argument
68 reg = rvu->afreg_base + ((block << 28) | offset); in rvu_poll_reg()
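
The address computation in rvu_poll_reg() above folds a block identifier and a per-block register offset into one address inside the AF BAR. A minimal user-space sketch of that arithmetic, assuming a made-up base address and block numbers (the real BLKADDR_* numeric values are not visible in this listing):

/*
 * Standalone model of the AF register addressing used in rvu_poll_reg():
 * addr = afreg_base + ((block << 28) | offset).  The base address and
 * block numbers below are illustrative only; the shift and the OR are
 * taken from the listing.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t af_reg_addr(uint64_t afreg_base, uint64_t block, uint64_t offset)
{
    /* Each block gets its own (1 << 28)-byte window inside the AF BAR. */
    return afreg_base + ((block << 28) | offset);
}

int main(void)
{
    uint64_t base = 0x840000000000ULL;  /* hypothetical ioremapped BAR */

    printf("block 0, reg 0x000: %#llx\n",
           (unsigned long long)af_reg_addr(base, 0, 0x000));
    printf("block 5, reg 0x010: %#llx\n",
           (unsigned long long)af_reg_addr(base, 5, 0x010));
    return 0;
}
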
165 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) in rvu_get_lf() argument
170 mutex_lock(&rvu->rsrc_lock); in rvu_get_lf()
174 mutex_unlock(&rvu->rsrc_lock); in rvu_get_lf()
180 mutex_unlock(&rvu->rsrc_lock); in rvu_get_lf()
190 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) in rvu_get_blkaddr() argument
231 devnum = rvu_get_hwvf(rvu, pcifunc); in rvu_get_blkaddr()
240 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); in rvu_get_blkaddr()
248 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); in rvu_get_blkaddr()
254 if (is_block_implemented(rvu->hw, blkaddr)) in rvu_get_blkaddr()
259 static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, in rvu_update_rsrc_map() argument
268 dev_err(&rvu->pdev->dev, in rvu_update_rsrc_map()
277 devnum = rvu_get_hwvf(rvu, pcifunc); in rvu_update_rsrc_map()
313 rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); in rvu_update_rsrc_map()
321 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) in rvu_get_pf_numvfs() argument
326 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); in rvu_get_pf_numvfs()
331 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) in rvu_get_hwvf() argument
340 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); in rvu_get_hwvf()
345 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) in rvu_get_pfvf() argument
349 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; in rvu_get_pfvf()
351 return &rvu->pf[rvu_get_pf(pcifunc)]; in rvu_get_pfvf()
354 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) in is_pf_func_valid() argument
360 if (pf >= rvu->hw->total_pfs) in is_pf_func_valid()
368 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); in is_pf_func_valid()
387 static void rvu_check_block_implemented(struct rvu *rvu) in rvu_check_block_implemented() argument
389 struct rvu_hwinfo *hw = rvu->hw; in rvu_check_block_implemented()
397 cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); in rvu_check_block_implemented()
403 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) in rvu_lf_reset() argument
410 rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12)); in rvu_lf_reset()
411 err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12), in rvu_lf_reset()
416 static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) in rvu_block_reset() argument
418 struct rvu_block *block = &rvu->hw->block[blkaddr]; in rvu_block_reset()
423 rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); in rvu_block_reset()
424 rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); in rvu_block_reset()
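
rvu_lf_reset() and rvu_block_reset() above share one pattern: write a trigger bit, then poll a status bit via rvu_poll_reg() until it reaches the expected state. A standalone model of that trigger-then-poll loop, with a fake register standing in for MMIO and an arbitrary retry budget in place of the driver's real timeout:

/*
 * Trigger-then-poll sketch: set a trigger bit, then wait for a status bit
 * to clear.  The fake register and the "hardware finishes after 3 polls"
 * behaviour are assumptions standing in for real MMIO.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)  (1ULL << (n))

static uint64_t fake_reg;
static int polls_until_done = 3;    /* pretend hardware finishes after 3 polls */

static uint64_t reg_read(void)
{
    if (polls_until_done-- <= 0)
        fake_reg &= ~BIT_ULL(63);   /* hardware drops its busy bit */
    return fake_reg;
}

/* Poll until (reg & mask) is zero (zero==true) or non-zero (zero==false). */
static int poll_reg(uint64_t mask, bool zero, int max_tries)
{
    while (max_tries--) {
        uint64_t val = reg_read();

        if (zero ? !(val & mask) : (val & mask))
            return 0;
    }
    return -1;  /* timed out */
}

int main(void)
{
    fake_reg = BIT_ULL(0) | BIT_ULL(63);    /* trigger reset, busy goes high */
    printf("block reset %s\n",
           poll_reg(BIT_ULL(63), true, 10) ? "timed out" : "done");
    return 0;
}
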
427 static void rvu_reset_all_blocks(struct rvu *rvu) in rvu_reset_all_blocks() argument
430 rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); in rvu_reset_all_blocks()
431 rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); in rvu_reset_all_blocks()
432 rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); in rvu_reset_all_blocks()
433 rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); in rvu_reset_all_blocks()
434 rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); in rvu_reset_all_blocks()
435 rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); in rvu_reset_all_blocks()
436 rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST); in rvu_reset_all_blocks()
437 rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST); in rvu_reset_all_blocks()
438 rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST); in rvu_reset_all_blocks()
441 static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) in rvu_scan_block() argument
448 cfg = rvu_read64(rvu, block->addr, in rvu_scan_block()
457 pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); in rvu_scan_block()
458 rvu_update_rsrc_map(rvu, pfvf, block, in rvu_scan_block()
462 rvu_set_msix_offset(rvu, pfvf, block, lf); in rvu_scan_block()
466 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf) in rvu_check_min_msix_vec() argument
474 dev_warn(rvu->dev, in rvu_check_min_msix_vec()
488 dev_warn(rvu->dev, in rvu_check_min_msix_vec()
493 static int rvu_setup_msix_resources(struct rvu *rvu) in rvu_setup_msix_resources() argument
495 struct rvu_hwinfo *hw = rvu->hw; in rvu_setup_msix_resources()
503 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); in rvu_setup_msix_resources()
508 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); in rvu_setup_msix_resources()
510 pfvf = &rvu->pf[pf]; in rvu_setup_msix_resources()
512 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf)); in rvu_setup_msix_resources()
514 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0); in rvu_setup_msix_resources()
522 pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, in rvu_setup_msix_resources()
537 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf)); in rvu_setup_msix_resources()
541 rvu_write64(rvu, BLKADDR_RVUM, in rvu_setup_msix_resources()
546 pfvf = &rvu->hwvf[hwvf + vf]; in rvu_setup_msix_resources()
548 cfg = rvu_read64(rvu, BLKADDR_RVUM, in rvu_setup_msix_resources()
551 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1); in rvu_setup_msix_resources()
559 devm_kcalloc(rvu->dev, pfvf->msix.max, in rvu_setup_msix_resources()
568 cfg = rvu_read64(rvu, BLKADDR_RVUM, in rvu_setup_msix_resources()
573 rvu_write64(rvu, BLKADDR_RVUM, in rvu_setup_msix_resources()
583 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); in rvu_setup_msix_resources()
585 phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); in rvu_setup_msix_resources()
586 iova = dma_map_resource(rvu->dev, phy_addr, in rvu_setup_msix_resources()
590 if (dma_mapping_error(rvu->dev, iova)) in rvu_setup_msix_resources()
593 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); in rvu_setup_msix_resources()
594 rvu->msix_base_iova = iova; in rvu_setup_msix_resources()
599 static void rvu_free_hw_resources(struct rvu *rvu) in rvu_free_hw_resources() argument
601 struct rvu_hwinfo *hw = rvu->hw; in rvu_free_hw_resources()
607 rvu_npa_freemem(rvu); in rvu_free_hw_resources()
608 rvu_npc_freemem(rvu); in rvu_free_hw_resources()
609 rvu_nix_freemem(rvu); in rvu_free_hw_resources()
619 pfvf = &rvu->pf[id]; in rvu_free_hw_resources()
624 pfvf = &rvu->hwvf[id]; in rvu_free_hw_resources()
629 if (!rvu->msix_base_iova) in rvu_free_hw_resources()
631 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); in rvu_free_hw_resources()
633 dma_unmap_resource(rvu->dev, rvu->msix_base_iova, in rvu_free_hw_resources()
637 mutex_destroy(&rvu->rsrc_lock); in rvu_free_hw_resources()
640 static int rvu_setup_hw_resources(struct rvu *rvu) in rvu_setup_hw_resources() argument
642 struct rvu_hwinfo *hw = rvu->hw; in rvu_setup_hw_resources()
648 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); in rvu_setup_hw_resources()
657 cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); in rvu_setup_hw_resources()
678 cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); in rvu_setup_hw_resources()
699 cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); in rvu_setup_hw_resources()
742 cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); in rvu_setup_hw_resources()
764 cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0); in rvu_setup_hw_resources()
783 rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, in rvu_setup_hw_resources()
785 if (!rvu->pf) in rvu_setup_hw_resources()
788 rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, in rvu_setup_hw_resources()
790 if (!rvu->hwvf) in rvu_setup_hw_resources()
793 mutex_init(&rvu->rsrc_lock); in rvu_setup_hw_resources()
795 err = rvu_setup_msix_resources(rvu); in rvu_setup_hw_resources()
805 block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, in rvu_setup_hw_resources()
813 rvu_scan_block(rvu, block); in rvu_setup_hw_resources()
816 err = rvu_npc_init(rvu); in rvu_setup_hw_resources()
820 err = rvu_cgx_init(rvu); in rvu_setup_hw_resources()
824 err = rvu_npa_init(rvu); in rvu_setup_hw_resources()
828 err = rvu_nix_init(rvu); in rvu_setup_hw_resources()
835 rvu_cgx_exit(rvu); in rvu_setup_hw_resources()
841 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq) in rvu_aq_free() argument
846 qmem_free(rvu->dev, aq->inst); in rvu_aq_free()
847 qmem_free(rvu->dev, aq->res); in rvu_aq_free()
848 devm_kfree(rvu->dev, aq); in rvu_aq_free()
851 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, in rvu_aq_alloc() argument
857 *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL); in rvu_aq_alloc()
863 err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size); in rvu_aq_alloc()
865 devm_kfree(rvu->dev, aq); in rvu_aq_alloc()
870 err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size); in rvu_aq_alloc()
872 rvu_aq_free(rvu, aq); in rvu_aq_alloc()
880 static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_ready() argument
908 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) in is_pffunc_map_valid() argument
912 if (!is_pf_func_valid(rvu, pcifunc)) in is_pffunc_map_valid()
915 pfvf = rvu_get_pfvf(rvu, pcifunc); in is_pffunc_map_valid()
924 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, in rvu_lookup_rsrc() argument
930 rvu_write64(rvu, block->addr, block->lookup_reg, val); in rvu_lookup_rsrc()
933 while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) in rvu_lookup_rsrc()
936 val = rvu_read64(rvu, block->addr, block->lookup_reg); in rvu_lookup_rsrc()
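
rvu_lookup_rsrc() above drives a single-register handshake: the request is written to the lookup register, bit 13 is polled as a busy flag, and the result is read back from the same register. A toy model of that handshake; only the busy-bit position comes from the listing, while the fake device and the result value are assumptions:

/*
 * Single-register lookup handshake sketch: issue request, spin on the
 * busy bit, read the result back from the same register.
 */
#include <stdint.h>
#include <stdio.h>

#define LOOKUP_BUSY (1ULL << 13)

static uint64_t lookup_reg;

static void fake_device_step(void)
{
    /* Pretend hardware: resolve the request and drop the busy bit. */
    if (lookup_reg & LOOKUP_BUSY)
        lookup_reg = 0x2a;          /* made-up result value */
}

static uint64_t lookup(uint64_t request)
{
    lookup_reg = request | LOOKUP_BUSY; /* issue the request */
    while (lookup_reg & LOOKUP_BUSY)    /* spin until hardware is done */
        fake_device_step();
    return lookup_reg;                  /* result replaces the request */
}

int main(void)
{
    printf("lookup result: %#llx\n", (unsigned long long)lookup(0x100));
    return 0;
}
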
945 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) in rvu_detach_block() argument
947 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_detach_block()
948 struct rvu_hwinfo *hw = rvu->hw; in rvu_detach_block()
953 blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); in rvu_detach_block()
964 lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); in rvu_detach_block()
969 rvu_write64(rvu, blkaddr, block->lfcfg_reg | in rvu_detach_block()
973 rvu_update_rsrc_map(rvu, pfvf, block, in rvu_detach_block()
980 rvu_clear_msix_offset(rvu, pfvf, block, lf); in rvu_detach_block()
984 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, in rvu_detach_rsrcs() argument
987 struct rvu_hwinfo *hw = rvu->hw; in rvu_detach_rsrcs()
992 mutex_lock(&rvu->rsrc_lock); in rvu_detach_rsrcs()
1019 rvu_detach_block(rvu, pcifunc, block->type); in rvu_detach_rsrcs()
1022 mutex_unlock(&rvu->rsrc_lock); in rvu_detach_rsrcs()
1026 static int rvu_mbox_handler_detach_resources(struct rvu *rvu, in rvu_mbox_handler_detach_resources() argument
1030 return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); in rvu_mbox_handler_detach_resources()
1033 static void rvu_attach_block(struct rvu *rvu, int pcifunc, in rvu_attach_block() argument
1036 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_attach_block()
1037 struct rvu_hwinfo *hw = rvu->hw; in rvu_attach_block()
1046 blkaddr = rvu_get_blkaddr(rvu, blktype, 0); in rvu_attach_block()
1061 rvu_write64(rvu, blkaddr, block->lfcfg_reg | in rvu_attach_block()
1063 rvu_update_rsrc_map(rvu, pfvf, block, in rvu_attach_block()
1067 rvu_set_msix_offset(rvu, pfvf, block, lf); in rvu_attach_block()
1071 static int rvu_check_rsrc_availability(struct rvu *rvu, in rvu_check_rsrc_availability() argument
1074 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_check_rsrc_availability()
1075 struct rvu_hwinfo *hw = rvu->hw; in rvu_check_rsrc_availability()
1086 dev_err(&rvu->pdev->dev, in rvu_check_rsrc_availability()
1099 dev_err(&rvu->pdev->dev, in rvu_check_rsrc_availability()
1109 dev_err(&rvu->pdev->dev, in rvu_check_rsrc_availability()
1125 dev_err(&rvu->pdev->dev, in rvu_check_rsrc_availability()
1140 dev_err(&rvu->pdev->dev, in rvu_check_rsrc_availability()
1155 dev_err(&rvu->pdev->dev, in rvu_check_rsrc_availability()
1170 dev_info(rvu->dev, "Request for %s failed\n", block->name); in rvu_check_rsrc_availability()
1174 static int rvu_mbox_handler_attach_resources(struct rvu *rvu, in rvu_mbox_handler_attach_resources() argument
1183 rvu_detach_rsrcs(rvu, NULL, pcifunc); in rvu_mbox_handler_attach_resources()
1185 mutex_lock(&rvu->rsrc_lock); in rvu_mbox_handler_attach_resources()
1188 err = rvu_check_rsrc_availability(rvu, attach, pcifunc); in rvu_mbox_handler_attach_resources()
1194 rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1); in rvu_mbox_handler_attach_resources()
1197 rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1); in rvu_mbox_handler_attach_resources()
1206 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); in rvu_mbox_handler_attach_resources()
1207 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso); in rvu_mbox_handler_attach_resources()
1212 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); in rvu_mbox_handler_attach_resources()
1213 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow); in rvu_mbox_handler_attach_resources()
1218 rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); in rvu_mbox_handler_attach_resources()
1219 rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs); in rvu_mbox_handler_attach_resources()
1224 rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); in rvu_mbox_handler_attach_resources()
1225 rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs); in rvu_mbox_handler_attach_resources()
1229 mutex_unlock(&rvu->rsrc_lock); in rvu_mbox_handler_attach_resources()
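
The attach handler above takes rvu->rsrc_lock, detaches whatever the function already holds of a block type, and then attaches the newly requested count. A toy model of that detach-then-attach flow over a small slot map guarded by one lock; the structure names, sizes, and allocation policy here are assumptions, not the driver's real bookkeeping:

/*
 * Detach-then-attach under one resource lock, modelled over a flat
 * owner array (-1 = free, otherwise the owning pcifunc).
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LF  16

static pthread_mutex_t rsrc_lock = PTHREAD_MUTEX_INITIALIZER;
static int16_t lf_owner[MAX_LF];

static void detach_all(int16_t pcifunc)
{
    for (int lf = 0; lf < MAX_LF; lf++)
        if (lf_owner[lf] == pcifunc)
            lf_owner[lf] = -1;
}

static int attach(int16_t pcifunc, int count)
{
    int got = 0;

    for (int lf = 0; lf < MAX_LF && got < count; lf++)
        if (lf_owner[lf] == -1) {
            lf_owner[lf] = pcifunc;
            got++;
        }
    return got == count ? 0 : -1;
}

/* Detach what is held, then attach the new request, all under the lock. */
static int modify(int16_t pcifunc, int count)
{
    int err;

    pthread_mutex_lock(&rsrc_lock);
    detach_all(pcifunc);
    err = attach(pcifunc, count);
    pthread_mutex_unlock(&rsrc_lock);
    return err;
}

int main(void)
{
    for (int lf = 0; lf < MAX_LF; lf++)
        lf_owner[lf] = -1;
    printf("attach 4 LFs: %d\n", modify(0x400, 4));
    printf("shrink to 2 : %d\n", modify(0x400, 2));
    return 0;
}
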
1233 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, in rvu_get_msix_offset() argument
1248 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, in rvu_set_msix_offset() argument
1254 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | in rvu_set_msix_offset()
1265 rvu_write64(rvu, block->addr, block->msixcfg_reg | in rvu_set_msix_offset()
1273 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, in rvu_clear_msix_offset() argument
1279 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | in rvu_clear_msix_offset()
1284 rvu_write64(rvu, block->addr, block->msixcfg_reg | in rvu_clear_msix_offset()
1287 offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); in rvu_clear_msix_offset()
1297 static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_msix_offset() argument
1300 struct rvu_hwinfo *hw = rvu->hw; in rvu_mbox_handler_msix_offset()
1305 pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_mbox_handler_msix_offset()
1310 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); in rvu_mbox_handler_msix_offset()
1311 rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); in rvu_mbox_handler_msix_offset()
1313 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0); in rvu_mbox_handler_msix_offset()
1314 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf); in rvu_mbox_handler_msix_offset()
1318 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); in rvu_mbox_handler_msix_offset()
1320 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); in rvu_mbox_handler_msix_offset()
1325 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); in rvu_mbox_handler_msix_offset()
1327 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); in rvu_mbox_handler_msix_offset()
1332 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot); in rvu_mbox_handler_msix_offset()
1334 rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf); in rvu_mbox_handler_msix_offset()
1339 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot); in rvu_mbox_handler_msix_offset()
1341 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf); in rvu_mbox_handler_msix_offset()
1346 static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_vf_flr() argument
1354 cfg = rvu_read64(rvu, BLKADDR_RVUM, in rvu_mbox_handler_vf_flr()
1359 __rvu_flr_handler(rvu, pcifunc); in rvu_mbox_handler_vf_flr()
1369 struct rvu *rvu = pci_get_drvdata(mbox->pdev); in rvu_process_mbox_msg() local
1398 err = rvu_mbox_handler_ ## _fn_name(rvu, \ in rvu_process_mbox_msg()
1418 struct rvu *rvu = mwork->rvu; in __rvu_mbox_handler() local
1428 mw = &rvu->afpf_wq_info; in __rvu_mbox_handler()
1431 mw = &rvu->afvf_wq_info; in __rvu_mbox_handler()
1472 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", in __rvu_mbox_handler()
1477 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", in __rvu_mbox_handler()
1502 struct rvu *rvu = mwork->rvu; in __rvu_mbox_up_handler() local
1512 mw = &rvu->afpf_wq_info; in __rvu_mbox_up_handler()
1515 mw = &rvu->afvf_wq_info; in __rvu_mbox_up_handler()
1527 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n"); in __rvu_mbox_up_handler()
1537 dev_err(rvu->dev, in __rvu_mbox_up_handler()
1543 dev_err(rvu->dev, in __rvu_mbox_up_handler()
1554 dev_err(rvu->dev, in __rvu_mbox_up_handler()
1581 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, in rvu_mbox_init() argument
1595 bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); in rvu_mbox_init()
1598 reg_base = rvu->afreg_base; in rvu_mbox_init()
1602 bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); in rvu_mbox_init()
1605 reg_base = rvu->pfreg_base; in rvu_mbox_init()
1617 mw->mbox_wrk = devm_kcalloc(rvu->dev, num, in rvu_mbox_init()
1624 mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num, in rvu_mbox_init()
1637 dev_err(rvu->dev, "Unable to map mailbox region\n"); in rvu_mbox_init()
1642 err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num); in rvu_mbox_init()
1646 err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev, in rvu_mbox_init()
1653 mwork->rvu = rvu; in rvu_mbox_init()
1657 mwork->rvu = rvu; in rvu_mbox_init()
1713 struct rvu *rvu = (struct rvu *)rvu_irq; in rvu_mbox_intr_handler() local
1714 int vfs = rvu->vfs; in rvu_mbox_intr_handler()
1717 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); in rvu_mbox_intr_handler()
1719 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); in rvu_mbox_intr_handler()
1724 rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); in rvu_mbox_intr_handler()
1728 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); in rvu_mbox_intr_handler()
1729 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); in rvu_mbox_intr_handler()
1731 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); in rvu_mbox_intr_handler()
1735 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); in rvu_mbox_intr_handler()
1736 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr); in rvu_mbox_intr_handler()
1738 rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr); in rvu_mbox_intr_handler()
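
rvu_mbox_intr_handler() above reads a pending mask, writes the same value back to acknowledge it, and then hands off work for every set bit. A compact model of that read/ack/dispatch fan-out, with a plain variable standing in for the write-1-to-clear register and a printf standing in for the workqueue hand-off:

/*
 * Read the pending sources, acknowledge them (modelled as a direct
 * clear), then run a handler once per set bit.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t mbox_int_reg = (1ULL << 0) | (1ULL << 3) | (1ULL << 17);

static void handle_pf(int pf)
{
    printf("queue mbox work for PF%d\n", pf);
}

static void mbox_intr(void)
{
    uint64_t intr = mbox_int_reg;   /* read pending sources */

    mbox_int_reg &= ~intr;          /* W1C acknowledge, modelled directly */

    for (int pf = 0; pf < 64; pf++)
        if (intr & (1ULL << pf))
            handle_pf(pf);
}

int main(void)
{
    mbox_intr();
    return 0;
}
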
1743 static void rvu_enable_mbox_intr(struct rvu *rvu) in rvu_enable_mbox_intr() argument
1745 struct rvu_hwinfo *hw = rvu->hw; in rvu_enable_mbox_intr()
1748 rvu_write64(rvu, BLKADDR_RVUM, in rvu_enable_mbox_intr()
1752 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, in rvu_enable_mbox_intr()
1756 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) in rvu_blklf_teardown() argument
1762 block = &rvu->hw->block[blkaddr]; in rvu_blklf_teardown()
1763 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), in rvu_blklf_teardown()
1768 lf = rvu_get_lf(rvu, block, pcifunc, slot); in rvu_blklf_teardown()
1774 rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); in rvu_blklf_teardown()
1776 rvu_npa_lf_teardown(rvu, pcifunc, lf); in rvu_blklf_teardown()
1778 err = rvu_lf_reset(rvu, block, lf); in rvu_blklf_teardown()
1780 dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", in rvu_blklf_teardown()
1786 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) in __rvu_flr_handler() argument
1788 mutex_lock(&rvu->flr_lock); in __rvu_flr_handler()
1794 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); in __rvu_flr_handler()
1795 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); in __rvu_flr_handler()
1796 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM); in __rvu_flr_handler()
1797 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); in __rvu_flr_handler()
1798 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); in __rvu_flr_handler()
1799 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); in __rvu_flr_handler()
1800 rvu_detach_rsrcs(rvu, NULL, pcifunc); in __rvu_flr_handler()
1801 mutex_unlock(&rvu->flr_lock); in __rvu_flr_handler()
1804 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf) in rvu_afvf_flr_handler() argument
1809 __rvu_flr_handler(rvu, vf + 1); in rvu_afvf_flr_handler()
1817 rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); in rvu_afvf_flr_handler()
1818 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); in rvu_afvf_flr_handler()
1824 struct rvu *rvu = flrwork->rvu; in rvu_flr_handler() local
1829 pf = flrwork - rvu->flr_wrk; in rvu_flr_handler()
1830 if (pf >= rvu->hw->total_pfs) { in rvu_flr_handler()
1831 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs); in rvu_flr_handler()
1835 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); in rvu_flr_handler()
1840 __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); in rvu_flr_handler()
1842 __rvu_flr_handler(rvu, pcifunc); in rvu_flr_handler()
1845 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf)); in rvu_flr_handler()
1848 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf)); in rvu_flr_handler()
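
rvu_flr_handler() above recovers which device queued the work by pointer arithmetic, pf = flrwork - rvu->flr_wrk, with indexes past total_pfs mapping to AF VFs. A small model of that index recovery; the struct layout and counts are assumptions for illustration:

/*
 * Each PF/VF owns one slot in a single work array; the handler recovers
 * the device index by subtracting the array base from the item pointer.
 */
#include <stddef.h>
#include <stdio.h>

struct flr_work {
    int dummy_work;     /* stands in for struct work_struct */
};

#define TOTAL_PFS   4
#define TOTAL_VFS   8

static struct flr_work flr_wrk[TOTAL_PFS + TOTAL_VFS];

static void flr_handler(struct flr_work *w)
{
    ptrdiff_t idx = w - flr_wrk;    /* which device queued this work */

    if (idx >= TOTAL_PFS)
        printf("FLR for AF VF%td\n", idx - TOTAL_PFS);
    else
        printf("FLR for PF%td\n", idx);
}

int main(void)
{
    flr_handler(&flr_wrk[2]);   /* a PF slot */
    flr_handler(&flr_wrk[6]);   /* a VF slot */
    return 0;
}
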
1851 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) in rvu_afvf_queue_flr_work() argument
1859 intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg)); in rvu_afvf_queue_flr_work()
1866 dev = vf + start_vf + rvu->hw->total_pfs; in rvu_afvf_queue_flr_work()
1867 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); in rvu_afvf_queue_flr_work()
1869 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); in rvu_afvf_queue_flr_work()
1870 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); in rvu_afvf_queue_flr_work()
1876 struct rvu *rvu = (struct rvu *)rvu_irq; in rvu_flr_intr_handler() local
1880 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT); in rvu_flr_intr_handler()
1884 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { in rvu_flr_intr_handler()
1887 queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); in rvu_flr_intr_handler()
1889 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, in rvu_flr_intr_handler()
1892 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, in rvu_flr_intr_handler()
1898 rvu_afvf_queue_flr_work(rvu, 0, 64); in rvu_flr_intr_handler()
1899 if (rvu->vfs > 64) in rvu_flr_intr_handler()
1900 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64); in rvu_flr_intr_handler()
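
The FLR and AF-VF interrupt paths above split VFs across two 64-bit registers: VFs 0..63 use register index 0 and anything beyond uses register index 1 with INTR_MASK(vfs - 64). A sketch of that split; INTR_MASK() itself is not shown in the listing, so the mask helper below is an assumed definition:

/*
 * Compute the per-register masks for a VF count that may exceed 64.
 * The printout only models which mask would be written where.
 */
#include <stdint.h>
#include <stdio.h>

/* All-ones for n >= 64, else the low n bits (avoids the 1ULL << 64 trap). */
static uint64_t intr_mask(int n)
{
    return n >= 64 ? ~0ULL : (n <= 0 ? 0 : (1ULL << n) - 1);
}

static void enable_afvf_intr(int vfs)
{
    printf("INT_ENA_W1SX(0) <- %#llx\n", (unsigned long long)intr_mask(vfs));
    if (vfs > 64)
        printf("INT_ENA_W1SX(1) <- %#llx\n",
               (unsigned long long)intr_mask(vfs - 64));
}

int main(void)
{
    enable_afvf_intr(20);   /* fits in register 0 */
    enable_afvf_intr(96);   /* spills 32 VFs into register 1 */
    return 0;
}
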
1905 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr) in rvu_me_handle_vfset() argument
1915 rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf)); in rvu_me_handle_vfset()
1917 rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf)); in rvu_me_handle_vfset()
1925 struct rvu *rvu = (struct rvu *)rvu_irq; in rvu_me_vf_intr_handler() local
1929 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); in rvu_me_vf_intr_handler()
1932 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset)); in rvu_me_vf_intr_handler()
1934 rvu_me_handle_vfset(rvu, vfset, intr); in rvu_me_vf_intr_handler()
1943 struct rvu *rvu = (struct rvu *)rvu_irq; in rvu_me_pf_intr_handler() local
1947 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); in rvu_me_pf_intr_handler()
1952 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { in rvu_me_pf_intr_handler()
1955 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, in rvu_me_pf_intr_handler()
1958 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT, in rvu_me_pf_intr_handler()
1966 static void rvu_unregister_interrupts(struct rvu *rvu) in rvu_unregister_interrupts() argument
1971 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, in rvu_unregister_interrupts()
1972 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); in rvu_unregister_interrupts()
1975 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, in rvu_unregister_interrupts()
1976 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); in rvu_unregister_interrupts()
1979 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C, in rvu_unregister_interrupts()
1980 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); in rvu_unregister_interrupts()
1982 for (irq = 0; irq < rvu->num_vec; irq++) { in rvu_unregister_interrupts()
1983 if (rvu->irq_allocated[irq]) in rvu_unregister_interrupts()
1984 free_irq(pci_irq_vector(rvu->pdev, irq), rvu); in rvu_unregister_interrupts()
1987 pci_free_irq_vectors(rvu->pdev); in rvu_unregister_interrupts()
1988 rvu->num_vec = 0; in rvu_unregister_interrupts()
1991 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) in rvu_afvf_msix_vectors_num_ok() argument
1993 struct rvu_pfvf *pfvf = &rvu->pf[0]; in rvu_afvf_msix_vectors_num_ok()
1996 pfvf = &rvu->pf[0]; in rvu_afvf_msix_vectors_num_ok()
1997 offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; in rvu_afvf_msix_vectors_num_ok()
2007 static int rvu_register_interrupts(struct rvu *rvu) in rvu_register_interrupts() argument
2011 rvu->num_vec = pci_msix_vec_count(rvu->pdev); in rvu_register_interrupts()
2013 rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec, in rvu_register_interrupts()
2015 if (!rvu->irq_name) in rvu_register_interrupts()
2018 rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec, in rvu_register_interrupts()
2020 if (!rvu->irq_allocated) in rvu_register_interrupts()
2024 ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec, in rvu_register_interrupts()
2025 rvu->num_vec, PCI_IRQ_MSIX); in rvu_register_interrupts()
2027 dev_err(rvu->dev, in rvu_register_interrupts()
2029 rvu->num_vec, ret); in rvu_register_interrupts()
2034 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); in rvu_register_interrupts()
2035 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), in rvu_register_interrupts()
2037 &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); in rvu_register_interrupts()
2039 dev_err(rvu->dev, in rvu_register_interrupts()
2044 rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; in rvu_register_interrupts()
2047 rvu_enable_mbox_intr(rvu); in rvu_register_interrupts()
2050 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], in rvu_register_interrupts()
2052 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR), in rvu_register_interrupts()
2054 &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], in rvu_register_interrupts()
2055 rvu); in rvu_register_interrupts()
2057 dev_err(rvu->dev, in rvu_register_interrupts()
2061 rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true; in rvu_register_interrupts()
2064 rvu_write64(rvu, BLKADDR_RVUM, in rvu_register_interrupts()
2065 RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs)); in rvu_register_interrupts()
2067 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, in rvu_register_interrupts()
2068 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); in rvu_register_interrupts()
2071 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], in rvu_register_interrupts()
2073 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME), in rvu_register_interrupts()
2075 &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], in rvu_register_interrupts()
2076 rvu); in rvu_register_interrupts()
2078 dev_err(rvu->dev, in rvu_register_interrupts()
2081 rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true; in rvu_register_interrupts()
2084 rvu_write64(rvu, BLKADDR_RVUM, in rvu_register_interrupts()
2085 RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs)); in rvu_register_interrupts()
2087 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S, in rvu_register_interrupts()
2088 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); in rvu_register_interrupts()
2090 if (!rvu_afvf_msix_vectors_num_ok(rvu)) in rvu_register_interrupts()
2094 pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, in rvu_register_interrupts()
2099 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); in rvu_register_interrupts()
2100 ret = request_irq(pci_irq_vector(rvu->pdev, offset), in rvu_register_interrupts()
2102 &rvu->irq_name[offset * NAME_SIZE], in rvu_register_interrupts()
2103 rvu); in rvu_register_interrupts()
2105 dev_err(rvu->dev, in rvu_register_interrupts()
2108 rvu->irq_allocated[offset] = true; in rvu_register_interrupts()
2114 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); in rvu_register_interrupts()
2115 ret = request_irq(pci_irq_vector(rvu->pdev, offset), in rvu_register_interrupts()
2117 &rvu->irq_name[offset * NAME_SIZE], in rvu_register_interrupts()
2118 rvu); in rvu_register_interrupts()
2120 dev_err(rvu->dev, in rvu_register_interrupts()
2123 rvu->irq_allocated[offset] = true; in rvu_register_interrupts()
2127 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0"); in rvu_register_interrupts()
2128 ret = request_irq(pci_irq_vector(rvu->pdev, offset), in rvu_register_interrupts()
2130 &rvu->irq_name[offset * NAME_SIZE], rvu); in rvu_register_interrupts()
2132 dev_err(rvu->dev, in rvu_register_interrupts()
2136 rvu->irq_allocated[offset] = true; in rvu_register_interrupts()
2139 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1"); in rvu_register_interrupts()
2140 ret = request_irq(pci_irq_vector(rvu->pdev, offset), in rvu_register_interrupts()
2142 &rvu->irq_name[offset * NAME_SIZE], rvu); in rvu_register_interrupts()
2144 dev_err(rvu->dev, in rvu_register_interrupts()
2148 rvu->irq_allocated[offset] = true; in rvu_register_interrupts()
2152 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0"); in rvu_register_interrupts()
2153 ret = request_irq(pci_irq_vector(rvu->pdev, offset), in rvu_register_interrupts()
2155 &rvu->irq_name[offset * NAME_SIZE], rvu); in rvu_register_interrupts()
2157 dev_err(rvu->dev, in rvu_register_interrupts()
2161 rvu->irq_allocated[offset] = true; in rvu_register_interrupts()
2164 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1"); in rvu_register_interrupts()
2165 ret = request_irq(pci_irq_vector(rvu->pdev, offset), in rvu_register_interrupts()
2167 &rvu->irq_name[offset * NAME_SIZE], rvu); in rvu_register_interrupts()
2169 dev_err(rvu->dev, in rvu_register_interrupts()
2173 rvu->irq_allocated[offset] = true; in rvu_register_interrupts()
2177 rvu_unregister_interrupts(rvu); in rvu_register_interrupts()
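
rvu_register_interrupts() above keeps IRQ names in one flat array with a fixed-size slot per MSI-X vector, addressed as &rvu->irq_name[vec * NAME_SIZE]. A standalone model of that layout; NAME_SIZE's real value is not shown here, so the slot width below is an assumption:

/*
 * Fixed-stride name table: slot i lives at &irq_name[i * NAME_SIZE].
 */
#include <stdio.h>
#include <stdlib.h>

#define NAME_SIZE   32  /* assumed slot width */

int main(void)
{
    int num_vec = 4;
    char *irq_name = calloc(num_vec, NAME_SIZE);

    if (!irq_name)
        return 1;

    /* Fill a few slots the same way the driver does with sprintf(). */
    snprintf(&irq_name[0 * NAME_SIZE], NAME_SIZE, "RVUAF Mbox");
    snprintf(&irq_name[2 * NAME_SIZE], NAME_SIZE, "RVUAFVF Mbox0");

    for (int vec = 0; vec < num_vec; vec++)
        printf("vec %d: '%s'\n", vec, &irq_name[vec * NAME_SIZE]);

    free(irq_name);
    return 0;
}
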
2181 static void rvu_flr_wq_destroy(struct rvu *rvu) in rvu_flr_wq_destroy() argument
2183 if (rvu->flr_wq) { in rvu_flr_wq_destroy()
2184 flush_workqueue(rvu->flr_wq); in rvu_flr_wq_destroy()
2185 destroy_workqueue(rvu->flr_wq); in rvu_flr_wq_destroy()
2186 rvu->flr_wq = NULL; in rvu_flr_wq_destroy()
2190 static int rvu_flr_init(struct rvu *rvu) in rvu_flr_init() argument
2197 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { in rvu_flr_init()
2198 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); in rvu_flr_init()
2199 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf), in rvu_flr_init()
2203 rvu->flr_wq = alloc_workqueue("rvu_afpf_flr", in rvu_flr_init()
2206 if (!rvu->flr_wq) in rvu_flr_init()
2209 num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev); in rvu_flr_init()
2210 rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs, in rvu_flr_init()
2212 if (!rvu->flr_wrk) { in rvu_flr_init()
2213 destroy_workqueue(rvu->flr_wq); in rvu_flr_init()
2218 rvu->flr_wrk[dev].rvu = rvu; in rvu_flr_init()
2219 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler); in rvu_flr_init()
2222 mutex_init(&rvu->flr_lock); in rvu_flr_init()
2227 static void rvu_disable_afvf_intr(struct rvu *rvu) in rvu_disable_afvf_intr() argument
2229 int vfs = rvu->vfs; in rvu_disable_afvf_intr()
2231 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); in rvu_disable_afvf_intr()
2232 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); in rvu_disable_afvf_intr()
2233 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); in rvu_disable_afvf_intr()
2237 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), in rvu_disable_afvf_intr()
2239 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); in rvu_disable_afvf_intr()
2240 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); in rvu_disable_afvf_intr()
2243 static void rvu_enable_afvf_intr(struct rvu *rvu) in rvu_enable_afvf_intr() argument
2245 int vfs = rvu->vfs; in rvu_enable_afvf_intr()
2251 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs)); in rvu_enable_afvf_intr()
2252 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs)); in rvu_enable_afvf_intr()
2255 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); in rvu_enable_afvf_intr()
2256 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); in rvu_enable_afvf_intr()
2257 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs)); in rvu_enable_afvf_intr()
2263 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64)); in rvu_enable_afvf_intr()
2264 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), in rvu_enable_afvf_intr()
2267 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); in rvu_enable_afvf_intr()
2268 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); in rvu_enable_afvf_intr()
2269 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); in rvu_enable_afvf_intr()
2298 static int rvu_enable_sriov(struct rvu *rvu) in rvu_enable_sriov() argument
2300 struct pci_dev *pdev = rvu->pdev; in rvu_enable_sriov()
2303 if (!rvu_afvf_msix_vectors_num_ok(rvu)) { in rvu_enable_sriov()
2338 rvu->vfs = vfs; in rvu_enable_sriov()
2340 err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs, in rvu_enable_sriov()
2345 rvu_enable_afvf_intr(rvu); in rvu_enable_sriov()
2351 rvu_disable_afvf_intr(rvu); in rvu_enable_sriov()
2352 rvu_mbox_destroy(&rvu->afvf_wq_info); in rvu_enable_sriov()
2359 static void rvu_disable_sriov(struct rvu *rvu) in rvu_disable_sriov() argument
2361 rvu_disable_afvf_intr(rvu); in rvu_disable_sriov()
2362 rvu_mbox_destroy(&rvu->afvf_wq_info); in rvu_disable_sriov()
2363 pci_disable_sriov(rvu->pdev); in rvu_disable_sriov()
2366 static void rvu_update_module_params(struct rvu *rvu) in rvu_update_module_params() argument
2370 strscpy(rvu->mkex_pfl_name, in rvu_update_module_params()
2377 struct rvu *rvu; in rvu_probe() local
2380 rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL); in rvu_probe()
2381 if (!rvu) in rvu_probe()
2384 rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL); in rvu_probe()
2385 if (!rvu->hw) { in rvu_probe()
2386 devm_kfree(dev, rvu); in rvu_probe()
2390 pci_set_drvdata(pdev, rvu); in rvu_probe()
2391 rvu->pdev = pdev; in rvu_probe()
2392 rvu->dev = &pdev->dev; in rvu_probe()
2419 rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0); in rvu_probe()
2420 rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); in rvu_probe()
2421 if (!rvu->afreg_base || !rvu->pfreg_base) { in rvu_probe()
2428 rvu_update_module_params(rvu); in rvu_probe()
2431 rvu_check_block_implemented(rvu); in rvu_probe()
2433 rvu_reset_all_blocks(rvu); in rvu_probe()
2435 err = rvu_setup_hw_resources(rvu); in rvu_probe()
2440 err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF, in rvu_probe()
2441 rvu->hw->total_pfs, rvu_afpf_mbox_handler, in rvu_probe()
2446 err = rvu_flr_init(rvu); in rvu_probe()
2450 err = rvu_register_interrupts(rvu); in rvu_probe()
2455 err = rvu_enable_sriov(rvu); in rvu_probe()
2461 rvu_unregister_interrupts(rvu); in rvu_probe()
2463 rvu_flr_wq_destroy(rvu); in rvu_probe()
2465 rvu_mbox_destroy(&rvu->afpf_wq_info); in rvu_probe()
2467 rvu_cgx_exit(rvu); in rvu_probe()
2468 rvu_reset_all_blocks(rvu); in rvu_probe()
2469 rvu_free_hw_resources(rvu); in rvu_probe()
2476 devm_kfree(&pdev->dev, rvu->hw); in rvu_probe()
2477 devm_kfree(dev, rvu); in rvu_probe()
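
The tail of rvu_probe() above unwinds partially completed setup in reverse order through a ladder of error labels, mirroring the teardown sequence in rvu_remove(). A minimal model of that goto-unwind idiom, using plain allocations as stand-ins for the mbox, FLR, and interrupt setup steps:

/*
 * Goto-unwind sketch: each failing step jumps to a label that undoes only
 * the steps that already completed, in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static void *mbox, *flr, *irqs; /* stand-ins for the driver's subsystems */

static void rvu_remove_model(void)
{
    /* Teardown mirrors setup in reverse, like rvu_remove() above. */
    free(irqs);
    free(flr);
    free(mbox);
    irqs = flr = mbox = NULL;
}

static int rvu_probe_model(int fail_at)
{
    mbox = (fail_at == 1) ? NULL : malloc(16);
    if (!mbox)
        goto err_out;
    flr = (fail_at == 2) ? NULL : malloc(16);
    if (!flr)
        goto err_mbox;
    irqs = (fail_at == 3) ? NULL : malloc(16);
    if (!irqs)
        goto err_flr;
    return 0;

err_flr:
    free(flr);
    flr = NULL;
err_mbox:
    free(mbox);
    mbox = NULL;
err_out:
    return -1;
}

int main(void)
{
    printf("fail at FLR init -> %d\n", rvu_probe_model(2));
    printf("clean probe      -> %d\n", rvu_probe_model(0));
    rvu_remove_model();
    return 0;
}
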
2483 struct rvu *rvu = pci_get_drvdata(pdev); in rvu_remove() local
2485 rvu_unregister_interrupts(rvu); in rvu_remove()
2486 rvu_flr_wq_destroy(rvu); in rvu_remove()
2487 rvu_cgx_exit(rvu); in rvu_remove()
2488 rvu_mbox_destroy(&rvu->afpf_wq_info); in rvu_remove()
2489 rvu_disable_sriov(rvu); in rvu_remove()
2490 rvu_reset_all_blocks(rvu); in rvu_remove()
2491 rvu_free_hw_resources(rvu); in rvu_remove()
2497 devm_kfree(&pdev->dev, rvu->hw); in rvu_remove()
2498 devm_kfree(&pdev->dev, rvu); in rvu_remove()