Lines Matching full:pf
19 * @pf: pointer to the PF structure
24 static void ice_free_vf_entries(struct ice_pf *pf) in ice_free_vf_entries() argument
26 struct ice_vfs *vfs = &pf->vfs; in ice_free_vf_entries()
64 struct ice_pf *pf = vf->pf; in ice_free_vf_res() local
82 last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1; in ice_free_vf_res()
90 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); in ice_free_vf_res()
91 ice_flush(&pf->hw); in ice_free_vf_res()
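The fragment above (ice_free_vf_res) derives the VF's last vector index from its first vector and the per-VF MSI-X count, then clears each vector's pending-interrupt state. A minimal standalone sketch of that range arithmetic, with assumed example values:

```c
#include <stdio.h>

int main(void)
{
	int first_vector_idx = 480;	/* assumed: VF's first vector in PF space */
	int num_msix_per = 17;		/* assumed: MSI-X vectors per VF */
	int last_vector_idx = first_vector_idx + num_msix_per - 1;

	/* the driver writes GLINT_DYN_CTL(i) with CLEARPBA for each i */
	for (int i = first_vector_idx; i <= last_vector_idx; i++)
		printf("clear pending events on vector %d\n", i);
	return 0;
}
```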
104 struct ice_pf *pf = vf->pf; in ice_dis_vf_mappings() local
110 hw = &pf->hw; in ice_dis_vf_mappings()
115 dev = ice_pf_to_dev(pf); in ice_dis_vf_mappings()
120 last = first + pf->vfs.num_msix_per - 1; in ice_dis_vf_mappings()
144 * @pf: pointer to the PF structure
146 * Since no MSIX entries are taken from the pf->irq_tracker, just clear in ice_sriov_free_msix_res()
147 * the pf->sriov_base_vector.
151 static int ice_sriov_free_msix_res(struct ice_pf *pf) in ice_sriov_free_msix_res() argument
155 if (!pf) in ice_sriov_free_msix_res()
158 res = pf->irq_tracker; in ice_sriov_free_msix_res()
163 WARN_ON(pf->sriov_base_vector < res->num_entries); in ice_sriov_free_msix_res()
165 pf->sriov_base_vector = 0; in ice_sriov_free_msix_res()
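ice_sriov_free_msix_res() only sanity-checks and resets the base, because the SR-IOV block never consumed irq_tracker entries. A simplified, hypothetical C sketch of that shape (types and names invented for illustration):

```c
#include <assert.h>
#include <stdio.h>

/* hypothetical simplified tracker, standing in for pf->irq_tracker */
struct res_tracker { int num_entries; };

/* mirrors the fragments above: freeing the SR-IOV block is just a
 * sanity check plus resetting the base */
static int sriov_free_msix_res(int *sriov_base_vector,
			       const struct res_tracker *res)
{
	if (!res)
		return -1;
	/* the SR-IOV base must lie at or past the tracked entries */
	assert(*sriov_base_vector >= res->num_entries);
	*sriov_base_vector = 0;
	return 0;
}

int main(void)
{
	struct res_tracker res = { .num_entries = 96 };
	int base = 480;

	sriov_free_msix_res(&base, &res);
	printf("base after free: %d\n", base);	/* 0 */
	return 0;
}
```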
172 * @pf: pointer to the PF structure
174 void ice_free_vfs(struct ice_pf *pf) in ice_free_vfs() argument
176 struct device *dev = ice_pf_to_dev(pf); in ice_free_vfs()
177 struct ice_vfs *vfs = &pf->vfs; in ice_free_vfs()
178 struct ice_hw *hw = &pf->hw; in ice_free_vfs()
182 if (!ice_has_vfs(pf)) in ice_free_vfs()
185 while (test_and_set_bit(ICE_VF_DIS, pf->state)) in ice_free_vfs()
192 if (!pci_vfs_assigned(pf->pdev)) in ice_free_vfs()
193 pci_disable_sriov(pf->pdev); in ice_free_vfs()
199 ice_eswitch_release(pf); in ice_free_vfs()
201 ice_for_each_vf(pf, bkt, vf) { in ice_free_vfs()
213 if (!pci_vfs_assigned(pf->pdev)) { in ice_free_vfs()
222 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, in ice_free_vfs()
230 if (ice_sriov_free_msix_res(pf)) in ice_free_vfs()
234 ice_free_vf_entries(pf); in ice_free_vfs()
238 clear_bit(ICE_VF_DIS, pf->state); in ice_free_vfs()
239 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_free_vfs()
252 struct ice_pf *pf = vf->pf; in ice_vf_vsi_setup() local
255 vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL); in ice_vf_vsi_setup()
258 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); in ice_vf_vsi_setup()
270 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
271 * @pf: pointer to PF structure
274 * This returns the first MSIX vector index in PF space that is used by this VF.
275 * This index is used when accessing PF relative registers such as
281 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) in ice_calc_vf_first_vector_idx() argument
283 return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per; in ice_calc_vf_first_vector_idx()
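The index math is simple striding: each VF owns a contiguous block of num_msix_per vectors starting at sriov_base_vector. A worked standalone example (values assumed for illustration):

```c
#include <stdio.h>

/* mirrors ice_calc_vf_first_vector_idx(): VFs are packed contiguously
 * after sriov_base_vector, num_msix_per vectors apiece */
static int calc_vf_first_vector_idx(int sriov_base_vector, int vf_id,
				    int num_msix_per)
{
	return sriov_base_vector + vf_id * num_msix_per;
}

int main(void)
{
	int base = 480, per_vf = 17;	/* assumed example values */

	for (int vf_id = 0; vf_id < 3; vf_id++)
		printf("VF %d: first vector %d\n", vf_id,
		       calc_vf_first_vector_idx(base, vf_id, per_vf));
	/* prints 480, 497, 514 */
	return 0;
}
```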
291 * device values and other registers need 0-based values, which represent PF
298 struct ice_pf *pf = vf->pf; in ice_ena_vf_msix_mappings() local
303 hw = &pf->hw; in ice_ena_vf_msix_mappings()
305 pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1; in ice_ena_vf_msix_mappings()
308 pf->hw.func_caps.common_cap.msix_vector_first_id; in ice_ena_vf_msix_mappings()
310 (device_based_first_msix + pf->vfs.num_msix_per) - 1; in ice_ena_vf_msix_mappings()
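Here the PF-relative first vector is converted to a device-relative one by adding msix_vector_first_id, and each range gets a matching last index. A sketch of both conversions with assumed values:

```c
#include <stdio.h>

int main(void)
{
	/* assumed example values */
	int pf_based_first_msix = 480;	/* index within the PF's space */
	int num_msix_per = 17;
	int msix_vector_first_id = 32;	/* offset of PF space in device space */

	int pf_based_last_msix = pf_based_first_msix + num_msix_per - 1;
	int device_based_first_msix = pf_based_first_msix + msix_vector_first_id;
	int device_based_last_msix = device_based_first_msix + num_msix_per - 1;

	printf("PF-based:     %d..%d\n", pf_based_first_msix, pf_based_last_msix);
	printf("device-based: %d..%d\n", device_based_first_msix,
	       device_based_last_msix);
	return 0;
}
```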
346 struct device *dev = ice_pf_to_dev(vf->pf); in ice_ena_vf_q_mappings()
348 struct ice_hw *hw = &vf->pf->hw; in ice_ena_vf_q_mappings()
359 /* set the VF PF Tx queue range in ice_ena_vf_q_mappings()
377 /* set the VF PF Rx queue range in ice_ena_vf_q_mappings()
407 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
413 struct ice_pf *pf; in ice_calc_vf_reg_idx() local
418 pf = vf->pf; in ice_calc_vf_reg_idx()
421 return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id + in ice_calc_vf_reg_idx()
450 * @pf: pointer to PF structure
453 * This function allows SR-IOV resources to be taken from the end of the PF's
455 * just set the pf->sriov_base_vector and return success.
461 * in the PF's space available for SR-IOV.
463 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) in ice_sriov_set_msix_res() argument
465 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_sriov_set_msix_res()
466 int vectors_used = pf->irq_tracker->num_entries; in ice_sriov_set_msix_res()
477 pf->sriov_base_vector = sriov_base_vector; in ice_sriov_set_msix_res()
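ice_sriov_set_msix_res() carves the SR-IOV block from the top of the PF's MSI-X space and rejects the request if the block would reach down into vectors the irq_tracker already owns at the bottom. A standalone sketch of that check (numbers assumed):

```c
#include <stdio.h>

static int set_msix_res(int total_vectors, int vectors_used,
			int num_msix_needed, int *sriov_base_vector)
{
	int base = total_vectors - num_msix_needed;

	if (base < vectors_used)
		return -1;	/* would collide with in-use vectors */

	*sriov_base_vector = base;
	return 0;
}

int main(void)
{
	int base = 0;

	/* assumed: 1024 total, 96 in use, 8 VFs x 17 vectors = 136 needed */
	if (!set_msix_res(1024, 96, 136, &base))
		printf("sriov_base_vector = %d\n", base);	/* 888 */
	return 0;
}
```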
484 * @pf: pointer to the PF structure
501 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
504 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) in ice_set_per_vf_res() argument
506 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); in ice_set_per_vf_res()
509 struct device *dev = ice_pf_to_dev(pf); in ice_set_per_vf_res()
512 lockdep_assert_held(&pf->vfs.table_lock); in ice_set_per_vf_res()
521 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - in ice_set_per_vf_res()
522 pf->irq_tracker->num_entries; in ice_set_per_vf_res()
541 avail_qs = ice_get_avail_txq_count(pf) / num_vfs; in ice_set_per_vf_res()
549 avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; in ice_set_per_vf_res()
561 err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs); in ice_set_per_vf_res()
569 pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); in ice_set_per_vf_res()
570 pf->vfs.num_msix_per = num_msix_per_vf; in ice_set_per_vf_res()
572 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); in ice_set_per_vf_res()
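ice_set_per_vf_res() divides the leftover MSI-X vectors and the PF's free Tx/Rx queues evenly across the requested VFs, picking an MSI-X tier and then a matching queue-pair count. A simplified sketch; the tier constants, the single reserved non-queue vector, and all counts below are illustrative, not the driver's exact values:

```c
#include <stdio.h>

int main(void)
{
	int total_msix = 1024, used_msix = 96, num_vfs = 8;
	int avail_txq = 200, avail_rxq = 200;	/* assumed free PF queues */

	int msix_avail_per_vf = (total_msix - used_msix) / num_vfs;
	int msix_per_vf = msix_avail_per_vf >= 17 ? 17 :
			  msix_avail_per_vf >= 5  ? 5  : 0;
	if (!msix_per_vf) {
		printf("not enough MSI-X for %d VFs\n", num_vfs);
		return 1;
	}

	/* one vector assumed reserved for non-queue (mailbox/OICR) use */
	int qs = msix_per_vf - 1;
	if (qs > avail_txq / num_vfs)
		qs = avail_txq / num_vfs;
	if (qs > avail_rxq / num_vfs)
		qs = avail_rxq / num_vfs;

	printf("%d VFs: %d MSI-X and %d queue pairs each\n",
	       num_vfs, msix_per_vf, qs);
	return 0;
}
```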
587 struct ice_pf *pf = vf->pf; in ice_init_vf_vsi_res() local
593 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); in ice_init_vf_vsi_res()
595 dev = ice_pf_to_dev(pf); in ice_init_vf_vsi_res()
641 * @pf: PF the VFs are associated with
643 static int ice_start_vfs(struct ice_pf *pf) in ice_start_vfs() argument
645 struct ice_hw *hw = &pf->hw; in ice_start_vfs()
650 lockdep_assert_held(&pf->vfs.table_lock); in ice_start_vfs()
653 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
658 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", in ice_start_vfs()
673 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
705 struct ice_pf *pf = vf->pf; in ice_sriov_clear_mbx_register() local
707 wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0); in ice_sriov_clear_mbx_register()
708 wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0); in ice_sriov_clear_mbx_register()
720 struct ice_pf *pf = vf->pf; in ice_sriov_trigger_reset_register() local
726 dev = ice_pf_to_dev(pf); in ice_sriov_trigger_reset_register()
727 hw = &pf->hw; in ice_sriov_trigger_reset_register()
767 struct ice_pf *pf = vf->pf; in ice_sriov_poll_reset_status() local
776 reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); in ice_sriov_poll_reset_status()
792 struct ice_hw *hw = &vf->pf->hw; in ice_sriov_clear_reset_trigger()
810 struct ice_pf *pf = vf->pf; in ice_sriov_vsi_rebuild() local
814 dev_err(ice_pf_to_dev(pf), in ice_sriov_vsi_rebuild()
832 wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); in ice_sriov_post_vsi_rebuild()
848 * @pf: pointer to the PF structure
859 static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) in ice_create_vf_entries() argument
861 struct ice_vfs *vfs = &pf->vfs; in ice_create_vf_entries()
876 vf->pf = pf; in ice_create_vf_entries()
882 vf->vf_sw_id = pf->first_sw; in ice_create_vf_entries()
885 vf->num_vf_qs = pf->vfs.num_qps_per; in ice_create_vf_entries()
904 ice_free_vf_entries(pf); in ice_create_vf_entries()
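Each entry gets its ID, a back-pointer to the PF, and the PF-wide per-VF queue count before being inserted into the VF table (a hashtable in the driver); any mid-loop failure unwinds via ice_free_vf_entries(). A hypothetical flat-array sketch of that create-or-unwind pattern:

```c
#include <stdio.h>
#include <stdlib.h>

struct vf_entry {
	unsigned int vf_id;
	unsigned int num_vf_qs;
	/* the driver also stores vf->pf, vf->vf_sw_id, ops, etc. */
};

static struct vf_entry *create_vf_entries(unsigned int num_vfs,
					  unsigned int qps_per_vf)
{
	struct vf_entry *tab = calloc(num_vfs, sizeof(*tab));

	if (!tab)
		return NULL;	/* the driver would free partial entries here */
	for (unsigned int i = 0; i < num_vfs; i++) {
		tab[i].vf_id = i;
		tab[i].num_vf_qs = qps_per_vf;
	}
	return tab;
}

int main(void)
{
	struct vf_entry *tab = create_vf_entries(4, 16);

	if (tab)
		printf("VF 3 gets %u queue pairs\n", tab[3].num_vf_qs);
	free(tab);
	return 0;
}
```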
910 * @pf: pointer to the PF structure
913 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) in ice_ena_vfs() argument
915 struct device *dev = ice_pf_to_dev(pf); in ice_ena_vfs()
916 struct ice_hw *hw = &pf->hw; in ice_ena_vfs()
920 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), in ice_ena_vfs()
922 set_bit(ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
925 ret = pci_enable_sriov(pf->pdev, num_vfs); in ice_ena_vfs()
929 mutex_lock(&pf->vfs.table_lock); in ice_ena_vfs()
931 ret = ice_set_per_vf_res(pf, num_vfs); in ice_ena_vfs()
938 ret = ice_create_vf_entries(pf, num_vfs); in ice_ena_vfs()
945 ret = ice_start_vfs(pf); in ice_ena_vfs()
952 clear_bit(ICE_VF_DIS, pf->state); in ice_ena_vfs()
954 ret = ice_eswitch_configure(pf); in ice_ena_vfs()
961 if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) in ice_ena_vfs()
964 mutex_unlock(&pf->vfs.table_lock); in ice_ena_vfs()
969 ice_free_vf_entries(pf); in ice_ena_vfs()
971 mutex_unlock(&pf->vfs.table_lock); in ice_ena_vfs()
972 pci_disable_sriov(pf->pdev); in ice_ena_vfs()
976 clear_bit(ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
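The err_* labels above implement the kernel's usual goto-based unwind: each failing step jumps to a label that undoes everything already set up, in reverse order. A self-contained sketch of the pattern (stub functions, one forced failure):

```c
#include <stdio.h>

static int set_per_vf_res(void)    { return 0; }
static int create_vf_entries(void) { return 0; }
static int start_vfs(void)         { return -1; }	/* force the unwind */
static void free_vf_entries(void)  { puts("free VF entries"); }
static void disable_sriov(void)    { puts("disable SR-IOV"); }

static int ena_vfs(void)
{
	int ret;

	ret = set_per_vf_res();
	if (ret)
		goto err_unroll_sriov;
	ret = create_vf_entries();
	if (ret)
		goto err_unroll_sriov;
	ret = start_vfs();
	if (ret)
		goto err_unroll_vf_entries;
	return 0;

err_unroll_vf_entries:
	free_vf_entries();
err_unroll_sriov:
	disable_sriov();
	return ret;
}

int main(void) { return ena_vfs() ? 1 : 0; }
```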
982 * @pf: pointer to the PF structure
987 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) in ice_pci_sriov_ena() argument
989 int pre_existing_vfs = pci_num_vf(pf->pdev); in ice_pci_sriov_ena()
990 struct device *dev = ice_pf_to_dev(pf); in ice_pci_sriov_ena()
994 ice_free_vfs(pf); in ice_pci_sriov_ena()
998 if (num_vfs > pf->vfs.num_supported) { in ice_pci_sriov_ena()
1000 num_vfs, pf->vfs.num_supported); in ice_pci_sriov_ena()
1005 err = ice_ena_vfs(pf, num_vfs); in ice_pci_sriov_ena()
1011 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_pci_sriov_ena()
1017 * @pf: PF to enable SR-IOV on in ice_check_sriov_allowed()
1019 static int ice_check_sriov_allowed(struct ice_pf *pf) in ice_check_sriov_allowed() argument
1021 struct device *dev = ice_pf_to_dev(pf); in ice_check_sriov_allowed()
1023 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { in ice_check_sriov_allowed()
1028 if (ice_is_safe_mode(pf)) { in ice_check_sriov_allowed()
1033 if (!ice_pf_state_is_nominal(pf)) { in ice_check_sriov_allowed()
1052 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_configure() local
1053 struct device *dev = ice_pf_to_dev(pf); in ice_sriov_configure()
1056 err = ice_check_sriov_allowed(pf); in ice_sriov_configure()
1062 ice_free_vfs(pf); in ice_sriov_configure()
1063 ice_mbx_deinit_snapshot(&pf->hw); in ice_sriov_configure()
1064 if (pf->lag) in ice_sriov_configure()
1065 ice_enable_lag(pf->lag); in ice_sriov_configure()
1073 err = ice_mbx_init_snapshot(&pf->hw, num_vfs); in ice_sriov_configure()
1077 err = ice_pci_sriov_ena(pf, num_vfs); in ice_sriov_configure()
1079 ice_mbx_deinit_snapshot(&pf->hw); in ice_sriov_configure()
1083 if (pf->lag) in ice_sriov_configure()
1084 ice_disable_lag(pf->lag); in ice_sriov_configure()
1090 * @pf: pointer to the PF structure
1095 void ice_process_vflr_event(struct ice_pf *pf) in ice_process_vflr_event() argument
1097 struct ice_hw *hw = &pf->hw; in ice_process_vflr_event()
1102 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_process_vflr_event()
1103 !ice_has_vfs(pf)) in ice_process_vflr_event()
1106 mutex_lock(&pf->vfs.table_lock); in ice_process_vflr_event()
1107 ice_for_each_vf(pf, bkt, vf) { in ice_process_vflr_event()
1118 mutex_unlock(&pf->vfs.table_lock); in ice_process_vflr_event()
1122 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1123 * @pf: PF used to index all VFs
1124 * @pfq: queue index relative to the PF's function space
1133 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) in ice_get_vf_from_pfq() argument
1139 ice_for_each_vf_rcu(pf, bkt, vf) { in ice_get_vf_from_pfq()
1165 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1166 * @pf: PF used for conversion
1167 * @globalq: global queue index used to convert to PF space queue index
1169 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) in ice_globalq_to_pfq() argument
1171 return globalq - pf->hw.func_caps.common_cap.rxq_first_id; in ice_globalq_to_pfq()
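Putting the two helpers together: a global Rx queue index is rebased into PF space by subtracting rxq_first_id, then matched against each VF's VSI queue range to find the owner. A sketch with invented ranges:

```c
#include <stdio.h>

struct vf { int vf_id, rxq_start, rxq_count; };	/* simplified VF record */

int main(void)
{
	struct vf vfs[] = { { 0, 64, 4 }, { 1, 68, 4 } };
	int rxq_first_id = 128;	/* assumed PF offset in global queue space */
	int globalq = 197;

	int pfq = globalq - rxq_first_id;	/* 69: PF-relative index */

	for (unsigned int i = 0; i < sizeof(vfs) / sizeof(vfs[0]); i++)
		if (pfq >= vfs[i].rxq_start &&
		    pfq < vfs[i].rxq_start + vfs[i].rxq_count)
			printf("queue %d belongs to VF %d\n", globalq,
			       vfs[i].vf_id);	/* VF 1 */
	return 0;
}
```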
1176 * @pf: PF that the LAN overflow event happened on
1184 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_vf_lan_overflow_event() argument
1190 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); in ice_vf_lan_overflow_event()
1196 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); in ice_vf_lan_overflow_event()
1215 struct ice_pf *pf = np->vsi->back; in ice_set_vf_spoofchk() local
1221 dev = ice_pf_to_dev(pf); in ice_set_vf_spoofchk()
1223 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_spoofchk()
1275 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_cfg() local
1279 vf = ice_get_vf_by_id(pf, vf_id); in ice_get_vf_cfg()
1322 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_mac() local
1331 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_mac()
1348 /* VF is notified of its new MAC via the PF's response to the in ice_set_vf_mac()
1359 /* PF will add MAC rule for the VF */ in ice_set_vf_mac()
1383 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_trust() local
1387 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_set_vf_trust()
1388 dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); in ice_set_vf_trust()
1392 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_trust()
1410 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", in ice_set_vf_trust()
1430 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_link_state() local
1434 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_link_state()
1468 * @pf: PF associated with VFs
1470 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) in ice_calc_all_vfs_min_tx_rate() argument
1477 ice_for_each_vf_rcu(pf, bkt, vf) in ice_calc_all_vfs_min_tx_rate()
1507 all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); in ice_min_tx_rate_oversubscribed()
1513 …dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d… in ice_min_tx_rate_oversubscribed()
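The oversubscription check sums every VF's guaranteed min Tx rate, swaps the target VF's old rate for the requested one, and rejects the change if the total would exceed link speed. A standalone sketch of that accounting (Mbps values assumed):

```c
#include <stdio.h>

int main(void)
{
	int min_rates[] = { 2000, 3000, 1000 };	/* all VFs' current min rates */
	int vf_old_rate = 1000;			/* target VF's current rate */
	int requested = 5000, link_speed_mbps = 10000;
	int all_vfs_min = 0;

	for (unsigned int i = 0; i < sizeof(min_rates) / sizeof(min_rates[0]); i++)
		all_vfs_min += min_rates[i];
	all_vfs_min -= vf_old_rate;	/* old rate is being overwritten */

	if (all_vfs_min + requested > link_speed_mbps)
		printf("oversubscribed by %d Mbps\n",
		       all_vfs_min + requested - link_speed_mbps);
	else
		printf("ok: %d of %d Mbps reserved\n",
		       all_vfs_min + requested, link_speed_mbps);
	return 0;
}
```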
1534 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_bw() local
1540 dev = ice_pf_to_dev(pf); in ice_set_vf_bw()
1542 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_bw()
1556 if (min_tx_rate && ice_is_dcb_active(pf)) { in ice_set_vf_bw()
1557 dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n"); in ice_set_vf_bw()
1596 * @netdev: the netdev of the PF
1603 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_stats() local
1609 vf = ice_get_vf_by_id(pf, vf_id); in ice_get_vf_stats()
1685 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_port_vlan() local
1691 dev = ice_pf_to_dev(pf); in ice_set_vf_port_vlan()
1699 if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { in ice_set_vf_port_vlan()
1705 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_port_vlan()
1746 struct ice_pf *pf = vf->pf; in ice_print_vf_rx_mdd_event() local
1749 dev = ice_pf_to_dev(pf); in ice_print_vf_rx_mdd_event()
1751 …dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-r… in ice_print_vf_rx_mdd_event()
1752 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, in ice_print_vf_rx_mdd_event()
1754 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) in ice_print_vf_rx_mdd_event()
1760 * @pf: pointer to the PF structure
1764 void ice_print_vfs_mdd_events(struct ice_pf *pf) in ice_print_vfs_mdd_events() argument
1766 struct device *dev = ice_pf_to_dev(pf); in ice_print_vfs_mdd_events()
1767 struct ice_hw *hw = &pf->hw; in ice_print_vfs_mdd_events()
1772 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) in ice_print_vfs_mdd_events()
1776 if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1)) in ice_print_vfs_mdd_events()
1779 pf->vfs.last_printed_mdd_jiffies = jiffies; in ice_print_vfs_mdd_events()
1781 mutex_lock(&pf->vfs.table_lock); in ice_print_vfs_mdd_events()
1782 ice_for_each_vf(pf, bkt, vf) { in ice_print_vfs_mdd_events()
1795 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", in ice_print_vfs_mdd_events()
1800 mutex_unlock(&pf->vfs.table_lock); in ice_print_vfs_mdd_events()
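The time_is_after_jiffies() test above throttles these messages to at most one burst per second. A simplified sketch of that jiffies comparison, using a fake tick counter in place of the kernel's:

```c
#include <stdio.h>

#define HZ 100
static unsigned long jiffies;	/* stand-in for the kernel tick counter */

static int should_print(unsigned long *last_printed)
{
	/* "last + HZ is still in the future" means less than one second
	 * has elapsed since the previous print, so skip */
	if ((long)(*last_printed + HZ - jiffies) > 0)
		return 0;
	*last_printed = jiffies;
	return 1;
}

int main(void)
{
	unsigned long last = 0;

	jiffies = 50;	/* half a second in: throttled */
	printf("t=50:  %s\n", should_print(&last) ? "print" : "skip");
	jiffies = 150;	/* 1.5 s in: allowed */
	printf("t=150: %s\n", should_print(&last) ? "print" : "skip");
	return 0;
}
```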
1804 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
1807 * Called when recovering from a PF FLR to restore interrupt capability to
1836 * @pf: ptr to struct ice_pf
1842 ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, in ice_is_malicious_vf() argument
1846 struct device *dev = ice_pf_to_dev(pf); in ice_is_malicious_vf()
1852 vf = ice_get_vf_by_id(pf, vf_id); in ice_is_malicious_vf()
1861 mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries; in ice_is_malicious_vf()
1866 status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf); in ice_is_malicious_vf()
1876 status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs, in ice_is_malicious_vf()
1883 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); in ice_is_malicious_vf()
1886 …dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing… in ice_is_malicious_vf()