Lines Matching full:pf
11 * @pf: pointer to the PF structure
14 static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id) in ice_validate_vf_id() argument
17 if (vf_id >= pf->num_alloc_vfs) { in ice_validate_vf_id()
18 dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id); in ice_validate_vf_id()
26 * @pf: pointer to the PF structure
29 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) in ice_check_vf_init() argument
32 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n", in ice_check_vf_init()
71 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
72 * @pf: pointer to the PF structure
79 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, in ice_vc_vf_broadcast() argument
82 struct ice_hw *hw = &pf->hw; in ice_vc_vf_broadcast()
85 ice_for_each_vf(pf, i) { in ice_vc_vf_broadcast()
86 struct ice_vf *vf = &pf->vf[i]; in ice_vc_vf_broadcast()
145 struct ice_pf *pf = vf->pf; in ice_is_vf_link_up() local
147 if (ice_check_vf_init(pf, vf)) in ice_is_vf_link_up()
155 return pf->hw.port_info->phy.link_info.link_info & in ice_is_vf_link_up()
168 struct ice_hw *hw = &vf->pf->hw; in ice_vc_notify_vf_link_state()
200 ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]); in ice_vf_vsi_release()
210 struct ice_pf *pf = vf->pf; in ice_free_vf_res() local
224 last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1; in ice_free_vf_res()
232 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); in ice_free_vf_res()
233 ice_flush(&pf->hw); in ice_free_vf_res()
246 struct ice_pf *pf = vf->pf; in ice_dis_vf_mappings() local
252 hw = &pf->hw; in ice_dis_vf_mappings()
253 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_dis_vf_mappings()
255 dev = ice_pf_to_dev(pf); in ice_dis_vf_mappings()
260 last = first + pf->num_msix_per_vf - 1; in ice_dis_vf_mappings()
284 * @pf: pointer to the PF structure
286 * Since no MSIX entries are taken from the pf->irq_tracker, just clear in ice_sriov_free_msix_res()
287 * the pf->sriov_base_vector.
291 static int ice_sriov_free_msix_res(struct ice_pf *pf) in ice_sriov_free_msix_res() argument
295 if (!pf) in ice_sriov_free_msix_res()
298 res = pf->irq_tracker; in ice_sriov_free_msix_res()
303 WARN_ON(pf->sriov_base_vector < res->num_entries); in ice_sriov_free_msix_res()
305 pf->sriov_base_vector = 0; in ice_sriov_free_msix_res()
328 struct ice_pf *pf = vf->pf; in ice_dis_vf_qs() local
331 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_dis_vf_qs()
340 * @pf: pointer to the PF structure
342 void ice_free_vfs(struct ice_pf *pf) in ice_free_vfs() argument
344 struct device *dev = ice_pf_to_dev(pf); in ice_free_vfs()
345 struct ice_hw *hw = &pf->hw; in ice_free_vfs()
348 if (!pf->vf) in ice_free_vfs()
351 while (test_and_set_bit(__ICE_VF_DIS, pf->state)) in ice_free_vfs()
358 if (!pci_vfs_assigned(pf->pdev)) in ice_free_vfs()
359 pci_disable_sriov(pf->pdev); in ice_free_vfs()
364 ice_for_each_vf(pf, i) in ice_free_vfs()
365 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) in ice_free_vfs()
366 ice_dis_vf_qs(&pf->vf[i]); in ice_free_vfs()
368 tmp = pf->num_alloc_vfs; in ice_free_vfs()
369 pf->num_qps_per_vf = 0; in ice_free_vfs()
370 pf->num_alloc_vfs = 0; in ice_free_vfs()
372 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { in ice_free_vfs()
374 ice_dis_vf_mappings(&pf->vf[i]); in ice_free_vfs()
375 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); in ice_free_vfs()
376 ice_free_vf_res(&pf->vf[i]); in ice_free_vfs()
380 if (ice_sriov_free_msix_res(pf)) in ice_free_vfs()
383 devm_kfree(dev, pf->vf); in ice_free_vfs()
384 pf->vf = NULL; in ice_free_vfs()
390 if (!pci_vfs_assigned(pf->pdev)) { in ice_free_vfs()
404 clear_bit(__ICE_VF_DIS, pf->state); in ice_free_vfs()
405 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_free_vfs()
420 struct ice_pf *pf = vf->pf; in ice_trigger_vf_reset() local
426 dev = ice_pf_to_dev(pf); in ice_trigger_vf_reset()
427 hw = &pf->hw; in ice_trigger_vf_reset()
532 return vf->pf->hw.port_info; in ice_vf_get_port_info()
545 struct ice_pf *pf = vf->pf; in ice_vf_vsi_setup() local
548 vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id); in ice_vf_vsi_setup()
551 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); in ice_vf_vsi_setup()
563 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
564 * @pf: pointer to PF structure
567 * This returns the first MSIX vector index in PF space that is used by this VF.
568 * This index is used when accessing PF relative registers such as
574 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) in ice_calc_vf_first_vector_idx() argument
576 return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf; in ice_calc_vf_first_vector_idx()
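
A worked example of the layout computed at line 576 (values hypothetical): with pf->sriov_base_vector = 64 and pf->num_msix_per_vf = 5, VF 3's first PF-space vector is 64 + 3 * 5 = 79. A minimal standalone sketch of the same scheme, assuming contiguous per-VF vector blocks:

    /* Hedged sketch: each VF owns a contiguous block of num_msix_per_vf
     * vectors, laid out back to back starting at sriov_base_vector.
     */
    static int calc_vf_first_vector_idx(int sriov_base_vector,
                                        int num_msix_per_vf, int vf_id)
    {
            return sriov_base_vector + vf_id * num_msix_per_vf;
    }
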
583 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
588 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; in ice_vf_rebuild_host_vlan_cfg()
589 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_vlan_cfg()
620 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
625 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; in ice_vf_rebuild_host_mac_cfg()
626 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_mac_cfg()
672 * device values and other registers need 0-based values, which represent PF
679 struct ice_pf *pf = vf->pf; in ice_ena_vf_msix_mappings() local
684 hw = &pf->hw; in ice_ena_vf_msix_mappings()
686 pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1; in ice_ena_vf_msix_mappings()
689 pf->hw.func_caps.common_cap.msix_vector_first_id; in ice_ena_vf_msix_mappings()
691 (device_based_first_msix + pf->num_msix_per_vf) - 1; in ice_ena_vf_msix_mappings()
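
Per the fragment at line 672, some registers want device-relative (0-based across the whole device) vector indices rather than PF-relative ones, and lines 686-691 derive both ranges for the VF's block. A hedged sketch of the conversion implied by line 689 (helper name illustrative):

    /* Hedged sketch: a device-based MSIX index is the PF-based index
     * shifted by the device-wide ID of this PF's first vector
     * (msix_vector_first_id in the excerpt above).
     */
    static int pf_to_device_msix(int pf_based_idx, int msix_vector_first_id)
    {
            return pf_based_idx + msix_vector_first_id;
    }
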
727 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; in ice_ena_vf_q_mappings()
728 struct device *dev = ice_pf_to_dev(vf->pf); in ice_ena_vf_q_mappings()
729 struct ice_hw *hw = &vf->pf->hw; in ice_ena_vf_q_mappings()
737 /* set the VF PF Tx queue range in ice_ena_vf_q_mappings()
755 /* set the VF PF Rx queue range in ice_ena_vf_q_mappings()
775 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; in ice_ena_vf_mappings()
783 * @pf: pointer to the PF structure
784 * @avail_res: available resources in the PF structure
789 * returns zero if the PF cannot accommodate all num_alloc_vfs. in ice_determine_res()
792 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res) in ice_determine_res() argument
797 /* start by checking if PF can assign max number of resources for in ice_determine_res()
808 num_all_res = pf->num_alloc_vfs * res; in ice_determine_res()
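
Reading lines 792-808 together, ice_determine_res() searches downward from max_res for the largest per-VF count whose total (num_alloc_vfs * res, line 808) still fits in avail_res, returning 0 when even min_res does not fit. A hedged reconstruction of that loop (the driver's actual step size may differ from the linear one used here):

    /* Hedged sketch: largest res in [min_res, max_res] such that
     * num_alloc_vfs * res <= avail_res; 0 when no value fits.
     */
    static int determine_res(int num_alloc_vfs, int avail_res,
                             int max_res, int min_res)
    {
            int res;

            for (res = max_res; res >= min_res; res--)
                    if (num_alloc_vfs * res <= avail_res)
                            return res;

            return 0;
    }
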
821 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
827 struct ice_pf *pf; in ice_calc_vf_reg_idx() local
832 pf = vf->pf; in ice_calc_vf_reg_idx()
835 return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id + in ice_calc_vf_reg_idx()
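
The register index at lines 832-835 starts from the same per-VF block arithmetic as ice_calc_vf_first_vector_idx() above; the expression at line 835 is cut off here because its continuation line does not contain "pf". As a worked example (hypothetical values): with sriov_base_vector = 64, num_msix_per_vf = 5 and vf_id = 3, the VF's block begins at register index 79, and the trailing term (presumably the vector's offset within that block) is added on top.
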
864 * @pf: pointer to PF structure
867 * This function allows SR-IOV resources to be taken from the end of the PF's
869 * just set the pf->sriov_base_vector and return success.
875 * in the PF's space available for SR-IOV.
877 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) in ice_sriov_set_msix_res() argument
879 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_sriov_set_msix_res()
880 int vectors_used = pf->irq_tracker->num_entries; in ice_sriov_set_msix_res()
891 pf->sriov_base_vector = sriov_base_vector; in ice_sriov_set_msix_res()
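
Lines 877-891 carve the SR-IOV vector block out of the top of the PF's MSIX space: the base is the total vector count minus what SR-IOV needs, and the assignment only succeeds when that base does not dip into entries already used by pf->irq_tracker. A hedged sketch of the check:

    /* Hedged sketch: reserve num_msix_needed vectors at the end of the
     * PF's MSIX space, refusing if they would overlap used entries.
     */
    static int set_sriov_msix_base(int total_vectors, int vectors_used,
                                   int num_msix_needed, int *sriov_base)
    {
            int base = total_vectors - num_msix_needed;

            if (base < vectors_used)
                    return -EINVAL; /* tail of the space is not free */

            *sriov_base = base;
            return 0;
    }
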
898 * @pf: pointer to the PF structure
914 * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be used in ice_set_per_vf_res()
917 static int ice_set_per_vf_res(struct ice_pf *pf) in ice_set_per_vf_res() argument
919 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); in ice_set_per_vf_res()
921 struct device *dev = ice_pf_to_dev(pf); in ice_set_per_vf_res()
924 if (!pf->num_alloc_vfs || max_valid_res_idx < 0) in ice_set_per_vf_res()
928 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - in ice_set_per_vf_res()
929 pf->irq_tracker->num_entries; in ice_set_per_vf_res()
930 msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs; in ice_set_per_vf_res()
942 pf->num_alloc_vfs); in ice_set_per_vf_res()
947 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), in ice_set_per_vf_res()
953 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), in ice_set_per_vf_res()
961 ICE_MIN_QS_PER_VF, pf->num_alloc_vfs); in ice_set_per_vf_res()
965 if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) { in ice_set_per_vf_res()
967 pf->num_alloc_vfs); in ice_set_per_vf_res()
972 pf->num_qps_per_vf = min_t(int, num_txq, num_rxq); in ice_set_per_vf_res()
973 pf->num_msix_per_vf = num_msix_per_vf; in ice_set_per_vf_res()
975 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf); in ice_set_per_vf_res()
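
A worked example of the availability math at lines 928-930 (numbers hypothetical): with 1024 PF MSI-X vectors, 96 entries already in the irq_tracker, and 16 VFs, msix_avail_for_sriov = 1024 - 96 = 928 and msix_avail_per_vf = 928 / 16 = 58. Queue counts are then sized with ice_determine_res() (lines 947-953), and the per-VF queue-pair count is the smaller of the Tx and Rx results (line 972).
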
986 struct ice_hw *hw = &vf->pf->hw; in ice_clear_vf_reset_trigger()
1009 struct ice_pf *pf = vf->pf; in ice_vf_set_vsi_promisc() local
1013 hw = &pf->hw; in ice_vf_set_vsi_promisc()
1038 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; in ice_vf_clear_counters()
1065 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_cfg()
1103 struct ice_pf *pf = vf->pf; in ice_vf_rebuild_vsi() local
1106 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vf_rebuild_vsi()
1109 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", in ice_vf_rebuild_vsi()
1116 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vf_rebuild_vsi()
1144 struct ice_pf *pf = vf->pf; in ice_vf_post_vsi_rebuild() local
1147 hw = &pf->hw; in ice_vf_post_vsi_rebuild()
1158 * @pf: pointer to the PF structure
1163 * during PF routines which need to reset all VFs, as otherwise it must perform
1168 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) in ice_reset_all_vfs() argument
1170 struct device *dev = ice_pf_to_dev(pf); in ice_reset_all_vfs()
1171 struct ice_hw *hw = &pf->hw; in ice_reset_all_vfs()
1176 if (!pf->num_alloc_vfs) in ice_reset_all_vfs()
1180 if (test_and_set_bit(__ICE_VF_DIS, pf->state)) in ice_reset_all_vfs()
1184 ice_for_each_vf(pf, v) in ice_reset_all_vfs()
1185 ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); in ice_reset_all_vfs()
1193 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { in ice_reset_all_vfs()
1195 while (v < pf->num_alloc_vfs) { in ice_reset_all_vfs()
1198 vf = &pf->vf[v]; in ice_reset_all_vfs()
1216 if (v < pf->num_alloc_vfs) in ice_reset_all_vfs()
1220 ice_for_each_vf(pf, v) { in ice_reset_all_vfs()
1221 vf = &pf->vf[v]; in ice_reset_all_vfs()
1229 clear_bit(__ICE_VF_DIS, pf->state); in ice_reset_all_vfs()
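
The loop at lines 1193-1216 is a bounded two-level poll: the outer counter allows at most 10 passes, and v advances only once the VF it points at reports reset-complete; if the bound expires with v still short of num_alloc_vfs (line 1216), the remaining VFs are treated as having timed out. A hedged sketch of the pattern (vf_reset_complete() is a hypothetical stand-in for the hardware status read):

    /* Hedged sketch of the bounded poll: advance v only when VF v has
     * finished resetting; give the whole set at most 10 passes.
     */
    static bool poll_all_vfs_reset(int num_alloc_vfs)
    {
            int i, v = 0;

            for (i = 0; i < 10 && v < num_alloc_vfs; i++) {
                    while (v < num_alloc_vfs) {
                            if (!vf_reset_complete(v)) {  /* hypothetical */
                                    usleep_range(10, 20); /* brief back-off */
                                    break;                /* retry VF v */
                            }
                            v++; /* VF v is done; move to the next one */
                    }
            }

            return v == num_alloc_vfs; /* false: at least one VF timed out */
    }
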
1238 * Returns true if the PF or VF is disabled, false otherwise.
1242 struct ice_pf *pf = vf->pf; in ice_is_vf_disabled() local
1244 /* If the PF has been disabled, there is no need to reset the VF until in ice_is_vf_disabled()
1245 * the PF is active again. Similarly, if the VF has been disabled, this in ice_is_vf_disabled()
1249 return (test_bit(__ICE_VF_DIS, pf->state) || in ice_is_vf_disabled()
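
The return at line 1249 is cut off by the matcher; per the comment it ORs the PF-wide disable bit with the VF's own disabled state. The second operand below is an assumption (not excerpted), though ICE_VF_STATE_DIS does appear at line 375 above:

    /* Hedged sketch: "disabled" means the whole PF is disabled or this
     * particular VF is; the VF-state test is assumed, not excerpted.
     */
    static bool vf_or_pf_disabled(struct ice_pf *pf, struct ice_vf *vf)
    {
            return test_bit(__ICE_VF_DIS, pf->state) ||
                   test_bit(ICE_VF_STATE_DIS, vf->vf_states);
    }
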
1263 struct ice_pf *pf = vf->pf; in ice_reset_vf() local
1272 dev = ice_pf_to_dev(pf); in ice_reset_vf()
1274 if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { in ice_reset_vf()
1290 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_reset_vf()
1301 hw = &pf->hw; in ice_reset_vf()
1336 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_reset_vf()
1349 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1350 * @pf: pointer to the PF structure
1352 void ice_vc_notify_link_state(struct ice_pf *pf) in ice_vc_notify_link_state() argument
1356 ice_for_each_vf(pf, i) in ice_vc_notify_link_state()
1357 ice_vc_notify_vf_link_state(&pf->vf[i]); in ice_vc_notify_link_state()
1362 * @pf: pointer to the PF structure
1364 * indicate a pending reset to all VFs on a given PF
1366 void ice_vc_notify_reset(struct ice_pf *pf) in ice_vc_notify_reset() argument
1370 if (!pf->num_alloc_vfs) in ice_vc_notify_reset()
1375 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, in ice_vc_notify_reset()
1386 struct ice_pf *pf; in ice_vc_notify_vf_reset() local
1391 pf = vf->pf; in ice_vc_notify_vf_reset()
1392 if (ice_validate_vf_id(pf, vf->vf_id)) in ice_vc_notify_vf_reset()
1405 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, in ice_vc_notify_vf_reset()
1419 struct ice_pf *pf = vf->pf; in ice_init_vf_vsi_res() local
1426 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); in ice_init_vf_vsi_res()
1428 dev = ice_pf_to_dev(pf); in ice_init_vf_vsi_res()
1460 * @pf: PF the VFs are associated with
1462 static int ice_start_vfs(struct ice_pf *pf) in ice_start_vfs() argument
1464 struct ice_hw *hw = &pf->hw; in ice_start_vfs()
1467 ice_for_each_vf(pf, i) { in ice_start_vfs()
1468 struct ice_vf *vf = &pf->vf[i]; in ice_start_vfs()
1474 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", in ice_start_vfs()
1489 struct ice_vf *vf = &pf->vf[i]; in ice_start_vfs()
1500 * @pf: PF holding reference to all VFs for default configuration
1502 static void ice_set_dflt_settings_vfs(struct ice_pf *pf) in ice_set_dflt_settings_vfs() argument
1506 ice_for_each_vf(pf, i) { in ice_set_dflt_settings_vfs()
1507 struct ice_vf *vf = &pf->vf[i]; in ice_set_dflt_settings_vfs()
1509 vf->pf = pf; in ice_set_dflt_settings_vfs()
1511 vf->vf_sw_id = pf->first_sw; in ice_set_dflt_settings_vfs()
1515 vf->num_vf_qs = pf->num_qps_per_vf; in ice_set_dflt_settings_vfs()
1520 * ice_alloc_vfs - allocate num_vfs in the PF structure
1521 * @pf: PF to store the allocated VFs in
1524 static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs) in ice_alloc_vfs() argument
1528 vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs), in ice_alloc_vfs()
1533 pf->vf = vfs; in ice_alloc_vfs()
1534 pf->num_alloc_vfs = num_vfs; in ice_alloc_vfs()
1541 * @pf: pointer to the PF structure
1544 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) in ice_ena_vfs() argument
1546 struct device *dev = ice_pf_to_dev(pf); in ice_ena_vfs()
1547 struct ice_hw *hw = &pf->hw; in ice_ena_vfs()
1551 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), in ice_ena_vfs()
1553 set_bit(__ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
1556 ret = pci_enable_sriov(pf->pdev, num_vfs); in ice_ena_vfs()
1558 pf->num_alloc_vfs = 0; in ice_ena_vfs()
1562 ret = ice_alloc_vfs(pf, num_vfs); in ice_ena_vfs()
1566 if (ice_set_per_vf_res(pf)) { in ice_ena_vfs()
1573 ice_set_dflt_settings_vfs(pf); in ice_ena_vfs()
1575 if (ice_start_vfs(pf)) { in ice_ena_vfs()
1581 clear_bit(__ICE_VF_DIS, pf->state); in ice_ena_vfs()
1585 devm_kfree(dev, pf->vf); in ice_ena_vfs()
1586 pf->vf = NULL; in ice_ena_vfs()
1587 pf->num_alloc_vfs = 0; in ice_ena_vfs()
1589 pci_disable_sriov(pf->pdev); in ice_ena_vfs()
1593 clear_bit(__ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
1599 * @pf: pointer to the PF structure
1604 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) in ice_pci_sriov_ena() argument
1606 int pre_existing_vfs = pci_num_vf(pf->pdev); in ice_pci_sriov_ena()
1607 struct device *dev = ice_pf_to_dev(pf); in ice_pci_sriov_ena()
1611 ice_free_vfs(pf); in ice_pci_sriov_ena()
1615 if (num_vfs > pf->num_vfs_supported) { in ice_pci_sriov_ena()
1617 num_vfs, pf->num_vfs_supported); in ice_pci_sriov_ena()
1622 err = ice_ena_vfs(pf, num_vfs); in ice_pci_sriov_ena()
1628 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_pci_sriov_ena()
1634 * @pf: PF to enable SR-IOV on in ice_check_sriov_allowed()
1636 static int ice_check_sriov_allowed(struct ice_pf *pf) in ice_check_sriov_allowed() argument
1638 struct device *dev = ice_pf_to_dev(pf); in ice_check_sriov_allowed()
1640 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { in ice_check_sriov_allowed()
1645 if (ice_is_safe_mode(pf)) { in ice_check_sriov_allowed()
1650 if (!ice_pf_state_is_nominal(pf)) { in ice_check_sriov_allowed()
1669 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_configure() local
1670 struct device *dev = ice_pf_to_dev(pf); in ice_sriov_configure()
1673 err = ice_check_sriov_allowed(pf); in ice_sriov_configure()
1679 ice_free_vfs(pf); in ice_sriov_configure()
1687 err = ice_pci_sriov_ena(pf, num_vfs); in ice_sriov_configure()
1696 * @pf: pointer to the PF structure
1701 void ice_process_vflr_event(struct ice_pf *pf) in ice_process_vflr_event() argument
1703 struct ice_hw *hw = &pf->hw; in ice_process_vflr_event()
1707 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || in ice_process_vflr_event()
1708 !pf->num_alloc_vfs) in ice_process_vflr_event()
1711 ice_for_each_vf(pf, vf_id) { in ice_process_vflr_event()
1712 struct ice_vf *vf = &pf->vf[vf_id]; in ice_process_vflr_event()
1736 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1737 * @pf: PF used to index all VFs
1738 * @pfq: queue index relative to the PF's function space
1743 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) in ice_get_vf_from_pfq() argument
1747 ice_for_each_vf(pf, vf_id) { in ice_get_vf_from_pfq()
1748 struct ice_vf *vf = &pf->vf[vf_id]; in ice_get_vf_from_pfq()
1752 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_get_vf_from_pfq()
1763 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1764 * @pf: PF used for conversion
1765 * @globalq: global queue index used to convert to PF space queue index
1767 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) in ice_globalq_to_pfq() argument
1769 return globalq - pf->hw.func_caps.common_cap.rxq_first_id; in ice_globalq_to_pfq()
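
Lines 1743-1752 and 1767-1769 form a two-step lookup: translate the device-global queue number into the PF's queue space by subtracting rxq_first_id, then walk every VF to find whose VSI owns that PF-space queue. A hedged sketch (vsi_owns_pfq() is a hypothetical stand-in for the driver's queue-map walk):

    /* Hedged sketch of the global-queue to owning-VF lookup. */
    static struct ice_vf *vf_from_globalq(struct ice_pf *pf, u32 globalq)
    {
            u32 pfq = globalq - pf->hw.func_caps.common_cap.rxq_first_id;
            u16 vf_id;

            ice_for_each_vf(pf, vf_id) {
                    struct ice_vf *vf = &pf->vf[vf_id];

                    /* hypothetical ownership test over the VF's VSI */
                    if (vsi_owns_pfq(pf->vsi[vf->lan_vsi_idx], pfq))
                            return vf;
            }

            return NULL; /* queue does not belong to any VF */
    }
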
1774 * @pf: PF that the LAN overflow event happened on
1782 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_vf_lan_overflow_event() argument
1788 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); in ice_vf_lan_overflow_event()
1794 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); in ice_vf_lan_overflow_event()
1817 struct ice_pf *pf; in ice_vc_send_msg_to_vf() local
1822 pf = vf->pf; in ice_vc_send_msg_to_vf()
1823 if (ice_validate_vf_id(pf, vf->vf_id)) in ice_vc_send_msg_to_vf()
1826 dev = ice_pf_to_dev(pf); in ice_vc_send_msg_to_vf()
1836 dev_err(dev, "Use PF Control I/F to enable the VF\n"); in ice_vc_send_msg_to_vf()
1846 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, in ice_vc_send_msg_to_vf()
1848 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { in ice_vc_send_msg_to_vf()
1851 ice_aq_str(pf->hw.mailboxq.sq_last_status)); in ice_vc_send_msg_to_vf()
1863 * called from the VF to request the API version used by the PF
1892 struct ice_pf *pf = vf->pf; in ice_vc_get_vf_res_msg() local
1897 if (ice_check_vf_init(pf, vf)) { in ice_vc_get_vf_res_msg()
1918 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_get_vf_res_msg()
1960 vfres->max_vectors = pf->num_msix_per_vf; in ice_vc_get_vf_res_msg()
1989 * unlike other virtchnl messages, PF driver
2000 * @pf: the PF structure to search for the VSI
2005 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) in ice_find_vsi_from_id() argument
2009 ice_for_each_vsi(pf, i) in ice_find_vsi_from_id()
2010 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) in ice_find_vsi_from_id()
2011 return pf->vsi[i]; in ice_find_vsi_from_id()
2025 struct ice_pf *pf = vf->pf; in ice_vc_isvalid_vsi_id() local
2028 vsi = ice_find_vsi_from_id(pf, vsi_id); in ice_vc_isvalid_vsi_id()
2043 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); in ice_vc_isvalid_q_id()
2075 struct ice_pf *pf = vf->pf; in ice_vc_config_rss_key() local
2093 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { in ice_vc_config_rss_key()
2098 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_config_rss_key()
2122 struct ice_pf *pf = vf->pf; in ice_vc_config_rss_lut() local
2140 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { in ice_vc_config_rss_lut()
2145 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_config_rss_lut()
2186 struct ice_pf *pf; in ice_check_vf_ready_for_cfg() local
2193 pf = vf->pf; in ice_check_vf_ready_for_cfg()
2194 if (ice_check_vf_init(pf, vf)) in ice_check_vf_ready_for_cfg()
2211 struct ice_pf *pf = np->vsi->back; in ice_set_vf_spoofchk() local
2219 dev = ice_pf_to_dev(pf); in ice_set_vf_spoofchk()
2220 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_spoofchk()
2223 vf = &pf->vf[vf_id]; in ice_set_vf_spoofchk()
2228 vf_vsi = pf->vsi[vf->lan_vsi_idx]; in ice_set_vf_spoofchk()
2264 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); in ice_set_vf_spoofchk()
2284 * @pf: PF structure for accessing VF(s)
2289 bool ice_is_any_vf_in_promisc(struct ice_pf *pf) in ice_is_any_vf_in_promisc() argument
2293 ice_for_each_vf(pf, vf_idx) { in ice_is_any_vf_in_promisc()
2294 struct ice_vf *vf = &pf->vf[vf_idx]; in ice_is_any_vf_in_promisc()
2317 struct ice_pf *pf = vf->pf; in ice_vc_cfg_promiscuous_mode_msg() local
2333 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_cfg_promiscuous_mode_msg()
2339 dev = ice_pf_to_dev(pf); in ice_vc_cfg_promiscuous_mode_msg()
2351 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); in ice_vc_cfg_promiscuous_mode_msg()
2377 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { in ice_vc_cfg_promiscuous_mode_msg()
2380 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw)) in ice_vc_cfg_promiscuous_mode_msg()
2384 ret = ice_set_dflt_vsi(pf->first_sw, vsi); in ice_vc_cfg_promiscuous_mode_msg()
2386 ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) in ice_vc_cfg_promiscuous_mode_msg()
2390 ret = ice_clear_dflt_vsi(pf->first_sw); in ice_vc_cfg_promiscuous_mode_msg()
2463 struct ice_pf *pf = vf->pf; in ice_vc_get_stats_msg() local
2476 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_get_stats_msg()
2511 * @q_idx: VF queue index used to determine the queue in the PF's space
2534 * @q_idx: VF queue index used to determine the queue in the PF's space
2566 struct ice_pf *pf = vf->pf; in ice_vc_ena_qs_msg() local
2586 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_ena_qs_msg()
2618 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_ena_qs_msg()
2657 struct ice_pf *pf = vf->pf; in ice_vc_dis_qs_msg() local
2678 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_dis_qs_msg()
2825 struct ice_pf *pf = vf->pf; in ice_vc_cfg_irq_map_msg() local
2837 pf->num_msix_per_vf < num_q_vectors_mapped || in ice_vc_cfg_irq_map_msg()
2843 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_cfg_irq_map_msg()
2859 if (!(vector_id < pf->num_msix_per_vf) || in ice_vc_cfg_irq_map_msg()
2906 struct ice_pf *pf = vf->pf; in ice_vc_cfg_qs_msg() local
2920 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_cfg_qs_msg()
2928 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", in ice_vc_cfg_qs_msg()
3031 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vc_add_mac_addr()
3055 * by the VF. The default LAN address is reported by the PF via in ice_vc_add_mac_addr()
3075 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vc_del_mac_addr()
3117 struct ice_pf *pf = vf->pf; in ice_vc_handle_mac_addr_msg() local
3142 …dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the… in ice_vc_handle_mac_addr_msg()
3148 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_handle_mac_addr_msg()
3206 * different number. If the request is successful, PF will reset the VF and
3207 * return 0. If unsuccessful, PF will send message informing VF of number of
3216 struct ice_pf *pf = vf->pf; in ice_vc_request_qs_msg() local
3222 dev = ice_pf_to_dev(pf); in ice_vc_request_qs_msg()
3229 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), in ice_vc_request_qs_msg()
3230 ice_get_avail_rxq_count(pf)); in ice_vc_request_qs_msg()
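
Per the doc excerpt at lines 3206-3207, a successful queue-count change resets the VF; the capacity check at lines 3229-3230 takes the smaller of the free Tx and Rx queue counts, since a queue pair needs one of each. A worked example (hypothetical numbers): with 40 free Tx queues and 32 free Rx queues, tx_rx_queue_left is 32, so a VF asking to grow by more than 32 pairs would be refused.
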
3274 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_port_vlan() local
3280 dev = ice_pf_to_dev(pf); in ice_set_vf_port_vlan()
3281 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_port_vlan()
3295 vf = &pf->vf[vf_id]; in ice_set_vf_port_vlan()
3345 struct ice_pf *pf = vf->pf; in ice_vc_process_vlan_msg() local
3354 dev = ice_pf_to_dev(pf); in ice_vc_process_vlan_msg()
3379 hw = &pf->hw; in ice_vc_process_vlan_msg()
3380 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_process_vlan_msg()
3403 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) in ice_vc_process_vlan_msg()
3460 * to PF for removal might be greater than number of VLANs in ice_vc_process_vlan_msg()
3547 struct ice_pf *pf = vf->pf; in ice_vc_ena_vlan_stripping() local
3560 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_ena_vlan_stripping()
3578 struct ice_pf *pf = vf->pf; in ice_vc_dis_vlan_stripping() local
3591 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_vc_dis_vlan_stripping()
3617 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; in ice_vf_init_vlan_stripping()
3634 * @pf: pointer to the PF structure
3640 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_vc_process_vf_msg() argument
3650 dev = ice_pf_to_dev(pf); in ice_vc_process_vf_msg()
3651 if (ice_validate_vf_id(pf, vf_id)) { in ice_vc_process_vf_msg()
3656 vf = &pf->vf[vf_id]; in ice_vc_process_vf_msg()
3755 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", in ice_vc_process_vf_msg()
3771 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_cfg() local
3774 if (ice_validate_vf_id(pf, vf_id)) in ice_get_vf_cfg()
3777 vf = &pf->vf[vf_id]; in ice_get_vf_cfg()
3779 if (ice_check_vf_init(pf, vf)) in ice_get_vf_cfg()
3803 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
3804 * @pf: PF used to reference the switch's rules
3809 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac) in ice_unicast_mac_exists() argument
3812 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC]; in ice_unicast_mac_exists()
3845 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_mac() local
3849 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_mac()
3857 vf = &pf->vf[vf_id]; in ice_set_vf_mac()
3866 if (ice_unicast_mac_exists(pf, mac)) { in ice_set_vf_mac()
3867 …netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MA… in ice_set_vf_mac()
3872 /* VF is notified of its new MAC via the PF's response to the in ice_set_vf_mac()
3882 /* PF will add MAC rule for the VF */ in ice_set_vf_mac()
3902 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_trust() local
3906 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_trust()
3909 vf = &pf->vf[vf_id]; in ice_set_vf_trust()
3920 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", in ice_set_vf_trust()
3936 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_link_state() local
3940 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_link_state()
3943 vf = &pf->vf[vf_id]; in ice_set_vf_link_state()
3971 * @netdev: the netdev of the PF
3978 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_stats() local
3984 if (ice_validate_vf_id(pf, vf_id)) in ice_get_vf_stats()
3987 vf = &pf->vf[vf_id]; in ice_get_vf_stats()
3992 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_get_vf_stats()
4021 struct ice_pf *pf = vf->pf; in ice_print_vf_rx_mdd_event() local
4024 dev = ice_pf_to_dev(pf); in ice_print_vf_rx_mdd_event()
4026 …dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-r… in ice_print_vf_rx_mdd_event()
4027 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, in ice_print_vf_rx_mdd_event()
4029 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) in ice_print_vf_rx_mdd_event()
4035 * @pf: pointer to the PF structure
4039 void ice_print_vfs_mdd_events(struct ice_pf *pf) in ice_print_vfs_mdd_events() argument
4041 struct device *dev = ice_pf_to_dev(pf); in ice_print_vfs_mdd_events()
4042 struct ice_hw *hw = &pf->hw; in ice_print_vfs_mdd_events()
4046 if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state)) in ice_print_vfs_mdd_events()
4050 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1)) in ice_print_vfs_mdd_events()
4053 pf->last_printed_mdd_jiffies = jiffies; in ice_print_vfs_mdd_events()
4055 ice_for_each_vf(pf, i) { in ice_print_vfs_mdd_events()
4056 struct ice_vf *vf = &pf->vf[i]; in ice_print_vfs_mdd_events()
4070 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", in ice_print_vfs_mdd_events()
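
Lines 4046-4053 gate the per-VF loop with a simple once-per-second rate limit: return if no event is pending, return if less than HZ jiffies have elapsed since the last print, otherwise stamp the current jiffies and continue. A hedged sketch of that gate:

    /* Hedged sketch of the print gate: report at most once per second,
     * and only when an MDD event is actually pending.
     */
    static bool should_print_mdd(struct ice_pf *pf)
    {
            if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
                    return false; /* nothing new to report */

            if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ))
                    return false; /* last print was under a second ago */

            pf->last_printed_mdd_jiffies = jiffies;
            return true;
    }
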
4078 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
4081 * Called when recovering from a PF FLR to restore interrupt capability to