Lines matching refs:p_vf (drivers/net/ethernet/qlogic/qed/qed_sriov.c)
53 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) in qed_vf_calculate_legacy() argument
57 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_vf_calculate_legacy()
61 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_vf_calculate_legacy()
69 static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) in qed_sp_vf_start() argument
80 init_data.opaque_fid = p_vf->opaque_fid; in qed_sp_vf_start()
91 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); in qed_sp_vf_start()
92 p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); in qed_sp_vf_start()
108 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; in qed_sp_vf_start()
114 p_vf->abs_vf_id, in qed_sp_vf_start()
125 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); in qed_sp_vf_start()
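
The fp_minor handling above (source lines 108-125) negotiates the fastpath HSI minor version during VF start. A minimal sketch of that clamping pattern, assuming simplified stand-ins for the driver's ETH_HSI_VER_* constants and its logging:

#include <stdio.h>

#define PF_HSI_MAJOR 3   /* stand-in for ETH_HSI_VER_MAJOR */
#define PF_HSI_MINOR 10  /* stand-in for the PF's supported minor */

static unsigned char negotiate_fp_minor(int abs_vf_id, unsigned char fp_minor)
{
	/* If the VF asked for a newer fastpath HSI minor than the PF
	 * supports, clamp to the PF's level and log the mismatch. */
	if (fp_minor > PF_HSI_MINOR) {
		printf("VF[%d] asked for %d.%d, clamping to %d.%d\n",
		       abs_vf_id, PF_HSI_MAJOR, fp_minor,
		       PF_HSI_MAJOR, PF_HSI_MINOR);
		fp_minor = PF_HSI_MINOR;
	}
	return fp_minor;
}

int main(void)
{
	printf("negotiated minor: %d\n", negotiate_fp_minor(5, 12));
	return 0;
}
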
222 struct qed_vf_info *p_vf, in qed_iov_validate_queue_mode() argument
235 p_qcid = &p_vf->vf_queues[qid].cids[i]; in qed_iov_validate_queue_mode()
251 struct qed_vf_info *p_vf, in qed_iov_validate_rxq() argument
255 if (rx_qid >= p_vf->num_rxqs) { in qed_iov_validate_rxq()
259 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); in qed_iov_validate_rxq()
263 return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false); in qed_iov_validate_rxq()
267 struct qed_vf_info *p_vf, in qed_iov_validate_txq() argument
271 if (tx_qid >= p_vf->num_txqs) { in qed_iov_validate_txq()
275 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); in qed_iov_validate_txq()
279 return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true); in qed_iov_validate_txq()
283 struct qed_vf_info *p_vf, u16 sb_idx) in qed_iov_validate_sb() argument
287 for (i = 0; i < p_vf->num_sbs; i++) in qed_iov_validate_sb()
288 if (p_vf->igu_sbs[i] == sb_idx) in qed_iov_validate_sb()
294 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); in qed_iov_validate_sb()
300 struct qed_vf_info *p_vf) in qed_iov_validate_active_rxq() argument
304 for (i = 0; i < p_vf->num_rxqs; i++) in qed_iov_validate_active_rxq()
305 if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, in qed_iov_validate_active_rxq()
314 struct qed_vf_info *p_vf) in qed_iov_validate_active_txq() argument
318 for (i = 0; i < p_vf->num_txqs; i++) in qed_iov_validate_active_txq()
319 if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, in qed_iov_validate_active_txq()
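
The qed_iov_validate_rxq/txq/sb helpers above all follow the same shape: bounds-check a VF-relative index against the per-VF resource count before consulting any per-queue state. A minimal standalone sketch of that pattern, with simplified stand-in types (vf_info, VF_MAX_QUEUES) rather than the driver's real structures:

#include <stdbool.h>
#include <stdio.h>

#define VF_MAX_QUEUES 16

struct vf_queue { bool rx_active, tx_active; };

struct vf_info {
	int abs_vf_id;
	int num_rxqs;
	struct vf_queue queues[VF_MAX_QUEUES];
};

static bool validate_rxq(const struct vf_info *vf, int rx_qid)
{
	/* Reject out-of-range indices before touching per-queue state. */
	if (rx_qid >= vf->num_rxqs) {
		fprintf(stderr, "VF[%d] bad rx_qid %d (has %d)\n",
			vf->abs_vf_id, rx_qid, vf->num_rxqs);
		return false;
	}
	/* Then check the queue is in the expected mode/state. */
	return vf->queues[rx_qid].rx_active;
}

int main(void)
{
	struct vf_info vf = { .abs_vf_id = 3, .num_rxqs = 4 };

	vf.queues[2].rx_active = true;
	printf("qid 2 valid: %d\n", validate_rxq(&vf, 2)); /* 1 */
	printf("qid 9 valid: %d\n", validate_rxq(&vf, 9)); /* 0, logs error */
	return 0;
}
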
333 struct qed_vf_info *p_vf; in qed_iov_post_vf_bulletin() local
335 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); in qed_iov_post_vf_bulletin()
336 if (!p_vf) in qed_iov_post_vf_bulletin()
339 if (!p_vf->vf_bulletin) in qed_iov_post_vf_bulletin()
342 p_bulletin = p_vf->bulletin.p_virt; in qed_iov_post_vf_bulletin()
347 p_vf->bulletin.size - crc_size); in qed_iov_post_vf_bulletin()
351 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); in qed_iov_post_vf_bulletin()
356 params.dst_vfid = p_vf->abs_vf_id; in qed_iov_post_vf_bulletin()
357 return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, in qed_iov_post_vf_bulletin()
358 p_vf->vf_bulletin, p_vf->bulletin.size / 4, in qed_iov_post_vf_bulletin()
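
qed_iov_post_vf_bulletin() above bumps the bulletin, checksums everything past the CRC field (source line 347 computes over bulletin.size - crc_size), and copies the board into VF-visible memory. A minimal sketch of that post sequence, using memcpy in place of qed_dmae_host2host() and a toy checksum standing in for the real CRC:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct bulletin {
	uint32_t crc;        /* covers everything after this field */
	uint32_t version;
	uint8_t  mac[6];
};

static uint32_t toy_csum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum = sum * 31 + *p++;   /* stand-in for crc32() */
	return sum;
}

static void post_bulletin(struct bulletin *pf_copy, struct bulletin *vf_view)
{
	const size_t crc_size = sizeof(pf_copy->crc);

	/* New version, checksum over the payload, then publish to the VF. */
	pf_copy->version++;
	pf_copy->crc = toy_csum((uint8_t *)pf_copy + crc_size,
				sizeof(*pf_copy) - crc_size);
	memcpy(vf_view, pf_copy, sizeof(*pf_copy)); /* DMAE in the driver */
}

int main(void)
{
	struct bulletin pf = { .mac = { 0, 1, 2, 3, 4, 5 } }, vf = { 0 };

	post_bulletin(&pf, &vf);
	printf("version %u crc 0x%08x\n", vf.version, vf.crc);
	return 0;
}
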
785 struct qed_vf_info *p_vf; in qed_iov_enable_vf_access_msix() local
787 p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true); in qed_iov_enable_vf_access_msix()
788 if (!p_vf) in qed_iov_enable_vf_access_msix()
791 current_max = max_t(u8, current_max, p_vf->num_sbs); in qed_iov_enable_vf_access_msix()
962 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, in qed_iov_set_link() local
967 if (!p_vf) in qed_iov_set_link()
970 p_bulletin = p_vf->bulletin.p_virt; in qed_iov_set_link()
1215 struct qed_vf_info *p_vf, in qed_iov_send_response() argument
1218 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_send_response()
1226 eng_vf_id = p_vf->abs_vf_id; in qed_iov_send_response()
1277 struct qed_vf_info *p_vf, in qed_iov_prep_vp_update_resp_tlvs() argument
1308 p_vf->relative_vf_id, in qed_iov_prep_vp_update_resp_tlvs()
1367 struct qed_vf_info *p_vf) in qed_iov_vf_cleanup() argument
1371 p_vf->vf_bulletin = 0; in qed_iov_vf_cleanup()
1372 p_vf->vport_instance = 0; in qed_iov_vf_cleanup()
1373 p_vf->configured_features = 0; in qed_iov_vf_cleanup()
1376 p_vf->num_rxqs = p_vf->num_sbs; in qed_iov_vf_cleanup()
1377 p_vf->num_txqs = p_vf->num_sbs; in qed_iov_vf_cleanup()
1379 p_vf->num_active_rxqs = 0; in qed_iov_vf_cleanup()
1382 struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; in qed_iov_vf_cleanup()
1394 memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); in qed_iov_vf_cleanup()
1395 memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); in qed_iov_vf_cleanup()
1396 qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); in qed_iov_vf_cleanup()
1413 struct qed_vf_info *p_vf, in qed_iov_vf_mbx_acquire_resc_cids() argument
1428 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_acquire_resc_cids()
1436 if (p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_acquire_resc_cids()
1455 struct qed_vf_info *p_vf, in qed_iov_vf_mbx_acquire_resc() argument
1462 p_resp->num_rxqs = p_vf->num_rxqs; in qed_iov_vf_mbx_acquire_resc()
1463 p_resp->num_txqs = p_vf->num_txqs; in qed_iov_vf_mbx_acquire_resc()
1464 p_resp->num_sbs = p_vf->num_sbs; in qed_iov_vf_mbx_acquire_resc()
1467 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; in qed_iov_vf_mbx_acquire_resc()
1475 qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, in qed_iov_vf_mbx_acquire_resc()
1481 p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, in qed_iov_vf_mbx_acquire_resc()
1483 p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, in qed_iov_vf_mbx_acquire_resc()
1486 qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); in qed_iov_vf_mbx_acquire_resc()
1504 p_vf->abs_vf_id, in qed_iov_vf_mbx_acquire_resc()
1522 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_iov_vf_mbx_acquire_resc()
1524 (p_vf->acquire.vfdev_info.os_type == in qed_iov_vf_mbx_acquire_resc()
1705 struct qed_vf_info *p_vf, bool val) in __qed_iov_spoofchk_set() argument
1710 if (val == p_vf->spoof_chk) { in __qed_iov_spoofchk_set()
1717 params.opaque_fid = p_vf->opaque_fid; in __qed_iov_spoofchk_set()
1718 params.vport_id = p_vf->vport_id; in __qed_iov_spoofchk_set()
1724 p_vf->spoof_chk = val; in __qed_iov_spoofchk_set()
1725 p_vf->req_spoofchk_val = p_vf->spoof_chk; in __qed_iov_spoofchk_set()
1731 val, p_vf->relative_vf_id); in __qed_iov_spoofchk_set()
1738 struct qed_vf_info *p_vf) in qed_iov_reconfigure_unicast_vlan() argument
1747 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_reconfigure_unicast_vlan()
1752 if (!p_vf->shadow_config.vlans[i].used) in qed_iov_reconfigure_unicast_vlan()
1756 filter.vlan = p_vf->shadow_config.vlans[i].vid; in qed_iov_reconfigure_unicast_vlan()
1759 filter.vlan, p_vf->relative_vf_id); in qed_iov_reconfigure_unicast_vlan()
1760 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_reconfigure_unicast_vlan()
1765 filter.vlan, p_vf->relative_vf_id); in qed_iov_reconfigure_unicast_vlan()
1775 struct qed_vf_info *p_vf, u64 events) in qed_iov_reconfigure_unicast_shadow() argument
1780 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) in qed_iov_reconfigure_unicast_shadow()
1781 rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); in qed_iov_reconfigure_unicast_shadow()
1787 struct qed_vf_info *p_vf, u64 events) in qed_iov_configure_vport_forced() argument
1792 if (!p_vf->vport_instance) in qed_iov_configure_vport_forced()
1796 p_vf->p_vf_info.is_trusted_configured) { in qed_iov_configure_vport_forced()
1805 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_configure_vport_forced()
1806 ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); in qed_iov_configure_vport_forced()
1808 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_configure_vport_forced()
1815 if (p_vf->p_vf_info.is_trusted_configured) in qed_iov_configure_vport_forced()
1816 p_vf->configured_features |= in qed_iov_configure_vport_forced()
1819 p_vf->configured_features |= in qed_iov_configure_vport_forced()
1832 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_configure_vport_forced()
1833 filter.vlan = p_vf->bulletin.p_virt->pvid; in qed_iov_configure_vport_forced()
1838 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_configure_vport_forced()
1848 vport_update.opaque_fid = p_vf->opaque_fid; in qed_iov_configure_vport_forced()
1849 vport_update.vport_id = p_vf->vport_id; in qed_iov_configure_vport_forced()
1857 : p_vf->shadow_config.inner_vlan_removal; in qed_iov_configure_vport_forced()
1871 struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; in qed_iov_configure_vport_forced()
1893 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; in qed_iov_configure_vport_forced()
1895 p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); in qed_iov_configure_vport_forced()
1902 qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); in qed_iov_configure_vport_forced()
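
qed_iov_configure_vport_forced() tracks which forced features are live via bits in configured_features (set on source line 1893, cleared on 1895). A minimal sketch of that bitmap bookkeeping, with the hardware-programming steps elided and the enum values as stand-ins:

#include <stdint.h>
#include <stdio.h>

enum { MAC_ADDR_FORCED, VLAN_ADDR_FORCED }; /* bit positions, stand-ins */

struct vf_state {
	uint64_t configured_features;
	uint16_t pvid;
};

static void configure_forced_vlan(struct vf_state *vf, uint16_t pvid)
{
	vf->pvid = pvid;
	/* ... program the VLAN filter and vport in hardware here ... */
	if (pvid)
		vf->configured_features |= 1ULL << VLAN_ADDR_FORCED;
	else
		vf->configured_features &= ~(1ULL << VLAN_ADDR_FORCED);
}

int main(void)
{
	struct vf_state vf = { 0 };

	configure_forced_vlan(&vf, 100);
	printf("features after set:   0x%llx\n",
	       (unsigned long long)vf.configured_features);
	configure_forced_vlan(&vf, 0);
	printf("features after clear: 0x%llx\n",
	       (unsigned long long)vf.configured_features);
	return 0;
}
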
2069 struct qed_vf_info *p_vf, bool b_is_tx) in qed_iov_vf_mbx_qid() argument
2071 struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_qid()
2075 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_qid()
2089 p_vf->relative_vf_id); in qed_iov_vf_mbx_qid()
2097 p_vf->relative_vf_id, p_qid_tlv->qid); in qed_iov_vf_mbx_qid()
2318 struct qed_vf_info *p_vf) in qed_iov_vf_mbx_update_tunn_param() argument
2321 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_update_tunn_param()
2397 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); in qed_iov_vf_mbx_update_tunn_param()
2402 struct qed_vf_info *p_vf, in qed_iov_vf_mbx_start_txq_resp() argument
2405 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_start_txq_resp()
2416 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_iov_vf_mbx_start_txq_resp()
2434 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); in qed_iov_vf_mbx_start_txq_resp()
2782 struct qed_vf_info *p_vf, in qed_iov_vp_update_vlan_param() argument
2793 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; in qed_iov_vp_update_vlan_param()
2796 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { in qed_iov_vp_update_vlan_param()
3115 struct qed_vf_info *p_vf, in qed_iov_vf_update_vlan_shadow() argument
3123 if (p_vf->shadow_config.vlans[i].used && in qed_iov_vf_update_vlan_shadow()
3124 p_vf->shadow_config.vlans[i].vid == in qed_iov_vf_update_vlan_shadow()
3126 p_vf->shadow_config.vlans[i].used = false; in qed_iov_vf_update_vlan_shadow()
3133 p_vf->relative_vf_id); in qed_iov_vf_update_vlan_shadow()
3139 p_vf->shadow_config.vlans[i].used = false; in qed_iov_vf_update_vlan_shadow()
3145 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) in qed_iov_vf_update_vlan_shadow()
3151 if (p_vf->shadow_config.vlans[i].used) in qed_iov_vf_update_vlan_shadow()
3154 p_vf->shadow_config.vlans[i].used = true; in qed_iov_vf_update_vlan_shadow()
3155 p_vf->shadow_config.vlans[i].vid = p_params->vlan; in qed_iov_vf_update_vlan_shadow()
3163 p_vf->relative_vf_id, in qed_iov_vf_update_vlan_shadow()
3173 struct qed_vf_info *p_vf, in qed_iov_vf_update_mac_shadow() argument
3179 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) in qed_iov_vf_update_mac_shadow()
3183 if (p_vf->p_vf_info.is_trusted_configured) in qed_iov_vf_update_mac_shadow()
3189 if (ether_addr_equal(p_vf->shadow_config.macs[i], in qed_iov_vf_update_mac_shadow()
3191 eth_zero_addr(p_vf->shadow_config.macs[i]); in qed_iov_vf_update_mac_shadow()
3204 eth_zero_addr(p_vf->shadow_config.macs[i]); in qed_iov_vf_update_mac_shadow()
3213 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { in qed_iov_vf_update_mac_shadow()
3214 ether_addr_copy(p_vf->shadow_config.macs[i], in qed_iov_vf_update_mac_shadow()
3232 struct qed_vf_info *p_vf, in qed_iov_vf_update_unicast_shadow() argument
3238 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); in qed_iov_vf_update_unicast_shadow()
3244 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); in qed_iov_vf_update_unicast_shadow()
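
The qed_iov_vf_update_{mac,vlan}_shadow() helpers keep a per-VF shadow table: removals zero the matching slot, additions claim the first free one, and a full table becomes a failure the caller can report back. A minimal sketch of that bookkeeping with stand-in table sizes:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SHADOW_MACS 4
#define ETH_ALEN 6

struct shadow { uint8_t macs[SHADOW_MACS][ETH_ALEN]; };

static bool mac_is_zero(const uint8_t *m)
{
	static const uint8_t zero[ETH_ALEN];

	return memcmp(m, zero, ETH_ALEN) == 0;
}

static int shadow_add_mac(struct shadow *s, const uint8_t *mac)
{
	/* Additions take the first free (all-zero) slot. */
	for (int i = 0; i < SHADOW_MACS; i++)
		if (mac_is_zero(s->macs[i])) {
			memcpy(s->macs[i], mac, ETH_ALEN);
			return 0;
		}
	return -1; /* table full: no more MAC filters for this VF */
}

static void shadow_del_mac(struct shadow *s, const uint8_t *mac)
{
	/* Removals zero the matching slot so it can be reused. */
	for (int i = 0; i < SHADOW_MACS; i++)
		if (memcmp(s->macs[i], mac, ETH_ALEN) == 0)
			memset(s->macs[i], 0, ETH_ALEN);
}

int main(void)
{
	struct shadow s = { 0 };
	uint8_t mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };

	printf("add: %d\n", shadow_add_mac(&s, mac)); /* 0 */
	shadow_del_mac(&s, mac);
	printf("re-add: %d\n", shadow_add_mac(&s, mac)); /* 0 again */
	return 0;
}
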
3398 struct qed_vf_info *p_vf) in qed_iov_vf_mbx_release() argument
3404 qed_iov_vf_cleanup(p_hwfn, p_vf); in qed_iov_vf_mbx_release()
3406 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { in qed_iov_vf_mbx_release()
3408 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, in qed_iov_vf_mbx_release()
3409 p_vf->opaque_fid); in qed_iov_vf_mbx_release()
3417 p_vf->state = VF_STOPPED; in qed_iov_vf_mbx_release()
3420 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, in qed_iov_vf_mbx_release()
3426 struct qed_vf_info *p_vf) in qed_iov_vf_pf_get_coalesce() argument
3428 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_pf_get_coalesce()
3445 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, in qed_iov_vf_pf_get_coalesce()
3449 p_vf->abs_vf_id, qid); in qed_iov_vf_pf_get_coalesce()
3453 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); in qed_iov_vf_pf_get_coalesce()
3458 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, in qed_iov_vf_pf_get_coalesce()
3462 p_vf->abs_vf_id, qid); in qed_iov_vf_pf_get_coalesce()
3466 p_queue = &p_vf->vf_queues[qid]; in qed_iov_vf_pf_get_coalesce()
3490 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); in qed_iov_vf_pf_get_coalesce()
3577 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) in qed_iov_vf_flr_poll_dorq() argument
3582 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); in qed_iov_vf_flr_poll_dorq()
3595 p_vf->abs_vf_id, val); in qed_iov_vf_flr_poll_dorq()
3604 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) in qed_iov_vf_flr_poll_pbf() argument
3643 p_vf->abs_vf_id, i); in qed_iov_vf_flr_poll_pbf()
3651 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) in qed_iov_vf_flr_poll() argument
3655 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); in qed_iov_vf_flr_poll()
3659 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); in qed_iov_vf_flr_poll()
3671 struct qed_vf_info *p_vf; in qed_iov_execute_vf_flr_cleanup() local
3674 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); in qed_iov_execute_vf_flr_cleanup()
3675 if (!p_vf) in qed_iov_execute_vf_flr_cleanup()
3680 u16 vfid = p_vf->abs_vf_id; in qed_iov_execute_vf_flr_cleanup()
3685 qed_iov_vf_cleanup(p_hwfn, p_vf); in qed_iov_execute_vf_flr_cleanup()
3688 if (!p_vf->b_init) in qed_iov_execute_vf_flr_cleanup()
3691 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); in qed_iov_execute_vf_flr_cleanup()
3711 p_vf->state = VF_STOPPED; in qed_iov_execute_vf_flr_cleanup()
3713 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); in qed_iov_execute_vf_flr_cleanup()
3721 if (p_vf->state == VF_RESET) in qed_iov_execute_vf_flr_cleanup()
3722 p_vf->state = VF_STOPPED; in qed_iov_execute_vf_flr_cleanup()
3726 p_vf->vf_mbx.b_pending_msg = false; in qed_iov_execute_vf_flr_cleanup()
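
qed_iov_vf_flr_poll_dorq()/poll_pbf() wait for the VF's hardware queues to quiesce before the FLR cleanup above proceeds. A minimal sketch of that bounded-poll shape, with the register read faked by a decrementing counter; everything here is a stand-in:

#include <stdio.h>

#define FLR_POLL_CNT 4

static int fake_usage_counter = 2; /* pretend outstanding DORQ entries */

static unsigned int read_usage_cnt(void)
{
	/* Stand-in for reading the usage counter while pretending to be
	 * the VF (qed_fid_pretend + register read in the driver). */
	return fake_usage_counter ? fake_usage_counter-- : 0;
}

static int flr_poll_dorq(int abs_vf_id)
{
	for (int i = 0; i < FLR_POLL_CNT; i++) {
		unsigned int val = read_usage_cnt();

		if (!val)
			return 0;   /* quiesced: safe to finish cleanup */
		/* the real code sleeps between reads */
	}
	fprintf(stderr, "VF[%d] - DORQ failed to quiesce\n", abs_vf_id);
	return -1;
}

int main(void)
{
	printf("poll result: %d\n", flr_poll_dorq(7));
	return 0;
}
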
3772 struct qed_vf_info *p_vf; in qed_iov_mark_vf_flr() local
3775 p_vf = qed_iov_get_vf_info(p_hwfn, i, false); in qed_iov_mark_vf_flr()
3776 if (!p_vf) in qed_iov_mark_vf_flr()
3779 vfid = p_vf->abs_vf_id; in qed_iov_mark_vf_flr()
3782 u16 rel_vf_id = p_vf->relative_vf_id; in qed_iov_mark_vf_flr()
3788 p_vf->state = VF_RESET; in qed_iov_mark_vf_flr()
3809 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, in qed_iov_get_link() local
3814 if (!p_vf) in qed_iov_get_link()
3817 p_bulletin = p_vf->bulletin.p_virt; in qed_iov_get_link()
3830 struct qed_vf_info *p_vf) in qed_iov_vf_pf_bulletin_update_mac() argument
3832 struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; in qed_iov_vf_pf_bulletin_update_mac()
3833 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_pf_bulletin_update_mac()
3838 if (!p_vf->p_vf_info.is_trusted_configured) { in qed_iov_vf_pf_bulletin_update_mac()
3842 p_vf->abs_vf_id); in qed_iov_vf_pf_bulletin_update_mac()
3852 p_vf->abs_vf_id, p_req->mac); in qed_iov_vf_pf_bulletin_update_mac()
3855 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, in qed_iov_vf_pf_bulletin_update_mac()
3865 struct qed_vf_info *p_vf; in qed_iov_process_mbx_req() local
3867 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); in qed_iov_process_mbx_req()
3868 if (!p_vf) in qed_iov_process_mbx_req()
3871 mbx = &p_vf->vf_mbx; in qed_iov_process_mbx_req()
3877 p_vf->abs_vf_id); in qed_iov_process_mbx_req()
3886 p_vf->abs_vf_id, mbx->first_tlv.tl.type); in qed_iov_process_mbx_req()
3890 !p_vf->b_malicious) { in qed_iov_process_mbx_req()
3893 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3896 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3899 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3902 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3905 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3908 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3911 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3914 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3917 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3920 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3923 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3926 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3929 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3932 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3935 qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3938 qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3941 qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3947 p_vf->abs_vf_id, mbx->first_tlv.tl.type); in qed_iov_process_mbx_req()
3949 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, in qed_iov_process_mbx_req()
3962 p_vf->abs_vf_id, in qed_iov_process_mbx_req()
3970 if (p_vf->acquire.first_tlv.reply_address && in qed_iov_process_mbx_req()
3972 p_vf->acquire.first_tlv.reply_address)) { in qed_iov_process_mbx_req()
3973 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, in qed_iov_process_mbx_req()
3981 p_vf->abs_vf_id); in qed_iov_process_mbx_req()
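
qed_iov_process_mbx_req() above is a dispatch switch on the first TLV's type (source lines 3890-3941), with unknown types answered by a generic not-supported response so the VF does not block waiting for a reply. A minimal sketch of that dispatch, with stand-in TLV values and handlers:

#include <stdio.h>

enum tlv_type { TLV_ACQUIRE = 1, TLV_VPORT_START, TLV_RELEASE };

static void handle_acquire(int vf) { printf("VF[%d] acquire\n", vf); }
static void handle_vport(int vf)   { printf("VF[%d] vport start\n", vf); }
static void handle_release(int vf) { printf("VF[%d] release\n", vf); }

static void process_mbx(int vf, enum tlv_type type)
{
	switch (type) {
	case TLV_ACQUIRE:     handle_acquire(vf); break;
	case TLV_VPORT_START: handle_vport(vf);   break;
	case TLV_RELEASE:     handle_release(vf); break;
	default:
		/* Unknown/unsupported request: NACK it so the VF unblocks. */
		fprintf(stderr, "VF[%d] unknown TLV %d\n", vf, type);
	}
}

int main(void)
{
	process_mbx(0, TLV_ACQUIRE);
	process_mbx(0, (enum tlv_type)99);
	return 0;
}
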
3993 struct qed_vf_info *p_vf; in qed_iov_pf_get_pending_events() local
3995 p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; in qed_iov_pf_get_pending_events()
3996 if (p_vf->vf_mbx.b_pending_msg) in qed_iov_pf_get_pending_events()
4020 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, in qed_sriov_vfpf_msg() local
4023 if (!p_vf) in qed_sriov_vfpf_msg()
4029 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; in qed_sriov_vfpf_msg()
4032 p_vf->vf_mbx.b_pending_msg = true; in qed_sriov_vfpf_msg()
4041 struct qed_vf_info *p_vf; in qed_sriov_vfpf_malicious() local
4043 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); in qed_sriov_vfpf_malicious()
4045 if (!p_vf) in qed_sriov_vfpf_malicious()
4048 if (!p_vf->b_malicious) { in qed_sriov_vfpf_malicious()
4051 p_vf->abs_vf_id, p_data->err_id); in qed_sriov_vfpf_malicious()
4053 p_vf->b_malicious = true; in qed_sriov_vfpf_malicious()
4057 p_vf->abs_vf_id, p_data->err_id); in qed_sriov_vfpf_malicious()
4311 struct qed_vf_info *p_vf; in qed_iov_bulletin_get_mac() local
4313 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); in qed_iov_bulletin_get_mac()
4314 if (!p_vf || !p_vf->bulletin.p_virt) in qed_iov_bulletin_get_mac()
4317 if (!(p_vf->bulletin.p_virt->valid_bitmap & in qed_iov_bulletin_get_mac()
4321 return p_vf->bulletin.p_virt->mac; in qed_iov_bulletin_get_mac()
4327 struct qed_vf_info *p_vf; in qed_iov_bulletin_get_forced_mac() local
4329 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); in qed_iov_bulletin_get_forced_mac()
4330 if (!p_vf || !p_vf->bulletin.p_virt) in qed_iov_bulletin_get_forced_mac()
4333 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) in qed_iov_bulletin_get_forced_mac()
4336 return p_vf->bulletin.p_virt->mac; in qed_iov_bulletin_get_forced_mac()
4342 struct qed_vf_info *p_vf; in qed_iov_bulletin_get_forced_vlan() local
4344 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); in qed_iov_bulletin_get_forced_vlan()
4345 if (!p_vf || !p_vf->bulletin.p_virt) in qed_iov_bulletin_get_forced_vlan()
4348 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) in qed_iov_bulletin_get_forced_vlan()
4351 return p_vf->bulletin.p_virt->pvid; in qed_iov_bulletin_get_forced_vlan()
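
The bulletin getters above only return a field when its bit is set in the bulletin's valid_bitmap (MAC_ADDR_FORCED, VLAN_ADDR_FORCED). A minimal sketch of that gating, with the bit positions and field layout as stand-ins:

#include <stdint.h>
#include <stdio.h>

enum { MAC_ADDR_FORCED, VLAN_ADDR_FORCED }; /* bit positions, stand-ins */

struct bulletin {
	uint64_t valid_bitmap;
	uint8_t  mac[6];
	uint16_t pvid;
};

static const uint8_t *get_forced_mac(const struct bulletin *b)
{
	/* Field is meaningless unless its valid bit is set. */
	if (!(b->valid_bitmap & (1ULL << MAC_ADDR_FORCED)))
		return NULL;
	return b->mac;
}

static uint16_t get_forced_vlan(const struct bulletin *b)
{
	if (!(b->valid_bitmap & (1ULL << VLAN_ADDR_FORCED)))
		return 0;
	return b->pvid;
}

int main(void)
{
	struct bulletin b = { .pvid = 100 };

	printf("vlan before: %u\n", get_forced_vlan(&b)); /* 0: not valid */
	b.valid_bitmap |= 1ULL << VLAN_ADDR_FORCED;
	printf("vlan after:  %u\n", get_forced_vlan(&b)); /* 100 */
	printf("mac set: %s\n", get_forced_mac(&b) ? "yes" : "no"); /* no */
	return 0;
}
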